1
0
Mirror of https://github.com/rcore-os/rCore.git (synced 2024-11-23 00:16:17 +04:00)

Update for the new paging mechanism on RISC-V. Move the kernel VA base to 0x(FFFFFFFF)C0000000.

This commit is contained in:
WangRunji 2019-01-31 18:49:29 +08:00
parent 1ceb7ae12d
commit fc8888db72
11 changed files with 37 additions and 141 deletions

View File

@ -3,7 +3,7 @@ use alloc::alloc::{Layout, GlobalAlloc};
use core::marker::PhantomData;
pub trait NoMMUSupport {
type Alloc: GlobalAlloc;
type Alloc: GlobalAlloc + 'static;
fn allocator() -> &'static Self::Alloc;
}

2
kernel/Cargo.lock generated
View File

@ -307,7 +307,7 @@ dependencies = [
[[package]]
name = "riscv"
version = "0.3.0"
source = "git+https://github.com/riscv-and-rust-and-decaf/riscv#f7bea54d7f254b63b5a6130285ab421c23d2f3bd"
source = "git+https://github.com/riscv-and-rust-and-decaf/riscv#f6f475e35b36717cf4455e298e4468773a57eb84"
dependencies = [
"bare-metal 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",

View File

@ -553,7 +553,7 @@ fi
CNAME=riscv
if ! [[ -f ${OUTDIR}/${CNAME}.o ]]
then
rustc --crate-name riscv $CARGO_PATH/git/checkouts/riscv-1e845b622ce46f1d/f7bea54*/src/lib.rs \
rustc --crate-name riscv $CARGO_PATH/git/checkouts/riscv-1e845b622ce46f1d/f6f475e*/src/lib.rs \
--color always --crate-type lib --emit=metadata,llvm-bc \
-C opt-level=1 \
-C debuginfo=2 \

View File

@ -6,7 +6,7 @@
OUTPUT_ARCH(riscv)
ENTRY(_start)
BASE_ADDRESS = 0xffffffff80020000;
BASE_ADDRESS = 0xffffffffc0020000;
SECTIONS
{

View File

@ -6,11 +6,11 @@
OUTPUT_ARCH(riscv)
ENTRY(_start)
BASE_ADDRESS = 0x80020000;
BASE_ADDRESS = 0xC0020000;
SECTIONS
{
. = 0x80000000;
. = 0xC0000000;
.boot : {
KEEP(*(.text.boot))
}

View File

@ -6,7 +6,7 @@
OUTPUT_ARCH(riscv)
ENTRY(_start)
BASE_ADDRESS = 0xffffffff80020000;
BASE_ADDRESS = 0xffffffffc0020000;
SECTIONS
{

View File

@ -4,5 +4,5 @@
#[no_mangle]
pub extern fn abort() {
loop {}
panic!("abort");
}

View File

@ -1,25 +1,26 @@
// Physical address available on THINPAD:
// [0x80000000, 0x80800000]
#[cfg(target_arch = "riscv32")]
pub const RECURSIVE_INDEX: usize = 0x3fe;
pub const RECURSIVE_INDEX: usize = 0x3fd;
#[cfg(target_arch = "riscv64")]
pub const RECURSIVE_INDEX: usize = 0x1fd;
pub const RECURSIVE_INDEX: usize = 0o774;
// Under riscv64, upon booting, paging is enabled by bbl and
// root_table[0777] maps to p3_table,
// and p3_table[0776] maps to gigapage 8000_0000H,
// and p3_table[0777] maps to gigapage 8000_0000H,
// so 0xFFFF_FFFF_8000_0000 maps to 0x8000_0000
// root_table[0775] points to root_table itself as page table
// root_table[0776] points to root_table itself as leaf page
// root_table[0774] points to root_table itself as page table
// root_table[0775] points to root_table itself as leaf page
// root_table[0776] points to a temp page table as leaf page
#[cfg(target_arch = "riscv32")]
pub const KERN_VA_BASE: usize = 0;
pub const KERN_VA_BASE: usize = 0xC000_0000;
#[cfg(target_arch = "riscv64")]
pub const KERN_VA_BASE: usize = 0xFFFF_FFFF_0000_0000;
pub const KERN_VA_BASE: usize = 0xFFFF_FFFF_C000_0000;
#[cfg(target_arch = "riscv32")]
pub const KERNEL_P2_INDEX: usize = 0x8000_0000 >> 12 >> 10;
pub const KERNEL_P2_INDEX: usize = (KERN_VA_BASE >> 12 >> 10) & 0x3ff;
#[cfg(target_arch = "riscv64")]
pub const KERNEL_P4_INDEX: usize = 0x0000_FFFF_8000_0000 >> 12 >> 9 >> 9 >> 9;
pub const KERNEL_P4_INDEX: usize = (KERN_VA_BASE >> 12 >> 9 >> 9 >> 9) & 0o777;
#[cfg(feature = "board_k210")]
pub const KERNEL_HEAP_SIZE: usize = 0x0010_0000;
@ -40,8 +41,9 @@ pub const MEMORY_END: usize = 0x4060_0000;
#[cfg(all(target_arch = "riscv64", not(feature = "board_k210")))]
pub const MEMORY_END: usize = 0x8100_0000;
pub const USER_STACK_OFFSET: usize = 0x70000000;
// FIXME: rv64 `sh` and `ls` will crash if stack top > 0x80000000 ???
pub const USER_STACK_OFFSET: usize = 0x80000000 - USER_STACK_SIZE;
pub const USER_STACK_SIZE: usize = 0x10000;
pub const USER32_STACK_OFFSET: usize = USER_STACK_OFFSET;
pub const USER32_STACK_OFFSET: usize = 0xC0000000 - USER_STACK_SIZE;
pub const MAX_DTB_SIZE: usize = 0x2000;

View File

@ -36,7 +36,7 @@ pub fn init(dtb: usize) {
pub fn init_other() {
unsafe {
sstatus::set_sum(); // Allow user memory access
asm!("csrw 0x180, $0; sfence.vma" :: "r"(SATP) :: "volatile");
asm!("csrw satp, $0; sfence.vma" :: "r"(SATP) :: "volatile");
}
}
@ -49,7 +49,7 @@ fn init_frame_allocator() {
use core::ops::Range;
let mut ba = FRAME_ALLOCATOR.lock();
let range = to_range((end as usize) - KERN_VA_BASE + PAGE_SIZE, MEMORY_END);
let range = to_range((end as usize) - KERN_VA_BASE + MEMORY_OFFSET + PAGE_SIZE, MEMORY_END);
ba.insert(range);
/*
@ -72,7 +72,7 @@ fn init_frame_allocator() {
/// Remap the kernel memory address with 4K page recorded in p1 page table
#[cfg(not(feature = "no_mmu"))]
fn remap_the_kernel(dtb: usize) {
let offset = -(super::consts::KERN_VA_BASE as isize);
let offset = -(KERN_VA_BASE as isize - MEMORY_OFFSET as isize);
let mut ms = MemorySet::new_bare();
ms.push(stext as usize, etext as usize, Linear::new(offset, MemoryAttr::default().execute().readonly()), "text");
ms.push(sdata as usize, edata as usize, Linear::new(offset, MemoryAttr::default()), "data");

View File

@ -17,18 +17,10 @@ pub struct ActivePageTable(RecursivePageTable<'static>, PageEntry);
/// PageTableEntry: the contents of this entry.
/// Page: this entry is the pte of page `Page`.
pub struct PageEntry(PageTableEntry, Page);
pub struct PageEntry(&'static mut PageTableEntry, Page);
impl PageTable for ActivePageTable {
/*
* @param:
* addr: the virtual addr to be matched
* target: the physical addr to be matched with addr
* @brief:
* map the virtual address 'addr' to the physical address 'target' in pagetable.
* @retval:
* the matched PageEntry
*/
fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
// use riscv::paging:Mapper::map_to,
// map the 4K `page` to the 4K `frame` with `flags`
@ -41,128 +33,31 @@ impl PageTable for ActivePageTable {
self.get_entry(addr).expect("fail to get entry")
}
/*
* @param:
* addr: virtual address of which the mapped physical frame should be unmapped
* @brief:
*   unmap the virtual address's mapped physical frame
*/
fn unmap(&mut self, addr: usize) {
let page = Page::of_addr(VirtAddr::new(addr));
let (_, flush) = self.0.unmap(page).unwrap();
flush.flush();
}
/*
* @param:
* addr: input virtual address
* @brief:
* get the pageEntry of 'addr'
* @retval:
* a mutable PageEntry reference of 'addr'
*/
#[cfg(target_arch = "riscv32")]
fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> {
let p2_table = unsafe { ROOT_PAGE_TABLE.as_mut().unwrap() };
let page = Page::of_addr(VirtAddr::new(vaddr));
if !p2_table[page.p2_index()].flags().contains(EF::VALID) {
return None;
if let Ok(e) = self.0.ref_entry(page.clone()) {
let e = unsafe { &mut *(e as *mut PageTableEntry) };
self.1 = PageEntry(e, page);
Some(&mut self.1 as &mut Entry)
} else {
None
}
let entry = edit_entry_of(&page, |entry| *entry);
self.1 = PageEntry(entry, page);
Some(&mut self.1)
}
/*
* @param:
* addr: input virtual address
* @brief:
* get the pageEntry of 'addr'
* @retval:
* a mutable PageEntry reference of 'addr'
*/
#[cfg(target_arch = "riscv64")]
fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> {
let vaddr = VirtAddr::new(vaddr);
let page = Page::of_addr(vaddr);
if ! self.0.is_mapped(
vaddr.p4_index(), vaddr.p3_index(), vaddr.p2_index(), vaddr.p1_index()) {
return None;
}
let entry = edit_entry_of(&page, |entry| *entry);
self.1 = PageEntry(entry, page);
Some(&mut self.1)
}
}
impl PageTableExt for ActivePageTable {}
#[cfg(target_arch = "riscv32")]
fn edit_entry_of<T>(page: &Page, f: impl FnOnce(&mut PageTableEntry) -> T) -> T {
let p2_table = unsafe { ROOT_PAGE_TABLE.as_mut().unwrap() };
let p1_table = unsafe {
&mut *(Page::from_page_table_indices(RECURSIVE_INDEX, page.p2_index()).
start_address().as_usize() as *mut RvPageTable)
};
let p2_flags = p2_table[page.p2_index()].flags_mut();
p2_flags.insert(EF::READABLE | EF::WRITABLE);
let ret = f(&mut p1_table[page.p1_index()]);
p2_flags.remove(EF::READABLE | EF::WRITABLE);
ret
}
// TODO: improve this goofy flag-toggling design
#[cfg(target_arch = "riscv64")]
fn edit_entry_of<T>(page: &Page, f: impl FnOnce(&mut PageTableEntry) -> T) -> T {
let p4_table = unsafe { ROOT_PAGE_TABLE.as_mut().unwrap() };
let p3_table = unsafe {
&mut *(Page::from_page_table_indices(
RECURSIVE_INDEX, RECURSIVE_INDEX, RECURSIVE_INDEX,
page.p4_index()).start_address().as_usize() as *mut RvPageTable)
};
let p2_table = unsafe {
&mut *(Page::from_page_table_indices(
RECURSIVE_INDEX, RECURSIVE_INDEX, page.p4_index(),
page.p3_index()).start_address().as_usize() as *mut RvPageTable)
};
let p1_table = unsafe {
&mut *(Page::from_page_table_indices(
RECURSIVE_INDEX, page.p4_index(), page.p3_index(),
page.p2_index()).start_address().as_usize() as *mut RvPageTable)
};
let p4_flags = p4_table[page.p4_index()].flags_mut();
let p3_flags = p3_table[page.p3_index()].flags_mut();
let p2_flags = p2_table[page.p2_index()].flags_mut();
p4_flags.insert(EF::READABLE | EF::WRITABLE) ; sfence_vma_all();
p3_flags.insert(EF::READABLE | EF::WRITABLE) ; sfence_vma_all();
p4_flags.remove(EF::READABLE | EF::WRITABLE) ; sfence_vma_all();
p2_flags.insert(EF::READABLE | EF::WRITABLE) ; sfence_vma_all();
p4_flags.insert(EF::READABLE | EF::WRITABLE) ; sfence_vma_all();
p3_flags.remove(EF::READABLE | EF::WRITABLE) ; sfence_vma_all();
p4_flags.remove(EF::READABLE | EF::WRITABLE) ; sfence_vma_all();
let ret = f(&mut p1_table[page.p1_index()]) ;
p4_flags.insert(EF::READABLE | EF::WRITABLE) ; sfence_vma_all();
p3_flags.insert(EF::READABLE | EF::WRITABLE) ; sfence_vma_all();
p4_flags.remove(EF::READABLE | EF::WRITABLE) ; sfence_vma_all();
p2_flags.remove(EF::READABLE | EF::WRITABLE) ; sfence_vma_all();
p4_flags.insert(EF::READABLE | EF::WRITABLE) ; sfence_vma_all();
p3_flags.remove(EF::READABLE | EF::WRITABLE) ; sfence_vma_all();
p4_flags.remove(EF::READABLE | EF::WRITABLE) ; sfence_vma_all();
ret
}
// Define ROOT_PAGE_TABLE: the virtual address at which the root page table is reachable through the recursive mapping.
#[cfg(target_arch = "riscv32")]
const ROOT_PAGE_TABLE: *mut RvPageTable =
(((RECURSIVE_INDEX << 10) | (RECURSIVE_INDEX + 1)) << 12) as *mut RvPageTable;
((RECURSIVE_INDEX << 12 << 10) |
((RECURSIVE_INDEX+1) << 12)) as *mut RvPageTable;
#[cfg(target_arch = "riscv64")]
const ROOT_PAGE_TABLE: *mut RvPageTable =
((0xFFFF_0000_0000_0000) |
@ -175,7 +70,7 @@ impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(
RecursivePageTable::new(&mut *ROOT_PAGE_TABLE).unwrap(),
::core::mem::zeroed()
::core::mem::uninitialized()
)
}
}
@ -183,7 +78,6 @@ impl ActivePageTable {
/// implementation for the Entry trait in /crate/memory/src/paging/mod.rs
impl Entry for PageEntry {
fn update(&mut self) {
edit_entry_of(&self.1, |entry| *entry = self.0);
sfence_vma(0, self.1.start_address());
}
fn accessed(&self) -> bool { self.0.flags().contains(EF::ACCESSED) }
@ -288,7 +182,7 @@ impl InactivePageTable for InactivePageTable0 {
}
unsafe fn set_token(token: usize) {
asm!("csrw 0x180, $0" :: "r"(token) :: "volatile");
asm!("csrw satp, $0" :: "r"(token) :: "volatile");
}
fn active_token() -> usize {

@ -1 +1 @@
Subproject commit f4ec45a908005331149af5816247dcb89fbd4654
Subproject commit e309fff977870e7596f87788c891dfd393fd5296