
Use PageTable interface in OS

WangRunji 2018-06-23 19:11:41 +08:00
parent f500086b9e
commit ade0f0110f
10 changed files with 161 additions and 67 deletions

View File

@@ -36,6 +36,7 @@ uart_16550 = "0.1"
lazy_static = { version = "1.0.0", features = ["spin_no_std"] }
simple-filesystem = { git = "https://github.com/wangrunji0408/SimpleFileSystem-Rust" }
bit-allocator = { path = "crate/bit-allocator" }
ucore-memory = { path = "crate/memory" }
[build-dependencies]
cc = "1.0"

View File

@@ -1,5 +1,5 @@
[package]
name = "memory"
name = "ucore-memory"
version = "0.1.0"
authors = ["WangRunji <wangrunji0408@163.com>"]

View File

@@ -22,6 +22,7 @@ impl<T: PageTable> CowExt<T> {
let entry = self.page_table.map(addr, target);
entry.set_writable(false);
entry.set_shared(writable);
entry.update();
let frame = target / PAGE_SIZE;
match writable {
true => self.rc_map.write_increase(&frame),
@@ -52,6 +53,7 @@ impl<T: PageTable> CowExt<T> {
if self.rc_map.read_count(&frame) == 0 && self.rc_map.write_count(&frame) == 1 {
entry.clear_shared();
entry.set_writable(true);
entry.update();
self.rc_map.write_decrease(&frame);
return true;
}
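
Both hunks above follow the contract of the new `Entry` trait (see the generic interface below): flag edits on a live mapping are not guaranteed to take effect until `update()` is called, because the CPU may still hold the old translation in its TLB. In general form (a sketch, not code from this commit):

entry.set_writable(false);  // any flag change on a live mapping...
entry.update();             // ...must be followed by update() to drop the stale TLB entry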

View File

@@ -22,6 +22,7 @@ pub struct MockEntry {
}
impl Entry for MockEntry {
fn update(&mut self) {}
fn accessed(&self) -> bool { self.accessed }
fn dirty(&self) -> bool { self.dirty }
fn writable(&self) -> bool { self.writable }
@@ -42,6 +43,11 @@ impl Entry for MockEntry {
self.writable_shared = false;
self.readonly_shared = false;
}
fn user(&self) -> bool { unimplemented!() }
fn set_user(&mut self, value: bool) { unimplemented!() }
fn execute(&self) -> bool { unimplemented!() }
fn set_execute(&mut self, value: bool) { unimplemented!() }
}
type PageFaultHandler = Box<FnMut(&mut MockPageTable, VirtAddr)>;
@@ -49,7 +55,6 @@ type PageFaultHandler = Box<FnMut(&mut MockPageTable, VirtAddr)>;
impl PageTable for MockPageTable {
type Entry = MockEntry;
/// Map a page and return its entry (panics if the slot is already mapped)
fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Self::Entry {
let entry = &mut self.entries[addr / PAGE_SIZE];
assert!(!entry.present);

View File

@@ -1,6 +1,12 @@
//! Generic page table interface
//!
//! Implemented for every architecture, used by OS.
use super::*;
#[cfg(test)]
pub use self::mock_page_table::MockPageTable;
#[cfg(test)]
mod mock_page_table;
pub trait PageTable {
@@ -13,6 +19,11 @@ pub trait PageTable {
}
pub trait Entry {
/// IMPORTANT!
/// This must be called after any change for it to take effect.
/// Usually this flushes the entry from the TLB/MMU.
fn update(&mut self);
/// Will be set when accessed
fn accessed(&self) -> bool;
/// Will be set when written
@@ -34,4 +45,9 @@ pub trait Entry {
fn readonly_shared(&self) -> bool;
fn set_shared(&mut self, writable: bool);
fn clear_shared(&mut self);
fn user(&self) -> bool;
fn set_user(&mut self, value: bool);
fn execute(&self) -> bool;
fn set_execute(&mut self, value: bool);
}
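
Together these two traits give the OS an architecture-neutral handle on mappings, which is what the rest of this commit switches to. A minimal usage sketch (hypothetical helper, not part of this commit):

fn map_user_data_page<T: PageTable>(pt: &mut T, vaddr: VirtAddr, paddr: PhysAddr) {
    let entry = pt.map(vaddr, paddr);  // create the mapping
    entry.set_user(true);              // reachable from user mode
    entry.set_execute(false);          // data page: no instruction fetch
    entry.update();                    // per the contract above: flush so the changes take effect
}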

View File

@@ -1,20 +1,11 @@
use memory::*;
//pub use self::cow::*;
use x86_64::structures::paging::*;
use x86_64::registers::control::{Cr3, Cr3Flags};
pub use ucore_memory::paging::{Entry, PageTable};
use x86_64::instructions::tlb;
use x86_64::registers::control::{Cr3, Cr3Flags};
use x86_64::structures::paging::{Mapper, PageTable as x86PageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable};
pub use x86_64::structures::paging::{FrameAllocator, FrameDeallocator, Page, PageRange, PhysFrame as Frame, Size4KiB};
use x86_64::ux::u9;
pub type Frame = PhysFrame;
pub type EntryFlags = PageTableFlags;
pub type ActivePageTable = RecursivePageTable<'static>;
pub use x86_64::structures::paging::{Page, PageRange, Mapper, FrameAllocator, FrameDeallocator, Size4KiB, PageTable};
//mod cow;
const ENTRY_COUNT: usize = 512;
pub trait PageExt {
fn of_addr(address: VirtAddr) -> Self;
fn range_of(begin: VirtAddr, end: VirtAddr) -> PageRange;
@@ -40,18 +31,53 @@ impl FrameExt for Frame {
}
}
pub trait ActiveTableExt {
fn with(&mut self, table: &mut InactivePageTable, f: impl FnOnce(&mut ActivePageTable));
fn map_to_(&mut self, page: Page, frame: Frame, flags: EntryFlags);
pub struct ActivePageTable(RecursivePageTable<'static>);
pub struct PageEntry(PageTableEntry);
impl PageTable for ActivePageTable {
type Entry = PageEntry;
fn map(&mut self, addr: usize, target: usize) -> &mut PageEntry {
let flags = EF::PRESENT | EF::WRITABLE | EF::NO_EXECUTE;
self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, &mut frame_allocator())
.unwrap().flush();
self.get_entry(addr)
}
fn unmap(&mut self, addr: usize) {
let (frame, flush) = self.0.unmap(Page::of_addr(addr)).unwrap();
flush.flush();
}
fn get_entry(&mut self, addr: usize) -> &mut PageEntry {
let entry_addr = ((addr >> 9) & 0o777_777_777_7770) | 0xffffff80_00000000;
unsafe { &mut *(entry_addr as *mut PageEntry) }
}
fn read_page(&mut self, addr: usize, data: &mut [u8]) {
use core::slice;
let mem = unsafe { slice::from_raw_parts((addr & !0xfffusize) as *const u8, 4096) };
data.copy_from_slice(mem);
}
fn write_page(&mut self, addr: usize, data: &[u8]) {
use core::slice;
let mem = unsafe { slice::from_raw_parts_mut((addr & !0xfffusize) as *mut u8, 4096) };
mem.copy_from_slice(data);
}
}
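
`get_entry` is where the recursive mapping pays off: with P4 slot 0o777 pointing back at the P4 frame itself, the P1 entry for any virtual address sits at a fixed transform of that address, and no table walk is needed. A worked example of the arithmetic (illustrative only):

// addr = 0x1000 (virtual page 1):
//   0x1000 >> 9                  = 0x8
//   0x8 & 0o777_777_777_7770     = 0x8                  (mask keeps 8-byte entry alignment)
//   0x8 | 0xffffff80_00000000    = 0xffffff80_00000008  -> page 1's P1 entry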
impl ActiveTableExt for ActivePageTable {
fn with(&mut self, table: &mut InactivePageTable, f: impl FnOnce(&mut ActivePageTable)) {
with_temporary_map(self, &Cr3::read().0, |active_table, p4_table: &mut PageTable| {
impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(RecursivePageTable::new(&mut *(0xffffffff_fffff000 as *mut _)).unwrap())
}
pub fn with(&mut self, table: &mut InactivePageTable, f: impl FnOnce(&mut ActivePageTable)) {
with_temporary_map(self, &Cr3::read().0, |active_table, p4_table: &mut x86PageTable| {
let backup = p4_table[0o777].clone();
// overwrite recursive mapping
p4_table[0o777].set_frame(table.p4_frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
p4_table[0o777].set_frame(table.p4_frame.clone(), EF::PRESENT | EF::WRITABLE);
tlb::flush_all();
// execute f in the new context
@@ -62,20 +88,55 @@ impl ActiveTableExt for ActivePageTable {
tlb::flush_all();
});
}
fn map_to_(&mut self, page: Page<Size4KiB>, frame: PhysFrame<Size4KiB>, flags: EntryFlags) {
self.map_to(page, frame, flags, &mut frame_allocator()).unwrap().flush();
pub fn map_to(&mut self, page: Page, frame: Frame) -> &mut PageEntry {
self.map(page.start_address().as_u64() as usize, frame.start_address().as_u64() as usize)
}
}
// Set the user bit on the P1-P4 entries
// It's a workaround since the x86_64 crate's PageTable does not set the user bit.
if flags.contains(EntryFlags::USER_ACCESSIBLE) {
let mut addr = page.start_address().as_u64();
for _ in 0..4 {
impl Entry for PageEntry {
fn update(&mut self) {
use x86_64::{VirtAddr, instructions::tlb::flush};
let addr = VirtAddr::new_unchecked((self as *const _ as u64) << 9);
flush(addr);
}
fn accessed(&self) -> bool { self.0.flags().contains(EF::ACCESSED) }
fn dirty(&self) -> bool { self.0.flags().contains(EF::DIRTY) }
fn writable(&self) -> bool { self.0.flags().contains(EF::WRITABLE) }
fn present(&self) -> bool { self.0.flags().contains(EF::PRESENT) }
fn clear_accessed(&mut self) { self.as_flags().remove(EF::ACCESSED); }
fn clear_dirty(&mut self) { self.as_flags().remove(EF::DIRTY); }
fn set_writable(&mut self, value: bool) { self.as_flags().set(EF::WRITABLE, value); }
fn set_present(&mut self, value: bool) { self.as_flags().set(EF::PRESENT, value); }
fn target(&self) -> usize { self.0.addr().as_u64() as usize }
fn writable_shared(&self) -> bool { self.0.flags().contains(EF::BIT_10) }
fn readonly_shared(&self) -> bool { self.0.flags().contains(EF::BIT_9) }
fn set_shared(&mut self, writable: bool) {
let flags = self.as_flags();
flags.set(EF::BIT_10, writable);
flags.set(EF::BIT_9, !writable);
}
fn clear_shared(&mut self) { self.as_flags().remove(EF::BIT_9 | EF::BIT_10); }
fn user(&self) -> bool { self.0.flags().contains(EF::USER_ACCESSIBLE) }
fn set_user(&mut self, value: bool) {
self.as_flags().set(EF::USER_ACCESSIBLE, value);
if value {
let mut addr = self as *const _ as usize;
for _ in 0..3 {
// Upper level entry
addr = ((addr >> 9) & 0o777_777_777_7770) | 0xffffff80_00000000;
// set USER_ACCESSIBLE
unsafe { (*(addr as *mut EntryFlags)).insert(EntryFlags::USER_ACCESSIBLE) };
unsafe { (*(addr as *mut EF)).insert(EF::USER_ACCESSIBLE) };
}
}
}
fn execute(&self) -> bool { !self.0.flags().contains(EF::NO_EXECUTE) }
fn set_execute(&mut self, value: bool) { self.as_flags().set(EF::NO_EXECUTE, !value); }
}
impl PageEntry {
fn as_flags(&mut self) -> &mut EF {
unsafe { &mut *(self as *mut _ as *mut EF) }
}
}
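
Three details of this impl are easy to miss. `update()` inverts `get_entry`'s transform: shifting the entry's own address left by 9 recovers (up to the non-canonical high bits) the virtual address the entry maps, which is then flushed from the TLB. `set_shared` keeps the COW state in bits 9 and 10, which the architecture ignores and leaves free to the OS. And `set_user` must also mark the parent entries, because a user-mode access faults unless USER_ACCESSIBLE is set at all four levels; each of the three loop iterations reuses the same `>> 9 | prefix` transform to hop from an entry to its parent entry:

// p1_entry --(>>9 | 0xffffff80_00000000)--> p2_entry --> p3_entry --> p4_entry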
#[derive(Debug)]
@@ -85,13 +146,23 @@ pub struct InactivePageTable {
impl InactivePageTable {
pub fn new(frame: Frame, active_table: &mut ActivePageTable) -> InactivePageTable {
with_temporary_map(active_table, &frame, |_, table: &mut PageTable| {
with_temporary_map(active_table, &frame, |_, table: &mut x86PageTable| {
table.zero();
// set up recursive mapping for the table
table[511].set_frame(frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
table[511].set_frame(frame.clone(), EF::PRESENT | EF::WRITABLE);
});
InactivePageTable { p4_frame: frame }
}
pub fn map_kernel(&mut self, active_table: &mut ActivePageTable) {
let mut table = unsafe { &mut *(0xffffffff_fffff000 as *mut x86PageTable) };
let e510 = table[510].clone();
let e509 = table[509].clone();
active_table.with(self, |pt: &mut ActivePageTable| {
table[510] = e510;
table[509] = e509;
});
}
pub fn switch(&self) {
let old_frame = Cr3::read().0;
let new_frame = self.p4_frame.clone();
@@ -112,15 +183,15 @@ impl Drop for InactivePageTable {
}
}
fn with_temporary_map(active_table: &mut ActivePageTable, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut PageTable)) {
fn with_temporary_map(active_table: &mut ActivePageTable, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut x86PageTable)) {
// Create a temporary page
let page = Page::of_addr(0xcafebabe);
assert!(active_table.translate_page(page).is_none(), "temporary page is already mapped");
assert!(active_table.0.translate_page(page).is_none(), "temporary page is already mapped");
// Map it to table
active_table.map_to_(page, frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
active_table.map_to(page, frame.clone());
// Call f
let table = unsafe { &mut *page.start_address().as_mut_ptr() };
f(active_table, table);
// Unmap the page
active_table.unmap(page).unwrap().1.flush();
active_table.unmap(0xcafebabe);
}
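
`with` is the core trick here, shared with `with_temporary_map`: to edit a table that is not currently active, it maps the live P4 frame at a scratch page, repoints recursive slot 0o777 at the target table, flushes the TLB, runs the closure, then restores the backed-up entry and flushes again. A hypothetical use, given a freshly allocated `frame`:

let mut inactive = InactivePageTable::new(frame, &mut active);
active.with(&mut inactive, |pt: &mut ActivePageTable| {
    // recursive addresses now resolve inside `inactive`
    pt.map(0x1000, 0x2000);  // hypothetical mapping in the new space
});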

View File

@@ -36,6 +36,7 @@ extern crate simple_filesystem;
extern crate spin;
extern crate syscall as redox_syscall;
extern crate uart_16550;
extern crate ucore_memory;
extern crate volatile;
#[macro_use]
extern crate x86_64;

View File

@@ -67,54 +67,61 @@ impl MemoryArea {
Some(phys_start) => {
for page in Page::range_of(self.start_addr, self.end_addr) {
let frame = Frame::of_addr(phys_start.get() + page.start_address().as_u64() as usize - self.start_addr);
pt.map_to_(page, frame, self.flags.0);
self.flags.apply(pt.map_to(page, frame));
}
}
None => {
for page in Page::range_of(self.start_addr, self.end_addr) {
let frame = alloc_frame();
pt.map_to_(page, frame, self.flags.0);
self.flags.apply(pt.map_to(page, frame));
}
}
}
}
fn unmap(&self, pt: &mut ActivePageTable) {
for page in Page::range_of(self.start_addr, self.end_addr) {
let (frame, flush) = pt.unmap(page).unwrap();
flush.flush();
let addr = page.start_address().as_u64() as usize;
if self.phys_start_addr.is_none() {
let frame = Frame::of_addr(pt.get_entry(addr).target());
dealloc_frame(frame);
}
pt.unmap(addr);
}
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct MemoryAttr(EntryFlags);
impl Default for MemoryAttr {
fn default() -> Self {
MemoryAttr(EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE)
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, Default)]
pub struct MemoryAttr {
user: bool,
readonly: bool,
execute: bool,
hide: bool,
}
impl MemoryAttr {
pub fn user(mut self) -> Self {
self.0 |= EntryFlags::USER_ACCESSIBLE;
self.user = true;
self
}
pub fn readonly(mut self) -> Self {
self.0.remove(EntryFlags::WRITABLE);
self.readonly = true;
self
}
pub fn execute(mut self) -> Self {
self.0.remove(EntryFlags::NO_EXECUTE);
self.execute = true;
self
}
pub fn hide(mut self) -> Self {
self.0.remove(EntryFlags::PRESENT);
self.hide = true;
self
}
fn apply(&self, entry: &mut impl Entry) {
if self.user { entry.set_user(true); }
if self.readonly { entry.set_writable(false); }
if self.execute { entry.set_execute(true); }
if self.hide { entry.set_present(false); }
if self.user || self.readonly || self.execute || self.hide { entry.update(); }
}
}
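
`MemoryAttr` is now a plain builder over the generic `Entry` interface instead of a wrapper around x86 flags, so `memory_set.rs` no longer depends on the architecture's flag type; `MemoryArea::map` above applies it to each freshly mapped entry. Usage sketch (hypothetical addresses):

let attr = MemoryAttr::default().user().execute();  // user-accessible, executable
let entry = pt.map(0x8000_0000, paddr);             // hypothetical user code page
attr.apply(entry);                                  // sets the flags, then calls update()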
/// A collection of memory areas, consisting of several contiguous segments
@@ -226,15 +233,6 @@ fn new_page_table_with_kernel() -> InactivePageTable {
let frame = alloc_frame();
let mut active_table = active_table();
let mut page_table = InactivePageTable::new(frame, &mut active_table);
use consts::{KERNEL_HEAP_PML4, KERNEL_PML4};
let mut table = unsafe { &mut *(0xffffffff_fffff000 as *mut PageTable) };
let e510 = table[KERNEL_PML4].clone();
let e509 = table[KERNEL_HEAP_PML4].clone();
active_table.with(&mut page_table, |pt: &mut ActivePageTable| {
table[KERNEL_PML4] = e510;
table[KERNEL_HEAP_PML4] = e509;
});
page_table.map_kernel(&mut active_table);
page_table
}

View File

@@ -8,6 +8,7 @@ pub use self::memory_set::*;
pub use self::stack_allocator::*;
use spin::{Mutex, MutexGuard};
use super::HEAP_ALLOCATOR;
use ucore_memory::paging::PageTable;
mod memory_set;
mod stack_allocator;
@@ -42,7 +43,7 @@ fn alloc_stack(size_in_pages: usize) -> Stack {
fn active_table() -> MutexGuard<'static, ActivePageTable> {
lazy_static! {
static ref ACTIVE_TABLE: Mutex<ActivePageTable> = Mutex::new(unsafe {
ActivePageTable::new(&mut *(0xffffffff_fffff000 as *mut _)).unwrap()
ActivePageTable::new()
});
}
ACTIVE_TABLE.lock()
@@ -153,11 +154,10 @@ fn get_init_kstack_and_set_guard_page() -> Stack {
extern { fn stack_bottom(); }
let stack_bottom = PhysAddr::new(stack_bottom as u64).to_kernel_virtual();
let stack_bottom_page = Page::of_addr(stack_bottom);
// turn the stack bottom into a guard page
active_table().unmap(stack_bottom_page);
debug!("guard page at {:?}", stack_bottom_page.start_address());
active_table().unmap(stack_bottom);
debug!("guard page at {:?}", stack_bottom);
Stack::new(stack_bottom + 8 * PAGE_SIZE, stack_bottom + 1 * PAGE_SIZE)
}

View File

@@ -1,5 +1,5 @@
use super::*;
use memory::PAGE_SIZE;
use super::*;
// TODO: use BitAllocator & alloc fixed size stack
pub struct StackAllocator {
@@ -41,7 +41,7 @@ impl StackAllocator {
// map stack pages to physical frames
for page in Page::range_inclusive(start, end) {
let frame = alloc_frame();
active_table.map_to_(page, frame, EntryFlags::PRESENT | EntryFlags::WRITABLE);
active_table.map_to(page, frame);
}
// create a new stack