mirror of https://github.com/rcore-os/rCore.git synced 2024-11-22 16:16:16 +04:00

Finish adding more comments for the kernel and finish setting user memory swappable.

lcy1996 2018-10-23 00:00:09 +08:00
parent 0a81014007
commit 63349ade19
6 changed files with 137 additions and 34 deletions

View File

@ -13,7 +13,7 @@ use core::ops::Range;
 /// alloc: allocate a free bit.
 /// dealloc: free an allocated bit.
 ///
-/// insert: mark bits in the range as allocated
+/// insert: mark bits in the range as allocated (available)
 /// remove: reverse of insert
 ///
 /// any: whether there are free bits remaining
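Editor's note: the operations listed above describe the allocator's surface; the sketch below restates them as a Rust trait purely for orientation. It is an illustration, not the crate's actual `BitAlloc` declaration, which may differ in names and signatures.

// Sketch only: the operations documented above, written as a trait.
trait BitAllocSketch {
    fn alloc(&mut self) -> Option<usize>;                  // allocate a free bit
    fn dealloc(&mut self, key: usize);                     // free an allocated bit
    fn insert(&mut self, range: core::ops::Range<usize>);  // mark range as available
    fn remove(&mut self, range: core::ops::Range<usize>);  // reverse of insert
    fn any(&self) -> bool;                                 // any free bits remaining?
}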
@ -38,7 +38,7 @@ pub type BitAlloc256M = BitAllocCascade16<BitAlloc16M>;
 /// Implement the bit allocator by segment tree algorithm.
 #[derive(Default)]
 pub struct BitAllocCascade16<T: BitAlloc> {
-    bitset: u16,
+    bitset: u16, // for each bit, 1 indicates available, 0 indicates unavailable
     sub: [T; 16],
 }
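Editor's note: to make the new `bitset` comment concrete, here is a minimal, self-contained sketch (not the crate's code) of how a cascaded allocator can use a summary word whose set bits mean "this sub-allocator still has a free slot". `BITS_PER_SUB` and the closure are illustrative assumptions.

// Sketch only: find the first sub-allocator with a free bit via the summary
// bitset, delegate to it, then translate its local index into a global one.
const BITS_PER_SUB: usize = 16; // assumed capacity of each sub-allocator

fn alloc_sketch(bitset: u16, alloc_in_sub: impl Fn(usize) -> usize) -> Option<usize> {
    if bitset == 0 {
        return None; // every sub-allocator is full
    }
    let i = bitset.trailing_zeros() as usize; // lowest set bit = first sub with space
    Some(i * BITS_PER_SUB + alloc_in_sub(i))
}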

View File

@ -20,7 +20,7 @@ pub trait InactivePageTable {
 */
 fn new() -> Self;
 /*
-** @brief create a inactive page table without kernel memory mapped
+** @brief create an inactive page table without kernel memory mapped
 ** @retval InactivePageTable the created inactive page table
 */
 fn new_bare() -> Self;
@ -180,7 +180,7 @@ impl MemoryArea {
 }
 }
 /*
-** @brief map the memory area from the physice address in a page table
+** @brief unmap the memory area from the physical address in a page table
 ** @param pt: &mut T::Active the page table to use
 ** @retval none
 */
@ -194,6 +194,14 @@ impl MemoryArea {
     pt.unmap(addr);
 }
 }
+pub fn get_start_addr(&self) -> VirtAddr {
+    self.start_addr
+}
+pub fn get_end_addr(&self) -> VirtAddr {
+    self.end_addr
+}
 }
 /// The attributes of the memory
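Editor's note: the two new accessors exist so other modules can walk a memory area page by page; the commit itself does exactly that in new_user (last file below). A hedged usage sketch, assuming MemoryArea, Page, and VirtAddr from this crate are in scope and that Page::range_of and start_address behave as they are used later in this commit:

// Sketch: mark every page of a memory area swappable; `set_swappable`
// stands in for the swap layer's entry point (hypothetical signature).
fn make_area_swappable(area: &MemoryArea, mut set_swappable: impl FnMut(VirtAddr)) {
    for page in Page::range_of(area.get_start_addr(), area.get_end_addr()) {
        set_swappable(page.start_address());
    }
}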

View File

@ -98,6 +98,15 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
     swapper,
 }
 }
+/*
+** @brief set a virtual address (a page) swappable
+** @param addr: VirtAddr the target page's virtual address
+*/
+pub fn set_swappable(&mut self, addr: VirtAddr) {
+    self.swap_manager.push(addr);
+}
 /*
 ** @brief map the virtual address to a target physical address as swappable
 ** @param addr: VirtAddr the virtual address to map
@ -180,12 +189,13 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
 ** of beginning of the page
 ** @retval bool whether swap in happens.
 */
-pub fn page_fault_handler(&mut self, addr: VirtAddr, alloc_frame: impl FnOnce() -> Option<PhysAddr>) -> bool {
+pub fn page_fault_handler(&mut self, addr: VirtAddr, alloc_frame: impl FnOnce() -> PhysAddr) -> bool {
     if !self.page_table.get_entry(addr).swapped() {
         return false;
     }
     // Allocate a frame, if failed, swap out a page
-    let frame = alloc_frame().unwrap_or_else(|| self.swap_out_any().ok().unwrap());
+    //let frame = alloc_frame().unwrap_or_else(|| self.swap_out_any().ok().unwrap());
+    let frame = alloc_frame();
     self.swap_in(addr, frame).ok().unwrap();
     true
 }
@ -220,7 +230,7 @@ impl<T: PageTable, M: SwapManager, S: Swapper> DerefMut for SwapExt<T, M, S> {
 mod test {
 use super::*;
 use super::mock_swapper::MockSwapper;
-use alloc::{arc::Arc, boxed::Box};
+use alloc::{sync::Arc, boxed::Box};
 use core::cell::RefCell;
 use paging::MockPageTable;
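Editor's note on the signature change above: the closure passed to page_fault_handler now returns a bare PhysAddr instead of an Option, so the caller must always produce a frame (for example by swapping a victim out inside its own allocator, as the kernel's alloc_frame in the next file does). A hedged, self-contained sketch of the new contract with types simplified to usize:

// Sketch only: mirrors the hunk above; the closure is infallible, so any
// fallback logic lives inside it, not inside the handler.
fn handle_fault_sketch(swapped: bool,
                       alloc_frame: impl FnOnce() -> usize,
                       swap_in: impl FnOnce(usize)) -> bool {
    if !swapped {
        return false;          // not a swapped-out page: not this layer's fault
    }
    let frame = alloc_frame(); // caller guarantees a frame (may swap out internally)
    swap_in(frame);
    true
}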

View File

@ -58,6 +58,7 @@ impl PageTable for ActivePageTable {
     let page = Page::of_addr(VirtAddr::new(addr));
     let frame = Frame::of_addr(PhysAddr::new(target as u32));
     // map the page to the frame using FrameAllocatorForRiscv
+    // we may need the frame allocator to alloc frames for a new page table (first/second level)
     self.0.map_to(page, frame, flags, &mut FrameAllocatorForRiscv)
         .unwrap().flush();
     self.get_entry(addr)
@ -135,6 +136,14 @@ impl ActivePageTable {
 pub unsafe fn new() -> Self {
     ActivePageTable(RecursivePageTable::new(&mut *ROOT_PAGE_TABLE).unwrap())
 }
+/*
+* @param:
+*   frame: the target physical frame which will be temporarily mapped
+*   f: the function to apply, called exactly once
+* @brief:
+*   temporarily map the target physical frame and run f on that mapping
+*/
 fn with_temporary_map(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut RvPageTable)) {
     // Create a temporary page
     let page = Page::of_addr(VirtAddr::new(0xcafebabe));
@ -148,7 +157,7 @@ impl ActivePageTable {
     self.unmap(0xcafebabe);
 }
 }
+/// implementation for the Entry trait in /crate/memory/src/paging/mod.rs
 impl Entry for PageEntry {
     fn update(&mut self) {
         let addr = VirtAddr::new((self as *const _ as usize) << 10);
@ -176,8 +185,9 @@ impl Entry for PageEntry {
     flags.set(EF::RESERVED2, !writable);
 }
 fn clear_shared(&mut self) { self.as_flags().remove(EF::RESERVED1 | EF::RESERVED2); }
-fn swapped(&self) -> bool { unimplemented!() }
-fn set_swapped(&mut self, value: bool) { unimplemented!() }
+// the hardware valid bit must be 0 when the page is swapped out
+fn swapped(&self) -> bool { self.0.flags().contains(EF::RESERVED1) }
+fn set_swapped(&mut self, value: bool) { self.as_flags().set(EF::RESERVED1, value); }
 fn user(&self) -> bool { self.0.flags().contains(EF::USER) }
 fn set_user(&mut self, value: bool) { self.as_flags().set(EF::USER, value); }
 fn execute(&self) -> bool { self.0.flags().contains(EF::EXECUTABLE) }
@ -198,12 +208,24 @@ pub struct InactivePageTable0 {
 impl InactivePageTable for InactivePageTable0 {
     type Active = ActivePageTable;
+/*
+* @brief:
+*   get a new page table (for a new process or thread)
+* @retval:
+*   the new page table
+*/
 fn new() -> Self {
     let mut pt = Self::new_bare();
     pt.map_kernel();
     pt
 }
+/*
+* @brief:
+*   allocate a new frame, self-map it, and use it as the inactive page table
+* @retval:
+*   the inactive page table
+*/
 fn new_bare() -> Self {
     let frame = Self::alloc_frame().map(|target| Frame::of_addr(PhysAddr::new(target as u32)))
         .expect("failed to allocate frame");
@ -214,6 +236,12 @@ impl InactivePageTable for InactivePageTable0 {
     InactivePageTable0 { p2_frame: frame }
 }
+/*
+* @param:
+*   f: a function to run on the mutable active page table
+* @brief:
+*   temporarily map this page table as the active one and apply f to it
+*/
 fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) {
     active_table().with_temporary_map(&satp::read().frame(), |active_table, p2_table: &mut RvPageTable| {
         let backup = p2_table[RECURSIVE_PAGE_PML4].clone();
@ -275,6 +303,10 @@ impl InactivePageTable for InactivePageTable0 {
 }
 impl InactivePageTable0 {
+/*
+* @brief:
+*   map the kernel code memory (the p2 page table entries) in the new inactive page table according to the current active page table
+*/
 fn map_kernel(&mut self) {
     let table = unsafe { &mut *ROOT_PAGE_TABLE };
     let e0 = table[0x40];
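Editor's note on the swapped()/set_swapped() change in this file: in Sv32 the RSW bits of a page-table entry (bit 8 and bit 9, exposed by the riscv crate of this era as RESERVED1/RESERVED2) are ignored by hardware, so once the valid bit is clear one of them can record that the page was swapped out rather than simply unmapped. The fragment below is a standalone illustration with hand-rolled bit constants, not the kernel's actual flag type:

// Illustration only: a raw PTE word with Sv32 bit positions.
const VALID: u32 = 1 << 0;      // hardware "present" bit
const RESERVED1: u32 = 1 << 8;  // RSW bit, free for software use

fn mark_swapped_out(pte: &mut u32) {
    *pte &= !VALID;             // hardware must no longer treat the entry as mapped
    *pte |= RESERVED1;          // remember that the page now lives in swap
}

fn is_swapped(pte: u32) -> bool {
    pte & VALID == 0 && pte & RESERVED1 != 0
}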

View File

@ -22,26 +22,6 @@ lazy_static! {
 pub static ref FRAME_ALLOCATOR: Mutex<FrameAlloc> = Mutex::new(FrameAlloc::default());
 }
-pub fn alloc_frame() -> Option<usize> {
-    let ret = FRAME_ALLOCATOR.lock().alloc().map(|id| id * PAGE_SIZE + MEMORY_OFFSET);
-    trace!("Allocate frame: {:x?}", ret);
-    ret
-}
-pub fn dealloc_frame(target: usize) {
-    trace!("Deallocate frame: {:x}", target);
-    FRAME_ALLOCATOR.lock().dealloc((target - MEMORY_OFFSET) / PAGE_SIZE);
-}
-// alloc from heap
-pub fn alloc_stack() -> Stack {
-    use alloc::alloc::{alloc, Layout};
-    const STACK_SIZE: usize = 0x8000;
-    let bottom = unsafe{ alloc(Layout::from_size_align(STACK_SIZE, 0x8000).unwrap()) } as usize;
-    let top = bottom + STACK_SIZE;
-    Stack { top, bottom }
-}
 lazy_static! {
     static ref ACTIVE_TABLE: Mutex<CowExt<ActivePageTable>> = Mutex::new(unsafe {
         CowExt::new(ActivePageTable::new())
@ -63,6 +43,36 @@ pub fn active_table_swap() -> MutexGuard<'static, SwapExt<ActivePageTable, fifo:
     ACTIVE_TABLE_SWAP.lock()
 }
+/*
+* @brief:
+*   allocate a free physical frame; if there is no free frame, swap out one page and return that frame as the free one
+* @retval:
+*   the physical address of the allocated frame
+*/
+pub fn alloc_frame() -> Option<usize> {
+    // get the real address of the allocated frame
+    let ret = FRAME_ALLOCATOR.lock().alloc().map(|id| id * PAGE_SIZE + MEMORY_OFFSET);
+    trace!("Allocate frame: {:x?}", ret);
+    // do we need: unsafe { ACTIVE_TABLE_SWAP.force_unlock(); } ???
+    Some(ret.unwrap_or_else(|| active_table_swap().swap_out_any().ok().unwrap()))
+}
+pub fn dealloc_frame(target: usize) {
+    trace!("Deallocate frame: {:x}", target);
+    FRAME_ALLOCATOR.lock().dealloc((target - MEMORY_OFFSET) / PAGE_SIZE);
+}
+// alloc from heap
+pub fn alloc_stack() -> Stack {
+    use alloc::alloc::{alloc, Layout};
+    const STACK_SIZE: usize = 0x8000;
+    let bottom = unsafe{ alloc(Layout::from_size_align(STACK_SIZE, 0x8000).unwrap()) } as usize;
+    let top = bottom + STACK_SIZE;
+    Stack { top, bottom }
+}
 /*
 * @param:
 *   addr: the virtual address of the page fault
@ -72,14 +82,14 @@ pub fn active_table_swap() -> MutexGuard<'static, SwapExt<ActivePageTable, fifo:
 * Return true to continue, false to halt
 */
 pub fn page_fault_handler(addr: usize) -> bool {
-    // Handle copy on write
+    // Handle copy on write (not being used now)
     unsafe { ACTIVE_TABLE.force_unlock(); }
     if active_table().page_fault_handler(addr, || alloc_frame().unwrap()){
         return true;
     }
     // handle the swap in/out
     unsafe { ACTIVE_TABLE_SWAP.force_unlock(); }
-    if active_table_swap().page_fault_handler(addr, || alloc_frame()){
+    if active_table_swap().page_fault_handler(addr, || alloc_frame().unwrap()){
         return true;
     }
     false
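Editor's note: with this file, frame allocation and the fault path now form a loop: alloc_frame() falls back to swapping out a victim page, and the handler first tries the copy-on-write layer, then the swap layer. A condensed, self-contained sketch of that control flow (types simplified to usize, allocators and handlers abstracted into closures, no locking):

// Sketch only: mirrors alloc_frame() and page_fault_handler() above.
fn alloc_frame_sketch(try_alloc: impl FnOnce() -> Option<usize>,
                      swap_out_any: impl FnOnce() -> usize) -> usize {
    // prefer a genuinely free frame; otherwise reclaim one by swapping out
    try_alloc().unwrap_or_else(swap_out_any)
}

fn page_fault_sketch(addr: usize,
                     cow: impl FnOnce(usize) -> bool,
                     swap: impl FnOnce(usize) -> bool) -> bool {
    if cow(addr) { return true; }  // copy-on-write fault handled
    if swap(addr) { return true; } // swapped-out page brought back in
    false                          // not a fault we can recover from
}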

View File

@ -1,7 +1,8 @@
 use arch::interrupt::{TrapFrame, Context as ArchContext};
-use memory::{MemoryArea, MemoryAttr, MemorySet};
+use memory::{MemoryArea, MemoryAttr, MemorySet, active_table_swap};
 use xmas_elf::{ElfFile, header, program::{Flags, ProgramHeader, Type}};
 use core::fmt::{Debug, Error, Formatter};
+use ucore_memory::{Page};
 pub struct Context {
     arch: ArchContext,
@ -9,13 +10,29 @@ pub struct Context {
 }
 impl ::ucore_process::processor::Context for Context {
+/*
+* @param:
+*   target: the target process context
+* @brief:
+*   switch to the target process context
+*/
 unsafe fn switch(&mut self, target: &mut Self) {
     super::PROCESSOR.try().unwrap().force_unlock();
     self.arch.switch(&mut target.arch);
     use core::mem::forget;
+    // don't run the destructor of processor()
     forget(super::processor());
 }
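Editor's note on the destructor comment added above: core::mem::forget takes ownership of a value and never runs its Drop impl, which is why the guard returned by super::processor() is forgotten rather than dropped across the switch. A standalone illustration, unrelated to the kernel's actual types:

use core::mem::forget;

struct Guard;                        // stands in for the processor() lock guard
impl Drop for Guard {
    fn drop(&mut self) { /* an unlock would normally happen here */ }
}

fn demo() {
    let g = Guard;
    forget(g);                       // ownership consumed, Drop never runs
}                                    // no unlock at end of scope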
+/*
+* @param:
+*   entry: the program entry for the process
+*   arg: a0 (a parameter)
+* @brief:
+*   create a new kernel thread Context
+* @retval:
+*   the new kernel thread Context
+*/
 fn new_kernel(entry: extern fn(usize) -> !, arg: usize) -> Self {
     let ms = MemorySet::new();
     Context {
@ -34,6 +51,14 @@ impl Context {
 }
 /// Make a new user thread from ELF data
+/*
+* @param:
+*   data: the ELF data stream
+* @brief:
+*   make a new thread from ELF data
+* @retval:
+*   the new user thread Context
+*/
 pub fn new_user(data: &[u8]) -> Self {
     // Parse elf
     let elf = ElfFile::new(data).expect("failed to read elf");
@ -81,6 +106,15 @@ impl Context {
     });
 }
+// set the user memory pages in the memory set swappable
+for area in memory_set.iter() {
+    for page in Page::range_of(area.get_start_addr(), area.get_end_addr()) {
+        let addr = page.start_address();
+        active_table_swap().set_swappable(addr);
+    }
+}
+info!("finish setting memory swappable.");
 Context {
     arch: unsafe {
         ArchContext::new_user_thread(
@ -123,6 +157,14 @@ impl Debug for Context {
 }
 }
+/*
+* @param:
+*   elf: the source ELF file
+* @brief:
+*   generate a memory set according to the elf file
+* @retval:
+*   the new memory set
+*/
 fn memory_set_from<'a>(elf: &'a ElfFile<'a>) -> MemorySet {
     let mut set = MemorySet::new();
     for ph in elf.program_iter() {
@ -131,9 +173,10 @@ fn memory_set_from<'a>(elf: &'a ElfFile<'a>) -> MemorySet {
         }
         let (virt_addr, mem_size, flags) = match ph {
             ProgramHeader::Ph32(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),
-            ProgramHeader::Ph64(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),
+            ProgramHeader::Ph64(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags), //???
         };
         set.push(MemoryArea::new(virt_addr, virt_addr + mem_size, memory_attr_from(flags), ""));
     }
     set
 }