mirror of https://github.com/rcore-os/rCore.git synced 2024-11-22 16:16:16 +04:00

finish delayed frame allocation

This commit is contained in:
lcy1996 2018-11-03 14:33:36 +08:00
parent 4f03eff015
commit b1425a53f9
6 changed files with 107 additions and 11 deletions

View File

@@ -91,6 +91,28 @@ impl<T: PageTable> CowExt<T> {
** @retval bool whether copy-on-write happens.
*/
pub fn page_fault_handler(&mut self, addr: VirtAddr, alloc_frame: impl FnOnce() -> PhysAddr) -> bool {
// handle delayed frame allocation
{
info!("try handling delayed frame allocation");
let need_alloc = {
let entry = self.page_table.get_entry(addr);
!entry.present() && !entry.swapped()
};
if need_alloc {
info!("need_alloc!");
let frame = alloc_frame();
let entry = self.page_table.get_entry(addr);
entry.set_target(frame);
entry.set_present(true);
entry.update();
//area.get_flags().apply(entry); // may be needed once the hide attribute is in use
info!("allocated successfully");
return true;
}
}
// The code below handles copy-on-write and is not used yet (no shared pages)
{
let entry = self.page_table.get_entry(addr);
if !entry.readonly_shared() && !entry.writable_shared() {

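The need_alloc test above encodes a three-way classification of faults: an entry that is neither present nor swapped has simply never been given a frame. A minimal sketch of that decision, with an illustrative enum that is not part of the ucore_memory API:

// How the handler distinguishes delayed allocation from other faults;
// `FaultKind` and `classify` are illustrative, not the crate's API.
enum FaultKind {
    DelayedAlloc, // !present && !swapped: no frame was ever allocated
    SwappedOut,   // !present && swapped: the frame lives on the swap device
    Other,        // present: e.g. a write fault on a copy-on-write page
}

fn classify(present: bool, swapped: bool) -> FaultKind {
    match (present, swapped) {
        (false, false) => FaultKind::DelayedAlloc,
        (false, true) => FaultKind::SwappedOut,
        (true, _) => FaultKind::Other,
    }
}
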
View File

@@ -173,11 +173,22 @@ impl MemoryArea {
}
}
None => {
info!("map delayed!");
for page in Page::range_of(self.start_addr, self.end_addr) {
let addr = page.start_address();
//let target = T::alloc_frame().expect("failed to allocate frame");
//self.flags.apply(pt.map(addr, target));
// For delayed frame allocation: map with a dummy target for now,
// then clear the present bit so the first access faults.
{
let entry = pt.map(addr, 0);
self.flags.apply(entry);
}
let entry = pt.get_entry(addr);
entry.set_present(false);
entry.update();
}
info!("finish map delayed!");
}
}
}
@@ -190,8 +201,14 @@ impl MemoryArea {
for page in Page::range_of(self.start_addr, self.end_addr) {
let addr = page.start_address();
if self.phys_start_addr.is_none() {
if pt.get_entry(addr).present() {
// a page fault allocated this frame, so free it
let target = pt.get_entry(addr).target();
T::dealloc_frame(target);
} else {
// the page was never faulted in and has no frame;
// mark the entry present so that pt.unmap() accepts it
pt.get_entry(addr).set_present(true);
}
}
pt.unmap(addr);
}
@@ -204,6 +221,11 @@ impl MemoryArea {
pub fn get_end_addr(&self) -> VirtAddr {
self.end_addr
}
pub fn get_flags(&self) -> &MemoryAttr {
&self.flags
}
}
/// The attributes of the memory
@@ -377,6 +399,7 @@ impl<T: InactivePageTable> MemorySet<T> {
pub fn get_page_table_mut(&mut self) -> &mut T {
&mut self.page_table
}
}
impl<T: InactivePageTable> Clone for MemorySet<T> {

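The map and unmap hunks above maintain a simple invariant for delayed-allocation areas: an entry starts out mapped to a dummy target with the present bit cleared, gains a real frame only if it faults, and at teardown owns a frame to free only if it is present. A toy model of that invariant, where Entry, alloc_frame, and dealloc_frame are stand-ins for the real paging interfaces:

// Toy model of the delayed-allocation invariant kept by map()/unmap();
// `Entry`, `alloc_frame`, and `dealloc_frame` are illustrative stand-ins.
#[derive(Default)]
struct Entry {
    present: bool,
    target: usize,
}

fn alloc_frame() -> usize { 0x8_0000 }   // assumed frame allocator
fn dealloc_frame(_frame: usize) {}       // assumed frame deallocator

// map(): reserve the entry with a dummy target; the first touch will fault.
fn map_delayed(e: &mut Entry) {
    e.target = 0;
    e.present = false;
}

// page fault: the entry was never backed by a frame, so back it now.
fn fault_in(e: &mut Entry) {
    e.target = alloc_frame();
    e.present = true;
}

// unmap(): free the frame only if a fault ever allocated one; otherwise
// mark the entry present so the page-table code will accept the unmap.
fn unmap_delayed(e: &mut Entry) {
    if e.present {
        dealloc_frame(e.target);
    } else {
        e.present = true;
    }
}
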
View File

@@ -147,6 +147,7 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
entry.update();
token
};
info!("swap in vaddr {:x?} when removing it from the swappable set", addr);
let data = page_table.get_page_slice_mut(addr);
swapper.swap_in(token, data).unwrap();
});
@@ -172,6 +173,7 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
** the error if failed
*/
pub fn swap_out_any<T2: InactivePageTable>(&mut self) -> Result<PhysAddr, SwapError> {
info!("come into swap_out_any");
let victim: Option<Frame> = {
let Self {ref mut page_table, ref mut swap_manager, ref mut swapper} = self;
swap_manager.pop(page_table, swapper)

View File

@@ -7,8 +7,9 @@ use ucore_memory::{*, paging::PageTable};
use ucore_memory::cow::CowExt;
pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, MemorySet as MemorySet_, Stack};
use ucore_memory::swap::*;
use process::{processor, PROCESSOR};
use sync::{SpinNoIrqLock, SpinNoIrq, MutexGuard};
use ucore_memory::paging::Entry;
pub type MemorySet = MemorySet_<InactivePageTable0>;
@@ -85,16 +86,48 @@ pub fn alloc_stack() -> Stack {
* Return true to continue, false to halt
*/
pub fn page_fault_handler(addr: usize) -> bool {
// Handle copy-on-write (not used yet).
// Some page faults for delayed frame allocation can occur while the process
// is still being built, so we can't use processor() to get the real memory
// set for the fault, unless we write a manager that tracks every memory set,
// which is a bit involved; the per-area lookup below is therefore commented
// out for now.
/*
info!("come into page fault handler.");
{
unsafe { PROCESSOR.try().unwrap().force_unlock(); }
let mut temp_proc = processor();
let mmset = temp_proc.current_context_mut().get_memory_set_mut();
// check whether the virtual address is valid
let target_area = mmset.find_area(addr);
match target_area {
None => {
info!("invalid virtual address: {:x?}", addr);
return false;
},
Some(area) => {
// handle delayed frame allocation and copy-on-write (the latter not used yet)
unsafe { ACTIVE_TABLE.force_unlock(); }
if active_table().page_fault_handler(addr, || alloc_frame().unwrap()) {
return true;
}
},
}
}
*/
// Handle delayed frame allocation and copy-on-write (the latter not used yet)
unsafe { ACTIVE_TABLE.force_unlock(); }
if active_table().page_fault_handler(addr, || alloc_frame().unwrap()) {
info!("general page fault handled successfully!");
return true;
}
// handle swap in/out
info!("start handling swap in/out page fault");
let mut temp_proc = processor();
let pt = temp_proc.current_context_mut().get_memory_set_mut().get_page_table_mut();
unsafe { ACTIVE_TABLE_SWAP.force_unlock(); }
if active_table_swap().page_fault_handler(pt as *mut InactivePageTable0, addr, || alloc_frame().unwrap()) {
return true;
}

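The handler thus tries the two mechanisms in a fixed order: delayed allocation (and eventually copy-on-write) on the active table first, then swap-in; the force_unlock calls exist because the fault may be taken while the faulting thread already holds those locks. A condensed sketch of the control flow, with hypothetical helpers standing in for the active_table() and active_table_swap() calls above:

// Condensed control flow of page_fault_handler; the helpers below are
// stand-ins for the real active_table()/active_table_swap() calls.
fn page_fault_dispatch(addr: usize) -> bool {
    // 1. A non-present, non-swapped entry: allocate the frame now.
    if try_delayed_alloc(addr) {
        return true;
    }
    // 2. A swapped-out entry: read the page back from the swap device.
    if try_swap_in(addr) {
        return true;
    }
    // 3. Neither applies: the access is genuinely invalid.
    false
}

fn try_delayed_alloc(_addr: usize) -> bool { false } // assumed helper
fn try_swap_in(_addr: usize) -> bool { false }       // assumed helper
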
View File

@@ -61,6 +61,7 @@ impl Context {
* the new user thread Context
*/
pub fn new_user(data: &[u8]) -> Self {
info!("come into new_user");
// Parse elf
let elf = ElfFile::new(data).expect("failed to read elf");
let is32 = match elf.header.pt2 {
@@ -70,6 +71,7 @@
assert_eq!(elf.header.pt2.type_().as_type(), header::Type::Executable, "ELF is not executable");
// User stack
info!("start building user stack");
use consts::{USER_STACK_OFFSET, USER_STACK_SIZE, USER32_STACK_OFFSET};
let (user_stack_bottom, user_stack_top) = match is32 {
true => (USER32_STACK_OFFSET, USER32_STACK_OFFSET + USER_STACK_SIZE),
@@ -77,13 +79,16 @@
};
// Make page table
info!("make page table!");
let mut memory_set = memory_set_from(&elf);
info!("start pushing user stack into the memory set");
memory_set.push(MemoryArea::new(user_stack_bottom, user_stack_top, MemoryAttr::default().user(), "user_stack"));
trace!("{:#x?}", memory_set);
let entry_addr = elf.header.pt2.entry_point() as usize;
// Temporary switch to it, in order to copy data
info!("start copying data.");
unsafe {
memory_set.with(|| {
for ph in elf.program_iter() {
@@ -93,9 +98,13 @@
if file_size == 0 {
return;
}
info!("file virtaddr: {:x?}, file size: {:x?}", virt_addr, file_size);
use core::slice;
info!("starting copy!");
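// with delayed allocation this copy faults on each untouched page,
// letting the page-fault handler allocate frames on demand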
let target = unsafe { slice::from_raw_parts_mut(virt_addr as *mut u8, file_size) };
info!("target got!");
target.copy_from_slice(&data[offset..offset + file_size]);
info!("finish copy!");
}
if is32 {
unsafe {
@@ -106,10 +115,10 @@
}
});
}
info!("finish copying data.");
//set the user memory pages in the memory set swappable (disabled for now)
//memory_set_map_swappable(&mut memory_set);
Context {
arch: unsafe {
@@ -156,6 +165,7 @@ impl Context {
impl Drop for Context {
fn drop(&mut self) {
/*
//set the user Memory pages in the memory set unswappable
let Self {ref mut arch, ref mut memory_set} = self;
let pt = {
@@ -170,7 +180,7 @@ impl Drop for Context{
}
}
info!("Finishing setting pages unswappable");
*/
}
}
@@ -189,6 +199,7 @@ impl Debug for Context {
* the new memory set
*/
fn memory_set_from<'a>(elf: &'a ElfFile<'a>) -> MemorySet {
info!("come into memory_set_from");
let mut set = MemorySet::new();
for ph in elf.program_iter() {
if ph.get_type() != Ok(Type::Load) {
@@ -198,6 +209,7 @@ fn memory_set_from<'a>(elf: &'a ElfFile<'a>) -> MemorySet {
ProgramHeader::Ph32(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),
ProgramHeader::Ph64(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),//???
};
info!("push!");
set.push(MemoryArea::new(virt_addr, virt_addr + mem_size, memory_attr_from(flags), ""));
}

View File

@@ -12,8 +12,12 @@ pub fn timer() {
pub fn before_return() {
if let Some(processor) = PROCESSOR.try() {
// Use try_lock here to avoid a deadlock: a page fault for delayed frame
// allocation may arrive while the processor lock is already held.
if let Some(mut processor) = processor.try_lock() {
processor.schedule();
}
}
}
/*