diff --git a/crate/memory/src/cow.rs b/crate/memory/src/cow.rs
index 1325d331..3f132be0 100644
--- a/crate/memory/src/cow.rs
+++ b/crate/memory/src/cow.rs
@@ -103,31 +103,39 @@ impl<T: PageTable> DerefMut for CowExt<T> {
 }
 
 /// A map contains reference count for shared frame
+///
+/// It lazily constructs the `BTreeMap`, so that no heap allocation happens before the heap is available.
 #[derive(Default)]
-struct FrameRcMap(BTreeMap<Frame, (u16, u16)>);
+struct FrameRcMap(Option<BTreeMap<Frame, (u16, u16)>>);
 
 type Frame = usize;
 
 impl FrameRcMap {
     fn read_count(&mut self, frame: &Frame) -> u16 {
-        self.0.get(frame).unwrap_or(&(0, 0)).0
+        self.map().get(frame).unwrap_or(&(0, 0)).0
     }
     fn write_count(&mut self, frame: &Frame) -> u16 {
-        self.0.get(frame).unwrap_or(&(0, 0)).1
+        self.map().get(frame).unwrap_or(&(0, 0)).1
     }
     fn read_increase(&mut self, frame: &Frame) {
-        let (r, w) = self.0.get(&frame).unwrap_or(&(0, 0)).clone();
-        self.0.insert(frame.clone(), (r + 1, w));
+        let (r, w) = self.map().get(&frame).unwrap_or(&(0, 0)).clone();
+        self.map().insert(frame.clone(), (r + 1, w));
     }
     fn read_decrease(&mut self, frame: &Frame) {
-        self.0.get_mut(frame).unwrap().0 -= 1;
+        self.map().get_mut(frame).unwrap().0 -= 1;
     }
     fn write_increase(&mut self, frame: &Frame) {
-        let (r, w) = self.0.get(&frame).unwrap_or(&(0, 0)).clone();
-        self.0.insert(frame.clone(), (r, w + 1));
+        let (r, w) = self.map().get(&frame).unwrap_or(&(0, 0)).clone();
+        self.map().insert(frame.clone(), (r, w + 1));
     }
     fn write_decrease(&mut self, frame: &Frame) {
-        self.0.get_mut(frame).unwrap().1 -= 1;
+        self.map().get_mut(frame).unwrap().1 -= 1;
+    }
+    fn map(&mut self) -> &mut BTreeMap<Frame, (u16, u16)> {
+        if self.0.is_none() {
+            self.0 = Some(BTreeMap::new());
+        }
+        self.0.as_mut().unwrap()
     }
 }
 
diff --git a/src/memory.rs b/src/memory.rs
index 62eea2db..54735d55 100644
--- a/src/memory.rs
+++ b/src/memory.rs
@@ -4,7 +4,6 @@ use consts::MEMORY_OFFSET;
 use spin::{Mutex, MutexGuard};
 use super::HEAP_ALLOCATOR;
 use ucore_memory::{*, paging::PageTable};
-#[cfg(target_arch = "x86_64")]
 use ucore_memory::cow::CowExt;
 pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, MemorySet as MemorySet_, Stack};
 
@@ -34,44 +33,24 @@ pub fn alloc_stack() -> Stack {
     Stack { top, bottom }
 }
 
-#[cfg(target_arch = "x86_64")]
 lazy_static! {
     static ref ACTIVE_TABLE: Mutex<CowExt<ActivePageTable>> = Mutex::new(unsafe {
         CowExt::new(ActivePageTable::new())
     });
 }
 
-#[cfg(target_arch = "riscv")]
-lazy_static! {
-    static ref ACTIVE_TABLE: Mutex<ActivePageTable> = Mutex::new(unsafe {
-        ActivePageTable::new()
-    });
-}
-
 /// The only way to get active page table
-#[cfg(target_arch = "x86_64")]
 pub fn active_table() -> MutexGuard<'static, CowExt<ActivePageTable>> {
     ACTIVE_TABLE.lock()
 }
-#[cfg(target_arch = "riscv")]
-pub fn active_table() -> MutexGuard<'static, ActivePageTable> {
-    ACTIVE_TABLE.lock()
-}
 
 // Return true to continue, false to halt
-#[cfg(target_arch = "x86_64")]
 pub fn page_fault_handler(addr: usize) -> bool {
     // Handle copy on write
     unsafe { ACTIVE_TABLE.force_unlock(); }
     active_table().page_fault_handler(addr, || alloc_frame().unwrap())
 }
-#[cfg(target_arch = "riscv")]
-pub fn page_fault_handler(addr: usize) -> bool {
-    false
-}
-
 pub fn init_heap() {
     use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
     unsafe { HEAP_ALLOCATOR.lock().init(KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE); }
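
Note on the `FrameRcMap` change: wrapping the inner `BTreeMap` in an `Option` means the map is not even constructed until the first refcount operation, which matters because the global `ACTIVE_TABLE` (and the `FrameRcMap` inside it) is created before `init_heap()` runs. A minimal, self-contained sketch of the same pattern — `LazyMap` is a hypothetical name, and it uses `std` in place of the kernel's `alloc` crate:

```rust
use std::collections::BTreeMap;

/// Hypothetical stand-in for `FrameRcMap`: the inner map is only
/// constructed on first use, so `LazyMap::default()` touches no heap.
#[derive(Default)]
struct LazyMap(Option<BTreeMap<usize, (u16, u16)>>);

impl LazyMap {
    /// Same shape as the patch's `FrameRcMap::map`: build the
    /// `BTreeMap` on first call, then hand out a mutable reference.
    fn map(&mut self) -> &mut BTreeMap<usize, (u16, u16)> {
        // One-line equivalent of the patch's explicit is_none()
        // check followed by as_mut().unwrap().
        self.0.get_or_insert_with(BTreeMap::new)
    }
}

fn main() {
    let mut m = LazyMap::default();
    assert!(m.0.is_none());            // nothing constructed yet
    m.map().insert(0x1000, (1, 0));    // first access builds the map
    assert_eq!(m.map().get(&0x1000), Some(&(1, 0)));
}
```

`Option::get_or_insert_with` and the patch's spelled-out `is_none()` / `as_mut().unwrap()` pair behave identically; the patch simply writes the steps out explicitly.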
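On the src/memory.rs side: with the riscv stubs removed, both architectures now route write faults through `CowExt::page_fault_handler`, and the read/write counts above are what let it choose between copying and reusing a frame. Roughly: a write fault on a frame whose write count exceeds one must copy, since other mappings still share the frame; a count of one means the faulting mapping is the last user and can take the frame writable as-is. A hedged sketch of that decision under a deliberately simplified model — `SharedFrame` and `on_write_fault` are illustrative names, not rCore APIs:

```rust
/// Hypothetical model of one copy-on-write mapping; the real handler
/// works on page-table entries plus FrameRcMap, not this struct.
struct SharedFrame {
    frame: usize, // physical frame backing the page
    writers: u16, // analogue of FrameRcMap's write count
}

/// Decide what the faulting mapping should use as its writable frame:
/// a fresh copy while others still share it, the frame itself otherwise.
fn on_write_fault(shared: &mut SharedFrame, alloc_frame: impl FnOnce() -> usize) -> usize {
    if shared.writers > 1 {
        // Still shared: drop our reference (the diff's write_decrease)
        // and copy into a newly allocated frame.
        shared.writers -= 1;
        alloc_frame() // a real handler also copies the page contents
    } else {
        // Last writer: reuse the frame in place, no copy needed.
        shared.frame
    }
}

fn main() {
    let mut page = SharedFrame { frame: 0x8000, writers: 2 };
    assert_eq!(on_write_fault(&mut page, || 0x9000), 0x9000);          // shared: copied
    assert_eq!(on_write_fault(&mut page, || unreachable!()), 0x8000);  // exclusive: reused
}
```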
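Lastly, the `unsafe { ACTIVE_TABLE.force_unlock(); }` line exists because a page fault can be raised while the current CPU already holds `ACTIVE_TABLE`; calling `lock()` again would then spin forever. The sketch below reproduces the hazard and the escape hatch using the `spin` crate the kernel already depends on; this is sound only while no other holder can exist (e.g. a single CPU handling the fault on the same core), which appears to be the assumption the patch relies on:

```rust
use spin::Mutex; // same lock type the kernel uses

static TABLE: Mutex<u32> = Mutex::new(0);

fn page_fault_handler() {
    // The fault may arrive while this CPU already holds TABLE, so a
    // plain lock() would deadlock. force_unlock() releases the lock
    // unconditionally -- safe only if no other holder can exist.
    unsafe { TABLE.force_unlock(); }
    *TABLE.lock() += 1; // handle the fault under a fresh guard
}

fn main() {
    let guard = TABLE.lock();  // simulate: lock held when the fault hits
    std::mem::forget(guard);   // keep it "held" without a live guard
    page_fault_handler();      // would spin forever without force_unlock
    println!("faults handled: {}", *TABLE.lock());
}
```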