
refactor to linear mapping for x86_64

WangRunji 2019-05-11 01:06:44 +08:00
parent 8149793b9a
commit cd22273ef9
33 changed files with 269 additions and 467 deletions
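The gist of the refactor: drop recursive mapping and temporary mappings, and instead assume the bootloader (the `linear` branch pulled in below) maps all physical memory at a fixed offset, so any physical frame — including page-table frames — is reachable by plain address arithmetic. A minimal sketch of that assumption, restating the constant and helper introduced later in this diff:

// Sketch only; mirrors the x86_64 constants and helper added in this commit.
// Assumption: the bootloader has already mapped all physical memory at this offset.
pub const PHYSICAL_MEMORY_OFFSET: usize = 0xfffffc00_00000000;

/// Convert a physical address to the virtual address it is linearly mapped at.
pub const fn phys_to_virt(paddr: usize) -> usize {
    PHYSICAL_MEMORY_OFFSET + paddr
}

// Consequence: a page-table frame can be edited in place instead of through a
// temporary mapping, e.g.
//     let table = unsafe { &mut *(phys_to_virt(frame_paddr) as *mut x86PageTable) };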

View File

@ -11,7 +11,7 @@ pub mod cow;
pub mod memory_set;
pub mod no_mmu;
pub mod paging;
pub mod swap;
//pub mod swap;
pub use crate::addr::*;

View File

@ -25,15 +25,13 @@ impl<T: FrameAllocator> MemoryHandler for ByFrame<T> {
fn clone_map(
&self,
pt: &mut PageTable,
with: &Fn(&mut FnMut()),
src_pt: &mut PageTable,
addr: VirtAddr,
attr: &MemoryAttr,
) {
let data = Vec::from(pt.get_page_slice_mut(addr));
with(&mut || {
self.map(pt, addr, attr);
pt.get_page_slice_mut(addr).copy_from_slice(&data);
});
self.map(pt, addr, attr);
let data = src_pt.get_page_slice_mut(addr);
pt.get_page_slice_mut(addr).copy_from_slice(data);
}
fn handle_page_fault(&self, _pt: &mut PageTable, _addr: VirtAddr) -> bool {

View File

@ -30,24 +30,21 @@ impl<T: FrameAllocator> MemoryHandler for Delay<T> {
fn clone_map(
&self,
pt: &mut PageTable,
with: &Fn(&mut FnMut()),
src_pt: &mut PageTable,
addr: VirtAddr,
attr: &MemoryAttr,
) {
let entry = pt.get_entry(addr).expect("failed to get entry");
let entry = src_pt.get_entry(addr).expect("failed to get entry");
if entry.present() {
// eager map and copy data
let data = Vec::from(pt.get_page_slice_mut(addr));
with(&mut || {
let target = self.allocator.alloc().expect("failed to alloc frame");
let target_data = pt.get_page_slice_mut(addr);
let entry = pt.map(addr, target);
target_data.copy_from_slice(&data);
attr.apply(entry);
});
let data = src_pt.get_page_slice_mut(addr);
let target = self.allocator.alloc().expect("failed to alloc frame");
let entry = pt.map(addr, target);
attr.apply(entry);
pt.get_page_slice_mut(addr).copy_from_slice(data);
} else {
// delay map
with(&mut || self.map(pt, addr, attr));
self.map(pt, addr, attr);
}
}

View File

@ -39,24 +39,21 @@ impl<F: Read, T: FrameAllocator> MemoryHandler for File<F, T> {
fn clone_map(
&self,
pt: &mut PageTable,
with: &Fn(&mut FnMut()),
src_pt: &mut PageTable,
addr: usize,
attr: &MemoryAttr,
) {
let entry = pt.get_entry(addr).expect("failed to get entry");
let entry = src_pt.get_entry(addr).expect("failed to get entry");
if entry.present() && !attr.readonly {
// eager map and copy data
let data = Vec::from(pt.get_page_slice_mut(addr));
with(&mut || {
let target = self.allocator.alloc().expect("failed to alloc frame");
let target_data = pt.get_page_slice_mut(addr);
let entry = pt.map(addr, target);
target_data.copy_from_slice(&data);
attr.apply(entry);
});
let data = src_pt.get_page_slice_mut(addr);
let target = self.allocator.alloc().expect("failed to alloc frame");
let entry = pt.map(addr, target);
attr.apply(entry);
pt.get_page_slice_mut(addr).copy_from_slice(data);
} else {
// delay map
with(&mut || self.map(pt, addr, attr));
self.map(pt, addr, attr);
}
}
@ -69,16 +66,9 @@ impl<F: Read, T: FrameAllocator> MemoryHandler for File<F, T> {
let frame = self.allocator.alloc().expect("failed to alloc frame");
entry.set_target(frame);
entry.set_present(true);
let writable = entry.writable();
entry.set_writable(true);
entry.update();
self.fill_data(pt, addr);
let entry = pt.get_entry(addr).expect("failed to get entry");
entry.set_writable(writable);
entry.update();
true
}
}

View File

@ -23,11 +23,11 @@ impl MemoryHandler for Linear {
fn clone_map(
&self,
pt: &mut PageTable,
with: &Fn(&mut FnMut()),
_src_pt: &mut PageTable,
addr: VirtAddr,
attr: &MemoryAttr,
) {
with(&mut || self.map(pt, addr, attr));
self.map(pt, addr, attr);
}
fn handle_page_fault(&self, _pt: &mut PageTable, _addr: VirtAddr) -> bool {

View File

@ -18,7 +18,7 @@ pub trait MemoryHandler: Debug + Send + Sync + 'static {
fn clone_map(
&self,
pt: &mut PageTable,
with: &Fn(&mut FnMut()),
src_pt: &mut PageTable,
addr: VirtAddr,
attr: &MemoryAttr,
);
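Under recursive mapping only one page table was addressable at a time, so `clone_map` took a `with` callback that temporarily switched into the destination table. With linear mapping both tables are ordinary memory, so the source table is passed directly. An annotated restatement of the `ByFrame` implementation above, showing how the two arguments are used:

fn clone_map(
    &self,
    pt: &mut PageTable,      // destination: the page table being constructed
    src_pt: &mut PageTable,  // source: the address space being cloned
    addr: VirtAddr,
    attr: &MemoryAttr,
) {
    self.map(pt, addr, attr);                           // allocate and map a fresh frame
    let data = src_pt.get_page_slice_mut(addr);         // read the source page directly
    pt.get_page_slice_mut(addr).copy_from_slice(data);  // copy it into the new frame
}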

View File

@ -1,5 +1,4 @@
//! memory set, area
//! and the inactive page table
//! Memory management structures
use alloc::{boxed::Box, string::String, vec::Vec};
use core::fmt::{Debug, Error, Formatter};
@ -13,8 +12,7 @@ use self::handler::MemoryHandler;
pub mod handler;
/// a continuous memory space when the same attribute
/// like `vma_struct` in ucore
/// A continuous memory space with the same attribute
#[derive(Debug, Clone)]
pub struct MemoryArea {
start_addr: VirtAddr,
@ -25,31 +23,7 @@ pub struct MemoryArea {
}
impl MemoryArea {
/*
** @brief get slice of the content in the memory area
** @retval &[u8] the slice of the content in the memory area
*/
pub unsafe fn as_slice(&self) -> &[u8] {
::core::slice::from_raw_parts(
self.start_addr as *const u8,
self.end_addr - self.start_addr,
)
}
/*
** @brief get mutable slice of the content in the memory area
** @retval &mut[u8] the mutable slice of the content in the memory area
*/
pub unsafe fn as_slice_mut(&self) -> &mut [u8] {
::core::slice::from_raw_parts_mut(
self.start_addr as *mut u8,
self.end_addr - self.start_addr,
)
}
/*
** @brief test whether a virtual address is in the memory area
** @param addr: VirtAddr the virtual address to test
** @retval bool whether the virtual address is in the memory area
*/
/// Test whether a virtual address is in the memory area
pub fn contains(&self, addr: VirtAddr) -> bool {
addr >= self.start_addr && addr < self.end_addr
}
@ -121,42 +95,22 @@ pub struct MemoryAttr {
}
impl MemoryAttr {
/*
** @brief set the memory attribute's user bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn user(mut self) -> Self {
self.user = true;
self
}
/*
** @brief set the memory attribute's readonly bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn readonly(mut self) -> Self {
self.readonly = true;
self
}
/*
** @brief unset the memory attribute's readonly bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn writable(mut self) -> Self {
self.readonly = false;
self
}
/*
** @brief set the memory attribute's execute bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn execute(mut self) -> Self {
self.execute = true;
self
}
/*
** @brief set the MMIO type
** @retval MemoryAttr the memory attribute itself
*/
pub fn mmio(mut self, value: u8) -> Self {
self.mmio = value;
self
@ -172,26 +126,23 @@ impl MemoryAttr {
}
}
/// set of memory space with multiple memory area with associated page table and stack space
/// like `mm_struct` in ucore
/// A memory space with multiple memory areas and an associated page table
/// NOTE: Don't remove align(64), or you will fail to run MIPS.
#[repr(align(64))]
pub struct MemorySet<T: InactivePageTable> {
pub struct MemorySet<T: PageTableExt> {
areas: Vec<MemoryArea>,
page_table: T,
}
impl<T: InactivePageTable> MemorySet<T> {
/*
** @brief create a memory set
** @retval MemorySet<T> the memory set created
*/
impl<T: PageTableExt> MemorySet<T> {
/// Create a new `MemorySet`
pub fn new() -> Self {
MemorySet {
areas: Vec::new(),
page_table: T::new(),
}
}
/// Create a new `MemorySet` for kernel remap
pub fn new_bare() -> Self {
MemorySet {
areas: Vec::new(),
@ -284,11 +235,7 @@ impl<T: InactivePageTable> MemorySet<T> {
.find(|area| area.is_overlap_with(start_addr, end_addr))
.is_none()
}
/*
** @brief add the memory area to the memory set
** @param area: MemoryArea the memory area to add
** @retval none
*/
/// Add an area to this set
pub fn push(
&mut self,
start_addr: VirtAddr,
@ -309,7 +256,7 @@ impl<T: InactivePageTable> MemorySet<T> {
handler: Box::new(handler),
name,
};
self.page_table.edit(|pt| area.map(pt));
area.map(&mut self.page_table);
// keep order by start address
let idx = self
.areas
@ -321,28 +268,21 @@ impl<T: InactivePageTable> MemorySet<T> {
self.areas.insert(idx, area);
}
/*
** @brief remove the memory area from the memory set
** @param area: MemoryArea the memory area to remove
** @retval none
*/
/// Remove the area `[start_addr, end_addr)` from `MemorySet`
pub fn pop(&mut self, start_addr: VirtAddr, end_addr: VirtAddr) {
assert!(start_addr <= end_addr, "invalid memory area");
for i in 0..self.areas.len() {
if self.areas[i].start_addr == start_addr && self.areas[i].end_addr == end_addr {
let area = self.areas.remove(i);
self.page_table.edit(|pt| area.unmap(pt));
area.unmap(&mut self.page_table);
return;
}
}
panic!("no memory area found");
}
/*
** @brief remove the memory area from the memory set and split existed ones when necessary
** @param area: MemoryArea the memory area to remove
** @retval none
*/
/// Remove the area `[start_addr, end_addr)` from `MemorySet`
/// and split existed ones when necessary.
pub fn pop_with_split(&mut self, start_addr: VirtAddr, end_addr: VirtAddr) {
assert!(start_addr <= end_addr, "invalid memory area");
let mut i = 0;
@ -351,7 +291,7 @@ impl<T: InactivePageTable> MemorySet<T> {
if self.areas[i].start_addr >= start_addr && self.areas[i].end_addr <= end_addr {
// subset
let area = self.areas.remove(i);
self.page_table.edit(|pt| area.unmap(pt));
area.unmap(&mut self.page_table);
i -= 1;
} else if self.areas[i].start_addr >= start_addr
&& self.areas[i].start_addr < end_addr
@ -365,7 +305,7 @@ impl<T: InactivePageTable> MemorySet<T> {
handler: area.handler.box_clone(),
name: area.name,
};
self.page_table.edit(|pt| dead_area.unmap(pt));
dead_area.unmap(&mut self.page_table);
let new_area = MemoryArea {
start_addr: end_addr,
end_addr: area.end_addr,
@ -379,13 +319,13 @@ impl<T: InactivePageTable> MemorySet<T> {
// postfix
let area = self.areas.remove(i);
let dead_area = MemoryArea {
start_addr: start_addr,
start_addr,
end_addr: area.end_addr,
attr: area.attr,
handler: area.handler.box_clone(),
name: area.name,
};
self.page_table.edit(|pt| dead_area.unmap(pt));
dead_area.unmap(&mut self.page_table);
let new_area = MemoryArea {
start_addr: area.start_addr,
end_addr: start_addr,
@ -398,13 +338,13 @@ impl<T: InactivePageTable> MemorySet<T> {
// superset
let area = self.areas.remove(i);
let dead_area = MemoryArea {
start_addr: start_addr,
end_addr: end_addr,
start_addr,
end_addr,
attr: area.attr,
handler: area.handler.box_clone(),
name: area.name,
};
self.page_table.edit(|pt| dead_area.unmap(pt));
dead_area.unmap(&mut self.page_table);
let new_area_left = MemoryArea {
start_addr: area.start_addr,
end_addr: start_addr,
@ -428,74 +368,50 @@ impl<T: InactivePageTable> MemorySet<T> {
}
}
/*
** @brief get iterator of the memory area
** @retval impl Iterator<Item=&MemoryArea>
** the memory area iterator
*/
/// Get iterator of areas
pub fn iter(&self) -> impl Iterator<Item = &MemoryArea> {
self.areas.iter()
}
pub fn edit(&mut self, f: impl FnOnce(&mut T::Active)) {
self.page_table.edit(f);
}
/*
** @brief execute function with the associated page table
** @param f: impl FnOnce() the function to be executed
** @retval none
*/
/// Execute function `f` with the associated page table
pub unsafe fn with(&self, f: impl FnOnce()) {
self.page_table.with(f);
}
/*
** @brief activate the associated page table
** @retval none
*/
/// Activate the associated page table
pub unsafe fn activate(&self) {
self.page_table.activate();
}
/*
** @brief get the token of the associated page table
** @retval usize the token of the inactive page table
*/
/// Get the token of the associated page table
pub fn token(&self) -> usize {
self.page_table.token()
}
/*
** @brief clear the memory set
** @retval none
*/
/// Clear and unmap all areas
pub fn clear(&mut self) {
let Self {
ref mut page_table,
ref mut areas,
..
} = self;
page_table.edit(|pt| {
for area in areas.iter() {
area.unmap(pt);
}
});
for area in areas.iter() {
area.unmap(page_table);
}
areas.clear();
}
/// Get physical address of the page of given virtual `addr`
pub fn translate(&mut self, addr: VirtAddr) -> Option<PhysAddr> {
self.page_table.edit(|pt| {
pt.get_entry(addr).and_then(|entry| {
if entry.user() {
Some(entry.target())
} else {
None
}
})
self.page_table.get_entry(addr).and_then(|entry| {
if entry.user() {
Some(entry.target())
} else {
None
}
})
}
/*
** @brief get the mutable reference for the inactive page table
** @retval: &mut T the mutable reference of the inactive page table
*/
/// Get the reference of inner page table
pub fn get_page_table_mut(&mut self) -> &mut T {
&mut self.page_table
}
@ -503,32 +419,28 @@ impl<T: InactivePageTable> MemorySet<T> {
pub fn handle_page_fault(&mut self, addr: VirtAddr) -> bool {
let area = self.areas.iter().find(|area| area.contains(addr));
match area {
Some(area) => self
.page_table
.edit(|pt| area.handler.handle_page_fault(pt, addr)),
Some(area) => area.handler.handle_page_fault(&mut self.page_table, addr),
None => false,
}
}
pub fn clone(&mut self) -> Self {
let new_page_table = T::new();
let mut new_page_table = T::new();
let Self {
ref mut page_table,
ref areas,
..
} = self;
page_table.edit(|pt| {
for area in areas.iter() {
for page in Page::range_of(area.start_addr, area.end_addr) {
area.handler.clone_map(
pt,
&|f| unsafe { new_page_table.with(f) },
page.start_address(),
&area.attr,
);
}
for area in areas.iter() {
for page in Page::range_of(area.start_addr, area.end_addr) {
area.handler.clone_map(
&mut new_page_table,
page_table,
page.start_address(),
&area.attr,
);
}
});
}
MemorySet {
areas: areas.clone(),
page_table: new_page_table,
@ -536,13 +448,13 @@ impl<T: InactivePageTable> MemorySet<T> {
}
}
impl<T: InactivePageTable> Drop for MemorySet<T> {
impl<T: PageTableExt> Drop for MemorySet<T> {
fn drop(&mut self) {
self.clear();
}
}
impl<T: InactivePageTable> Debug for MemorySet<T> {
impl<T: PageTableExt> Debug for MemorySet<T> {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
f.debug_list().entries(self.areas.iter()).finish()
}

View File

@ -1,23 +0,0 @@
//! Helper functions
use super::*;
pub trait PageTableExt: PageTable {
// Take some special care here.
// TEMP_PAGE_ADDR mapping might be overwritten in the `f` below.
// So this should be really high in kernel space when necessary.
const TEMP_PAGE_ADDR: VirtAddr = 0xcafeb000;
fn with_temporary_map<T, D>(
&mut self,
target: PhysAddr,
f: impl FnOnce(&mut Self, &mut D) -> T,
) -> T {
self.map(Self::TEMP_PAGE_ADDR, target);
let data =
unsafe { &mut *(self.get_page_slice_mut(Self::TEMP_PAGE_ADDR).as_ptr() as *mut D) };
let ret = f(self, data);
self.unmap(Self::TEMP_PAGE_ADDR);
ret
}
}
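This deleted `with_temporary_map` helper is exactly what linear mapping makes unnecessary: instead of mapping a frame at `TEMP_PAGE_ADDR` to touch its contents, the kernel reaches the frame through the physical-memory window. The replacement pattern, as it appears in the x86_64 paging changes later in this diff:

// With all physical memory linearly mapped, a frame's contents are addressable directly.
fn frame_to_page_table(frame: Frame) -> *mut x86PageTable {
    let vaddr = phys_to_virt(frame.start_address().as_u64() as usize);
    vaddr as *mut x86PageTable
}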

View File

@ -2,12 +2,10 @@
//!
//! Implemented for every architecture, used by OS.
pub use self::ext::*;
#[cfg(test)]
pub use self::mock_page_table::MockPageTable;
use super::*;
mod ext;
#[cfg(test)]
mod mock_page_table;
@ -26,31 +24,18 @@ pub trait PageTable {
fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Entry>;
/// Get a mutable reference of the content of a page of virtual address `addr`
/// Used for testing with mock
fn get_page_slice_mut<'a>(&mut self, addr: VirtAddr) -> &'a mut [u8] {
unsafe { core::slice::from_raw_parts_mut((addr & !(PAGE_SIZE - 1)) as *mut u8, PAGE_SIZE) }
}
fn get_page_slice_mut<'a>(&mut self, addr: VirtAddr) -> &'a mut [u8];
/// Read data from virtual address `addr`
/// Used for testing with mock
fn read(&mut self, addr: VirtAddr) -> u8 {
unsafe { (addr as *const u8).read() }
fn read(&mut self, _addr: VirtAddr) -> u8 {
unimplemented!()
}
/// Write data to virtual address `addr`
/// Used for testing with mock
fn write(&mut self, addr: VirtAddr, data: u8) {
unsafe { (addr as *mut u8).write(data) }
}
/// When `vaddr` is not mapped, map it to `paddr`.
fn map_if_not_exists(&mut self, vaddr: VirtAddr, paddr: usize) -> bool {
if let Some(entry) = self.get_entry(vaddr) {
if entry.present() {
return false;
}
}
self.map(vaddr, paddr);
true
fn write(&mut self, _addr: VirtAddr, _data: u8) {
unimplemented!()
}
}
@ -99,13 +84,8 @@ pub trait Entry {
fn set_mmio(&mut self, value: u8);
}
/// An inactive page table
/// Note: InactivePageTable is not a PageTable
/// but it can be activated and "become" a PageTable
pub trait InactivePageTable: Sized {
/// the active version of page table
type Active: PageTable;
/// Extra methods of `PageTable` for non-trait-object usage
pub trait PageTableExt: PageTable + Sized {
/// Create a new page table with kernel memory mapped
fn new() -> Self {
let mut pt = Self::new_bare();
@ -125,10 +105,6 @@ pub trait InactivePageTable: Sized {
fn active_token() -> usize;
fn flush_tlb();
/// Make this page table editable
/// Set the recursive entry of current active page table to this
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T;
/// Activate this page table
unsafe fn activate(&self) {
let old_token = Self::active_token();
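With `edit` and the temporary-map machinery gone, the separate `InactivePageTable` trait collapses into `PageTableExt`. Roughly, the merged trait has this shape (assembled from the hunk above and the x86_64 `impl` further down; the exact method list and default bodies are an approximation, since parts of the trait fall outside the visible hunks):

pub trait PageTableExt: PageTable + Sized {
    fn new() -> Self;            // new page table with kernel memory mapped
    fn new_bare() -> Self;       // empty page table backed by a fresh frame
    fn map_kernel(&mut self);    // copy kernel / physical-memory / stack entries
    fn token(&self) -> usize;    // value to load into CR3 (or the arch equivalent)
    unsafe fn set_token(token: usize);
    fn active_token() -> usize;
    fn flush_tlb();
    unsafe fn activate(&self);   // switch to this table if it is not already active
}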

kernel/Cargo.lock generated
View File

@ -78,7 +78,7 @@ source = "git+https://github.com/myrrlyn/bitvec.git#8ab20a3e33fe068fc3a4a05eda12
[[package]]
name = "bootloader"
version = "0.4.0"
source = "git+https://github.com/rcore-os/bootloader#18e4fec0d82e8a5571abceb69d1d11fc0edccba1"
source = "git+https://github.com/rcore-os/bootloader?branch=linear#cc33d7d2d2d33f5adcbd0f596964ba99127b51af"
dependencies = [
"apic 0.1.0 (git+https://github.com/rcore-os/apic-rs)",
"fixedvec 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
@ -101,6 +101,11 @@ name = "byteorder"
version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "cast"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "cc"
version = "1.0.31"
@ -354,7 +359,7 @@ dependencies = [
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"bitmap-allocator 0.1.0 (git+https://github.com/rcore-os/bitmap-allocator)",
"bitvec 0.11.0 (git+https://github.com/myrrlyn/bitvec.git)",
"bootloader 0.4.0 (git+https://github.com/rcore-os/bootloader)",
"bootloader 0.4.0 (git+https://github.com/rcore-os/bootloader?branch=linear)",
"buddy_system_allocator 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)",
"console-traits 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -377,7 +382,7 @@ dependencies = [
"spin 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"uart_16550 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"volatile 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"x86_64 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
"x86_64 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"xmas-elf 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -626,6 +631,19 @@ dependencies = [
"ux 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "x86_64"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"array-init 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"raw-cpuid 6.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ux 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "xmas-elf"
version = "0.6.2"
@ -650,9 +668,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum bitmap-allocator 0.1.0 (git+https://github.com/rcore-os/bitmap-allocator)" = "<none>"
"checksum bitvec 0.11.0 (git+https://github.com/myrrlyn/bitvec.git)" = "<none>"
"checksum bitvec 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cfadef5c4e2c2e64067b9ecc061179f12ac7ec65ba613b1f60f3972bbada1f5b"
"checksum bootloader 0.4.0 (git+https://github.com/rcore-os/bootloader)" = "<none>"
"checksum bootloader 0.4.0 (git+https://github.com/rcore-os/bootloader?branch=linear)" = "<none>"
"checksum buddy_system_allocator 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "59da15ef556589ee78370281d75b67f2d69ed26465ec0e0f3961e2021502426f"
"checksum byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a019b10a2a7cdeb292db131fc8113e57ea2a908f6e7894b0c3c671893b65dbeb"
"checksum cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "926013f2860c46252efceabb19f4a6b308197505082c609025aa6706c011d427"
"checksum cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)" = "c9ce8bb087aacff865633f0bd5aeaed910fe2fe55b55f4739527f2e023a2e53d"
"checksum cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "11d43355396e872eefb45ce6342e4374ed7bc2b3a502d1b28e36d6e23c05d1f4"
"checksum console-traits 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f711b3d1d5c3f7ae7d6428901c0f3e5d5f5c800fcfac86bf0252e96373a2cec6"
@ -717,5 +736,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum x86 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "841e1ca5a87068718a2a26f2473c6f93cf3b8119f9778fa0ae4b39b664d9e66a"
"checksum x86_64 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f9258d7e2dd25008d69e8c9e9ee37865887a5e1e3d06a62f1cb3f6c209e6f177"
"checksum x86_64 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1d0a8201f52d2c7b373c7243dcdfb27c0dd5012f221ef6a126f507ee82005204"
"checksum x86_64 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d69bf2d256c74df90fcc68aaf99862dd205310609e9d56247a5c82ead2f28a93"
"checksum xmas-elf 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "22678df5df766e8d1e5d609da69f0c3132d794edf6ab5e75e7abcd2270d4cf58"
"checksum zero 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5f1bc8a6b2005884962297587045002d8cfb8dcec9db332f4ca216ddc5de82c5"

View File

@ -70,9 +70,9 @@ rcore-fs = { git = "https://github.com/rcore-os/rcore-fs" }
rcore-fs-sfs = { git = "https://github.com/rcore-os/rcore-fs" }
[target.'cfg(target_arch = "x86_64")'.dependencies]
bootloader = { git = "https://github.com/rcore-os/bootloader" }
bootloader = { git = "https://github.com/rcore-os/bootloader", branch = "linear" }
apic = { git = "https://github.com/rcore-os/apic-rs" }
x86_64 = "0.5"
x86_64 = "0.6"
raw-cpuid = "6.0"
uart_16550 = "0.2"
pc-keyboard = "0.5"

View File

@ -1,5 +1,5 @@
// Depends on kernel
use crate::memory::{active_table, alloc_frame, dealloc_frame};
use crate::memory::{alloc_frame, dealloc_frame};
use log::*;
use mips::addr::*;
use mips::paging::{FrameAllocator, FrameDeallocator};

View File

@ -1,6 +1,4 @@
use super::consts::KERNEL_OFFSET;
use crate::memory::active_table;
use rcore_memory::paging::PageTable;
/// Mask all external interrupt except serial.
pub unsafe fn init_external_interrupt() {

View File

@ -1,6 +1,7 @@
pub const MEMORY_OFFSET: usize = 0;
pub const KERNEL_OFFSET: usize = 0xffffff00_00000000;
pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024; // 8 MB
pub const PHYSICAL_MEMORY_OFFSET: usize = 0xfffffc00_00000000;
pub const USER_STACK_OFFSET: usize = 0x00008000_00000000 - USER_STACK_SIZE;
pub const USER_STACK_SIZE: usize = 8 * 1024 * 1024; // 8 MB, the default config of Linux

View File

@ -1,3 +1,4 @@
use crate::memory::phys_to_virt;
use apic::{LocalApic, XApic};
use raw_cpuid::CpuId;
use x86_64::registers::control::{Cr0, Cr0Flags};
@ -21,12 +22,12 @@ pub fn id() -> usize {
}
pub fn send_ipi(cpu_id: usize) {
let mut lapic = unsafe { XApic::new(0xffffff00_fee00000) };
let mut lapic = unsafe { XApic::new(phys_to_virt(0xfee00000)) };
lapic.send_ipi(cpu_id as u8, 0x30); // TODO: Find a IPI trap num
}
pub fn init() {
let mut lapic = unsafe { XApic::new(0xffffff00_fee00000) };
let mut lapic = unsafe { XApic::new(phys_to_virt(0xfee00000)) };
lapic.cpu_init();
// enable FPU, the manual Volume 3 Chapter 13

View File

@ -7,6 +7,7 @@ pub fn init() {
use crate::arch::interrupt::consts;
use crate::arch::interrupt::enable_irq;
enable_irq(consts::Keyboard);
info!("keyboard: init end");
}
/// Receive character from keyboard

View File

@ -12,6 +12,7 @@ pub fn init() {
COM2.lock().init();
enable_irq(consts::COM1);
enable_irq(consts::COM2);
info!("serial: init end");
}
pub trait SerialRead {

View File

@ -6,7 +6,7 @@ use spin::Mutex;
use volatile::Volatile;
use x86_64::instructions::port::Port;
use crate::consts::KERNEL_OFFSET;
use crate::memory::phys_to_virt;
use crate::util::color::ConsoleColor;
use crate::util::escape_parser::{EscapeParser, CSI};
@ -100,8 +100,7 @@ impl VgaBuffer {
lazy_static! {
pub static ref VGA_WRITER: Mutex<VgaWriter> = Mutex::new(
// VGA virtual address is specified at bootloader
VgaWriter::new(unsafe{ &mut *((KERNEL_OFFSET + 0xf0000000) as *mut VgaBuffer) })
VgaWriter::new(unsafe{ &mut *((phys_to_virt(0xb8000)) as *mut VgaBuffer) })
);
}

View File

@ -7,6 +7,7 @@ pub use self::handler::*;
pub use self::trapframe::*;
use crate::consts::KERNEL_OFFSET;
use apic::*;
use crate::memory::phys_to_virt;
#[inline(always)]
pub unsafe fn enable() {
@ -39,12 +40,12 @@ pub fn no_interrupt(f: impl FnOnce()) {
#[inline(always)]
pub fn enable_irq(irq: u8) {
let mut ioapic = unsafe { IoApic::new(KERNEL_OFFSET + IOAPIC_ADDR as usize) };
let mut ioapic = unsafe { IoApic::new(phys_to_virt(IOAPIC_ADDR as usize)) };
ioapic.enable(irq, 0);
}
#[inline(always)]
pub fn ack(_irq: u8) {
let mut lapic = unsafe { XApic::new(KERNEL_OFFSET + LAPIC_ADDR) };
let mut lapic = unsafe { XApic::new(phys_to_virt(LAPIC_ADDR)) };
lapic.eoi();
}

View File

@ -1,15 +1,10 @@
use crate::consts::KERNEL_OFFSET;
use bitmap_allocator::BitAlloc;
// Depends on kernel
use super::{BootInfo, MemoryRegionType};
use crate::memory::{active_table, init_heap, FRAME_ALLOCATOR};
use log::*;
use crate::memory::{init_heap, FRAME_ALLOCATOR};
use bitmap_allocator::BitAlloc;
use rcore_memory::paging::*;
use rcore_memory::PAGE_SIZE;
pub fn init(boot_info: &BootInfo) {
init_frame_allocator(boot_info);
init_device_vm_map();
init_heap();
info!("memory: init end");
}
@ -25,15 +20,3 @@ fn init_frame_allocator(boot_info: &BootInfo) {
}
}
}
fn init_device_vm_map() {
let mut page_table = active_table();
// IOAPIC
page_table
.map(KERNEL_OFFSET + 0xfec00000, 0xfec00000)
.update();
// LocalAPIC
page_table
.map(KERNEL_OFFSET + 0xfee00000, 0xfee00000)
.update();
}

View File

@ -25,16 +25,20 @@ pub extern "C" fn _start(boot_info: &'static BootInfo) -> ! {
println!("Hello world! from CPU {}!", cpu_id);
if cpu_id != 0 {
while !AP_CAN_INIT.load(Ordering::Relaxed) {}
while !AP_CAN_INIT.load(Ordering::Relaxed) {
spin_loop_hint();
}
other_start();
}
// First init log mod, so that we can print log info.
crate::logging::init();
info!("{:#?}", boot_info);
info!("{:#x?}", boot_info);
assert_eq!(boot_info.physical_memory_offset as usize, consts::PHYSICAL_MEMORY_OFFSET);
// Init trap handling.
idt::init();
// setup fast syscall in x86_64
interrupt::fast_syscall::init();
// Init physical memory management and heap.
@ -60,14 +64,14 @@ pub extern "C" fn _start(boot_info: &'static BootInfo) -> ! {
/// The entry point for other processors
fn other_start() -> ! {
// Init trap handling.
// init trap handling.
idt::init();
// init gdt
gdt::init();
// init local apic
cpu::init();
// setup fast syscall in xv6-64
// setup fast syscall in x86_64
interrupt::fast_syscall::init();
//call the first main function in kernel.
// call the first main function in kernel.
crate::kmain();
}

View File

@ -1,6 +1,5 @@
// Depends on kernel
use crate::consts::KERNEL_OFFSET;
use crate::memory::{active_table, alloc_frame, dealloc_frame};
use crate::memory::{alloc_frame, dealloc_frame, phys_to_virt};
use core::sync::atomic::Ordering;
use log::*;
use rcore_memory::paging::*;
@ -8,12 +7,12 @@ use x86_64::instructions::tlb;
use x86_64::registers::control::{Cr3, Cr3Flags};
use x86_64::structures::paging::{
frame::PhysFrame as Frame,
mapper::{Mapper, RecursivePageTable},
mapper::{MappedPageTable, Mapper},
page::{Page, PageRange, Size4KiB},
page_table::{PageTable as x86PageTable, PageTableEntry, PageTableFlags as EF},
FrameAllocator, FrameDeallocator,
};
use x86_64::{VirtAddr, PhysAddr};
use x86_64::{PhysAddr, VirtAddr};
pub trait PageExt {
fn of_addr(address: usize) -> Self;
@ -40,11 +39,15 @@ impl FrameExt for Frame {
}
}
pub struct ActivePageTable(RecursivePageTable<'static>);
pub struct PageTableImpl(
MappedPageTable<'static, fn(Frame) -> *mut x86PageTable>,
PageEntry,
Frame,
);
pub struct PageEntry(PageTableEntry);
pub struct PageEntry(&'static mut PageTableEntry, Page, Frame);
impl PageTable for ActivePageTable {
impl PageTable for PageTableImpl {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
let flags = EF::PRESENT | EF::WRITABLE | EF::NO_EXECUTE;
unsafe {
@ -59,7 +62,7 @@ impl PageTable for ActivePageTable {
.flush();
}
flush_tlb_all(addr);
unsafe { &mut *(get_entry_ptr(addr, 1)) }
self.get_entry(addr).unwrap()
}
fn unmap(&mut self, addr: usize) {
@ -68,33 +71,39 @@ impl PageTable for ActivePageTable {
}
fn get_entry(&mut self, addr: usize) -> Option<&mut Entry> {
for level in 0..3 {
let entry = get_entry_ptr(addr, 4 - level);
if unsafe { !(*entry).present() } {
let mut page_table = frame_to_page_table(self.2);
for level in 0..4 {
let index = (addr >> (12 + (3 - level) * 9)) & 0o777;
let entry = unsafe { &mut (&mut *page_table)[index] };
if level == 3 {
let page = Page::of_addr(addr);
self.1 = PageEntry(entry, page, self.2);
return Some(&mut self.1 as &mut Entry);
}
if !entry.flags().contains(EF::PRESENT) {
return None;
}
page_table = frame_to_page_table(entry.frame().unwrap());
}
unsafe { Some(&mut *(get_entry_ptr(addr, 1))) }
unreachable!();
}
fn get_page_slice_mut<'a>(&mut self, addr: usize) -> &'a mut [u8] {
let frame = self.0.translate_page(Page::of_addr(addr)).unwrap();
let vaddr = phys_to_virt(frame.start_address().as_u64() as usize);
unsafe { core::slice::from_raw_parts_mut(vaddr as *mut u8, 0x1000) }
}
}
impl PageTableExt for ActivePageTable {
// FIXME: the default value 0xcafebe000 is so low that allocation might overwrite it sometimes.
// However, putting it to KERNEL_OFFSET | 0xcafeb000 has unintended effects.
// Someone needs to reconsider this and use an ultimate solution.
// const TEMP_PAGE_ADDR: usize = KERNEL_OFFSET | 0xcafeb000;
}
impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(RecursivePageTable::new(&mut *(0xffffffff_fffff000 as *mut _)).unwrap())
}
fn frame_to_page_table(frame: Frame) -> *mut x86PageTable {
let vaddr = phys_to_virt(frame.start_address().as_u64() as usize);
vaddr as *mut x86PageTable
}
impl Entry for PageEntry {
fn update(&mut self) {
use x86_64::{instructions::tlb::flush, VirtAddr};
let addr = VirtAddr::new_unchecked((self as *const _ as u64) << 9);
let addr = self.1.start_address();
flush(addr);
flush_tlb_all(addr.as_u64() as usize);
}
@ -153,14 +162,18 @@ impl Entry for PageEntry {
self.0.flags().contains(EF::USER_ACCESSIBLE)
}
fn set_user(&mut self, value: bool) {
self.as_flags().set(EF::USER_ACCESSIBLE, value);
// x86_64 page table struct do not implement setting USER bit
if value {
let mut addr = self as *const _ as usize;
for _ in 0..3 {
// Upper level entry
addr = ((addr >> 9) & 0o777_777_777_7770) | 0xffffff80_00000000;
// set USER_ACCESSIBLE
unsafe { (*(addr as *mut EF)).insert(EF::USER_ACCESSIBLE) };
let mut page_table = frame_to_page_table(self.2);
for level in 0..4 {
let index =
(self.1.start_address().as_u64() as usize >> (12 + (3 - level) * 9)) & 0o777;
let entry = unsafe { &mut (&mut *page_table)[index] };
entry.set_flags(entry.flags() | EF::USER_ACCESSIBLE);
if level == 3 {
return;
}
page_table = frame_to_page_table(entry.frame().unwrap());
}
}
}
@ -176,51 +189,57 @@ impl Entry for PageEntry {
fn set_mmio(&mut self, _value: u8) {}
}
fn get_entry_ptr(addr: usize, level: u8) -> *mut PageEntry {
debug_assert!(level <= 4);
let entry_addr = ((addr >> (level * 9)) & !0x7) | !((1 << (48 - level * 9)) - 1);
entry_addr as *mut PageEntry
}
impl PageEntry {
fn as_flags(&mut self) -> &mut EF {
unsafe { &mut *(self as *mut _ as *mut EF) }
unsafe { &mut *(self.0 as *mut _ as *mut EF) }
}
}
#[derive(Debug)]
pub struct InactivePageTable0 {
p4_frame: Frame,
impl PageTableImpl {
/// Unsafely get the current active page table.
/// WARN: You MUST call `core::mem::forget` for it after use!
pub unsafe fn active() -> Self {
let frame = Cr3::read().0;
let table = unsafe { &mut *frame_to_page_table(frame) };
PageTableImpl(
MappedPageTable::new(table, frame_to_page_table),
core::mem::uninitialized(),
frame,
)
}
}
impl InactivePageTable for InactivePageTable0 {
type Active = ActivePageTable;
impl PageTableExt for PageTableImpl {
fn new_bare() -> Self {
let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(target);
active_table().with_temporary_map(target, |_, table: &mut x86PageTable| {
table.zero();
// set up recursive mapping for the table
table[511].set_frame(frame.clone(), EF::PRESENT | EF::WRITABLE);
});
InactivePageTable0 { p4_frame: frame }
let table = unsafe { &mut *frame_to_page_table(frame) };
table.zero();
unsafe {
PageTableImpl(
MappedPageTable::new(table, frame_to_page_table),
core::mem::uninitialized(),
frame,
)
}
}
fn map_kernel(&mut self) {
let table = unsafe { &mut *(0xffffffff_fffff000 as *mut x86PageTable) };
let table = unsafe { &mut *frame_to_page_table(Cr3::read().0) };
// Kernel at 0xffff_ff00_0000_0000
// Kernel stack at 0x0000_57ac_0000_0000 (defined in bootloader crate)
let e510 = table[510].clone();
let ekernel = table[510].clone();
let ephysical = table[0x1f8].clone();
let estack = table[175].clone();
self.edit(|_| {
table[510].set_addr(e510.addr(), e510.flags() | EF::GLOBAL);
table[175].set_addr(estack.addr(), estack.flags() | EF::GLOBAL);
});
let table = unsafe { &mut *frame_to_page_table(self.2) };
table[510].set_addr(ekernel.addr(), ekernel.flags() | EF::GLOBAL);
table[0x1f8].set_addr(ephysical.addr(), ephysical.flags() | EF::GLOBAL);
table[175].set_addr(estack.addr(), estack.flags() | EF::GLOBAL);
}
fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as CR3
self.2.start_address().as_u64() as usize // as CR3
}
unsafe fn set_token(token: usize) {
@ -237,40 +256,18 @@ impl InactivePageTable for InactivePageTable0 {
fn flush_tlb() {
tlb::flush_all();
}
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
let target = Cr3::read().0.start_address().as_u64() as usize;
if self.p4_frame == Cr3::read().0 {
return f(&mut active_table());
}
active_table().with_temporary_map(target, |active_table, p4_table: &mut x86PageTable| {
let backup = p4_table[0o777].clone();
// overwrite recursive mapping
p4_table[0o777].set_frame(self.p4_frame.clone(), EF::PRESENT | EF::WRITABLE);
tlb::flush_all();
// execute f in the new context
let ret = f(active_table);
// restore recursive mapping to original p4 table
p4_table[0o777] = backup;
tlb::flush_all();
ret
})
}
}
impl Drop for InactivePageTable0 {
impl Drop for PageTableImpl {
fn drop(&mut self) {
info!("PageTable dropping: {:?}", self);
dealloc_frame(self.p4_frame.start_address().as_u64() as usize);
info!("PageTable dropping: {:?}", self.2);
dealloc_frame(self.2.start_address().as_u64() as usize);
}
}
struct FrameAllocatorForX86;
impl FrameAllocator<Size4KiB> for FrameAllocatorForX86 {
unsafe impl FrameAllocator<Size4KiB> for FrameAllocatorForX86 {
fn allocate_frame(&mut self) -> Option<Frame> {
alloc_frame().map(|addr| Frame::of_addr(addr))
}
@ -284,6 +281,8 @@ impl FrameDeallocator<Size4KiB> for FrameAllocatorForX86 {
/// Flush TLB for `vaddr` on all CPU
fn flush_tlb_all(vaddr: usize) {
// FIXME: too slow, disabled for now.
return;
if !super::AP_CAN_INIT.load(Ordering::Relaxed) {
return;
}
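One consequence of `PageTableImpl::active()` being built straight from the CR3 frame is the `core::mem::forget` discipline its doc comment demands: dropping a `PageTableImpl` deallocates its P4 frame, which must never happen to the table the CPU is running on. A small sketch of the pattern used by the DMA provider and `enlarge_heap` later in this diff (the wrapper name `virt_to_phys` is illustrative, not part of the commit):

fn virt_to_phys(vaddr: usize) -> usize {
    let mut page_table = unsafe { PageTableImpl::active() };
    let paddr = page_table.get_entry(vaddr).unwrap().target();
    // Never drop the live page table: its Drop impl would free the in-use P4 frame.
    core::mem::forget(page_table);
    paddr
}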

View File

@ -8,16 +8,15 @@ use bitflags::*;
use device_tree::util::SliceRead;
use device_tree::Node;
use log::*;
use rcore_memory::paging::PageTable;
use rcore_memory::PAGE_SIZE;
use volatile::Volatile;
use crate::drivers::BlockDriver;
use crate::memory::active_table;
use crate::sync::SpinNoIrqLock as Mutex;
use super::super::bus::virtio_mmio::*;
use super::super::{DeviceType, Driver, BLK_DRIVERS, DRIVERS};
use crate::memory::phys_to_virt;
pub struct VirtIOBlk {
interrupt_parent: u32,
@ -106,8 +105,6 @@ impl Driver for VirtIOBlkDriver {
fn try_handle_interrupt(&self, _irq: Option<u32>) -> bool {
let driver = self.0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
let header = unsafe { &mut *(driver.header as *mut VirtIOHeader) };
let interrupt = header.interrupt_status.read();
if interrupt != 0 {
@ -127,9 +124,6 @@ impl Driver for VirtIOBlkDriver {
fn read_block(&self, block_id: usize, buf: &mut [u8]) -> bool {
let mut driver = self.0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
let mut req = VirtIOBlkReadReq::default();
req.req_type = VIRTIO_BLK_T_IN;
req.reserved = 0;
@ -155,9 +149,6 @@ impl Driver for VirtIOBlkDriver {
fn write_block(&self, block_id: usize, buf: &[u8]) -> bool {
let mut driver = self.0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
let mut req: VirtIOBlkWriteReq = unsafe { zeroed() };
req.req_type = VIRTIO_BLK_T_OUT;
req.reserved = 0;
@ -184,8 +175,9 @@ impl Driver for VirtIOBlkDriver {
pub fn virtio_blk_init(node: &Node) {
let reg = node.prop_raw("reg").unwrap();
let from = reg.as_slice().read_be_u64(0).unwrap();
let header = unsafe { &mut *(from as *mut VirtIOHeader) };
let paddr = reg.as_slice().read_be_u64(0).unwrap();
let vaddr = phys_to_virt(paddr as usize);
let header = unsafe { &mut *(vaddr as *mut VirtIOHeader) };
header.status.write(VirtIODeviceStatus::DRIVER.bits());
@ -199,7 +191,7 @@ pub fn virtio_blk_init(node: &Node) {
header.write_driver_features(driver_features);
// read configuration space
let config = unsafe { &mut *((from + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOBlkConfig) };
let config = unsafe { &mut *((vaddr + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOBlkConfig) };
info!("Config: {:?}", config);
info!(
"Found a block device of size {}KB",
@ -213,7 +205,7 @@ pub fn virtio_blk_init(node: &Node) {
let driver = VirtIOBlkDriver(Mutex::new(VirtIOBlk {
interrupt: node.prop_u32("interrupts").unwrap(),
interrupt_parent: node.prop_u32("interrupt-parent").unwrap(),
header: from as usize,
header: vaddr as usize,
queue: VirtIOVirtqueue::new(header, 0, 16),
capacity: config.capacity.read() as usize,
}));

View File

@ -2,11 +2,11 @@ use crate::consts::KERNEL_OFFSET;
use crate::drivers::block::*;
use crate::drivers::net::*;
use crate::drivers::{Driver, DRIVERS, NET_DRIVERS};
use crate::memory::active_table;
use crate::memory::phys_to_virt;
use alloc::collections::BTreeMap;
use alloc::sync::Arc;
use pci::*;
use rcore_memory::{paging::PageTable, PAGE_SIZE};
use rcore_memory::PAGE_SIZE;
use spin::Mutex;
const PCI_COMMAND: u16 = 0x04;
@ -141,12 +141,7 @@ pub fn init_driver(dev: &PCIDevice) {
// 82574L Gigabit Network Connection
if let Some(BAR::Memory(addr, len, _, _)) = dev.bars[0] {
let irq = unsafe { enable(dev.loc) };
let vaddr = KERNEL_OFFSET + addr as usize;
let mut current_addr = addr as usize;
while current_addr < addr as usize + len as usize {
active_table().map_if_not_exists(KERNEL_OFFSET + current_addr, current_addr);
current_addr = current_addr + PAGE_SIZE;
}
let vaddr = phys_to_virt(addr as usize);
let index = NET_DRIVERS.read().len();
e1000::init(name, irq, vaddr, len as usize, index);
}
@ -155,12 +150,7 @@ pub fn init_driver(dev: &PCIDevice) {
// 82599ES 10-Gigabit SFI/SFP+ Network Connection
if let Some(BAR::Memory(addr, len, _, _)) = dev.bars[0] {
let irq = unsafe { enable(dev.loc) };
let vaddr = KERNEL_OFFSET + addr as usize;
let mut current_addr = addr as usize;
while current_addr < addr as usize + len as usize {
active_table().map_if_not_exists(KERNEL_OFFSET + current_addr, current_addr);
current_addr = current_addr + PAGE_SIZE;
}
let vaddr = phys_to_virt(addr as usize);
let index = NET_DRIVERS.read().len();
PCI_DRIVERS.lock().insert(
dev.loc,
@ -173,8 +163,7 @@ pub fn init_driver(dev: &PCIDevice) {
if let Some(BAR::Memory(addr, len, _, _)) = dev.bars[5] {
let irq = unsafe { enable(dev.loc) };
assert!(len as usize <= PAGE_SIZE);
let vaddr = KERNEL_OFFSET + addr as usize;
active_table().map(vaddr, addr as usize);
let vaddr = phys_to_virt(addr as usize);
PCI_DRIVERS
.lock()
.insert(dev.loc, ahci::init(irq, vaddr, len as usize));

View File

@ -8,18 +8,17 @@ use bitflags::*;
use device_tree::util::SliceRead;
use device_tree::Node;
use log::*;
use rcore_memory::paging::PageTable;
use rcore_memory::PAGE_SIZE;
use volatile::{ReadOnly, Volatile, WriteOnly};
use crate::arch::consts::{KERNEL_OFFSET, MEMORY_OFFSET};
use crate::memory::active_table;
use crate::HEAP_ALLOCATOR;
use super::super::block::virtio_blk;
use super::super::gpu::virtio_gpu;
use super::super::input::virtio_input;
use super::super::net::virtio_net;
use crate::memory::phys_to_virt;
// virtio 4.2.4 Legacy interface
#[repr(C)]
@ -85,10 +84,10 @@ impl VirtIOVirtqueue {
assert_eq!(header.queue_pfn.read(), 0); // not in use
let queue_num_max = header.queue_num_max.read();
assert!(queue_num_max >= queue_num as u32); // queue available
assert!(queue_num & (queue_num - 1) == 0); // power of two
assert_eq!(queue_num & (queue_num - 1), 0); // power of two
let align = PAGE_SIZE;
let size = virtqueue_size(queue_num, align);
assert!(size % align == 0);
assert_eq!(size % align, 0);
// alloc continuous pages
let address =
unsafe { HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(size, align).unwrap()) }
@ -265,7 +264,7 @@ impl VirtIOVirtqueue {
}
}
pub const VIRTIO_CONFIG_SPACE_OFFSET: u64 = 0x100;
pub const VIRTIO_CONFIG_SPACE_OFFSET: usize = 0x100;
impl VirtIOHeader {
pub fn read_device_features(&mut self) -> u64 {
@ -354,12 +353,12 @@ pub fn virtqueue_used_elem_offset(num: usize, align: usize) -> usize {
pub fn virtio_probe(node: &Node) {
if let Some(reg) = node.prop_raw("reg") {
let from = reg.as_slice().read_be_u64(0).unwrap();
let paddr = reg.as_slice().read_be_u64(0).unwrap();
let vaddr = phys_to_virt(paddr as usize);
let size = reg.as_slice().read_be_u64(8).unwrap();
// assuming one page
assert_eq!(size as usize, PAGE_SIZE);
active_table().map(from as usize, from as usize);
let header = unsafe { &mut *(from as *mut VirtIOHeader) };
let header = unsafe { &mut *(vaddr as *mut VirtIOHeader) };
let magic = header.magic.read();
let version = header.version.read();
let device_id = header.device_id.read();
@ -374,23 +373,13 @@ pub fn virtio_probe(node: &Node) {
// virtio 3.1.1 Device Initialization
header.status.write(0);
header.status.write(VirtIODeviceStatus::ACKNOWLEDGE.bits());
if device_id == 1 {
// net device
virtio_net::virtio_net_init(node);
} else if device_id == 2 {
// blk device
virtio_blk::virtio_blk_init(node);
} else if device_id == 16 {
// gpu device
virtio_gpu::virtio_gpu_init(node);
} else if device_id == 18 {
// input device
virtio_input::virtio_input_init(node);
} else {
println!("Unrecognized virtio device {}", device_id);
match device_id {
1 => virtio_net::virtio_net_init(node),
2 => virtio_blk::virtio_blk_init(node),
16 => virtio_gpu::virtio_gpu_init(node),
18 => virtio_input::virtio_input_init(node),
_ => warn!("Unrecognized virtio device {}", device_id),
}
} else {
active_table().unmap(from as usize);
}
}
}

View File

@ -7,19 +7,18 @@ use bitflags::*;
use device_tree::util::SliceRead;
use device_tree::Node;
use log::*;
use rcore_memory::paging::PageTable;
use rcore_memory::PAGE_SIZE;
use volatile::{ReadOnly, Volatile, WriteOnly};
use crate::arch::consts::{KERNEL_OFFSET, MEMORY_OFFSET};
use crate::arch::cpu;
use crate::memory::active_table;
use crate::sync::SpinNoIrqLock as Mutex;
use crate::HEAP_ALLOCATOR;
use super::super::bus::virtio_mmio::*;
use super::super::{DeviceType, Driver, DRIVERS};
use super::test::mandelbrot;
use crate::memory::phys_to_virt;
const VIRTIO_GPU_EVENT_DISPLAY: u32 = 1 << 0;
@ -198,11 +197,6 @@ impl Driver for VirtIOGpuDriver {
let mut driver = self.0.lock();
// ensure header page is mapped
// TODO: this should be mapped in all page table by default
let header_addr = &mut driver.header as *mut _ as usize;
active_table().map_if_not_exists(header_addr, header_addr);
let interrupt = driver.header.interrupt_status.read();
if interrupt != 0 {
driver.header.interrupt_ack.write(interrupt);
@ -350,8 +344,9 @@ fn flush_frame_buffer_to_screen(driver: &mut VirtIOGpu) {
pub fn virtio_gpu_init(node: &Node) {
let reg = node.prop_raw("reg").unwrap();
let from = reg.as_slice().read_be_u64(0).unwrap();
let header = unsafe { &mut *(from as *mut VirtIOHeader) };
let paddr = reg.as_slice().read_be_u64(0).unwrap();
let vaddr = phys_to_virt(paddr as usize);
let header = unsafe { &mut *(vaddr as *mut VirtIOHeader) };
header.status.write(VirtIODeviceStatus::DRIVER.bits());
@ -365,7 +360,7 @@ pub fn virtio_gpu_init(node: &Node) {
header.write_driver_features(driver_features);
// read configuration space
let config = unsafe { &mut *((from + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOGpuConfig) };
let config = unsafe { &mut *((vaddr + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOGpuConfig) };
info!("Config: {:?}", config);
// virtio 4.2.4 Legacy interface

View File

@ -16,11 +16,11 @@ use rcore_memory::PAGE_SIZE;
use volatile::Volatile;
use crate::arch::cpu;
use crate::memory::active_table;
use crate::sync::SpinNoIrqLock as Mutex;
use super::super::bus::virtio_mmio::*;
use super::super::{DeviceType, Driver, DRIVERS};
use crate::memory::phys_to_virt;
struct VirtIOInput {
interrupt_parent: u32,
@ -125,11 +125,6 @@ impl VirtIOInput {
return false;
}
// ensure header page is mapped
// TODO: this should be mapped in all page table by default
let header_addr = self.header as *mut _ as usize;
active_table().map_if_not_exists(header_addr, header_addr);
let interrupt = self.header.interrupt_status.read();
if interrupt != 0 {
self.header.interrupt_ack.write(interrupt);
@ -173,8 +168,9 @@ impl Driver for VirtIOInputDriver {
pub fn virtio_input_init(node: &Node) {
let reg = node.prop_raw("reg").unwrap();
let from = reg.as_slice().read_be_u64(0).unwrap();
let header = unsafe { &mut *(from as *mut VirtIOHeader) };
let paddr = reg.as_slice().read_be_u64(0).unwrap();
let vaddr = phys_to_virt(paddr as usize);
let header = unsafe { &mut *(vaddr as *mut VirtIOHeader) };
header.status.write(VirtIODeviceStatus::DRIVER.bits());
@ -188,7 +184,7 @@ pub fn virtio_input_init(node: &Node) {
header.write_driver_features(driver_features);
// read configuration space
let config = unsafe { &mut *((from + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOInputConfig) };
let config = unsafe { &mut *((vaddr + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOInputConfig) };
info!("Config: {:?}", config);
// virtio 4.2.4 Legacy interface

View File

@ -9,7 +9,6 @@ use bitflags::*;
use device_tree::util::SliceRead;
use device_tree::Node;
use log::*;
use rcore_memory::paging::PageTable;
use rcore_memory::PAGE_SIZE;
use smoltcp::phy::{self, DeviceCapabilities};
use smoltcp::time::Instant;
@ -17,12 +16,12 @@ use smoltcp::wire::{EthernetAddress, Ipv4Address};
use smoltcp::Result;
use volatile::{ReadOnly, Volatile};
use crate::memory::active_table;
use crate::sync::SpinNoIrqLock as Mutex;
use crate::HEAP_ALLOCATOR;
use super::super::bus::virtio_mmio::*;
use super::super::{DeviceType, Driver, DRIVERS, NET_DRIVERS};
use crate::memory::phys_to_virt;
pub struct VirtIONet {
interrupt_parent: u32,
@ -43,9 +42,6 @@ impl Driver for VirtIONetDriver {
fn try_handle_interrupt(&self, _irq: Option<u32>) -> bool {
let driver = self.0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
let header = unsafe { &mut *(driver.header as *mut VirtIOHeader) };
let interrupt = header.interrupt_status.read();
if interrupt != 0 {
@ -138,10 +134,6 @@ impl phy::RxToken for VirtIONetRxToken {
{
let (input, output, _, user_data) = {
let mut driver = (self.0).0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
driver.queues[VIRTIO_QUEUE_RECEIVE].get().unwrap()
};
let result = f(&input[0][size_of::<VirtIONetHeader>()..]);
@ -159,10 +151,6 @@ impl phy::TxToken for VirtIONetTxToken {
{
let output = {
let mut driver = (self.0).0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
if let Some((_, output, _, _)) = driver.queues[VIRTIO_QUEUE_TRANSMIT].get() {
unsafe { slice::from_raw_parts_mut(output[0].as_ptr() as *mut u8, output[0].len()) }
} else {
@ -252,8 +240,9 @@ struct VirtIONetHeader {
pub fn virtio_net_init(node: &Node) {
let reg = node.prop_raw("reg").unwrap();
let from = reg.as_slice().read_be_u64(0).unwrap();
let header = unsafe { &mut *(from as *mut VirtIOHeader) };
let paddr = reg.as_slice().read_be_u64(0).unwrap();
let vaddr = phys_to_virt(paddr as usize);
let header = unsafe { &mut *(vaddr as *mut VirtIOHeader) };
header.status.write(VirtIODeviceStatus::DRIVER.bits());
@ -267,7 +256,7 @@ pub fn virtio_net_init(node: &Node) {
header.write_driver_features(driver_features);
// read configuration space
let config = unsafe { &mut *((from + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIONetworkConfig) };
let config = unsafe { &mut *((vaddr + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIONetworkConfig) };
let mac = config.mac;
let status = VirtIONetworkStatus::from_bits_truncate(config.status.read());
debug!("Got MAC address {:?} and status {:?}", mac, status);
@ -280,7 +269,7 @@ pub fn virtio_net_init(node: &Node) {
let mut driver = VirtIONet {
interrupt: node.prop_u32("interrupts").unwrap(),
interrupt_parent: node.prop_u32("interrupt-parent").unwrap(),
header: from as usize,
header: vaddr as usize,
mac: EthernetAddress(mac),
queues: [
VirtIOVirtqueue::new(header, VIRTIO_QUEUE_RECEIVE, queue_num),

View File

@ -3,8 +3,7 @@ use alloc::alloc::{alloc_zeroed, dealloc, Layout};
use isomorphic_drivers::provider;
use rcore_memory::paging::PageTable;
use rcore_memory::PAGE_SIZE;
use crate::memory::active_table;
pub use crate::arch::paging::PageTableImpl;
pub struct Provider;
@ -14,7 +13,9 @@ impl provider::Provider for Provider {
fn alloc_dma(size: usize) -> (usize, usize) {
let layout = Layout::from_size_align(size, PAGE_SIZE).unwrap();
let vaddr = unsafe { alloc_zeroed(layout) } as usize;
let paddr = active_table().get_entry(vaddr).unwrap().target();
let mut page_table = unsafe { PageTableImpl::active() };
let paddr = page_table.get_entry(vaddr).unwrap().target();
core::mem::forget(page_table);
(vaddr, paddr)
}

View File

@ -20,7 +20,7 @@ mod pipe;
mod pseudo;
mod stdio;
/// Hard link user programs
// Hard link user programs
#[cfg(feature = "link_user")]
global_asm!(concat!(
r#"

View File

@ -14,7 +14,7 @@
use super::HEAP_ALLOCATOR;
pub use crate::arch::paging::*;
use crate::consts::{KERNEL_OFFSET, MEMORY_OFFSET};
use crate::consts::{KERNEL_OFFSET, MEMORY_OFFSET, PHYSICAL_MEMORY_OFFSET};
use crate::process::current_thread;
use crate::sync::{SpinNoIrqLock, MutexGuard, SpinNoIrq};
use alloc::boxed::Box;
@ -27,7 +27,7 @@ pub use rcore_memory::memory_set::{handler::*, MemoryArea, MemoryAttr};
use rcore_memory::paging::PageTable;
use rcore_memory::*;
pub type MemorySet = rcore_memory::memory_set::MemorySet<InactivePageTable0>;
pub type MemorySet = rcore_memory::memory_set::MemorySet<PageTableImpl>;
// x86_64 support up to 64G memory
#[cfg(target_arch = "x86_64")]
@ -52,19 +52,11 @@ pub type FrameAlloc = bitmap_allocator::BitAlloc4K;
lazy_static! {
pub static ref FRAME_ALLOCATOR: SpinNoIrqLock<FrameAlloc> =
SpinNoIrqLock::new(FrameAlloc::default());
pub static ref ACTIVE_TABLE: SpinNoIrqLock<ActivePageTable> =
SpinNoIrqLock::new(unsafe { ActivePageTable::new() });
}
/// The only way to get current active page table safely
///
/// NOTE:
/// Current implementation of recursive page table has a problem that
/// will cause race condition to the initial page table.
/// So we have to add a global mutex to avoid the racing.
/// This will be removed after replacing recursive mapping by linear mapping.
pub fn active_table() -> MutexGuard<'static, ActivePageTable, SpinNoIrq> {
ACTIVE_TABLE.lock()
/// Convert physical address to virtual address
pub const fn phys_to_virt(paddr: usize) -> usize {
PHYSICAL_MEMORY_OFFSET + paddr
}
#[derive(Debug, Clone, Copy)]
@ -148,7 +140,7 @@ pub fn init_heap() {
pub fn enlarge_heap(heap: &mut Heap) {
info!("Enlarging heap to avoid oom");
let mut page_table = active_table();
let mut page_table = unsafe { PageTableImpl::active() };
let mut addrs = [(0, 0); 32];
let mut addr_len = 0;
#[cfg(target_arch = "x86_64")]
@ -178,4 +170,5 @@ pub fn enlarge_heap(heap: &mut Heap) {
heap.init(*addr, *len);
}
}
core::mem::forget(page_table);
}
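Because `PHYSICAL_MEMORY_OFFSET` is `0xfffffc00_00000000`, the conversion is pure arithmetic. A standalone sanity check (not kernel code) of the device addresses touched elsewhere in this diff:

const PHYSICAL_MEMORY_OFFSET: usize = 0xfffffc00_00000000;

const fn phys_to_virt(paddr: usize) -> usize {
    PHYSICAL_MEMORY_OFFSET + paddr
}

fn main() {
    assert_eq!(phys_to_virt(0xb8000), 0xfffffc00_000b8000);     // VGA text buffer
    assert_eq!(phys_to_virt(0xfee0_0000), 0xfffffc00_fee00000); // Local APIC
}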

View File

@ -76,8 +76,8 @@ pub struct Process {
pub child_exit_code: BTreeMap<usize, usize>, // child process store its exit code here
}
/// Records the mapping between pid and Process struct.
lazy_static! {
/// Records the mapping between pid and Process struct.
pub static ref PROCESSES: RwLock<BTreeMap<usize, Weak<Mutex<Process>>>> =
RwLock::new(BTreeMap::new());
}

View File

@ -79,7 +79,7 @@ impl Syscall<'_> {
}
}
/// should be initialized together
// should be initialized together
lazy_static! {
pub static ref EPOCH_BASE: u64 = crate::arch::timer::read_epoch();
pub static ref TICK_BASE: u64 = unsafe { crate::trap::TICK as u64 };