Mirror of https://github.com/rcore-os/rCore.git

Merge branch 'memory' into process

# Conflicts:
#	src/arch/x86_64/smp.rs
#	src/memory/mod.rs
WangRunji 2018-04-28 12:59:58 +08:00
commit 5707dfd00a
31 changed files with 848 additions and 159 deletions

crate/memory/Cargo.toml Normal file

@ -0,0 +1,6 @@
[package]
name = "memory"
version = "0.1.0"
authors = ["WangRunji <wangrunji0408@163.com>"]
[dependencies]

crate/memory/src/lib.rs Normal file

@ -0,0 +1,12 @@
#![no_std]
#![feature(alloc)]
extern crate alloc;
pub mod physical;
pub mod paging;
pub mod memory_set;
pub mod swap;
type VirtAddr = usize;
const PAGE_SIZE: usize = 4096;

View File

@ -0,0 +1,89 @@
use alloc::vec::Vec;
type Addr = usize;
/// A contiguous region of memory with uniform access permissions.
/// Corresponds to `vma_struct` in ucore.
#[derive(Debug, Eq, PartialEq)]
pub struct MemoryArea {
pub start_addr: Addr,
pub end_addr: Addr,
pub flags: u32,
pub name: &'static str,
}
impl MemoryArea {
pub fn contains(&self, addr: Addr) -> bool {
addr >= self.start_addr && addr < self.end_addr
}
fn is_overlap_with(&self, other: &MemoryArea) -> bool {
!(self.end_addr <= other.start_addr || self.start_addr >= other.end_addr)
}
}
/// A collection of memory regions, made up of several contiguous areas.
/// Corresponds to `mm_struct` in ucore.
#[derive(Debug)]
pub struct MemorySet {
areas: Vec<MemoryArea>,
}
impl MemorySet {
pub fn new() -> Self {
MemorySet { areas: Vec::<MemoryArea>::new() }
}
pub fn find_area(&self, addr: Addr) -> Option<&MemoryArea> {
self.areas.iter().find(|area| area.contains(addr))
}
pub fn push(&mut self, area: MemoryArea) {
assert!(area.start_addr <= area.end_addr, "invalid memory area");
if self.areas.iter().any(|other| area.is_overlap_with(other)) {
panic!("memory area overlap");
}
self.areas.push(area);
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn push_and_find() {
let mut ms = MemorySet::new();
ms.push(MemoryArea {
start_addr: 0x0,
end_addr: 0x8,
flags: 0x0,
name: "code",
});
ms.push(MemoryArea {
start_addr: 0x8,
end_addr: 0x10,
flags: 0x1,
name: "data",
});
assert_eq!(ms.find_area(0x6).unwrap().name, "code");
assert_eq!(ms.find_area(0x11), None);
}
#[test]
#[should_panic]
fn push_overlap() {
let mut ms = MemorySet::new();
ms.push(MemoryArea {
start_addr: 0x0,
end_addr: 0x8,
flags: 0x0,
name: "code",
});
ms.push(MemoryArea {
start_addr: 0x4,
end_addr: 0x10,
flags: 0x1,
name: "data",
});
}
}
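A small usage sketch (hypothetical, not part of this commit): a page-fault handler can consult find_area to decide whether a faulting address belongs to a registered region before mapping the page.

fn is_legal_fault(ms: &MemorySet, fault_addr: Addr) -> bool {
    // The fault is legal only if some registered area covers the address.
    ms.find_area(fault_addr).is_some()
}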

View File

@ -0,0 +1,4 @@
pub use self::page_table::*;
use super::*;
mod page_table;

View File

@ -0,0 +1,130 @@
use alloc::boxed::Box;
use super::*;
const PAGE_COUNT: usize = 16;
const PAGE_SIZE: usize = 4096;
pub struct MockPageTable {
mapped: [bool; PAGE_COUNT],
accessed: [bool; PAGE_COUNT],
dirty: [bool; PAGE_COUNT],
data: [u8; PAGE_SIZE * PAGE_COUNT],
page_fault_handler: Option<PageFaultHandler>,
capacity: usize,
}
type PageFaultHandler = Box<FnMut(&mut MockPageTable, VirtAddr)>;
impl PageTable for MockPageTable {
fn accessed(&self, addr: VirtAddr) -> bool {
self.accessed[addr / PAGE_SIZE]
}
fn dirty(&self, addr: VirtAddr) -> bool {
self.dirty[addr / PAGE_SIZE]
}
fn clear_accessed(&mut self, addr: usize) {
self.accessed[addr / PAGE_SIZE] = false;
}
fn clear_dirty(&mut self, addr: usize) {
self.dirty[addr / PAGE_SIZE] = false;
}
/// Map a page, return false if no more space
fn map(&mut self, addr: VirtAddr) -> bool {
if self.mapped.iter().filter(|&&b| b).count() == self.capacity {
return false;
}
self.mapped[addr / PAGE_SIZE] = true;
true
}
fn unmap(&mut self, addr: VirtAddr) {
self.mapped[addr / PAGE_SIZE] = false;
}
}
impl MockPageTable {
pub fn new(capacity: usize) -> Self {
use core::mem::uninitialized;
MockPageTable {
mapped: [false; PAGE_COUNT],
accessed: [false; PAGE_COUNT],
dirty: [false; PAGE_COUNT],
data: unsafe{ uninitialized() },
page_fault_handler: None,
capacity,
}
}
pub fn set_handler(&mut self, page_fault_handler: PageFaultHandler) {
self.page_fault_handler = Some(page_fault_handler);
}
fn trigger_page_fault_if_not_present(&mut self, addr: VirtAddr) {
let page_id = addr / PAGE_SIZE;
while !self.mapped[page_id] {
let self_mut = unsafe{ &mut *(self as *mut Self) };
(self.page_fault_handler.as_mut().unwrap())(self_mut, addr);
}
}
/// Read memory, mark accessed, trigger page fault if not present
pub fn read(&mut self, addr: VirtAddr) -> u8 {
let page_id = addr / PAGE_SIZE;
self.trigger_page_fault_if_not_present(addr);
self.accessed[page_id] = true;
self.data[addr]
}
/// Write memory, mark accessed and dirty, trigger page fault if not present
pub fn write(&mut self, addr: VirtAddr, data: u8) {
let page_id = addr / PAGE_SIZE;
self.trigger_page_fault_if_not_present(addr);
self.accessed[page_id] = true;
self.dirty[page_id] = true;
self.data[addr] = data;
}
}
#[cfg(test)]
mod test {
use super::*;
use alloc::arc::Arc;
use core::cell::RefCell;
#[test]
fn test() {
let page_fault_count = Arc::new(RefCell::new(0usize));
let mut pt = MockPageTable::new(2);
pt.set_handler(Box::new({
let page_fault_count1 = page_fault_count.clone();
move |pt: &mut MockPageTable, addr: VirtAddr| {
*page_fault_count1.borrow_mut() += 1;
pt.map(addr);
}
}));
pt.map(0);
pt.read(0);
assert_eq!(*page_fault_count.borrow(), 0);
assert!(pt.accessed(0));
assert!(!pt.dirty(0));
pt.clear_accessed(0);
assert!(!pt.accessed(0));
pt.read(1);
assert_eq!(*page_fault_count.borrow(), 0);
assert!(pt.accessed(0));
pt.write(0x1000, 0xff);
assert_eq!(*page_fault_count.borrow(), 1);
assert!(pt.accessed(0x1000));
assert!(pt.dirty(0x1000));
assert_eq!(pt.read(0x1000), 0xff);
pt.clear_dirty(0x1000);
assert!(!pt.dirty(0x1000));
assert_eq!(pt.map(0x2000), false);
pt.unmap(0);
pt.read(0);
assert_eq!(*page_fault_count.borrow(), 2);
}
}

View File

@ -0,0 +1,13 @@
use super::*;
pub use self::mock_page_table::MockPageTable;
mod mock_page_table;
pub trait PageTable {
fn accessed(&self, addr: VirtAddr) -> bool;
fn dirty(&self, addr: VirtAddr) -> bool;
fn clear_accessed(&mut self, addr: VirtAddr);
fn clear_dirty(&mut self, addr: VirtAddr);
fn map(&mut self, addr: VirtAddr) -> bool;
fn unmap(&mut self, addr: VirtAddr);
}

View File

@ -0,0 +1,33 @@
use super::*;
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Frame {
number: usize,
}
impl Frame {
pub fn containing_address(address: PhysAddr) -> Frame {
Frame{ number: address.get() as usize / PAGE_SIZE }
}
//TODO: Set private
pub fn start_address(&self) -> PhysAddr {
PhysAddr::new((self.number * PAGE_SIZE) as u64)
}
pub fn clone(&self) -> Frame {
Frame { number: self.number }
}
//TODO: Set private
// pub fn range_inclusive(start: Frame, end: Frame) -> FrameIter {
// FrameIter {
// start: start,
// end: end,
// }
// }
}
impl Drop for Frame {
fn drop(&mut self) {
panic!("frame must be deallocate");
}
}

View File

@ -0,0 +1,11 @@
use super::*;
pub trait FrameAllocator {
fn allocate_frame(&mut self) -> Option<Frame>;
fn deallocate_frame(&mut self, frame: Frame);
}
pub trait MemoryArea {
fn begin(&self) -> PhysAddr;
fn end(&self) -> PhysAddr;
}

View File

@ -0,0 +1,9 @@
pub use self::physaddr::PhysAddr;
pub use self::frame::Frame;
pub use self::frame_allocator::FrameAllocator;
use super::*;
mod frame;
mod physaddr;
mod frame_allocator;

View File

@ -0,0 +1,50 @@
use core::fmt;
/// Represents a physical memory address
#[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)]
pub struct PhysAddr(u64);
impl PhysAddr {
pub fn new(addr: u64) -> PhysAddr {
PhysAddr(addr)
}
pub fn get(&self) -> u64 {
self.0
}
}
impl fmt::Debug for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:#x}", self.0)
}
}
impl fmt::Binary for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::Display for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::LowerHex for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::Octal for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::UpperHex for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}

View File

@ -0,0 +1,135 @@
use alloc::vec_deque::VecDeque;
use super::*;
pub struct EnhancedClockSwapManager<T: 'static + SwappablePageTable> {
page_table: &'static mut T,
clock_ptr: usize,
deque: VecDeque<VirtAddr>,
}
impl<T: 'static + SwappablePageTable> SwapManager for EnhancedClockSwapManager<T> {
fn tick(&mut self) {
}
fn push(&mut self, addr: usize) {
let pos = if self.clock_ptr == 0 {self.deque.len()} else {self.clock_ptr};
self.deque.insert(pos, addr);
}
fn remove(&mut self, addr: usize) {
let id = self.deque.iter()
.position(|&x| x == addr)
.expect("address not found");
if id < self.clock_ptr {
self.clock_ptr -= 1;
}
self.deque.remove(id);
}
fn pop(&mut self) -> Option<usize> {
loop {
let addr = self.deque[self.clock_ptr];
let accessed = self.page_table.accessed(addr);
let dirty = self.page_table.dirty(addr);
match (accessed, dirty) {
// Recently accessed: clear the accessed bit and give the page a second chance.
(true, _) => {
self.page_table.clear_accessed(addr);
},
// Not accessed but dirty: write it back so it becomes a clean candidate on a later pass.
(false, true) => {
if self.page_table.swap_out(addr).is_ok() {
self.page_table.clear_dirty(addr);
}
},
// Neither accessed nor dirty: this page is the victim.
_ => {
return self.remove_current();
}
}
self.move_next();
}
}
}
impl<T: 'static + SwappablePageTable> EnhancedClockSwapManager<T> {
pub fn new(page_table: &'static mut T) -> Self {
EnhancedClockSwapManager {
page_table,
clock_ptr: 0,
deque: VecDeque::<VirtAddr>::new()
}
}
fn remove_current(&mut self) -> Option<VirtAddr> {
let addr = self.deque.remove(self.clock_ptr);
if self.clock_ptr == self.deque.len() {
self.clock_ptr = 0;
}
return addr;
}
fn move_next(&mut self) {
self.clock_ptr += 1;
if self.clock_ptr == self.deque.len() {
self.clock_ptr = 0;
}
}
}
#[cfg(test)]
mod test {
use super::*;
use alloc::{arc::Arc, boxed::Box};
use core::cell::RefCell;
use paging::MockPageTable;
impl SwappablePageTable for MockPageTable {
fn swap_out(&mut self, addr: usize) -> Result<(), ()> {
Ok(())
}
}
enum MemOp {
R(usize), W(usize)
}
#[test]
fn test() {
use self::MemOp::{R, W};
let page_fault_count = Arc::new(RefCell::new(0usize));
let mut pt = Box::new(MockPageTable::new(4));
let static_pt = unsafe{ &mut *(pt.as_mut() as *mut MockPageTable) };
pt.set_handler(Box::new({
let page_fault_count1 = page_fault_count.clone();
let mut clock = EnhancedClockSwapManager::new(static_pt);
move |pt: &mut MockPageTable, addr: VirtAddr| {
*page_fault_count1.borrow_mut() += 1;
if !pt.map(addr) { // page table is full: evict a victim first
pt.unmap(clock.pop().unwrap());
pt.map(addr);
}
clock.push(addr);
}
}));
let op_seq = [
R(0x1000), R(0x2000), R(0x3000), R(0x4000),
R(0x3000), W(0x1000), R(0x4000), W(0x2000), R(0x5000),
R(0x2000), W(0x1000), R(0x2000), R(0x3000), R(0x4000)];
let pgfault_count = [
1, 2, 3, 4,
4, 4, 4, 4, 5,
5, 5, 5, 6, 7];
for (op, &count) in op_seq.iter().zip(pgfault_count.iter()) {
match op {
R(addr) => {pt.read(*addr);},
W(addr) => pt.write(*addr, 0),
}
assert_eq!(*(*page_fault_count).borrow(), count);
}
}
}

View File

@ -0,0 +1,87 @@
use alloc::vec_deque::VecDeque;
use super::*;
pub struct FifoSwapManager {
deque: VecDeque<VirtAddr>,
}
impl SwapManager for FifoSwapManager {
fn tick(&mut self) {
}
fn push(&mut self, addr: usize) {
self.deque.push_back(addr);
}
fn remove(&mut self, addr: usize) {
let id = self.deque.iter()
.position(|&x| x == addr)
.expect("address not found");
self.deque.remove(id);
}
fn pop(&mut self) -> Option<VirtAddr> {
self.deque.pop_front()
}
}
impl FifoSwapManager {
pub fn new() -> Self {
FifoSwapManager {
deque: VecDeque::<VirtAddr>::new()
}
}
}
#[cfg(test)]
mod test {
use super::*;
use alloc::{arc::Arc, boxed::Box};
use core::cell::RefCell;
use paging::MockPageTable;
enum MemOp {
R(usize), W(usize)
}
#[test]
fn test() {
use self::MemOp::{R, W};
let page_fault_count = Arc::new(RefCell::new(0usize));
let mut pt = MockPageTable::new(4);
pt.set_handler(Box::new({
let page_fault_count1 = page_fault_count.clone();
let mut fifo = FifoSwapManager::new();
move |pt: &mut MockPageTable, addr: VirtAddr| {
*page_fault_count1.borrow_mut() += 1;
if !pt.map(addr) { // page table is full: evict a victim first
pt.unmap(fifo.pop().unwrap());
pt.map(addr);
}
fifo.push(addr);
}
}));
let op_seq = [
R(0x1000), R(0x2000), R(0x3000), R(0x4000),
W(0x3000), W(0x1000), W(0x4000), W(0x2000), W(0x5000),
W(0x2000), W(0x1000), W(0x2000), W(0x3000), W(0x4000),
W(0x5000), R(0x1000), W(0x1000)];
let pgfault_count = [
1, 2, 3, 4,
4, 4, 4, 4, 5,
5, 6, 7, 8, 9,
10, 11, 11];
for (op, &count) in op_seq.iter().zip(pgfault_count.iter()) {
match op {
R(addr) => {pt.read(*addr);},
W(addr) => pt.write(*addr, 0),
}
assert_eq!(*(*page_fault_count).borrow(), count);
}
}
}

View File

@ -0,0 +1,79 @@
use super::Swapper;
use alloc::btree_map::BTreeMap;
pub struct MockSwapper {
map: BTreeMap<usize, [u8; 4096]>,
}
impl Swapper for MockSwapper {
fn swap_out(&mut self, data: &[u8; 4096]) -> Result<usize, ()> {
let id = self.alloc_id();
self.map.insert(id, data.clone());
Ok(id)
}
fn swap_update(&mut self, token: usize, data: &[u8; 4096]) -> Result<(), ()> {
if !self.map.contains_key(&token) {
return Err(());
}
self.map.insert(token, data.clone());
Ok(())
}
fn swap_in(&mut self, token: usize, data: &mut [u8; 4096]) -> Result<(), ()> {
match self.map.remove(&token) {
Some(d) => *data = d,
None => return Err(()),
}
Ok(())
}
}
impl MockSwapper {
pub fn new() -> Self {
MockSwapper {map: BTreeMap::new()}
}
fn alloc_id(&self) -> usize {
(0 .. 100usize).find(|i| !self.map.contains_key(i)).unwrap()
}
}
#[cfg(test)]
mod test {
use super::*;
use core::mem::uninitialized;
fn assert_data_eq(data1: &[u8; 4096], data2: &[u8; 4096]) {
for (&a, &b) in data2.iter().zip(data1.iter()) {
assert_eq!(a, b);
}
}
#[test]
fn swap_out_in() {
let mut swapper = MockSwapper::new();
let mut data: [u8; 4096] = unsafe{ uninitialized() };
let data1: [u8; 4096] = unsafe{ uninitialized() };
let token = swapper.swap_out(&data1).unwrap();
swapper.swap_in(token, &mut data).unwrap();
assert_data_eq(&data, &data1);
}
#[test]
fn swap_update() {
let mut swapper = MockSwapper::new();
let mut data: [u8; 4096] = unsafe{ uninitialized() };
let data1: [u8; 4096] = unsafe{ uninitialized() };
let data2: [u8; 4096] = unsafe{ uninitialized() };
let token = swapper.swap_out(&data1).unwrap();
swapper.swap_update(token, &data2).unwrap();
swapper.swap_in(token, &mut data).unwrap();
assert_data_eq(&data, &data2);
}
#[test]
fn invalid_token() {
let mut swapper = MockSwapper::new();
let mut data: [u8; 4096] = unsafe{ uninitialized() };
assert_eq!(swapper.swap_in(0, &mut data), Err(()));
}
}

View File

@ -0,0 +1,30 @@
pub use self::fifo::FifoSwapManager;
pub use self::enhanced_clock::EnhancedClockSwapManager;
use super::*;
use super::paging::PageTable;
mod fifo;
mod enhanced_clock;
mod mock_swapper;
pub trait SwapManager {
/// Called when a tick interrupt occurs
fn tick(&mut self);
/// Called when a swappable page is mapped into memory
fn push(&mut self, addr: VirtAddr);
/// Called to delete the addr entry from the swap manager
fn remove(&mut self, addr: VirtAddr);
/// Try to swap out a page; returns the victim
fn pop(&mut self) -> Option<VirtAddr>;
}
pub trait Swapper {
fn swap_out(&mut self, data: &[u8; 4096]) -> Result<usize, ()>;
fn swap_update(&mut self, token: usize, data: &[u8; 4096]) -> Result<(), ()>;
fn swap_in(&mut self, token: usize, data: &mut [u8; 4096]) -> Result<(), ()>;
}
pub trait SwappablePageTable: PageTable {
fn swap_out(&mut self, addr: VirtAddr) -> Result<(), ()>;
}
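A minimal sketch (not part of this commit) of how these traits compose on a page fault, mirroring the pattern used in the fifo and enhanced_clock tests above: map the faulting page if the table still has room, otherwise ask the SwapManager for a victim, swap it out, and reuse its slot. The function name is illustrative only.

fn handle_page_fault<M: SwapManager, T: SwappablePageTable>(
    manager: &mut M,
    page_table: &mut T,
    addr: VirtAddr,
) {
    if !page_table.map(addr) {
        // No free slot: pick a victim, write it back, and unmap it.
        let victim = manager.pop().expect("no page can be swapped out");
        page_table.swap_out(victim).expect("swap out failed");
        page_table.unmap(victim);
        page_table.map(addr);
    }
    // Track the newly mapped page so it can become a victim later.
    manager.push(addr);
}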

View File

@ -4,23 +4,23 @@ mod structs;
use self::structs::*;
use consts::*;
-pub fn init() -> Result<ACPI_Result, ACPI_Error> {
+pub fn init() -> Result<AcpiResult, AcpiError> {
let rsdp = find_rsdp().expect("acpi: rsdp not found.");
-if rsdp.RsdtPhysicalAddress > PHYSICAL_MEMORY_LIMIT {
+if rsdp.rsdt_physical_address > PHYSICAL_MEMORY_LIMIT {
-return Err(ACPI_Error::NotMapped);
+return Err(AcpiError::NotMapped);
}
-debug!("RSDT at {:#x}", rsdp.RsdtPhysicalAddress);
+debug!("RSDT at {:#x}", rsdp.rsdt_physical_address);
-let rsdt = unsafe{ &*(rsdp.RsdtPhysicalAddress as *const rsdt) };
+let rsdt = unsafe{ &*(rsdp.rsdt_physical_address as *const Rsdt) };
-let mut madt: Option<&'static madt> = None;
+let mut madt: Option<&'static Madt> = None;
for i in 0 .. rsdt.entry_count() {
let entry = rsdt.entry_at(i);
if entry > PHYSICAL_MEMORY_LIMIT {
-return Err(ACPI_Error::NotMapped);
+return Err(AcpiError::NotMapped);
}
-let header = unsafe{ &*(entry as *const header) };
+let header = unsafe{ &*(entry as *const Header) };
// debug!("{:?}", header);
-if &header.Signature == b"APIC" {
+if &header.signature == b"APIC" {
-madt = Some(unsafe{ &*(entry as *const madt) });
+madt = Some(unsafe{ &*(entry as *const Madt) });
}
}
debug!("{:?}", madt);
@ -33,7 +33,7 @@ const PHYSICAL_MEMORY_LIMIT: u32 = 0x0E000000;
const PHYSICAL_MEMORY_LIMIT: u32 = 0x80000000;
#[derive(Debug)]
-pub struct ACPI_Result {
+pub struct AcpiResult {
pub cpu_num: u8,
pub cpu_acpi_ids: [u8; MAX_CPU_NUM],
pub ioapic_id: u8,
@ -41,13 +41,13 @@ pub struct ACPI_Result {
}
#[derive(Debug)]
-pub enum ACPI_Error {
+pub enum AcpiError {
NotMapped,
-IOACPI_NotFound,
+IoacpiNotFound,
}
-fn config_smp(madt: &'static madt) -> Result<ACPI_Result, ACPI_Error> {
+fn config_smp(madt: &'static Madt) -> Result<AcpiResult, AcpiError> {
-let lapic_addr = madt.LapicAddress as *const ();
+let lapic_addr = madt.lapic_address as *const ();
let mut cpu_num = 0u8;
let mut cpu_acpi_ids: [u8; MAX_CPU_NUM] = [0; MAX_CPU_NUM];
@ -56,33 +56,33 @@ fn config_smp(madt: &'static madt) -> Result<ACPI_Result, ACPI_Error> {
debug!("{:?}", entry);
match &entry {
&MadtEntry::LocalApic(ref lapic) => {
-cpu_acpi_ids[cpu_num as usize] = lapic.Id;
+cpu_acpi_ids[cpu_num as usize] = lapic.id;
cpu_num += 1;
},
&MadtEntry::IoApic(ref ioapic) => {
-ioapic_id = Some(ioapic.Id);
+ioapic_id = Some(ioapic.id);
},
_ => {},
}
}
if ioapic_id.is_none() {
-return Err(ACPI_Error::IOACPI_NotFound);
+return Err(AcpiError::IoacpiNotFound);
}
let ioapic_id = ioapic_id.unwrap();
-Ok(ACPI_Result { cpu_num, cpu_acpi_ids, ioapic_id, lapic_addr })
+Ok(AcpiResult { cpu_num, cpu_acpi_ids, ioapic_id, lapic_addr })
}
/// See https://wiki.osdev.org/RSDP -- Detecting the RSDP
-fn find_rsdp() -> Option<&'static rsdp> {
+fn find_rsdp() -> Option<&'static Rsdp> {
use util::{Checkable, find_in_memory};
let ebda = unsafe { *(0x40E as *const u16) as usize } << 4;
debug!("EBDA at {:#x}", ebda);
macro_rules! return_if_find_in {
($begin:expr, $end:expr) => (
-if let Some(addr) = unsafe{ find_in_memory::<rsdp>($begin, $end, 4) } {
+if let Some(addr) = unsafe{ find_in_memory::<Rsdp>($begin, $end, 4) } {
-return Some(unsafe{ &*(addr as *const rsdp) });
+return Some(unsafe{ &*(addr as *const Rsdp) });
}
)
}

View File

@ -6,48 +6,48 @@ use core::mem::size_of;
#[repr(C)]
#[derive(Debug)]
-pub struct rsdp {
+pub struct Rsdp {
-pub Signature: [u8; 8],
+pub signature: [u8; 8],
-pub Checksum: u8,
+pub checksum: u8,
-pub OemId: [i8; 6],
+pub oem_id: [i8; 6],
-pub Revision: u8,
+pub revision: u8,
-pub RsdtPhysicalAddress: u32,
+pub rsdt_physical_address: u32,
-pub Length: u32,
+pub length: u32,
-pub XsdtPhysicalAddress: u64,
+pub xsdt_physical_address: u64,
-pub ExtendedChecksum: u8,
+pub extended_checksum: u8,
-pub Reserved: [u8; 3],
+pub reserved: [u8; 3],
}
-impl Checkable for rsdp {
+impl Checkable for Rsdp {
fn check(&self) -> bool {
-&self.Signature == b"RSD PTR " && bytes_sum(self) == 0
+&self.signature == b"RSD PTR " && bytes_sum(self) == 0
}
}
#[repr(C)]
#[derive(Debug)]
-pub struct header {
+pub struct Header {
-pub Signature: [u8; 4],
+pub signature: [u8; 4],
-pub Length: u32,
+pub length: u32,
-pub Revision: u8,
+pub revision: u8,
-pub Checksum: u8,
+pub checksum: u8,
-pub OemId: [i8; 6],
+pub oem_id: [i8; 6],
-pub OemTableId: [i8; 8],
+pub oem_table_id: [i8; 8],
-pub OemRevision: u32,
+pub oem_revision: u32,
-pub AslCompilerId: [i8; 4],
+pub asl_compiler_id: [i8; 4],
-pub AslCompilerRevision: u32,
+pub asl_compiler_revision: u32,
}
#[repr(C)]
#[derive(Debug)]
-pub struct rsdt {
+pub struct Rsdt {
-pub Header: header,
+pub header: Header,
-TableOffsetEntry: [u32; 0],
+table_offset_entry: [u32; 0],
}
-impl rsdt {
+impl Rsdt {
pub fn entry_count(&self) -> usize {
-(self.Header.Length as usize - size_of::<Self>()) / 4
+(self.header.length as usize - size_of::<Self>()) / 4
}
pub fn entry_at(&self, id: usize) -> u32 {
assert!(id < self.entry_count());
@ -60,62 +60,62 @@ impl rsdt {
#[repr(C)]
#[derive(Debug)]
-pub struct madt {
+pub struct Madt {
-pub Header: header,
+pub header: Header,
-pub LapicAddress: u32,
+pub lapic_address: u32,
-pub Flags: u32,
+pub flags: u32,
-Table: [u32; 0],
+table: [u32; 0],
}
-impl Checkable for madt {
+impl Checkable for Madt {
fn check(&self) -> bool {
-&self.Header.Signature == b"APIC" && self.Header.Length >= size_of::<Self>() as u32
+&self.header.signature == b"APIC" && self.header.length >= size_of::<Self>() as u32
}
}
#[derive(Debug)]
pub enum MadtEntry {
-Unknown(MadtEntry_Unknown),
+Unknown(MadtEntryUnknown),
-LocalApic(MadtEntry_LocalApic),
+LocalApic(MadtEntryLocalApic),
-IoApic(MadtEntry_IoApic),
+IoApic(MadtEntryIoApic),
}
#[repr(C)]
#[derive(Debug, Clone)]
-pub struct MadtEntry_Unknown {
+pub struct MadtEntryUnknown {
-pub Type: u8,
+pub type_: u8,
-pub Length: u8,
+pub length: u8,
}
#[repr(C)]
#[derive(Debug, Clone)]
-pub struct MadtEntry_LocalApic {
+pub struct MadtEntryLocalApic {
-pub Type: u8, // 0
+pub type_: u8, // 0
-pub Length: u8,
+pub length: u8,
-pub ProcessorId: u8,
+pub processor_id: u8,
-pub Id: u8,
+pub id: u8,
-pub LapicFlags: u32,
+pub lapic_flags: u32,
}
#[repr(C)]
#[derive(Debug, Clone)]
-pub struct MadtEntry_IoApic {
+pub struct MadtEntryIoApic {
-pub Type: u8, // 1
+pub type_: u8, // 1
-pub Length: u8,
+pub length: u8,
-pub Id: u8,
+pub id: u8,
-pub Reserved: u8,
+pub reserved: u8,
-pub Address: u32,
+pub address: u32,
-pub GlobalIrqBase: u32,
+pub global_irq_base: u32,
}
#[derive(Debug)]
pub struct MadtEntryIter<'a> {
-madt: &'a madt,
+madt: &'a Madt,
ptr: *const u8,
end_ptr: *const u8,
}
-impl madt {
+impl Madt {
pub fn entry_iter(&self) -> MadtEntryIter {
let ptr = unsafe{ (self as *const Self).offset(1) } as *const u8;
-let end_ptr = unsafe{ ptr.offset(self.Header.Length as isize) };
+let end_ptr = unsafe{ ptr.offset(self.header.length as isize) };
MadtEntryIter { madt: self, ptr, end_ptr }
}
}
@ -127,12 +127,12 @@ impl<'a> Iterator for MadtEntryIter<'a> {
return None;
}
unsafe {
-let typeId = *self.ptr.offset(0);
+let type_id = *self.ptr.offset(0);
let len = *self.ptr.offset(1);
-let ret = Some(match typeId {
+let ret = Some(match type_id {
-0 => MadtEntry::LocalApic( (&*(self.ptr as *const MadtEntry_LocalApic)).clone() ),
+0 => MadtEntry::LocalApic( (&*(self.ptr as *const MadtEntryLocalApic)).clone() ),
-1 => MadtEntry::IoApic( (&*(self.ptr as *const MadtEntry_IoApic)).clone() ),
+1 => MadtEntry::IoApic( (&*(self.ptr as *const MadtEntryIoApic)).clone() ),
-_ => MadtEntry::Unknown( (&*(self.ptr as *const MadtEntry_Unknown)).clone() ),
+_ => MadtEntry::Unknown( (&*(self.ptr as *const MadtEntryUnknown)).clone() ),
});
self.ptr = self.ptr.offset(len as isize);
ret

View File

@ -1,4 +1,5 @@
extern {
+//noinspection RsStaticConstNaming
static mut lapic: *const ();
fn lapicinit(); // must set `lapic` first
fn lapiceoi(); // ack

View File

@ -3,7 +3,7 @@ use x86::cpuid::CpuId;
use x86::msr::*;
use memory::Frame;
-use paging::{ActivePageTable, PhysicalAddress, Page, VirtualAddress};
+use paging::{ActivePageTable, PhysAddr, Page, VirtualAddress};
use paging::entry::EntryFlags;
pub static mut LOCAL_APIC: LocalApic = LocalApic {
@ -32,7 +32,7 @@ impl LocalApic {
if ! self.x2 {
let page = Page::containing_address(VirtualAddress::new(self.address));
-let frame = Frame::containing_address(PhysicalAddress::new(self.address - ::KERNEL_OFFSET));
+let frame = Frame::containing_address(PhysAddr::new(self.address - ::KERNEL_OFFSET));
let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
result.flush(active_table);
}

View File

@ -7,7 +7,7 @@ pub mod pic;
pub mod keyboard;
pub mod pit;
-pub fn init<F>(mut page_map: F) -> acpi::ACPI_Result
+pub fn init<F>(mut page_map: F) -> acpi::AcpiResult
where F: FnMut(usize) {
assert_has_not_been_called!();

View File

@ -18,7 +18,7 @@ impl Entry {
pub fn pointed_frame(&self) -> Option<Frame> {
if self.flags().contains(EntryFlags::PRESENT) {
-Some(Frame::containing_address(
+Some(Frame::of_addr(
self.0 as usize & 0x000fffff_fffff000
))
} else {
@ -27,7 +27,7 @@ impl Entry {
}
pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
-assert!(frame.start_address().0 & !0x000fffff_fffff000 == 0);
+assert_eq!(frame.start_address().0 & !0x000fffff_fffff000, 0);
self.0 = (frame.start_address().0) | flags.bits();
}
}

View File

@ -22,10 +22,10 @@ impl Mapper {
unsafe { self.p4.as_mut() }
}
-pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
+pub fn translate(&self, virtual_address: VirtAddr) -> Option<PhysAddr> {
let offset = virtual_address % PAGE_SIZE;
-self.translate_page(Page::containing_address(virtual_address))
+self.translate_page(Page::of_addr(virtual_address))
-.map(|frame| PhysicalAddress((frame.start_address().get() + offset) as u64))
+.map(|frame| PhysAddr((frame.start_address().get() + offset) as u64))
}
pub fn translate_page(&self, page: Page) -> Option<Frame> {
@ -38,8 +38,8 @@ impl Mapper {
if let Some(start_frame) = p3_entry.pointed_frame() {
if p3_entry.flags().contains(EntryFlags::HUGE_PAGE) {
// address must be 1GiB aligned
-assert!(start_frame.start_address().get() % (ENTRY_COUNT * ENTRY_COUNT * PAGE_SIZE) == 0);
+assert_eq!(start_frame.start_address().get() % (ENTRY_COUNT * ENTRY_COUNT * PAGE_SIZE), 0);
-return Some(Frame::containing_address(
+return Some(Frame::of_addr(
start_frame.start_address().get() +
(page.p2_index() * ENTRY_COUNT + page.p1_index()) * PAGE_SIZE
));
@ -51,8 +51,8 @@ impl Mapper {
if let Some(start_frame) = p2_entry.pointed_frame() {
if p2_entry.flags().contains(EntryFlags::HUGE_PAGE) {
// address must be 2MiB aligned
-assert!(start_frame.start_address().get() % ENTRY_COUNT == 0);
+assert_eq!(start_frame.start_address().get() % ENTRY_COUNT, 0);
-return Some(Frame::containing_address(
+return Some(Frame::of_addr(
start_frame.start_address().get() + page.p1_index() * PAGE_SIZE
));
}
@ -92,7 +92,7 @@ impl Mapper {
pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
where A: FrameAllocator
{
-let page = Page::containing_address(frame.start_address().to_identity_virtual());
+let page = Page::of_addr(frame.start_address().to_identity_virtual());
self.map_to(page, frame, flags, allocator)
}

View File

@ -17,7 +17,7 @@ pub struct Page {
}
impl Page {
-pub fn containing_address(address: VirtualAddress) -> Page {
+pub fn of_addr(address: VirtAddr) -> Page {
assert!(address < 0x0000_8000_0000_0000 ||
address >= 0xffff_8000_0000_0000,
"invalid address: 0x{:x}", address);
@ -43,8 +43,8 @@ impl Page {
pub fn range_inclusive(start: Page, end: Page) -> PageIter {
PageIter {
-start: start,
+start,
-end: end,
+end,
}
}
}
@ -113,7 +113,7 @@ impl ActivePageTable {
use x86_64::registers::control_regs;
{
-let backup = Frame::containing_address(
+let backup = Frame::of_addr(
control_regs::cr3().0 as usize);
// map temporary_page to current p4 table
@ -139,7 +139,7 @@ impl ActivePageTable {
use x86_64::registers::control_regs;
let old_table = InactivePageTable {
-p4_frame: Frame::containing_address(
+p4_frame: Frame::of_addr(
control_regs::cr3().0 as usize
),
};

View File

@ -1,6 +1,6 @@
use super::{Page, ActivePageTable};
use super::table::{Table, Level1};
-use memory::{Frame, FrameAllocator, VirtualAddress};
+use memory::{Frame, FrameAllocator, VirtAddr};
pub struct TemporaryPage {
page: Page,
@ -12,7 +12,7 @@ impl TemporaryPage {
where A: FrameAllocator
{
TemporaryPage {
-page: page,
+page,
allocator: TinyAllocator::new(allocator),
}
}
@ -20,7 +20,7 @@ impl TemporaryPage {
/// Maps the temporary page to the given frame in the active table.
/// Returns the start address of the temporary page.
pub fn map(&mut self, frame: Frame, active_table: &mut ActivePageTable)
--> VirtualAddress
+-> VirtAddr
{
use super::entry::EntryFlags;

View File

@ -1,5 +1,5 @@
-use arch::driver::{acpi::ACPI_Result, apic::start_ap};
+use arch::driver::{acpi::AcpiResult, apic::start_ap};
-use memory::{MemoryController, PhysicalAddress};
+use memory::{MemoryController, PhysAddr};
extern {
fn entryother_start(); // physical addr of entryother
@ -8,11 +8,11 @@ extern {
const ENTRYOTHER_ADDR: u32 = 0x7000;
-pub fn start_other_cores(acpi: &ACPI_Result, mc: &mut MemoryController) {
+pub fn start_other_cores(acpi: &AcpiResult, mc: &mut MemoryController) {
mc.map_page_identity(ENTRYOTHER_ADDR as usize - 1);
mc.map_page_identity(ENTRYOTHER_ADDR as usize);
mc.map_page_identity(entryother_start as usize);
-mc.map_page_p2v(PhysicalAddress(0));
+mc.map_page_p2v(PhysAddr(0));
copy_entryother();
let args = unsafe{ &mut *(ENTRYOTHER_ADDR as *mut EntryArgs).offset(-1) };

View File

@ -1,3 +1,4 @@
+#![allow(dead_code)]
pub const MAX_CPU_NUM: usize = 8;
// Copy from Redox consts.rs:

View File

@ -21,7 +21,7 @@ impl VgaWriter {
VgaWriter {
column_position: 0,
color: Color::LightGray,
-buffer: buffer,
+buffer,
}
}
} }

View File

@ -1,27 +1,27 @@
use consts::{KERNEL_OFFSET, KERNEL_SIZE};
-pub use x86_64::{PhysicalAddress};
+pub use x86_64::PhysicalAddress as PhysAddr;
-pub type VirtualAddress = usize;
+pub type VirtAddr = usize;
pub trait FromToVirtualAddress {
fn get(&self) -> usize;
-fn to_identity_virtual(&self) -> VirtualAddress;
+fn to_identity_virtual(&self) -> VirtAddr;
-fn to_kernel_virtual(&self) -> VirtualAddress;
+fn to_kernel_virtual(&self) -> VirtAddr;
-fn from_kernel_virtual(addr: VirtualAddress) -> Self;
+fn from_kernel_virtual(addr: VirtAddr) -> Self;
}
-impl FromToVirtualAddress for PhysicalAddress {
+impl FromToVirtualAddress for PhysAddr {
fn get(&self) -> usize {
self.0 as usize
}
-fn to_identity_virtual(&self) -> VirtualAddress {
+fn to_identity_virtual(&self) -> VirtAddr {
self.0 as usize
}
-fn to_kernel_virtual(&self) -> VirtualAddress {
+fn to_kernel_virtual(&self) -> VirtAddr {
assert!((self.0 as usize) < KERNEL_SIZE);
self.0 as usize + KERNEL_OFFSET
}
-fn from_kernel_virtual(addr: VirtualAddress) -> Self {
+fn from_kernel_virtual(addr: VirtAddr) -> Self {
assert!(addr >= KERNEL_OFFSET && addr < KERNEL_OFFSET + KERNEL_SIZE);
-PhysicalAddress((addr - KERNEL_OFFSET) as u64)
+PhysAddr((addr - KERNEL_OFFSET) as u64)
}
}

View File

@ -1,4 +1,4 @@
-use memory::{Frame, FrameAllocator, PhysicalAddress};
+use memory::{Frame, FrameAllocator, PhysAddr};
use multiboot2::{MemoryAreaIter, MemoryArea};
pub struct AreaFrameAllocator {
@ -21,7 +21,7 @@ impl FrameAllocator for AreaFrameAllocator {
// the last frame of the current area
let current_area_last_frame = {
let address = area.base_addr + area.length - 1;
-Frame::containing_address(address as usize)
+Frame::of_addr(address as usize)
};
if frame > current_area_last_frame {
@ -55,18 +55,18 @@ impl FrameAllocator for AreaFrameAllocator {
}
impl AreaFrameAllocator {
-pub fn new(kernel_start: PhysicalAddress, kernel_end: PhysicalAddress,
+pub fn new(kernel_start: PhysAddr, kernel_end: PhysAddr,
-multiboot_start: PhysicalAddress, multiboot_end: PhysicalAddress,
+multiboot_start: PhysAddr, multiboot_end: PhysAddr,
memory_areas: MemoryAreaIter) -> AreaFrameAllocator
{
let mut allocator = AreaFrameAllocator {
-next_free_frame: Frame::containing_address(0),
+next_free_frame: Frame::of_addr(0),
current_area: None,
areas: memory_areas,
-kernel_start: Frame::containing_address(kernel_start.0 as usize),
+kernel_start: Frame::of_addr(kernel_start.0 as usize),
-kernel_end: Frame::containing_address(kernel_end.0 as usize),
+kernel_end: Frame::of_addr(kernel_end.0 as usize),
-multiboot_start: Frame::containing_address(multiboot_start.0 as usize),
+multiboot_start: Frame::of_addr(multiboot_start.0 as usize),
-multiboot_end: Frame::containing_address(multiboot_end.0 as usize),
+multiboot_end: Frame::of_addr(multiboot_end.0 as usize),
};
allocator.choose_next_area();
allocator
@ -75,11 +75,11 @@ impl AreaFrameAllocator {
fn choose_next_area(&mut self) {
self.current_area = self.areas.clone().filter(|area| {
let address = area.base_addr + area.length - 1;
-Frame::containing_address(address as usize) >= self.next_free_frame
+Frame::of_addr(address as usize) >= self.next_free_frame
}).min_by_key(|area| area.base_addr);
if let Some(area) = self.current_area {
-let start_frame = Frame::containing_address(area.base_addr as usize);
+let start_frame = Frame::of_addr(area.base_addr as usize);
if self.next_free_frame < start_frame {
self.next_free_frame = start_frame;
}

View File

@ -1,4 +1,4 @@
-use super::address::PhysicalAddress;
+use super::address::PhysAddr;
pub const PAGE_SIZE: usize = 4096;
@ -8,12 +8,12 @@ pub struct Frame {
}
impl Frame {
-pub fn containing_address(address: usize) -> Frame {
+pub fn of_addr(address: usize) -> Frame {
Frame{ number: address / PAGE_SIZE }
}
//TODO: Set private
-pub fn start_address(&self) -> PhysicalAddress {
+pub fn start_address(&self) -> PhysAddr {
-PhysicalAddress((self.number * PAGE_SIZE) as u64)
+PhysAddr((self.number * PAGE_SIZE) as u64)
}
pub fn clone(&self) -> Frame {
@ -22,8 +22,8 @@ impl Frame {
//TODO: Set private
pub fn range_inclusive(start: Frame, end: Frame) -> FrameIter {
FrameIter {
-start: start,
+start,
-end: end,
+end,
}
}
}

View File

@ -22,13 +22,13 @@ pub fn init(boot_info: &BootInformation) -> MemoryController {
let elf_sections_tag = boot_info.elf_sections_tag().expect(
"Elf sections tag required");
-let kernel_start = PhysicalAddress(elf_sections_tag.sections()
+let kernel_start = PhysAddr(elf_sections_tag.sections()
.filter(|s| s.is_allocated()).map(|s| s.start_address()).min().unwrap() as u64);
-let kernel_end = PhysicalAddress::from_kernel_virtual(elf_sections_tag.sections()
+let kernel_end = PhysAddr::from_kernel_virtual(elf_sections_tag.sections()
.filter(|s| s.is_allocated()).map(|s| s.end_address()).max().unwrap());
-let boot_info_start = PhysicalAddress(boot_info.start_address() as u64);
+let boot_info_start = PhysAddr(boot_info.start_address() as u64);
-let boot_info_end = PhysicalAddress(boot_info.end_address() as u64);
+let boot_info_end = PhysAddr(boot_info.end_address() as u64);
println!("kernel start: {:#x}, kernel end: {:#x}",
kernel_start,
@ -51,8 +51,8 @@ pub fn init(boot_info: &BootInformation) -> MemoryController {
use self::paging::Page;
use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
-let heap_start_page = Page::containing_address(KERNEL_HEAP_OFFSET);
+let heap_start_page = Page::of_addr(KERNEL_HEAP_OFFSET);
-let heap_end_page = Page::containing_address(KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE-1);
+let heap_end_page = Page::of_addr(KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE-1);
for page in Page::range_inclusive(heap_start_page, heap_end_page) {
active_table.map(page, EntryFlags::WRITABLE, &mut frame_allocator);
@ -78,7 +78,7 @@ pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation)
-> (ActivePageTable, Stack)
where A: FrameAllocator
{
-let mut temporary_page = TemporaryPage::new(Page::containing_address(0xcafebabe), allocator);
+let mut temporary_page = TemporaryPage::new(Page::of_addr(0xcafebabe), allocator);
let mut active_table = unsafe { ActivePageTable::new() };
let mut new_table = {
@ -95,8 +95,7 @@ pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation)
// section is not loaded to memory
continue;
}
-assert!(section.start_address() % PAGE_SIZE == 0,
-"sections need to be page aligned");
+assert_eq!(section.start_address() % PAGE_SIZE, 0, "sections need to be page aligned");
println!("mapping section at addr: {:#x}, size: {:#x}",
section.addr, section.size);
@ -104,7 +103,7 @@ pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation)
let flags = EntryFlags::from_elf_section_flags(section);
fn to_physical_frame(addr: usize) -> Frame {
-Frame::containing_address(
+Frame::of_addr(
if addr < KERNEL_OFFSET { addr }
else { addr - KERNEL_OFFSET })
}
@ -113,18 +112,18 @@ pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation)
let end_frame = to_physical_frame(section.end_address() - 1);
for frame in Frame::range_inclusive(start_frame, end_frame) {
-let page = Page::containing_address(frame.start_address().to_kernel_virtual());
+let page = Page::of_addr(frame.start_address().to_kernel_virtual());
mapper.map_to(page, frame, flags, allocator);
}
}
// identity map the VGA text buffer
-let vga_buffer_frame = Frame::containing_address(0xb8000);
+let vga_buffer_frame = Frame::of_addr(0xb8000);
mapper.identity_map(vga_buffer_frame, EntryFlags::WRITABLE, allocator);
// identity map the multiboot info structure
-let multiboot_start = Frame::containing_address(boot_info.start_address());
+let multiboot_start = Frame::of_addr(boot_info.start_address());
-let multiboot_end = Frame::containing_address(boot_info.end_address() - 1);
+let multiboot_end = Frame::of_addr(boot_info.end_address() - 1);
for frame in Frame::range_inclusive(multiboot_start, multiboot_end) {
mapper.identity_map(frame, EntryFlags::PRESENT, allocator);
}
@ -135,8 +134,8 @@ pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation)
// turn the stack bottom into a guard page
extern { fn stack_bottom(); }
-let stack_bottom = PhysicalAddress(stack_bottom as u64).to_kernel_virtual();
+let stack_bottom = PhysAddr(stack_bottom as u64).to_kernel_virtual();
-let stack_bottom_page = Page::containing_address(stack_bottom);
+let stack_bottom_page = Page::of_addr(stack_bottom);
active_table.unmap(stack_bottom_page, allocator);
let kernel_stack = Stack::new(stack_bottom + 8 * PAGE_SIZE, stack_bottom + 1 * PAGE_SIZE);
println!("guard page at {:#x}", stack_bottom_page.start_address());
@ -161,13 +160,13 @@ impl MemoryController {
size_in_pages)
}
pub fn map_page_identity(&mut self, addr: usize) {
-let frame = Frame::containing_address(addr);
+let frame = Frame::of_addr(addr);
let flags = EntryFlags::WRITABLE;
self.active_table.identity_map(frame, flags, &mut self.frame_allocator);
}
-pub fn map_page_p2v(&mut self, addr: PhysicalAddress) {
+pub fn map_page_p2v(&mut self, addr: PhysAddr) {
-let page = Page::containing_address(addr.to_kernel_virtual());
+let page = Page::of_addr(addr.to_kernel_virtual());
-let frame = Frame::containing_address(addr.get());
+let frame = Frame::of_addr(addr.get());
let flags = EntryFlags::WRITABLE;
self.active_table.map_to(page, frame, flags, &mut self.frame_allocator);
}

View File

@ -64,8 +64,8 @@ impl Stack {
pub(super) fn new(top: usize, bottom: usize) -> Stack {
assert!(top > bottom);
Stack {
-top: top,
+top,
-bottom: bottom,
+bottom,
}
}