Mirror of https://github.com/sgmarz/osblog.git, synced 2024-11-24 02:16:19 +04:00

Ran rustfmt and removed dead code

commit 7a6ba92732 (parent 8ad7d2c24c)
Stephen Marz, 2019-10-09 20:24:19 -04:00
3 changed files with 358 additions and 206 deletions

File 1 of 3: the kmem module (sub-page kernel byte allocator)

@@ -3,12 +3,11 @@
 // Stephen Marz
 // 7 October 2019

-use crate::page::{Table, align_val, zalloc, dealloc, PAGE_SIZE};
+use crate::page::{align_val, zalloc, Table, PAGE_SIZE};
 use core::{mem::size_of, ptr::null_mut};

 #[repr(usize)]
 enum AllocListFlags {
-    None = 0,
     Taken = 1 << 63,
 }
 impl AllocListFlags {
@@ -24,15 +23,19 @@ impl AllocList {
     pub fn is_taken(&self) -> bool {
         self.flags_size & AllocListFlags::Taken.val() != 0
     }
+
     pub fn is_free(&self) -> bool {
         !self.is_taken()
     }
+
     pub fn set_taken(&mut self) {
         self.flags_size |= AllocListFlags::Taken.val();
     }
+
     pub fn set_free(&mut self) {
         self.flags_size &= !AllocListFlags::Taken.val();
     }
+
     pub fn set_size(&mut self, sz: usize) {
         let k = self.is_taken();
         self.flags_size = sz & !AllocListFlags::Taken.val();
@@ -40,6 +43,7 @@ impl AllocList {
             self.flags_size |= AllocListFlags::Taken.val();
         }
     }
+
     pub fn get_size(&self) -> usize {
         self.flags_size & !AllocListFlags::Taken.val()
     }
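Aside: the whole AllocList trick is that one usize field carries both the taken flag (bit 63) and the size (bits 0 through 62). A stand-alone sketch of the same packing, with illustrative names that are not part of this commit:

    // Demo of the bit-63 packing that AllocList's flags_size uses.
    const TAKEN: usize = 1 << 63;

    struct Header {
        flags_size: usize, // bit 63 = taken, bits 0..=62 = size in bytes
    }

    impl Header {
        fn set(&mut self, taken: bool, size: usize) {
            self.flags_size = (size & !TAKEN) | if taken { TAKEN } else { 0 };
        }

        fn size(&self) -> usize {
            self.flags_size & !TAKEN
        }

        fn taken(&self) -> bool {
            self.flags_size & TAKEN != 0
        }
    }

    fn main() {
        let mut h = Header { flags_size: 0 };
        h.set(true, 4096);
        assert!(h.taken());
        assert_eq!(h.size(), 4096);
    }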
@@ -83,7 +87,8 @@ pub fn kmalloc(sz: usize) -> *mut u8 {
     unsafe {
         let size = align_val(sz, 3) + size_of::<AllocList>();
         let mut head = KMEM_HEAD;
-        let tail = (KMEM_HEAD as *mut u8).add(KMEM_ALLOC * PAGE_SIZE) as *mut AllocList;
+        let tail = (KMEM_HEAD as *mut u8).add(KMEM_ALLOC * PAGE_SIZE)
+                   as *mut AllocList;

         while head < tail {
             if (*head).is_free() && size <= (*head).get_size() {
@@ -91,7 +96,8 @@ pub fn kmalloc(sz: usize) -> *mut u8 {
                 let rem = chunk_size - size;
                 (*head).set_taken();
                 if rem > size_of::<AllocList>() {
-                    let next = (head as *mut u8).add(size) as *mut AllocList;
+                    let next = (head as *mut u8).add(size)
+                               as *mut AllocList;
                     // There is space remaining here.
                     (*next).set_free();
                     (*next).set_size(rem);
@@ -104,7 +110,8 @@ pub fn kmalloc(sz: usize) -> *mut u8 {
                 return head.add(1) as *mut u8;
             }
             else {
-                head = (head as *mut u8).add((*head).get_size()) as *mut AllocList;
+                head = (head as *mut u8).add((*head).get_size())
+                       as *mut AllocList;
             }
         }
     }
@@ -128,10 +135,12 @@ pub fn kfree(ptr: *mut u8) {
 pub fn coalesce() {
     unsafe {
         let mut head = KMEM_HEAD;
-        let tail = (KMEM_HEAD as *mut u8).add(KMEM_ALLOC * PAGE_SIZE) as *mut AllocList;
+        let tail = (KMEM_HEAD as *mut u8).add(KMEM_ALLOC * PAGE_SIZE)
+                   as *mut AllocList;

         while head < tail {
-            let next = (head as *mut u8).add((*head).get_size()) as *mut AllocList;
+            let next = (head as *mut u8).add((*head).get_size())
+                       as *mut AllocList;
             if (*head).get_size() == 0 {
                 break;
             }
@@ -139,10 +148,15 @@ pub fn coalesce() {
                 break;
             }
             else if (*head).is_free() && (*next).is_free() {
-                (*head).set_size((*head).get_size() + (*next).get_size());
+                (*head).set_size(
+                    (*head).get_size()
+                    + (*next).get_size(),
+                );
             }
-            // If we get here, we might've moved. Recalculate new head.
-            head = (head as *mut u8).add((*head).get_size()) as *mut AllocList;
+            // If we get here, we might've moved. Recalculate new
+            // head.
+            head = (head as *mut u8).add((*head).get_size())
+                   as *mut AllocList;
         }
     }
 }
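Aside: coalesce() walks the heap once and, whenever two neighboring blocks are both free, grows the first to swallow the second. A toy, host-side model of one such pass (the real allocator rewrites AllocList headers in place; this Vec version is illustrative only):

    // One pass of pairwise coalescing over (taken, size) blocks.
    fn coalesce(blocks: &mut Vec<(bool, usize)>) {
        let mut i = 0;
        while i + 1 < blocks.len() {
            if !blocks[i].0 && !blocks[i + 1].0 {
                // Both free: absorb the next block into this one.
                blocks[i].1 += blocks[i + 1].1;
                blocks.remove(i + 1);
            }
            i += 1;
        }
    }

    fn main() {
        let mut blocks = vec![(false, 64), (false, 32), (true, 128), (false, 16)];
        coalesce(&mut blocks);
        assert_eq!(blocks, vec![(false, 96), (true, 128), (false, 16)]);
    }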
@@ -151,10 +165,17 @@ pub fn coalesce() {
 pub fn print_table() {
     unsafe {
         let mut head = KMEM_HEAD;
-        let tail = (KMEM_HEAD as *mut u8).add(KMEM_ALLOC * PAGE_SIZE) as *mut AllocList;
+        let tail = (KMEM_HEAD as *mut u8).add(KMEM_ALLOC * PAGE_SIZE)
+                   as *mut AllocList;
         while head < tail {
-            println!("{:p}: Length = {:<10} Taken = {}", head, (*head).get_size(), (*head).is_taken());
-            head = (head as *mut u8).add((*head).get_size()) as *mut AllocList;
+            println!(
+                "{:p}: Length = {:<10} Taken = {}",
+                head,
+                (*head).get_size(),
+                (*head).is_taken()
+            );
+            head = (head as *mut u8).add((*head).get_size())
+                   as *mut AllocList;
         }
     }
 }

File 2 of 3: the crate root (kinit/kmain, panic handler, module declarations)

@@ -2,7 +2,12 @@
 // Stephen Marz
 // 21 Sep 2019

 #![no_std]
-#![feature(panic_info_message,asm,allocator_api,alloc_error_handler,alloc_prelude,const_raw_ptr_to_usize_cast)]
+#![feature(panic_info_message,
+           asm,
+           allocator_api,
+           alloc_error_handler,
+           alloc_prelude,
+           const_raw_ptr_to_usize_cast)]
 #[macro_use]
 extern crate alloc;
@@ -57,8 +62,7 @@ fn panic(info: &core::panic::PanicInfo) -> ! {
     abort();
 }
 #[no_mangle]
-extern "C"
-fn abort() -> ! {
+extern "C" fn abort() -> ! {
     loop {
         unsafe {
             asm!("wfi"::::"volatile");
@@ -72,8 +76,7 @@ fn abort() -> ! {
 // const STR_Y: &str = "\x1b[38;2;79;221;13m✓\x1b[m";
 // const STR_N: &str = "\x1b[38;2;221;41;13m✘\x1b[m";

-extern "C"
-{
+extern "C" {
     static TEXT_START: usize;
     static TEXT_END: usize;
     static DATA_START: usize;
@@ -82,8 +85,6 @@ extern "C"
     static RODATA_END: usize;
     static BSS_START: usize;
     static BSS_END: usize;
-    static HEAP_START: usize;
-    static HEAP_SIZE: usize;
     static KERNEL_STACK_START: usize;
     static KERNEL_STACK_END: usize;
     static mut KERNEL_TABLE: usize;
@@ -91,8 +92,14 @@ extern "C"
 /// Identity map range
 /// Takes a contiguous allocation of memory and maps it using PAGE_SIZE
 /// This assumes that start <= end
-pub fn id_map_range(root: &mut page::Table, start: usize, end: usize, bits: i64) {
-    let num_pages = (page::align_val(end, 12) - (start & !(page::PAGE_SIZE-1))) / page::PAGE_SIZE;
+pub fn id_map_range(root: &mut page::Table,
+                    start: usize,
+                    end: usize,
+                    bits: i64)
+{
+    let num_pages = (page::align_val(end, 12)
+                     - (start & !(page::PAGE_SIZE - 1)))
+                    / page::PAGE_SIZE;
     for i in 0..num_pages {
         let m = (start & !(page::PAGE_SIZE - 1)) + (i << 12);
         page::map(root, m, m, bits);
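Aside: the wrapped num_pages expression rounds start down to a page boundary and end up to the next one, then counts 4 KiB steps. A quick check with illustrative addresses (the align_val body here mirrors the one defined in page.rs):

    const PAGE_SIZE: usize = 4096;

    // Round val up to the next 2^order boundary, as page.rs does.
    const fn align_val(val: usize, order: usize) -> usize {
        let o = (1usize << order) - 1;
        (val + o) & !o
    }

    fn main() {
        let start = 0x8000_0100usize; // not page-aligned
        let end = 0x8000_2010usize;
        let num_pages =
            (align_val(end, 12) - (start & !(PAGE_SIZE - 1))) / PAGE_SIZE;
        // start rounds down to 0x8000_0000, end rounds up to 0x8000_3000:
        // three 4 KiB pages cover the range.
        assert_eq!(num_pages, 3);
    }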
@@ -102,8 +109,7 @@ pub fn id_map_range(root: &mut page::Table, start: usize, end: usize, bits: i64)
 // / ENTRY POINT
 // ///////////////////////////////////
 #[no_mangle]
-extern "C"
-fn kinit() -> usize {
+extern "C" fn kinit() -> usize {
     // We created kinit, which runs in super-duper mode
     // 3 called "machine mode".
     // The job of kinit() is to get us into supervisor mode
@@ -118,53 +124,123 @@ fn kinit() -> usize {
     let mut root = unsafe { root_ptr.as_mut().unwrap() };
     let kheap_head = kmem::get_head() as usize;
     let total_pages = kmem::get_num_allocations();
-    id_map_range(&mut root, kheap_head, kheap_head + (total_pages << 12), page::EntryBits::ReadWrite.val());
+    id_map_range(
+        &mut root,
+        kheap_head,
+        kheap_head + (total_pages << 12),
+        page::EntryBits::ReadWrite.val(),
+    );
     unsafe {
         // Map executable section
-        id_map_range(&mut root, TEXT_START, TEXT_END, page::EntryBits::ReadExecute.val());
+        id_map_range(
+            &mut root,
+            TEXT_START,
+            TEXT_END,
+            page::EntryBits::ReadExecute.val(),
+        );
         // Map rodata section
-        // We put the ROdata section into the text section, so they can potentially overlap
-        // however, we only care that it's read only.
-        id_map_range(&mut root, RODATA_START, RODATA_END, page::EntryBits::ReadExecute.val());
+        // We put the ROdata section into the text section, so they can
+        // potentially overlap however, we only care that it's read
+        // only.
+        id_map_range(
+            &mut root,
+            RODATA_START,
+            RODATA_END,
+            page::EntryBits::ReadExecute.val(),
+        );
         // Map data section
-        id_map_range(&mut root, DATA_START, DATA_END, page::EntryBits::ReadWrite.val());
+        id_map_range(
+            &mut root,
+            DATA_START,
+            DATA_END,
+            page::EntryBits::ReadWrite.val(),
+        );
         // Map bss section
-        id_map_range(&mut root, BSS_START, BSS_END, page::EntryBits::ReadWrite.val());
+        id_map_range(
+            &mut root,
+            BSS_START,
+            BSS_END,
+            page::EntryBits::ReadWrite.val(),
+        );
         // Map kernel stack
-        id_map_range(&mut root, KERNEL_STACK_START, KERNEL_STACK_END, page::EntryBits::ReadWrite.val());
+        id_map_range(
+            &mut root,
+            KERNEL_STACK_START,
+            KERNEL_STACK_END,
+            page::EntryBits::ReadWrite.val(),
+        );
     }
     // UART
-    page::map(&mut root, 0x1000_0000, 0x1000_0000, page::EntryBits::ReadWrite.val());
+    page::map(
+        &mut root,
+        0x1000_0000,
+        0x1000_0000,
+        page::EntryBits::ReadWrite.val(),
+    );
     // CLINT
     //  -> MSIP
-    page::map(&mut root, 0x0200_0000, 0x0200_0000, page::EntryBits::ReadWrite.val());
+    page::map(
+        &mut root,
+        0x0200_0000,
+        0x0200_0000,
+        page::EntryBits::ReadWrite.val(),
+    );
     //  -> MTIMECMP
-    page::map(&mut root, 0x0200_b000, 0x0200_b000, page::EntryBits::ReadWrite.val());
+    page::map(
+        &mut root,
+        0x0200_b000,
+        0x0200_b000,
+        page::EntryBits::ReadWrite.val(),
+    );
     //  -> MTIME
-    page::map(&mut root, 0x0200_b000, 0x0200_b000, page::EntryBits::ReadWrite.val());
-    page::map(&mut root, 0x0200_c000, 0x0200_c000, page::EntryBits::ReadWrite.val());
+    page::map(
+        &mut root,
+        0x0200_b000,
+        0x0200_b000,
+        page::EntryBits::ReadWrite.val(),
+    );
+    page::map(
+        &mut root,
+        0x0200_c000,
+        0x0200_c000,
+        page::EntryBits::ReadWrite.val(),
+    );
     // PLIC
     //  -> Source priority
-    page::map(&mut root, 0x0c00_0000, 0x0c00_0000, page::EntryBits::ReadWrite.val());
+    page::map(
+        &mut root,
+        0x0c00_0000,
+        0x0c00_0000,
+        page::EntryBits::ReadWrite.val(),
+    );
     //  -> Pending array
-    page::map(&mut root, 0x0c00_1000, 0x0c00_1000, page::EntryBits::ReadWrite.val());
+    page::map(
+        &mut root,
+        0x0c00_1000,
+        0x0c00_1000,
+        page::EntryBits::ReadWrite.val(),
+    );
     //  -> Interrupt enables
-    page::map(&mut root, 0x0c00_2000, 0x0c00_2000, page::EntryBits::ReadWrite.val());
+    page::map(
+        &mut root,
+        0x0c00_2000,
+        0x0c00_2000,
+        page::EntryBits::ReadWrite.val(),
+    );
     //  -> Priority threshold and claim/complete registers
     for i in 0..=8 {
         let m = 0x0c20_0000 + (i << 12);
         page::map(&mut root, m, m, page::EntryBits::ReadWrite.val());
     }
-    // When we return from here, we'll go back to boot.S and switch into supervisor mode
-    // We will return the SATP register to be written when we return.
-    // root_u is the root page table's address. When stored into the SATP register, this is
-    // divided by 4 KiB (right shift by 12 bits).
-    // We enable the MMU by setting mode 8. Bits 63, 62, 61, 60 determine the mode.
-    // 0 = Bare (no translation)
+    // When we return from here, we'll go back to boot.S and switch into
+    // supervisor mode We will return the SATP register to be written when
+    // we return. root_u is the root page table's address. When stored into
+    // the SATP register, this is divided by 4 KiB (right shift by 12 bits).
+    // We enable the MMU by setting mode 8. Bits 63, 62, 61, 60 determine
+    // the mode. 0 = Bare (no translation)
     // 8 = Sv39
     // 9 = Sv48
     unsafe {
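Aside: per the comment above, kinit hands back a SATP value with mode 8 (Sv39) in bits 63:60 and the root table's physical page number below it. A sketch of that composition, with an illustrative root-table address:

    // Compose a SATP value: MODE (Sv39 = 8) in bits 63:60, PPN in bits 43:0.
    fn main() {
        let root_u: usize = 0x8020_1000; // hypothetical page-aligned root table
        let satp = (8usize << 60) | (root_u >> 12);
        assert_eq!(satp >> 60, 8); // MODE field reads back as Sv39
        assert_eq!((satp & 0xfff_ffff_ffff) << 12, root_u); // PPN restores the address
    }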
@@ -174,8 +250,7 @@ fn kinit() -> usize {
 }

 #[no_mangle]
-extern "C"
-fn kmain() {
+extern "C" fn kmain() {
     // Main should initialize all sub-systems and get
     // ready to start scheduling. The last thing this
     // should do is start the timer.
@@ -190,8 +265,12 @@ fn kmain() {
     println!();
     println!();
     println!("This is my operating system!");
-    println!("I'm so awesome. If you start typing something, I'll show you what you typed!");
-    // Create a new scope so that we can test the global allocator and deallocator
+    println!(
+        "I'm so awesome. If you start typing something, I'll show \
+         you what you typed!"
+    );
+    // Create a new scope so that we can test the global allocator and
+    // deallocator
     {
         // We have the global allocator, so let's see if that works!
         let k: Box<u32> = Box::new(100);
@@ -205,14 +284,15 @@ fn kmain() {
         println!("String = {}", sparkle_heart);
     }
     // Now see if we can read stuff:
-    // Usually we can use #[test] modules in Rust, but it would convolute the
-    // task at hand. So, we'll just add testing snippets.
+    // Usually we can use #[test] modules in Rust, but it would convolute
+    // the task at hand. So, we'll just add testing snippets.
     loop {
         if let Some(c) = my_uart.get() {
             match c {
                 8 => {
-                    // This is a backspace, so we essentially have
-                    // to write a space and backup again:
+                    // This is a backspace, so we
+                    // essentially have to write a space and
+                    // backup again:
                     print!("{} {}", 8 as char, 8 as char);
                 },
                 10 | 13 => {
@@ -220,17 +300,24 @@ fn kmain() {
                     println!();
                 },
                 0x1b => {
-                    // Those familiar with ANSI escape sequences
-                    // knows that this is one of them. The next
-                    // thing we should get is the left bracket [
-                    // These are multi-byte sequences, so we can take
-                    // a chance and get from UART ourselves.
-                    // Later, we'll button this up.
+                    // Those familiar with ANSI escape
+                    // sequences knows that this is one of
+                    // them. The next thing we should get is
+                    // the left bracket [
+                    // These are multi-byte sequences, so we
+                    // can take a chance and get from UART
+                    // ourselves. Later, we'll button this
+                    // up.
                     if let Some(next_byte) = my_uart.get() {
                         if next_byte == 91 {
-                            // This is a right bracket! We're on our way!
-                            if let Some(b) = my_uart.get() {
-                                match b as char {
+                            // This is a right
+                            // bracket! We're on our
+                            // way!
+                            if let Some(b) =
+                                my_uart.get()
+                            {
+                                match b as char
+                                {
                                     'A' => {
                                         println!("That's the up arrow!");
                                     },
@@ -245,7 +332,7 @@ fn kmain() {
                                     },
                                     _ => {
                                         println!("That's something else.....");
-                                    }
+                                    },
                                 }
                             }
                         }
@@ -253,7 +340,7 @@ fn kmain() {
                 },
                 _ => {
                     print!("{}", c as char);
-                }
+                },
             }
         }
     }
@@ -263,6 +350,6 @@ fn kmain() {
 // / RUST MODULES
 // ///////////////////////////////////

-pub mod uart;
-pub mod page;
 pub mod kmem;
+pub mod page;
+pub mod uart;

File 3 of 3: the page module (page-grained allocator and Sv39 MMU routines)

@@ -3,8 +3,7 @@
 // Stephen Marz
 // 6 October 2019

-use core::ptr::null_mut;
-use core::mem::size_of;
+use core::{mem::size_of, ptr::null_mut};

 // ////////////////////////////////
 // // Allocation routines
@@ -33,7 +32,7 @@ pub const fn align_val(val: usize, order: usize) -> usize {
 pub enum PageBits {
     Empty = 0,
     Taken = 1 << 0,
-    Last = 1 << 1
+    Last = 1 << 1,
 }

 impl PageBits {
@@ -48,7 +47,7 @@ impl PageBits {
 // as well, where each 4096-byte chunk of memory has a structure
 // associated with it. However, there structure is much larger.
 pub struct Page {
-    flags: u8
+    flags: u8,
 }

 impl Page {
@@ -62,6 +61,7 @@ impl Page {
             false
         }
     }
+
     // If the page is marked as being taken (allocated), then
     // this function returns true. Otherwise, it returns false.
     pub fn is_taken(&self) -> bool {
@@ -72,43 +72,54 @@ impl Page {
             false
         }
     }
+
     // This is the opposite of is_taken().
     pub fn is_free(&self) -> bool {
         !self.is_taken()
     }
+
     // Clear the Page structure and all associated allocations.
     pub fn clear(&mut self) {
         self.flags = PageBits::Empty.val();
     }
+
     // Set a certain flag. We ran into trouble here since PageBits
     // is an enumeration and we haven't implemented the BitOr Trait
     // on it.
     pub fn set_flag(&mut self, flag: PageBits) {
         self.flags |= flag.val();
     }
+
     pub fn clear_flag(&mut self, flag: PageBits) {
         self.flags &= !(flag.val());
     }
 }

-/// Initialize the allocation system. There are several ways that we can implement the
-/// page allocator:
-/// 1. Free list (singly linked list where it starts at the first free allocation)
-/// 2. Bookkeeping list (structure contains a taken and length)
+/// Initialize the allocation system. There are several ways that we can
+/// implement the page allocator:
+/// 1. Free list (singly linked list where it starts at the first free
+/// allocation) 2. Bookkeeping list (structure contains a taken and length)
 /// 3. Allocate one Page structure per 4096 bytes (this is what I chose)
 /// 4. Others
 pub fn init() {
     unsafe {
         let num_pages = HEAP_SIZE / PAGE_SIZE;
         let ptr = HEAP_START as *mut Page;
-        // Clear all pages to make sure that they aren't accidentally taken
+        // Clear all pages to make sure that they aren't accidentally
+        // taken
         for i in 0..num_pages {
             (*ptr.add(i)).clear();
         }
-        // Determine where the actual useful memory starts. This will be after all Page
-        // structures. We also must align the ALLOC_START to a page-boundary (PAGE_SIZE = 4096).
-        // ALLOC_START = (HEAP_START + num_pages * size_of::<Page>() + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
-        ALLOC_START = align_val(HEAP_START + num_pages * size_of::<Page>(), PAGE_ORDER);
+        // Determine where the actual useful memory starts. This will be
+        // after all Page structures. We also must align the ALLOC_START
+        // to a page-boundary (PAGE_SIZE = 4096). ALLOC_START =
+        // (HEAP_START + num_pages * size_of::<Page>() + PAGE_SIZE - 1)
+        // & !(PAGE_SIZE - 1);
+        ALLOC_START = align_val(
+            HEAP_START + num_pages * size_of::<Page>(),
+            PAGE_ORDER,
+        );
     }
 }
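Aside: init() places ALLOC_START just past the Page descriptors, rounded up to a page boundary. A worked example with illustrative HEAP_START/HEAP_SIZE values (the real ones come from the linker script):

    use core::mem::size_of;

    const PAGE_SIZE: usize = 4096;
    const PAGE_ORDER: usize = 12;

    #[allow(dead_code)]
    struct Page {
        flags: u8,
    }

    const fn align_val(val: usize, order: usize) -> usize {
        let o = (1usize << order) - 1;
        (val + o) & !o
    }

    fn main() {
        let heap_start: usize = 0x8000_0000; // hypothetical HEAP_START
        let heap_size: usize = 128 * 1024 * 1024; // hypothetical HEAP_SIZE
        let num_pages = heap_size / PAGE_SIZE; // 32768 one-byte descriptors
        let alloc_start =
            align_val(heap_start + num_pages * size_of::<Page>(), PAGE_ORDER);
        assert_eq!(alloc_start, 0x8000_8000); // 32 KiB of metadata, page-aligned
    }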
@@ -118,44 +129,48 @@ pub fn alloc(pages: usize) -> *mut u8 {
     // We have to find a contiguous allocation of pages
     assert!(pages > 0);
     unsafe {
-        // We create a Page structure for each page on the heap. We actually might
-        // have more since HEAP_SIZE moves and so does the size of our structure, but
-        // we'll only waste a few bytes.
+        // We create a Page structure for each page on the heap. We
+        // actually might have more since HEAP_SIZE moves and so does
+        // the size of our structure, but we'll only waste a few bytes.
         let num_pages = HEAP_SIZE / PAGE_SIZE;
         let ptr = HEAP_START as *mut Page;
         for i in 0..num_pages - pages {
             let mut found = false;
-            // Check to see if this Page is free. If so, we have our first
-            // candidate memory address.
+            // Check to see if this Page is free. If so, we have our
+            // first candidate memory address.
             if (*ptr.add(i)).is_free() {
                 // It was FREE! Yay!
                 found = true;
                 for j in i..i + pages {
-                    // Now check to see if we have a contiguous allocation
-                    // for all of the request pages. If not, we should check
-                    // somewhere else.
+                    // Now check to see if we have a
+                    // contiguous allocation for all of the
+                    // request pages. If not, we should
+                    // check somewhere else.
                     if (*ptr.add(j)).is_taken() {
                         found = false;
                         break;
                     }
                 }
             }
-            // We've checked to see if there are enough contiguous pages
-            // to form what we need. If we couldn't, found will be false,
-            // otherwise it will be true, which means we've found valid
-            // memory we can allocate.
+            // We've checked to see if there are enough contiguous
+            // pages to form what we need. If we couldn't, found
+            // will be false, otherwise it will be true, which means
+            // we've found valid memory we can allocate.
             if found {
                 for k in i..i + pages - 1 {
                     (*ptr.add(k)).set_flag(PageBits::Taken);
                 }
-                // The marker for the last page is PageBits::Last
-                // This lets us know when we've hit the end of this particular
-                // allocation.
+                // The marker for the last page is
+                // PageBits::Last This lets us know when we've
+                // hit the end of this particular allocation.
                 (*ptr.add(i+pages-1)).set_flag(PageBits::Taken);
                 (*ptr.add(i+pages-1)).set_flag(PageBits::Last);
-                // The Page structures themselves aren't the useful memory. Instead,
-                // there is 1 Page structure per 4096 bytes starting at ALLOC_START.
-                return (ALLOC_START + PAGE_SIZE * i) as *mut u8;
+                // The Page structures themselves aren't the
+                // useful memory. Instead, there is 1 Page
+                // structure per 4096 bytes starting at
+                // ALLOC_START.
+                return (ALLOC_START + PAGE_SIZE * i)
+                       as *mut u8;
             }
         }
     }
@@ -198,9 +213,10 @@ pub fn dealloc(ptr: *mut u8) {
     // Make sure we don't try to free a null pointer.
     assert!(!ptr.is_null());
     unsafe {
-        let addr = HEAP_START + (ptr as usize - ALLOC_START) / PAGE_SIZE;
-        // Make sure that the address makes sense. The address we calculate here
-        // is the page structure, not the HEAP address!
+        let addr =
+            HEAP_START + (ptr as usize - ALLOC_START) / PAGE_SIZE;
+        // Make sure that the address makes sense. The address we
+        // calculate here is the page structure, not the HEAP address!
         assert!(addr >= HEAP_START && addr < HEAP_START + HEAP_SIZE);
         let mut p = addr as *mut Page;
         // Keep clearing pages until we hit the last page.
@@ -210,7 +226,11 @@ pub fn dealloc(ptr: *mut u8) {
         }
         // If the following assertion fails, it is most likely
         // caused by a double-free.
-        assert!((*p).is_last() == true, "Possible double-free detected! (Not taken found before last)");
+        assert!(
+            (*p).is_last() == true,
+            "Possible double-free detected! (Not taken found \
+             before last)"
+        );
         // If we get here, we've taken care of all previous pages and
         // we are on the last page.
         (*p).clear();
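Aside: alloc() hands out ALLOC_START + PAGE_SIZE * i for descriptor index i, and dealloc()'s first line inverts that to land back on the descriptor. A round-trip with illustrative constants:

    const PAGE_SIZE: usize = 4096;

    fn main() {
        let heap_start: usize = 0x8000_0000; // hypothetical HEAP_START
        let alloc_start: usize = 0x8000_8000; // hypothetical ALLOC_START
        let i = 5; // sixth Page descriptor

        // alloc(): descriptor index i maps to this usable page.
        let ptr = alloc_start + PAGE_SIZE * i;

        // dealloc(): recover the descriptor address from the pointer.
        let addr = heap_start + (ptr - alloc_start) / PAGE_SIZE;
        assert_eq!(addr, heap_start + i); // one 1-byte Page struct per page
    }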
@@ -227,20 +247,33 @@ pub fn print_page_allocations() {
         let alloc_beg = ALLOC_START;
         let alloc_end = ALLOC_START + num_pages * PAGE_SIZE;
         println!();
-        println!("PAGE ALLOCATION TABLE\nMETA: {:p} -> {:p}\nPHYS: 0x{:x} -> 0x{:x}", beg, end, alloc_beg, alloc_end);
+        println!(
+            "PAGE ALLOCATION TABLE\nMETA: {:p} -> {:p}\nPHYS: \
+             0x{:x} -> 0x{:x}",
+            beg, end, alloc_beg, alloc_end
+        );
         println!("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
         let mut num = 0;
         while beg < end {
             if (*beg).is_taken() {
                 let start = beg as usize;
-                let memaddr = ALLOC_START + (start - HEAP_START) * PAGE_SIZE;
+                let memaddr = ALLOC_START
+                              + (start - HEAP_START)
+                                * PAGE_SIZE;
                 print!("0x{:x} => ", memaddr);
                 loop {
                     num += 1;
                     if (*beg).is_last() {
                         let end = beg as usize;
-                        let memaddr = ALLOC_START + (end - HEAP_START) * PAGE_SIZE + 0xfff;
-                        print!("0x{:x}: {:>3} page(s)", memaddr, (end - start + 1));
+                        let memaddr =
+                            ALLOC_START
+                            + (end - HEAP_START)
+                              * PAGE_SIZE + 0xfff;
+                        print!(
+                            "0x{:x}: {:>3} page(s)",
+                            memaddr,
+                            (end - start + 1)
+                        );
                         println!(".");
                         break;
                     }
@@ -255,7 +288,6 @@ pub fn print_page_allocations() {
     }
 }

-
 // ////////////////////////////////
 // // MMU Routines
 // ////////////////////////////////
@@ -300,7 +332,7 @@ impl EntryBits {
 // since RISC-V requires that the reserved sections
 // take on the most significant bit.
 pub struct Entry {
-    pub entry: i64
+    pub entry: i64,
 }

 // The Entry structure describes one of the 512 entries per table, which is
@@ -309,21 +341,26 @@ impl Entry {
     pub fn is_valid(&self) -> bool {
         self.get_entry() & EntryBits::Valid.val() != 0
     }
+
     // The first bit (bit index #0) is the V bit for
     // valid.
     pub fn is_invalid(&self) -> bool {
         !self.is_valid()
     }
+
     // A leaf has one or more RWX bits set
     pub fn is_leaf(&self) -> bool {
         self.get_entry() & 0xe != 0
     }
+
     pub fn is_branch(&self) -> bool {
         !self.is_leaf()
     }
+
     pub fn set_entry(&mut self, entry: i64) {
         self.entry = entry;
     }
+
     pub fn get_entry(&self) -> i64 {
         self.entry
     }
@@ -364,7 +401,7 @@ pub fn map(root: &mut Table, vaddr: usize, paddr: usize, bits: i64) {
         // VPN[1] = vaddr[29:21]
         (vaddr >> 21) & 0x1ff,
         // VPN[2] = vaddr[38:30]
-        (vaddr >> 30) & 0x1ff
+        (vaddr >> 30) & 0x1ff,
     ];

     // Just like the virtual address, extract the physical address
@@ -377,32 +414,37 @@ pub fn map(root: &mut Table, vaddr: usize, paddr: usize, bits: i64) {
         // PPN[1] = paddr[29:21]
         (paddr >> 21) & 0x1ff,
         // PPN[2] = paddr[55:30]
-        (paddr >> 30) & 0x3ff_ffff
+        (paddr >> 30) & 0x3ff_ffff,
     ];

-    // We will use this as a floating reference so that we can set individual
-    // entries as we walk the table.
+    // We will use this as a floating reference so that we can set
+    // individual entries as we walk the table.
     let mut v = &mut root.entries[vpn[2]];

     // Now, we're going to traverse the page table and set the bits
     // properly. We expect the root to be valid, however we're required to
     // create anything beyond the root.
     // In Rust, we create an iterator using the .. operator.
-    // The .rev() will reverse the iteration since we need to start with VPN[2]
-    // The .. operator is inclusive on start but exclusive on end. So, (0..2)
-    // will iterate 0 and 1.
+    // The .rev() will reverse the iteration since we need to start with
+    // VPN[2] The .. operator is inclusive on start but exclusive on end.
+    // So, (0..2) will iterate 0 and 1.
     for i in (0..2).rev() {
         if !v.is_valid() {
             // Allocate a page
             let page = zalloc(1);
-            // The page is already aligned by 4,096, so store it directly
-            // The page is stored in the entry shifted right by 2 places.
-            v.set_entry((page as i64 >> 2) | EntryBits::Valid.val());
+            // The page is already aligned by 4,096, so store it
+            // directly The page is stored in the entry shifted
+            // right by 2 places.
+            v.set_entry(
+                (page as i64 >> 2)
+                | EntryBits::Valid.val(),
+            );
         }
         let entry = ((v.get_entry() & !0x3ff) << 2) as *mut Entry;
         v = unsafe { entry.add(vpn[i]).as_mut().unwrap() };
     }

-    // When we get here, we should be at VPN[0] and v should be pointing to our
-    // entry.
-    // The entry structure is Figure 4.18 in the RISC-V Privileged Specification
+    // When we get here, we should be at VPN[0] and v should be pointing to
+    // our entry.
+    // The entry structure is Figure 4.18 in the RISC-V Privileged
+    // Specification
     let entry: i64 = (ppn[2] << 28) as i64 |    // PPN[2] = [53:28]
                      (ppn[1] << 19) as i64 |    // PPN[1] = [27:19]
                      (ppn[0] << 10) as i64 |    // PPN[0] = [18:10]
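Aside: for a page-aligned physical address below 2^56, the three PPN pieces OR'd into bits [53:28], [27:19], and [18:10] come out the same as (paddr >> 12) << 10. A worked example of the VPN split and entry packing, with illustrative addresses:

    fn main() {
        let vaddr: usize = 0x8020_1234;
        let vpn = [
            (vaddr >> 12) & 0x1ff, // VPN[0] = vaddr[20:12]
            (vaddr >> 21) & 0x1ff, // VPN[1] = vaddr[29:21]
            (vaddr >> 30) & 0x1ff, // VPN[2] = vaddr[38:30]
        ];
        assert_eq!(vpn, [0x001, 0x001, 0x002]);

        // Pack a leaf entry: PPN shifted up to bit 10, Valid in bit 0.
        let paddr: usize = 0x8020_1000;
        let entry: i64 = ((paddr as i64 >> 12) << 10) | 1;
        // Same result as the ">> 2" shortcut the comments above describe.
        assert_eq!(entry, (paddr as i64) >> 2 | 1);
    }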
@@ -424,12 +466,17 @@ pub fn unmap(root: &mut Table) {
         if entry_lv2.is_valid() && entry_lv2.is_branch() {
             // This is a valid entry, so drill down and free.
             let memaddr_lv1 = (entry_lv2.get_entry() & !0x3ff) << 2;
-            let table_lv1 = unsafe { (memaddr_lv1 as *mut Table).as_mut().unwrap() };
+            let table_lv1 = unsafe {
+                (memaddr_lv1 as *mut Table).as_mut().unwrap()
+            };
             for lv1 in 0..Table::len() {
                 let ref entry_lv1 = table_lv1.entries[lv1];
-                if entry_lv1.is_valid() && entry_lv1.is_branch() {
-                    let memaddr_lv0 = (entry_lv1.get_entry() & !0x3ff) << 2;
-                    // The next level is level 0, which cannot have branches, therefore,
+                if entry_lv1.is_valid() && entry_lv1.is_branch()
+                {
+                    let memaddr_lv0 = (entry_lv1.get_entry()
+                                       & !0x3ff) << 2;
+                    // The next level is level 0, which
+                    // cannot have branches, therefore,
                     // we free here.
                     dealloc(memaddr_lv0 as *mut u8);
                 }
@@ -451,7 +498,7 @@ pub fn walk(root: &Table, vaddr: usize) -> Option<usize> {
         // VPN[1] = vaddr[29:21]
         (vaddr >> 21) & 0x1ff,
         // VPN[2] = vaddr[38:30]
-        (vaddr >> 30) & 0x1ff
+        (vaddr >> 30) & 0x1ff,
     ];

     // The last 12 bits (0xfff) is not translated by
@@ -484,18 +531,15 @@ pub fn walk(root: &Table, vaddr: usize) -> Option<usize> {
     // I don't like mixing return with the expression-type returns, but it
     // keeps this code cleaner.
     if v.is_invalid() || v.is_branch() {
-        // If we get here, that means the page is either invalid or not a leaf,
-        // which both are cause for a page fault.
+        // If we get here, that means the page is either invalid or not
+        // a leaf, which both are cause for a page fault.
        None
     }
     else {
-        // The physical address starts at bit 10 in the entry, however it is
-        // supposed to start at bit 12, so we shift it up and then add the
-        // page offset.
+        // The physical address starts at bit 10 in the entry, however
+        // it is supposed to start at bit 12, so we shift it up and then
+        // add the page offset.
         let addr = ((v.get_entry() & !0x3ff) << 2) as usize;
         Some(addr | pgoff)
     }
 }
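Aside: walk()'s final step is just the inverse of map()'s packing: shift the entry's PPN back up and OR in the untranslated page offset. A stand-alone check with illustrative values:

    fn main() {
        let vaddr: usize = 0x1000_0abc;
        let pgoff = vaddr & 0xfff; // low 12 bits pass through untranslated

        // A leaf entry for physical page 0x8765_4000 (page-aligned, so the
        // ">> 2" shortcut from map()'s comments applies), Valid bit set.
        let paddr_page: usize = 0x8765_4000;
        let entry: i64 = ((paddr_page as i64) >> 2) | 1;

        // walk(): strip the low 10 flag bits, shift up, add the offset.
        let addr = ((entry & !0x3ff) << 2) as usize;
        assert_eq!(addr | pgoff, 0x8765_4abc);
    }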