
Created a larger pager which autodetects the range for GB, MB, and KB pages

This commit is contained in:
Stephen Marz 2019-10-11 22:52:35 -04:00
parent 72ea82b224
commit 4772efe99d
3 changed files with 102 additions and 63 deletions

View File

@@ -50,6 +50,9 @@ impl AllocList {
}
static mut KMEM_HEAD: *mut AllocList = null_mut();
// In the future, we will have on-demand pages,
// so we need to keep track of our memory footprint to
// see whether we actually need to allocate more.
static mut KMEM_ALLOC: usize = 0;
static mut KMEM_PAGE_TABLE: *mut Table = null_mut();
@@ -102,6 +105,8 @@ pub fn kmalloc(sz: usize) -> *mut u8 {
unsafe {
let size = align_val(sz, 3) + size_of::<AllocList>();
let mut head = KMEM_HEAD;
// .add() does pointer arithmetic in units of the pointee's size,
// so we cast to a *mut u8 so that the offset is an absolute number
// of bytes (KMEM_ALLOC * PAGE_SIZE).
let tail = (KMEM_HEAD as *mut u8).add(KMEM_ALLOC * PAGE_SIZE)
as *mut AllocList;
@@ -125,11 +130,15 @@ pub fn kmalloc(sz: usize) -> *mut u8 {
return head.add(1) as *mut u8;
}
else {
// If we get here, the chunk we looked at isn't a usable free
// chunk; move on to the next one.
head = (head as *mut u8).add((*head).get_size())
as *mut AllocList;
}
}
}
// If we get here, we didn't find any free chunks, i.e. there
// isn't enough memory for this request.
// TODO: Add on-demand page allocation.
null_mut()
}
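
To make the scan above concrete, here is a minimal, self-contained sketch of the first-fit walk that kmalloc performs. The AllocList layout (one size word whose top bit marks a taken chunk) is assumed for illustration and may not match the real structure exactly:

use core::ptr::null_mut;

struct AllocList {
    flags_size: usize, // assumed: bit 63 = taken, lower bits = byte size
}

impl AllocList {
    fn is_free(&self) -> bool {
        self.flags_size >> 63 == 0
    }
    fn get_size(&self) -> usize {
        self.flags_size & !(1 << 63)
    }
}

// Scan [head, tail) for the first free chunk of at least `size` bytes.
unsafe fn first_fit(head: *mut AllocList,
                    tail: *mut AllocList,
                    size: usize) -> *mut AllocList {
    let mut p = head;
    while p < tail {
        if (*p).is_free() && size <= (*p).get_size() {
            return p;
        }
        // Cast to *mut u8 so that .add() advances by bytes rather
        // than by size_of::<AllocList>() units.
        p = (p as *mut u8).add((*p).get_size()) as *mut AllocList;
    }
    null_mut()
}
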
@@ -141,6 +150,8 @@ pub fn kfree(ptr: *mut u8) {
if (*p).is_taken() {
(*p).set_free();
}
// After we free, try to combine adjacent free blocks to
// reduce fragmentation.
coalesce();
}
}
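
The coalesce() call merges neighboring free chunks so that a later kmalloc can satisfy larger requests. Its body isn't part of this diff; the following is only a sketch of the idea, reusing the assumed AllocList layout from the snippet above:

// Walk the heap; whenever a free chunk is immediately followed by
// another free chunk, absorb the neighbor's bytes into the current
// chunk and re-check, since the next neighbor might be free too.
unsafe fn coalesce(head: *mut AllocList, tail: *mut AllocList) {
    let mut p = head;
    while p < tail {
        if (*p).get_size() == 0 {
            break; // corrupt list; avoid looping forever
        }
        let next = (p as *mut u8).add((*p).get_size()) as *mut AllocList;
        if next >= tail {
            break;
        }
        if (*p).is_free() && (*next).is_free() {
            // p is free, so its top bit is clear and we can grow the
            // size field directly.
            (*p).flags_size += (*next).get_size();
        }
        else {
            p = next;
        }
    }
}
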

View File

@@ -97,13 +97,29 @@ pub fn id_map_range(root: &mut page::Table,
end: usize,
bits: i64)
{
let start_aligned = start & !(page::PAGE_SIZE - 1);
let num_pages = (page::align_val(end, 12)
- start_aligned)
let mut memaddr = start & !(page::PAGE_SIZE - 1);
let mut num_kb_pages = (page::align_val(end, 12)
- memaddr)
/ page::PAGE_SIZE;
for i in 0..num_pages {
let m = start_aligned + (i << 12);
page::map(root, m, m, bits);
// There are 262,144 4096-byte chunks in a gigabyte
// page.
let num_gb_pages = num_kb_pages / 262_144;
num_kb_pages -= num_gb_pages * 262_144;
// There are 512 4096-byte chunks in a 2 MiB page.
let num_mb_pages = num_kb_pages / 512;
num_kb_pages -= num_mb_pages * 512;
for _ in 0..num_gb_pages {
page::map(root, memaddr, memaddr, bits, 2);
memaddr += 1 << 30;
}
for _ in 0..num_mb_pages {
page::map(root, memaddr, memaddr, bits, 1);
memaddr += 1 << 21;
}
for _ in 0..num_kb_pages {
page::map(root, memaddr, memaddr, bits, 0);
memaddr += 1 << 12;
}
}
// ///////////////////////////////////
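
A quick worked example of the split above, with a made-up address range:

fn main() {
    let start = 0x8000_0000usize;
    let end = 0xc060_5000usize;
    let mut kb_pages = (end - start) / 4096; // 263,685 4-KiB pages
    let gb_pages = kb_pages / 262_144; // 1
    kb_pages -= gb_pages * 262_144;
    let mb_pages = kb_pages / 512; // 3
    kb_pages -= mb_pages * 512; // 5 remain
    assert_eq!((gb_pages, mb_pages, kb_pages), (1, 3, 5));
}

So a 0x4060_5000-byte region takes one 1 GiB entry, three 2 MiB entries, and five 4 KiB entries instead of 263,685 individual 4 KiB entries.
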
@@ -126,10 +142,20 @@ extern "C" fn kinit() -> usize {
let mut root = unsafe { root_ptr.as_mut().unwrap() };
let kheap_head = kmem::get_head() as usize;
let total_pages = kmem::get_num_allocations();
println!();
println!();
unsafe {
println!("TEXT: 0x{:x} -> 0x{:x}", TEXT_START, TEXT_END);
println!("RODATA: 0x{:x} -> 0x{:x}", RODATA_START, RODATA_END);
println!("DATA: 0x{:x} -> 0x{:x}", DATA_START, DATA_END);
println!("BSS: 0x{:x} -> 0x{:x}", BSS_START, BSS_END);
println!("STACK: 0x{:x} -> 0x{:x}", KERNEL_STACK_START, KERNEL_STACK_END);
println!("HEAP: 0x{:x} -> 0x{:x}", kheap_head, kheap_head + total_pages * 4096);
}
id_map_range(
&mut root,
kheap_head,
kheap_head + (total_pages << 12),
kheap_head + total_pages * 4096,
page::EntryBits::ReadWrite.val(),
);
unsafe {
@@ -179,6 +205,7 @@ extern "C" fn kinit() -> usize {
0x1000_0000,
0x1000_0000,
page::EntryBits::ReadWrite.val(),
0
);
// CLINT
@@ -188,6 +215,7 @@ extern "C" fn kinit() -> usize {
0x0200_0000,
0x0200_0000,
page::EntryBits::ReadWrite.val(),
0
);
// -> MTIMECMP
page::map(
@@ -195,6 +223,7 @@ extern "C" fn kinit() -> usize {
0x0200_b000,
0x0200_b000,
page::EntryBits::ReadWrite.val(),
0
);
// -> MTIME
page::map(
@@ -202,6 +231,7 @@ extern "C" fn kinit() -> usize {
0x0200_c000,
0x0200_c000,
page::EntryBits::ReadWrite.val(),
0
);
// PLIC
id_map_range(
@@ -217,18 +247,23 @@ extern "C" fn kinit() -> usize {
page::EntryBits::ReadWrite.val(),
);
page::print_page_allocations();
let p = 0x8005_7000 as usize;
let m = page::walk(&root, p).unwrap_or(0);
println!("Walk 0x{:x} = 0x{:x}", p, m);
// When we return from here, we'll go back to boot.S and switch into
// supervisor mode. We will return the value to be written into the
// SATP register. root_u is the root page table's address; when stored
// into SATP, it is divided by 4 KiB (right shift by 12 bits).
// We enable the MMU by setting mode 8. Bits 63, 62, 61, 60 determine
// the mode. 0 = Bare (no translation)
// the mode.
// 0 = Bare (no translation)
// 8 = Sv39
// 9 = Sv48
unsafe {
KERNEL_TABLE = root_u;
}
(root_u >> 12) | (8 << 60)
// table / 4096 Sv39 mode
(root_u >> 12) | (8 << 60)
}
#[no_mangle]
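
Spelled out, the SATP value that kinit returns combines the Sv39 mode with the root table's physical page number. For a root table at, say, 0x8020_0000 (an address chosen purely for illustration):

fn main() {
    let root_u = 0x8020_0000usize;
    let satp = (root_u >> 12) // PPN = physical address / 4096
               | (8 << 60); // MODE = 8 (Sv39) in bits 63:60
    assert_eq!(satp, 0x8000_0000_0008_0200);
}
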
@@ -242,19 +277,11 @@ extern "C" fn kmain() {
// now, lets connect to it and see if we can initialize it and write
// to it.
let mut my_uart = uart::Uart::new(0x1000_0000);
println!();
println!();
println!("This is my operating system!");
println!(
"I'm so awesome. If you start typing something, I'll show \
you what you typed!"
);
// Create a new scope so that we can test the global allocator and
// deallocator
{
// We have the global allocator, so let's see if that works!
let k: Box<u32> = Box::new(100);
let k = Box::<u32>::new(100);
println!("Boxed value = {}", *k);
kmem::print_table();
// The following comes from the Rust documentation:
@@ -266,7 +293,8 @@ extern "C" fn kmain() {
}
// Now see if we can read stuff:
// Usually we can use #[test] modules in Rust, but it would convolute
// the task at hand. So, we'll just add testing snippets.
// the task at hand, and it would require us to create a testing
// harness, since Rust's built-in test framework is part of the
// "std" library.
loop {
if let Some(c) = my_uart.get() {
match c {

View File

@@ -265,10 +265,11 @@ pub fn print_page_allocations() {
num += 1;
if (*beg).is_last() {
let end = beg as usize;
let memaddr =
ALLOC_START
+ (end - HEAP_START)
* PAGE_SIZE + 0xfff;
let memaddr = ALLOC_START
+ (end
- HEAP_START)
* PAGE_SIZE
+ PAGE_SIZE - 1;
print!(
"0x{:x}: {:>3} page(s)",
memaddr,
@@ -283,8 +284,16 @@ pub fn print_page_allocations() {
beg = beg.add(1);
}
println!("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
println!("Allocated: {:>5} pages ({:>9} bytes).", num, num * PAGE_SIZE);
println!("Free : {:>5} pages ({:>9} bytes).", num_pages-num, (num_pages-num) * PAGE_SIZE);
println!(
"Allocated: {:>5} pages ({:>9} bytes).",
num,
num * PAGE_SIZE
);
println!(
"Free : {:>5} pages ({:>9} bytes).",
num_pages - num,
(num_pages - num) * PAGE_SIZE
);
println!();
}
}
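
The PAGE_SIZE - 1 term replaces the earlier magic number 0xfff and yields the inclusive last byte of the final page in a run, e.g. for a last page starting at a made-up 0x8780_0000:

let end_inclusive = 0x8780_0000usize + 4096 - 1; // 0x8780_0fff
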
@@ -389,7 +398,7 @@ impl Table {
/// The bits MUST include one or more of the following:
/// Read, Write, Execute
/// The valid bit automatically gets added.
pub fn map(root: &mut Table, vaddr: usize, paddr: usize, bits: i64) {
pub fn map(root: &mut Table, vaddr: usize, paddr: usize, bits: i64, level: usize) {
// Make sure that Read, Write, or Execute has been provided;
// otherwise, we'll leak memory and always create a page fault.
assert!(bits & 0xe != 0);
@@ -423,11 +432,11 @@ pub fn map(root: &mut Table, vaddr: usize, paddr: usize, bits: i64) {
// Now, we're going to traverse the page table and set the bits
// properly. We expect the root to be valid; however, we're
// required to create anything beyond the root.
// In Rust, we create an iterator using the .. operator.
// In Rust, we create a range iterator using the .. operator.
// The .rev() will reverse the iteration since we need to start with
// VPN[2]. The .. operator is inclusive on the start but exclusive
// on the end. So, (0..2) will iterate over 0 and 1.
for i in (0..2).rev() {
for i in (level..2).rev() {
if !v.is_valid() {
// Allocate a page
let page = zalloc(1);
@@ -446,12 +455,13 @@ pub fn map(root: &mut Table, vaddr: usize, paddr: usize, bits: i64) {
// our entry.
// The entry structure is shown in Figure 4.18 of the RISC-V
// Privileged Specification.
let entry: i64 = (ppn[2] << 28) as i64 | // PPN[2] = [53:28]
(ppn[1] << 19) as i64 | // PPN[1] = [27:19]
(ppn[0] << 10) as i64 | // PPN[0] = [18:10]
bits | // Specified bits, such as User, Read, Write, etc
EntryBits::Valid.val(); // Valid bit
// Set the entry. V should be set to the correct pointer by the loop above.
let entry = (ppn[2] << 28) as i64 | // PPN[2] = [53:28]
(ppn[1] << 19) as i64 | // PPN[1] = [27:19]
(ppn[0] << 10) as i64 | // PPN[0] = [18:10]
bits | // Specified bits, such as User, Read, Write, etc
EntryBits::Valid.val(); // Valid bit
// Set the entry. V should be set to the correct pointer by the loop
// above.
v.set_entry(entry);
}
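
With the new level parameter, the caller picks the leaf level and therefore the page size: 0 maps a 4 KiB page, 1 a 2 MiB page, and 2 a 1 GiB page. A hypothetical call mapping a single 2 MiB identity page might look like this (address made up; for a level-1 leaf, both vaddr and paddr must be 2 MiB-aligned, or the MMU faults on the misaligned superpage):

page::map(&mut root,
          0x8060_0000,
          0x8060_0000,
          page::EntryBits::ReadWrite.val(),
          1);
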
@@ -469,6 +479,7 @@ pub fn unmap(root: &mut Table) {
// This is a valid entry, so drill down and free.
let memaddr_lv1 = (entry_lv2.get_entry() & !0x3ff) << 2;
let table_lv1 = unsafe {
// Make table_lv1 a mutable reference instead of a pointer.
(memaddr_lv1 as *mut Table).as_mut().unwrap()
};
for lv1 in 0..Table::len() {
@@ -503,45 +514,34 @@ pub fn walk(root: &Table, vaddr: usize) -> Option<usize> {
(vaddr >> 30) & 0x1ff,
];
// The last 12 bits (0xfff) are not translated by
// the MMU, so we have to copy them from the vaddr
// to the physical address.
let pgoff = vaddr & 0xfff;
let mut v = &root.entries[vpn[2]];
for i in (0..2).rev() {
for i in (0..=2).rev() {
if v.is_invalid() {
// This is an invalid entry, page fault.
return None;
break;
}
else if v.is_leaf() {
// According to RISC-V, a leaf can be at any level, however
// our page allocator doesn't do that. So, if we get
// a leaf here, something is wrong.
return None;
// According to RISC-V, a leaf can be at any level.
// The offset mask masks off the PPN. Each PPN field is 9
// bits wide, and they start at bit #12. So, our formula is
// 12 + i * 9.
let off_mask = (1 << (12 + i * 9)) - 1;
let vaddr_pgoff = vaddr & off_mask;
let addr = ((v.get_entry() << 2) as usize) & !off_mask;
return Some(addr | vaddr_pgoff);
}
// Set v to the next entry, which is pointed to by this
// entry. However, the address was shifted right by 2 places
// when stored in the page table entry, so we shift it left
// to get it back into place.
let entry = ((v.get_entry() & !0x3ff) << 2) as *const Entry;
v = unsafe { entry.add(vpn[i]).as_ref().unwrap() };
}
// If we get here, we should be at level 0 since we haven't returned
// yet. If we got a page fault earlier, we would've short-circuited
// by returning None early.
// I don't like mixing return with the expression-type returns, but it
// keeps this code cleaner.
if v.is_invalid() || v.is_branch() {
// If we get here, that means the page is either invalid or not
// a leaf, both of which cause a page fault.
None
}
else {
// The physical address starts at bit 10 in the entry, however
// it is supposed to start at bit 12, so we shift it up and then
// add the page offset.
let addr = ((v.get_entry() & !0x3ff) << 2) as usize;
Some(addr | pgoff)
// We do i - 1 here; however, we should return None or Some()
// above before we ever compute 0 - 1 = -1.
v = unsafe { entry.add(vpn[i - 1]).as_ref().unwrap() };
}
// If we get here, we've exhausted all valid tables and haven't
// found a leaf.
None
}
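
To see the leaf arithmetic in action, here is a worked example for a level-1 (2 MiB) leaf that identity-maps 0x8060_0000, with the entry value constructed by hand for illustration:

fn main() {
    let i = 1usize; // a level-1 leaf covers 2 MiB
    let off_mask = (1usize << (12 + i * 9)) - 1; // 0x1f_ffff
    let vaddr = 0x8060_1234usize;
    let vaddr_pgoff = vaddr & off_mask; // 0x1234
    // The PPN sits at entry bit 10 but describes physical bit 12,
    // so phys >> 12 << 10 == phys >> 2.
    let entry = (0x8060_0000usize >> 2) | 0xf; // V|R|W|X leaf, illustrative
    let addr = (entry << 2) & !off_mask; // 0x8060_0000
    assert_eq!(addr | vaddr_pgoff, 0x8060_1234);
}
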