mirror of https://github.com/sgmarz/osblog.git synced 2024-11-24 02:16:19 +04:00

Created a larger pager which autodetects the range for GB, MB, and KB pages

Stephen Marz 2019-10-11 22:52:35 -04:00
parent 72ea82b224
commit 4772efe99d
3 changed files with 102 additions and 63 deletions

View File: kmem.rs

@@ -50,6 +50,9 @@ impl AllocList {
 }
 static mut KMEM_HEAD: *mut AllocList = null_mut();
+// In the future, we will have on-demand pages
+// so, we need to keep track of our memory footprint to
+// see if we actually need to allocate more.
 static mut KMEM_ALLOC: usize = 0;
 static mut KMEM_PAGE_TABLE: *mut Table = null_mut();
@@ -102,6 +105,8 @@ pub fn kmalloc(sz: usize) -> *mut u8 {
 	unsafe {
 		let size = align_val(sz, 3) + size_of::<AllocList>();
 		let mut head = KMEM_HEAD;
+		// .add() uses pointer arithmetic, so we type-cast into a u8
+		// so that we multiply by an absolute size (KMEM_ALLOC * PAGE_SIZE).
 		let tail = (KMEM_HEAD as *mut u8).add(KMEM_ALLOC * PAGE_SIZE)
 		           as *mut AllocList;
@@ -125,11 +130,15 @@ pub fn kmalloc(sz: usize) -> *mut u8 {
 				return head.add(1) as *mut u8;
 			}
 			else {
+				// If we get here, what we saw wasn't a free chunk, move on to the
+				// next.
 				head = (head as *mut u8).add((*head).get_size())
 				       as *mut AllocList;
 			}
 		}
 	}
+	// If we get here, we didn't find any free chunks--i.e. there isn't
+	// enough memory for this.
+	// TODO: Add on-demand page allocation.
 	null_mut()
 }
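An aside on the pointer arithmetic noted in the comments above: Rust's .add() advances a pointer by whole elements of the pointee type, so casting KMEM_HEAD to *mut u8 first is what makes KMEM_ALLOC * PAGE_SIZE count bytes. A small self-contained sketch (not part of this commit) showing the difference:

fn main() {
	let buf = [0u64; 4];
	let p = buf.as_ptr(); // *const u64
	unsafe {
		// Typed pointer: add(1) advances by size_of::<u64>() = 8 bytes.
		assert_eq!(p.add(1) as usize - p as usize, 8);
		// Byte pointer: add(1) advances by exactly 1 byte.
		let b = p as *const u8;
		assert_eq!(b.add(1) as usize - b as usize, 1);
	}
}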
@@ -141,6 +150,8 @@ pub fn kfree(ptr: *mut u8) {
 		if (*p).is_taken() {
 			(*p).set_free();
 		}
+		// After we free, see if we can combine adjacent free
+		// spots to see if we can reduce fragmentation.
 		coalesce();
 	}
 }
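The body of coalesce() is outside this diff. For readers following along, here is a hypothetical sketch of merging adjacent free chunks; the AllocList layout and the is_free/get_size/set_size helpers below are stand-ins for illustration, not the repository's actual definitions:

struct AllocList {
	flags_size: usize, // assumed layout: bit 63 = taken, low bits = chunk size
}

impl AllocList {
	fn is_free(&self) -> bool { self.flags_size >> 63 == 0 }
	fn get_size(&self) -> usize { self.flags_size & !(1 << 63) }
	fn set_size(&mut self, sz: usize) {
		let taken = self.flags_size & (1 << 63);
		self.flags_size = taken | (sz & !(1 << 63));
	}
}

unsafe fn coalesce(head: *mut AllocList, tail: *mut AllocList) {
	let mut prev = head;
	while prev < tail {
		let next = (prev as *mut u8).add((*prev).get_size()) as *mut AllocList;
		if next >= tail {
			break;
		}
		if (*prev).is_free() && (*next).is_free() {
			// Fold the neighbor into prev and stay put, in case the
			// chunk after that one is free as well.
			(*prev).set_size((*prev).get_size() + (*next).get_size());
		}
		else {
			prev = next;
		}
	}
}

fn main() {
	// Three adjacent free chunks of 32 bytes collapse into one of 96.
	let mut arena = [0usize; 12]; // 96 bytes on a 64-bit target
	unsafe {
		let base = arena.as_mut_ptr() as *mut u8;
		for i in 0..3 {
			(*(base.add(i * 32) as *mut AllocList)).flags_size = 32;
		}
		let head = base as *mut AllocList;
		let tail = base.add(96) as *mut AllocList;
		coalesce(head, tail);
		assert_eq!((*head).get_size(), 96);
	}
}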

View File: main.rs

@@ -97,13 +97,29 @@ pub fn id_map_range(root: &mut page::Table,
                    end: usize,
                    bits: i64)
 {
-	let start_aligned = start & !(page::PAGE_SIZE - 1);
-	let num_pages = (page::align_val(end, 12)
-	                 - start_aligned)
+	let mut memaddr = start & !(page::PAGE_SIZE - 1);
+	let mut num_kb_pages = (page::align_val(end, 12)
+	                        - memaddr)
 	                / page::PAGE_SIZE;
-	for i in 0..num_pages {
-		let m = start_aligned + (i << 12);
-		page::map(root, m, m, bits);
+	// There are 262,144, 4096-byte chunks for a gigabyte
+	// page.
+	let num_gb_pages = num_kb_pages / 262_144;
+	num_kb_pages -= num_gb_pages * 262_144;
+	// There are 512, 4096-byte chunks for a 2 MiB page.
+	let num_mb_pages = num_kb_pages / 512;
+	num_kb_pages -= num_mb_pages * 512;
+	for _ in 0..num_gb_pages {
+		page::map(root, memaddr, memaddr, bits, 2);
+		memaddr += 1 << 30;
+	}
+	for _ in 0..num_mb_pages {
+		page::map(root, memaddr, memaddr, bits, 1);
+		memaddr += 1 << 21;
+	}
+	for _ in 0..num_kb_pages {
+		page::map(root, memaddr, memaddr, bits, 0);
+		memaddr += 1 << 12;
 	}
 }
 // ///////////////////////////////////
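To see the new decomposition with concrete numbers, the same arithmetic stands alone below (the range size is invented). One caveat: Sv39 superpages must also be aligned to their size, so the gigabyte and 2 MiB mappings only come out right when memaddr is suitably aligned.

fn main() {
	const GB_CHUNKS: usize = 262_144; // 4 KiB pages per 1 GiB page
	const MB_CHUNKS: usize = 512; // 4 KiB pages per 2 MiB page
	// A hypothetical range of 1 GiB + 6 MiB + 12 KiB:
	let mut num_kb_pages = GB_CHUNKS + 3 * MB_CHUNKS + 3;
	let num_gb_pages = num_kb_pages / GB_CHUNKS;
	num_kb_pages -= num_gb_pages * GB_CHUNKS;
	let num_mb_pages = num_kb_pages / MB_CHUNKS;
	num_kb_pages -= num_mb_pages * MB_CHUNKS;
	// Prints: 1 x 1 GiB, 3 x 2 MiB, 3 x 4 KiB
	println!("{} x 1 GiB, {} x 2 MiB, {} x 4 KiB",
	         num_gb_pages, num_mb_pages, num_kb_pages);
}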
@ -126,10 +142,20 @@ extern "C" fn kinit() -> usize {
let mut root = unsafe { root_ptr.as_mut().unwrap() }; let mut root = unsafe { root_ptr.as_mut().unwrap() };
let kheap_head = kmem::get_head() as usize; let kheap_head = kmem::get_head() as usize;
let total_pages = kmem::get_num_allocations(); let total_pages = kmem::get_num_allocations();
println!();
println!();
unsafe {
println!("TEXT: 0x{:x} -> 0x{:x}", TEXT_START, TEXT_END);
println!("RODATA: 0x{:x} -> 0x{:x}", RODATA_START, RODATA_END);
println!("DATA: 0x{:x} -> 0x{:x}", DATA_START, DATA_END);
println!("BSS: 0x{:x} -> 0x{:x}", BSS_START, BSS_END);
println!("STACK: 0x{:x} -> 0x{:x}", KERNEL_STACK_START, KERNEL_STACK_END);
println!("HEAP: 0x{:x} -> 0x{:x}", kheap_head, kheap_head + total_pages * 4096);
}
id_map_range( id_map_range(
&mut root, &mut root,
kheap_head, kheap_head,
kheap_head + (total_pages << 12), kheap_head + total_pages * 4096,
page::EntryBits::ReadWrite.val(), page::EntryBits::ReadWrite.val(),
); );
unsafe { unsafe {
@ -179,6 +205,7 @@ extern "C" fn kinit() -> usize {
0x1000_0000, 0x1000_0000,
0x1000_0000, 0x1000_0000,
page::EntryBits::ReadWrite.val(), page::EntryBits::ReadWrite.val(),
0
); );
// CLINT // CLINT
@ -188,6 +215,7 @@ extern "C" fn kinit() -> usize {
0x0200_0000, 0x0200_0000,
0x0200_0000, 0x0200_0000,
page::EntryBits::ReadWrite.val(), page::EntryBits::ReadWrite.val(),
0
); );
// -> MTIMECMP // -> MTIMECMP
page::map( page::map(
@ -195,6 +223,7 @@ extern "C" fn kinit() -> usize {
0x0200_b000, 0x0200_b000,
0x0200_b000, 0x0200_b000,
page::EntryBits::ReadWrite.val(), page::EntryBits::ReadWrite.val(),
0
); );
// -> MTIME // -> MTIME
page::map( page::map(
@ -202,6 +231,7 @@ extern "C" fn kinit() -> usize {
0x0200_c000, 0x0200_c000,
0x0200_c000, 0x0200_c000,
page::EntryBits::ReadWrite.val(), page::EntryBits::ReadWrite.val(),
0
); );
// PLIC // PLIC
id_map_range( id_map_range(
@ -217,18 +247,23 @@ extern "C" fn kinit() -> usize {
page::EntryBits::ReadWrite.val(), page::EntryBits::ReadWrite.val(),
); );
page::print_page_allocations(); page::print_page_allocations();
let p = 0x8005_7000 as usize;
let m = page::walk(&root, p).unwrap_or(0);
println!("Walk 0x{:x} = 0x{:x}", p, m);
// When we return from here, we'll go back to boot.S and switch into // When we return from here, we'll go back to boot.S and switch into
// supervisor mode We will return the SATP register to be written when // supervisor mode We will return the SATP register to be written when
// we return. root_u is the root page table's address. When stored into // we return. root_u is the root page table's address. When stored into
// the SATP register, this is divided by 4 KiB (right shift by 12 bits). // the SATP register, this is divided by 4 KiB (right shift by 12 bits).
// We enable the MMU by setting mode 8. Bits 63, 62, 61, 60 determine // We enable the MMU by setting mode 8. Bits 63, 62, 61, 60 determine
// the mode. 0 = Bare (no translation) // the mode.
// 0 = Bare (no translation)
// 8 = Sv39 // 8 = Sv39
// 9 = Sv48 // 9 = Sv48
unsafe { unsafe {
KERNEL_TABLE = root_u; KERNEL_TABLE = root_u;
} }
(root_u >> 12) | (8 << 60) // table / 4096 Sv39 mode
(root_u >> 12) | (8 << 60)
} }
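The returned SATP value can be spelled out as a helper (the function name here is illustrative, not from the repository): MODE = 8 (Sv39) goes in bits 63:60, the ASID bits stay zero, and the PPN field is the root table's physical address divided by 4096.

fn satp_sv39(root_table_paddr: usize) -> usize {
	const SV39_MODE: usize = 8; // bits 63:60 select the translation mode
	(SV39_MODE << 60) | (root_table_paddr >> 12)
}

fn main() {
	// A root table at a hypothetical 0x8020_0000:
	assert_eq!(satp_sv39(0x8020_0000), 0x8000_0000_0008_0200);
}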
 #[no_mangle]
@ -242,19 +277,11 @@ extern "C" fn kmain() {
// now, lets connect to it and see if we can initialize it and write // now, lets connect to it and see if we can initialize it and write
// to it. // to it.
let mut my_uart = uart::Uart::new(0x1000_0000); let mut my_uart = uart::Uart::new(0x1000_0000);
println!();
println!();
println!("This is my operating system!");
println!(
"I'm so awesome. If you start typing something, I'll show \
you what you typed!"
);
// Create a new scope so that we can test the global allocator and // Create a new scope so that we can test the global allocator and
// deallocator // deallocator
{ {
// We have the global allocator, so let's see if that works! // We have the global allocator, so let's see if that works!
let k: Box<u32> = Box::new(100); let k = Box::<u32>::new(100);
println!("Boxed value = {}", *k); println!("Boxed value = {}", *k);
kmem::print_table(); kmem::print_table();
// The following comes from the Rust documentation: // The following comes from the Rust documentation:
@@ -266,7 +293,8 @@ extern "C" fn kmain() {
 	}
 	// Now see if we can read stuff:
 	// Usually we can use #[test] modules in Rust, but it would convolute
-	// the task at hand. So, we'll just add testing snippets.
+	// the task at hand, and it requires us to create the testing harness
+	// since the embedded testing system is part of the "std" library.
 	loop {
 		if let Some(c) = my_uart.get() {
 			match c {

View File: page.rs

@@ -265,10 +265,11 @@ pub fn print_page_allocations() {
 			num += 1;
 			if (*beg).is_last() {
 				let end = beg as usize;
-				let memaddr =
-					ALLOC_START
-					+ (end - HEAP_START)
-					* PAGE_SIZE + 0xfff;
+				let memaddr = ALLOC_START
+				              + (end
+				                 - HEAP_START)
+				              * PAGE_SIZE
+				              + PAGE_SIZE - 1;
 				print!(
 					"0x{:x}: {:>3} page(s)",
 					memaddr,
@@ -283,8 +284,16 @@ pub fn print_page_allocations() {
 		beg = beg.add(1);
 	}
 	println!("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
-	println!("Allocated: {:>5} pages ({:>9} bytes).", num, num * PAGE_SIZE);
-	println!("Free     : {:>5} pages ({:>9} bytes).", num_pages-num, (num_pages-num) * PAGE_SIZE);
+	println!(
+	         "Allocated: {:>5} pages ({:>9} bytes).",
+	         num,
+	         num * PAGE_SIZE
+	);
+	println!(
+	         "Free     : {:>5} pages ({:>9} bytes).",
+	         num_pages - num,
+	         (num_pages - num) * PAGE_SIZE
+	);
 	println!();
 }
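The reworked memaddr expression above computes the last byte of the last page in a contiguous region; + PAGE_SIZE - 1 is the same quantity as the old + 0xfff, just written without the magic number. Worked through with made-up base addresses:

fn main() {
	const PAGE_SIZE: usize = 4096;
	let alloc_start = 0x8010_0000usize; // stand-in for ALLOC_START
	let heap_start = 0x8000_5000usize; // stand-in for HEAP_START
	let end = heap_start + 7; // descriptor of the 8th page (index 7)
	// descriptor index -> start of that page -> its final byte
	let memaddr = alloc_start + (end - heap_start) * PAGE_SIZE + PAGE_SIZE - 1;
	assert_eq!(memaddr, 0x8010_7fff);
}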
@@ -389,7 +398,7 @@ impl Table {
 /// The bits MUST include one or more of the following:
 /// Read, Write, Execute
 /// The valid bit automatically gets added.
-pub fn map(root: &mut Table, vaddr: usize, paddr: usize, bits: i64) {
+pub fn map(root: &mut Table, vaddr: usize, paddr: usize, bits: i64, level: usize) {
 	// Make sure that Read, Write, or Execute have been provided
 	// otherwise, we'll leak memory and always create a page fault.
 	assert!(bits & 0xe != 0);
@@ -423,11 +432,11 @@ pub fn map(root: &mut Table, vaddr: usize, paddr: usize, bits: i64) {
 	// Now, we're going to traverse the page table and set the bits
 	// properly. We expect the root to be valid, however we're required to
 	// create anything beyond the root.
-	// In Rust, we create an iterator using the .. operator.
+	// In Rust, we create a range iterator using the .. operator.
 	// The .rev() will reverse the iteration since we need to start with
 	// VPN[2]. The .. operator is inclusive on start but exclusive on end.
 	// So, (0..2) will iterate 0 and 1.
-	for i in (0..2).rev() {
+	for i in (level..2).rev() {
 		if !v.is_valid() {
 			// Allocate a page
 			let page = zalloc(1);
@@ -446,12 +455,13 @@
 	// our entry.
 	// The entry structure is Figure 4.18 in the RISC-V Privileged
 	// Specification
-	let entry: i64 = (ppn[2] << 28) as i64 | // PPN[2] = [53:28]
+	let entry = (ppn[2] << 28) as i64 |      // PPN[2] = [53:28]
 	            (ppn[1] << 19) as i64 |      // PPN[1] = [27:19]
 	            (ppn[0] << 10) as i64 |      // PPN[0] = [18:10]
 	            bits |                       // Specified bits, such as User, Read, Write, etc
 	            EntryBits::Valid.val();      // Valid bit
-	// Set the entry. V should be set to the correct pointer by the loop above.
+	// Set the entry. V should be set to the correct pointer by the loop
+	// above.
 	v.set_entry(entry);
 }
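A standalone check of the entry layout built above: pack the three PPN fields plus the flag bits, then recover the physical address the way the table walk does, by masking off the ten flag bits and shifting left by two. The address is arbitrary.

fn main() {
	let paddr = 0x8020_3000usize;
	let ppn = [
		(paddr >> 12) & 0x1ff, // PPN[0]
		(paddr >> 21) & 0x1ff, // PPN[1]
		(paddr >> 30) & 0x3ff_ffff, // PPN[2] (26 bits in Sv39)
	];
	let bits: i64 = (1 << 1) | (1 << 2); // Read | Write, like EntryBits::ReadWrite
	let entry = (ppn[2] << 28) as i64
	            | (ppn[1] << 19) as i64
	            | (ppn[0] << 10) as i64
	            | bits
	            | 1; // Valid
	// Masking the 10 low flag bits and shifting left 2 undoes the encoding.
	let decoded = ((entry & !0x3ff) << 2) as usize;
	assert_eq!(decoded, paddr);
}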
@@ -469,6 +479,7 @@ pub fn unmap(root: &mut Table) {
 	// This is a valid entry, so drill down and free.
 	let memaddr_lv1 = (entry_lv2.get_entry() & !0x3ff) << 2;
 	let table_lv1 = unsafe {
+		// Make table_lv1 a mutable reference instead of a pointer.
 		(memaddr_lv1 as *mut Table).as_mut().unwrap()
 	};
 	for lv1 in 0..Table::len() {
@@ -503,45 +514,34 @@ pub fn walk(root: &Table, vaddr: usize) -> Option<usize> {
 	            (vaddr >> 30) & 0x1ff,
 	];
-	// The last 12 bits (0xfff) is not translated by
-	// the MMU, so we have to copy it from the vaddr
-	// to the physical address.
-	let pgoff = vaddr & 0xfff;
 	let mut v = &root.entries[vpn[2]];
-	for i in (0..2).rev() {
+	for i in (0..=2).rev() {
 		if v.is_invalid() {
 			// This is an invalid entry, page fault.
-			return None;
+			break;
 		}
 		else if v.is_leaf() {
-			// According to RISC-V, a leaf can at any level, however
-			// our page allocator doesn't do that. So, if we get
-			// a leaf here, something is wrong.
-			return None;
+			// According to RISC-V, a leaf can be at any level.
+			// The offset mask masks off the PPN. Each PPN is 9
+			// bits and they start at bit #12. So, our formula
+			// 12 + i * 9
+			let off_mask = (1 << (12 + i * 9)) - 1;
+			let vaddr_pgoff = vaddr & off_mask;
+			let addr = ((v.get_entry() << 2) as usize) & !off_mask;
+			return Some(addr | vaddr_pgoff);
 		}
 		// Set v to the next entry which is pointed to by this
 		// entry. However, the address was shifted right by 2 places
 		// when stored in the page table entry, so we shift it left
 		// to get it back into place.
 		let entry = ((v.get_entry() & !0x3ff) << 2) as *const Entry;
-		v = unsafe { entry.add(vpn[i]).as_ref().unwrap() };
+		// We do i - 1 here, however we should get None or Some() above
+		// before we do 0 - 1 = -1.
+		v = unsafe { entry.add(vpn[i - 1]).as_ref().unwrap() };
 	}
-	// If we get here, we should be at level 0 since we haven't returned
-	// yet. If we got a page fault earlier, we would've short circuited
-	// by returning None early.
-	// I don't like mixing return with the expression-type returns, but it
-	// keeps this code cleaner.
-	if v.is_invalid() || v.is_branch() {
-		// If we get here, that means the page is either invalid or not
-		// a leaf, which both are cause for a page fault.
-		None
-	}
-	else {
-		// The physical address starts at bit 10 in the entry, however
-		// it is supposed to start at bit 12, so we shift it up and then
-		// add the page offset.
-		let addr = ((v.get_entry() & !0x3ff) << 2) as usize;
-		Some(addr | pgoff)
-	}
+	// If we get here, we've exhausted all valid tables and haven't
+	// found a leaf.
+	None
 }
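Evaluating walk()'s off_mask formula at each level shows how many low bits of the virtual address pass through untranslated when the leaf sits at that level:

fn main() {
	for i in (0..=2usize).rev() {
		let off_mask = (1usize << (12 + i * 9)) - 1;
		println!("level {}: off_mask = 0x{:x}", i, off_mask);
	}
	// level 2: 0x3fffffff (1 GiB page offset)
	// level 1: 0x1fffff (2 MiB page offset)
	// level 0: 0xfff (4 KiB page offset)
}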