Mirror of https://github.com/sgmarz/osblog.git

Switched to using mret for trap

Stephen Marz 2019-10-14 16:14:22 -04:00
parent a10926df76
commit ccaf9b6a5c
6 changed files with 150 additions and 192 deletions

View File

@@ -47,9 +47,14 @@ _start:
     # to the very end of the stack range.
     la sp, _stack_end
     # Setting `mstatus` register:
-    # 0b11 << 11: Machine's previous protection mode is 3 (MPP=3).
+    # 0b01 << 11: Machine's previous protection mode is 2 (MPP=2).
     li t0, 0b11 << 11
     csrw mstatus, t0
+    # li t2, (1 << 1) | (1 << 5) | (1 << 9)
+    # li t2, 0xffff
+    # csrw mideleg, t2
+    li t3, 0xaaa
+    csrw mie, t3
     # Machine's exception program counter (MEPC) is set to `kinit`.
     la t1, kinit
     csrw mepc, t1
@@ -71,10 +76,10 @@ _start:
     # 1 << 1 : Supervisor's interrupt-enable bit will be set to 1 after sret.
     # We set the "previous" bits because the sret will write the current bits
     # with the previous bits.
-    li t0, (1 << 8) | (1 << 5)
-    csrw sstatus, t0
+    li t0, (1 << 11)
+    csrw mstatus, t0
     la t1, kmain
-    csrw sepc, t1
+    csrw mepc, t1
     # Setting `mideleg` (machine interrupt delegate) register:
     # 1 << 1 : Software interrupt delegated to supervisor mode
     # 1 << 5 : Timer interrupt delegated to supervisor mode
@@ -83,20 +88,19 @@ _start:
     # cause an elevation to the machine privilege mode (mode 3).
     # When we delegate, we're telling the CPU to only elevate to
     # the supervisor privilege mode (mode 1)
-    li t2, (1 << 1) | (1 << 5) | (1 << 9)
-    csrw mideleg, t2
     # Setting `sie` (supervisor interrupt enable) register:
     # This register takes the same bits as mideleg
     # 1 << 1 : Supervisor software interrupt enable (SSIE=1 [Enabled])
     # 1 << 5 : Supervisor timer interrupt enable (STIE=1 [Enabled])
     # 1 << 9 : Supervisor external interrupt enable (SEIE=1 [Enabled])
-    csrw sie, t2
+    # li t2, (1 << 1) | (1 << 5) | (1 << 9)
+    # csrw sie, t2
     # Setting `stvec` (supervisor trap vector) register:
     # Essentially this is a function pointer, but the last two bits can be 00 or 01
     # 00 : All exceptions set pc to BASE
     # 01 : Asynchronous interrupts set pc to BASE + 4 x scause
-    la t3, s_trap_vector
-    csrw stvec, t3
+    # la t3, s_trap
+    # csrw stvec, t3
     # kinit() is required to return back the SATP value (including MODE) via a0
     csrw satp, a0
     # Force the CPU to take our SATP register.
@@ -105,8 +109,10 @@ _start:
     # it in memory, it will be the old table. So, sfence.vma will ensure that the MMU always
     # grabs a fresh copy of the SATP register and associated tables.
     sfence.vma
+    # ret
+    # call kmain
     # sret will put us in supervisor mode and re-enable interrupts
-    sret
+    mret
 3:
     # Parked harts go here. We need to set these
@@ -117,6 +123,27 @@ _start:
     # where base address is 0x0200_0000 (MMIO CLINT base address)
     # We only use additional harts to run user-space programs, although this may
     # change.
+    la sp, _stack_end
+    li t0, 0x10000
+    csrr t1, mhartid
+    mul t0, t0, t1
+    sub sp, sp, t0
+    li t0, 0b01 << 11 | (1 << 5)
+    csrw mstatus, t0
+    li t3, 0xaaa
+    csrw mie, t3
+    # Machine's exception program counter (MEPC) is set to `kinit`.
+    la t1, 4f
+    csrw mepc, t1
+    # Machine's trap vector base address is set to `m_trap_vector`, for
+    # "machine" trap vector.
+    la t2, m_trap_vector
+    csrw mtvec, t2
+    # We use mret here so that the mstatus register is properly updated.
+    mret
 4:
     wfi
     j 4b
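
The boot code above leans on two magic constants: 0b11 << 11 for mstatus and 0xaaa for mie. A minimal, self-contained Rust sketch (not part of the repository) that decodes them, assuming the standard RV64 bit positions from the privileged spec:

    // MPP occupies mstatus bits [12:11]; 0b11 there means "previous privilege =
    // Machine", which is the mode mret drops into when it restores MPP.
    const MSTATUS_MPP_MACHINE: usize = 0b11 << 11; // 0x1800
    // 0xaaa sets mie bits 1, 3, 5, 7, 9, and 11:
    // SSIE, MSIE, STIE, MTIE, SEIE, MEIE (software, timer, external for S and M).
    const MIE_ALL: usize =
        (1 << 1) | (1 << 3) | (1 << 5) | (1 << 7) | (1 << 9) | (1 << 11);

    fn main() {
        assert_eq!(MSTATUS_MPP_MACHINE, 0x1800);
        assert_eq!(MIE_ALL, 0xaaa);
        println!("mstatus = {:#x}, mie = {:#x}", MSTATUS_MPP_MACHINE, MIE_ALL);
    }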

View File

@@ -5,13 +5,14 @@
 # 24 February 2019
 .option norvc
 .altmacro
-.set NUM_GP_REGS, 34  # Number of registers per context
+.set NUM_GP_REGS, 32  # Number of registers per context
 .set NUM_FP_REGS, 32
 .set REG_SIZE, 8      # Register size (in bytes)
+.set MAX_CPUS, 8      # Maximum number of CPUs
 .section .bss
 .global _GLOBAL_CTX
-.lcomm _GLOBAL_CTX, (NUM_FP_REGS + NUM_GP_REGS) * REG_SIZE
+.lcomm _GLOBAL_CTX, (NUM_FP_REGS + NUM_GP_REGS) * REG_SIZE * MAX_CPUS
 # Use macros for saving and restoring multiple registers
 .macro save_gp i, basereg=t6
@@ -39,21 +40,17 @@ m_trap_vector:
     # base register for saving
     csrw mscratch, t6
-    # Disable the MMU since we're switching.
     csrr t6, satp
-    slli t6, t6, 4
-    srli t6, t6, 4
+    slli t6, t6, 1
+    srli t6, t6, 1
     csrw satp, t6
-    # Disable interrupts
-    li t6, 1 << 3
-    csrrc zero, mstatus, t6
     # Start saving the registers. We can't do much until the registers
     # have been saved.
     la t6, _GLOBAL_CTX
-    .set i, 0
-    .rept 31
+    # Set i = 1. i = 0 is the zero register and is a waste of time.
+    .set i, 1
+    .rept 30
         save_gp %i
         .set i, i+1
     .endr
@@ -63,16 +60,15 @@ m_trap_vector:
     csrr t6, mscratch
     save_gp 31, t5
-    # csrr t0, satp
-    # sd t0, 32*8(t5)
+    # We need to check the status of the floating
+    # point system. (FS bits are described in 3.1.6.5
+    # and table 3.3 in the RISC-V Privilege spec).
     csrr t1, mstatus
     srli t0, t1, 13
     andi t0, t0, 3
-    li t1, 3
     # Skip saving the FP registers if the FP is turned off.
-    bne t0, t1, 3f
+    beqz t0, 3f
     la t6, _GLOBAL_CTX
     .set i, 0
     .rept 32
@@ -86,22 +82,37 @@ m_trap_vector:
     # trap_handler returns the new mepc
     # via a0
     la sp, _stack_end
+    li t0, 0x10000
+    csrr t1, mhartid
+    mul t0, t0, t1
+    sub sp, sp, t0
     csrr a0, mepc
     csrr a1, mtval
     csrr a2, mcause
     csrr a3, mhartid
-    call m_trap
+    csrr a4, mstatus
+    la t1, m_trap
+    csrw mepc, t1
+    la ra, 3f
+    li t2, 0b11 << 11
+    csrw mstatus, t2
+    mret
+3:
     csrw mepc, a0
     # Move to the context for this CPU
     la t6, _GLOBAL_CTX
+    # Go to the context for this hart
+    csrr t5, mhartid
+    slli t5, t5, 6
+    add t6, t6, t5
     csrr t1, mstatus
     srli t0, t1, 13
     andi t0, t0, 3
-    li t1, 3
     # Skip loading the FP registers if the FP is turned off.
-    bne t0, t1, 3f
+    beqz t0, 3f
     .set i, 0
     .rept 32
         load_fp %i
@@ -109,128 +120,22 @@ m_trap_vector:
     .endr
 3:
-    # csrr t5, mhartid
-    # slli t5, t5, CTX_SHL
-    # add t6, t6, t5
     # Restore the registers
-    .set i, 0
-    .rept 32
+    .set i, 1
+    .rept 31
         load_gp %i
         .set i, i+1
     .endr
-    # Get the MMU going
-    csrw mscratch, t6
-    la t6, _GLOBAL_CTX
-    ld t6, 32*8(t6)
-    csrw satp, t6
-    sfence.vma
-    csrr t6, mscratch
-    # x31 gets restored at the last iteration
-    mret
-.global s_trap_vector
-# This must be aligned by 4 since the last two bits
-# of the mtvec register do not contribute to the address
-# of this vector.
-.align 4
-s_trap_vector:
-    # We have to save t6 (x31), since it will be our
-    # base register for saving
-    csrw sscratch, t6
-    # Disable the MMU since we're switching.
-    csrr t6, satp
-    slli t6, t6, 4
-    srli t6, t6, 4
-    csrw satp, t6
-    # Disable interrupts
-    li t6, 1 << 3
-    csrrc zero, sstatus, t6
-    # Start saving the registers. We can't do much until the registers
-    # have been saved.
-    la t6, _GLOBAL_CTX
-    .set i, 0
-    .rept 31
-        save_gp %i
-        .set i, i+1
-    .endr
-    # We saved 31 registers. All but t6
-    # t5 is saved, so we can overwrite it.
-    mv t5, t6
-    csrr t6, sscratch
-    save_gp 31, t5
-    # csrr t0, satp
-    # sd t0, 32*8(t5)
-    csrr t1, sstatus
-    srli t0, t1, 13
-    andi t0, t0, 3
-    li t1, 3
-    # Skip saving the FP registers if the FP is turned off.
-    bne t0, t1, 3f
-    la t6, _GLOBAL_CTX
-    .set i, 0
-    .rept 32
-        save_fp %i
-        .set i, i+1
-    .endr
-3:
-    # Go to Rust
-    # usize trap_handler(mepc, mcause)
-    # trap_handler returns the new mepc
-    # via a0
-    la sp, _stack_end
-    csrr a0, sepc
-    csrr a1, stval
-    csrr a2, scause
-    call s_trap
-    csrw sepc, a0
-    # Move to the context for this CPU
-    la t6, _GLOBAL_CTX
-    csrr t1, sstatus
-    srli t0, t1, 13
-    andi t0, t0, 3
-    li t1, 3
-    # Skip loading the FP registers if the FP is turned off.
-    bne t0, t1, 3f
-    .set i, 0
-    .rept 32
-        load_fp %i
-        .set i, i+1
-    .endr
-3:
-    # csrr t5, mhartid
-    # slli t5, t5, CTX_SHL
-    # add t6, t6, t5
-    # Restore the registers
-    .set i, 0
-    .rept 32
-        load_gp %i
-        .set i, i+1
-    .endr
-    # Get the MMU going
-    csrw sscratch, t6
-    la t6, _GLOBAL_CTX
-    ld t6, 32*8(t6)
-    csrw satp, t6
-    sfence.vma
-    csrr t6, mscratch
+    # The MMU should be going at this point via
+    # Rust.
+    li t2, (1 << 8) | (1 << 5)
+    csrw mstatus, t2
     # x31 gets restored at the last iteration
     sret
 .global make_syscall
 make_syscall:
     ecall
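
One behavioral change worth noting: the old vector compared the FS field against 3 (bne t0, t1, 3f), so floating-point state was saved only when FS was Dirty, while the new vector skips the save only when FS is Off (beqz t0, 3f). A hypothetical Rust helper, not from the repository, restating that test (FS is mstatus[14:13]: 0=Off, 1=Initial, 2=Clean, 3=Dirty):

    // Mirror of the check done by `srli t0, t1, 13; andi t0, t0, 3; beqz t0, 3f`.
    fn fp_needs_save(mstatus: usize) -> bool {
        let fs = (mstatus >> 13) & 0b11;
        fs != 0 // only skip the FP save/restore loops when FS == Off
    }

    fn main() {
        assert!(!fp_needs_save(0));          // FS = Off: skip
        assert!(fp_needs_save(0b01 << 13));  // FS = Initial: save
        assert!(fp_needs_save(0b11 << 13));  // FS = Dirty: save
    }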

View File

@@ -79,10 +79,10 @@ pub fn get_num_allocations() -> usize {
 /// alloc/dealloc from the page crate.
 pub fn init() {
     unsafe {
-        // Allocate 64 kernel pages (64 * 4096 = 262 KiB)
-        let k_alloc = zalloc(64);
+        // Allocate kernel pages (KMEM_ALLOC)
+        KMEM_ALLOC = 512;
+        let k_alloc = zalloc(KMEM_ALLOC);
         assert!(!k_alloc.is_null());
-        KMEM_ALLOC = 64;
         KMEM_HEAD = k_alloc as *mut AllocList;
         (*KMEM_HEAD).set_free();
         (*KMEM_HEAD).set_size(KMEM_ALLOC * PAGE_SIZE);

View File

@@ -146,12 +146,12 @@ extern "C" fn kinit() -> usize {
         println!("DATA: 0x{:x} -> 0x{:x}", DATA_START, DATA_END);
         println!("BSS: 0x{:x} -> 0x{:x}", BSS_START, BSS_END);
         println!("STACK: 0x{:x} -> 0x{:x}", KERNEL_STACK_START, KERNEL_STACK_END);
-        println!("HEAP: 0x{:x} -> 0x{:x}", kheap_head, kheap_head + total_pages * 4096);
+        println!("HEAP: 0x{:x} -> 0x{:x}", kheap_head, kheap_head + total_pages * page::PAGE_SIZE);
     }
     id_map_range(
         &mut root,
         kheap_head,
-        kheap_head + total_pages * 4096,
+        kheap_head + total_pages * page::PAGE_SIZE,
         page::EntryBits::ReadWrite.val(),
     );
     unsafe {
@@ -203,38 +203,20 @@ extern "C" fn kinit() -> usize {
     }
     // UART
-    page::map(
+    id_map_range(
         &mut root,
         0x1000_0000,
-        0x1000_0000,
+        0x1000_0100,
         page::EntryBits::ReadWrite.val(),
-        0
     );
     // CLINT
     // -> MSIP
-    page::map(
+    id_map_range(
         &mut root,
         0x0200_0000,
-        0x0200_0000,
+        0x0200_ffff,
         page::EntryBits::ReadWrite.val(),
-        0
-    );
-    // -> MTIMECMP
-    page::map(
-        &mut root,
-        0x0200_b000,
-        0x0200_b000,
-        page::EntryBits::ReadWrite.val(),
-        0
-    );
-    // -> MTIME
-    page::map(
-        &mut root,
-        0x0200_c000,
-        0x0200_c000,
-        page::EntryBits::ReadWrite.val(),
-        0
     );
     // PLIC
     id_map_range(
@@ -255,7 +237,7 @@ extern "C" fn kinit() -> usize {
     // space application requires services. Since the user space application
     // only knows virtual addresses, we have to translate silently behind
     // the scenes.
-    let p = 0x0200_0000 as usize;
+    let p = 0x0200_4000 as usize;
     let m = page::virt_to_phys(&root, p).unwrap_or(0);
     println!("Walk 0x{:x} = 0x{:x}", p, m);
     // When we return from here, we'll go back to boot.S and switch into
@@ -271,6 +253,7 @@ extern "C" fn kinit() -> usize {
         // We have to store the kernel's table. The tables will be moved back
         // and forth between the kernel's table and user applicatons' tables.
         KERNEL_TABLE = root_u;
+        println!("Setting 0x{:x}", KERNEL_TABLE);
     }
     // table / 4096    Sv39 mode
     (root_u >> 12) | (8 << 60)
@@ -290,18 +273,23 @@ extern "C" fn kmain() {
         // We have the global allocator, so let's see if that works!
         let k = Box::<u32>::new(100);
         println!("Boxed value = {}", *k);
-        kmem::print_table();
         // The following comes from the Rust documentation:
         // some bytes, in a vector
         let sparkle_heart = vec![240, 159, 146, 150];
         // We know these bytes are valid, so we'll use `unwrap()`.
+        // This will MOVE the vector.
         let sparkle_heart = String::from_utf8(sparkle_heart).unwrap();
         println!("String = {}", sparkle_heart);
+        println!("\n\nAllocations of a box, vector, and string");
+        kmem::print_table();
     }
+    println!("\n\nEverything should now be free:");
+    kmem::print_table();
     unsafe {
-        let val = 0x0200_0000 as *mut u32;
-        val.write_volatile(1);
-        asm!("ecall");
+        // asm!("csrw sip, $0" :: "r"(1));
+        let mtimecmp = 0x0200_4000 as *mut u64;
+        let mtime = 0x0200_bff8 as *const u64;
+        mtimecmp.write_volatile(mtime.read_volatile() + 10_000_000);
     }
     // If we get here, the Box, vec, and String should all be freed since
     // they go out of scope. This calls their "Drop" trait.
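
The new tail of kmain is what actually exercises the mret-based trap path: it arms the CLINT timer so a machine timer interrupt arrives shortly after boot. A sketch of that step factored into a helper, assuming QEMU's virt CLINT layout (mtimecmp for hart 0 at 0x0200_4000, mtime at 0x0200_bff8) and its 10 MHz timebase, under which 10_000_000 ticks is roughly one second:

    // Arm the machine timer: MTIP fires once mtime passes mtimecmp,
    // which vectors execution into m_trap_vector with cause code 7.
    // Only meaningful on the target machine; the addresses are MMIO.
    fn arm_timer(ticks_from_now: u64) {
        let mtimecmp = 0x0200_4000 as *mut u64; // CLINT mtimecmp, hart 0
        let mtime = 0x0200_bff8 as *const u64;  // CLINT mtime (free-running)
        unsafe {
            mtimecmp.write_volatile(mtime.read_volatile() + ticks_from_now);
        }
    }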

View File

@@ -285,12 +285,12 @@ pub fn print_page_allocations() {
     }
     println!("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
     println!(
-        "Allocated: {:>5} pages ({:>9} bytes).",
+        "Allocated: {:>6} pages ({:>10} bytes).",
        num,
        num * PAGE_SIZE
     );
     println!(
-        "Free : {:>5} pages ({:>9} bytes).",
+        "Free : {:>6} pages ({:>10} bytes).",
        num_pages - num,
        (num_pages - num) * PAGE_SIZE
     );
@@ -459,7 +459,10 @@ pub fn map(root: &mut Table, vaddr: usize, paddr: usize, bits: i64, level: usize
     (ppn[1] << 19) as i64 |   // PPN[1] = [27:19]
     (ppn[0] << 10) as i64 |   // PPN[0] = [18:10]
     bits |                    // Specified bits, such as User, Read, Write, etc
-    EntryBits::Valid.val();   // Valid bit
+    EntryBits::Valid.val() |  // Valid bit
+    EntryBits::Dirty.val() |  // Some machines require this to =1
+    EntryBits::Access.val()   // Just like dirty, some machines require this
+    ;
     // Set the entry. V should be set to the correct pointer by the loop
     // above.
     v.set_entry(entry);
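
The extra Dirty and Access bits matter because some implementations never update A/D in hardware and instead raise a page fault when a PTE with A=0 or D=0 is used, so pre-setting both avoids spurious faults. A hypothetical restatement of the Sv39 leaf-entry layout (not the crate's EntryBits type) showing where those bits live:

    // Sv39 leaf PTE bit positions (RISC-V privileged spec).
    const V: i64 = 1 << 0; // Valid
    const R: i64 = 1 << 1; // Readable
    const W: i64 = 1 << 2; // Writable
    const A: i64 = 1 << 6; // Accessed
    const D: i64 = 1 << 7; // Dirty

    fn leaf_entry(ppn: [i64; 3], bits: i64) -> i64 {
        (ppn[2] << 28) | (ppn[1] << 19) | (ppn[0] << 10) | bits | V | A | D
    }

    fn main() {
        // Identity-map physical address 0x8000_0000 read/write:
        // page number 0x80000 splits into PPN[2]=2, PPN[1]=0, PPN[0]=0.
        let e = leaf_entry([0, 0, 2], R | W);
        println!("pte = {:#x}", e); // 0x2000_00c7
    }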

View File

@@ -9,7 +9,7 @@ extern "C" {
 #[no_mangle]
 extern "C"
-fn s_trap(epc: usize, tval: usize, cause: usize) -> usize {
+fn s_trap(epc: usize, tval: usize, cause: isize) -> usize {
     println!("STRAP (cause: 0x{:x} @ 0x{:x})", cause, epc);
     unsafe {
         // Switch to kernel's page table.
@@ -17,12 +17,47 @@ fn s_trap(epc: usize, tval: usize, cause: usize) -> usize {
         let satp = KERNEL_TABLE >> 12 | 8 << 60;
         asm!("csrw satp, $0" :: "r"(satp));
     }
-    epc + 4
+    if cause < 0 {
+        epc
+    }
+    else {
+        epc + 4
+    }
 }
 #[no_mangle]
 extern "C"
-fn m_trap(epc: usize, tval: usize, cause: usize, hart: usize) -> usize {
-    println!("MTRAP (cause: 0x{:x} @ 0x{:x})", cause, epc);
-    epc + 4
+fn m_trap(epc: usize, tval: usize, cause: isize, hart: usize, stat: usize) -> usize {
+    println!("MTRAP ({}) (cause: 0x{:x} @ 0x{:x}) [0x{:x}]", hart, cause, epc, stat);
+    unsafe {
+        if cause < 0 {
+            // Asynchronous
+            match cause & 0xff {
+                4 | 5 | 7 => {
+                    let satp: usize = KERNEL_TABLE >> 12 | 8 << 60;
+                    println!("Kernel table = 0x{:x}", KERNEL_TABLE);
+                    // asm!("csrw satp, $0" :: "r"(satp) :: "volatile");
+                    // asm!("sfence.vma" :::: "volatile");
+                    // asm!("csrw mie, zero" :::: "volatile");
+                },
+                _ => { println!("Async cause\n"); }
+            }
+        }
+        else {
+            match cause {
+                2 => {
+                    panic!("Illegal instruction");
+                },
+                12 => {
+                    panic!("Instruction page fault.");
+                },
+                13 => {
+                    panic!("Load page fault.");
+                },
+                _ => { println!("Sync cause\n"); }
+            }
+        }
+    }
+    epc
 }
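
Why `cause` changed from usize to isize: the most significant bit of mcause is the interrupt flag, so viewed as a signed integer an asynchronous cause is negative, and the low bits carry the code the match arms dispatch on (7 = machine timer interrupt, 2 = illegal instruction, 12/13 = instruction/load page fault). A small standalone sketch of that decoding, separate from the handler above:

    fn decode_mcause(cause: isize) -> (bool, usize) {
        let is_async = cause < 0;           // top bit set => interrupt
        let code = (cause & 0xff) as usize; // low bits => cause code
        (is_async, code)
    }

    fn main() {
        let mtip = isize::MIN | 7; // interrupt bit set, code 7 (machine timer)
        assert_eq!(decode_mcause(mtip), (true, 7));
        assert_eq!(decode_mcause(13), (false, 13)); // load page fault (synchronous)
    }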