From a1f9d2c6cd3f7336cdb5872b6d26083ddd82c2b0 Mon Sep 17 00:00:00 2001 From: Stephen Marz Date: Sun, 26 Apr 2020 20:11:01 -0400 Subject: [PATCH] Add Mutex to process list. --- risc_v/src/lock.rs | 57 +++++++++++++++++++++++++++++++++++++++++++ risc_v/src/main.rs | 1 + risc_v/src/process.rs | 25 +++++++++++++++++-- risc_v/src/syscall.rs | 4 +++ risc_v/src/test.rs | 9 ++++++- 5 files changed, 93 insertions(+), 3 deletions(-) create mode 100644 risc_v/src/lock.rs diff --git a/risc_v/src/lock.rs b/risc_v/src/lock.rs new file mode 100644 index 0000000..6f6514f --- /dev/null +++ b/risc_v/src/lock.rs @@ -0,0 +1,57 @@ +// lock.rs +// Locking routines +// Stephen Marz +// 26 Apr 2020 + +use crate::syscall::syscall_sleep; + +pub const DEFAULT_LOCK_SLEEP: usize = 10000; +#[repr(u32)] +pub enum MutexState { + Unlocked = 0, + Locked = 1, +} + +#[repr(C)] +pub struct Mutex { + state: MutexState, +} + +impl<'a> Mutex { + pub const fn new() -> Self { + Self { state: MutexState::Unlocked, } + } + + pub fn val(&'a self) -> &'a MutexState { + &self.state + } + + pub fn lock(&mut self) -> bool { + unsafe { + let ret: MutexState; + llvm_asm!("amoswap.w.aq $0, $1, ($2)\n" : "=r"(ret) : "r"(1), "r"(self) :: "volatile"); + match ret { + MutexState::Locked => { false }, + MutexState::Unlocked => true, + } + } + } + + /// Do NOT sleep lock inside of an interrupt context! + pub fn sleep_lock(&mut self) { + while self.lock() == false { + syscall_sleep(DEFAULT_LOCK_SLEEP); + } + } + + /// Can safely be used inside of an interrupt context. 
+ pub fn spin_lock(&mut self) { + while self.lock() == false {} + } + + pub fn unlock(&mut self) { + unsafe { + llvm_asm!("amoswap.w.rl zero, zero, ($0)" :: "r"(self) :: "volatile"); + } + } +} diff --git a/risc_v/src/main.rs b/risc_v/src/main.rs index 306d619..9154630 100755 --- a/risc_v/src/main.rs +++ b/risc_v/src/main.rs @@ -179,6 +179,7 @@ pub mod cpu; pub mod elf; pub mod fs; pub mod kmem; +pub mod lock; pub mod minixfs; pub mod page; pub mod plic; diff --git a/risc_v/src/process.rs b/risc_v/src/process.rs index 4a5bcca..4196a60 100644 --- a/risc_v/src/process.rs +++ b/risc_v/src/process.rs @@ -20,6 +20,7 @@ use crate::{cpu::{build_satp, syscall::syscall_exit}; use alloc::collections::vec_deque::VecDeque; use core::ptr::null_mut; +use crate::lock::Mutex; // How many pages are we going to give a process for their // stack? @@ -41,6 +42,7 @@ pub const PROCESS_STARTING_ADDR: usize = 0x2000_0000; // a VecDeque at compile time, so we are somewhat forced to // do this. pub static mut PROCESS_LIST: Option> = None; +pub static mut PROCESS_LIST_MUTEX: Mutex = Mutex::new(); // We can search through the process list to get a new PID, but // it's probably easier and faster just to increase the pid: pub static mut NEXT_PID: u16 = 1; @@ -57,6 +59,7 @@ pub fn set_running(pid: u16) -> bool { // of process pointers. let mut retval = false; unsafe { + PROCESS_LIST_MUTEX.spin_lock(); if let Some(mut pl) = PROCESS_LIST.take() { for proc in pl.iter_mut() { if proc.pid == pid { @@ -70,6 +73,7 @@ pub fn set_running(pid: u16) -> bool { // Some(pl). PROCESS_LIST.replace(pl); } + PROCESS_LIST_MUTEX.unlock(); } retval } @@ -82,6 +86,7 @@ pub fn set_waiting(pid: u16) -> bool { // of process pointers. let mut retval = false; unsafe { + PROCESS_LIST_MUTEX.spin_lock(); if let Some(mut pl) = PROCESS_LIST.take() { for proc in pl.iter_mut() { if proc.pid == pid { @@ -95,6 +100,7 @@ pub fn set_waiting(pid: u16) -> bool { // Some(pl). 
PROCESS_LIST.replace(pl); } + PROCESS_LIST_MUTEX.unlock(); } retval } @@ -105,6 +111,7 @@ pub fn set_sleeping(pid: u16, duration: usize) -> bool { // of process pointers. let mut retval = false; unsafe { + PROCESS_LIST_MUTEX.spin_lock(); if let Some(mut pl) = PROCESS_LIST.take() { for proc in pl.iter_mut() { if proc.pid == pid { @@ -122,6 +129,7 @@ pub fn set_sleeping(pid: u16, duration: usize) -> bool { // Some(pl). PROCESS_LIST.replace(pl); } + PROCESS_LIST_MUTEX.unlock(); } retval } @@ -130,6 +138,7 @@ pub fn set_sleeping(pid: u16, duration: usize) -> bool { /// this function does nothing. pub fn delete_process(pid: u16) { unsafe { + PROCESS_LIST_MUTEX.spin_lock(); if let Some(mut pl) = PROCESS_LIST.take() { for i in 0..pl.len() { let p = pl.get_mut(i).unwrap(); @@ -145,6 +154,7 @@ pub fn delete_process(pid: u16) { // Some(pl). PROCESS_LIST.replace(pl); } + PROCESS_LIST_MUTEX.unlock(); } } @@ -152,6 +162,7 @@ pub fn delete_process(pid: u16) { /// unsafe since the process can be deleted and we'll still have a pointer. pub unsafe fn get_by_pid(pid: u16) -> *mut Process { let mut ret = null_mut(); + PROCESS_LIST_MUTEX.spin_lock(); if let Some(mut pl) = PROCESS_LIST.take() { for i in pl.iter_mut() { if i.get_pid() == pid { @@ -161,7 +172,7 @@ pub unsafe fn get_by_pid(pid: u16) -> *mut Process { } PROCESS_LIST.replace(pl); } - + PROCESS_LIST_MUTEX.unlock(); ret } @@ -203,6 +214,7 @@ pub fn add_process_default(pr: fn()) { // then move ownership back to the PROCESS_LIST. // This allows mutual exclusion as anyone else trying to grab // the process list will get None rather than the Deque. + PROCESS_LIST_MUTEX.spin_lock(); if let Some(mut pl) = PROCESS_LIST.take() { // .take() will replace PROCESS_LIST with None and give // us the only copy of the Deque. @@ -213,6 +225,7 @@ pub fn add_process_default(pr: fn()) { // Some(pl). 
PROCESS_LIST.replace(pl); } + PROCESS_LIST_MUTEX.unlock(); // TODO: When we get to multi-hart processing, we need to keep // trying to grab the process list. We can do this with an // atomic instruction. but right now, we're a single-processor @@ -230,6 +243,7 @@ pub fn add_kernel_process(func: fn()) -> u16 { // then move ownership back to the PROCESS_LIST. // This allows mutual exclusion as anyone else trying to grab // the process list will get None rather than the Deque. + unsafe { PROCESS_LIST_MUTEX.spin_lock(); } if let Some(mut pl) = unsafe { PROCESS_LIST.take() } { // .take() will replace PROCESS_LIST with None and give // us the only copy of the Deque. @@ -278,10 +292,12 @@ pub fn add_kernel_process(func: fn()) -> u16 { // Some(pl). unsafe { PROCESS_LIST.replace(pl); + PROCESS_LIST_MUTEX.unlock(); } - my_pid + return my_pid; } else { + unsafe { PROCESS_LIST_MUTEX.unlock(); } // TODO: When we get to multi-hart processing, we need to keep // trying to grab the process list. We can do this with an // atomic instruction. but right now, we're a single-processor @@ -311,6 +327,7 @@ pub fn add_kernel_process_args(func: fn(args_ptr: usize), args: usize) -> u16 { // then move ownership back to the PROCESS_LIST. // This allows mutual exclusion as anyone else trying to grab // the process list will get None rather than the Deque. + unsafe {PROCESS_LIST_MUTEX.spin_lock(); } if let Some(mut pl) = unsafe { PROCESS_LIST.take() } { // .take() will replace PROCESS_LIST with None and give // us the only copy of the Deque. @@ -360,10 +377,14 @@ pub fn add_kernel_process_args(func: fn(args_ptr: usize), args: usize) -> u16 { // Some(pl). unsafe { PROCESS_LIST.replace(pl); + PROCESS_LIST_MUTEX.unlock(); } my_pid } else { + unsafe { + PROCESS_LIST_MUTEX.unlock(); + } // TODO: When we get to multi-hart processing, we need to keep // trying to grab the process list. We can do this with an // atomic instruction. 
but right now, we're a single-processor diff --git a/risc_v/src/syscall.rs b/risc_v/src/syscall.rs index 88903ee..e192e38 100755 --- a/risc_v/src/syscall.rs +++ b/risc_v/src/syscall.rs @@ -180,6 +180,10 @@ pub fn syscall_block_read(dev: usize, ) as u8 } +pub fn syscall_sleep(duration: usize) +{ + let _ = do_make_syscall(10, duration, 0, 0, 0, 0, 0); +} // These system call numbers come from libgloss so that we can use newlib // for our system calls. // Libgloss wants the system call number in A7 and arguments in A0..A6 diff --git a/risc_v/src/test.rs b/risc_v/src/test.rs index 1a37ea7..441702e 100644 --- a/risc_v/src/test.rs +++ b/risc_v/src/test.rs @@ -13,7 +13,8 @@ use crate::{cpu::{build_satp, ProcessData, ProcessState, NEXT_PID, - PROCESS_LIST, + PROCESS_LIST, + PROCESS_LIST_MUTEX, STACK_ADDR, STACK_PAGES}, syscall::syscall_fs_read}; @@ -189,6 +190,9 @@ pub fn test_elf() { // I took a different tact here than in process.rs. In there I created // the process while holding onto the process list. It might // matter since this is asynchronous--it is being ran as a kernel process. + unsafe { + PROCESS_LIST_MUTEX.spin_lock(); + } if let Some(mut pl) = unsafe { PROCESS_LIST.take() } { // As soon as we push this process on the list, it'll be // schedule-able. @@ -207,6 +211,9 @@ pub fn test_elf() { // will be dropped and all of the associated allocations // will be deallocated through the process' Drop trait. } + unsafe { + PROCESS_LIST_MUTEX.unlock(); + } println!(); }