1 star, 0 forks
Mirror of https://github.com/sgmarz/osblog.git (synced 2024-11-23 18:06:20 +04:00)

Add Mutex to process list.

This commit is contained in:
Stephen Marz 2020-04-26 20:11:01 -04:00
parent 03018cb55f
commit a1f9d2c6cd
5 changed files with 93 additions and 3 deletions

risc_v/src/lock.rs — new file, 57 lines
View File

@ -0,0 +1,57 @@
// lock.rs
// Locking routines
// Stephen Marz
// 26 Apr 2020
use crate::syscall::syscall_sleep;
// How long a sleep-locking caller waits between lock attempts; passed
// straight to syscall_sleep, so the unit is whatever the sleep syscall
// uses — presumably timer ticks (TODO confirm against the kernel timer).
pub const DEFAULT_LOCK_SLEEP: usize = 10000;
/// The two states of a `Mutex`, stored as a raw 32-bit word so the
/// RISC-V `amoswap.w` instructions in `Mutex::lock`/`Mutex::unlock`
/// can operate on it directly.
#[repr(u32)]
pub enum MutexState {
	// 0 MUST mean "unlocked": Mutex::unlock() atomically stores zero
	// into the state word, and Mutex::lock() swaps in 1 to claim it.
	// The previous discriminants were inverted (Locked = 0, Unlocked = 1),
	// which made lock() report success without actually taking the lock
	// and made an unlock()ed mutex permanently unacquirable.
	Unlocked = 0,
	Locked = 1,
}
/// A mutual-exclusion primitive whose entire state is one 32-bit word,
/// making it a valid target for RISC-V `amoswap` atomic instructions.
/// `#[repr(C)]` pins the layout so a pointer to the struct is a pointer
/// to the state word, as the inline assembly below relies on.
#[repr(C)]
pub struct Mutex {
	// Current lock state; read and written only via atomic swaps.
	state: MutexState,
}
impl Mutex {
	/// Creates a new mutex in the unlocked state. `const` so it can
	/// initialize a `static` (e.g. the global process-list mutex).
	pub const fn new() -> Self {
		Self { state: MutexState::Unlocked, }
	}

	/// Returns a reference to the current lock state, for inspection only.
	/// (The spurious `impl<'a>` lifetime was removed; elision gives the
	/// same signature.)
	pub fn val(&self) -> &MutexState {
		&self.state
	}

	/// Tries to acquire the lock once, without blocking.
	///
	/// Atomically swaps the value 1 into the state word with
	/// `amoswap.w.aq` (acquire ordering) and returns `true` iff the
	/// previous value was `MutexState::Unlocked`.
	pub fn lock(&mut self) -> bool {
		unsafe {
			let ret: MutexState;
			llvm_asm!("amoswap.w.aq $0, $1, ($2)\n" : "=r"(ret) : "r"(1), "r"(self) :: "volatile");
			match ret {
				MutexState::Locked => false,
				MutexState::Unlocked => true,
			}
		}
	}

	/// Acquires the lock, sleeping `DEFAULT_LOCK_SLEEP` between attempts
	/// via the sleep system call.
	/// Do NOT sleep lock inside of an interrupt context!
	pub fn sleep_lock(&mut self) {
		while !self.lock() {
			syscall_sleep(DEFAULT_LOCK_SLEEP);
		}
	}

	/// Acquires the lock by busy-waiting.
	/// Can safely be used inside of an interrupt context.
	pub fn spin_lock(&mut self) {
		while !self.lock() {}
	}

	/// Releases the lock by atomically storing zero into the state word
	/// with `amoswap.w.rl` (release ordering), discarding the old value.
	pub fn unlock(&mut self) {
		unsafe {
			llvm_asm!("amoswap.w.rl zero, zero, ($0)" :: "r"(self) :: "volatile");
		}
	}
}

View File

@ -179,6 +179,7 @@ pub mod cpu;
pub mod elf;
pub mod fs;
pub mod kmem;
pub mod lock;
pub mod minixfs;
pub mod page;
pub mod plic;

View File

@ -20,6 +20,7 @@ use crate::{cpu::{build_satp,
syscall::syscall_exit};
use alloc::collections::vec_deque::VecDeque;
use core::ptr::null_mut;
use crate::lock::Mutex;
// How many pages are we going to give a process for their
// stack?
@ -41,6 +42,7 @@ pub const PROCESS_STARTING_ADDR: usize = 0x2000_0000;
// a VecDeque at compile time, so we are somewhat forced to
// do this.
pub static mut PROCESS_LIST: Option<VecDeque<Process>> = None;
pub static mut PROCESS_LIST_MUTEX: Mutex = Mutex::new();
// We can search through the process list to get a new PID, but
// it's probably easier and faster just to increase the pid:
pub static mut NEXT_PID: u16 = 1;
@ -57,6 +59,7 @@ pub fn set_running(pid: u16) -> bool {
// of process pointers.
let mut retval = false;
unsafe {
PROCESS_LIST_MUTEX.spin_lock();
if let Some(mut pl) = PROCESS_LIST.take() {
for proc in pl.iter_mut() {
if proc.pid == pid {
@ -70,6 +73,7 @@ pub fn set_running(pid: u16) -> bool {
// Some(pl).
PROCESS_LIST.replace(pl);
}
PROCESS_LIST_MUTEX.unlock();
}
retval
}
@ -82,6 +86,7 @@ pub fn set_waiting(pid: u16) -> bool {
// of process pointers.
let mut retval = false;
unsafe {
PROCESS_LIST_MUTEX.spin_lock();
if let Some(mut pl) = PROCESS_LIST.take() {
for proc in pl.iter_mut() {
if proc.pid == pid {
@ -95,6 +100,7 @@ pub fn set_waiting(pid: u16) -> bool {
// Some(pl).
PROCESS_LIST.replace(pl);
}
PROCESS_LIST_MUTEX.unlock();
}
retval
}
@ -105,6 +111,7 @@ pub fn set_sleeping(pid: u16, duration: usize) -> bool {
// of process pointers.
let mut retval = false;
unsafe {
PROCESS_LIST_MUTEX.spin_lock();
if let Some(mut pl) = PROCESS_LIST.take() {
for proc in pl.iter_mut() {
if proc.pid == pid {
@ -122,6 +129,7 @@ pub fn set_sleeping(pid: u16, duration: usize) -> bool {
// Some(pl).
PROCESS_LIST.replace(pl);
}
PROCESS_LIST_MUTEX.unlock();
}
retval
}
@ -130,6 +138,7 @@ pub fn set_sleeping(pid: u16, duration: usize) -> bool {
/// this function does nothing.
pub fn delete_process(pid: u16) {
unsafe {
PROCESS_LIST_MUTEX.spin_lock();
if let Some(mut pl) = PROCESS_LIST.take() {
for i in 0..pl.len() {
let p = pl.get_mut(i).unwrap();
@ -145,6 +154,7 @@ pub fn delete_process(pid: u16) {
// Some(pl).
PROCESS_LIST.replace(pl);
}
PROCESS_LIST_MUTEX.unlock();
}
}
@ -152,6 +162,7 @@ pub fn delete_process(pid: u16) {
/// unsafe since the process can be deleted and we'll still have a pointer.
pub unsafe fn get_by_pid(pid: u16) -> *mut Process {
let mut ret = null_mut();
PROCESS_LIST_MUTEX.spin_lock();
if let Some(mut pl) = PROCESS_LIST.take() {
for i in pl.iter_mut() {
if i.get_pid() == pid {
@ -161,7 +172,7 @@ pub unsafe fn get_by_pid(pid: u16) -> *mut Process {
}
PROCESS_LIST.replace(pl);
}
PROCESS_LIST_MUTEX.unlock();
ret
}
@ -203,6 +214,7 @@ pub fn add_process_default(pr: fn()) {
// then move ownership back to the PROCESS_LIST.
// This allows mutual exclusion as anyone else trying to grab
// the process list will get None rather than the Deque.
PROCESS_LIST_MUTEX.spin_lock();
if let Some(mut pl) = PROCESS_LIST.take() {
// .take() will replace PROCESS_LIST with None and give
// us the only copy of the Deque.
@ -213,6 +225,7 @@ pub fn add_process_default(pr: fn()) {
// Some(pl).
PROCESS_LIST.replace(pl);
}
PROCESS_LIST_MUTEX.unlock();
// TODO: When we get to multi-hart processing, we need to keep
// trying to grab the process list. We can do this with an
// atomic instruction. but right now, we're a single-processor
@ -230,6 +243,7 @@ pub fn add_kernel_process(func: fn()) -> u16 {
// then move ownership back to the PROCESS_LIST.
// This allows mutual exclusion as anyone else trying to grab
// the process list will get None rather than the Deque.
unsafe { PROCESS_LIST_MUTEX.spin_lock(); }
if let Some(mut pl) = unsafe { PROCESS_LIST.take() } {
// .take() will replace PROCESS_LIST with None and give
// us the only copy of the Deque.
@ -278,10 +292,12 @@ pub fn add_kernel_process(func: fn()) -> u16 {
// Some(pl).
unsafe {
PROCESS_LIST.replace(pl);
PROCESS_LIST_MUTEX.unlock();
}
my_pid
return my_pid;
}
else {
unsafe { PROCESS_LIST_MUTEX.unlock(); }
// TODO: When we get to multi-hart processing, we need to keep
// trying to grab the process list. We can do this with an
// atomic instruction. but right now, we're a single-processor
@ -311,6 +327,7 @@ pub fn add_kernel_process_args(func: fn(args_ptr: usize), args: usize) -> u16 {
// then move ownership back to the PROCESS_LIST.
// This allows mutual exclusion as anyone else trying to grab
// the process list will get None rather than the Deque.
unsafe {PROCESS_LIST_MUTEX.spin_lock(); }
if let Some(mut pl) = unsafe { PROCESS_LIST.take() } {
// .take() will replace PROCESS_LIST with None and give
// us the only copy of the Deque.
@ -360,10 +377,14 @@ pub fn add_kernel_process_args(func: fn(args_ptr: usize), args: usize) -> u16 {
// Some(pl).
unsafe {
PROCESS_LIST.replace(pl);
PROCESS_LIST_MUTEX.unlock();
}
my_pid
}
else {
unsafe {
PROCESS_LIST_MUTEX.unlock();
}
// TODO: When we get to multi-hart processing, we need to keep
// trying to grab the process list. We can do this with an
// atomic instruction. but right now, we're a single-processor

View File

@ -180,6 +180,10 @@ pub fn syscall_block_read(dev: usize,
) as u8
}
/// Issues system call number 10 to put the calling process to sleep for
/// `duration` time units; any value the kernel returns is discarded.
pub fn syscall_sleep(duration: usize) {
	let _ret = do_make_syscall(10, duration, 0, 0, 0, 0, 0);
}
// These system call numbers come from libgloss so that we can use newlib
// for our system calls.
// Libgloss wants the system call number in A7 and arguments in A0..A6

View File

@ -13,7 +13,8 @@ use crate::{cpu::{build_satp,
ProcessData,
ProcessState,
NEXT_PID,
PROCESS_LIST,
PROCESS_LIST,
PROCESS_LIST_MUTEX,
STACK_ADDR,
STACK_PAGES},
syscall::syscall_fs_read};
@ -189,6 +190,9 @@ pub fn test_elf() {
// I took a different tact here than in process.rs. In there I created
// the process while holding onto the process list. It might
// matter since this is asynchronous--it is being ran as a kernel process.
unsafe {
PROCESS_LIST_MUTEX.spin_lock();
}
if let Some(mut pl) = unsafe { PROCESS_LIST.take() } {
// As soon as we push this process on the list, it'll be
// schedule-able.
@ -207,6 +211,9 @@ pub fn test_elf() {
// will be dropped and all of the associated allocations
// will be deallocated through the process' Drop trait.
}
unsafe {
PROCESS_LIST_MUTEX.unlock();
}
println!();
}