Implement sys_read and allocate PIDs and kernel stacks dynamically.

This commit is contained in:
Yifan Wu 2020-12-08 17:17:28 +08:00
parent 1bc53c0b5f
commit eddbc8954c
7 changed files with 162 additions and 19 deletions

View File

@ -47,6 +47,9 @@ impl MemorySet {
areas: Vec::new(), areas: Vec::new(),
} }
} }
/// Release every frame held by this address space by swapping in a
/// fresh, empty `MemorySet`; dropping the old value returns its page
/// table frames and mapped-area frames to the frame allocator.
pub fn dealloc_all_frames(&mut self) {
    let empty = Self::new_bare();
    // The previous page table and areas are dropped here.
    *self = empty;
}
pub fn token(&self) -> usize { pub fn token(&self) -> usize {
self.page_table.token() self.page_table.token()
} }
@ -59,6 +62,14 @@ impl MemorySet {
permission, permission,
), None); ), None);
} }
/// Remove the mapped area whose range starts at `start_vpn`, unmapping
/// all of its pages from this address space's page table.
///
/// # Panics
/// Panics if no area starts at `start_vpn`.
pub fn remove_area_with_start_vpn(&mut self, start_vpn: VirtPageNum) {
    if let Some((idx, area)) = self.areas.iter_mut().enumerate()
        .find(|(_, area)| area.vpn_range.get_start() == start_vpn) {
        area.unmap(&mut self.page_table);
        self.areas.remove(idx);
    } else {
        // BUG FIX: this panic previously ran unconditionally, firing even
        // after a successful removal; it must only trigger on a miss.
        panic!("Area not found!");
    }
}
fn push(&mut self, mut map_area: MapArea, data: Option<&[u8]>) { fn push(&mut self, mut map_area: MapArea, data: Option<&[u8]>) {
map_area.map(&mut self.page_table); map_area.map(&mut self.page_table);
if let Some(data) = data { if let Some(data) = data {
@ -228,7 +239,6 @@ impl MapArea {
let pte_flags = PTEFlags::from_bits(self.map_perm.bits).unwrap(); let pte_flags = PTEFlags::from_bits(self.map_perm.bits).unwrap();
page_table.map(vpn, ppn, pte_flags); page_table.map(vpn, ppn, pte_flags);
} }
#[allow(unused)]
pub fn unmap_one(&mut self, page_table: &mut PageTable, vpn: VirtPageNum) { pub fn unmap_one(&mut self, page_table: &mut PageTable, vpn: VirtPageNum) {
match self.map_type { match self.map_type {
MapType::Framed => { MapType::Framed => {
@ -243,7 +253,6 @@ impl MapArea {
self.map_one(page_table, vpn); self.map_one(page_table, vpn);
} }
} }
#[allow(unused)]
pub fn unmap(&mut self, page_table: &mut PageTable) { pub fn unmap(&mut self, page_table: &mut PageTable) {
for vpn in self.vpn_range { for vpn in self.vpn_range {
self.unmap_one(page_table, vpn); self.unmap_one(page_table, vpn);

View File

@ -131,7 +131,7 @@ impl PageTable {
} }
} }
pub fn translated_byte_buffer(token: usize, ptr: *const u8, len: usize) -> Vec<&'static [u8]> { pub fn translated_byte_buffer(token: usize, ptr: *const u8, len: usize) -> Vec<&'static mut [u8]> {
let page_table = PageTable::from_token(token); let page_table = PageTable::from_token(token);
let mut start = ptr as usize; let mut start = ptr as usize;
let end = start + len; let end = start + len;
@ -146,7 +146,7 @@ pub fn translated_byte_buffer(token: usize, ptr: *const u8, len: usize) -> Vec<&
vpn.step(); vpn.step();
let mut end_va: VirtAddr = vpn.into(); let mut end_va: VirtAddr = vpn.into();
end_va = end_va.min(VirtAddr::from(end)); end_va = end_va.min(VirtAddr::from(end));
v.push(&ppn.get_bytes_array()[start_va.page_offset()..end_va.page_offset()]); v.push(&mut ppn.get_bytes_array()[start_va.page_offset()..end_va.page_offset()]);
start = end_va.into(); start = end_va.into();
} }
v v

View File

@ -1,6 +1,8 @@
use crate::mm::translated_byte_buffer; use crate::mm::translated_byte_buffer;
use crate::task::current_user_token; use crate::task::{current_user_token, suspend_current_and_run_next};
use crate::sbi::console_getchar;
const FD_STDIN: usize = 0;
const FD_STDOUT: usize = 1; const FD_STDOUT: usize = 1;
pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize { pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
@ -17,3 +19,28 @@ pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
} }
} }
} }
pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
match fd {
FD_STDIN => {
assert_eq!(len, 1, "Only support len = 1 in sys_read!");
let mut c: usize;
loop {
c = console_getchar();
if c == 0 {
suspend_current_and_run_next();
continue;
} else {
break;
}
}
let ch = c as u8;
let mut buffers = translated_byte_buffer(current_user_token(), buf, len);
unsafe { buffers[0].as_mut_ptr().write_volatile(ch); }
1
}
_ => {
panic!("Unsupported fd in sys_read!");
}
}
}

View File

@ -1,3 +1,4 @@
const SYSCALL_READ: usize = 63;
const SYSCALL_WRITE: usize = 64; const SYSCALL_WRITE: usize = 64;
const SYSCALL_EXIT: usize = 93; const SYSCALL_EXIT: usize = 93;
const SYSCALL_YIELD: usize = 124; const SYSCALL_YIELD: usize = 124;
@ -11,6 +12,7 @@ use process::*;
pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize { pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
match syscall_id { match syscall_id {
SYSCALL_READ => sys_read(args[0], args[1] as *const u8, args[2]),
SYSCALL_WRITE => sys_write(args[0], args[1] as *const u8, args[2]), SYSCALL_WRITE => sys_write(args[0], args[1] as *const u8, args[2]),
SYSCALL_EXIT => sys_exit(args[0] as i32), SYSCALL_EXIT => sys_exit(args[0] as i32),
SYSCALL_YIELD => sys_yield(), SYSCALL_YIELD => sys_yield(),

View File

@ -3,6 +3,7 @@ mod switch;
mod task; mod task;
mod manager; mod manager;
mod processor; mod processor;
mod pid;
use crate::loader::{get_num_app, get_app_data}; use crate::loader::{get_num_app, get_app_data};
use crate::trap::TrapContext; use crate::trap::TrapContext;
@ -14,6 +15,7 @@ use alloc::vec::Vec;
use alloc::sync::Arc; use alloc::sync::Arc;
use spin::Mutex; use spin::Mutex;
use manager::fetch_task; use manager::fetch_task;
use pid::{PidHandle, pid_alloc, KernelStack};
pub use context::TaskContext; pub use context::TaskContext;
pub use processor::{ pub use processor::{

104
os/src/task/pid.rs Normal file
View File

@ -0,0 +1,104 @@
use alloc::vec::Vec;
use lazy_static::*;
use spin::Mutex;
use crate::mm::{KERNEL_SPACE, MapPermission, VirtAddr};
use crate::config::{
PAGE_SIZE,
TRAMPOLINE,
KERNEL_STACK_SIZE,
};
/// A simple recycling pid allocator: hands out fresh ids in increasing
/// order and reuses ids returned through `dealloc`.
struct PidAllocator {
    // Next never-before-used pid.
    current: usize,
    // Pids that were freed and may be handed out again.
    recycled: Vec<usize>,
}

impl PidAllocator {
    /// Create an allocator with no pids handed out yet.
    pub fn new() -> Self {
        PidAllocator {
            current: 0,
            recycled: Vec::new(),
        }
    }
    /// Allocate a pid, preferring a recycled one over a fresh one.
    pub fn alloc(&mut self) -> PidHandle {
        if let Some(pid) = self.recycled.pop() {
            PidHandle(pid)
        } else {
            self.current += 1;
            PidHandle(self.current - 1)
        }
    }
    /// Return `pid` to the pool.
    ///
    /// # Panics
    /// Panics if `pid` was never allocated or is already free
    /// (double-free detection).
    pub fn dealloc(&mut self, pid: usize) {
        assert!(pid < self.current);
        // Idiom fix: `!contains(..)` replaces `iter().find(..).is_none()`
        // (clippy::search_is_some).
        assert!(
            !self.recycled.contains(&pid),
            "pid {} has been deallocated!", pid
        );
        self.recycled.push(pid);
    }
}
// Global pid allocator, lazily initialized on first use and protected
// by a spinlock so it can be reached from any task context.
lazy_static! {
static ref PID_ALLOCATOR : Mutex<PidAllocator> = Mutex::new(PidAllocator::new());
}
/// Owning handle for a process id: dropping the handle returns the pid
/// to the global allocator automatically.
pub struct PidHandle(pub usize);

impl Drop for PidHandle {
    fn drop(&mut self) {
        let pid = self.0;
        PID_ALLOCATOR.lock().dealloc(pid);
    }
}
/// Allocate a fresh pid from the global allocator.
pub fn pid_alloc() -> PidHandle {
    let mut allocator = PID_ALLOCATOR.lock();
    allocator.alloc()
}
/// Return (bottom, top) of the kernel stack of app `app_id` in kernel
/// space.
///
/// Stacks are laid out top-down starting just below the trampoline
/// page; each slot is the stack itself plus one extra page that stays
/// unmapped, separating adjacent stacks.
pub fn kernel_stack_position(app_id: usize) -> (usize, usize) {
    let slot = KERNEL_STACK_SIZE + PAGE_SIZE;
    let top = TRAMPOLINE - app_id * slot;
    let bottom = top - KERNEL_STACK_SIZE;
    (bottom, top)
}
/// Kernel stack of a process; its location in kernel space is derived
/// from the owning pid via `kernel_stack_position`.
pub struct KernelStack {
    // Pid that owns this stack; only the pid is stored, the address
    // range is recomputed on demand.
    pid: usize,
}
impl KernelStack {
    /// Allocate and map a kernel stack for `pid_handle` in kernel space.
    pub fn new(pid_handle: &PidHandle) -> Self {
        let pid = pid_handle.0;
        let (kernel_stack_bottom, kernel_stack_top) = kernel_stack_position(pid);
        KERNEL_SPACE
            .lock()
            .insert_framed_area(
                kernel_stack_bottom.into(),
                kernel_stack_top.into(),
                MapPermission::R | MapPermission::W,
            );
        // Consistency fix: reuse the `pid` local instead of re-reading
        // `pid_handle.0`.
        KernelStack { pid }
    }
    /// Copy `value` onto the top of this kernel stack and return a raw
    /// pointer to its new location.
    pub fn push_on_top<T>(&self, value: T) -> *mut T where
        T: Sized, {
        let (_, kernel_stack_top) = kernel_stack_position(self.pid);
        let ptr_mut = (kernel_stack_top - core::mem::size_of::<T>()) as *mut T;
        // SAFETY: `new` mapped [bottom, top) R/W, and the pid uniquely
        // determines this stack, so the destination is owned, mapped
        // memory. BUG FIX: use `write` instead of `*ptr_mut = value` —
        // plain assignment would first drop the (uninitialized) old `T`
        // at the destination, which is undefined behavior for any `T`
        // with a `Drop` impl.
        unsafe { ptr_mut.write(value); }
        ptr_mut
    }
    /// Highest address (exclusive top) of this kernel stack.
    pub fn get_top(&self) -> usize {
        let (_, kernel_stack_top) = kernel_stack_position(self.pid);
        kernel_stack_top
    }
}
impl Drop for KernelStack {
fn drop(&mut self) {
let (kernel_stack_bottom, _) = kernel_stack_position(self.pid);
let kernel_stack_bottom_va: VirtAddr = kernel_stack_bottom.into();
KERNEL_SPACE
.lock()
.remove_area_with_start_vpn(kernel_stack_bottom_va.into());
}
}

View File

@ -2,12 +2,14 @@ use crate::mm::{MemorySet, MapPermission, PhysPageNum, KERNEL_SPACE, VirtAddr};
use crate::trap::{TrapContext, trap_handler}; use crate::trap::{TrapContext, trap_handler};
use crate::config::{TRAP_CONTEXT, kernel_stack_position}; use crate::config::{TRAP_CONTEXT, kernel_stack_position};
use super::TaskContext; use super::TaskContext;
use super::{PidHandle, pid_alloc, KernelStack};
pub struct TaskControlBlock { pub struct TaskControlBlock {
// immutable // immutable
pub trap_cx_ppn: PhysPageNum, pub trap_cx_ppn: PhysPageNum,
pub base_size: usize, pub base_size: usize,
//pub pid: usize, pub pid: PidHandle,
pub kernel_stack: KernelStack,
// mutable // mutable
pub task_cx_ptr: usize, pub task_cx_ptr: usize,
pub task_status: TaskStatus, pub task_status: TaskStatus,
@ -32,23 +34,20 @@ impl TaskControlBlock {
.unwrap() .unwrap()
.ppn(); .ppn();
let task_status = TaskStatus::Ready; let task_status = TaskStatus::Ready;
// map a kernel-stack in kernel space // alloc a pid and a kernel stack in kernel space
let (kernel_stack_bottom, kernel_stack_top) = kernel_stack_position(app_id); let pid_handle = pid_alloc();
KERNEL_SPACE let kernel_stack = KernelStack::new(&pid_handle);
.lock() let kernel_stack_top = kernel_stack.get_top();
.insert_framed_area( // push a task context which goes to trap_return to the top of kernel stack
kernel_stack_bottom.into(), let task_cx_ptr = kernel_stack.push_on_top(TaskContext::goto_trap_return());
kernel_stack_top.into(),
MapPermission::R | MapPermission::W,
);
let task_cx_ptr = (kernel_stack_top - core::mem::size_of::<TaskContext>()) as *mut TaskContext;
unsafe { *task_cx_ptr = TaskContext::goto_trap_return(); }
let task_control_block = Self { let task_control_block = Self {
trap_cx_ppn,
base_size: user_sp,
pid: pid_handle,
kernel_stack,
task_cx_ptr: task_cx_ptr as usize, task_cx_ptr: task_cx_ptr as usize,
task_status, task_status,
memory_set, memory_set,
trap_cx_ppn,
base_size: user_sp,
}; };
// prepare TrapContext in user space // prepare TrapContext in user space
let trap_cx = task_control_block.get_trap_cx(); let trap_cx = task_control_block.get_trap_cx();