Merge pull request #70 from hypocrasy/ch6

Ch6
This commit is contained in:
chyyuu 2022-05-03 20:08:22 +08:00 committed by GitHub
commit 40b5370f6b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
36 changed files with 378 additions and 119 deletions

View File

@ -16,10 +16,10 @@ jobs:
rustup component add llvm-tools-preview
rustup component add rust-src
cd os
cargo doc --no-deps --verbose
- name: Deploy to Github Pages
cargo doc --document-private-items --verbose
- name: Push to gh-pages
uses: peaceiris/actions-gh-pages@v3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./os/target/riscv64gc-unknown-none-elf/doc
destination_dir: ${{ github.ref_name }}
destination_dir: ${{ github.ref_name }}

View File

@ -1,16 +1,16 @@
use super::{get_block_cache, BlockDevice, BLOCK_SZ};
use alloc::sync::Arc;
/// A bitmap block
type BitmapBlock = [u64; 64];
/// Number of bits in a block
const BLOCK_BITS: usize = BLOCK_SZ * 8;
/// A bitmap
pub struct Bitmap {
start_block_id: usize,
blocks: usize,
}
/// Return (block_pos, bits64_pos, inner_pos)
/// Decompose bits into (block_pos, bits64_pos, inner_pos)
fn decomposition(mut bit: usize) -> (usize, usize, usize) {
let block_pos = bit / BLOCK_BITS;
bit %= BLOCK_BITS;
@ -18,13 +18,14 @@ fn decomposition(mut bit: usize) -> (usize, usize, usize) {
}
impl Bitmap {
/// Create a new bitmap from a start block id and a number of blocks
pub fn new(start_block_id: usize, blocks: usize) -> Self {
Self {
start_block_id,
blocks,
}
}
/// Allocate a new block from a block device
pub fn alloc(&self, block_device: &Arc<dyn BlockDevice>) -> Option<usize> {
for block_id in 0..self.blocks {
let pos = get_block_cache(
@ -52,7 +53,7 @@ impl Bitmap {
}
None
}
/// Deallocate a block
pub fn dealloc(&self, block_device: &Arc<dyn BlockDevice>, bit: usize) {
let (block_pos, bits64_pos, inner_pos) = decomposition(bit);
get_block_cache(block_pos + self.start_block_id, Arc::clone(block_device))
@ -62,7 +63,7 @@ impl Bitmap {
bitmap_block[bits64_pos] -= 1u64 << inner_pos;
});
}
/// Get the max number of allocatable blocks
pub fn maximum(&self) -> usize {
self.blocks * BLOCK_BITS
}
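
As a worked illustration of the bit-index decomposition documented above, a small standalone sketch (editorial, not part of this commit), assuming the easy-fs block size of 512 bytes:

const BLOCK_SZ: usize = 512;
const BLOCK_BITS: usize = BLOCK_SZ * 8; // 4096 bits tracked per bitmap block

/// Split a global bit index into (bitmap block, u64 word, bit inside the word).
fn decomposition(mut bit: usize) -> (usize, usize, usize) {
    let block_pos = bit / BLOCK_BITS;
    bit %= BLOCK_BITS;
    (block_pos, bit / 64, bit % 64)
}

fn main() {
    let bit = 5000;
    let (block_pos, bits64_pos, inner_pos) = decomposition(bit);
    // 5000 / 4096 = 1, 904 / 64 = 14, 904 % 64 = 8
    assert_eq!((block_pos, bits64_pos, inner_pos), (1, 14, 8));
    // Recombined the same way Bitmap::alloc computes the global bit index it returns.
    assert_eq!(block_pos * BLOCK_BITS + bits64_pos * 64 + inner_pos, bit);
    println!("bit {bit} -> block {block_pos}, word {bits64_pos}, bit {inner_pos}");
}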

View File

@ -3,11 +3,15 @@ use alloc::collections::VecDeque;
use alloc::sync::Arc;
use lazy_static::*;
use spin::Mutex;
/// Cached block inside memory
pub struct BlockCache {
/// cached block data
cache: [u8; BLOCK_SZ],
/// underlying block id
block_id: usize,
/// underlying block device
block_device: Arc<dyn BlockDevice>,
/// whether the block is dirty
modified: bool,
}
@ -23,7 +27,7 @@ impl BlockCache {
modified: false,
}
}
/// Get the address of an offset inside the cached block data
fn addr_of_offset(&self, offset: usize) -> usize {
&self.cache[offset] as *const _ as usize
}
@ -70,7 +74,7 @@ impl Drop for BlockCache {
self.sync()
}
}
/// Use a block cache of 16 blocks
const BLOCK_CACHE_SIZE: usize = 16;
pub struct BlockCacheManager {
@ -118,10 +122,11 @@ impl BlockCacheManager {
}
lazy_static! {
/// The global block cache manager
pub static ref BLOCK_CACHE_MANAGER: Mutex<BlockCacheManager> =
Mutex::new(BlockCacheManager::new());
}
/// Get the block cache corresponding to the given block id and block device
pub fn get_block_cache(
block_id: usize,
block_device: Arc<dyn BlockDevice>,
@ -130,7 +135,7 @@ pub fn get_block_cache(
.lock()
.get_block_cache(block_id, block_device)
}
/// Sync all block caches to the block device
pub fn block_cache_sync_all() {
let manager = BLOCK_CACHE_MANAGER.lock();
for (_, cache) in manager.queue.iter() {
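
For context on how the cache above is consumed, a condensed std-only model (editorial, not part of this commit) of the closure-based read/modify access pattern used through get_block_cache elsewhere in easy-fs:

const BLOCK_SZ: usize = 512;

#[repr(C, align(8))]
struct BlockCache {
    cache: [u8; BLOCK_SZ],
    modified: bool,
}

impl BlockCache {
    fn new() -> Self {
        Self { cache: [0; BLOCK_SZ], modified: false }
    }
    /// Run a read-only closure over a value of type T stored at `offset`.
    fn read<T, V>(&self, offset: usize, f: impl FnOnce(&T) -> V) -> V {
        assert!(offset + core::mem::size_of::<T>() <= BLOCK_SZ);
        // easy-fs only stores naturally aligned on-disk structs inside a block
        let ptr = self.cache[offset..].as_ptr() as *const T;
        f(unsafe { &*ptr })
    }
    /// Run a mutating closure and remember that the block is now dirty.
    fn modify<T, V>(&mut self, offset: usize, f: impl FnOnce(&mut T) -> V) -> V {
        assert!(offset + core::mem::size_of::<T>() <= BLOCK_SZ);
        self.modified = true;
        let ptr = self.cache[offset..].as_mut_ptr() as *mut T;
        f(unsafe { &mut *ptr })
    }
}

fn main() {
    let mut cache = BlockCache::new();
    cache.modify(8, |magic: &mut u32| *magic = 0x3b800001);
    assert_eq!(cache.read(8, |magic: &u32| *magic), 0x3b800001);
    assert!(cache.modified); // a real manager writes dirty blocks back on drop/sync
}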

View File

@ -1,6 +1,9 @@
use core::any::Any;
/// Trait for block devices,
/// which read and write data in units of blocks
pub trait BlockDevice: Send + Sync + Any {
///Read data from a block into the buffer
fn read_block(&self, block_id: usize, buf: &mut [u8]);
///Write data from buffer to block
fn write_block(&self, block_id: usize, buf: &[u8]);
}
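
To make the trait concrete, a minimal RAM-backed implementation sketch (editorial; the MemBlockDevice name and the simplified trait without `Any` are illustrative only):

use std::sync::Mutex;

const BLOCK_SZ: usize = 512;

trait BlockDevice: Send + Sync {
    /// Read the block `block_id` into `buf`.
    fn read_block(&self, block_id: usize, buf: &mut [u8]);
    /// Write `buf` into the block `block_id`.
    fn write_block(&self, block_id: usize, buf: &[u8]);
}

/// A RAM-backed block device: a flat byte vector behind a Mutex.
struct MemBlockDevice(Mutex<Vec<u8>>);

impl MemBlockDevice {
    fn new(total_blocks: usize) -> Self {
        Self(Mutex::new(vec![0u8; total_blocks * BLOCK_SZ]))
    }
}

impl BlockDevice for MemBlockDevice {
    fn read_block(&self, block_id: usize, buf: &mut [u8]) {
        let data = self.0.lock().unwrap();
        let start = block_id * BLOCK_SZ;
        buf.copy_from_slice(&data[start..start + BLOCK_SZ]);
    }
    fn write_block(&self, block_id: usize, buf: &[u8]) {
        let mut data = self.0.lock().unwrap();
        let start = block_id * BLOCK_SZ;
        data[start..start + BLOCK_SZ].copy_from_slice(buf);
    }
}

fn main() {
    let dev = MemBlockDevice::new(16);
    let block = [0xabu8; BLOCK_SZ];
    dev.write_block(3, &block);
    let mut back = [0u8; BLOCK_SZ];
    dev.read_block(3, &mut back);
    assert_eq!(block, back);
}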

View File

@ -5,18 +5,22 @@ use super::{
use crate::BLOCK_SZ;
use alloc::sync::Arc;
use spin::Mutex;
///An easy file system on a block device
pub struct EasyFileSystem {
///Real device
pub block_device: Arc<dyn BlockDevice>,
///Inode bitmap
pub inode_bitmap: Bitmap,
///Data bitmap
pub data_bitmap: Bitmap,
inode_area_start_block: u32,
data_area_start_block: u32,
}
type DataBlock = [u8; BLOCK_SZ];
/// An easy fs over a block device
impl EasyFileSystem {
/// Create and initialize a new easy file system on a block device
pub fn create(
block_device: Arc<dyn BlockDevice>,
total_blocks: u32,
@ -77,7 +81,7 @@ impl EasyFileSystem {
block_cache_sync_all();
Arc::new(Mutex::new(efs))
}
/// Open a block device as a filesystem
pub fn open(block_device: Arc<dyn BlockDevice>) -> Arc<Mutex<Self>> {
// read SuperBlock
get_block_cache(0, Arc::clone(&block_device))
@ -99,7 +103,7 @@ impl EasyFileSystem {
Arc::new(Mutex::new(efs))
})
}
/// Get the root inode of the filesystem
pub fn root_inode(efs: &Arc<Mutex<Self>>) -> Inode {
let block_device = Arc::clone(&efs.lock().block_device);
// acquire efs lock temporarily
@ -107,7 +111,7 @@ impl EasyFileSystem {
// release efs lock
Inode::new(block_id, block_offset, Arc::clone(efs), block_device)
}
/// Get the position (block id, offset within the block) of the inode with the given id
pub fn get_disk_inode_pos(&self, inode_id: u32) -> (u32, usize) {
let inode_size = core::mem::size_of::<DiskInode>();
let inodes_per_block = (BLOCK_SZ / inode_size) as u32;
@ -117,20 +121,20 @@ impl EasyFileSystem {
(inode_id % inodes_per_block) as usize * inode_size,
)
}
/// Get the global block id of the data block with the given index in the data area
pub fn get_data_block_id(&self, data_block_id: u32) -> u32 {
self.data_area_start_block + data_block_id
}
/// Allocate a new inode
pub fn alloc_inode(&mut self) -> u32 {
self.inode_bitmap.alloc(&self.block_device).unwrap() as u32
}
/// Allocate a data block and return its global block id
/// (not its index inside the data area)
pub fn alloc_data(&mut self) -> u32 {
self.data_bitmap.alloc(&self.block_device).unwrap() as u32 + self.data_area_start_block
}
/// Deallocate a data block
pub fn dealloc_data(&mut self, block_id: u32) {
get_block_cache(block_id as usize, Arc::clone(&self.block_device))
.lock()
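
A worked sketch (editorial) of the inode-position arithmetic behind get_disk_inode_pos, assuming the 128-byte DiskInode and 512-byte blocks used by easy-fs:

const BLOCK_SZ: usize = 512;
const INODE_SIZE: usize = 128; // size_of::<DiskInode>(): 4 + 28*4 + 4 + 4 + 4 bytes

/// Map an inode id to (block id, byte offset inside that block).
fn disk_inode_pos(inode_area_start_block: u32, inode_id: u32) -> (u32, usize) {
    let inodes_per_block = (BLOCK_SZ / INODE_SIZE) as u32; // 4 inodes per block
    (
        inode_area_start_block + inode_id / inodes_per_block,
        (inode_id % inodes_per_block) as usize * INODE_SIZE,
    )
}

fn main() {
    // Assume the inode area starts at block 2 (after the super block and inode bitmap).
    let (block, offset) = disk_inode_pos(2, 7);
    assert_eq!((block, offset), (3, 384)); // inode 7 is the 4th inode of block 3
    println!("inode 7 -> block {block}, offset {offset}");
}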

View File

@ -3,16 +3,24 @@ use alloc::sync::Arc;
use alloc::vec::Vec;
use core::fmt::{Debug, Formatter, Result};
/// Magic number for sanity check
const EFS_MAGIC: u32 = 0x3b800001;
/// The max number of direct data blocks an inode can reference
const INODE_DIRECT_COUNT: usize = 28;
/// The max length of a directory entry name
const NAME_LENGTH_LIMIT: usize = 27;
/// The max number of blocks addressable through the level-1 indirect block
const INODE_INDIRECT1_COUNT: usize = BLOCK_SZ / 4;
/// The max number of blocks addressable through the level-2 indirect block
const INODE_INDIRECT2_COUNT: usize = INODE_INDIRECT1_COUNT * INODE_INDIRECT1_COUNT;
/// The upper bound of the direct block index range
const DIRECT_BOUND: usize = INODE_DIRECT_COUNT;
/// The upper bound of the indirect1 block index range
const INDIRECT1_BOUND: usize = DIRECT_BOUND + INODE_INDIRECT1_COUNT;
/// The upper bound of the indirect2 block index range
#[allow(unused)]
const INDIRECT2_BOUND: usize = INDIRECT1_BOUND + INODE_INDIRECT2_COUNT;
/// Super block of a filesystem
#[repr(C)]
pub struct SuperBlock {
magic: u32,
@ -36,6 +44,7 @@ impl Debug for SuperBlock {
}
impl SuperBlock {
/// Initialize a super block
pub fn initialize(
&mut self,
total_blocks: u32,
@ -53,20 +62,23 @@ impl SuperBlock {
data_area_blocks,
}
}
/// Check if a super block is valid using efs magic
pub fn is_valid(&self) -> bool {
self.magic == EFS_MAGIC
}
}
/// Type of a disk inode
#[derive(PartialEq)]
pub enum DiskInodeType {
File,
Directory,
}
/// An indirect block
type IndirectBlock = [u32; BLOCK_SZ / 4];
/// A data block
type DataBlock = [u8; BLOCK_SZ];
/// A disk inode
#[repr(C)]
pub struct DiskInode {
pub size: u32,
@ -77,7 +89,8 @@ pub struct DiskInode {
}
impl DiskInode {
/// indirect1 and indirect2 block are allocated only when they are needed.
/// Initialize a disk inode and zero its direct block pointers;
/// the indirect1 and indirect2 blocks are allocated only when they are needed
pub fn initialize(&mut self, type_: DiskInodeType) {
self.size = 0;
self.direct.iter_mut().for_each(|v| *v = 0);
@ -85,9 +98,11 @@ impl DiskInode {
self.indirect2 = 0;
self.type_ = type_;
}
/// Whether this inode is a directory
pub fn is_dir(&self) -> bool {
self.type_ == DiskInodeType::Directory
}
/// Whether this inode is a file
#[allow(unused)]
pub fn is_file(&self) -> bool {
self.type_ == DiskInodeType::File
@ -116,10 +131,12 @@ impl DiskInode {
}
total as u32
}
/// Get the number of data blocks that have to be allocated given the new size of data
pub fn blocks_num_needed(&self, new_size: u32) -> u32 {
assert!(new_size >= self.size);
Self::total_blocks(new_size) - Self::total_blocks(self.size)
}
/// Get the device block id of the inner_id-th data block of this inode
pub fn get_block_id(&self, inner_id: u32, block_device: &Arc<dyn BlockDevice>) -> u32 {
let inner_id = inner_id as usize;
if inner_id < INODE_DIRECT_COUNT {
@ -144,6 +161,7 @@ impl DiskInode {
})
}
}
/// Increase the size of the current disk inode
pub fn increase_size(
&mut self,
new_size: u32,
@ -218,7 +236,6 @@ impl DiskInode {
}
/// Clear size to zero and return blocks that should be deallocated.
///
/// We will clear the block contents to zero later.
pub fn clear_size(&mut self, block_device: &Arc<dyn BlockDevice>) -> Vec<u32> {
let mut v: Vec<u32> = Vec::new();
@ -291,6 +308,7 @@ impl DiskInode {
self.indirect2 = 0;
v
}
/// Read data from current disk inode
pub fn read_at(
&self,
offset: usize,
@ -330,7 +348,8 @@ impl DiskInode {
}
read_size
}
/// File size must be adjusted before.
/// Write data into current disk inode
/// size must be adjusted properly beforehand
pub fn write_at(
&mut self,
offset: usize,
@ -369,22 +388,24 @@ impl DiskInode {
write_size
}
}
/// A directory entry
#[repr(C)]
pub struct DirEntry {
name: [u8; NAME_LENGTH_LIMIT + 1],
inode_number: u32,
}
/// Size of a directory entry
pub const DIRENT_SZ: usize = 32;
impl DirEntry {
/// Create an empty directory entry
pub fn empty() -> Self {
Self {
name: [0u8; NAME_LENGTH_LIMIT + 1],
inode_number: 0,
}
}
/// Create a directory entry from a name and an inode number
pub fn new(name: &str, inode_number: u32) -> Self {
let mut bytes = [0u8; NAME_LENGTH_LIMIT + 1];
bytes[..name.len()].copy_from_slice(name.as_bytes());
@ -393,16 +414,20 @@ impl DirEntry {
inode_number,
}
}
/// Serialize into bytes
pub fn as_bytes(&self) -> &[u8] {
unsafe { core::slice::from_raw_parts(self as *const _ as usize as *const u8, DIRENT_SZ) }
}
/// Serialize into mutable bytes
pub fn as_bytes_mut(&mut self) -> &mut [u8] {
unsafe { core::slice::from_raw_parts_mut(self as *mut _ as usize as *mut u8, DIRENT_SZ) }
}
/// Get name of the entry
pub fn name(&self) -> &str {
let len = (0usize..).find(|i| self.name[*i] == 0).unwrap();
core::str::from_utf8(&self.name[..len]).unwrap()
}
/// Get inode number of the entry
pub fn inode_number(&self) -> u32 {
self.inode_number
}
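
To make the direct/indirect1/indirect2 bounds above concrete, a standalone sketch (editorial) that classifies an inner data-block index the way get_block_id does and computes the resulting maximum file size:

const BLOCK_SZ: usize = 512;
const INODE_DIRECT_COUNT: usize = 28;
const INODE_INDIRECT1_COUNT: usize = BLOCK_SZ / 4; // 128 block ids per indirect block
const INODE_INDIRECT2_COUNT: usize = INODE_INDIRECT1_COUNT * INODE_INDIRECT1_COUNT; // 16384
const DIRECT_BOUND: usize = INODE_DIRECT_COUNT;
const INDIRECT1_BOUND: usize = DIRECT_BOUND + INODE_INDIRECT1_COUNT; // 156

/// Classify an inner data-block index into the region that stores its block id.
fn region(inner_id: usize) -> &'static str {
    if inner_id < DIRECT_BOUND {
        "direct"
    } else if inner_id < INDIRECT1_BOUND {
        "indirect1"
    } else {
        "indirect2"
    }
}

fn main() {
    assert_eq!(region(27), "direct");
    assert_eq!(region(28), "indirect1");
    assert_eq!(region(156), "indirect2");
    // 28 + 128 + 16384 = 16540 data blocks, i.e. 16540 * 512 = 8_468_480 bytes (~8 MiB) per file
    let max_blocks = INODE_DIRECT_COUNT + INODE_INDIRECT1_COUNT + INODE_INDIRECT2_COUNT;
    println!("max file size: {} bytes", max_blocks * BLOCK_SZ);
}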

View File

@ -1,14 +1,14 @@
//!An easy file system isolated from the kernel
#![no_std]
#![deny(missing_docs)]
extern crate alloc;
mod bitmap;
mod block_cache;
mod block_dev;
mod efs;
mod layout;
mod vfs;
/// Use a block size of 512 bytes
pub const BLOCK_SZ: usize = 512;
use bitmap::Bitmap;
use block_cache::{block_cache_sync_all, get_block_cache};

View File

@ -6,7 +6,7 @@ use alloc::string::String;
use alloc::sync::Arc;
use alloc::vec::Vec;
use spin::{Mutex, MutexGuard};
/// Virtual filesystem layer over easy-fs
pub struct Inode {
block_id: usize,
block_offset: usize,
@ -15,7 +15,7 @@ pub struct Inode {
}
impl Inode {
/// We should not acquire efs lock here.
/// Create a vfs inode
pub fn new(
block_id: u32,
block_offset: usize,
@ -29,19 +29,19 @@ impl Inode {
block_device,
}
}
/// Call a function over a disk inode to read it
fn read_disk_inode<V>(&self, f: impl FnOnce(&DiskInode) -> V) -> V {
get_block_cache(self.block_id, Arc::clone(&self.block_device))
.lock()
.read(self.block_offset, f)
}
/// Call a function over a disk inode to modify it
fn modify_disk_inode<V>(&self, f: impl FnOnce(&mut DiskInode) -> V) -> V {
get_block_cache(self.block_id, Arc::clone(&self.block_device))
.lock()
.modify(self.block_offset, f)
}
/// Find inode under a disk inode by name
fn find_inode_id(&self, name: &str, disk_inode: &DiskInode) -> Option<u32> {
// assert it is a directory
assert!(disk_inode.is_dir());
@ -58,7 +58,7 @@ impl Inode {
}
None
}
/// Find inode under current inode by name
pub fn find(&self, name: &str) -> Option<Arc<Inode>> {
let fs = self.fs.lock();
self.read_disk_inode(|disk_inode| {
@ -73,7 +73,7 @@ impl Inode {
})
})
}
/// Increase the size of a disk inode
fn increase_size(
&self,
new_size: u32,
@ -90,7 +90,7 @@ impl Inode {
}
disk_inode.increase_size(new_size, v, &self.block_device);
}
/// Create inode under current inode by name
pub fn create(&self, name: &str) -> Option<Arc<Inode>> {
let mut fs = self.fs.lock();
let op = |root_inode: &DiskInode| {
@ -138,7 +138,7 @@ impl Inode {
)))
// release efs lock automatically by compiler
}
/// List inodes under current inode
pub fn ls(&self) -> Vec<String> {
let _fs = self.fs.lock();
self.read_disk_inode(|disk_inode| {
@ -155,12 +155,12 @@ impl Inode {
v
})
}
/// Read data from current inode
pub fn read_at(&self, offset: usize, buf: &mut [u8]) -> usize {
let _fs = self.fs.lock();
self.read_disk_inode(|disk_inode| disk_inode.read_at(offset, buf, &self.block_device))
}
/// Write data to current inode
pub fn write_at(&self, offset: usize, buf: &[u8]) -> usize {
let mut fs = self.fs.lock();
let size = self.modify_disk_inode(|disk_inode| {
@ -170,7 +170,7 @@ impl Inode {
block_cache_sync_all();
size
}
/// Clear the data in current inode
pub fn clear(&self) {
let mut fs = self.fs.lock();
self.modify_disk_inode(|disk_inode| {
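
A standalone model (editorial, with fabricated directory contents) of the DIRENT_SZ-stepped directory scan that find_inode_id and ls perform through DiskInode::read_at:

use std::convert::TryInto;

const NAME_LENGTH_LIMIT: usize = 27;
const DIRENT_SZ: usize = 32; // a 28-byte name field followed by a 4-byte inode number

/// Scan raw directory bytes in DIRENT_SZ steps and return the inode number stored next to `name`.
fn find_inode_id(dir_data: &[u8], name: &str) -> Option<u32> {
    for chunk in dir_data.chunks_exact(DIRENT_SZ) {
        let end = chunk[..=NAME_LENGTH_LIMIT].iter().position(|&b| b == 0).unwrap();
        if &chunk[..end] == name.as_bytes() {
            return Some(u32::from_le_bytes(chunk[28..32].try_into().unwrap()));
        }
    }
    None
}

fn main() {
    // Two fabricated entries: "initproc" -> inode 1, "user_shell" -> inode 2.
    let mut dir = vec![0u8; 2 * DIRENT_SZ];
    dir[..8].copy_from_slice(b"initproc");
    dir[28..32].copy_from_slice(&1u32.to_le_bytes());
    dir[32..42].copy_from_slice(b"user_shell");
    dir[60..64].copy_from_slice(&2u32.to_le_bytes());
    assert_eq!(find_inode_id(&dir, "user_shell"), Some(2));
    assert_eq!(find_inode_id(&dir, "missing"), None);
}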

View File

@ -1,3 +1,4 @@
//! Constants used in rCore
#[allow(unused)]
pub const USER_STACK_SIZE: usize = 4096 * 2;

View File

@ -1,3 +1,4 @@
//! SBI console driver, for text output
use crate::sbi::console_putchar;
use core::fmt::{self, Write};
@ -17,6 +18,7 @@ pub fn print(args: fmt::Arguments) {
}
#[macro_export]
/// print string macro
macro_rules! print {
($fmt: literal $(, $($arg: tt)+)?) => {
$crate::console::print(format_args!($fmt $(, $($arg)+)?));
@ -24,6 +26,7 @@ macro_rules! print {
}
#[macro_export]
/// println string macro
macro_rules! println {
($fmt: literal $(, $($arg: tt)+)?) => {
$crate::console::print(format_args!(concat!($fmt, "\n") $(, $($arg)+)?));

View File

@ -1,3 +1,9 @@
//! `Arc<Inode>` -> `OSInodeInner`: In order to open files concurrently,
//! we need to wrap `Inode` into `Arc`, but the `Mutex` in `Inode` prevents
//! the file system from being accessed simultaneously
//!
//! `UPSafeCell<OSInodeInner>` -> `OSInode`: for the static `ROOT_INODE`, we
//! need to wrap `OSInodeInner` into `UPSafeCell`
use super::File;
use crate::drivers::BLOCK_DEVICE;
use crate::mm::UserBuffer;
@ -7,19 +13,21 @@ use alloc::vec::Vec;
use bitflags::*;
use easy_fs::{EasyFileSystem, Inode};
use lazy_static::*;
/// A wrapper around a filesystem inode
/// to implement File trait atop
pub struct OSInode {
readable: bool,
writable: bool,
inner: UPSafeCell<OSInodeInner>,
}
/// The inner part of an OS inode, protected by a `UPSafeCell`
pub struct OSInodeInner {
offset: usize,
inode: Arc<Inode>,
}
impl OSInode {
/// Construct an OS inode from an inode
pub fn new(readable: bool, writable: bool, inode: Arc<Inode>) -> Self {
Self {
readable,
@ -27,6 +35,7 @@ impl OSInode {
inner: unsafe { UPSafeCell::new(OSInodeInner { offset: 0, inode }) },
}
}
/// Read all data inside an inode into a vector
pub fn read_all(&self) -> Vec<u8> {
let mut inner = self.inner.exclusive_access();
let mut buffer = [0u8; 512];
@ -49,7 +58,7 @@ lazy_static! {
Arc::new(EasyFileSystem::root_inode(&efs))
};
}
/// List all files in the filesystem
pub fn list_apps() {
println!("/**** APPS ****");
for app in ROOT_INODE.ls() {
@ -58,12 +67,18 @@ pub fn list_apps() {
println!("**************/");
}
bitflags! {
///Open file flags
pub struct OpenFlags: u32 {
///Read only
const RDONLY = 0;
///Write only
const WRONLY = 1 << 0;
///Read & Write
const RDWR = 1 << 1;
///Allow create
const CREATE = 1 << 9;
///Clear file and return an empty one
const TRUNC = 1 << 10;
}
}
@ -81,7 +96,7 @@ impl OpenFlags {
}
}
}
///Open file with flags
pub fn open_file(name: &str, flags: OpenFlags) -> Option<Arc<OSInode>> {
let (readable, writable) = flags.read_write();
if flags.contains(OpenFlags::CREATE) {
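
A small sketch (editorial; plain integer constants instead of the bitflags type) of how the flag bits above map to the (readable, writable) pair that open_file derives via OpenFlags::read_write:

const RDONLY: u32 = 0;
const WRONLY: u32 = 1 << 0;
const RDWR: u32 = 1 << 1;
const CREATE: u32 = 1 << 9;

/// Derive (readable, writable) from open flags; CREATE/TRUNC only affect creation and truncation.
fn read_write(flags: u32) -> (bool, bool) {
    if flags == 0 {
        (true, false) // RDONLY
    } else if flags & WRONLY != 0 {
        (false, true) // write only
    } else {
        (true, true) // RDWR, possibly combined with CREATE or TRUNC
    }
}

fn main() {
    assert_eq!(read_write(RDONLY), (true, false));
    assert_eq!(read_write(WRONLY | CREATE), (false, true));
    assert_eq!(read_write(RDWR), (true, true));
}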

View File

@ -1,12 +1,17 @@
//! File system in os
mod inode;
mod stdio;
use crate::mm::UserBuffer;
/// File trait
pub trait File: Send + Sync {
/// If readable
fn readable(&self) -> bool;
/// If writable
fn writable(&self) -> bool;
/// Read file to `UserBuffer`
fn read(&self, buf: UserBuffer) -> usize;
/// Write `UserBuffer` to file
fn write(&self, buf: UserBuffer) -> usize;
}

View File

@ -1,10 +1,11 @@
//!Stdin & Stdout
use super::File;
use crate::mm::UserBuffer;
use crate::sbi::console_getchar;
use crate::task::suspend_current_and_run_next;
///Standard input
pub struct Stdin;
///Standard output
pub struct Stdout;
impl File for Stdin {

View File

@ -1,3 +1,4 @@
//! The panic handler
use crate::sbi::shutdown;
use core::panic::PanicInfo;

View File

@ -1,3 +1,26 @@
//! The main module and entrypoint
//!
//! Various facilities of the kernels are implemented as submodules. The most
//! important ones are:
//!
//! - [`trap`]: Handles all cases of switching from userspace to the kernel
//! - [`task`]: Task management
//! - [`syscall`]: System call handling and implementation
//! - [`mm`]: Address map using SV39
//! - [`sync`]: Wrap static data structures so that we can access them without any `unsafe`
//! - [`fs`]: File abstractions that separate user programs from the underlying file system
//!
//! The operating system also starts in this module. Kernel code starts
//! executing from `entry.asm`, after which [`rust_main()`] is called to
//! initialize various pieces of functionality. (See its source code for
//! details.)
//!
//! We then call [`task::run_tasks()`] and for the first time go to
//! userspace.
#![deny(missing_docs)]
#![deny(warnings)]
#![allow(unused_imports)]
#![no_std]
#![no_main]
#![feature(panic_info_message)]
@ -19,20 +42,20 @@ mod board;
mod console;
mod config;
mod drivers;
mod fs;
mod lang_items;
mod mm;
mod sbi;
mod sync;
mod syscall;
mod task;
mod timer;
mod trap;
pub mod fs;
pub mod lang_items;
pub mod mm;
pub mod sbi;
pub mod sync;
pub mod syscall;
pub mod task;
pub mod timer;
pub mod trap;
use core::arch::global_asm;
global_asm!(include_str!("entry.asm"));
/// clear BSS segment
fn clear_bss() {
extern "C" {
fn sbss();
@ -45,6 +68,7 @@ fn clear_bss() {
}
#[no_mangle]
/// the rust entry-point of os
pub fn rust_main() -> ! {
clear_bss();
println!("[kernel] Hello, world!");

View File

@ -1,3 +1,4 @@
//! Implementation of physical and virtual address and page number.
use super::PageTableEntry;
use crate::config::{PAGE_SIZE, PAGE_SIZE_BITS};
use core::fmt::{self, Debug, Formatter};
@ -14,14 +15,17 @@ pub struct PhysAddr(pub usize);
#[repr(C)]
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
///virtual address
pub struct VirtAddr(pub usize);
#[repr(C)]
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
///physical page number
pub struct PhysPageNum(pub usize);
#[repr(C)]
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
///virtual page number
pub struct VirtPageNum(pub usize);
/// Debugging
@ -91,17 +95,21 @@ impl From<VirtPageNum> for usize {
v.0
}
}
///Virtual address conversions and checks
impl VirtAddr {
///`VirtAddr`->`VirtPageNum`, rounding down
pub fn floor(&self) -> VirtPageNum {
VirtPageNum(self.0 / PAGE_SIZE)
}
///`VirtAddr`->`VirtPageNum`, rounding up
pub fn ceil(&self) -> VirtPageNum {
VirtPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE)
}
///Get page offset
pub fn page_offset(&self) -> usize {
self.0 & (PAGE_SIZE - 1)
}
///Check page aligned
pub fn aligned(&self) -> bool {
self.page_offset() == 0
}
@ -118,15 +126,19 @@ impl From<VirtPageNum> for VirtAddr {
}
}
impl PhysAddr {
///`PhysAddr`->`PhysPageNum`, rounding down
pub fn floor(&self) -> PhysPageNum {
PhysPageNum(self.0 / PAGE_SIZE)
}
///`PhysAddr`->`PhysPageNum`, rounding up
pub fn ceil(&self) -> PhysPageNum {
PhysPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE)
}
///Get page offset
pub fn page_offset(&self) -> usize {
self.0 & (PAGE_SIZE - 1)
}
///Check page aligned
pub fn aligned(&self) -> bool {
self.page_offset() == 0
}
@ -144,6 +156,7 @@ impl From<PhysPageNum> for PhysAddr {
}
impl VirtPageNum {
///Return the three-level page-table indexes of the virtual page number
pub fn indexes(&self) -> [usize; 3] {
let mut vpn = self.0;
let mut idx = [0usize; 3];
@ -156,29 +169,35 @@ impl VirtPageNum {
}
impl PhysAddr {
///Get an immutable reference to the value at this physical address
pub fn get_ref<T>(&self) -> &'static T {
unsafe { (self.0 as *const T).as_ref().unwrap() }
}
///Get a mutable reference to the value at this physical address
pub fn get_mut<T>(&self) -> &'static mut T {
unsafe { (self.0 as *mut T).as_mut().unwrap() }
}
}
impl PhysPageNum {
///Get the array of page table entries in the frame at this physical page
pub fn get_pte_array(&self) -> &'static mut [PageTableEntry] {
let pa: PhysAddr = (*self).into();
unsafe { core::slice::from_raw_parts_mut(pa.0 as *mut PageTableEntry, 512) }
}
///Get the byte array of the frame at this physical page
pub fn get_bytes_array(&self) -> &'static mut [u8] {
let pa: PhysAddr = (*self).into();
unsafe { core::slice::from_raw_parts_mut(pa.0 as *mut u8, 4096) }
}
///Get a mutable reference to the value in the frame at this physical page
pub fn get_mut<T>(&self) -> &'static mut T {
let pa: PhysAddr = (*self).into();
pa.get_mut()
}
}
///Add value by one
pub trait StepByOne {
///Add value by one
fn step(&mut self);
}
impl StepByOne for VirtPageNum {
@ -193,6 +212,7 @@ impl StepByOne for PhysPageNum {
}
#[derive(Copy, Clone)]
/// a simple range structure for type T
pub struct SimpleRange<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
@ -225,6 +245,7 @@ where
SimpleRangeIterator::new(self.l, self.r)
}
}
/// iterator for the simple range structure
pub struct SimpleRangeIterator<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
@ -255,4 +276,5 @@ where
}
}
}
/// a simple range structure for virtual page number
pub type VPNRange = SimpleRange<VirtPageNum>;
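
A worked sketch (editorial) of the SV39 address arithmetic documented above: page floor/ceil, page offset, and the three 9-bit page-table indexes of a virtual page number:

const PAGE_SIZE: usize = 4096;

fn floor(va: usize) -> usize { va / PAGE_SIZE }
fn ceil(va: usize) -> usize { (va + PAGE_SIZE - 1) / PAGE_SIZE }
fn page_offset(va: usize) -> usize { va & (PAGE_SIZE - 1) }

/// Split a virtual page number into its three 9-bit SV39 indexes, highest level first.
fn indexes(mut vpn: usize) -> [usize; 3] {
    let mut idx = [0usize; 3];
    for i in (0..3).rev() {
        idx[i] = vpn & 511;
        vpn >>= 9;
    }
    idx
}

fn main() {
    let va = 0x8020_1234usize;
    assert_eq!(floor(va), 0x80201);
    assert_eq!(ceil(va), 0x80202);
    assert_eq!(page_offset(va), 0x234);
    // 0x80201 = 0b10_000000001_000000001 -> page-table indexes [2, 1, 1]
    assert_eq!(indexes(0x80201), [2, 1, 1]);
}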

View File

@ -1,3 +1,5 @@
//! Implementation of [`FrameAllocator`] which
//! controls all the frames in the operating system.
use super::{PhysAddr, PhysPageNum};
use crate::config::MEMORY_END;
use crate::sync::UPSafeCell;
@ -5,11 +7,14 @@ use alloc::vec::Vec;
use core::fmt::{self, Debug, Formatter};
use lazy_static::*;
/// manage a frame which has the same lifecycle as the tracker
pub struct FrameTracker {
///The physical page number tracked by this frame
pub ppn: PhysPageNum,
}
impl FrameTracker {
///Create an empty `FrameTracker`
pub fn new(ppn: PhysPageNum) -> Self {
// page cleaning
let bytes_array = ppn.get_bytes_array();
@ -37,7 +42,7 @@ trait FrameAllocator {
fn alloc(&mut self) -> Option<PhysPageNum>;
fn dealloc(&mut self, ppn: PhysPageNum);
}
/// an implementation for frame allocator
pub struct StackFrameAllocator {
current: usize,
end: usize,
@ -83,10 +88,11 @@ impl FrameAllocator for StackFrameAllocator {
type FrameAllocatorImpl = StackFrameAllocator;
lazy_static! {
/// frame allocator instance through lazy_static!
pub static ref FRAME_ALLOCATOR: UPSafeCell<FrameAllocatorImpl> =
unsafe { UPSafeCell::new(FrameAllocatorImpl::new()) };
}
/// initialize the frame allocator using `ekernel` and `MEMORY_END`
pub fn init_frame_allocator() {
extern "C" {
fn ekernel();
@ -96,19 +102,20 @@ pub fn init_frame_allocator() {
PhysAddr::from(MEMORY_END).floor(),
);
}
/// allocate a frame
pub fn frame_alloc() -> Option<FrameTracker> {
FRAME_ALLOCATOR
.exclusive_access()
.alloc()
.map(FrameTracker::new)
}
/// deallocate a frame
pub fn frame_dealloc(ppn: PhysPageNum) {
FRAME_ALLOCATOR.exclusive_access().dealloc(ppn);
}
#[allow(unused)]
/// a simple test for frame allocator
pub fn frame_allocator_test() {
let mut v: Vec<FrameTracker> = Vec::new();
for i in 0..5 {

View File

@ -1,16 +1,19 @@
//! The global allocator
use crate::config::KERNEL_HEAP_SIZE;
use buddy_system_allocator::LockedHeap;
#[global_allocator]
/// heap allocator instance
static HEAP_ALLOCATOR: LockedHeap = LockedHeap::empty();
#[alloc_error_handler]
/// panic when heap allocation error occurs
pub fn handle_alloc_error(layout: core::alloc::Layout) -> ! {
panic!("Heap allocation error, layout = {:?}", layout);
}
/// heap space ([u8; KERNEL_HEAP_SIZE])
static mut HEAP_SPACE: [u8; KERNEL_HEAP_SIZE] = [0; KERNEL_HEAP_SIZE];
/// initialize the heap allocator
pub fn init_heap() {
unsafe {
HEAP_ALLOCATOR

View File

@ -1,3 +1,4 @@
//! Implementation of [`MapArea`] and [`MemorySet`].
use super::{frame_alloc, FrameTracker};
use super::{PTEFlags, PageTable, PageTableEntry};
use super::{PhysAddr, PhysPageNum, VirtAddr, VirtPageNum};
@ -25,26 +26,29 @@ extern "C" {
}
lazy_static! {
/// a memory set instance through lazy_static! managing kernel space
pub static ref KERNEL_SPACE: Arc<UPSafeCell<MemorySet>> =
Arc::new(unsafe { UPSafeCell::new(MemorySet::new_kernel()) });
}
///Get the page table token of the kernel space
pub fn kernel_token() -> usize {
KERNEL_SPACE.exclusive_access().token()
}
/// memory set structure, controls virtual-memory space
pub struct MemorySet {
page_table: PageTable,
areas: Vec<MapArea>,
}
impl MemorySet {
///Create an empty `MemorySet`
pub fn new_bare() -> Self {
Self {
page_table: PageTable::new(),
areas: Vec::new(),
}
}
///Get the page table token of this memory set
pub fn token(&self) -> usize {
self.page_table.token()
}
@ -60,6 +64,7 @@ impl MemorySet {
None,
);
}
///Remove `MapArea` that starts with `start_vpn`
pub fn remove_area_with_start_vpn(&mut self, start_vpn: VirtPageNum) {
if let Some((idx, area)) = self
.areas
@ -231,6 +236,7 @@ impl MemorySet {
elf.header.pt2.entry_point() as usize,
)
}
///Create a new `MemorySet` by copying an existing user address space
pub fn from_existed_user(user_space: &MemorySet) -> MemorySet {
let mut memory_set = Self::new_bare();
// map trampoline
@ -250,6 +256,7 @@ impl MemorySet {
}
memory_set
}
///Write the page-table token into satp and flush the TLB with `sfence.vma`
pub fn activate(&self) {
let satp = self.page_table.token();
unsafe {
@ -257,15 +264,17 @@ impl MemorySet {
asm!("sfence.vma");
}
}
///Translate a virtual page number through the page table
pub fn translate(&self, vpn: VirtPageNum) -> Option<PageTableEntry> {
self.page_table.translate(vpn)
}
///Remove all `MapArea`
pub fn recycle_data_pages(&mut self) {
//*self = Self::new_bare();
self.areas.clear();
}
}
/// map area structure, controls a contiguous piece of virtual memory
pub struct MapArea {
vpn_range: VPNRange,
data_frames: BTreeMap<VirtPageNum, FrameTracker>,
@ -353,21 +362,28 @@ impl MapArea {
}
#[derive(Copy, Clone, PartialEq, Debug)]
/// map type for memory set: identical or framed
pub enum MapType {
Identical,
Framed,
}
bitflags! {
/// map permission corresponding to that in pte: `R W X U`
pub struct MapPermission: u8 {
///Readable
const R = 1 << 1;
///Writable
const W = 1 << 2;
///Executable
const X = 1 << 3;
///Accessible in U mode
const U = 1 << 4;
}
}
#[allow(unused)]
///Check that the kernel page table is working correctly
pub fn remap_test() {
let mut kernel_space = KERNEL_SPACE.exclusive_access();
let mid_text: VirtAddr = ((stext as usize + etext as usize) / 2).into();
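
A small sketch (editorial; plain integer constants instead of the bitflags type) of how the map permissions above line up with the PTE R/W/X/U bits, and how they would be composed from standard ELF segment flags (PF_X = 1, PF_W = 2, PF_R = 4), in the spirit of MemorySet::from_elf:

const R: u8 = 1 << 1;
const W: u8 = 1 << 2;
const X: u8 = 1 << 3;
const U: u8 = 1 << 4;

/// Build the mapping permission of a user segment from ELF p_flags.
fn perm_from_elf_flags(p_flags: u32) -> u8 {
    let mut perm = U; // user segments must be reachable from U mode
    if p_flags & 0x4 != 0 { perm |= R; }
    if p_flags & 0x2 != 0 { perm |= W; }
    if p_flags & 0x1 != 0 { perm |= X; }
    perm
}

fn main() {
    // A typical .text segment: readable and executable.
    assert_eq!(perm_from_elf_flags(0x5), U | R | X);
    // A typical .data/.bss segment: readable and writable.
    assert_eq!(perm_from_elf_flags(0x6), U | R | W);
}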

View File

@ -1,3 +1,10 @@
//! Memory management implementation
//!
//! SV39 page-based virtual-memory architecture for RV64 systems, and
//! everything about memory management, like frame allocator, page table,
//! map area and memory set, is implemented here.
//!
//! Every task or process has a memory_set to control its virtual memory.
mod address;
mod frame_allocator;
mod heap_allocator;
@ -14,7 +21,7 @@ pub use page_table::{
translated_byte_buffer, translated_ref, translated_refmut, translated_str, PageTable,
PageTableEntry, UserBuffer, UserBufferIterator,
};
/// initialize the heap allocator, the frame allocator and the kernel space
pub fn init() {
heap_allocator::init_heap();
frame_allocator::init_frame_allocator();

View File

@ -1,3 +1,4 @@
//! Implementation of [`PageTableEntry`] and [`PageTable`].
use super::{frame_alloc, FrameTracker, PhysAddr, PhysPageNum, StepByOne, VirtAddr, VirtPageNum};
use alloc::string::String;
use alloc::vec;
@ -19,39 +20,49 @@ bitflags! {
#[derive(Copy, Clone)]
#[repr(C)]
/// page table entry structure
pub struct PageTableEntry {
///PTE
pub bits: usize,
}
impl PageTableEntry {
///Create a PTE from ppn
pub fn new(ppn: PhysPageNum, flags: PTEFlags) -> Self {
PageTableEntry {
bits: ppn.0 << 10 | flags.bits as usize,
}
}
///Return an empty PTE
pub fn empty() -> Self {
PageTableEntry { bits: 0 }
}
///Return 44bit ppn
pub fn ppn(&self) -> PhysPageNum {
(self.bits >> 10 & ((1usize << 44) - 1)).into()
}
///Return 10bit flag
pub fn flags(&self) -> PTEFlags {
PTEFlags::from_bits(self.bits as u8).unwrap()
}
///Check PTE valid
pub fn is_valid(&self) -> bool {
(self.flags() & PTEFlags::V) != PTEFlags::empty()
}
///Check PTE readable
pub fn readable(&self) -> bool {
(self.flags() & PTEFlags::R) != PTEFlags::empty()
}
///Check PTE writable
pub fn writable(&self) -> bool {
(self.flags() & PTEFlags::W) != PTEFlags::empty()
}
///Check PTE executable
pub fn executable(&self) -> bool {
(self.flags() & PTEFlags::X) != PTEFlags::empty()
}
}
///Records the root ppn and keeps the frames holding all levels of page table entries alive for the same lifetime
pub struct PageTable {
root_ppn: PhysPageNum,
frames: Vec<FrameTracker>,
@ -59,6 +70,7 @@ pub struct PageTable {
/// Assume that it won't oom when creating/mapping.
impl PageTable {
/// Create an empty `PageTable`
pub fn new() -> Self {
let frame = frame_alloc().unwrap();
PageTable {
@ -73,6 +85,7 @@ impl PageTable {
frames: Vec::new(),
}
}
/// Find the page table entry of a virtual page, creating intermediate page-table frames if they do not exist
fn find_pte_create(&mut self, vpn: VirtPageNum) -> Option<&mut PageTableEntry> {
let idxs = vpn.indexes();
let mut ppn = self.root_ppn;
@ -92,6 +105,7 @@ impl PageTable {
}
result
}
/// Find the page table entry of a virtual page without creating new frames
fn find_pte(&self, vpn: VirtPageNum) -> Option<&mut PageTableEntry> {
let idxs = vpn.indexes();
let mut ppn = self.root_ppn;
@ -110,20 +124,24 @@ impl PageTable {
result
}
#[allow(unused)]
/// Create a mapping from `vpn` to `ppn`
pub fn map(&mut self, vpn: VirtPageNum, ppn: PhysPageNum, flags: PTEFlags) {
let pte = self.find_pte_create(vpn).unwrap();
assert!(!pte.is_valid(), "vpn {:?} is mapped before mapping", vpn);
*pte = PageTableEntry::new(ppn, flags | PTEFlags::V);
}
#[allow(unused)]
/// Delete the mapping of `vpn`
pub fn unmap(&mut self, vpn: VirtPageNum) {
let pte = self.find_pte(vpn).unwrap();
assert!(pte.is_valid(), "vpn {:?} is invalid before unmapping", vpn);
*pte = PageTableEntry::empty();
}
/// Translate `VirtPageNum` to `PageTableEntry`
pub fn translate(&self, vpn: VirtPageNum) -> Option<PageTableEntry> {
self.find_pte(vpn).map(|pte| *pte)
}
/// Translate `VirtAddr` to `PhysAddr`
pub fn translate_va(&self, va: VirtAddr) -> Option<PhysAddr> {
self.find_pte(va.clone().floor()).map(|pte| {
let aligned_pa: PhysAddr = pte.ppn().into();
@ -132,12 +150,13 @@ impl PageTable {
(aligned_pa_usize + offset).into()
})
}
/// Get the satp token (Sv39 mode plus the root ppn) of this page table
pub fn token(&self) -> usize {
8usize << 60 | self.root_ppn.0
}
}
pub fn translated_byte_buffer(token: usize, ptr: *const u8, len: usize) -> Vec<&'static mut [u8]> {
/// Translate a user-space buffer pointer into a Vec of mutable byte slices in kernel space
pub fn translated_byte_buffer(token: usize, ptr: *const u8, len: usize) -> Vec<&'static mut [u8]> {
let page_table = PageTable::from_token(token);
let mut start = ptr as usize;
let end = start + len;
@ -159,7 +178,7 @@ pub fn translated_byte_buffer(token: usize, ptr: *const u8, len: usize) -> Vec<&
v
}
/// Load a string from other address spaces into kernel space without an end `\0`.
/// Translate a user-space pointer to a `\0`-terminated string into a kernel `String` (without the trailing `\0`)
pub fn translated_str(token: usize, ptr: *const u8) -> String {
let page_table = PageTable::from_token(token);
let mut string = String::new();
@ -179,6 +198,7 @@ pub fn translated_str(token: usize, ptr: *const u8) -> String {
}
#[allow(unused)]
///Translate a user-space pointer to a generic `T` through the page table and return an immutable reference
pub fn translated_ref<T>(token: usize, ptr: *const T) -> &'static T {
let page_table = PageTable::from_token(token);
page_table
@ -186,7 +206,7 @@ pub fn translated_ref<T>(token: usize, ptr: *const T) -> &'static T {
.unwrap()
.get_ref()
}
///Translate a user-space pointer to a generic `T` through the page table and return a mutable reference
pub fn translated_refmut<T>(token: usize, ptr: *mut T) -> &'static mut T {
let page_table = PageTable::from_token(token);
let va = ptr as usize;
@ -195,15 +215,18 @@ pub fn translated_refmut<T>(token: usize, ptr: *mut T) -> &'static mut T {
.unwrap()
.get_mut()
}
///An array of u8 slices through which user space and the OS exchange data
pub struct UserBuffer {
///U8 vec
pub buffers: Vec<&'static mut [u8]>,
}
impl UserBuffer {
///Create a `UserBuffer` from a vector of byte slices
pub fn new(buffers: Vec<&'static mut [u8]>) -> Self {
Self { buffers }
}
///Length of `UserBuffer`
pub fn len(&self) -> usize {
let mut total: usize = 0;
for b in self.buffers.iter() {
@ -224,7 +247,7 @@ impl IntoIterator for UserBuffer {
}
}
}
/// Iterator of `UserBuffer`
pub struct UserBufferIterator {
buffers: Vec<&'static mut [u8]>,
current_buffer: usize,
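
A worked sketch (editorial) of the SV39 PTE packing used by PageTableEntry and of the satp token built by PageTable::token: bits [53:10] hold the 44-bit ppn, the low byte holds the flags:

const V: usize = 1 << 0;
const R: usize = 1 << 1;
const W: usize = 1 << 2;

fn pte_new(ppn: usize, flags: usize) -> usize {
    ppn << 10 | flags
}
fn pte_ppn(bits: usize) -> usize {
    (bits >> 10) & ((1usize << 44) - 1)
}
fn pte_flags(bits: usize) -> usize {
    bits & 0xff
}

fn main() {
    let pte = pte_new(0x80401, V | R | W);
    assert_eq!(pte_ppn(pte), 0x80401);
    assert_eq!(pte_flags(pte), V | R | W);
    assert!(pte_flags(pte) & V != 0); // the entry is valid
    // The satp token is built the same way: mode 8 (Sv39) in the top bits plus the root ppn.
    let token = 8usize << 60 | 0x80401;
    assert_eq!(token & ((1usize << 44) - 1), 0x80401);
}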

View File

@ -1,3 +1,4 @@
//! SBI call wrappers
#![allow(unused)]
use core::arch::asm;
@ -11,7 +12,7 @@ const SBI_REMOTE_FENCE_I: usize = 5;
const SBI_REMOTE_SFENCE_VMA: usize = 6;
const SBI_REMOTE_SFENCE_VMA_ASID: usize = 7;
const SBI_SHUTDOWN: usize = 8;
/// general sbi call
#[inline(always)]
fn sbi_call(which: usize, arg0: usize, arg1: usize, arg2: usize) -> usize {
let mut ret;
@ -26,19 +27,19 @@ fn sbi_call(which: usize, arg0: usize, arg1: usize, arg2: usize) -> usize {
}
ret
}
/// use sbi call to set timer
pub fn set_timer(timer: usize) {
sbi_call(SBI_SET_TIMER, timer, 0, 0);
}
/// use sbi call to putchar in console (qemu uart handler)
pub fn console_putchar(c: usize) {
sbi_call(SBI_CONSOLE_PUTCHAR, c, 0, 0);
}
/// use sbi call to getchar from console (qemu uart handler)
pub fn console_getchar() -> usize {
sbi_call(SBI_CONSOLE_GETCHAR, 0, 0, 0)
}
/// use sbi call to shutdown the kernel
pub fn shutdown() -> ! {
sbi_call(SBI_SHUTDOWN, 0, 0, 0);
panic!("It should shutdown!");

View File

@ -1,3 +1,4 @@
//! Synchronization and interior mutability primitives
mod up;
pub use up::UPSafeCell;

View File

@ -1,3 +1,4 @@
//! Uniprocessor interior mutability primitives
use core::cell::{RefCell, RefMut};
/// Wrap a static data structure inside it so that we are
@ -22,7 +23,7 @@ impl<T> UPSafeCell<T> {
inner: RefCell::new(value),
}
}
/// Panic if the data has been borrowed.
/// Exclusively access the inner data of the UPSafeCell. Panics if the data has already been borrowed.
pub fn exclusive_access(&self) -> RefMut<'_, T> {
self.inner.borrow_mut()
}
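
A std-only model (editorial) of the wrapper above, showing the borrow discipline that exclusive_access enforces:

use std::cell::{RefCell, RefMut};

/// RefCell plus the (unsafe) promise that it is only ever touched from one hart.
struct UPSafeCell<T> {
    inner: RefCell<T>,
}

// The kernel version does the same: safe only because the kernel runs on a uniprocessor here.
unsafe impl<T> Sync for UPSafeCell<T> {}

impl<T> UPSafeCell<T> {
    unsafe fn new(value: T) -> Self {
        Self { inner: RefCell::new(value) }
    }
    /// Exclusive access to the inner data; panics if it is already borrowed.
    fn exclusive_access(&self) -> RefMut<'_, T> {
        self.inner.borrow_mut()
    }
}

fn main() {
    let cell = unsafe { UPSafeCell::new(0usize) };
    *cell.exclusive_access() += 1; // the borrow ends with the statement
    assert_eq!(*cell.exclusive_access(), 1);
    let held = cell.exclusive_access();
    // Calling exclusive_access() again while `held` is alive would panic:
    // that early panic is exactly the re-entrancy bug this wrapper surfaces.
    drop(held);
}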

View File

@ -1,3 +1,4 @@
//! File and filesystem-related syscalls
use crate::fs::{open_file, OpenFlags};
use crate::mm::{translated_byte_buffer, translated_str, UserBuffer};
use crate::task::{current_task, current_user_token};

View File

@ -1,3 +1,14 @@
//! Implementation of syscalls
//!
//! The single entry point to all system calls, [`syscall()`], is called
//! whenever userspace wishes to perform a system call using the `ecall`
//! instruction. In this case, the processor raises an 'Environment call from
//! U-mode' exception, which is handled as one of the cases in
//! [`crate::trap::trap_handler`].
//!
//! For clarity, each single syscall is implemented as its own function, named
//! `sys_` then the name of the syscall. You can find functions like this in
//! submodules, and you should also implement syscalls this way.
const SYSCALL_OPEN: usize = 56;
const SYSCALL_CLOSE: usize = 57;
const SYSCALL_READ: usize = 63;
@ -15,7 +26,7 @@ mod process;
use fs::*;
use process::*;
/// handle syscall exception with `syscall_id` and other arguments
pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
match syscall_id {
SYSCALL_OPEN => sys_open(args[0] as *const u8, args[1] as u32),
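
For reference, a sketch (editorial, RISC-V only; it mirrors the user library rather than this kernel commit) of the user-space side of the dispatch described above: the syscall id goes in a7, arguments in a0-a2, then `ecall` traps into trap_handler, which calls syscall():

const SYSCALL_WRITE: usize = 64;

fn syscall(id: usize, args: [usize; 3]) -> isize {
    let mut ret: isize;
    unsafe {
        core::arch::asm!(
            "ecall",
            inlateout("x10") args[0] => ret, // a0: first argument, reused for the return value
            in("x11") args[1],               // a1
            in("x12") args[2],               // a2
            in("x17") id,                    // a7: syscall id
        );
    }
    ret
}

pub fn sys_write(fd: usize, buf: &[u8]) -> isize {
    syscall(SYSCALL_WRITE, [fd, buf.as_ptr() as usize, buf.len()])
}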

View File

@ -1,13 +1,19 @@
//! Implementation of [`TaskContext`]
use crate::trap::trap_return;
#[repr(C)]
/// task context structure containing some registers
pub struct TaskContext {
/// return address ( e.g. __restore ) of __switch ASM function
ra: usize,
/// kernel stack pointer of app
sp: usize,
/// s0-11 register, callee saved
s: [usize; 12],
}
impl TaskContext {
/// init task context
pub fn zero_init() -> Self {
Self {
ra: 0,
@ -15,6 +21,7 @@ impl TaskContext {
s: [0; 12],
}
}
/// Create a task context that returns to the `trap_return` ASM function, with sp set to kstack_ptr and the s registers zeroed
pub fn goto_trap_return(kstack_ptr: usize) -> Self {
Self {
ra: trap_return as usize,

View File

@ -1,23 +1,27 @@
//!Implementation of [`TaskManager`]
use super::TaskControlBlock;
use crate::sync::UPSafeCell;
use alloc::collections::VecDeque;
use alloc::sync::Arc;
use lazy_static::*;
///An array of `TaskControlBlock`s that is thread-safe
pub struct TaskManager {
ready_queue: VecDeque<Arc<TaskControlBlock>>,
}
/// A simple FIFO scheduler.
impl TaskManager {
///Create an empty TaskManager
pub fn new() -> Self {
Self {
ready_queue: VecDeque::new(),
}
}
///Add a task to `TaskManager`
pub fn add(&mut self, task: Arc<TaskControlBlock>) {
self.ready_queue.push_back(task);
}
///Remove the first task and return it, or `None` if `TaskManager` is empty
pub fn fetch(&mut self) -> Option<Arc<TaskControlBlock>> {
self.ready_queue.pop_front()
}
@ -27,11 +31,11 @@ lazy_static! {
pub static ref TASK_MANAGER: UPSafeCell<TaskManager> =
unsafe { UPSafeCell::new(TaskManager::new()) };
}
///Interface offered to add task
pub fn add_task(task: Arc<TaskControlBlock>) {
TASK_MANAGER.exclusive_access().add(task);
}
///Interface offered to pop the first task
pub fn fetch_task() -> Option<Arc<TaskControlBlock>> {
TASK_MANAGER.exclusive_access().fetch()
}

View File

@ -1,25 +1,42 @@
//! Task management implementation
//!
//! Everything about task management, like starting and switching tasks is
//! implemented here.
//!
//! A single global instance of [`TaskManager`] called `TASK_MANAGER` controls
//! all the tasks in the whole operating system.
//!
//! A single global instance of [`Processor`] called `PROCESSOR` monitors running
//! task(s) for each core.
//!
//! A single global instance of [`PidAllocator`] called `PID_ALLOCATOR` allocates
//! pid for user apps.
//!
//! Be careful when you see `__switch` ASM function in `switch.S`. Control flow around this function
//! might not be what you expect.
mod context;
mod manager;
mod pid;
mod processor;
mod switch;
#[allow(clippy::module_inception)]
#[allow(rustdoc::private_intra_doc_links)]
mod task;
use crate::fs::{open_file, OpenFlags};
use alloc::sync::Arc;
pub use context::TaskContext;
use lazy_static::*;
use manager::fetch_task;
pub use manager::{fetch_task, TaskManager};
use switch::__switch;
use task::{TaskControlBlock, TaskStatus};
pub use manager::add_task;
pub use pid::{pid_alloc, KernelStack, PidHandle};
pub use pid::{pid_alloc, KernelStack, PidHandle, PidAllocator};
pub use processor::{
current_task, current_trap_cx, current_user_token, run_tasks, schedule, take_current_task,
current_task, current_trap_cx, current_user_token, run_tasks, schedule, take_current_task, Processor,
};
/// Suspend the current 'Running' task and run the next task in task list.
pub fn suspend_current_and_run_next() {
// There must be an application running.
let task = take_current_task().unwrap();
@ -37,7 +54,7 @@ pub fn suspend_current_and_run_next() {
// jump to scheduling cycle
schedule(task_cx_ptr);
}
/// Exit the current 'Running' task and run the next task in task list.
pub fn exit_current_and_run_next(exit_code: i32) {
// take from Processor
let task = take_current_task().unwrap();
@ -72,13 +89,14 @@ pub fn exit_current_and_run_next(exit_code: i32) {
}
lazy_static! {
///Global init process that starts the user shell
pub static ref INITPROC: Arc<TaskControlBlock> = Arc::new({
let inode = open_file("initproc", OpenFlags::RDONLY).unwrap();
let v = inode.read_all();
TaskControlBlock::new(v.as_slice())
});
}
///Add init process to the manager
pub fn add_initproc() {
add_task(INITPROC.clone());
}

View File

@ -1,21 +1,24 @@
//!Implementation of [`PidAllocator`]
use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE, TRAMPOLINE};
use crate::mm::{MapPermission, VirtAddr, KERNEL_SPACE};
use crate::sync::UPSafeCell;
use alloc::vec::Vec;
use lazy_static::*;
struct PidAllocator {
///Pid Allocator struct
pub struct PidAllocator {
current: usize,
recycled: Vec<usize>,
}
impl PidAllocator {
///Create an empty `PidAllocator`
pub fn new() -> Self {
PidAllocator {
current: 0,
recycled: Vec::new(),
}
}
///Allocate a pid
pub fn alloc(&mut self) -> PidHandle {
if let Some(pid) = self.recycled.pop() {
PidHandle(pid)
@ -24,6 +27,7 @@ impl PidAllocator {
PidHandle(self.current - 1)
}
}
///Recycle a pid
pub fn dealloc(&mut self, pid: usize) {
assert!(pid < self.current);
assert!(
@ -36,10 +40,10 @@ impl PidAllocator {
}
lazy_static! {
static ref PID_ALLOCATOR: UPSafeCell<PidAllocator> =
pub static ref PID_ALLOCATOR: UPSafeCell<PidAllocator> =
unsafe { UPSafeCell::new(PidAllocator::new()) };
}
///Bind pid lifetime to `PidHandle`
pub struct PidHandle(pub usize);
impl Drop for PidHandle {
@ -48,7 +52,7 @@ impl Drop for PidHandle {
PID_ALLOCATOR.exclusive_access().dealloc(self.0);
}
}
///Allocate a pid from PID_ALLOCATOR
pub fn pid_alloc() -> PidHandle {
PID_ALLOCATOR.exclusive_access().alloc()
}
@ -59,12 +63,13 @@ pub fn kernel_stack_position(app_id: usize) -> (usize, usize) {
let bottom = top - KERNEL_STACK_SIZE;
(bottom, top)
}
///Kernel stack for an app
pub struct KernelStack {
pid: usize,
}
impl KernelStack {
///Create a kernel stack for the given pid
pub fn new(pid_handle: &PidHandle) -> Self {
let pid = pid_handle.0;
let (kernel_stack_bottom, kernel_stack_top) = kernel_stack_position(pid);
@ -76,6 +81,7 @@ impl KernelStack {
KernelStack { pid: pid_handle.0 }
}
#[allow(unused)]
///Push a value on top of the kernel stack
pub fn push_on_top<T>(&self, value: T) -> *mut T
where
T: Sized,
@ -87,6 +93,7 @@ impl KernelStack {
}
ptr_mut
}
///Get the address of the top of the kernel stack
pub fn get_top(&self) -> usize {
let (_, kernel_stack_top) = kernel_stack_position(self.pid);
kernel_stack_top

View File

@ -1,3 +1,4 @@
//!Implementation of [`Processor`] and the intersection of control flows
use super::__switch;
use super::{fetch_task, TaskStatus};
use super::{TaskContext, TaskControlBlock};
@ -5,25 +6,31 @@ use crate::sync::UPSafeCell;
use crate::trap::TrapContext;
use alloc::sync::Arc;
use lazy_static::*;
///Processor management structure
pub struct Processor {
///The task currently executing on the current processor
current: Option<Arc<TaskControlBlock>>,
///The basic control flow of each core, helping to select and switch process
idle_task_cx: TaskContext,
}
impl Processor {
///Create an empty Processor
pub fn new() -> Self {
Self {
current: None,
idle_task_cx: TaskContext::zero_init(),
}
}
///Get mutable reference to `idle_task_cx`
fn get_idle_task_cx_ptr(&mut self) -> *mut TaskContext {
&mut self.idle_task_cx as *mut _
}
///Take the current task out of the processor (move semantics)
pub fn take_current(&mut self) -> Option<Arc<TaskControlBlock>> {
self.current.take()
}
///Get a cloned reference to the current task (clone semantics)
pub fn current(&self) -> Option<Arc<TaskControlBlock>> {
self.current.as_ref().map(Arc::clone)
}
@ -32,7 +39,8 @@ impl Processor {
lazy_static! {
pub static ref PROCESSOR: UPSafeCell<Processor> = unsafe { UPSafeCell::new(Processor::new()) };
}
///The main part of process execution and scheduling:
///loop over `fetch_task` to get the next process to run, and switch to it through `__switch`
pub fn run_tasks() {
loop {
let mut processor = PROCESSOR.exclusive_access();
@ -53,28 +61,28 @@ pub fn run_tasks() {
}
}
}
///Take the current task, leaving a None in its place
pub fn take_current_task() -> Option<Arc<TaskControlBlock>> {
PROCESSOR.exclusive_access().take_current()
}
///Get running task
pub fn current_task() -> Option<Arc<TaskControlBlock>> {
PROCESSOR.exclusive_access().current()
}
///Get token of the address space of current task
pub fn current_user_token() -> usize {
let task = current_task().unwrap();
let token = task.inner_exclusive_access().get_user_token();
token
}
///Get the mutable reference to trap context of current task
pub fn current_trap_cx() -> &'static mut TrapContext {
current_task()
.unwrap()
.inner_exclusive_access()
.get_trap_cx()
}
///Return to idle control flow for new scheduling
pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
let mut processor = PROCESSOR.exclusive_access();
let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();

View File

@ -1,3 +1,4 @@
//!Wrap `switch.S` as a function
use super::TaskContext;
use core::arch::global_asm;

View File

@ -1,3 +1,4 @@
//!Implementation of [`TaskControlBlock`]
use super::TaskContext;
use super::{pid_alloc, KernelStack, PidHandle};
use crate::config::TRAP_CONTEXT;

View File

@ -1,18 +1,21 @@
//! RISC-V timer-related functionality
use crate::config::CLOCK_FREQ;
use crate::sbi::set_timer;
use riscv::register::time;
const TICKS_PER_SEC: usize = 100;
const MSEC_PER_SEC: usize = 1000;
///get current time
pub fn get_time() -> usize {
time::read()
}
/// get current time in milliseconds
pub fn get_time_ms() -> usize {
time::read() / (CLOCK_FREQ / MSEC_PER_SEC)
}
/// set the next timer interrupt
pub fn set_next_trigger() {
set_timer(get_time() + CLOCK_FREQ / TICKS_PER_SEC);
}
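
A worked sketch (editorial) of the timer arithmetic above, assuming the QEMU virt board's CLOCK_FREQ of 12_500_000 ticks per second (the value in config.rs may differ on other platforms):

const CLOCK_FREQ: usize = 12_500_000;
const TICKS_PER_SEC: usize = 100;
const MSEC_PER_SEC: usize = 1000;

fn main() {
    // set_next_trigger schedules the next interrupt one scheduling tick ahead:
    let ticks_per_slice = CLOCK_FREQ / TICKS_PER_SEC;
    assert_eq!(ticks_per_slice, 125_000); // i.e. a 10 ms time slice
    // get_time_ms converts the raw counter value into milliseconds:
    let counter = 2 * CLOCK_FREQ + CLOCK_FREQ / 2; // 2.5 s worth of ticks
    assert_eq!(counter / (CLOCK_FREQ / MSEC_PER_SEC), 2500);
}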

View File

@ -1,20 +1,30 @@
//! Implementation of [`TrapContext`]
use riscv::register::sstatus::{self, Sstatus, SPP};
#[repr(C)]
#[derive(Debug)]
///trap context structure containing sstatus, sepc and registers
pub struct TrapContext {
/// general regs[0..31]
pub x: [usize; 32],
/// CSR sstatus
pub sstatus: Sstatus,
/// CSR sepc
pub sepc: usize,
/// Addr of Page Table
pub kernel_satp: usize,
/// kernel stack
pub kernel_sp: usize,
/// Addr of trap_handler function
pub trap_handler: usize,
}
impl TrapContext {
///set stack pointer to x_2 reg (sp)
pub fn set_sp(&mut self, sp: usize) {
self.x[2] = sp;
}
///init app context
pub fn app_init_context(
entry: usize,
sp: usize,

View File

@ -1,3 +1,16 @@
//! Trap handling functionality
//!
//! For rCore, we have a single trap entry point, namely `__alltraps`. At
//! initialization in [`init()`], we set the `stvec` CSR to point to it.
//!
//! All traps go through `__alltraps`, which is defined in `trap.S`. The
//! assembly language code does just enough work to restore the kernel space
//! context, ensuring that Rust code safely runs, and transfers control to
//! [`trap_handler()`].
//!
//! It then calls different functionality based on what exactly the exception
//! was. For example, timer interrupts trigger task preemption, and syscalls go
//! to [`syscall()`].
mod context;
use crate::config::{TRAMPOLINE, TRAP_CONTEXT};
@ -14,7 +27,7 @@ use riscv::register::{
};
global_asm!(include_str!("trap.S"));
/// initialize CSR `stvec` as the entry of `__alltraps`
pub fn init() {
set_kernel_trap_entry();
}
@ -30,7 +43,7 @@ fn set_user_trap_entry() {
stvec::write(TRAMPOLINE as usize, TrapMode::Direct);
}
}
/// enable timer interrupt in sie CSR
pub fn enable_timer_interrupt() {
unsafe {
sie::set_stimer();
@ -38,6 +51,7 @@ pub fn enable_timer_interrupt() {
}
#[no_mangle]
/// handle an interrupt, exception, or system call from user space
pub fn trap_handler() -> ! {
set_kernel_trap_entry();
let scause = scause::read();
@ -90,6 +104,9 @@ pub fn trap_handler() -> ! {
}
#[no_mangle]
/// set the new addr of __restore asm function in TRAMPOLINE page,
/// set the reg a0 = trap_cx_ptr, reg a1 = phy addr of usr page table,
/// finally, jump to new addr of __restore asm function
pub fn trap_return() -> ! {
set_user_trap_entry();
let trap_cx_ptr = TRAP_CONTEXT;
@ -112,6 +129,8 @@ pub fn trap_return() -> ! {
}
#[no_mangle]
/// Unimplemented: traps/interrupts/exceptions from kernel mode
/// Todo: Chapter 9: I/O device
pub fn trap_from_kernel() -> ! {
use riscv::register::sepc;
println!("stval = {:#x}, sepc = {:#x}", stval::read(), sepc::read());