Mirror of https://github.com/rcore-os/rCore-Tutorial-v3.git (synced 2024-11-24 18:36:24 +04:00)

Commit 3d2909e990: Merge branch 'ch8' into main
.gitignore (vendored, 1 line changed)
@@ -2,6 +2,7 @@
 os/target/*
 os/.idea/*
 os/src/link_app.S
+os/last-*
 os/Cargo.lock
 os/last-*
 user/target/*
README.md (70 lines changed)
@@ -1,76 +1,16 @@
 # rCore-Tutorial-v3
-rCore-Tutorial version 3.5. See the [Documentation in Chinese](https://rcore-os.github.io/rCore-Tutorial-Book-v3/).
+rCore-Tutorial version 3.x
-
-## news
-- 2021.07.29: Now we are updating our labs. Please checkout chX-dev Branches for our current new labs. (Notice: please see the [Dependency] section in the end of this doc)
-
-## Overview
-
-This project aims to show how to write an **Unix-like OS** running on **RISC-V** platforms **from scratch** in **[Rust](https://www.rust-lang.org/)** for **beginners** without any background knowledge about **computer architectures, assembly languages or operating systems**.
-
-## Features
-
-* Platform supported: `qemu-system-riscv64` simulator or dev boards based on [Kendryte K210 SoC](https://canaan.io/product/kendryteai) such as [Maix Dock](https://www.seeedstudio.com/Sipeed-MAIX-Dock-p-4815.html)
-* OS
-  * concurrency of multiple processes
-  * preemptive scheduling(Round-Robin algorithm)
-  * dynamic memory management in kernel
-  * virtual memory
-  * a simple file system with a block cache
-  * an interactive shell in the userspace
-* **only 4K+ LoC**
-* [A detailed documentation in Chinese](https://rcore-os.github.io/rCore-Tutorial-Book-v3/) in spite of the lack of comments in the code(English version is not available at present)
-
-## Run our project
-
-TODO:
-
-## Working in progress
-
-Now we are still updating our project, you can find latest changes on branches `chX-dev` such as `ch1-dev`. We are intended to publish first release 3.5.0 after completing most of the tasks mentioned below.
-
-Overall progress: ch7
-
-### Completed
-
-* [x] automatically clean up and rebuild before running our project on a different platform
-* [x] fix `power` series application in early chapters, now you can find modulus in the output
-* [x] use `UPSafeCell` instead of `RefCell` or `spin::Mutex` in order to access static data structures and adjust its API so that it cannot be borrowed twice at a time(mention `& .exclusive_access().task[0]` in `run_first_task`)
-* [x] move `TaskContext` into `TaskControlBlock` instead of restoring it in place on kernel stack(since ch3), eliminating annoying `task_cx_ptr2`
-* [x] replace `llvm_asm!` with `asm!`
-* [x] expand the fs image size generated by `rcore-fs-fuse` to 128MiB
-* [x] add a new test named `huge_write` which evaluates the fs performance(qemu\~500KiB/s k210\~50KiB/s)
-* [x] flush all block cache to disk after a fs transaction which involves write operation
-
-### Todo(High priority)
-
-* [ ] bug fix: we should call `find_pte` rather than `find_pte_create` in `PageTable::unmap`
-* [ ] bug fix: check validity of level-3 pte in `find_pte` instead of checking it outside this function
-* [ ] use old fs image optionally, do not always rebuild the image
-* [ ] replace `spin::Mutex` with `UPSafeCell` before SMP chapter
-* [ ] add new system calls: getdents64/fstat
-* [ ] shell functionality improvement(to be continued...)
-* [ ] add a new chapter about synchronization & mutual exclusion(uniprocessor only)
-* [ ] give every non-zero process exit code an unique and clear error type
-* [ ] effective error handling of mm module
-
-### Todo(Low priority)
-
-* [ ] rewrite practice doc and remove some inproper questions
-* [ ] provide smooth debug experience at a Rust source code level
-* [ ] format the code using official tools
-* [ ] support Allwinner's RISC-V D1 chip
-
 ## Dependency

 ### Binaries

-* rustc 1.56.0-nightly (b03ccace5 2021-08-24)
+* rustc 1.56.0-nightly (08095fc1f 2021-07-26)

 * qemu: 5.0.0

-* rustsbi: qemu[7d71bfb7] k210[563144b0]
+* rustsbi-lib: 0.2.0-alpha.4

-### Crates
+rustsbi-qemu: d4968dd2

-We will add them later.
+rustsbi-k210: b689314e
Binary file not shown.
Binary file not shown.
easy-fs-fuse/src/main.rs
@@ -56,13 +56,13 @@ fn easy_fs_pack() -> std::io::Result<()> {
         .write(true)
         .create(true)
         .open(format!("{}{}", target_path, "fs.img"))?;
-        f.set_len(8192 * 512).unwrap();
+        f.set_len(16 * 2048 * 512).unwrap();
         f
     })));
-    // 4MiB, at most 4095 files
+    // 16MiB, at most 4095 files
     let efs = EasyFileSystem::create(
         block_file.clone(),
-        8192,
+        16 * 2048,
         1,
     );
     let root_inode = Arc::new(EasyFileSystem::root_inode(&efs));
@@ -165,4 +165,4 @@ fn efs_test() -> std::io::Result<()> {
     random_str_test(2000 * BLOCK_SZ);

     Ok(())
 }
easy-fs/src/block_cache.rs
@@ -17,7 +17,7 @@ pub struct BlockCache {
 impl BlockCache {
     /// Load a new BlockCache from disk.
     pub fn new(
         block_id: usize,
         block_device: Arc<dyn BlockDevice>
     ) -> Self {
         let mut cache = [0u8; BLOCK_SZ];
@@ -125,4 +125,11 @@ pub fn get_block_cache(
     block_device: Arc<dyn BlockDevice>
 ) -> Arc<Mutex<BlockCache>> {
     BLOCK_CACHE_MANAGER.lock().get_block_cache(block_id, block_device)
 }
+
+pub fn block_cache_sync_all() {
+    let manager = BLOCK_CACHE_MANAGER.lock();
+    for (_, cache) in manager.queue.iter() {
+        cache.lock().sync();
+    }
+}
easy-fs/src/efs.rs
@@ -8,6 +8,7 @@ use super::{
     DiskInodeType,
     Inode,
     get_block_cache,
+    block_cache_sync_all,
 };
 use crate::BLOCK_SZ;

@@ -50,7 +51,7 @@ impl EasyFileSystem {
         // clear all blocks
         for i in 0..total_blocks {
             get_block_cache(
                 i as usize,
                 Arc::clone(&block_device)
             )
             .lock()
@@ -82,6 +83,7 @@ impl EasyFileSystem {
             .modify(root_inode_offset, |disk_inode: &mut DiskInode| {
                 disk_inode.initialize(DiskInodeType::Directory);
             });
+        block_cache_sync_all();
         Arc::new(Mutex::new(efs))
     }

@@ -107,7 +109,7 @@ impl EasyFileSystem {
             data_area_start_block: 1 + inode_total_blocks + super_block.data_bitmap_blocks,
         };
         Arc::new(Mutex::new(efs))
     })
 }

 pub fn root_inode(efs: &Arc<Mutex<Self>>) -> Inode {
easy-fs/src/layout.rs
@@ -211,6 +211,49 @@ impl DiskInode {
             Arc::clone(block_device)
         )
         .lock()
+        .modify(0, |indirect2: &mut IndirectBlock| {
+            while (a0 < a1) || (a0 == a1 && b0 < b1) {
+                if b0 == 0 {
+                    indirect2[a0] = new_blocks.next().unwrap();
+                }
+                // fill current
+                get_block_cache(
+                    indirect2[a0] as usize,
+                    Arc::clone(block_device)
+                )
+                .lock()
+                .modify(0, |indirect1: &mut IndirectBlock| {
+                    indirect1[b0] = new_blocks.next().unwrap();
+                });
+                // move to next
+                b0 += 1;
+                if b0 == INODE_INDIRECT1_COUNT {
+                    b0 = 0;
+                    a0 += 1;
+                }
+            }
+        });
+        // alloc indirect2
+        if total_blocks > INODE_INDIRECT1_COUNT as u32 {
+            if current_blocks == INODE_INDIRECT1_COUNT as u32 {
+                self.indirect2 = new_blocks.next().unwrap();
+            }
+            current_blocks -= INODE_INDIRECT1_COUNT as u32;
+            total_blocks -= INODE_INDIRECT1_COUNT as u32;
+        } else {
+            return;
+        }
+        // fill indirect2 from (a0, b0) -> (a1, b1)
+        let mut a0 = current_blocks as usize / INODE_INDIRECT1_COUNT;
+        let mut b0 = current_blocks as usize % INODE_INDIRECT1_COUNT;
+        let a1 = total_blocks as usize / INODE_INDIRECT1_COUNT;
+        let b1 = total_blocks as usize % INODE_INDIRECT1_COUNT;
+        // alloc low-level indirect1
+        get_block_cache(
+            self.indirect2 as usize,
+            Arc::clone(block_device)
+        )
+        .lock()
         .modify(0, |indirect2: &mut IndirectBlock| {
             while (a0 < a1) || (a0 == a1 && b0 < b1) {
                 if b0 == 0 {
@@ -416,7 +459,7 @@ impl DirEntry {
     }
     pub fn new(name: &str, inode_number: u32) -> Self {
         let mut bytes = [0u8; NAME_LENGTH_LIMIT + 1];
-        &mut bytes[..name.len()].copy_from_slice(name.as_bytes());
+        bytes[..name.len()].copy_from_slice(name.as_bytes());
         Self {
             name: bytes,
             inode_number,
easy-fs/src/lib.rs
@@ -15,4 +15,4 @@ pub use efs::EasyFileSystem;
 pub use vfs::Inode;
 use layout::*;
 use bitmap::Bitmap;
-use block_cache::get_block_cache;
+use block_cache::{get_block_cache, block_cache_sync_all};
easy-fs/src/vfs.rs
@@ -6,6 +6,7 @@ use super::{
     EasyFileSystem,
     DIRENT_SZ,
     get_block_cache,
+    block_cache_sync_all,
 };
 use alloc::sync::Arc;
 use alloc::string::String;
@@ -145,6 +146,7 @@ impl Inode {
         });

         let (block_id, block_offset) = fs.get_disk_inode_pos(new_inode_id);
+        block_cache_sync_all();
         // return inode
         Some(Arc::new(Self::new(
             block_id,
@@ -185,10 +187,12 @@ impl Inode {

     pub fn write_at(&self, offset: usize, buf: &[u8]) -> usize {
         let mut fs = self.fs.lock();
-        self.modify_disk_inode(|disk_inode| {
+        let size = self.modify_disk_inode(|disk_inode| {
             self.increase_size((offset + buf.len()) as u32, disk_inode, &mut fs);
             disk_inode.write_at(offset, buf, &self.block_device)
-        })
+        });
+        block_cache_sync_all();
+        size
     }

     pub fn clear(&self) {
@@ -201,5 +205,6 @@ impl Inode {
                 fs.dealloc_data(data_block);
             }
         });
+        block_cache_sync_all();
     }
 }
os/Cargo.toml
@@ -10,7 +10,6 @@ edition = "2018"
 riscv = { git = "https://github.com/rcore-os/riscv", features = ["inline-asm"] }
 lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
 buddy_system_allocator = "0.6"
-spin = "0.7.0"
 bitflags = "1.2.1"
 xmas-elf = "0.7.0"
 virtio-drivers = { git = "https://github.com/rcore-os/virtio-drivers" }
os/Makefile (22 lines changed)
@@ -32,24 +32,32 @@ OBJCOPY := rust-objcopy --binary-architecture=riscv64
 # Disassembly
 DISASM ?= -x

-build: env $(KERNEL_BIN) $(FS_IMG)
+build: env switch-check $(KERNEL_BIN) fs-img

+switch-check:
+ifeq ($(BOARD), qemu)
+	(which last-qemu) || (rm last-k210 -f && touch last-qemu && make clean)
+else ifeq ($(BOARD), k210)
+	(which last-k210) || (rm last-qemu -f && touch last-k210 && make clean)
+endif
+
 env:
 	(rustup target list | grep "riscv64gc-unknown-none-elf (installed)") || rustup target add $(TARGET)
-	cargo install cargo-binutils --vers ~0.2
+	cargo install cargo-binutils --vers =0.3.3
 	rustup component add rust-src
 	rustup component add llvm-tools-preview

-sdcard: $(FS_IMG)
+sdcard: fs-img
 	@echo "Are you sure write to $(SDCARD) ? [y/N] " && read ans && [ $${ans:-N} = y ]
-	@sudo dd if=/dev/zero of=$(SDCARD) bs=1048576 count=16
+	@sudo dd if=/dev/zero of=$(SDCARD) bs=1048576 count=32
 	@sudo dd if=$(FS_IMG) of=$(SDCARD)

 $(KERNEL_BIN): kernel
 	@$(OBJCOPY) $(KERNEL_ELF) --strip-all -O binary $@

-$(FS_IMG): $(APPS)
+fs-img: $(APPS)
 	@cd ../user && make build
+	@rm $(FS_IMG) -f
 	@cd ../easy-fs-fuse && cargo run --release -- -s ../user/src/bin/ -t ../user/target/riscv64gc-unknown-none-elf/release/

 $(APPS):
@@ -73,8 +81,6 @@ disasm-vim: kernel

 run: run-inner
-
-

 run-inner: build
 ifeq ($(BOARD),qemu)
 	@qemu-system-riscv64 \
@@ -100,4 +106,4 @@ debug: build
 	tmux split-window -h "riscv64-unknown-elf-gdb -ex 'file $(KERNEL_ELF)' -ex 'set arch riscv:rv64' -ex 'target remote localhost:1234'" && \
 	tmux -2 attach-session -d

-.PHONY: build env kernel clean disasm disasm-vim run-inner
+.PHONY: build env kernel clean disasm disasm-vim run-inner switch-check fs-img
os/src/config.rs
@@ -8,7 +8,7 @@ pub const PAGE_SIZE: usize = 0x1000;
 pub const PAGE_SIZE_BITS: usize = 0xc;

 pub const TRAMPOLINE: usize = usize::MAX - PAGE_SIZE + 1;
-pub const TRAP_CONTEXT: usize = TRAMPOLINE - PAGE_SIZE;
+pub const TRAP_CONTEXT_BASE: usize = TRAMPOLINE - PAGE_SIZE;

 #[cfg(feature = "board_k210")]
 pub const CLOCK_FREQ: usize = 403000000 / 62;
@@ -39,4 +39,4 @@ pub const MMIO: &[(usize, usize)] = &[
     (0x5200_0000, 0x1000), /* SPI0 */
     (0x5300_0000, 0x1000), /* SPI1 */
     (0x5400_0000, 0x1000), /* SPI2 */
 ];
os/src/console.rs
@@ -19,14 +19,14 @@ pub fn print(args: fmt::Arguments) {
 #[macro_export]
 macro_rules! print {
     ($fmt: literal $(, $($arg: tt)+)?) => {
-        $crate::console::print(format_args!($fmt $(, $($arg)+)?));
+        $crate::console::print(format_args!($fmt $(, $($arg)+)?))
     }
 }

 #[macro_export]
 macro_rules! println {
     ($fmt: literal $(, $($arg: tt)+)?) => {
-        $crate::console::print(format_args!(concat!($fmt, "\n") $(, $($arg)+)?));
+        $crate::console::print(format_args!(concat!($fmt, "\n") $(, $($arg)+)?))
     }
 }

os/src/drivers/block/sdcard.rs
@@ -13,7 +13,7 @@ use k210_soc::{
     sysctl,
     sleep::usleep,
 };
-use spin::Mutex;
+use crate::sync::UPSafeCell;
 use lazy_static::*;
 use super::BlockDevice;
 use core::convert::TryInto;
@@ -711,7 +711,9 @@ fn io_init() {
 }

 lazy_static! {
-    static ref PERIPHERALS: Mutex<Peripherals> = Mutex::new(Peripherals::take().unwrap());
+    static ref PERIPHERALS: UPSafeCell<Peripherals> = unsafe {
+        UPSafeCell::new(Peripherals::take().unwrap())
+    };
 }

 fn init_sdcard() -> SDCard<SPIImpl<SPI0>> {
@@ -735,19 +737,19 @@ fn init_sdcard() -> SDCard<SPIImpl<SPI0>> {
     sd
 }

-pub struct SDCardWrapper(Mutex<SDCard<SPIImpl<SPI0>>>);
+pub struct SDCardWrapper(UPSafeCell<SDCard<SPIImpl<SPI0>>>);

 impl SDCardWrapper {
     pub fn new() -> Self {
-        Self(Mutex::new(init_sdcard()))
+        unsafe { Self(UPSafeCell::new(init_sdcard())) }
     }
 }

 impl BlockDevice for SDCardWrapper {
     fn read_block(&self, block_id: usize, buf: &mut [u8]) {
-        self.0.lock().read_sector(buf,block_id as u32).unwrap();
+        self.0.exclusive_access().read_sector(buf,block_id as u32).unwrap();
     }
     fn write_block(&self, block_id: usize, buf: &[u8]) {
-        self.0.lock().write_sector(buf,block_id as u32).unwrap();
+        self.0.exclusive_access().write_sector(buf,block_id as u32).unwrap();
     }
 }
os/src/drivers/block/virtio_blk.rs
@@ -12,34 +12,42 @@ use crate::mm::{
     kernel_token,
 };
 use super::BlockDevice;
-use spin::Mutex;
+use crate::sync::UPSafeCell;
 use alloc::vec::Vec;
 use lazy_static::*;

 #[allow(unused)]
 const VIRTIO0: usize = 0x10001000;

-pub struct VirtIOBlock(Mutex<VirtIOBlk<'static>>);
+pub struct VirtIOBlock(UPSafeCell<VirtIOBlk<'static>>);

 lazy_static! {
-    static ref QUEUE_FRAMES: Mutex<Vec<FrameTracker>> = Mutex::new(Vec::new());
+    static ref QUEUE_FRAMES: UPSafeCell<Vec<FrameTracker>> = unsafe {
+        UPSafeCell::new(Vec::new())
+    };
 }

 impl BlockDevice for VirtIOBlock {
     fn read_block(&self, block_id: usize, buf: &mut [u8]) {
-        self.0.lock().read_block(block_id, buf).expect("Error when reading VirtIOBlk");
+        self.0.exclusive_access()
+            .read_block(block_id, buf)
+            .expect("Error when reading VirtIOBlk");
     }
     fn write_block(&self, block_id: usize, buf: &[u8]) {
-        self.0.lock().write_block(block_id, buf).expect("Error when writing VirtIOBlk");
+        self.0.exclusive_access()
+            .write_block(block_id, buf)
+            .expect("Error when writing VirtIOBlk");
     }
 }

 impl VirtIOBlock {
     #[allow(unused)]
     pub fn new() -> Self {
-        Self(Mutex::new(VirtIOBlk::new(
-            unsafe { &mut *(VIRTIO0 as *mut VirtIOHeader) }
-        ).unwrap()))
+        unsafe {
+            Self(UPSafeCell::new(VirtIOBlk::new(
+                &mut *(VIRTIO0 as *mut VirtIOHeader)
+            ).unwrap()))
+        }
     }
 }

@@ -50,7 +58,7 @@ pub extern "C" fn virtio_dma_alloc(pages: usize) -> PhysAddr {
         let frame = frame_alloc().unwrap();
         if i == 0 { ppn_base = frame.ppn; }
         assert_eq!(frame.ppn.0, ppn_base.0 + i);
-        QUEUE_FRAMES.lock().push(frame);
+        QUEUE_FRAMES.exclusive_access().push(frame);
     }
     ppn_base.into()
 }
os/src/entry.asm
@@ -9,4 +9,4 @@ _start:
 boot_stack:
     .space 4096 * 16
     .globl boot_stack_top
 boot_stack_top:
os/src/fs/inode.rs
@@ -3,18 +3,18 @@ use easy_fs::{
     Inode,
 };
 use crate::drivers::BLOCK_DEVICE;
+use crate::sync::UPSafeCell;
 use alloc::sync::Arc;
 use lazy_static::*;
 use bitflags::*;
 use alloc::vec::Vec;
-use spin::Mutex;
 use super::File;
 use crate::mm::UserBuffer;

 pub struct OSInode {
     readable: bool,
     writable: bool,
-    inner: Mutex<OSInodeInner>,
+    inner: UPSafeCell<OSInodeInner>,
 }

 pub struct OSInodeInner {
@@ -31,14 +31,14 @@ impl OSInode {
         Self {
             readable,
             writable,
-            inner: Mutex::new(OSInodeInner {
+            inner: unsafe { UPSafeCell::new(OSInodeInner {
                 offset: 0,
                 inode,
-            }),
+            })},
         }
     }
     pub fn read_all(&self) -> Vec<u8> {
-        let mut inner = self.inner.lock();
+        let mut inner = self.inner.exclusive_access();
         let mut buffer = [0u8; 512];
         let mut v: Vec<u8> = Vec::new();
         loop {
@@ -133,7 +133,7 @@ impl File for OSInode {
     fn readable(&self) -> bool { self.readable }
     fn writable(&self) -> bool { self.writable }
     fn read(&self, mut buf: UserBuffer) -> usize {
-        let mut inner = self.inner.lock();
+        let mut inner = self.inner.exclusive_access();
         let mut total_read_size = 0usize;
         for slice in buf.buffers.iter_mut() {
             let read_size = inner.inode.read_at(inner.offset, *slice);
@@ -146,7 +146,7 @@ impl File for OSInode {
         total_read_size
     }
     fn write(&self, buf: UserBuffer) -> usize {
-        let mut inner = self.inner.lock();
+        let mut inner = self.inner.exclusive_access();
         let mut total_write_size = 0usize;
         for slice in buf.buffers.iter() {
             let write_size = inner.inode.write_at(inner.offset, *slice);
os/src/fs/pipe.rs
@@ -1,26 +1,25 @@
 use super::File;
 use alloc::sync::{Arc, Weak};
-use spin::Mutex;
-use crate::mm::{
-    UserBuffer,
-};
+use crate::sync::UPSafeCell;
+use crate::mm::UserBuffer;
 use crate::task::suspend_current_and_run_next;

 pub struct Pipe {
     readable: bool,
     writable: bool,
-    buffer: Arc<Mutex<PipeRingBuffer>>,
+    buffer: Arc<UPSafeCell<PipeRingBuffer>>,
 }

 impl Pipe {
-    pub fn read_end_with_buffer(buffer: Arc<Mutex<PipeRingBuffer>>) -> Self {
+    pub fn read_end_with_buffer(buffer: Arc<UPSafeCell<PipeRingBuffer>>) -> Self {
         Self {
             readable: true,
             writable: false,
             buffer,
         }
     }
-    pub fn write_end_with_buffer(buffer: Arc<Mutex<PipeRingBuffer>>) -> Self {
+    pub fn write_end_with_buffer(buffer: Arc<UPSafeCell<PipeRingBuffer>>) -> Self {
         Self {
             readable: false,
             writable: true,
@@ -101,14 +100,16 @@ impl PipeRingBuffer {

 /// Return (read_end, write_end)
 pub fn make_pipe() -> (Arc<Pipe>, Arc<Pipe>) {
-    let buffer = Arc::new(Mutex::new(PipeRingBuffer::new()));
+    let buffer = Arc::new(unsafe {
+        UPSafeCell::new(PipeRingBuffer::new())
+    });
     let read_end = Arc::new(
         Pipe::read_end_with_buffer(buffer.clone())
     );
     let write_end = Arc::new(
         Pipe::write_end_with_buffer(buffer.clone())
     );
-    buffer.lock().set_write_end(&write_end);
+    buffer.exclusive_access().set_write_end(&write_end);
     (read_end, write_end)
 }

@@ -120,7 +121,7 @@ impl File for Pipe {
         let mut buf_iter = buf.into_iter();
         let mut read_size = 0usize;
         loop {
-            let mut ring_buffer = self.buffer.lock();
+            let mut ring_buffer = self.buffer.exclusive_access();
             let loop_read = ring_buffer.available_read();
             if loop_read == 0 {
                 if ring_buffer.all_write_ends_closed() {
@@ -146,7 +147,7 @@ impl File for Pipe {
         let mut buf_iter = buf.into_iter();
         let mut write_size = 0usize;
         loop {
-            let mut ring_buffer = self.buffer.lock();
+            let mut ring_buffer = self.buffer.exclusive_access();
             let loop_write = ring_buffer.available_write();
             if loop_write == 0 {
                 drop(ring_buffer);
os/src/lang_items.rs
@@ -1,5 +1,6 @@
 use core::panic::PanicInfo;
 use crate::sbi::shutdown;
+use crate::task::current_kstack_top;

 #[panic_handler]
 fn panic(info: &PanicInfo) -> ! {
@@ -14,5 +15,19 @@ fn panic(info: &PanicInfo) -> ! {
         }
         None => println!("[kernel] panicked at '{}'", info.message().unwrap())
     }
+    unsafe { backtrace(); }
     shutdown()
 }
+
+unsafe fn backtrace() {
+    let mut fp: usize;
+    let stop = current_kstack_top();
+    asm!("mv {}, s0", out(reg) fp);
+    println!("---START BACKTRACE---");
+    for i in 0..10 {
+        if fp == stop { break; }
+        println!("#{}:ra={:#x}", i, *((fp-8) as *const usize));
+        fp = *((fp-16) as *const usize);
+    }
+    println!("---END BACKTRACE---");
+}
os/src/loader.rs (new file, 62 lines)

use alloc::vec::Vec;
use lazy_static::*;

pub fn get_num_app() -> usize {
    extern "C" { fn _num_app(); }
    unsafe { (_num_app as usize as *const usize).read_volatile() }
}

pub fn get_app_data(app_id: usize) -> &'static [u8] {
    extern "C" { fn _num_app(); }
    let num_app_ptr = _num_app as usize as *const usize;
    let num_app = get_num_app();
    let app_start = unsafe {
        core::slice::from_raw_parts(num_app_ptr.add(1), num_app + 1)
    };
    assert!(app_id < num_app);
    unsafe {
        core::slice::from_raw_parts(
            app_start[app_id] as *const u8,
            app_start[app_id + 1] - app_start[app_id]
        )
    }
}

lazy_static! {
    static ref APP_NAMES: Vec<&'static str> = {
        let num_app = get_num_app();
        extern "C" { fn _app_names(); }
        let mut start = _app_names as usize as *const u8;
        let mut v = Vec::new();
        unsafe {
            for _ in 0..num_app {
                let mut end = start;
                while end.read_volatile() != '\0' as u8 {
                    end = end.add(1);
                }
                let slice = core::slice::from_raw_parts(start, end as usize - start as usize);
                let str = core::str::from_utf8(slice).unwrap();
                v.push(str);
                start = end.add(1);
            }
        }
        v
    };
}

#[allow(unused)]
pub fn get_app_data_by_name(name: &str) -> Option<&'static [u8]> {
    let num_app = get_num_app();
    (0..num_app)
        .find(|&i| APP_NAMES[i] == name)
        .map(|i| get_app_data(i))
}

pub fn list_apps() {
    println!("/**** APPS ****");
    for app in APP_NAMES.iter() {
        println!("{}", app);
    }
    println!("**************/");
}
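For orientation, a minimal sketch of how these helpers are typically consumed (illustration only, not part of the diff; the function name load_initproc_elf is invented, although an initproc user program does exist in the tutorial):

    // Illustration only: fetch an application's ELF image by name from the data
    // embedded by link_app.S; `load_initproc_elf` is a hypothetical helper name.
    fn load_initproc_elf() -> &'static [u8] {
        list_apps(); // prints every embedded app name between the banner lines
        get_app_data_by_name("initproc").expect("initproc not found among embedded apps")
    }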
os/src/main.rs
@@ -1,9 +1,8 @@
 #![no_std]
 #![no_main]
 #![feature(global_asm)]
-#![feature(llvm_asm)]
+#![feature(asm)]
 #![feature(panic_info_message)]
-#![feature(const_in_array_repeat_expressions)]
 #![feature(alloc_error_handler)]

 extern crate alloc;
@@ -20,6 +19,7 @@ mod trap;
 mod config;
 mod task;
 mod timer;
+mod sync;
 mod mm;
 mod fs;
 mod drivers;
@@ -31,9 +31,12 @@ fn clear_bss() {
         fn sbss();
         fn ebss();
     }
-    (sbss as usize..ebss as usize).for_each(|a| {
-        unsafe { (a as *mut u8).write_volatile(0) }
-    });
+    unsafe {
+        core::slice::from_raw_parts_mut(
+            sbss as usize as *mut u8,
+            ebss as usize - sbss as usize,
+        ).fill(0);
+    }
 }

 #[no_mangle]
@@ -49,4 +52,4 @@ pub fn rust_main() -> ! {
     task::add_initproc();
     task::run_tasks();
     panic!("Unreachable in rust_main!");
 }
os/src/mm/address.rs
@@ -2,6 +2,11 @@ use crate::config::{PAGE_SIZE, PAGE_SIZE_BITS};
 use super::PageTableEntry;
 use core::fmt::{self, Debug, Formatter};

+const PA_WIDTH_SV39: usize = 56;
+const VA_WIDTH_SV39: usize = 39;
+const PPN_WIDTH_SV39: usize = PA_WIDTH_SV39 - PAGE_SIZE_BITS;
+const VPN_WIDTH_SV39: usize = VA_WIDTH_SV39 - PAGE_SIZE_BITS;
+
 /// Definitions
 #[repr(C)]
 #[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
@@ -47,16 +52,16 @@ impl Debug for PhysPageNum {
 /// usize -> T: usize.into()

 impl From<usize> for PhysAddr {
-    fn from(v: usize) -> Self { Self(v) }
+    fn from(v: usize) -> Self { Self(v & ( (1 << PA_WIDTH_SV39) - 1 )) }
 }
 impl From<usize> for PhysPageNum {
-    fn from(v: usize) -> Self { Self(v) }
+    fn from(v: usize) -> Self { Self(v & ( (1 << PPN_WIDTH_SV39) - 1 )) }
 }
 impl From<usize> for VirtAddr {
-    fn from(v: usize) -> Self { Self(v) }
+    fn from(v: usize) -> Self { Self(v & ( (1 << VA_WIDTH_SV39) - 1 )) }
 }
 impl From<usize> for VirtPageNum {
-    fn from(v: usize) -> Self { Self(v) }
+    fn from(v: usize) -> Self { Self(v & ( (1 << VPN_WIDTH_SV39) - 1 )) }
 }
 impl From<PhysAddr> for usize {
     fn from(v: PhysAddr) -> Self { v.0 }
@@ -206,4 +211,4 @@ impl<T> Iterator for SimpleRangeIterator<T> where
         }
     }
 }
 pub type VPNRange = SimpleRange<VirtPageNum>;
os/src/mm/frame_allocator.rs
@@ -1,6 +1,6 @@
 use super::{PhysAddr, PhysPageNum};
 use alloc::vec::Vec;
-use spin::Mutex;
+use crate::sync::UPSafeCell;
 use crate::config::MEMORY_END;
 use lazy_static::*;
 use core::fmt::{self, Debug, Formatter};
@@ -88,8 +88,9 @@ impl FrameAllocator for StackFrameAllocator {
 type FrameAllocatorImpl = StackFrameAllocator;

 lazy_static! {
-    pub static ref FRAME_ALLOCATOR: Mutex<FrameAllocatorImpl> =
-        Mutex::new(FrameAllocatorImpl::new());
+    pub static ref FRAME_ALLOCATOR: UPSafeCell<FrameAllocatorImpl> = unsafe {
+        UPSafeCell::new(FrameAllocatorImpl::new())
+    };
 }

 pub fn init_frame_allocator() {
@@ -97,20 +98,20 @@ pub fn init_frame_allocator() {
         fn ekernel();
     }
     FRAME_ALLOCATOR
-        .lock()
+        .exclusive_access()
         .init(PhysAddr::from(ekernel as usize).ceil(), PhysAddr::from(MEMORY_END).floor());
 }

 pub fn frame_alloc() -> Option<FrameTracker> {
     FRAME_ALLOCATOR
-        .lock()
+        .exclusive_access()
         .alloc()
         .map(|ppn| FrameTracker::new(ppn))
 }

 pub fn frame_dealloc(ppn: PhysPageNum) {
     FRAME_ALLOCATOR
-        .lock()
+        .exclusive_access()
         .dealloc(ppn);
 }

os/src/mm/memory_set.rs
@@ -7,13 +7,11 @@ use alloc::vec::Vec;
 use riscv::register::satp;
 use alloc::sync::Arc;
 use lazy_static::*;
-use spin::Mutex;
+use crate::sync::UPSafeCell;
 use crate::config::{
     MEMORY_END,
     PAGE_SIZE,
     TRAMPOLINE,
-    TRAP_CONTEXT,
-    USER_STACK_SIZE,
     MMIO,
 };

@@ -31,13 +29,13 @@ extern "C" {
 }

 lazy_static! {
-    pub static ref KERNEL_SPACE: Arc<Mutex<MemorySet>> = Arc::new(Mutex::new(
-        MemorySet::new_kernel()
-    ));
+    pub static ref KERNEL_SPACE: Arc<UPSafeCell<MemorySet>> = Arc::new(unsafe {
+        UPSafeCell::new(MemorySet::new_kernel())
+    });
 }

 pub fn kernel_token() -> usize {
-    KERNEL_SPACE.lock().token()
+    KERNEL_SPACE.exclusive_access().token()
 }

 pub struct MemorySet {
@@ -142,8 +140,8 @@ impl MemorySet {
         }
         memory_set
     }
-    /// Include sections in elf and trampoline and TrapContext and user stack,
-    /// also returns user_sp and entry point.
+    /// Include sections in elf and trampoline,
+    /// also returns user_sp_base and entry point.
     pub fn from_elf(elf_data: &[u8]) -> (Self, usize, usize) {
         let mut memory_set = Self::new_bare();
         // map trampoline
@@ -178,26 +176,10 @@ impl MemorySet {
                 );
             }
         }
-        // map user stack with U flags
         let max_end_va: VirtAddr = max_end_vpn.into();
-        let mut user_stack_bottom: usize = max_end_va.into();
-        // guard page
-        user_stack_bottom += PAGE_SIZE;
-        let user_stack_top = user_stack_bottom + USER_STACK_SIZE;
-        memory_set.push(MapArea::new(
-            user_stack_bottom.into(),
-            user_stack_top.into(),
-            MapType::Framed,
-            MapPermission::R | MapPermission::W | MapPermission::U,
-        ), None);
-        // map TrapContext
-        memory_set.push(MapArea::new(
-            TRAP_CONTEXT.into(),
-            TRAMPOLINE.into(),
-            MapType::Framed,
-            MapPermission::R | MapPermission::W,
-        ), None);
-        (memory_set, user_stack_top, elf.header.pt2.entry_point() as usize)
+        let mut user_stack_base: usize = max_end_va.into();
+        user_stack_base += PAGE_SIZE;
+        (memory_set, user_stack_base, elf.header.pt2.entry_point() as usize)
     }
     pub fn from_existed_user(user_space: &MemorySet) -> MemorySet {
         let mut memory_set = Self::new_bare();
@@ -220,7 +202,7 @@ impl MemorySet {
         let satp = self.page_table.token();
         unsafe {
             satp::write(satp);
-            llvm_asm!("sfence.vma" :::: "volatile");
+            asm!("sfence.vma");
         }
     }
     pub fn translate(&self, vpn: VirtPageNum) -> Option<PageTableEntry> {
@@ -338,7 +320,7 @@ bitflags! {

 #[allow(unused)]
 pub fn remap_test() {
-    let mut kernel_space = KERNEL_SPACE.lock();
+    let mut kernel_space = KERNEL_SPACE.exclusive_access();
     let mid_text: VirtAddr = ((stext as usize + etext as usize) / 2).into();
     let mid_rodata: VirtAddr = ((srodata as usize + erodata as usize) / 2).into();
     let mid_data: VirtAddr = ((sdata as usize + edata as usize) / 2).into();
@@ -355,4 +337,4 @@ pub fn remap_test() {
         false,
     );
     println!("remap_test passed!");
 }
os/src/mm/mod.rs
@@ -24,5 +24,5 @@ pub use memory_set::remap_test;
 pub fn init() {
     heap_allocator::init_heap();
     frame_allocator::init_frame_allocator();
-    KERNEL_SPACE.lock().activate();
+    KERNEL_SPACE.exclusive_access().activate();
 }
os/src/mm/page_table.rs
@@ -252,4 +252,4 @@ impl Iterator for UserBufferIterator {
             Some(r)
         }
     }
 }
os/src/sbi.rs
@@ -14,11 +14,12 @@ const SBI_SHUTDOWN: usize = 8;
 fn sbi_call(which: usize, arg0: usize, arg1: usize, arg2: usize) -> usize {
     let mut ret;
     unsafe {
-        llvm_asm!("ecall"
-            : "={x10}" (ret)
-            : "{x10}" (arg0), "{x11}" (arg1), "{x12}" (arg2), "{x17}" (which)
-            : "memory"
-            : "volatile"
+        asm!(
+            "ecall",
+            inlateout("x10") arg0 => ret,
+            in("x11") arg1,
+            in("x12") arg2,
+            in("x17") which,
         );
     }
     ret
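For context, sbi_call is the single funnel for every SBI service; the wrapper below is an assumed illustration of how it is invoked elsewhere in sbi.rs (those callers are unchanged by this commit and not shown in the diff), with SBI_CONSOLE_PUTCHAR being the legacy SBI extension ID 1.

    // Assumed illustration, not part of this diff: a typical wrapper around sbi_call.
    const SBI_CONSOLE_PUTCHAR: usize = 1;

    pub fn console_putchar(c: usize) {
        // arguments beyond the character are unused by this legacy SBI call
        sbi_call(SBI_CONSOLE_PUTCHAR, c, 0, 0);
    }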
os/src/sync/mod.rs (new file, 7 lines)

mod up;
mod mutex;
mod semaphore;

pub use up::UPSafeCell;
pub use mutex::{Mutex, MutexSpin, MutexBlocking};
pub use semaphore::Semaphore;
os/src/sync/mutex.rs (new file, 87 lines)

use super::UPSafeCell;
use crate::task::{block_current_and_run_next, suspend_current_and_run_next};
use crate::task::TaskControlBlock;
use crate::task::{add_task, current_task};
use alloc::{sync::Arc, collections::VecDeque};

pub trait Mutex: Sync + Send {
    fn lock(&self);
    fn unlock(&self);
}

pub struct MutexSpin {
    locked: UPSafeCell<bool>,
}

impl MutexSpin {
    pub fn new() -> Self {
        Self {
            locked: unsafe { UPSafeCell::new(false) },
        }
    }
}

impl Mutex for MutexSpin {
    fn lock(&self) {
        loop {
            let mut locked = self.locked.exclusive_access();
            if *locked {
                drop(locked);
                suspend_current_and_run_next();
                continue;
            } else {
                *locked = true;
                return;
            }
        }
    }

    fn unlock(&self) {
        let mut locked = self.locked.exclusive_access();
        *locked = false;
    }
}

pub struct MutexBlocking {
    inner: UPSafeCell<MutexBlockingInner>,
}

pub struct MutexBlockingInner {
    locked: bool,
    wait_queue: VecDeque<Arc<TaskControlBlock>>,
}

impl MutexBlocking {
    pub fn new() -> Self {
        Self {
            inner: unsafe {
                UPSafeCell::new(MutexBlockingInner {
                    locked: false,
                    wait_queue: VecDeque::new(),
                })
            },
        }
    }
}

impl Mutex for MutexBlocking {
    fn lock(&self) {
        let mut mutex_inner = self.inner.exclusive_access();
        if mutex_inner.locked {
            mutex_inner.wait_queue.push_back(current_task().unwrap());
            drop(mutex_inner);
            block_current_and_run_next();
        } else {
            mutex_inner.locked = true;
        }
    }

    fn unlock(&self) {
        let mut mutex_inner = self.inner.exclusive_access();
        assert_eq!(mutex_inner.locked, true);
        mutex_inner.locked = false;
        if let Some(waking_task) = mutex_inner.wait_queue.pop_front() {
            add_task(waking_task);
        }
    }
}
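A minimal usage sketch, assuming the trait above is in scope (illustration only, not part of this commit; the helper name with_lock is invented): both flavours implement the same trait, so a caller can hold either behind a trait object.

    // Hypothetical kernel-side illustration of the shared Mutex trait.
    fn with_lock(mutex: &dyn Mutex, critical_section: impl FnOnce()) {
        mutex.lock();      // MutexSpin suspends and retries; MutexBlocking parks the task on wait_queue
        critical_section();
        mutex.unlock();    // MutexBlocking re-schedules the first waiter, if any
    }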
os/src/sync/semaphore.rs (new file, 45 lines)

use alloc::{sync::Arc, collections::VecDeque};
use crate::task::{add_task, TaskControlBlock, current_task, block_current_and_run_next};
use crate::sync::UPSafeCell;

pub struct Semaphore {
    pub inner: UPSafeCell<SemaphoreInner>,
}

pub struct SemaphoreInner {
    pub count: isize,
    pub wait_queue: VecDeque<Arc<TaskControlBlock>>,
}

impl Semaphore {
    pub fn new(res_count: usize) -> Self {
        Self {
            inner: unsafe { UPSafeCell::new(
                SemaphoreInner {
                    count: res_count as isize,
                    wait_queue: VecDeque::new(),
                }
            )},
        }
    }

    pub fn up(&self) {
        let mut inner = self.inner.exclusive_access();
        inner.count += 1;
        if inner.count <= 0 {
            if let Some(task) = inner.wait_queue.pop_front() {
                add_task(task);
            }
        }
    }

    pub fn down(&self) {
        let mut inner = self.inner.exclusive_access();
        inner.count -= 1;
        if inner.count < 0 {
            inner.wait_queue.push_back(current_task().unwrap());
            drop(inner);
            block_current_and_run_next();
        }
    }
}
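A minimal sketch of the intended use, assuming the Semaphore type above (illustration only, not part of this commit; with_binary_semaphore and shared_op are invented names): a semaphore created with count 1 behaves as a binary lock.

    // Hypothetical kernel-side illustration of Semaphore as a binary lock.
    fn with_binary_semaphore(sem: &Semaphore, shared_op: impl FnOnce()) {
        sem.down();   // count goes 1 -> 0; a second task calling down() would block and be queued
        shared_op();  // exclusive use of the shared resource
        sem.up();     // count returns to 1, or one queued waiter is re-scheduled
    }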
os/src/sync/up.rs (new file, 27 lines)

use core::cell::{RefCell, RefMut};

/// Wrap a static data structure inside it so that we are
/// able to access it without any `unsafe`.
///
/// We should only use it in uniprocessor.
///
/// In order to get mutable reference of inner data, call
/// `exclusive_access`.
pub struct UPSafeCell<T> {
    /// inner data
    inner: RefCell<T>,
}

unsafe impl<T> Sync for UPSafeCell<T> {}

impl<T> UPSafeCell<T> {
    /// User is responsible to guarantee that inner struct is only used in
    /// uniprocessor.
    pub unsafe fn new(value: T) -> Self {
        Self { inner: RefCell::new(value) }
    }
    /// Panic if the data has been borrowed.
    pub fn exclusive_access(&self) -> RefMut<'_, T> {
        self.inner.borrow_mut()
    }
}
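A minimal sketch of the pattern this commit applies throughout the kernel, assuming UPSafeCell is in scope (illustration only, not part of this commit; GLOBAL_COUNTER and bump are invented names): a global is wrapped once behind lazy_static and mutated only through exclusive_access.

    // Hypothetical illustration of the UPSafeCell usage pattern.
    use lazy_static::lazy_static;

    lazy_static! {
        static ref GLOBAL_COUNTER: UPSafeCell<usize> = unsafe { UPSafeCell::new(0) };
    }

    fn bump() -> usize {
        // The RefMut lives only inside this scope; a second exclusive_access() here would panic.
        let mut value = GLOBAL_COUNTER.exclusive_access();
        *value += 1;
        *value
    }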
os/src/syscall/fs.rs
@@ -4,14 +4,14 @@ use crate::mm::{
     translated_refmut,
     translated_str,
 };
-use crate::task::{current_user_token, current_task};
+use crate::task::{current_user_token, current_process};
 use crate::fs::{make_pipe, OpenFlags, open_file};
 use alloc::sync::Arc;

 pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
     let token = current_user_token();
-    let task = current_task().unwrap();
-    let inner = task.acquire_inner_lock();
+    let process = current_process();
+    let inner = process.inner_exclusive_access();
     if fd >= inner.fd_table.len() {
         return -1;
     }
@@ -20,7 +20,7 @@ pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
         return -1;
     }
     let file = file.clone();
-    // release Task lock manually to avoid deadlock
+    // release current task TCB manually to avoid multi-borrow
     drop(inner);
     file.write(
         UserBuffer::new(translated_byte_buffer(token, buf, len))
@@ -32,8 +32,8 @@ pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {

 pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
     let token = current_user_token();
-    let task = current_task().unwrap();
-    let inner = task.acquire_inner_lock();
+    let process = current_process();
+    let inner = process.inner_exclusive_access();
     if fd >= inner.fd_table.len() {
         return -1;
     }
@@ -42,7 +42,7 @@ pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
     if !file.readable() {
         return -1;
     }
-    // release Task lock manually to avoid deadlock
+    // release current task TCB manually to avoid multi-borrow
     drop(inner);
     file.read(
         UserBuffer::new(translated_byte_buffer(token, buf, len))
@@ -53,14 +53,14 @@ pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
 }

 pub fn sys_open(path: *const u8, flags: u32) -> isize {
-    let task = current_task().unwrap();
+    let process = current_process();
     let token = current_user_token();
     let path = translated_str(token, path);
     if let Some(inode) = open_file(
         path.as_str(),
         OpenFlags::from_bits(flags).unwrap()
     ) {
-        let mut inner = task.acquire_inner_lock();
+        let mut inner = process.inner_exclusive_access();
         let fd = inner.alloc_fd();
         inner.fd_table[fd] = Some(inode);
         fd as isize
@@ -70,8 +70,8 @@ pub fn sys_open(path: *const u8, flags: u32) -> isize {
 }

 pub fn sys_close(fd: usize) -> isize {
-    let task = current_task().unwrap();
-    let mut inner = task.acquire_inner_lock();
+    let process = current_process();
+    let mut inner = process.inner_exclusive_access();
     if fd >= inner.fd_table.len() {
         return -1;
     }
@@ -83,9 +83,9 @@ pub fn sys_close(fd: usize) -> isize {
 }

 pub fn sys_pipe(pipe: *mut usize) -> isize {
-    let task = current_task().unwrap();
+    let process = current_process();
     let token = current_user_token();
-    let mut inner = task.acquire_inner_lock();
+    let mut inner = process.inner_exclusive_access();
     let (pipe_read, pipe_write) = make_pipe();
     let read_fd = inner.alloc_fd();
     inner.fd_table[read_fd] = Some(pipe_read);
@@ -97,8 +97,8 @@ pub fn sys_pipe(pipe: *mut usize) -> isize {
 }

 pub fn sys_dup(fd: usize) -> isize {
-    let task = current_task().unwrap();
-    let mut inner = task.acquire_inner_lock();
+    let process = current_process();
+    let mut inner = process.inner_exclusive_access();
     if fd >= inner.fd_table.len() {
         return -1;
     }
@@ -108,4 +108,4 @@ pub fn sys_dup(fd: usize) -> isize {
     let new_fd = inner.alloc_fd();
     inner.fd_table[new_fd] = Some(Arc::clone(inner.fd_table[fd].as_ref().unwrap()));
     new_fd as isize
 }
@ -5,18 +5,32 @@ const SYSCALL_PIPE: usize = 59;
|
|||||||
const SYSCALL_READ: usize = 63;
|
const SYSCALL_READ: usize = 63;
|
||||||
const SYSCALL_WRITE: usize = 64;
|
const SYSCALL_WRITE: usize = 64;
|
||||||
const SYSCALL_EXIT: usize = 93;
|
const SYSCALL_EXIT: usize = 93;
|
||||||
|
const SYSCALL_SLEEP: usize = 101;
|
||||||
const SYSCALL_YIELD: usize = 124;
|
const SYSCALL_YIELD: usize = 124;
|
||||||
const SYSCALL_GET_TIME: usize = 169;
|
const SYSCALL_GET_TIME: usize = 169;
|
||||||
const SYSCALL_GETPID: usize = 172;
|
const SYSCALL_GETPID: usize = 172;
|
||||||
const SYSCALL_FORK: usize = 220;
|
const SYSCALL_FORK: usize = 220;
|
||||||
const SYSCALL_EXEC: usize = 221;
|
const SYSCALL_EXEC: usize = 221;
|
||||||
const SYSCALL_WAITPID: usize = 260;
|
const SYSCALL_WAITPID: usize = 260;
|
||||||
|
const SYSCALL_THREAD_CREATE: usize = 1000;
|
||||||
|
const SYSCALL_GETTID: usize = 1001;
|
||||||
|
const SYSCALL_WAITTID: usize = 1002;
|
||||||
|
const SYSCALL_MUTEX_CREATE: usize = 1010;
|
||||||
|
const SYSCALL_MUTEX_LOCK: usize = 1011;
|
||||||
|
const SYSCALL_MUTEX_UNLOCK: usize = 1012;
|
||||||
|
const SYSCALL_SEMAPHORE_CREATE: usize = 1020;
|
||||||
|
const SYSCALL_SEMAPHORE_UP: usize = 1021;
|
||||||
|
const SYSCALL_SEMAPHORE_DOWN: usize = 1022;
|
||||||
|
|
||||||
mod fs;
|
mod fs;
|
||||||
mod process;
|
mod process;
|
||||||
|
mod thread;
|
||||||
|
mod sync;
|
||||||
|
|
||||||
use fs::*;
|
use fs::*;
|
||||||
use process::*;
|
use process::*;
|
||||||
|
use thread::*;
|
||||||
|
use sync::*;
|
||||||
|
|
||||||
pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
|
pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
|
||||||
match syscall_id {
|
match syscall_id {
|
||||||
@ -27,12 +41,22 @@ pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
|
|||||||
SYSCALL_READ => sys_read(args[0], args[1] as *const u8, args[2]),
|
SYSCALL_READ => sys_read(args[0], args[1] as *const u8, args[2]),
|
||||||
SYSCALL_WRITE => sys_write(args[0], args[1] as *const u8, args[2]),
|
SYSCALL_WRITE => sys_write(args[0], args[1] as *const u8, args[2]),
|
||||||
SYSCALL_EXIT => sys_exit(args[0] as i32),
|
SYSCALL_EXIT => sys_exit(args[0] as i32),
|
||||||
|
SYSCALL_SLEEP => sys_sleep(args[0]),
|
||||||
SYSCALL_YIELD => sys_yield(),
|
SYSCALL_YIELD => sys_yield(),
|
||||||
SYSCALL_GET_TIME => sys_get_time(),
|
SYSCALL_GET_TIME => sys_get_time(),
|
||||||
SYSCALL_GETPID => sys_getpid(),
|
SYSCALL_GETPID => sys_getpid(),
|
||||||
SYSCALL_FORK => sys_fork(),
|
SYSCALL_FORK => sys_fork(),
|
||||||
SYSCALL_EXEC => sys_exec(args[0] as *const u8, args[1] as *const usize),
|
SYSCALL_EXEC => sys_exec(args[0] as *const u8, args[1] as *const usize),
|
||||||
SYSCALL_WAITPID => sys_waitpid(args[0] as isize, args[1] as *mut i32),
|
SYSCALL_WAITPID => sys_waitpid(args[0] as isize, args[1] as *mut i32),
|
||||||
|
SYSCALL_THREAD_CREATE => sys_thread_create(args[0], args[1]),
|
||||||
|
SYSCALL_GETTID => sys_gettid(),
|
||||||
|
SYSCALL_WAITTID => sys_waittid(args[0]) as isize,
|
||||||
|
SYSCALL_MUTEX_CREATE => sys_mutex_create(args[0] == 1),
|
||||||
|
SYSCALL_MUTEX_LOCK => sys_mutex_lock(args[0]),
|
||||||
|
SYSCALL_MUTEX_UNLOCK => sys_mutex_unlock(args[0]),
|
||||||
|
SYSCALL_SEMAPHORE_CREATE => sys_semaphore_creare(args[0]),
|
||||||
|
SYSCALL_SEMAPHORE_UP => sys_semaphore_up(args[0]),
|
||||||
|
SYSCALL_SEMAPHORE_DOWN => sys_semaphore_down(args[0]),
|
||||||
_ => panic!("Unsupported syscall_id: {}", syscall_id),
|
_ => panic!("Unsupported syscall_id: {}", syscall_id),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
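Each new dispatch arm above is reached from user space through a thin wrapper around a generic syscall helper. A minimal sketch of that user side, under the assumption that it mirrors the user_lib convention from earlier chapters (the wrapper names and the inline-assembly helper below are illustrative, not part of this diff):

```rust
// Hypothetical user-side plumbing, mirroring the user_lib style from
// earlier chapters; only the syscall numbers are taken from this diff.
const SYSCALL_SLEEP: usize = 101;
const SYSCALL_THREAD_CREATE: usize = 1000;
const SYSCALL_WAITTID: usize = 1002;
const SYSCALL_MUTEX_CREATE: usize = 1010;

// Generic trap into the kernel: id in x17, up to three args in x10..x12,
// return value comes back in x10 (standard RISC-V ecall convention).
fn syscall(id: usize, args: [usize; 3]) -> isize {
    let mut ret: isize;
    unsafe {
        core::arch::asm!(
            "ecall",
            inlateout("x10") args[0] => ret,
            in("x11") args[1],
            in("x12") args[2],
            in("x17") id,
        );
    }
    ret
}

fn sys_sleep(ms: usize) -> isize {
    syscall(SYSCALL_SLEEP, [ms, 0, 0])
}

fn sys_thread_create(entry: usize, arg: usize) -> isize {
    syscall(SYSCALL_THREAD_CREATE, [entry, arg, 0])
}

fn sys_waittid(tid: usize) -> isize {
    syscall(SYSCALL_WAITTID, [tid, 0, 0])
}

fn sys_mutex_create(blocking: bool) -> isize {
    // the kernel decodes args[0] == 1 as "create a blocking mutex"
    syscall(SYSCALL_MUTEX_CREATE, [if blocking { 1 } else { 0 }, 0, 0])
}
```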
@@ -2,8 +2,8 @@ use crate::task::{
     suspend_current_and_run_next,
     exit_current_and_run_next,
     current_task,
+    current_process,
     current_user_token,
-    add_task,
 };
 use crate::timer::get_time_ms;
 use crate::mm::{
@@ -34,20 +34,20 @@ pub fn sys_get_time() -> isize {
 }
 
 pub fn sys_getpid() -> isize {
-    current_task().unwrap().pid.0 as isize
+    current_task().unwrap().process.upgrade().unwrap().getpid() as isize
 }
 
 pub fn sys_fork() -> isize {
-    let current_task = current_task().unwrap();
-    let new_task = current_task.fork();
-    let new_pid = new_task.pid.0;
+    let current_process = current_process();
+    let new_process = current_process.fork();
+    let new_pid = new_process.getpid();
     // modify trap context of new_task, because it returns immediately after switching
-    let trap_cx = new_task.acquire_inner_lock().get_trap_cx();
+    let new_process_inner = new_process.inner_exclusive_access();
+    let task = new_process_inner.tasks[0].as_ref().unwrap();
+    let trap_cx = task.inner_exclusive_access().get_trap_cx();
     // we do not have to move to next instruction since we have done it before
     // for child process, fork returns 0
     trap_cx.x[10] = 0;
-    // add new task to scheduler
-    add_task(new_task);
     new_pid as isize
 }
 
@@ -65,9 +65,9 @@ pub fn sys_exec(path: *const u8, mut args: *const usize) -> isize {
     }
     if let Some(app_inode) = open_file(path.as_str(), OpenFlags::RDONLY) {
         let all_data = app_inode.read_all();
-        let task = current_task().unwrap();
+        let process = current_process();
         let argc = args_vec.len();
-        task.exec(all_data.as_slice(), args_vec);
+        process.exec(all_data.as_slice(), args_vec);
         // return argc because cx.x[10] will be covered with it later
         argc as isize
     } else {
@@ -78,38 +78,37 @@ pub fn sys_exec(path: *const u8, mut args: *const usize) -> isize {
 /// If there is not a child process whose pid is same as given, return -1.
 /// Else if there is a child process but it is still running, return -2.
 pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
-    let task = current_task().unwrap();
+    let process = current_process();
     // find a child process
 
-    // ---- hold current PCB lock
-    let mut inner = task.acquire_inner_lock();
+    let mut inner = process.inner_exclusive_access();
     if inner.children
         .iter()
         .find(|p| {pid == -1 || pid as usize == p.getpid()})
         .is_none() {
         return -1;
-        // ---- release current PCB lock
+        // ---- release current PCB
     }
     let pair = inner.children
         .iter()
         .enumerate()
         .find(|(_, p)| {
-            // ++++ temporarily hold child PCB lock
-            p.acquire_inner_lock().is_zombie() && (pid == -1 || pid as usize == p.getpid())
-            // ++++ release child PCB lock
+            // ++++ temporarily access child PCB exclusively
+            p.inner_exclusive_access().is_zombie && (pid == -1 || pid as usize == p.getpid())
+            // ++++ release child PCB
         });
     if let Some((idx, _)) = pair {
         let child = inner.children.remove(idx);
         // confirm that child will be deallocated after being removed from children list
         assert_eq!(Arc::strong_count(&child), 1);
         let found_pid = child.getpid();
-        // ++++ temporarily hold child lock
-        let exit_code = child.acquire_inner_lock().exit_code;
-        // ++++ release child PCB lock
+        // ++++ temporarily access child PCB exclusively
+        let exit_code = child.inner_exclusive_access().exit_code;
+        // ++++ release child PCB
         *translated_refmut(inner.memory_set.token(), exit_code_ptr) = exit_code;
         found_pid as isize
     } else {
        -2
    }
-    // ---- release current PCB lock automatically
+    // ---- release current PCB automatically
 }
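sys_waitpid keeps its contract from the previous chapter: -1 when no matching child exists, -2 when the child exists but is still running, otherwise the reaped pid. A user-side polling wrapper therefore retries on -2, as in this sketch, which reuses the hypothetical syscall helper shown after the dispatch table above (names follow the user_lib style but are illustrative only):

```rust
// Hypothetical raw wrappers, built on the `syscall` helper sketched earlier.
const SYSCALL_YIELD: usize = 124;
const SYSCALL_WAITPID: usize = 260;

fn sys_yield() -> isize {
    syscall(SYSCALL_YIELD, [0, 0, 0])
}

fn sys_waitpid(pid: isize, exit_code: *mut i32) -> isize {
    syscall(SYSCALL_WAITPID, [pid as usize, exit_code as usize, 0])
}

/// Block (by polling) until the chosen child becomes a zombie:
/// -2 means "still running", so yield and retry; anything else is
/// either -1 (no such child) or the reaped child's pid.
fn waitpid_blocking(pid: isize, exit_code: &mut i32) -> isize {
    loop {
        match sys_waitpid(pid, exit_code as *mut i32) {
            -2 => { sys_yield(); }
            other => return other,
        }
    }
}
```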
89  os/src/syscall/sync.rs  Normal file
@@ -0,0 +1,89 @@
+use crate::task::{current_task, current_process, block_current_and_run_next};
+use crate::sync::{MutexSpin, MutexBlocking, Semaphore};
+use crate::timer::{get_time_ms, add_timer};
+use alloc::sync::Arc;
+
+pub fn sys_sleep(ms: usize) -> isize {
+    let expire_ms = get_time_ms() + ms;
+    let task = current_task().unwrap();
+    add_timer(expire_ms, task);
+    block_current_and_run_next();
+    0
+}
+
+pub fn sys_mutex_create(blocking: bool) -> isize {
+    let process = current_process();
+    let mut process_inner = process.inner_exclusive_access();
+    if let Some(id) = process_inner
+        .mutex_list
+        .iter()
+        .enumerate()
+        .find(|(_, item)| item.is_none())
+        .map(|(id, _)| id) {
+        process_inner.mutex_list[id] = if !blocking {
+            Some(Arc::new(MutexSpin::new()))
+        } else {
+            Some(Arc::new(MutexBlocking::new()))
+        };
+        id as isize
+    } else {
+        // keep the requested kind when growing the list as well
+        process_inner.mutex_list.push(if !blocking {
+            Some(Arc::new(MutexSpin::new()))
+        } else {
+            Some(Arc::new(MutexBlocking::new()))
+        });
+        process_inner.mutex_list.len() as isize - 1
+    }
+}
+
+pub fn sys_mutex_lock(mutex_id: usize) -> isize {
+    let process = current_process();
+    let process_inner = process.inner_exclusive_access();
+    let mutex = Arc::clone(process_inner.mutex_list[mutex_id].as_ref().unwrap());
+    drop(process_inner);
+    drop(process);
+    mutex.lock();
+    0
+}
+
+pub fn sys_mutex_unlock(mutex_id: usize) -> isize {
+    let process = current_process();
+    let process_inner = process.inner_exclusive_access();
+    let mutex = Arc::clone(process_inner.mutex_list[mutex_id].as_ref().unwrap());
+    drop(process_inner);
+    drop(process);
+    mutex.unlock();
+    0
+}
+
+pub fn sys_semaphore_create(res_count: usize) -> isize {
+    let process = current_process();
+    let mut process_inner = process.inner_exclusive_access();
+    let id = if let Some(id) = process_inner
+        .semaphore_list
+        .iter()
+        .enumerate()
+        .find(|(_, item)| item.is_none())
+        .map(|(id, _)| id) {
+        process_inner.semaphore_list[id] = Some(Arc::new(Semaphore::new(res_count)));
+        id
+    } else {
+        process_inner.semaphore_list.push(Some(Arc::new(Semaphore::new(res_count))));
+        process_inner.semaphore_list.len() - 1
+    };
+    id as isize
+}
+
+pub fn sys_semaphore_up(sem_id: usize) -> isize {
+    let process = current_process();
+    let process_inner = process.inner_exclusive_access();
+    let sem = Arc::clone(process_inner.semaphore_list[sem_id].as_ref().unwrap());
+    drop(process_inner);
+    sem.up();
+    0
+}
+
+pub fn sys_semaphore_down(sem_id: usize) -> isize {
+    let process = current_process();
+    let process_inner = process.inner_exclusive_access();
+    let sem = Arc::clone(process_inner.semaphore_list[sem_id].as_ref().unwrap());
+    drop(process_inner);
+    sem.down();
+    0
+}
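Together, sys_mutex_create, sys_mutex_lock and sys_mutex_unlock give the threads of one process a lock table indexed by the returned id. A minimal usage sketch, assuming the thread_create, waittid, mutex_blocking_create, mutex_lock, mutex_unlock and exit wrappers of user_lib (thread count and iteration count are illustrative):

```rust
#![no_std]
#![no_main]

#[macro_use]
extern crate user_lib;

use user_lib::{exit, mutex_blocking_create, mutex_lock, mutex_unlock, thread_create, waittid};

static mut COUNTER: usize = 0;

fn worker(mutex_id: usize) -> ! {
    for _ in 0..1000 {
        mutex_lock(mutex_id);
        // the lock makes this read-modify-write atomic w.r.t. the other threads
        unsafe { COUNTER += 1; }
        mutex_unlock(mutex_id);
    }
    exit(0)
}

#[no_mangle]
pub fn main() -> i32 {
    let mutex_id = mutex_blocking_create() as usize;
    let mut tids = [0isize; 4];
    for tid in tids.iter_mut() {
        *tid = thread_create(worker as usize, mutex_id);
    }
    for tid in tids.iter() {
        waittid(*tid as usize);
    }
    println!("final counter = {}", unsafe { COUNTER });
    0
}
```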
71  os/src/syscall/thread.rs  Normal file
@@ -0,0 +1,71 @@
+use alloc::sync::Arc;
+use crate::{mm::kernel_token, task::{TaskControlBlock, add_task, current_task}, trap::{TrapContext, trap_handler}};
+
+pub fn sys_thread_create(entry: usize, arg: usize) -> isize {
+    let task = current_task().unwrap();
+    let process = task.process.upgrade().unwrap();
+    // create a new thread
+    let new_task = Arc::new(TaskControlBlock::new(
+        Arc::clone(&process),
+        task.inner_exclusive_access().res.as_ref().unwrap().ustack_base,
+        true,
+    ));
+    // add new task to scheduler
+    add_task(Arc::clone(&new_task));
+    let new_task_inner = new_task.inner_exclusive_access();
+    let new_task_res = new_task_inner.res.as_ref().unwrap();
+    let new_task_tid = new_task_res.tid;
+    let mut process_inner = process.inner_exclusive_access();
+    // add new thread to current process
+    let tasks = &mut process_inner.tasks;
+    while tasks.len() < new_task_tid + 1 {
+        tasks.push(None);
+    }
+    tasks[new_task_tid] = Some(Arc::clone(&new_task));
+    let new_task_trap_cx = new_task_inner.get_trap_cx();
+    *new_task_trap_cx = TrapContext::app_init_context(
+        entry,
+        new_task_res.ustack_top(),
+        kernel_token(),
+        new_task.kstack.get_top(),
+        trap_handler as usize,
+    );
+    (*new_task_trap_cx).x[10] = arg;
+    new_task_tid as isize
+}
+
+pub fn sys_gettid() -> isize {
+    current_task().unwrap().inner_exclusive_access().res.as_ref().unwrap().tid as isize
+}
+
+/// thread does not exist, return -1
+/// thread has not exited yet, return -2
+/// otherwise, return thread's exit code
+pub fn sys_waittid(tid: usize) -> i32 {
+    let task = current_task().unwrap();
+    let process = task.process.upgrade().unwrap();
+    let task_inner = task.inner_exclusive_access();
+    let mut process_inner = process.inner_exclusive_access();
+    // a thread cannot wait for itself
+    if task_inner.res.as_ref().unwrap().tid == tid {
+        return -1;
+    }
+    let mut exit_code: Option<i32> = None;
+    let waited_task = process_inner.tasks[tid].as_ref();
+    if let Some(waited_task) = waited_task {
+        if let Some(waited_exit_code) = waited_task.inner_exclusive_access().exit_code {
+            exit_code = Some(waited_exit_code);
+        }
+    } else {
+        // waited thread does not exist
+        return -1;
+    }
+    if let Some(exit_code) = exit_code {
+        // dealloc the exited thread
+        process_inner.tasks[tid] = None;
+        exit_code
+    } else {
+        // waited thread has not exited
+        -2
+    }
+}
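sys_waittid mirrors sys_waitpid at thread granularity: -1 for a missing tid or a self-wait, -2 while the target is still running, otherwise its exit code, and the TCB slot is only recycled after a successful wait. A small user-space sketch, assuming the thread_create, gettid, sleep, waittid and exit wrappers of user_lib (the argument and exit code are illustrative):

```rust
#![no_std]
#![no_main]

#[macro_use]
extern crate user_lib;

use user_lib::{exit, gettid, sleep, thread_create, waittid};

fn child(arg: usize) -> ! {
    println!("thread {} started with arg {}", gettid(), arg);
    sleep(100);   // stay alive long enough for main to observe it running
    exit(7)       // exit code that main reads back through waittid
}

#[no_mangle]
pub fn main() -> i32 {
    let tid = thread_create(child as usize, 5) as usize;
    // the user_lib waittid wrapper is assumed to retry internally on -2,
    // so this call returns only once the thread has exited.
    let code = waittid(tid);
    println!("thread {} exited with code {}", tid, code);
    0
}
```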
@@ -3,13 +3,22 @@ use crate::trap::trap_return;
 #[repr(C)]
 pub struct TaskContext {
     ra: usize,
+    sp: usize,
     s: [usize; 12],
 }
 
 impl TaskContext {
-    pub fn goto_trap_return() -> Self {
+    pub fn zero_init() -> Self {
+        Self {
+            ra: 0,
+            sp: 0,
+            s: [0; 12],
+        }
+    }
+    pub fn goto_trap_return(kstack_ptr: usize) -> Self {
         Self {
             ra: trap_return as usize,
+            sp: kstack_ptr,
             s: [0; 12],
         }
     }
 }
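With the new sp field, TaskContext is a fixed 14-word #[repr(C)] record stored inside the TCB, and __switch addresses it directly through a0/a1 rather than pushing it onto the kernel stack. A sketch of the layout the assembly relies on (offset comments assume RV64; the size check is illustrative, not part of the diff):

```rust
// Field layout that switch.S relies on (#[repr(C)], RV64):
//   offset 0:          ra    -> sd/ld ra, 0(aX)
//   offset 8:          sp    -> sd/ld sp, 8(aX)
//   offset 16 + 8*n:   s[n]  -> SAVE_SN / LOAD_SN use (n+2)*8(aX)
#[repr(C)]
#[allow(dead_code)]
pub struct TaskContext {
    ra: usize,
    sp: usize,
    s: [usize; 12],
}

// 14 machine words in total; a cheap compile-time check of that assumption.
const _: () = assert!(
    core::mem::size_of::<TaskContext>() == 14 * core::mem::size_of::<usize>()
);
```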
|
215
os/src/task/id.rs
Normal file
215
os/src/task/id.rs
Normal file
@ -0,0 +1,215 @@
|
|||||||
|
use alloc::{vec::Vec, sync::{Arc, Weak}};
|
||||||
|
use lazy_static::*;
|
||||||
|
use crate::sync::UPSafeCell;
|
||||||
|
use crate::mm::{KERNEL_SPACE, MapPermission, PhysPageNum, VirtAddr};
|
||||||
|
use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE, TRAMPOLINE, TRAP_CONTEXT_BASE, USER_STACK_SIZE};
|
||||||
|
use super::ProcessControlBlock;
|
||||||
|
|
||||||
|
pub struct RecycleAllocator {
|
||||||
|
current: usize,
|
||||||
|
recycled: Vec<usize>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RecycleAllocator {
|
||||||
|
pub fn new() -> Self {
|
||||||
|
RecycleAllocator {
|
||||||
|
current: 0,
|
||||||
|
recycled: Vec::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn alloc(&mut self) -> usize {
|
||||||
|
if let Some(id) = self.recycled.pop() {
|
||||||
|
id
|
||||||
|
} else {
|
||||||
|
self.current += 1;
|
||||||
|
self.current - 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn dealloc(&mut self, id: usize) {
|
||||||
|
assert!(id < self.current);
|
||||||
|
assert!(
|
||||||
|
self.recycled.iter().find(|i| **i == id).is_none(),
|
||||||
|
"id {} has been deallocated!", id
|
||||||
|
);
|
||||||
|
self.recycled.push(id);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
lazy_static! {
|
||||||
|
static ref PID_ALLOCATOR: UPSafeCell<RecycleAllocator> = unsafe {
|
||||||
|
UPSafeCell::new(RecycleAllocator::new())
|
||||||
|
};
|
||||||
|
|
||||||
|
static ref KSTACK_ALLOCATOR: UPSafeCell<RecycleAllocator> = unsafe {
|
||||||
|
UPSafeCell::new(RecycleAllocator::new())
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct PidHandle(pub usize);
|
||||||
|
|
||||||
|
pub fn pid_alloc() -> PidHandle {
|
||||||
|
PidHandle(PID_ALLOCATOR.exclusive_access().alloc())
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Drop for PidHandle {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
PID_ALLOCATOR.exclusive_access().dealloc(self.0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return (bottom, top) of a kernel stack in kernel space.
|
||||||
|
pub fn kernel_stack_position(kstack_id: usize) -> (usize, usize) {
|
||||||
|
let top = TRAMPOLINE - kstack_id * (KERNEL_STACK_SIZE + PAGE_SIZE);
|
||||||
|
let bottom = top - KERNEL_STACK_SIZE;
|
||||||
|
(bottom, top)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct KernelStack(pub usize);
|
||||||
|
|
||||||
|
pub fn kstack_alloc() -> KernelStack {
|
||||||
|
let kstack_id = KSTACK_ALLOCATOR.exclusive_access().alloc();
|
||||||
|
let (kstack_bottom, kstack_top) = kernel_stack_position(kstack_id);
|
||||||
|
KERNEL_SPACE
|
||||||
|
.exclusive_access()
|
||||||
|
.insert_framed_area(
|
||||||
|
kstack_bottom.into(),
|
||||||
|
kstack_top.into(),
|
||||||
|
MapPermission::R | MapPermission::W,
|
||||||
|
);
|
||||||
|
KernelStack(kstack_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Drop for KernelStack {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
let (kernel_stack_bottom, _) = kernel_stack_position(self.0);
|
||||||
|
let kernel_stack_bottom_va: VirtAddr = kernel_stack_bottom.into();
|
||||||
|
KERNEL_SPACE
|
||||||
|
.exclusive_access()
|
||||||
|
.remove_area_with_start_vpn(kernel_stack_bottom_va.into());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl KernelStack {
|
||||||
|
#[allow(unused)]
|
||||||
|
pub fn push_on_top<T>(&self, value: T) -> *mut T where
|
||||||
|
T: Sized, {
|
||||||
|
let kernel_stack_top = self.get_top();
|
||||||
|
let ptr_mut = (kernel_stack_top - core::mem::size_of::<T>()) as *mut T;
|
||||||
|
unsafe { *ptr_mut = value; }
|
||||||
|
ptr_mut
|
||||||
|
}
|
||||||
|
pub fn get_top(&self) -> usize {
|
||||||
|
let (_, kernel_stack_top) = kernel_stack_position(self.0);
|
||||||
|
kernel_stack_top
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct TaskUserRes {
|
||||||
|
pub tid: usize,
|
||||||
|
pub ustack_base: usize,
|
||||||
|
pub process: Weak<ProcessControlBlock>,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn trap_cx_bottom_from_tid(tid: usize) -> usize {
|
||||||
|
TRAP_CONTEXT_BASE - tid * PAGE_SIZE
|
||||||
|
}
|
||||||
|
|
||||||
|
fn ustack_bottom_from_tid(ustack_base: usize, tid: usize) -> usize {
|
||||||
|
ustack_base + tid * (PAGE_SIZE + USER_STACK_SIZE)
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TaskUserRes {
|
||||||
|
pub fn new(
|
||||||
|
process: Arc<ProcessControlBlock>,
|
||||||
|
ustack_base: usize,
|
||||||
|
alloc_user_res: bool,
|
||||||
|
) -> Self {
|
||||||
|
let tid = process.inner_exclusive_access().alloc_tid();
|
||||||
|
let task_user_res = Self {
|
||||||
|
tid,
|
||||||
|
ustack_base,
|
||||||
|
process: Arc::downgrade(&process),
|
||||||
|
};
|
||||||
|
if alloc_user_res {
|
||||||
|
task_user_res.alloc_user_res();
|
||||||
|
}
|
||||||
|
task_user_res
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn alloc_user_res(&self) {
|
||||||
|
let process = self.process.upgrade().unwrap();
|
||||||
|
let mut process_inner = process.inner_exclusive_access();
|
||||||
|
// alloc user stack
|
||||||
|
let ustack_bottom = ustack_bottom_from_tid(self.ustack_base, self.tid);
|
||||||
|
let ustack_top = ustack_bottom + USER_STACK_SIZE;
|
||||||
|
process_inner
|
||||||
|
.memory_set
|
||||||
|
.insert_framed_area(
|
||||||
|
ustack_bottom.into(),
|
||||||
|
ustack_top.into(),
|
||||||
|
MapPermission::R | MapPermission::W | MapPermission::U,
|
||||||
|
);
|
||||||
|
// alloc trap_cx
|
||||||
|
let trap_cx_bottom = trap_cx_bottom_from_tid(self.tid);
|
||||||
|
let trap_cx_top = trap_cx_bottom + PAGE_SIZE;
|
||||||
|
process_inner
|
||||||
|
.memory_set
|
||||||
|
.insert_framed_area(
|
||||||
|
trap_cx_bottom.into(),
|
||||||
|
trap_cx_top.into(),
|
||||||
|
MapPermission::R | MapPermission::W,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn dealloc_user_res(&self) {
|
||||||
|
// dealloc tid
|
||||||
|
let process = self.process.upgrade().unwrap();
|
||||||
|
let mut process_inner = process.inner_exclusive_access();
|
||||||
|
// dealloc ustack manually
|
||||||
|
let ustack_bottom_va: VirtAddr = ustack_bottom_from_tid(self.ustack_base, self.tid).into();
|
||||||
|
process_inner.memory_set.remove_area_with_start_vpn(ustack_bottom_va.into());
|
||||||
|
// dealloc trap_cx manually
|
||||||
|
let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
|
||||||
|
process_inner.memory_set.remove_area_with_start_vpn(trap_cx_bottom_va.into());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(unused)]
|
||||||
|
pub fn alloc_tid(&mut self) {
|
||||||
|
self.tid = self
|
||||||
|
.process
|
||||||
|
.upgrade()
|
||||||
|
.unwrap()
|
||||||
|
.inner_exclusive_access()
|
||||||
|
.alloc_tid();
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn dealloc_tid(&self) {
|
||||||
|
let process = self.process.upgrade().unwrap();
|
||||||
|
let mut process_inner = process.inner_exclusive_access();
|
||||||
|
process_inner.dealloc_tid(self.tid);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn trap_cx_user_va(&self) -> usize {
|
||||||
|
trap_cx_bottom_from_tid(self.tid)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn trap_cx_ppn(&self) -> PhysPageNum {
|
||||||
|
let process = self.process.upgrade().unwrap();
|
||||||
|
let process_inner = process.inner_exclusive_access();
|
||||||
|
let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
|
||||||
|
process_inner.memory_set.translate(trap_cx_bottom_va.into()).unwrap().ppn()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn ustack_base(&self) -> usize { self.ustack_base }
|
||||||
|
pub fn ustack_top(&self) -> usize {
|
||||||
|
ustack_bottom_from_tid(self.ustack_base, self.tid) + USER_STACK_SIZE
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Drop for TaskUserRes {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
self.dealloc_tid();
|
||||||
|
self.dealloc_user_res();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
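kernel_stack_position in the new id.rs places each kernel stack just below TRAMPOLINE, with one unmapped guard page between consecutive stacks. A standalone sketch of the same address arithmetic, with illustrative constants (the real values come from os/src/config.rs):

```rust
// Illustrative constants; the real ones live in os/src/config.rs.
const PAGE_SIZE: usize = 0x1000;
const KERNEL_STACK_SIZE: usize = 0x2000;
const TRAMPOLINE: usize = usize::MAX - PAGE_SIZE + 1;

/// Same arithmetic as id.rs: stack k sits below TRAMPOLINE, and the page
/// between neighbouring stacks is simply never mapped (a guard page).
fn kernel_stack_position(kstack_id: usize) -> (usize, usize) {
    let top = TRAMPOLINE - kstack_id * (KERNEL_STACK_SIZE + PAGE_SIZE);
    let bottom = top - KERNEL_STACK_SIZE;
    (bottom, top)
}

fn main() {
    let (b0, t0) = kernel_stack_position(0);
    let (b1, t1) = kernel_stack_position(1);
    assert_eq!(t0 - b0, KERNEL_STACK_SIZE);
    assert_eq!(t1 - b1, KERNEL_STACK_SIZE);
    // exactly one guard page separates stack 1's top from stack 0's bottom
    assert_eq!(b0 - t1, PAGE_SIZE);
}
```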
@@ -1,7 +1,7 @@
+use crate::sync::UPSafeCell;
 use super::TaskControlBlock;
 use alloc::collections::VecDeque;
 use alloc::sync::Arc;
-use spin::Mutex;
 use lazy_static::*;
 
 pub struct TaskManager {
@@ -22,13 +22,15 @@ impl TaskManager {
 }
 
 lazy_static! {
-    pub static ref TASK_MANAGER: Mutex<TaskManager> = Mutex::new(TaskManager::new());
+    pub static ref TASK_MANAGER: UPSafeCell<TaskManager> = unsafe {
+        UPSafeCell::new(TaskManager::new())
+    };
 }
 
 pub fn add_task(task: Arc<TaskControlBlock>) {
-    TASK_MANAGER.lock().add(task);
+    TASK_MANAGER.exclusive_access().add(task);
 }
 
 pub fn fetch_task() -> Option<Arc<TaskControlBlock>> {
-    TASK_MANAGER.lock().fetch()
+    TASK_MANAGER.exclusive_access().fetch()
 }
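TASK_MANAGER now lives behind UPSafeCell instead of spin::Mutex, so a double access panics instead of deadlocking, which is sound on a single hart. For reference, a minimal sketch of the UPSafeCell pattern assumed throughout this commit (the tutorial's actual definition lives in os/src/sync and may differ in detail):

```rust
use core::cell::{RefCell, RefMut};

/// Wrapper that lets a static be mutated on a uniprocessor kernel:
/// `exclusive_access` panics if the value is already borrowed, which is
/// exactly the double-access bug this design wants to surface early.
pub struct UPSafeCell<T> {
    inner: RefCell<T>,
}

// Sound only because the kernel runs on a single hart and never holds the
// borrow across a switch back into user space.
unsafe impl<T> Sync for UPSafeCell<T> {}

impl<T> UPSafeCell<T> {
    /// The caller promises single-core, non-reentrant use.
    pub unsafe fn new(value: T) -> Self {
        Self { inner: RefCell::new(value) }
    }
    pub fn exclusive_access(&self) -> RefMut<'_, T> {
        self.inner.borrow_mut()
    }
}
```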
@ -3,86 +3,121 @@ mod switch;
|
|||||||
mod task;
|
mod task;
|
||||||
mod manager;
|
mod manager;
|
||||||
mod processor;
|
mod processor;
|
||||||
mod pid;
|
mod id;
|
||||||
|
mod process;
|
||||||
|
|
||||||
use crate::fs::{open_file, OpenFlags};
|
use crate::fs::{open_file, OpenFlags};
|
||||||
use switch::__switch;
|
use switch::__switch;
|
||||||
use task::{TaskControlBlock, TaskStatus};
|
|
||||||
use alloc::sync::Arc;
|
use alloc::sync::Arc;
|
||||||
use manager::fetch_task;
|
use manager::fetch_task;
|
||||||
use lazy_static::*;
|
use lazy_static::*;
|
||||||
pub use context::TaskContext;
|
use process::ProcessControlBlock;
|
||||||
|
|
||||||
|
pub use context::TaskContext;
|
||||||
pub use processor::{
|
pub use processor::{
|
||||||
run_tasks,
|
run_tasks,
|
||||||
current_task,
|
current_task,
|
||||||
|
current_process,
|
||||||
current_user_token,
|
current_user_token,
|
||||||
|
current_trap_cx_user_va,
|
||||||
current_trap_cx,
|
current_trap_cx,
|
||||||
|
current_kstack_top,
|
||||||
take_current_task,
|
take_current_task,
|
||||||
schedule,
|
schedule,
|
||||||
};
|
};
|
||||||
|
pub use task::{TaskControlBlock, TaskStatus};
|
||||||
pub use manager::add_task;
|
pub use manager::add_task;
|
||||||
pub use pid::{PidHandle, pid_alloc, KernelStack};
|
pub use id::{
|
||||||
|
PidHandle,
|
||||||
|
pid_alloc,
|
||||||
|
KernelStack,
|
||||||
|
kstack_alloc,
|
||||||
|
};
|
||||||
|
|
||||||
pub fn suspend_current_and_run_next() {
|
pub fn suspend_current_and_run_next() {
|
||||||
// There must be an application running.
|
// There must be an application running.
|
||||||
let task = take_current_task().unwrap();
|
let task = take_current_task().unwrap();
|
||||||
|
|
||||||
// ---- hold current PCB lock
|
// ---- access current TCB exclusively
|
||||||
let mut task_inner = task.acquire_inner_lock();
|
let mut task_inner = task.inner_exclusive_access();
|
||||||
let task_cx_ptr2 = task_inner.get_task_cx_ptr2();
|
let task_cx_ptr = &mut task_inner.task_cx as *mut TaskContext;
|
||||||
// Change status to Ready
|
// Change status to Ready
|
||||||
task_inner.task_status = TaskStatus::Ready;
|
task_inner.task_status = TaskStatus::Ready;
|
||||||
drop(task_inner);
|
drop(task_inner);
|
||||||
// ---- release current PCB lock
|
// ---- release current TCB
|
||||||
|
|
||||||
// push back to ready queue.
|
// push back to ready queue.
|
||||||
add_task(task);
|
add_task(task);
|
||||||
// jump to scheduling cycle
|
// jump to scheduling cycle
|
||||||
schedule(task_cx_ptr2);
|
schedule(task_cx_ptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn block_current_and_run_next() {
|
||||||
|
let task = take_current_task().unwrap();
|
||||||
|
let mut task_inner = task.inner_exclusive_access();
|
||||||
|
let task_cx_ptr = &mut task_inner.task_cx as *mut TaskContext;
|
||||||
|
task_inner.task_status = TaskStatus::Blocking;
|
||||||
|
drop(task_inner);
|
||||||
|
schedule(task_cx_ptr);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn exit_current_and_run_next(exit_code: i32) {
|
pub fn exit_current_and_run_next(exit_code: i32) {
|
||||||
// take from Processor
|
|
||||||
let task = take_current_task().unwrap();
|
let task = take_current_task().unwrap();
|
||||||
// **** hold current PCB lock
|
let mut task_inner = task.inner_exclusive_access();
|
||||||
let mut inner = task.acquire_inner_lock();
|
let process = task.process.upgrade().unwrap();
|
||||||
// Change status to Zombie
|
let tid = task_inner.res.as_ref().unwrap().tid;
|
||||||
inner.task_status = TaskStatus::Zombie;
|
// record exit code
|
||||||
// Record exit code
|
task_inner.exit_code = Some(exit_code);
|
||||||
inner.exit_code = exit_code;
|
task_inner.res = None;
|
||||||
// do not move to its parent but under initproc
|
// here we do not remove the thread since we are still using the kstack
|
||||||
|
// it will be deallocated when sys_waittid is called
|
||||||
// ++++++ hold initproc PCB lock here
|
drop(task_inner);
|
||||||
{
|
|
||||||
let mut initproc_inner = INITPROC.acquire_inner_lock();
|
|
||||||
for child in inner.children.iter() {
|
|
||||||
child.acquire_inner_lock().parent = Some(Arc::downgrade(&INITPROC));
|
|
||||||
initproc_inner.children.push(child.clone());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// ++++++ release parent PCB lock here
|
|
||||||
|
|
||||||
inner.children.clear();
|
|
||||||
// deallocate user space
|
|
||||||
inner.memory_set.recycle_data_pages();
|
|
||||||
drop(inner);
|
|
||||||
// **** release current PCB lock
|
|
||||||
// drop task manually to maintain rc correctly
|
|
||||||
drop(task);
|
drop(task);
|
||||||
|
// however, if this is the main thread of current process
|
||||||
|
// the process should terminate at once
|
||||||
|
if tid == 0 {
|
||||||
|
let mut process_inner = process.inner_exclusive_access();
|
||||||
|
// mark this process as a zombie process
|
||||||
|
process_inner.is_zombie = true;
|
||||||
|
// record exit code of main process
|
||||||
|
process_inner.exit_code = exit_code;
|
||||||
|
|
||||||
|
{
|
||||||
|
// move all child processes under init process
|
||||||
|
let mut initproc_inner = INITPROC.inner_exclusive_access();
|
||||||
|
for child in process_inner.children.iter() {
|
||||||
|
child.inner_exclusive_access().parent = Some(Arc::downgrade(&INITPROC));
|
||||||
|
initproc_inner.children.push(child.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// deallocate user res (including tid/trap_cx/ustack) of all threads
|
||||||
|
// it has to be done before we dealloc the whole memory_set
|
||||||
|
// otherwise they will be deallocated twice
|
||||||
|
for task in process_inner.tasks.iter().filter(|t| t.is_some()) {
|
||||||
|
let task = task.as_ref().unwrap();
|
||||||
|
let mut task_inner = task.inner_exclusive_access();
|
||||||
|
task_inner.res = None;
|
||||||
|
}
|
||||||
|
|
||||||
|
process_inner.children.clear();
|
||||||
|
// deallocate other data in user space i.e. program code/data section
|
||||||
|
process_inner.memory_set.recycle_data_pages();
|
||||||
|
}
|
||||||
|
drop(process);
|
||||||
// we do not have to save task context
|
// we do not have to save task context
|
||||||
let _unused: usize = 0;
|
let mut _unused = TaskContext::zero_init();
|
||||||
schedule(&_unused as *const _);
|
schedule(&mut _unused as *mut _);
|
||||||
}
|
}
|
||||||
|
|
||||||
lazy_static! {
|
lazy_static! {
|
||||||
pub static ref INITPROC: Arc<TaskControlBlock> = Arc::new({
|
pub static ref INITPROC: Arc<ProcessControlBlock> = {
|
||||||
let inode = open_file("initproc", OpenFlags::RDONLY).unwrap();
|
let inode = open_file("initproc", OpenFlags::RDONLY).unwrap();
|
||||||
let v = inode.read_all();
|
let v = inode.read_all();
|
||||||
TaskControlBlock::new(v.as_slice())
|
ProcessControlBlock::new(v.as_slice())
|
||||||
});
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn add_initproc() {
|
pub fn add_initproc() {
|
||||||
add_task(INITPROC.clone());
|
let _initproc = INITPROC.clone();
|
||||||
}
|
}
|
||||||
|
@ -1,105 +0,0 @@
|
|||||||
use alloc::vec::Vec;
|
|
||||||
use lazy_static::*;
|
|
||||||
use spin::Mutex;
|
|
||||||
use crate::mm::{KERNEL_SPACE, MapPermission, VirtAddr};
|
|
||||||
use crate::config::{
|
|
||||||
PAGE_SIZE,
|
|
||||||
TRAMPOLINE,
|
|
||||||
KERNEL_STACK_SIZE,
|
|
||||||
};
|
|
||||||
|
|
||||||
struct PidAllocator {
|
|
||||||
current: usize,
|
|
||||||
recycled: Vec<usize>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl PidAllocator {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
PidAllocator {
|
|
||||||
current: 0,
|
|
||||||
recycled: Vec::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pub fn alloc(&mut self) -> PidHandle {
|
|
||||||
if let Some(pid) = self.recycled.pop() {
|
|
||||||
PidHandle(pid)
|
|
||||||
} else {
|
|
||||||
self.current += 1;
|
|
||||||
PidHandle(self.current - 1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pub fn dealloc(&mut self, pid: usize) {
|
|
||||||
assert!(pid < self.current);
|
|
||||||
assert!(
|
|
||||||
self.recycled.iter().find(|ppid| **ppid == pid).is_none(),
|
|
||||||
"pid {} has been deallocated!", pid
|
|
||||||
);
|
|
||||||
self.recycled.push(pid);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
lazy_static! {
|
|
||||||
static ref PID_ALLOCATOR : Mutex<PidAllocator> = Mutex::new(PidAllocator::new());
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct PidHandle(pub usize);
|
|
||||||
|
|
||||||
impl Drop for PidHandle {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
//println!("drop pid {}", self.0);
|
|
||||||
PID_ALLOCATOR.lock().dealloc(self.0);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn pid_alloc() -> PidHandle {
|
|
||||||
PID_ALLOCATOR.lock().alloc()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return (bottom, top) of a kernel stack in kernel space.
|
|
||||||
pub fn kernel_stack_position(app_id: usize) -> (usize, usize) {
|
|
||||||
let top = TRAMPOLINE - app_id * (KERNEL_STACK_SIZE + PAGE_SIZE);
|
|
||||||
let bottom = top - KERNEL_STACK_SIZE;
|
|
||||||
(bottom, top)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct KernelStack {
|
|
||||||
pid: usize,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl KernelStack {
|
|
||||||
pub fn new(pid_handle: &PidHandle) -> Self {
|
|
||||||
let pid = pid_handle.0;
|
|
||||||
let (kernel_stack_bottom, kernel_stack_top) = kernel_stack_position(pid);
|
|
||||||
KERNEL_SPACE
|
|
||||||
.lock()
|
|
||||||
.insert_framed_area(
|
|
||||||
kernel_stack_bottom.into(),
|
|
||||||
kernel_stack_top.into(),
|
|
||||||
MapPermission::R | MapPermission::W,
|
|
||||||
);
|
|
||||||
KernelStack {
|
|
||||||
pid: pid_handle.0,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pub fn push_on_top<T>(&self, value: T) -> *mut T where
|
|
||||||
T: Sized, {
|
|
||||||
let kernel_stack_top = self.get_top();
|
|
||||||
let ptr_mut = (kernel_stack_top - core::mem::size_of::<T>()) as *mut T;
|
|
||||||
unsafe { *ptr_mut = value; }
|
|
||||||
ptr_mut
|
|
||||||
}
|
|
||||||
pub fn get_top(&self) -> usize {
|
|
||||||
let (_, kernel_stack_top) = kernel_stack_position(self.pid);
|
|
||||||
kernel_stack_top
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Drop for KernelStack {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
let (kernel_stack_bottom, _) = kernel_stack_position(self.pid);
|
|
||||||
let kernel_stack_bottom_va: VirtAddr = kernel_stack_bottom.into();
|
|
||||||
KERNEL_SPACE
|
|
||||||
.lock()
|
|
||||||
.remove_area_with_start_vpn(kernel_stack_bottom_va.into());
|
|
||||||
}
|
|
||||||
}
|
|
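The deleted pid.rs above is subsumed by the generic RecycleAllocator in the new os/src/task/id.rs earlier in this diff, which hands out pids, kernel-stack ids and tids alike. A standalone sketch of its reuse behavior, mirroring the alloc/dealloc logic shown there:

```rust
// Minimal illustration of RecycleAllocator's id reuse (mirrors id.rs above).
struct RecycleAllocator {
    current: usize,
    recycled: Vec<usize>,
}

impl RecycleAllocator {
    fn new() -> Self {
        Self { current: 0, recycled: Vec::new() }
    }
    fn alloc(&mut self) -> usize {
        if let Some(id) = self.recycled.pop() {
            id
        } else {
            self.current += 1;
            self.current - 1
        }
    }
    fn dealloc(&mut self, id: usize) {
        assert!(id < self.current && !self.recycled.contains(&id));
        self.recycled.push(id);
    }
}

fn main() {
    let mut a = RecycleAllocator::new();
    let first = a.alloc();          // 0
    let _second = a.alloc();        // 1
    a.dealloc(first);
    assert_eq!(a.alloc(), 0);       // freed ids are reused before new ones
    assert_eq!(a.alloc(), 2);
}
```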
246
os/src/task/process.rs
Normal file
246
os/src/task/process.rs
Normal file
@ -0,0 +1,246 @@
|
|||||||
|
use crate::mm::{
|
||||||
|
MemorySet,
|
||||||
|
KERNEL_SPACE,
|
||||||
|
translated_refmut,
|
||||||
|
};
|
||||||
|
use crate::trap::{TrapContext, trap_handler};
|
||||||
|
use crate::sync::{UPSafeCell, Mutex, Semaphore};
|
||||||
|
use core::cell::RefMut;
|
||||||
|
use super::id::RecycleAllocator;
|
||||||
|
use super::TaskControlBlock;
|
||||||
|
use super::{PidHandle, pid_alloc};
|
||||||
|
use super::add_task;
|
||||||
|
use alloc::sync::{Weak, Arc};
|
||||||
|
use alloc::vec;
|
||||||
|
use alloc::vec::Vec;
|
||||||
|
use alloc::string::String;
|
||||||
|
use crate::fs::{File, Stdin, Stdout};
|
||||||
|
|
||||||
|
pub struct ProcessControlBlock {
|
||||||
|
// immutable
|
||||||
|
pub pid: PidHandle,
|
||||||
|
// mutable
|
||||||
|
inner: UPSafeCell<ProcessControlBlockInner>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct ProcessControlBlockInner {
|
||||||
|
pub is_zombie: bool,
|
||||||
|
pub memory_set: MemorySet,
|
||||||
|
pub parent: Option<Weak<ProcessControlBlock>>,
|
||||||
|
pub children: Vec<Arc<ProcessControlBlock>>,
|
||||||
|
pub exit_code: i32,
|
||||||
|
pub fd_table: Vec<Option<Arc<dyn File + Send + Sync>>>,
|
||||||
|
pub tasks: Vec<Option<Arc<TaskControlBlock>>>,
|
||||||
|
pub task_res_allocator: RecycleAllocator,
|
||||||
|
pub mutex_list: Vec<Option<Arc<dyn Mutex>>>,
|
||||||
|
pub semaphore_list: Vec<Option<Arc<Semaphore>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ProcessControlBlockInner {
|
||||||
|
#[allow(unused)]
|
||||||
|
pub fn get_user_token(&self) -> usize {
|
||||||
|
self.memory_set.token()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn alloc_fd(&mut self) -> usize {
|
||||||
|
if let Some(fd) = (0..self.fd_table.len())
|
||||||
|
.find(|fd| self.fd_table[*fd].is_none()) {
|
||||||
|
fd
|
||||||
|
} else {
|
||||||
|
self.fd_table.push(None);
|
||||||
|
self.fd_table.len() - 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn alloc_tid(&mut self) -> usize {
|
||||||
|
self.task_res_allocator.alloc()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn dealloc_tid(&mut self, tid: usize){
|
||||||
|
self.task_res_allocator.dealloc(tid)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn thread_count(&self) -> usize {
|
||||||
|
self.tasks.len()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_task(&self, tid: usize) -> Arc<TaskControlBlock> {
|
||||||
|
self.tasks[tid].as_ref().unwrap().clone()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ProcessControlBlock {
|
||||||
|
pub fn inner_exclusive_access(&self) -> RefMut<'_, ProcessControlBlockInner> {
|
||||||
|
self.inner.exclusive_access()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn new(elf_data: &[u8]) -> Arc<Self> {
|
||||||
|
// memory_set with elf program headers/trampoline/trap context/user stack
|
||||||
|
let (memory_set, ustack_base, entry_point) = MemorySet::from_elf(elf_data);
|
||||||
|
// allocate a pid
|
||||||
|
let pid_handle = pid_alloc();
|
||||||
|
let process = Arc::new(Self {
|
||||||
|
pid: pid_handle,
|
||||||
|
inner: unsafe { UPSafeCell::new(ProcessControlBlockInner {
|
||||||
|
is_zombie: false,
|
||||||
|
memory_set,
|
||||||
|
parent: None,
|
||||||
|
children: Vec::new(),
|
||||||
|
exit_code: 0,
|
||||||
|
fd_table: vec![
|
||||||
|
// 0 -> stdin
|
||||||
|
Some(Arc::new(Stdin)),
|
||||||
|
// 1 -> stdout
|
||||||
|
Some(Arc::new(Stdout)),
|
||||||
|
// 2 -> stderr
|
||||||
|
Some(Arc::new(Stdout)),
|
||||||
|
],
|
||||||
|
tasks: Vec::new(),
|
||||||
|
task_res_allocator: RecycleAllocator::new(),
|
||||||
|
mutex_list: Vec::new(),
|
||||||
|
semaphore_list: Vec::new(),
|
||||||
|
})}
|
||||||
|
});
|
||||||
|
// create a main thread, we should allocate ustack and trap_cx here
|
||||||
|
let task = Arc::new(TaskControlBlock::new(
|
||||||
|
Arc::clone(&process),
|
||||||
|
ustack_base,
|
||||||
|
true,
|
||||||
|
));
|
||||||
|
// prepare trap_cx of main thread
|
||||||
|
let task_inner = task.inner_exclusive_access();
|
||||||
|
let trap_cx = task_inner.get_trap_cx();
|
||||||
|
let ustack_top = task_inner.res.as_ref().unwrap().ustack_top();
|
||||||
|
let kstack_top = task.kstack.get_top();
|
||||||
|
drop(task_inner);
|
||||||
|
*trap_cx = TrapContext::app_init_context(
|
||||||
|
entry_point,
|
||||||
|
ustack_top,
|
||||||
|
KERNEL_SPACE.exclusive_access().token(),
|
||||||
|
kstack_top,
|
||||||
|
trap_handler as usize,
|
||||||
|
);
|
||||||
|
// add main thread to the process
|
||||||
|
let mut process_inner = process.inner_exclusive_access();
|
||||||
|
process_inner.tasks.push(Some(Arc::clone(&task)));
|
||||||
|
drop(process_inner);
|
||||||
|
// add main thread to scheduler
|
||||||
|
add_task(task);
|
||||||
|
process
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Only support processes with a single thread.
|
||||||
|
pub fn exec(self: &Arc<Self>, elf_data: &[u8], args: Vec<String>) {
|
||||||
|
assert_eq!(self.inner_exclusive_access().thread_count(), 1);
|
||||||
|
// memory_set with elf program headers/trampoline/trap context/user stack
|
||||||
|
let (memory_set, ustack_base, entry_point) = MemorySet::from_elf(elf_data);
|
||||||
|
let new_token = memory_set.token();
|
||||||
|
// substitute memory_set
|
||||||
|
self.inner_exclusive_access().memory_set = memory_set;
|
||||||
|
// then we alloc user resource for main thread again
|
||||||
|
// since memory_set has been changed
|
||||||
|
let task = self.inner_exclusive_access().get_task(0);
|
||||||
|
let mut task_inner = task.inner_exclusive_access();
|
||||||
|
task_inner.res.as_mut().unwrap().ustack_base = ustack_base;
|
||||||
|
task_inner.res.as_mut().unwrap().alloc_user_res();
|
||||||
|
task_inner.trap_cx_ppn = task_inner.res.as_mut().unwrap().trap_cx_ppn();
|
||||||
|
// push arguments on user stack
|
||||||
|
let mut user_sp = task_inner.res.as_mut().unwrap().ustack_top();
|
||||||
|
user_sp -= (args.len() + 1) * core::mem::size_of::<usize>();
|
||||||
|
let argv_base = user_sp;
|
||||||
|
let mut argv: Vec<_> = (0..=args.len())
|
||||||
|
.map(|arg| {
|
||||||
|
translated_refmut(
|
||||||
|
new_token,
|
||||||
|
(argv_base + arg * core::mem::size_of::<usize>()) as *mut usize
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
*argv[args.len()] = 0;
|
||||||
|
for i in 0..args.len() {
|
||||||
|
user_sp -= args[i].len() + 1;
|
||||||
|
*argv[i] = user_sp;
|
||||||
|
let mut p = user_sp;
|
||||||
|
for c in args[i].as_bytes() {
|
||||||
|
*translated_refmut(new_token, p as *mut u8) = *c;
|
||||||
|
p += 1;
|
||||||
|
}
|
||||||
|
*translated_refmut(new_token, p as *mut u8) = 0;
|
||||||
|
}
|
||||||
|
// make the user_sp aligned to 8B for k210 platform
|
||||||
|
user_sp -= user_sp % core::mem::size_of::<usize>();
|
||||||
|
// initialize trap_cx
|
||||||
|
let mut trap_cx = TrapContext::app_init_context(
|
||||||
|
entry_point,
|
||||||
|
user_sp,
|
||||||
|
KERNEL_SPACE.exclusive_access().token(),
|
||||||
|
task.kstack.get_top(),
|
||||||
|
trap_handler as usize,
|
||||||
|
);
|
||||||
|
trap_cx.x[10] = args.len();
|
||||||
|
trap_cx.x[11] = argv_base;
|
||||||
|
*task_inner.get_trap_cx() = trap_cx;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Only support processes with a single thread.
|
||||||
|
pub fn fork(self: &Arc<Self>) -> Arc<Self> {
|
||||||
|
let mut parent = self.inner_exclusive_access();
|
||||||
|
assert_eq!(parent.thread_count(), 1);
|
||||||
|
// clone parent's memory_set completely including trampoline/ustacks/trap_cxs
|
||||||
|
let memory_set = MemorySet::from_existed_user(&parent.memory_set);
|
||||||
|
// alloc a pid
|
||||||
|
let pid = pid_alloc();
|
||||||
|
// copy fd table
|
||||||
|
let mut new_fd_table: Vec<Option<Arc<dyn File + Send + Sync>>> = Vec::new();
|
||||||
|
for fd in parent.fd_table.iter() {
|
||||||
|
if let Some(file) = fd {
|
||||||
|
new_fd_table.push(Some(file.clone()));
|
||||||
|
} else {
|
||||||
|
new_fd_table.push(None);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// create child process pcb
|
||||||
|
let child = Arc::new(Self {
|
||||||
|
pid,
|
||||||
|
inner: unsafe { UPSafeCell::new(ProcessControlBlockInner {
|
||||||
|
is_zombie: false,
|
||||||
|
memory_set,
|
||||||
|
parent: Some(Arc::downgrade(self)),
|
||||||
|
children: Vec::new(),
|
||||||
|
exit_code: 0,
|
||||||
|
fd_table: new_fd_table,
|
||||||
|
tasks: Vec::new(),
|
||||||
|
task_res_allocator: RecycleAllocator::new(),
|
||||||
|
mutex_list: Vec::new(),
|
||||||
|
semaphore_list: Vec::new(),
|
||||||
|
})}
|
||||||
|
});
|
||||||
|
// add child
|
||||||
|
parent.children.push(Arc::clone(&child));
|
||||||
|
// create main thread of child process
|
||||||
|
let task = Arc::new(TaskControlBlock::new(
|
||||||
|
Arc::clone(&child),
|
||||||
|
parent.get_task(0).inner_exclusive_access().res.as_ref().unwrap().ustack_base(),
|
||||||
|
// here we do not allocate trap_cx or ustack again
|
||||||
|
// but mention that we allocate a new kstack here
|
||||||
|
false,
|
||||||
|
));
|
||||||
|
// attach task to child process
|
||||||
|
let mut child_inner = child.inner_exclusive_access();
|
||||||
|
child_inner.tasks.push(Some(Arc::clone(&task)));
|
||||||
|
drop(child_inner);
|
||||||
|
// modify kstack_top in trap_cx of this thread
|
||||||
|
let task_inner = task.inner_exclusive_access();
|
||||||
|
let trap_cx = task_inner.get_trap_cx();
|
||||||
|
trap_cx.kernel_sp = task.kstack.get_top();
|
||||||
|
drop(task_inner);
|
||||||
|
// add this thread to scheduler
|
||||||
|
add_task(task);
|
||||||
|
child
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn getpid(&self) -> usize {
|
||||||
|
self.pid.0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -1,95 +1,113 @@
|
|||||||
use super::TaskControlBlock;
|
use super::{TaskContext, TaskControlBlock, ProcessControlBlock};
|
||||||
use alloc::sync::Arc;
|
use alloc::sync::Arc;
|
||||||
use core::cell::RefCell;
|
|
||||||
use lazy_static::*;
|
use lazy_static::*;
|
||||||
use super::{fetch_task, TaskStatus};
|
use super::{fetch_task, TaskStatus};
|
||||||
use super::__switch;
|
use super::__switch;
|
||||||
use crate::trap::TrapContext;
|
use crate::trap::TrapContext;
|
||||||
|
use crate::sync::UPSafeCell;
|
||||||
|
|
||||||
pub struct Processor {
|
pub struct Processor {
|
||||||
inner: RefCell<ProcessorInner>,
|
|
||||||
}
|
|
||||||
|
|
||||||
unsafe impl Sync for Processor {}
|
|
||||||
|
|
||||||
struct ProcessorInner {
|
|
||||||
current: Option<Arc<TaskControlBlock>>,
|
current: Option<Arc<TaskControlBlock>>,
|
||||||
idle_task_cx_ptr: usize,
|
idle_task_cx: TaskContext,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Processor {
|
impl Processor {
|
||||||
pub fn new() -> Self {
|
pub fn new() -> Self {
|
||||||
Self {
|
Self {
|
||||||
inner: RefCell::new(ProcessorInner {
|
current: None,
|
||||||
current: None,
|
idle_task_cx: TaskContext::zero_init(),
|
||||||
idle_task_cx_ptr: 0,
|
|
||||||
}),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
fn get_idle_task_cx_ptr2(&self) -> *const usize {
|
fn get_idle_task_cx_ptr(&mut self) -> *mut TaskContext {
|
||||||
let inner = self.inner.borrow();
|
&mut self.idle_task_cx as *mut _
|
||||||
&inner.idle_task_cx_ptr as *const usize
|
|
||||||
}
|
}
|
||||||
pub fn run(&self) {
|
pub fn take_current(&mut self) -> Option<Arc<TaskControlBlock>> {
|
||||||
loop {
|
self.current.take()
|
||||||
if let Some(task) = fetch_task() {
|
|
||||||
let idle_task_cx_ptr2 = self.get_idle_task_cx_ptr2();
|
|
||||||
// acquire
|
|
||||||
let mut task_inner = task.acquire_inner_lock();
|
|
||||||
let next_task_cx_ptr2 = task_inner.get_task_cx_ptr2();
|
|
||||||
task_inner.task_status = TaskStatus::Running;
|
|
||||||
drop(task_inner);
|
|
||||||
// release
|
|
||||||
self.inner.borrow_mut().current = Some(task);
|
|
||||||
unsafe {
|
|
||||||
__switch(
|
|
||||||
idle_task_cx_ptr2,
|
|
||||||
next_task_cx_ptr2,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pub fn take_current(&self) -> Option<Arc<TaskControlBlock>> {
|
|
||||||
self.inner.borrow_mut().current.take()
|
|
||||||
}
|
}
|
||||||
pub fn current(&self) -> Option<Arc<TaskControlBlock>> {
|
pub fn current(&self) -> Option<Arc<TaskControlBlock>> {
|
||||||
self.inner.borrow().current.as_ref().map(|task| Arc::clone(task))
|
self.current.as_ref().map(|task| Arc::clone(task))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
lazy_static! {
|
lazy_static! {
|
||||||
pub static ref PROCESSOR: Processor = Processor::new();
|
pub static ref PROCESSOR: UPSafeCell<Processor> = unsafe {
|
||||||
|
UPSafeCell::new(Processor::new())
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn run_tasks() {
|
pub fn run_tasks() {
|
||||||
PROCESSOR.run();
|
loop {
|
||||||
|
let mut processor = PROCESSOR.exclusive_access();
|
||||||
|
if let Some(task) = fetch_task() {
|
||||||
|
let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
|
||||||
|
// access coming task TCB exclusively
|
||||||
|
let mut task_inner = task.inner_exclusive_access();
|
||||||
|
let next_task_cx_ptr = &task_inner.task_cx as *const TaskContext;
|
||||||
|
task_inner.task_status = TaskStatus::Running;
|
||||||
|
drop(task_inner);
|
||||||
|
// release coming task TCB manually
|
||||||
|
processor.current = Some(task);
|
||||||
|
// release processor manually
|
||||||
|
drop(processor);
|
||||||
|
unsafe {
|
||||||
|
__switch(
|
||||||
|
idle_task_cx_ptr,
|
||||||
|
next_task_cx_ptr,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
println!("no tasks available in run_tasks");
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn take_current_task() -> Option<Arc<TaskControlBlock>> {
|
pub fn take_current_task() -> Option<Arc<TaskControlBlock>> {
|
||||||
PROCESSOR.take_current()
|
PROCESSOR.exclusive_access().take_current()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn current_task() -> Option<Arc<TaskControlBlock>> {
|
pub fn current_task() -> Option<Arc<TaskControlBlock>> {
|
||||||
PROCESSOR.current()
|
PROCESSOR.exclusive_access().current()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn current_process() -> Arc<ProcessControlBlock> {
|
||||||
|
current_task().unwrap().process.upgrade().unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn current_user_token() -> usize {
|
pub fn current_user_token() -> usize {
|
||||||
let task = current_task().unwrap();
|
let task = current_task().unwrap();
|
||||||
let token = task.acquire_inner_lock().get_user_token();
|
let token = task.get_user_token();
|
||||||
token
|
token
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn current_trap_cx() -> &'static mut TrapContext {
|
pub fn current_trap_cx() -> &'static mut TrapContext {
|
||||||
current_task().unwrap().acquire_inner_lock().get_trap_cx()
|
current_task().unwrap().inner_exclusive_access().get_trap_cx()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn schedule(switched_task_cx_ptr2: *const usize) {
|
pub fn current_trap_cx_user_va() -> usize {
|
||||||
let idle_task_cx_ptr2 = PROCESSOR.get_idle_task_cx_ptr2();
|
current_task()
|
||||||
|
.unwrap()
|
||||||
|
.inner_exclusive_access()
|
||||||
|
.res
|
||||||
|
.as_ref()
|
||||||
|
.unwrap()
|
||||||
|
.trap_cx_user_va()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn current_kstack_top() -> usize {
|
||||||
|
current_task()
|
||||||
|
.unwrap()
|
||||||
|
.kstack
|
||||||
|
.get_top()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
|
||||||
|
let mut processor = PROCESSOR.exclusive_access();
|
||||||
|
let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
|
||||||
|
drop(processor);
|
||||||
unsafe {
|
unsafe {
|
||||||
__switch(
|
__switch(
|
||||||
switched_task_cx_ptr2,
|
switched_task_cx_ptr,
|
||||||
idle_task_cx_ptr2,
|
idle_task_cx_ptr,
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -1,37 +1,34 @@
 .altmacro
 .macro SAVE_SN n
-    sd s\n, (\n+1)*8(sp)
+    sd s\n, (\n+2)*8(a0)
 .endm
 .macro LOAD_SN n
-    ld s\n, (\n+1)*8(sp)
+    ld s\n, (\n+2)*8(a1)
 .endm
     .section .text
     .globl __switch
 __switch:
     # __switch(
-    #     current_task_cx_ptr2: &*const TaskContext,
-    #     next_task_cx_ptr2: &*const TaskContext
+    #     current_task_cx_ptr: *mut TaskContext,
+    #     next_task_cx_ptr: *const TaskContext
     # )
-    # push TaskContext to current sp and save its address to where a0 points to
-    addi sp, sp, -13*8
-    sd sp, 0(a0)
-    # fill TaskContext with ra & s0-s11
-    sd ra, 0(sp)
+    # save kernel stack of current task
+    sd sp, 8(a0)
+    # save ra & s0~s11 of current execution
+    sd ra, 0(a0)
     .set n, 0
     .rept 12
         SAVE_SN %n
         .set n, n + 1
     .endr
-    # ready for loading TaskContext a1 points to
-    ld sp, 0(a1)
-    # load registers in the TaskContext
-    ld ra, 0(sp)
+    # restore ra & s0~s11 of next execution
+    ld ra, 0(a1)
     .set n, 0
     .rept 12
         LOAD_SN %n
         .set n, n + 1
     .endr
-    # pop TaskContext
-    addi sp, sp, 13*8
+    # restore kernel stack of next task
+    ld sp, 8(a1)
     ret
@@ -1,8 +1,10 @@
 global_asm!(include_str!("switch.S"));
 
+use super::TaskContext;
+
 extern "C" {
     pub fn __switch(
-        current_task_cx_ptr2: *const usize,
-        next_task_cx_ptr2: *const usize
+        current_task_cx_ptr: *mut TaskContext,
+        next_task_cx_ptr: *const TaskContext
     );
 }
@ -1,230 +1,78 @@
|
|||||||
use crate::mm::{
|
use alloc::sync::{Arc, Weak};
|
||||||
MemorySet,
|
use crate::{mm::PhysPageNum, sync::UPSafeCell};
|
||||||
PhysPageNum,
|
use crate::trap::TrapContext;
|
||||||
KERNEL_SPACE,
|
use super::id::TaskUserRes;
|
||||||
VirtAddr,
|
use super::{KernelStack, ProcessControlBlock, TaskContext, kstack_alloc};
|
||||||
translated_refmut,
|
use core::cell::RefMut;
|
||||||
};
|
|
||||||
use crate::trap::{TrapContext, trap_handler};
|
|
||||||
use crate::config::{TRAP_CONTEXT};
|
|
||||||
use super::TaskContext;
|
|
||||||
use super::{PidHandle, pid_alloc, KernelStack};
|
|
||||||
use alloc::sync::{Weak, Arc};
|
|
||||||
use alloc::vec;
|
|
||||||
use alloc::vec::Vec;
|
|
||||||
use alloc::string::String;
|
|
||||||
use spin::{Mutex, MutexGuard};
|
|
||||||
use crate::fs::{File, Stdin, Stdout};
|
|
||||||
|
|
||||||
pub struct TaskControlBlock {
|
pub struct TaskControlBlock {
|
||||||
// immutable
|
// immutable
|
||||||
pub pid: PidHandle,
|
pub process: Weak<ProcessControlBlock>,
|
||||||
pub kernel_stack: KernelStack,
|
pub kstack: KernelStack,
|
||||||
// mutable
|
// mutable
|
||||||
inner: Mutex<TaskControlBlockInner>,
|
inner: UPSafeCell<TaskControlBlockInner>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TaskControlBlock {
|
||||||
|
pub fn inner_exclusive_access(&self) -> RefMut<'_, TaskControlBlockInner> {
|
||||||
|
self.inner.exclusive_access()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_user_token(&self) -> usize {
|
||||||
|
let process = self.process.upgrade().unwrap();
|
||||||
|
let inner = process.inner_exclusive_access();
|
||||||
|
inner.memory_set.token()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct TaskControlBlockInner {
|
pub struct TaskControlBlockInner {
|
||||||
|
pub res: Option<TaskUserRes>,
|
||||||
pub trap_cx_ppn: PhysPageNum,
|
pub trap_cx_ppn: PhysPageNum,
|
||||||
pub base_size: usize,
|
pub task_cx: TaskContext,
|
||||||
pub task_cx_ptr: usize,
|
|
||||||
pub task_status: TaskStatus,
|
pub task_status: TaskStatus,
|
||||||
pub memory_set: MemorySet,
|
pub exit_code: Option<i32>,
|
||||||
pub parent: Option<Weak<TaskControlBlock>>,
|
|
||||||
pub children: Vec<Arc<TaskControlBlock>>,
|
|
||||||
pub exit_code: i32,
|
|
||||||
pub fd_table: Vec<Option<Arc<dyn File + Send + Sync>>>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl TaskControlBlockInner {
|
impl TaskControlBlockInner {
|
||||||
pub fn get_task_cx_ptr2(&self) -> *const usize {
|
|
||||||
&self.task_cx_ptr as *const usize
|
|
||||||
}
|
|
||||||
pub fn get_trap_cx(&self) -> &'static mut TrapContext {
|
pub fn get_trap_cx(&self) -> &'static mut TrapContext {
|
||||||
self.trap_cx_ppn.get_mut()
|
self.trap_cx_ppn.get_mut()
|
||||||
}
|
}
|
||||||
pub fn get_user_token(&self) -> usize {
|
|
||||||
self.memory_set.token()
|
#[allow(unused)]
|
||||||
}
|
|
||||||
fn get_status(&self) -> TaskStatus {
|
fn get_status(&self) -> TaskStatus {
|
||||||
self.task_status
|
self.task_status
|
||||||
}
|
}
|
||||||
pub fn is_zombie(&self) -> bool {
|
|
||||||
self.get_status() == TaskStatus::Zombie
|
|
||||||
}
|
|
||||||
pub fn alloc_fd(&mut self) -> usize {
|
|
||||||
if let Some(fd) = (0..self.fd_table.len())
|
|
||||||
.find(|fd| self.fd_table[*fd].is_none()) {
|
|
||||||
fd
|
|
||||||
} else {
|
|
||||||
self.fd_table.push(None);
|
|
||||||
self.fd_table.len() - 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl TaskControlBlock {
+    pub fn new(
+        process: Arc<ProcessControlBlock>,
+        ustack_base: usize,
+        alloc_user_res: bool
+    ) -> Self {
+        let res = TaskUserRes::new(Arc::clone(&process), ustack_base, alloc_user_res);
+        let trap_cx_ppn = res.trap_cx_ppn();
+        let kstack = kstack_alloc();
+        let kstack_top = kstack.get_top();
+        Self {
+            process: Arc::downgrade(&process),
+            kstack,
+            inner: unsafe { UPSafeCell::new(
+                TaskControlBlockInner {
+                    res: Some(res),
+                    trap_cx_ppn,
+                    task_cx: TaskContext::goto_trap_return(kstack_top),
+                    task_status: TaskStatus::Ready,
+                    exit_code: None,
+                }
+            )},
+        }
+    }
-    pub fn acquire_inner_lock(&self) -> MutexGuard<TaskControlBlockInner> {
-        self.inner.lock()
-    }
-    pub fn new(elf_data: &[u8]) -> Self {
-        // memory_set with elf program headers/trampoline/trap context/user stack
-        let (memory_set, user_sp, entry_point) = MemorySet::from_elf(elf_data);
-        let trap_cx_ppn = memory_set
-            .translate(VirtAddr::from(TRAP_CONTEXT).into())
-            .unwrap()
-            .ppn();
-        // alloc a pid and a kernel stack in kernel space
-        let pid_handle = pid_alloc();
-        let kernel_stack = KernelStack::new(&pid_handle);
-        let kernel_stack_top = kernel_stack.get_top();
-        // push a task context which goes to trap_return to the top of kernel stack
-        let task_cx_ptr = kernel_stack.push_on_top(TaskContext::goto_trap_return());
-        let task_control_block = Self {
-            pid: pid_handle,
-            kernel_stack,
-            inner: Mutex::new(TaskControlBlockInner {
-                trap_cx_ppn,
-                base_size: user_sp,
-                task_cx_ptr: task_cx_ptr as usize,
-                task_status: TaskStatus::Ready,
-                memory_set,
-                parent: None,
-                children: Vec::new(),
-                exit_code: 0,
-                fd_table: vec![
-                    // 0 -> stdin
-                    Some(Arc::new(Stdin)),
-                    // 1 -> stdout
-                    Some(Arc::new(Stdout)),
-                    // 2 -> stderr
-                    Some(Arc::new(Stdout)),
-                ],
-            }),
-        };
-        // prepare TrapContext in user space
-        let trap_cx = task_control_block.acquire_inner_lock().get_trap_cx();
-        *trap_cx = TrapContext::app_init_context(
-            entry_point,
-            user_sp,
-            KERNEL_SPACE.lock().token(),
-            kernel_stack_top,
-            trap_handler as usize,
-        );
-        task_control_block
-    }
-    pub fn exec(&self, elf_data: &[u8], args: Vec<String>) {
-        // memory_set with elf program headers/trampoline/trap context/user stack
-        let (memory_set, mut user_sp, entry_point) = MemorySet::from_elf(elf_data);
-        let trap_cx_ppn = memory_set
-            .translate(VirtAddr::from(TRAP_CONTEXT).into())
-            .unwrap()
-            .ppn();
-        // push arguments on user stack
-        user_sp -= (args.len() + 1) * core::mem::size_of::<usize>();
-        let argv_base = user_sp;
-        let mut argv: Vec<_> = (0..=args.len())
-            .map(|arg| {
-                translated_refmut(
-                    memory_set.token(),
-                    (argv_base + arg * core::mem::size_of::<usize>()) as *mut usize
-                )
-            })
-            .collect();
-        *argv[args.len()] = 0;
-        for i in 0..args.len() {
-            user_sp -= args[i].len() + 1;
-            *argv[i] = user_sp;
-            let mut p = user_sp;
-            for c in args[i].as_bytes() {
-                *translated_refmut(memory_set.token(), p as *mut u8) = *c;
-                p += 1;
-            }
-            *translated_refmut(memory_set.token(), p as *mut u8) = 0;
-        }
-        // make the user_sp aligned to 8B for k210 platform
-        user_sp -= user_sp % core::mem::size_of::<usize>();
-        // **** hold current PCB lock
-        let mut inner = self.acquire_inner_lock();
-        // substitute memory_set
-        inner.memory_set = memory_set;
-        // update trap_cx ppn
-        inner.trap_cx_ppn = trap_cx_ppn;
-        // initialize trap_cx
-        let mut trap_cx = TrapContext::app_init_context(
-            entry_point,
-            user_sp,
-            KERNEL_SPACE.lock().token(),
-            self.kernel_stack.get_top(),
-            trap_handler as usize,
-        );
-        trap_cx.x[10] = args.len();
-        trap_cx.x[11] = argv_base;
-        *inner.get_trap_cx() = trap_cx;
-        // **** release current PCB lock
-    }
-    pub fn fork(self: &Arc<TaskControlBlock>) -> Arc<TaskControlBlock> {
-        // ---- hold parent PCB lock
-        let mut parent_inner = self.acquire_inner_lock();
-        // copy user space(include trap context)
-        let memory_set = MemorySet::from_existed_user(
-            &parent_inner.memory_set
-        );
-        let trap_cx_ppn = memory_set
-            .translate(VirtAddr::from(TRAP_CONTEXT).into())
-            .unwrap()
-            .ppn();
-        // alloc a pid and a kernel stack in kernel space
-        let pid_handle = pid_alloc();
-        let kernel_stack = KernelStack::new(&pid_handle);
-        let kernel_stack_top = kernel_stack.get_top();
-        // push a goto_trap_return task_cx on the top of kernel stack
-        let task_cx_ptr = kernel_stack.push_on_top(TaskContext::goto_trap_return());
-        // copy fd table
-        let mut new_fd_table: Vec<Option<Arc<dyn File + Send + Sync>>> = Vec::new();
-        for fd in parent_inner.fd_table.iter() {
-            if let Some(file) = fd {
-                new_fd_table.push(Some(file.clone()));
-            } else {
-                new_fd_table.push(None);
-            }
-        }
-        let task_control_block = Arc::new(TaskControlBlock {
-            pid: pid_handle,
-            kernel_stack,
-            inner: Mutex::new(TaskControlBlockInner {
-                trap_cx_ppn,
-                base_size: parent_inner.base_size,
-                task_cx_ptr: task_cx_ptr as usize,
-                task_status: TaskStatus::Ready,
-                memory_set,
-                parent: Some(Arc::downgrade(self)),
-                children: Vec::new(),
-                exit_code: 0,
-                fd_table: new_fd_table,
-            }),
-        });
-        // add child
-        parent_inner.children.push(task_control_block.clone());
-        // modify kernel_sp in trap_cx
-        // **** acquire child PCB lock
-        let trap_cx = task_control_block.acquire_inner_lock().get_trap_cx();
-        // **** release child PCB lock
-        trap_cx.kernel_sp = kernel_stack_top;
-        // return
-        task_control_block
-        // ---- release parent PCB lock
-    }
-    pub fn getpid(&self) -> usize {
-        self.pid.0
-    }
}

#[derive(Copy, Clone, PartialEq)]
pub enum TaskStatus {
    Ready,
    Running,
-    Zombie,
+    Blocking,
}
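For orientation, here is a minimal sketch of how a caller (for example a thread-creation syscall handler) might use the new thread-oriented constructor above. The helpers `current_task`, `add_task`, `inner_exclusive_access` and the `ustack_base` accessor are assumptions taken from the rest of the ch8 kernel; they are not part of this hunk.

// Hedged sketch only: helper names below are assumptions, not introduced by this diff.
use crate::task::{add_task, current_task};

fn spawn_thread_in_current_process() {
    let task = current_task().unwrap();
    let process = task.process.upgrade().unwrap();
    let ustack_base = task.inner_exclusive_access().res.as_ref().unwrap().ustack_base();
    // New thread in the same address space; user stack and trap context come from TaskUserRes.
    let new_task = Arc::new(TaskControlBlock::new(Arc::clone(&process), ustack_base, true));
    add_task(new_task);
}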
@ -1,6 +1,13 @@
+use core::cmp::Ordering;
use riscv::register::time;
use crate::sbi::set_timer;
use crate::config::CLOCK_FREQ;
+use crate::task::{TaskControlBlock, add_task};
+use crate::sync::UPSafeCell;
+use alloc::collections::BinaryHeap;
+use alloc::sync::Arc;
+use lazy_static::*;

const TICKS_PER_SEC: usize = 100;
const MSEC_PER_SEC: usize = 1000;
@ -15,4 +22,55 @@ pub fn get_time_ms() -> usize {

pub fn set_next_trigger() {
    set_timer(get_time() + CLOCK_FREQ / TICKS_PER_SEC);
}

+pub struct TimerCondVar {
+    pub expire_ms: usize,
+    pub task: Arc<TaskControlBlock>,
+}
+
+impl PartialEq for TimerCondVar {
+    fn eq(&self, other: &Self) -> bool {
+        self.expire_ms == other.expire_ms
+    }
+}
+impl Eq for TimerCondVar {}
+impl PartialOrd for TimerCondVar {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        let a = -(self.expire_ms as isize);
+        let b = -(other.expire_ms as isize);
+        Some(a.cmp(&b))
+    }
+}
+
+impl Ord for TimerCondVar {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.partial_cmp(other).unwrap()
+    }
+}
+
+lazy_static! {
+    static ref TIMERS: UPSafeCell<BinaryHeap<TimerCondVar>> = unsafe { UPSafeCell::new(
+        BinaryHeap::<TimerCondVar>::new()
+    )};
+}
+
+pub fn add_timer(expire_ms: usize, task: Arc<TaskControlBlock>) {
+    let mut timers = TIMERS.exclusive_access();
+    timers.push(TimerCondVar {
+        expire_ms,
+        task,
+    });
+}
+
+pub fn check_timer() {
+    let current_ms = get_time_ms();
+    let mut timers = TIMERS.exclusive_access();
+    while let Some(timer) = timers.peek() {
+        if timer.expire_ms <= current_ms {
+            add_task(Arc::clone(&timer.task));
+            drop(timer);
+            timers.pop();
+        } else { break; }
+    }
+}
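Note that `TimerCondVar` negates `expire_ms` in its ordering so that `BinaryHeap`, a max-heap, pops the soonest deadline first. As a sketch of how these pieces fit together, a sleep syscall can register the current task with `add_timer` and then block; `current_task` and `block_current_and_run_next` are assumed task-module helpers, not part of this hunk.

// Hedged sketch: a sleep syscall built on add_timer/check_timer.
use crate::task::{block_current_and_run_next, current_task};

pub fn sys_sleep(ms: usize) -> isize {
    let expire_ms = get_time_ms() + ms;
    let task = current_task().unwrap();
    add_timer(expire_ms, task);
    // The task stays off the ready queue until check_timer() re-adds it on a timer interrupt.
    block_current_and_run_next();
    0
}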
@ -18,9 +18,10 @@ use crate::task::{
    suspend_current_and_run_next,
    current_user_token,
    current_trap_cx,
+    current_trap_cx_user_va,
};
-use crate::timer::set_next_trigger;
+use crate::timer::{set_next_trigger, check_timer};
-use crate::config::{TRAP_CONTEXT, TRAMPOLINE};
+use crate::config::TRAMPOLINE;

global_asm!(include_str!("trap.S"));

@ -82,6 +83,7 @@ pub fn trap_handler() -> ! {
        }
        Trap::Interrupt(Interrupt::SupervisorTimer) => {
            set_next_trigger();
+            check_timer();
            suspend_current_and_run_next();
        }
        _ => {
@ -95,7 +97,7 @@ pub fn trap_handler() -> ! {
#[no_mangle]
pub fn trap_return() -> ! {
    set_user_trap_entry();
-    let trap_cx_ptr = TRAP_CONTEXT;
+    let trap_cx_user_va = current_trap_cx_user_va();
    let user_satp = current_user_token();
    extern "C" {
        fn __alltraps();
@ -103,15 +105,22 @@ pub fn trap_return() -> ! {
    }
    let restore_va = __restore as usize - __alltraps as usize + TRAMPOLINE;
    unsafe {
-        llvm_asm!("fence.i" :::: "volatile");
-        llvm_asm!("jr $0" :: "r"(restore_va), "{a0}"(trap_cx_ptr), "{a1}"(user_satp) :: "volatile");
+        asm!(
+            "fence.i",
+            "jr {restore_va}",
+            restore_va = in(reg) restore_va,
+            in("a0") trap_cx_user_va,
+            in("a1") user_satp,
+            options(noreturn)
+        );
    }
-    panic!("Unreachable in back_to_user!");
}

#[no_mangle]
pub fn trap_from_kernel() -> ! {
+    use riscv::register::sepc;
+    println!("stval = {:#x}, sepc = {:#x}", stval::read(), sepc::read());
    panic!("a trap {:?} from kernel!", scause::read().cause());
}

-pub use context::{TrapContext};
+pub use context::TrapContext;
@ -1 +1 @@
-nightly-2021-01-30
+nightly-2021-10-15
@ -14,7 +14,7 @@ fn fork_child(cur: &str, branch: char) {
    if l >= DEPTH {
        return;
    }
-    &mut next[..l].copy_from_slice(cur.as_bytes());
+    next[..l].copy_from_slice(cur.as_bytes());
    next[l] = branch as u8;
    if fork() == 0 {
        fork_tree(core::str::from_utf8(&next[..l + 1]).unwrap());
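The change above is a readability fix: `&mut` binds to the result of the method call, not to the slice, so the old line borrowed the `()` returned by `copy_from_slice` while the call itself still ran. A standalone illustration (the `demo` function is hypothetical, not from the repo):

fn demo() {
    let src = [1u8, 2, 3];
    let mut dst = [0u8; 3];
    let _unit_ref = &mut dst[..3].copy_from_slice(&src); // parses as &mut (dst[..3].copy_from_slice(&src))
    dst[..3].copy_from_slice(&src);                      // the fixed form: just the call
}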
36 user/src/bin/huge_write.rs Normal file
@ -0,0 +1,36 @@
#![no_std]
#![no_main]

#[macro_use]
extern crate user_lib;

use user_lib::{
    OpenFlags,
    open,
    close,
    write,
    get_time,
};

#[no_mangle]
pub fn main() -> i32 {
    let mut buffer = [0u8; 1024]; // 1KiB
    for i in 0..buffer.len() {
        buffer[i] = i as u8;
    }
    let f = open("testf", OpenFlags::CREATE | OpenFlags::WRONLY);
    if f < 0 {
        panic!("Open test file failed!");
    }
    let f = f as usize;
    let start = get_time();
    let size_mb = 1usize;
    for _ in 0..1024*size_mb {
        write(f, &buffer);
    }
    close(f);
    let time_ms = (get_time() - start) as usize;
    let speed_kbs = size_mb * 1000000 / time_ms;
    println!("{}MiB written, time cost = {}ms, write speed = {}KiB/s", size_mb, time_ms, speed_kbs);
    0
}
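The reported speed is an approximation: the loop writes `size_mb` MiB (1024 writes of 1 KiB each per MiB) in `time_ms` milliseconds, and `size_mb * 1000000 / time_ms` stands in for the exact `size_mb * 1024 * 1000 / time_ms` KiB/s. A worked check with hypothetical numbers:

// e.g. 1 MiB written in 2000 ms:
fn speed_exact_kib_per_s(size_mb: usize, time_ms: usize) -> usize {
    // exact: 1 * 1024 * 1000 / 2000 = 512 KiB/s; the printed figure would be 1 * 1000000 / 2000 = 500.
    size_mb * 1024 * 1000 / time_ms
}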
@ -31,4 +31,4 @@ fn main() -> i32 {
        }
    }
    0
}
69 user/src/bin/mpsc_sem.rs Normal file
@ -0,0 +1,69 @@
#![no_std]
#![no_main]

#[macro_use]
extern crate user_lib;

extern crate alloc;

use user_lib::{semaphore_create, semaphore_up, semaphore_down};
use user_lib::{thread_create, waittid};
use user_lib::exit;
use alloc::vec::Vec;

const SEM_MUTEX: usize = 0;
const SEM_EMPTY: usize = 1;
const SEM_EXISTED: usize = 2;
const BUFFER_SIZE: usize = 8;
static mut BUFFER: [usize; BUFFER_SIZE] = [0; BUFFER_SIZE];
static mut FRONT: usize = 0;
static mut TAIL: usize = 0;
const PRODUCER_COUNT: usize = 4;
const NUMBER_PER_PRODUCER: usize = 100;

unsafe fn producer(id: *const usize) -> ! {
    let id = *id;
    for _ in 0..NUMBER_PER_PRODUCER {
        semaphore_down(SEM_EMPTY);
        semaphore_down(SEM_MUTEX);
        BUFFER[FRONT] = id;
        FRONT = (FRONT + 1) % BUFFER_SIZE;
        semaphore_up(SEM_MUTEX);
        semaphore_up(SEM_EXISTED);
    }
    exit(0)
}

unsafe fn consumer() -> ! {
    for _ in 0..PRODUCER_COUNT * NUMBER_PER_PRODUCER {
        semaphore_down(SEM_EXISTED);
        semaphore_down(SEM_MUTEX);
        print!("{} ", BUFFER[TAIL]);
        TAIL = (TAIL + 1) % BUFFER_SIZE;
        semaphore_up(SEM_MUTEX);
        semaphore_up(SEM_EMPTY);
    }
    println!("");
    exit(0)
}

#[no_mangle]
pub fn main() -> i32 {
    // create semaphores
    assert_eq!(semaphore_create(1) as usize, SEM_MUTEX);
    assert_eq!(semaphore_create(BUFFER_SIZE) as usize, SEM_EMPTY);
    assert_eq!(semaphore_create(0) as usize, SEM_EXISTED);
    // create threads
    let ids: Vec<_> = (0..PRODUCER_COUNT).collect();
    let mut threads = Vec::new();
    for i in 0..PRODUCER_COUNT {
        threads.push(thread_create(producer as usize, &ids.as_slice()[i] as *const _ as usize));
    }
    threads.push(thread_create(consumer as usize, 0));
    // wait for all threads to complete
    for thread in threads.iter() {
        waittid(*thread as usize);
    }
    println!("mpsc_sem passed!");
    0
}
95 user/src/bin/phil_din_mutex.rs Normal file
@ -0,0 +1,95 @@
#![no_std]
#![no_main]

#[macro_use]
extern crate user_lib;
extern crate alloc;

use user_lib::{mutex_blocking_create, mutex_lock, mutex_unlock};
use user_lib::{thread_create, waittid};
use user_lib::{sleep, exit, get_time};
use alloc::vec::Vec;

const N: usize = 5;
const ROUND: usize = 4;
// A round: think -> wait for forks -> eat
const GRAPH_SCALE: usize = 100;

fn get_time_u() -> usize {
    get_time() as usize
}

// Time unit: ms
const ARR: [[usize; ROUND * 2]; N] = [
    [700, 800, 1000, 400, 500, 600, 200, 400],
    [300, 600, 200, 700, 1000, 100, 300, 600],
    [500, 200, 900, 200, 400, 600, 1200, 400],
    [500, 1000, 600, 500, 800, 600, 200, 900],
    [600, 100, 600, 600, 200, 500, 600, 200],
];
static mut THINK: [[usize; ROUND * 2]; N] = [[0; ROUND * 2]; N];
static mut EAT: [[usize; ROUND * 2]; N] = [[0; ROUND * 2]; N];

fn philosopher_dining_problem(id: *const usize) {
    let id = unsafe { *id };
    let left = id;
    let right = if id == N - 1 { 0 } else { id + 1 };
    let min = if left < right { left } else { right };
    let max = left + right - min;
    for round in 0..ROUND {
        // thinking
        unsafe { THINK[id][2 * round] = get_time_u(); }
        sleep(ARR[id][2 * round]);
        unsafe { THINK[id][2 * round + 1] = get_time_u(); }
        // wait for forks
        mutex_lock(min);
        mutex_lock(max);
        // eating
        unsafe { EAT[id][2 * round] = get_time_u(); }
        sleep(ARR[id][2 * round + 1]);
        unsafe { EAT[id][2 * round + 1] = get_time_u(); }
        mutex_unlock(max);
        mutex_unlock(min);
    }
    exit(0)
}

#[no_mangle]
pub fn main() -> i32 {
    let mut v = Vec::new();
    let ids: Vec<_> = (0..N).collect();
    let start = get_time_u();
    for i in 0..N {
        assert_eq!(mutex_blocking_create(), i as isize);
        v.push(thread_create(philosopher_dining_problem as usize, &ids.as_slice()[i] as *const _ as usize));
    }
    for tid in v.iter() {
        waittid(*tid as usize);
    }
    let time_cost = get_time_u() - start;
    println!("time cost = {}", time_cost);
    println!("'-' -> THINKING; 'x' -> EATING; ' ' -> WAITING ");
    for id in (0..N).into_iter().chain(0..=0) {
        print!("#{}:", id);
        for j in 0..time_cost/GRAPH_SCALE {
            let current_time = j * GRAPH_SCALE + start;
            if (0..ROUND).find(|round| unsafe {
                let start_thinking = THINK[id][2 * round];
                let end_thinking = THINK[id][2 * round + 1];
                start_thinking <= current_time && current_time <= end_thinking
            }).is_some() {
                print!("-");
            } else if (0..ROUND).find(|round| unsafe {
                let start_eating = EAT[id][2 * round];
                let end_eating = EAT[id][2 * round + 1];
                start_eating <= current_time && current_time <= end_eating
            }).is_some() {
                print!("x");
            } else {
                print!(" ");
            };
        }
        println!("");
    }
    0
}
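The test above stays deadlock-free because of the `min`/`max` ordering: every philosopher acquires the lower-numbered fork first, so a circular chain of waiters cannot form. The rule in isolation, using the same `mutex_lock`/`mutex_unlock` API (the helper function itself is only a sketch):

// Acquire forks in a global order, release in reverse.
fn eat_with_ordered_forks(left: usize, right: usize) {
    let (first, second) = if left < right { (left, right) } else { (right, left) };
    mutex_lock(first);
    mutex_lock(second);
    // ... eat ...
    mutex_unlock(second);
    mutex_unlock(first);
}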
40 user/src/bin/race_adder.rs Normal file
@ -0,0 +1,40 @@
#![no_std]
#![no_main]

#[macro_use]
extern crate user_lib;
extern crate alloc;

use user_lib::{exit, thread_create, waittid, get_time};
use alloc::vec::Vec;

static mut A: usize = 0;
const PER_THREAD: usize = 1000;
const THREAD_COUNT: usize = 16;

unsafe fn f() -> ! {
    let mut t = 2usize;
    for _ in 0..PER_THREAD {
        let a = &mut A as *mut usize;
        let cur = a.read_volatile();
        for _ in 0..500 { t = t * t % 10007; }
        a.write_volatile(cur + 1);
    }
    exit(t as i32)
}

#[no_mangle]
pub fn main() -> i32 {
    let start = get_time();
    let mut v = Vec::new();
    for _ in 0..THREAD_COUNT {
        v.push(thread_create(f as usize, 0) as usize);
    }
    let mut time_cost = Vec::new();
    for tid in v.iter() {
        time_cost.push(waittid(*tid));
    }
    println!("time cost is {}ms", get_time() - start);
    assert_eq!(unsafe { A }, PER_THREAD * THREAD_COUNT);
    0
}
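race_adder deliberately leaves the read-modify-write on `A` unprotected, so the final assertion can fail once threads are preempted mid-update. The loss happens when two threads interleave like this (illustrative trace, not program output):

// thread 1: cur = a.read_volatile();   // reads 41
// thread 2: cur = a.read_volatile();   // also reads 41
// thread 1: a.write_volatile(cur + 1); // writes 42
// thread 2: a.write_volatile(cur + 1); // writes 42 again -> one increment is lost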
46 user/src/bin/race_adder_atomic.rs Normal file
@ -0,0 +1,46 @@
#![no_std]
#![no_main]

#[macro_use]
extern crate user_lib;
extern crate alloc;

use user_lib::{exit, thread_create, waittid, get_time, yield_};
use alloc::vec::Vec;
use core::sync::atomic::{AtomicBool, Ordering};

static mut A: usize = 0;
static OCCUPIED: AtomicBool = AtomicBool::new(false);
const PER_THREAD: usize = 1000;
const THREAD_COUNT: usize = 16;

unsafe fn f() -> ! {
    let mut t = 2usize;
    for _ in 0..PER_THREAD {
        while OCCUPIED.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed).is_err() {
            yield_();
        }
        let a = &mut A as *mut usize;
        let cur = a.read_volatile();
        for _ in 0..500 { t = t * t % 10007; }
        a.write_volatile(cur + 1);
        OCCUPIED.store(false, Ordering::Relaxed);
    }
    exit(t as i32)
}

#[no_mangle]
pub fn main() -> i32 {
    let start = get_time();
    let mut v = Vec::new();
    for _ in 0..THREAD_COUNT {
        v.push(thread_create(f as usize, 0) as usize);
    }
    let mut time_cost = Vec::new();
    for tid in v.iter() {
        time_cost.push(waittid(*tid));
    }
    println!("time cost is {}ms", get_time() - start);
    assert_eq!(unsafe { A }, PER_THREAD * THREAD_COUNT);
    0
}
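race_adder_atomic uses the `AtomicBool` as a hand-rolled spinlock around a plain `usize`. For a shared counter alone, an atomic fetch-add is the simpler correct form; this sketch is not part of the test suite, it only illustrates the alternative:

use core::sync::atomic::{AtomicUsize, Ordering};

static COUNT: AtomicUsize = AtomicUsize::new(0);

fn add_one() {
    // One atomic read-modify-write; no lock or yield loop needed.
    COUNT.fetch_add(1, Ordering::Relaxed);
}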
47 user/src/bin/race_adder_loop.rs Normal file
@ -0,0 +1,47 @@
#![no_std]
#![no_main]

#[macro_use]
extern crate user_lib;
extern crate alloc;

use user_lib::{exit, thread_create, waittid, get_time, yield_};
use alloc::vec::Vec;

static mut A: usize = 0;
static mut OCCUPIED: bool = false;
const PER_THREAD: usize = 1000;
const THREAD_COUNT: usize = 16;

unsafe fn f() -> ! {
    let mut t = 2usize;
    for _ in 0..PER_THREAD {
        while OCCUPIED { yield_(); }
        OCCUPIED = true;
        // enter critical section
        let a = &mut A as *mut usize;
        let cur = a.read_volatile();
        for _ in 0..500 { t = t * t % 10007; }
        a.write_volatile(cur + 1);
        // exit critical section
        OCCUPIED = false;
    }

    exit(t as i32)
}

#[no_mangle]
pub fn main() -> i32 {
    let start = get_time();
    let mut v = Vec::new();
    for _ in 0..THREAD_COUNT {
        v.push(thread_create(f as usize, 0) as usize);
    }
    let mut time_cost = Vec::new();
    for tid in v.iter() {
        time_cost.push(waittid(*tid));
    }
    println!("time cost is {}ms", get_time() - start);
    assert_eq!(unsafe { A }, PER_THREAD * THREAD_COUNT);
    0
}
44 user/src/bin/race_adder_mutex_blocking.rs Normal file
@ -0,0 +1,44 @@
#![no_std]
#![no_main]

#[macro_use]
extern crate user_lib;
extern crate alloc;

use user_lib::{exit, thread_create, waittid, get_time};
use user_lib::{mutex_blocking_create, mutex_lock, mutex_unlock};
use alloc::vec::Vec;

static mut A: usize = 0;
const PER_THREAD: usize = 1000;
const THREAD_COUNT: usize = 16;

unsafe fn f() -> ! {
    let mut t = 2usize;
    for _ in 0..PER_THREAD {
        mutex_lock(0);
        let a = &mut A as *mut usize;
        let cur = a.read_volatile();
        for _ in 0..500 { t = t * t % 10007; }
        a.write_volatile(cur + 1);
        mutex_unlock(0);
    }
    exit(t as i32)
}

#[no_mangle]
pub fn main() -> i32 {
    let start = get_time();
    assert_eq!(mutex_blocking_create(), 0);
    let mut v = Vec::new();
    for _ in 0..THREAD_COUNT {
        v.push(thread_create(f as usize, 0) as usize);
    }
    let mut time_cost = Vec::new();
    for tid in v.iter() {
        time_cost.push(waittid(*tid));
    }
    println!("time cost is {}ms", get_time() - start);
    assert_eq!(unsafe { A }, PER_THREAD * THREAD_COUNT);
    0
}
44 user/src/bin/race_adder_mutex_spin.rs Normal file
@ -0,0 +1,44 @@
#![no_std]
#![no_main]

#[macro_use]
extern crate user_lib;
extern crate alloc;

use user_lib::{exit, thread_create, waittid, get_time};
use user_lib::{mutex_create, mutex_lock, mutex_unlock};
use alloc::vec::Vec;

static mut A: usize = 0;
const PER_THREAD: usize = 1000;
const THREAD_COUNT: usize = 16;

unsafe fn f() -> ! {
    let mut t = 2usize;
    for _ in 0..PER_THREAD {
        mutex_lock(0);
        let a = &mut A as *mut usize;
        let cur = a.read_volatile();
        for _ in 0..500 { t = t * t % 10007; }
        a.write_volatile(cur + 1);
        mutex_unlock(0);
    }
    exit(t as i32)
}

#[no_mangle]
pub fn main() -> i32 {
    let start = get_time();
    assert_eq!(mutex_create(), 0);
    let mut v = Vec::new();
    for _ in 0..THREAD_COUNT {
        v.push(thread_create(f as usize, 0) as usize);
    }
    let mut time_cost = Vec::new();
    for tid in v.iter() {
        time_cost.push(waittid(*tid));
    }
    println!("time cost is {}ms", get_time() - start);
    assert_eq!(unsafe { A }, PER_THREAD * THREAD_COUNT);
    0
}
38 user/src/bin/threads.rs Normal file
@ -0,0 +1,38 @@
#![no_std]
#![no_main]

#[macro_use]
extern crate user_lib;
extern crate alloc;

use user_lib::{thread_create, waittid, exit};
use alloc::vec::Vec;

pub fn thread_a() -> ! {
    for _ in 0..1000 { print!("a"); }
    exit(1)
}

pub fn thread_b() -> ! {
    for _ in 0..1000 { print!("b"); }
    exit(2)
}

pub fn thread_c() -> ! {
    for _ in 0..1000 { print!("c"); }
    exit(3)
}

#[no_mangle]
pub fn main() -> i32 {
    let mut v = Vec::new();
    v.push(thread_create(thread_a as usize, 0));
    v.push(thread_create(thread_b as usize, 0));
    v.push(thread_create(thread_c as usize, 0));
    for tid in v.iter() {
        let exit_code = waittid(*tid as usize);
        println!("thread#{} exited with code {}", tid, exit_code);
    }
    println!("main thread exited.");
    0
}
39 user/src/bin/threads_arg.rs Normal file
@ -0,0 +1,39 @@
#![no_std]
#![no_main]

#[macro_use]
extern crate user_lib;
extern crate alloc;

use user_lib::{thread_create, waittid, exit};
use alloc::vec::Vec;

struct Argument {
    pub ch: char,
    pub rc: i32,
}

fn thread_print(arg: *const Argument) -> ! {
    let arg = unsafe { &*arg };
    for _ in 0..1000 { print!("{}", arg.ch); }
    exit(arg.rc)
}

#[no_mangle]
pub fn main() -> i32 {
    let mut v = Vec::new();
    let args = [
        Argument { ch: 'a', rc: 1, },
        Argument { ch: 'b', rc: 2, },
        Argument { ch: 'c', rc: 3, },
    ];
    for i in 0..3 {
        v.push(thread_create(thread_print as usize, &args[i] as *const _ as usize));
    }
    for tid in v.iter() {
        let exit_code = waittid(*tid as usize);
        println!("thread#{} exited with code {}", tid, exit_code);
    }
    println!("main thread exited.");
    0
}
@ -1,5 +1,5 @@
#![no_std]
-#![feature(llvm_asm)]
+#![feature(asm)]
#![feature(linkage)]
#![feature(panic_info_message)]
#![feature(alloc_error_handler)]
@ -100,9 +100,32 @@ pub fn waitpid(pid: usize, exit_code: &mut i32) -> isize {
        }
    }
}
-pub fn sleep(period_ms: usize) {
-    let start = sys_get_time();
-    while sys_get_time() < start + period_ms as isize {
-        sys_yield();
-    }
-}
+pub fn sleep(sleep_ms: usize) {
+    sys_sleep(sleep_ms);
+}
+
+pub fn thread_create(entry: usize, arg: usize) -> isize { sys_thread_create(entry, arg) }
+pub fn gettid() -> isize { sys_gettid() }
+pub fn waittid(tid: usize) -> isize {
+    loop {
+        match sys_waittid(tid) {
+            -2 => { yield_(); }
+            exit_code => return exit_code,
+        }
+    }
+}
+
+pub fn mutex_create() -> isize { sys_mutex_create(false) }
+pub fn mutex_blocking_create() -> isize { sys_mutex_create(true) }
+pub fn mutex_lock(mutex_id: usize) { sys_mutex_lock(mutex_id); }
+pub fn mutex_unlock(mutex_id: usize) { sys_mutex_unlock(mutex_id); }
+pub fn semaphore_create(res_count: usize) -> isize {
+    sys_semaphore_create(res_count)
+}
+pub fn semaphore_up(sem_id: usize) {
+    sys_semaphore_up(sem_id);
+}
+pub fn semaphore_down(sem_id: usize) {
+    sys_semaphore_down(sem_id);
+}
@ -2,7 +2,7 @@
OUTPUT_ARCH(riscv)
ENTRY(_start)

-BASE_ADDRESS = 0x0;
+BASE_ADDRESS = 0x10000;

SECTIONS
{
@ -29,4 +29,4 @@ SECTIONS
        *(.eh_frame)
        *(.debug*)
    }
}
@ -5,21 +5,32 @@ const SYSCALL_PIPE: usize = 59;
const SYSCALL_READ: usize = 63;
const SYSCALL_WRITE: usize = 64;
const SYSCALL_EXIT: usize = 93;
+const SYSCALL_SLEEP: usize = 101;
const SYSCALL_YIELD: usize = 124;
const SYSCALL_GET_TIME: usize = 169;
const SYSCALL_GETPID: usize = 172;
const SYSCALL_FORK: usize = 220;
const SYSCALL_EXEC: usize = 221;
const SYSCALL_WAITPID: usize = 260;
+const SYSCALL_THREAD_CREATE: usize = 1000;
+const SYSCALL_GETTID: usize = 1001;
+const SYSCALL_WAITTID: usize = 1002;
+const SYSCALL_MUTEX_CREATE: usize = 1010;
+const SYSCALL_MUTEX_LOCK: usize = 1011;
+const SYSCALL_MUTEX_UNLOCK: usize = 1012;
+const SYSCALL_SEMAPHORE_CREATE: usize = 1020;
+const SYSCALL_SEMAPHORE_UP: usize = 1021;
+const SYSCALL_SEMAPHORE_DOWN: usize = 1022;

fn syscall(id: usize, args: [usize; 3]) -> isize {
    let mut ret: isize;
    unsafe {
-        llvm_asm!("ecall"
-            : "={x10}" (ret)
-            : "{x10}" (args[0]), "{x11}" (args[1]), "{x12}" (args[2]), "{x17}" (id)
-            : "memory"
-            : "volatile"
-        );
+        asm!(
+            "ecall",
+            inlateout("x10") args[0] => ret,
+            in("x11") args[1],
+            in("x12") args[2],
+            in("x17") id
+        );
    }
    ret
@ -54,6 +65,10 @@ pub fn sys_exit(exit_code: i32) -> ! {
    panic!("sys_exit never returns!");
}

+pub fn sys_sleep(sleep_ms: usize) -> isize {
+    syscall(SYSCALL_SLEEP, [sleep_ms, 0, 0])
+}
+
pub fn sys_yield() -> isize {
    syscall(SYSCALL_YIELD, [0, 0, 0])
}
@ -76,4 +91,40 @@ pub fn sys_exec(path: &str, args: &[*const u8]) -> isize {

pub fn sys_waitpid(pid: isize, exit_code: *mut i32) -> isize {
    syscall(SYSCALL_WAITPID, [pid as usize, exit_code as usize, 0])
}
+
+pub fn sys_thread_create(entry: usize, arg: usize) -> isize {
+    syscall(SYSCALL_THREAD_CREATE, [entry, arg, 0])
+}
+
+pub fn sys_gettid() -> isize {
+    syscall(SYSCALL_GETTID, [0; 3])
+}
+
+pub fn sys_waittid(tid: usize) -> isize {
+    syscall(SYSCALL_WAITTID, [tid, 0, 0])
+}
+
+pub fn sys_mutex_create(blocking: bool) -> isize {
+    syscall(SYSCALL_MUTEX_CREATE, [blocking as usize, 0, 0])
+}
+
+pub fn sys_mutex_lock(id: usize) -> isize {
+    syscall(SYSCALL_MUTEX_LOCK, [id, 0, 0])
+}
+
+pub fn sys_mutex_unlock(id: usize) -> isize {
+    syscall(SYSCALL_MUTEX_UNLOCK, [id, 0, 0])
+}
+
+pub fn sys_semaphore_create(res_count: usize) -> isize {
+    syscall(SYSCALL_SEMAPHORE_CREATE, [res_count, 0, 0])
+}
+
+pub fn sys_semaphore_up(sem_id: usize) -> isize {
+    syscall(SYSCALL_SEMAPHORE_UP, [sem_id, 0, 0])
+}
+
+pub fn sys_semaphore_down(sem_id: usize) -> isize {
+    syscall(SYSCALL_SEMAPHORE_DOWN, [sem_id, 0, 0])
+}
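Taken together with the wrappers in user/src/lib.rs, a call such as `sleep(500)` bottoms out here as `syscall(SYSCALL_SLEEP, [500, 0, 0])`: the syscall id travels in x17 (a7), the arguments in x10..x12 (a0..a2), and the kernel's return value comes back in x10, which is exactly what `inlateout("x10") args[0] => ret` expresses. A minimal trace of that mapping (comments only, values illustrative):

// sleep(500)
//   -> sys_sleep(500)
//   -> syscall(101, [500, 0, 0])
//   -> ecall with x17 = 101, x10 = 500, x11 = 0, x12 = 0
//   <- x10 carries the kernel's return value, surfaced as `ret`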