Mirror of https://github.com/rcore-os/rCore-Tutorial-v3.git
Synced 2024-11-22 01:16:26 +04:00

easy-fs: Refactor easy-fs

The user of easy-fs can now specify the sync primitive used by easy-fs via
lock_api::RawMutex. Add unit tests for easy-fs. Expand the bootstack to 1MiB;
after that, the BlockCacheMgr can be successfully initialized.
This commit is contained in:
parent e9c42c8a7d
commit 85e2f4d3b6

Changed files shown below: easy-fs-fuse/.gitignore (vendored, 3 deletions),
both Cargo manifests, main.rs, and the easy-fs sources.
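
For orientation, a minimal sketch of what the headline change means for a user of the crate: the sync primitive is supplied from outside by implementing lock_api::RawMutex. The spinlock below mirrors the RawSpinlock added to main.rs in this commit; the cache capacity 256 and the open_fs helper are illustrative, not part of the commit.

    use easy_fs::{BlockCacheMgr, BlockDevice, EasyFileSystem};
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;

    // User-supplied lock: easy-fs no longer hard-codes spin::Mutex.
    pub struct RawSpinlock(AtomicBool);

    unsafe impl lock_api::RawMutex for RawSpinlock {
        const INIT: RawSpinlock = RawSpinlock(AtomicBool::new(false));
        // A guard may be sent to another thread and unlocked there.
        type GuardMarker = lock_api::GuardSend;

        fn lock(&self) {
            while !self.try_lock() {} // naive busy-wait; fine for a sketch
        }
        fn try_lock(&self) -> bool {
            self.0
                .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
        }
        unsafe fn unlock(&self) {
            self.0.store(false, Ordering::Release);
        }
    }

    // The cache capacity (256 blocks) and both lock types are caller choices.
    fn open_fs(dev: Arc<dyn BlockDevice>) -> EasyFileSystem<256, RawSpinlock, RawSpinlock> {
        let bcache_mgr: BlockCacheMgr<256, RawSpinlock> = BlockCacheMgr::new(&dev);
        EasyFileSystem::open(bcache_mgr)
    }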
easy-fs-fuse/.gitignore (vendored, deleted)
@@ -1,3 +0,0 @@
-.idea/
-target/
-Cargo.lock
easy-fs-fuse/Cargo.toml (deleted)
@@ -1,16 +0,0 @@
-[package]
-name = "easy-fs-fuse"
-version = "0.1.0"
-authors = ["Yifan Wu <shinbokuow@163.com>"]
-edition = "2018"
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[dependencies]
-clap = "2.33.3"
-easy-fs = { path = "../easy-fs" }
-rand = "0.8.0"
-
-# [features]
-# board_qemu = []
-# board_k210 = []
easy-fs/Cargo.toml
@@ -1,18 +1,13 @@
 [package]
 name = "easy-fs"
 version = "0.1.0"
-authors = ["Yifan Wu <shinbokuow@163.com>"]
-edition = "2018"
+edition = "2021"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-spin = "0.7.0"
-lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
+lock_api = "0.4.11"
 
-[profile.release]
-debug = true
-
-[features]
-board_qemu = []
-board_k210 = []
+[dev-dependencies]
+rand = "0.8.5"
+clap = "2.33.3"
src/main.rs (easy-fs-fuse)
@@ -1,12 +1,39 @@
 use clap::{App, Arg};
-use easy_fs::{BlockDevice, EasyFileSystem};
+use easy_fs::{BlockCacheMgr, BlockDevice, EasyFileSystem, Inode};
 use std::fs::{read_dir, File, OpenOptions};
 use std::io::{Read, Seek, SeekFrom, Write};
-use std::sync::Arc;
-use std::sync::Mutex;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::{Arc, Mutex};
 
 const BLOCK_SZ: usize = 512;
+const FS_BLOCKS: usize = 32768;
+const BLOCK_CACHE_BLKS: usize = 256;
+type EasyFileSystemType = EasyFileSystem<BLOCK_CACHE_BLKS, RawSpinlock, RawSpinlock>;
+
+pub struct RawSpinlock(AtomicBool);
+
+unsafe impl lock_api::RawMutex for RawSpinlock {
+    const INIT: RawSpinlock = RawSpinlock(AtomicBool::new(false));
+
+    // A spinlock guard can be sent to another thread and unlocked there
+    type GuardMarker = lock_api::GuardSend;
+
+    fn lock(&self) {
+        // Note: This isn't the best way of implementing a spinlock, but it
+        // suffices for the sake of this example.
+        while !self.try_lock() {}
+    }
+
+    fn try_lock(&self) -> bool {
+        self.0
+            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
+            .is_ok()
+    }
+
+    unsafe fn unlock(&self) {
+        self.0.store(false, Ordering::Release);
+    }
+}
 struct BlockFile(Mutex<File>);
 
 impl BlockDevice for BlockFile {
@@ -49,20 +76,22 @@ fn easy_fs_pack() -> std::io::Result<()> {
     let src_path = matches.value_of("source").unwrap();
     let target_path = matches.value_of("target").unwrap();
     println!("src_path = {}\ntarget_path = {}", src_path, target_path);
-    let block_file = Arc::new(BlockFile(Mutex::new({
+    let block_file: Arc<dyn BlockDevice> = Arc::new(BlockFile(Mutex::new({
         let f = OpenOptions::new()
             .read(true)
            .write(true)
             .create(true)
             .open(format!("{}{}", target_path, "fs.img"))?;
-        f.set_len(16 * 2048 * 512).unwrap();
+        f.set_len((FS_BLOCKS * BLOCK_SZ) as u64).unwrap();
         f
     })));
+    let bcache_mgr: BlockCacheMgr<BLOCK_CACHE_BLKS, RawSpinlock> = BlockCacheMgr::new(&block_file);
     // 16MiB, at most 4095 files
-    let efs = EasyFileSystem::create(block_file, 16 * 2048, 1);
-    let root_inode = Arc::new(EasyFileSystem::root_inode(&efs));
-    let apps: Vec<_> = read_dir(src_path)
-        .unwrap()
+    let efs: EasyFileSystemType = EasyFileSystem::create(FS_BLOCKS as u32, 1, bcache_mgr);
+    let efs: lock_api::Mutex<RawSpinlock, EasyFileSystemType> = lock_api::Mutex::new(efs);
+    let efs = Arc::new(efs);
+    let root_inode = Inode::root_inode(&efs);
+    let apps: Vec<_> = read_dir(src_path)?
         .into_iter()
         .map(|dir_entry| {
             let mut name_with_ext = dir_entry.unwrap().file_name().into_string().unwrap();
@@ -80,72 +109,5 @@ fn easy_fs_pack() -> std::io::Result<()> {
         // write data to easy-fs
         inode.write_at(0, all_data.as_slice());
     }
-    // list apps
-    // for app in root_inode.ls() {
-    //     println!("{}", app);
-    // }
-    Ok(())
-}
-
-#[test]
-fn efs_test() -> std::io::Result<()> {
-    let block_file = Arc::new(BlockFile(Mutex::new({
-        let f = OpenOptions::new()
-            .read(true)
-            .write(true)
-            .create(true)
-            .open("target/fs.img")?;
-        f.set_len(8192 * 512).unwrap();
-        f
-    })));
-    EasyFileSystem::create(block_file.clone(), 4096, 1);
-    let efs = EasyFileSystem::open(block_file.clone());
-    let root_inode = EasyFileSystem::root_inode(&efs);
-    root_inode.create("filea");
-    root_inode.create("fileb");
-    for name in root_inode.ls() {
-        println!("{}", name);
-    }
-    let filea = root_inode.find("filea").unwrap();
-    let greet_str = "Hello, world!";
-    filea.write_at(0, greet_str.as_bytes());
-    //let mut buffer = [0u8; 512];
-    let mut buffer = [0u8; 233];
-    let len = filea.read_at(0, &mut buffer);
-    assert_eq!(greet_str, core::str::from_utf8(&buffer[..len]).unwrap(),);
-
-    let mut random_str_test = |len: usize| {
-        filea.clear();
-        assert_eq!(filea.read_at(0, &mut buffer), 0,);
-        let mut str = String::new();
-        use rand;
-        // random digit
-        for _ in 0..len {
-            str.push(char::from('0' as u8 + rand::random::<u8>() % 10));
-        }
-        filea.write_at(0, str.as_bytes());
-        let mut read_buffer = [0u8; 127];
-        let mut offset = 0usize;
-        let mut read_str = String::new();
-        loop {
-            let len = filea.read_at(offset, &mut read_buffer);
-            if len == 0 {
-                break;
-            }
-            offset += len;
-            read_str.push_str(core::str::from_utf8(&read_buffer[..len]).unwrap());
-        }
-        assert_eq!(str, read_str);
-    };
-
-    random_str_test(4 * BLOCK_SZ);
-    random_str_test(8 * BLOCK_SZ + BLOCK_SZ / 2);
-    random_str_test(100 * BLOCK_SZ);
-    random_str_test(70 * BLOCK_SZ + BLOCK_SZ / 7);
-    random_str_test((12 + 128) * BLOCK_SZ);
-    random_str_test(400 * BLOCK_SZ);
-    random_str_test(1000 * BLOCK_SZ);
-    random_str_test(2000 * BLOCK_SZ);
-
     Ok(())
 }
easy-fs/src/bitmap.rs
@@ -1,10 +1,11 @@
-use super::{get_block_cache, BlockDevice, BLOCK_SZ};
-use alloc::sync::Arc;
+use super::{BlockCacheMgr, BLOCK_SZ};
 /// A bitmap block
 type BitmapBlock = [u64; 64];
 /// Number of bits in a block
 const BLOCK_BITS: usize = BLOCK_SZ * 8;
 /// A bitmap
+#[derive(Default)]
 pub struct Bitmap {
     start_block_id: usize,
     blocks: usize,
@@ -26,14 +27,13 @@ impl Bitmap {
         }
     }
     /// Allocate a new block from a block device
-    pub fn alloc(&self, block_device: &Arc<dyn BlockDevice>) -> Option<usize> {
+    pub fn alloc<const N: usize, R>(&self, bcache_mgr: &mut BlockCacheMgr<N, R>) -> Option<usize>
+    where
+        R: lock_api::RawMutex,
+    {
         for block_id in 0..self.blocks {
-            let pos = get_block_cache(
-                block_id + self.start_block_id as usize,
-                Arc::clone(block_device),
-            )
-            .lock()
-            .modify(0, |bitmap_block: &mut BitmapBlock| {
+            let real_block_id = block_id + self.start_block_id as usize;
+            let pos = bcache_mgr.write_block(real_block_id, 0, |bitmap_block: &mut BitmapBlock| {
                 if let Some((bits64_pos, inner_pos)) = bitmap_block
                     .iter()
                     .enumerate()
@@ -54,17 +54,88 @@ impl Bitmap {
         None
     }
     /// Deallocate a block
-    pub fn dealloc(&self, block_device: &Arc<dyn BlockDevice>, bit: usize) {
+    pub fn dealloc<const N: usize, R>(&self, bcache_mgr: &mut BlockCacheMgr<N, R>, bit: usize)
+    where
+        R: lock_api::RawMutex,
+    {
         let (block_pos, bits64_pos, inner_pos) = decomposition(bit);
-        get_block_cache(block_pos + self.start_block_id, Arc::clone(block_device))
-            .lock()
-            .modify(0, |bitmap_block: &mut BitmapBlock| {
-                assert!(bitmap_block[bits64_pos] & (1u64 << inner_pos) > 0);
-                bitmap_block[bits64_pos] -= 1u64 << inner_pos;
-            });
+        let real_block_id = block_pos + self.start_block_id;
+        bcache_mgr.write_block(real_block_id, 0, |bitmap_block: &mut BitmapBlock| {
+            assert!(bitmap_block[bits64_pos] & (1u64 << inner_pos) > 0);
+            bitmap_block[bits64_pos] -= 1u64 << inner_pos;
+        });
     }
     /// Get the max number of allocatable blocks
     pub fn maximum(&self) -> usize {
         self.blocks * BLOCK_BITS
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::{test_helper::*, BlockCacheMgr};
+    use std::sync::Arc;
+
+    #[test]
+    pub fn test_bitmap() {
+        let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<16>::new());
+        let mut bcache_mgr: BlockCacheMgr<4, RawSpinlock> = BlockCacheMgr::new(&block_dev);
+        let bitmap = Bitmap::new(0, 16);
+        assert_eq!(bitmap.maximum(), BLOCK_BITS * 16);
+
+        let mut buf = [0u8; BLOCK_SZ];
+        assert_eq!(bitmap.alloc(&mut bcache_mgr), Some(0));
+        assert_eq!(bitmap.alloc(&mut bcache_mgr), Some(1));
+        bcache_mgr.sync_all();
+        block_dev.read_block(0, &mut buf);
+        assert_eq!(buf[..8], 3u64.to_ne_bytes());
+        assert!(buf[8..].iter().all(|byte| *byte == 0));
+
+        bitmap.dealloc(&mut bcache_mgr, 0);
+        bcache_mgr.sync_all();
+        block_dev.read_block(0, &mut buf);
+        assert_eq!(buf[..8], 2u64.to_ne_bytes());
+
+        bitmap.dealloc(&mut bcache_mgr, 1);
+        bcache_mgr.sync_all();
+        block_dev.read_block(1, &mut buf);
+        assert!(buf.iter().all(|byte| *byte == 0));
+    }
+
+    #[test]
+    #[should_panic]
+    pub fn test_bitmap_panic() {
+        let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<16>::new());
+        let mut bcache_mgr: BlockCacheMgr<4, RawSpinlock> = BlockCacheMgr::new(&block_dev);
+        let bitmap = Bitmap::new(0, 1);
+        assert_eq!(bitmap.alloc(&mut bcache_mgr), Some(0));
+        bitmap.dealloc(&mut bcache_mgr, 1);
+    }
+
+    #[test]
+    pub fn test_bitmap_large() {
+        let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<16>::new());
+        let mut bcache_mgr: BlockCacheMgr<4, RawSpinlock> = BlockCacheMgr::new(&block_dev);
+        let bitmap = Bitmap::new(0, 16);
+        let mut buf = [0u8; BLOCK_SZ];
+
+        for i in 0..16 * BLOCK_BITS {
+            assert_eq!(bitmap.alloc(&mut bcache_mgr), Some(i));
+        }
+        bcache_mgr.sync_all();
+        for block_id in 0..16usize {
+            block_dev.read_block(block_id, &mut buf);
+            assert!(buf.iter().all(|byte| *byte == u8::MAX));
+        }
+
+        for i in 0..16 * BLOCK_BITS {
+            bitmap.dealloc(&mut bcache_mgr, i);
+        }
+        bcache_mgr.sync_all();
+        for block_id in 0..16usize {
+            block_dev.read_block(block_id, &mut buf);
+            assert!(buf.iter().all(|byte| *byte == 0));
+        }
+    }
+}
easy-fs/src/block_cache.rs
@@ -1,144 +1,426 @@
-use super::{BlockDevice, BLOCK_SZ};
-use alloc::collections::VecDeque;
+use crate::{BlockDevice, BLOCK_SZ};
 use alloc::sync::Arc;
-use lazy_static::*;
-use spin::Mutex;
+use core::ops::{Deref, DerefMut};
+use lock_api;
+
+#[derive(Copy, Clone)]
+#[repr(align(512))]
+pub struct CacheBlock([u8; BLOCK_SZ]);
+impl Default for CacheBlock {
+    fn default() -> Self {
+        Self([0u8; BLOCK_SZ])
+    }
+}
+impl Deref for CacheBlock {
+    type Target = [u8; BLOCK_SZ];
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+impl DerefMut for CacheBlock {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
+
 /// Cached block inside memory
 pub struct BlockCache {
-    /// cached block data
-    cache: [u8; BLOCK_SZ],
+    /// cached block data, provided by BlockCacheMgr
+    /// We use 'static lifetime here since:
+    /// 1. We do not want BlockCacheMgr to be a self-referential struct.
+    /// 2. We can guarantee that the BlockCacheMgr outlives BlockCache.
+    /// 3. We can guarantee that only one BlockCache can access this region
+    ///    at a time.
+    cache: &'static mut CacheBlock,
     /// underlying block id
     block_id: usize,
     /// underlying block device
     block_device: Arc<dyn BlockDevice>,
     /// whether the block is dirty
-    modified: bool,
+    pub dirty: bool,
+    /// The timestamp of the last access of the block.
+    /// It is provided by the BlockCacheMgr.
+    last_access_time: usize,
 }
 
 impl BlockCache {
     /// Load a new BlockCache from disk.
-    pub fn new(block_id: usize, block_device: Arc<dyn BlockDevice>) -> Self {
-        let mut cache = [0u8; BLOCK_SZ];
-        block_device.read_block(block_id, &mut cache);
+    pub fn new(
+        block_id: usize,
+        block_device: &Arc<dyn BlockDevice>,
+        cache: &mut CacheBlock,
+    ) -> Self {
+        block_device.read_block(block_id, &mut cache.0);
+        // Safety: See the definition of BlockCache.
+        let cache: &'static mut CacheBlock = unsafe {
+            let cache_ptr: *mut CacheBlock = core::mem::transmute(cache.as_mut_ptr());
+            &mut *(cache_ptr)
+        };
         Self {
             cache,
             block_id,
-            block_device,
-            modified: false,
+            block_device: Arc::clone(block_device),
+            dirty: false,
+            last_access_time: 0,
         }
     }
-    /// Get the address of an offset inside the cached block data
     fn addr_of_offset(&self, offset: usize) -> usize {
-        &self.cache[offset] as *const _ as usize
+        &self.cache.0[offset] as *const _ as usize
     }
 
-    pub fn get_ref<T>(&self, offset: usize) -> &T
-    where
-        T: Sized,
-    {
-        let type_size = core::mem::size_of::<T>();
-        assert!(offset + type_size <= BLOCK_SZ);
-        let addr = self.addr_of_offset(offset);
-        unsafe { &*(addr as *const T) }
+    fn check_value_validity<T>(&self, offset: usize) {
+        let start_addr = self.addr_of_offset(offset);
+        // We guarantee that the input arguments are correct. Thus,
+        // if we find that the arguments are not valid, just panic.
+        assert_eq!(start_addr % core::mem::align_of::<T>(), 0);
+        assert!(offset + core::mem::size_of::<T>() <= BLOCK_SZ);
     }
 
-    pub fn get_mut<T>(&mut self, offset: usize) -> &mut T
-    where
-        T: Sized,
-    {
-        let type_size = core::mem::size_of::<T>();
-        assert!(offset + type_size <= BLOCK_SZ);
-        self.modified = true;
-        let addr = self.addr_of_offset(offset);
-        unsafe { &mut *(addr as *mut T) }
+    /// Get an reference to a value of generic type T at a given
+    /// offset on the block cache.
+    pub fn value_ref_at_offset<T>(&self, offset: usize) -> &T {
+        self.check_value_validity::<T>(offset);
+        // Safety:
+        // we have checked that the address at the given offset is
+        // aligned to type T and the value of type T is in the boundary
+        // of the cache array.
+        unsafe { core::mem::transmute::<&u8, &T>(&self.cache.0[offset]) }
     }
 
+    /// Get an mutable reference to a value of generic type T at a given
+    /// offset on the block cache.
+    pub fn value_mut_at_offset<T>(&mut self, offset: usize) -> &mut T {
+        self.check_value_validity::<T>(offset);
+        self.dirty = true;
+        // Safety: See value_ref_at_offset.
+        unsafe { core::mem::transmute::<&mut u8, &mut T>(&mut self.cache.0[offset]) }
+    }
+
+    /// Apply a read operation on this BlockCache at the given offset.
     pub fn read<T, V>(&self, offset: usize, f: impl FnOnce(&T) -> V) -> V {
-        f(self.get_ref(offset))
+        f(self.value_ref_at_offset::<T>(offset))
     }
 
-    pub fn modify<T, V>(&mut self, offset: usize, f: impl FnOnce(&mut T) -> V) -> V {
-        f(self.get_mut(offset))
+    /// Apply a write operation on this BlockCache at the given offset.
+    pub fn write<T, V>(&mut self, offset: usize, f: impl FnOnce(&mut T) -> V) -> V {
+        f(self.value_mut_at_offset::<T>(offset))
     }
 
-    pub fn sync(&mut self) {
-        if self.modified {
-            self.modified = false;
-            self.block_device.write_block(self.block_id, &self.cache);
+    /// Sync the changes on this block cache in memory to the underlying
+    /// block device.
+    pub fn sync_to_device(&mut self) {
+        if self.dirty {
+            self.dirty = false;
+            self.block_device.write_block(self.block_id, &self.cache.0);
         }
     }
+
+    /// Update the access time used for LRU when this block is
+    /// accessed.
+    pub fn update_access_time(&mut self, new_time: usize) {
+        self.last_access_time = new_time;
+    }
+
+    /// Get the timestamp of the last access of this block.
+    pub fn access_time(&self) -> usize {
+        self.last_access_time
+    }
 }
 
 impl Drop for BlockCache {
     fn drop(&mut self) {
-        self.sync()
+        self.sync_to_device();
     }
 }
-/// Use a block cache of 16 blocks
-const BLOCK_CACHE_SIZE: usize = 16;
 
-pub struct BlockCacheManager {
-    queue: VecDeque<(usize, Arc<Mutex<BlockCache>>)>,
+/// The block cache manager.
+///
+/// The user can control the sync primitive used by each block cache via
+/// R which implements the `lock_api::RawMutex` trait. At the same time, user can
+/// control the capacity of the block cache pool via the const generic parameter N.
+pub struct BlockCacheMgr<const N: usize, R>
+where
+    R: lock_api::RawMutex,
+{
+    cache_block_arr: [CacheBlock; N],
+    block_id_arr: [Option<usize>; N],
+    cache_arr: [Option<Arc<lock_api::Mutex<R, BlockCache>>>; N],
+    block_dev: Arc<dyn BlockDevice>,
+    current_time: usize,
 }
 
-impl BlockCacheManager {
-    pub fn new() -> Self {
+impl<const N: usize, R> BlockCacheMgr<N, R>
+where
+    R: lock_api::RawMutex,
+{
+    /// Initialize a block cache manager.
+    pub fn new(block_dev: &Arc<dyn BlockDevice>) -> Self {
+        let mut cache_arr: [Option<Arc<lock_api::Mutex<R, BlockCache>>>; N] =
+            unsafe { core::mem::zeroed() };
+        for cache in cache_arr.iter_mut() {
+            *cache = None;
+        }
         Self {
-            queue: VecDeque::new(),
+            cache_block_arr: [CacheBlock::default(); N],
+            block_id_arr: [None; N],
+            cache_arr,
+            block_dev: Arc::clone(block_dev),
+            current_time: 0,
         }
     }
 
-    pub fn get_block_cache(
-        &mut self,
-        block_id: usize,
-        block_device: Arc<dyn BlockDevice>,
-    ) -> Arc<Mutex<BlockCache>> {
-        if let Some(pair) = self.queue.iter().find(|pair| pair.0 == block_id) {
-            Arc::clone(&pair.1)
-        } else {
-            // substitute
-            if self.queue.len() == BLOCK_CACHE_SIZE {
-                // from front to tail
-                if let Some((idx, _)) = self
-                    .queue
-                    .iter()
-                    .enumerate()
-                    .find(|(_, pair)| Arc::strong_count(&pair.1) == 1)
-                {
-                    self.queue.drain(idx..=idx);
-                } else {
-                    panic!("Run out of BlockCache!");
-                }
-            }
-            // load block into mem and push back
-            let block_cache = Arc::new(Mutex::new(BlockCache::new(
-                block_id,
-                Arc::clone(&block_device),
-            )));
-            self.queue.push_back((block_id, Arc::clone(&block_cache)));
-            block_cache
-        }
-    }
-}
+    fn get_slot_id(&self, block_id: usize) -> Option<usize> {
+        (0..N).find(|slot_id| {
+            if let Some(block_id_t) = self.block_id_arr[*slot_id].as_ref() {
+                *block_id_t == block_id
+            } else {
+                false
+            }
+        })
+    }
+
+    fn get_empty_slot_id(&self) -> Option<usize> {
+        (0..N).find(|slot_id| self.block_id_arr[*slot_id].is_none())
+    }
+
+    /// Get a block cache from block cache manager. Caller need to provide the block id.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if all caches cannot be swapped out since they are
+    /// still referenced.
+    pub fn get_block_cache(&mut self, block_id: usize) -> Arc<lock_api::Mutex<R, BlockCache>> {
+        let slot_id = if let Some(slot_id) = self.get_slot_id(block_id) {
+            slot_id
+        } else if let Some(slot_id) = self.get_empty_slot_id() {
+            self.block_id_arr[slot_id] = Some(block_id);
+            self.cache_arr[slot_id] = Some(Arc::new(lock_api::Mutex::new(BlockCache::new(
+                block_id,
+                &self.block_dev,
+                &mut self.cache_block_arr[slot_id],
+            ))));
+            slot_id
+        } else {
+            // select a block:
+            // 1. with minimum last access time
+            // 2. it is not referenced by any thread
+            let invalid_slot_id = usize::MAX;
+            let (slot_id, _) = (0..N).fold(
+                (invalid_slot_id, usize::MAX),
+                |(target_slot_id, min_last_time), slot_id| {
+                    let cache = self.cache_arr[slot_id].as_ref().unwrap();
+                    if Arc::strong_count(cache) > 1 {
+                        return (target_slot_id, min_last_time);
+                    }
+                    // Only acquire the cache lock if it has not been acquired(through the refcnt)
+                    // This can avoid the AA deadlock.
+                    let last_time = cache.lock().access_time();
+                    if last_time < min_last_time {
+                        (slot_id, last_time)
+                    } else {
+                        (target_slot_id, min_last_time)
+                    }
+                },
+            );
+            // If all caches are still being used, just panic
+            assert_ne!(slot_id, invalid_slot_id);
+            assert_eq!(
+                Arc::strong_count(self.cache_arr[slot_id].as_ref().unwrap()),
+                1
+            );
+            // manually substitute it with a cache of a new block
+            // the drop of the older value is later than the init of the new value, thus
+            // we cannot put them in one line
+            self.cache_arr[slot_id] = None;
+            self.block_id_arr[slot_id] = Some(block_id);
+            self.cache_arr[slot_id] = Some(Arc::new(lock_api::Mutex::new(BlockCache::new(
+                block_id,
+                &self.block_dev,
+                &mut self.cache_block_arr[slot_id],
+            ))));
+            slot_id
+        };
+        // update the access time
+        self.current_time += 1;
+        let cache = self.cache_arr[slot_id].as_ref().unwrap();
+        cache.lock().update_access_time(self.current_time);
+
+        Arc::clone(cache)
+    }
+
+    /// Sync the changes on all block caches of this block cache manager to the
+    /// block device.
+    pub fn sync_all(&self) {
+        for cache in self.cache_arr.iter() {
+            if let Some(cache) = cache.as_ref() {
+                cache.lock().sync_to_device();
+            }
+        }
+    }
+
+    /// Apply a read operation on a block at the given offset.
+    pub fn read_block<T, V>(
+        &mut self,
+        block_id: usize,
+        offset: usize,
+        op: impl FnOnce(&T) -> V,
+    ) -> V {
+        let bcache = self.get_block_cache(block_id);
+        let bcache_guard = bcache.lock();
+        bcache_guard.read(offset, op)
+    }
+
+    /// Apply a write operation on a block at the given offset.
+    pub fn write_block<T, V>(
+        &mut self,
+        block_id: usize,
+        offset: usize,
+        op: impl FnOnce(&mut T) -> V,
+    ) -> V {
+        let bcache = self.get_block_cache(block_id);
+        let mut bcache_guard = bcache.lock();
+        bcache_guard.write(offset, op)
+    }
+}
 
-lazy_static! {
-    /// The global block cache manager
-    pub static ref BLOCK_CACHE_MANAGER: Mutex<BlockCacheManager> =
-        Mutex::new(BlockCacheManager::new());
-}
-/// Get the block cache corresponding to the given block id and block device
-pub fn get_block_cache(
-    block_id: usize,
-    block_device: Arc<dyn BlockDevice>,
-) -> Arc<Mutex<BlockCache>> {
-    BLOCK_CACHE_MANAGER
-        .lock()
-        .get_block_cache(block_id, block_device)
-}
-/// Sync all block cache to block device
-pub fn block_cache_sync_all() {
-    let manager = BLOCK_CACHE_MANAGER.lock();
-    for (_, cache) in manager.queue.iter() {
-        cache.lock().sync();
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::test_helper::*;
+    use std::sync::Arc;
+    mod block_cache {
+        use super::*;
+
+        #[test]
+        pub fn test_init() {
+            let block_dev: Arc<dyn BlockDevice> = Arc::new(MockBlockDevice);
+            let mut cache_block: CacheBlock = Default::default();
+            let block_cache = BlockCache::new(0, &block_dev, &mut cache_block);
+            assert_eq!(block_cache.dirty, false);
+        }
+
+        #[test]
+        pub fn test_read_write() {
+            let block_dev: Arc<dyn BlockDevice> = Arc::new(MockBlockDevice);
+            let mut cache_block: CacheBlock = Default::default();
+            let mut block_cache = BlockCache::new(0, &block_dev, &mut cache_block);
+            let test_value: u32 = 0x11223344;
+
+            {
+                let u32_mut = block_cache.value_mut_at_offset::<u32>(0);
+                *u32_mut = test_value;
+            }
+
+            let u32_ref = block_cache.value_ref_at_offset::<u32>(0);
+            assert_eq!(*u32_ref, test_value);
+        }
+
+        #[test]
+        pub fn test_sync() {
+            let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<4>::new());
+            let mut cache_block: CacheBlock = Default::default();
+            let mut block_cache = BlockCache::new(0, &block_dev, &mut cache_block);
+            let mut buf = [0xffu8; BLOCK_SZ];
+            block_dev.read_block(0, &mut buf);
+            assert!(buf.iter().all(|byte| *byte == 0));
+
+            // write and sync manually
+            let test_value: u32 = 0x11223344;
+            {
+                let u32_mut = block_cache.value_mut_at_offset::<u32>(0);
+                *u32_mut = test_value;
+            }
+            block_cache.sync_to_device();
+            block_dev.read_block(0, &mut buf);
+            assert!(&buf[..4] == test_value.to_ne_bytes());
+            assert!(&buf[4..].iter().all(|byte| *byte == 0));
+
+            // write and sync automatically after block_cache is dropped
+            let test_value2: u32 = 0x55667788;
+            {
+                let u32_mut = block_cache.value_mut_at_offset::<u32>(4);
+                *u32_mut = test_value2;
+            }
+            drop(block_cache);
+            block_dev.read_block(0, &mut buf);
+            assert!(&buf[..4] == test_value.to_ne_bytes());
+            assert!(&buf[4..8] == test_value2.to_ne_bytes());
+            assert!(&buf[8..].iter().all(|byte| *byte == 0));
+        }
+    }
+
+    mod block_cache_mgr {
+        use super::*;
+
+        #[test]
+        pub fn test_init() {
+            let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<4>::new());
+            let mut block_cache_mgr: BlockCacheMgr<16, RawSpinlock> =
+                BlockCacheMgr::new(&block_dev);
+            let block_cache = block_cache_mgr.get_block_cache(0);
+
+            // write a value
+            let test_value: u32 = 0x11223344;
+            let mut locked_cache = block_cache.lock();
+            locked_cache.write(0, |v: &mut u32| {
+                *v = test_value;
+            });
+            // check block_dev, it won't be changed since we've not sync the changes yet
+            let mut buf = [0u8; BLOCK_SZ];
+            block_dev.read_block(0, &mut buf);
+            assert!(buf.iter().all(|byte| *byte == 0));
+            // sync changes and check again
+            locked_cache.sync_to_device();
+            block_dev.read_block(0, &mut buf);
+            assert_eq!(buf[..4], test_value.to_ne_bytes());
+            assert!(buf[4..].iter().all(|byte| *byte == 0));
+        }
+
+        #[test]
+        pub fn test_swap_and_sync() {
+            let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<16>::new());
+            let mut block_cache_mgr: BlockCacheMgr<4, RawSpinlock> = BlockCacheMgr::new(&block_dev);
+            let block_cache_list: Vec<_> = (0..4)
+                .map(|block_id: usize| block_cache_mgr.get_block_cache(block_id))
+                .collect();
+            let test_value: u32 = 0x11223344;
+            {
+                // write the block of id 0
+                let block_cache = &block_cache_list[0];
+                let mut locked_cache = block_cache.lock();
+                locked_cache.write(0, |v: &mut u32| {
+                    *v = test_value;
+                });
+            }
+            // Access block caches other than block 0.
+            // In this way, the block 0 will be swapped out when a new block comes in.
+            for block_id in 1..4usize {
+                let _ = block_cache_mgr.get_block_cache(block_id);
+            }
+            // all block caches will not be referenced
+            drop(block_cache_list);
+            // access a new block
+            let _ = block_cache_mgr.get_block_cache(5);
+            // block 0 should be swapped out, thus the changes on it should be synced to device
+            let mut buf = [0u8; BLOCK_SZ];
+            block_dev.read_block(0, &mut buf);
+            assert_eq!(&buf[0..4], test_value.to_ne_bytes());
+            assert!(&buf[4..].iter().all(|byte| *byte == 0));
+        }
+
+        #[test]
+        #[should_panic]
+        pub fn test_all_caches_referenced() {
+            let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<16>::new());
+            let mut block_cache_mgr: BlockCacheMgr<4, RawSpinlock> = BlockCacheMgr::new(&block_dev);
+            let _block_cache_list: Vec<_> = (0..4)
+                .map(|block_id| block_cache_mgr.get_block_cache(block_id))
+                .collect();
+            let _ = block_cache_mgr.get_block_cache(5);
+        }
     }
 }
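
A usage note on the new closure-based helpers (a sketch, not part of the commit; the block id 7 and the u32 payload are arbitrary): read_block/write_block wrap the get-cache/lock/access dance that callers previously spelled out by hand, and write_block marks the cache dirty so the change reaches the device on sync_all or on eviction.

    fn bump_counter<const N: usize, R: lock_api::RawMutex>(
        mgr: &mut easy_fs::BlockCacheMgr<N, R>,
    ) {
        // Read a u32 at offset 0 of block 7; no dirty flag is set.
        let v: u32 = mgr.read_block(7, 0, |v: &u32| *v);
        // Write it back incremented; the closure runs under the cache lock
        // and marks the block dirty.
        mgr.write_block(7, 0, |slot: &mut u32| *slot = v + 1);
        // Flush every dirty cache to the underlying device.
        mgr.sync_all();
    }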
easy-fs/src/block_dev.rs
@@ -1,9 +1,9 @@
 use core::any::Any;
 /// Trait for block devices
 /// which reads and writes data in the unit of blocks
 pub trait BlockDevice: Send + Sync + Any {
     ///Read data form block to buffer
     fn read_block(&self, block_id: usize, buf: &mut [u8]);
     ///Write data from buffer to block
     fn write_block(&self, block_id: usize, buf: &[u8]);
 }
easy-fs/src/efs.rs
@@ -1,121 +1,50 @@
-use super::{
-    block_cache_sync_all, get_block_cache, Bitmap, BlockDevice, DiskInode, DiskInodeType, Inode,
-    SuperBlock,
-};
+use crate::bitmap::Bitmap;
+use crate::layout::{DiskInode, DiskInodeType, SuperBlock};
+use crate::BlockCacheMgr;
 use crate::BLOCK_SZ;
-use alloc::sync::Arc;
-use spin::Mutex;
-///An easy file system on block
-pub struct EasyFileSystem {
-    ///Real device
-    pub block_device: Arc<dyn BlockDevice>,
-    ///Inode bitmap
-    pub inode_bitmap: Bitmap,
-    ///Data bitmap
-    pub data_bitmap: Bitmap,
-    inode_area_start_block: u32,
-    data_area_start_block: u32,
+use alloc::vec::Vec;
+
+const SUPER_BLOCK_ID: usize = 0;
+const ROOT_INODE_ID: usize = 0;
+
+/// The EasyFileSystem aggregates the underlying BlockCacheMgr and
+/// describes the layout of the file system, including the positions
+/// of different regions.
+pub struct EasyFileSystem<const N: usize, RCacheMgr, RCache>
+where
+    RCacheMgr: lock_api::RawMutex,
+    RCache: lock_api::RawMutex,
+{
+    /// The EasyFileSystem apply read/write operation on disk through the bcache_mgr.
+    pub bcache_mgr: lock_api::Mutex<RCacheMgr, BlockCacheMgr<N, RCache>>,
+    inode_bitmap: Bitmap,
+    data_bitmap: Bitmap,
+    inode_area_start_block_id: u32,
+    data_area_start_block_id: u32,
 }
 
 type DataBlock = [u8; BLOCK_SZ];
-/// An easy fs over a block device
-impl EasyFileSystem {
-    /// A data block of block size
-    pub fn create(
-        block_device: Arc<dyn BlockDevice>,
-        total_blocks: u32,
-        inode_bitmap_blocks: u32,
-    ) -> Arc<Mutex<Self>> {
-        // calculate block size of areas & create bitmaps
-        let inode_bitmap = Bitmap::new(1, inode_bitmap_blocks as usize);
-        let inode_num = inode_bitmap.maximum();
-        let inode_area_blocks =
-            ((inode_num * core::mem::size_of::<DiskInode>() + BLOCK_SZ - 1) / BLOCK_SZ) as u32;
-        let inode_total_blocks = inode_bitmap_blocks + inode_area_blocks;
-        let data_total_blocks = total_blocks - 1 - inode_total_blocks;
-        let data_bitmap_blocks = (data_total_blocks + 4096) / 4097;
-        let data_area_blocks = data_total_blocks - data_bitmap_blocks;
-        let data_bitmap = Bitmap::new(
-            (1 + inode_bitmap_blocks + inode_area_blocks) as usize,
-            data_bitmap_blocks as usize,
-        );
-        let mut efs = Self {
-            block_device: Arc::clone(&block_device),
-            inode_bitmap,
-            data_bitmap,
-            inode_area_start_block: 1 + inode_bitmap_blocks,
-            data_area_start_block: 1 + inode_total_blocks + data_bitmap_blocks,
-        };
-        // clear all blocks
-        for i in 0..total_blocks {
-            get_block_cache(i as usize, Arc::clone(&block_device))
-                .lock()
-                .modify(0, |data_block: &mut DataBlock| {
-                    for byte in data_block.iter_mut() {
-                        *byte = 0;
-                    }
-                });
-        }
-        // initialize SuperBlock
-        get_block_cache(0, Arc::clone(&block_device)).lock().modify(
-            0,
-            |super_block: &mut SuperBlock| {
-                super_block.initialize(
-                    total_blocks,
-                    inode_bitmap_blocks,
-                    inode_area_blocks,
-                    data_bitmap_blocks,
-                    data_area_blocks,
-                );
-            },
-        );
-        // write back immediately
-        // create a inode for root node "/"
-        assert_eq!(efs.alloc_inode(), 0);
-        let (root_inode_block_id, root_inode_offset) = efs.get_disk_inode_pos(0);
-        get_block_cache(root_inode_block_id as usize, Arc::clone(&block_device))
-            .lock()
-            .modify(root_inode_offset, |disk_inode: &mut DiskInode| {
-                disk_inode.initialize(DiskInodeType::Directory);
-            });
-        block_cache_sync_all();
-        Arc::new(Mutex::new(efs))
-    }
-    /// Open a block device as a filesystem
-    pub fn open(block_device: Arc<dyn BlockDevice>) -> Arc<Mutex<Self>> {
-        // read SuperBlock
-        get_block_cache(0, Arc::clone(&block_device))
-            .lock()
-            .read(0, |super_block: &SuperBlock| {
-                assert!(super_block.is_valid(), "Error loading EFS!");
-                let inode_total_blocks =
-                    super_block.inode_bitmap_blocks + super_block.inode_area_blocks;
-                let efs = Self {
-                    block_device,
-                    inode_bitmap: Bitmap::new(1, super_block.inode_bitmap_blocks as usize),
-                    data_bitmap: Bitmap::new(
-                        (1 + inode_total_blocks) as usize,
-                        super_block.data_bitmap_blocks as usize,
-                    ),
-                    inode_area_start_block: 1 + super_block.inode_bitmap_blocks,
-                    data_area_start_block: 1 + inode_total_blocks + super_block.data_bitmap_blocks,
-                };
-                Arc::new(Mutex::new(efs))
-            })
-    }
-    /// Get the root inode of the filesystem
-    pub fn root_inode(efs: &Arc<Mutex<Self>>) -> Inode {
-        let block_device = Arc::clone(&efs.lock().block_device);
-        // acquire efs lock temporarily
-        let (block_id, block_offset) = efs.lock().get_disk_inode_pos(0);
-        // release efs lock
-        Inode::new(block_id, block_offset, Arc::clone(efs), block_device)
+impl<const N: usize, RCacheMgr, RCache> EasyFileSystem<N, RCacheMgr, RCache>
+where
+    RCacheMgr: lock_api::RawMutex,
+    RCache: lock_api::RawMutex,
+{
+    fn new_bare(bcache_mgr: BlockCacheMgr<N, RCache>) -> Self {
+        Self {
+            bcache_mgr: lock_api::Mutex::new(bcache_mgr),
+            inode_bitmap: Bitmap::default(),
+            data_bitmap: Bitmap::default(),
+            inode_area_start_block_id: 0,
+            data_area_start_block_id: 0,
+        }
     }
 
     /// Get inode by id
     pub fn get_disk_inode_pos(&self, inode_id: u32) -> (u32, usize) {
         let inode_size = core::mem::size_of::<DiskInode>();
         let inodes_per_block = (BLOCK_SZ / inode_size) as u32;
-        let block_id = self.inode_area_start_block + inode_id / inodes_per_block;
+        let block_id = self.inode_area_start_block_id + inode_id / inodes_per_block;
         (
             block_id,
             (inode_id % inodes_per_block) as usize * inode_size,
@@ -123,29 +52,183 @@ impl EasyFileSystem {
     }
     /// Get data block by id
     pub fn get_data_block_id(&self, data_block_id: u32) -> u32 {
-        self.data_area_start_block + data_block_id
+        self.data_area_start_block_id + data_block_id
     }
-    /// Allocate a new inode
-    pub fn alloc_inode(&mut self) -> u32 {
-        self.inode_bitmap.alloc(&self.block_device).unwrap() as u32
+    /// Allocate a new inode. This function should be called with BlockCacheMgr locked.
+    fn alloc_inode(&self, bcache_mgr: &mut BlockCacheMgr<N, RCache>) -> u32 {
+        self.inode_bitmap.alloc(bcache_mgr).unwrap() as u32
     }
 
     /// Allocate a data block
-    pub fn alloc_data(&mut self) -> u32 {
-        self.data_bitmap.alloc(&self.block_device).unwrap() as u32 + self.data_area_start_block
+    pub fn alloc_data(&self, bcache_mgr: &mut BlockCacheMgr<N, RCache>) -> u32 {
+        self.data_bitmap.alloc(bcache_mgr).unwrap() as u32 + self.data_area_start_block_id
     }
     /// Deallocate a data block
-    pub fn dealloc_data(&mut self, block_id: u32) {
-        get_block_cache(block_id as usize, Arc::clone(&self.block_device))
-            .lock()
-            .modify(0, |data_block: &mut DataBlock| {
-                data_block.iter_mut().for_each(|p| {
-                    *p = 0;
-                })
-            });
+    pub fn dealloc_data(&self, bcache_mgr: &mut BlockCacheMgr<N, RCache>, block_id: u32) {
+        bcache_mgr.write_block(block_id as usize, 0, |data_block: &mut DataBlock| {
+            data_block.iter_mut().for_each(|p| {
+                *p = 0;
+            })
+        });
         self.data_bitmap.dealloc(
-            &self.block_device,
-            (block_id - self.data_area_start_block) as usize,
+            bcache_mgr,
+            (block_id - self.data_area_start_block_id) as usize,
         )
     }
+
+    /// Operate on the efs and sync the changes to block device.
+    pub fn sync_transaction<T>(&mut self, op: impl FnOnce(&Self) -> T) -> T {
+        let ret = op(self);
+        self.bcache_mgr.lock().sync_all();
+        ret
+    }
+
+    /// Create a new file system on a bcache_mgr.
+    pub fn create(
+        total_blocks: u32,
+        inode_bitmap_blocks: u32,
+        bcache_mgr: BlockCacheMgr<N, RCache>,
+    ) -> Self {
+        let mut efs = Self::new_bare(bcache_mgr);
+        // calculate block size of areas & create bitmaps
+        // layout: SuperBlock | InodeBitmap | InodeArea | DataBitmap | DataArea
+        efs.inode_bitmap = Bitmap::new(1, inode_bitmap_blocks as usize);
+        let inode_num = efs.inode_bitmap.maximum();
+        let inode_area_blocks =
+            ((inode_num * core::mem::size_of::<DiskInode>() + BLOCK_SZ - 1) / BLOCK_SZ) as u32;
+        let inode_total_blocks = inode_bitmap_blocks + inode_area_blocks;
+        let data_total_blocks = total_blocks - 1 - inode_total_blocks;
+        let data_bitmap_blocks = (data_total_blocks + 4096) / 4097;
+        let data_area_blocks = data_total_blocks - data_bitmap_blocks;
+        efs.data_bitmap = Bitmap::new(
+            (1 + inode_bitmap_blocks + inode_area_blocks) as usize,
+            data_bitmap_blocks as usize,
+        );
+        efs.inode_area_start_block_id = inode_bitmap_blocks + 1;
+        efs.data_area_start_block_id = inode_total_blocks + data_bitmap_blocks + 1;
+
+        efs.sync_transaction(|efs_self| {
+            // clear all blocks
+            let mut bcache_mgr = efs_self.bcache_mgr.lock();
+            for i in 0..total_blocks {
+                bcache_mgr.write_block(i as usize, 0, |data_block: &mut DataBlock| {
+                    data_block.iter_mut().for_each(|byte| *byte = 0);
+                });
+            }
+            // initialize SuperBlock
+            bcache_mgr.write_block(SUPER_BLOCK_ID, 0, |super_block: &mut SuperBlock| {
+                super_block.initialize(
+                    total_blocks,
+                    inode_bitmap_blocks,
+                    inode_area_blocks,
+                    data_bitmap_blocks,
+                    data_area_blocks,
+                );
+            });
+            assert_eq!(
+                efs_self.inode_bitmap.alloc(&mut bcache_mgr),
+                Some(ROOT_INODE_ID)
+            );
+            let (root_inode_block_id, root_inode_offset) =
+                efs_self.get_disk_inode_pos(ROOT_INODE_ID as u32);
+            bcache_mgr.write_block(
+                root_inode_block_id as usize,
+                root_inode_offset,
+                |disk_inode: &mut DiskInode| {
+                    disk_inode.initialize(DiskInodeType::Directory);
+                },
+            );
+        });
+        efs
+    }
+
+    /// Open a block device as a filesystem
+    pub fn open(mut bcache_mgr: BlockCacheMgr<N, RCache>) -> Self {
+        let mut super_block = SuperBlock::default();
+        bcache_mgr.read_block(SUPER_BLOCK_ID, 0, |super_block_inner: &SuperBlock| {
+            super_block = *super_block_inner;
+        });
+        assert!(super_block.is_valid(), "Error loading EFS!");
+        let inode_total_blocks = super_block.inode_bitmap_blocks + super_block.inode_area_blocks;
+        Self {
+            bcache_mgr: lock_api::Mutex::new(bcache_mgr),
+            inode_bitmap: Bitmap::new(1, super_block.inode_bitmap_blocks as usize),
+            data_bitmap: Bitmap::new(
+                (1 + inode_total_blocks) as usize,
+                super_block.data_bitmap_blocks as usize,
+            ),
+            inode_area_start_block_id: 1 + super_block.inode_bitmap_blocks,
+            data_area_start_block_id: 1 + inode_total_blocks + super_block.data_bitmap_blocks,
+        }
+    }
+
+    /// Create a new inode of the given inode type, return the inode id.
+    ///
+    /// This function should be called with bcache_mgr locked.
+    pub fn new_inode_nolock(
+        &self,
+        bcache_mgr: &mut BlockCacheMgr<N, RCache>,
+        inode_type: DiskInodeType,
+    ) -> u32 {
+        let new_inode_id = self.alloc_inode(bcache_mgr);
+        let (new_inode_block_id, new_inode_block_offset) = self.get_disk_inode_pos(new_inode_id);
+        bcache_mgr.write_block(
+            new_inode_block_id as usize,
+            new_inode_block_offset,
+            |new_inode: &mut DiskInode| {
+                new_inode.initialize(inode_type);
+            },
+        );
+        new_inode_id
+    }
+    /// Increase the size of a disk inode.
+    ///
+    /// This function should be called with bcache_mgr locked.
+    pub fn increase_size_nolock(
+        &self,
+        new_size: u32,
+        disk_inode: &mut DiskInode,
+        bcache_mgr: &mut BlockCacheMgr<N, RCache>,
+    ) {
+        if new_size < disk_inode.size {
+            return;
+        }
+        let blocks_needed = disk_inode.blocks_num_needed(new_size);
+        let mut v: Vec<u32> = Vec::new();
+        for _ in 0..blocks_needed {
+            v.push(self.alloc_data(bcache_mgr));
+        }
+        disk_inode.increase_size(new_size, v, bcache_mgr);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::test_helper::*;
+    use std::sync::Arc;
+
+    const EFS_BLK_NUM: usize = 2048;
+    const BCACHE_NUM: usize = 256;
+
+    #[test]
+    pub fn test_efs_create_open() {
+        let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<EFS_BLK_NUM>::new());
+        {
+            let bcache_mgr: BlockCacheMgr<BCACHE_NUM, RawSpinlock> = BlockCacheMgr::new(&block_dev);
+            let _efs: EasyFileSystem<BCACHE_NUM, RawSpinlock, _> =
+                EasyFileSystem::create(EFS_BLK_NUM as u32, 1, bcache_mgr);
+        }
+        {
+            let bcache_mgr: BlockCacheMgr<BCACHE_NUM, RawSpinlock> = BlockCacheMgr::new(&block_dev);
+            let efs: EasyFileSystem<BCACHE_NUM, RawSpinlock, _> = EasyFileSystem::open(bcache_mgr);
+            // SuperBlock 1
+            // InodeBitmap 1
+            // InodeArea 1024
+            // last=1022, DataBitmap 1
+            assert_eq!(efs.inode_area_start_block_id, 2);
+            assert_eq!(efs.data_area_start_block_id, 1027);
+        }
+    }
+}
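
As a usage note (a sketch as it might appear inside easy-fs itself, not part of the commit): sync_transaction runs an operation against the filesystem and then calls sync_all on the cache manager, so callers get write-back without tracking dirty blocks themselves.

    // Assumes the internal items above (EasyFileSystem, BlockCacheMgr,
    // DiskInodeType) are in scope, as in the crate's own tests.
    fn create_file_synced<const N: usize, M, R>(efs: &mut EasyFileSystem<N, M, R>) -> u32
    where
        M: lock_api::RawMutex,
        R: lock_api::RawMutex,
    {
        efs.sync_transaction(|efs_self| {
            // Writes below go through the shared BlockCacheMgr; when the closure
            // returns, sync_transaction flushes them with sync_all().
            let mut bcache_mgr = efs_self.bcache_mgr.lock();
            efs_self.new_inode_nolock(&mut bcache_mgr, DiskInodeType::File)
        })
    }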
@ -1,5 +1,4 @@
|
|||||||
use super::{get_block_cache, BlockDevice, BLOCK_SZ};
|
use super::{BlockCacheMgr, BLOCK_SZ};
|
||||||
use alloc::sync::Arc;
|
|
||||||
use alloc::vec::Vec;
|
use alloc::vec::Vec;
|
||||||
use core::fmt::{Debug, Formatter, Result};
|
use core::fmt::{Debug, Formatter, Result};
|
||||||
|
|
||||||
@ -22,6 +21,7 @@ const INDIRECT1_BOUND: usize = DIRECT_BOUND + INODE_INDIRECT1_COUNT;
|
|||||||
const INDIRECT2_BOUND: usize = INDIRECT1_BOUND + INODE_INDIRECT2_COUNT;
|
const INDIRECT2_BOUND: usize = INDIRECT1_BOUND + INODE_INDIRECT2_COUNT;
|
||||||
/// Super block of a filesystem
|
/// Super block of a filesystem
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
|
#[derive(Default, Copy, Clone)]
|
||||||
pub struct SuperBlock {
|
pub struct SuperBlock {
|
||||||
magic: u32,
|
magic: u32,
|
||||||
pub total_blocks: u32,
|
pub total_blocks: u32,
|
||||||
@ -67,8 +67,10 @@ impl SuperBlock {
|
|||||||
self.magic == EFS_MAGIC
|
self.magic == EFS_MAGIC
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Type of a disk inode
|
/// Type of a disk inode
|
||||||
#[derive(PartialEq)]
|
#[derive(PartialEq)]
|
||||||
|
#[repr(u32)]
|
||||||
pub enum DiskInodeType {
|
pub enum DiskInodeType {
|
||||||
File,
|
File,
|
||||||
Directory,
|
Directory,
|
||||||
@ -109,14 +111,17 @@ impl DiskInode {
|
|||||||
}
|
}
|
||||||
/// Return block number correspond to size.
|
/// Return block number correspond to size.
|
||||||
pub fn data_blocks(&self) -> u32 {
|
pub fn data_blocks(&self) -> u32 {
|
||||||
Self::_data_blocks(self.size)
|
Self::data_blocks_inner(self.size)
|
||||||
}
|
}
|
||||||
fn _data_blocks(size: u32) -> u32 {
|
fn data_blocks_inner(size: u32) -> u32 {
|
||||||
(size + BLOCK_SZ as u32 - 1) / BLOCK_SZ as u32
|
(size + BLOCK_SZ as u32 - 1) / BLOCK_SZ as u32
|
||||||
}
|
}
|
||||||
/// Return number of blocks needed include indirect1/2.
|
/// Return number of blocks needed include indirect1/2.
|
||||||
pub fn total_blocks(size: u32) -> u32 {
|
pub fn total_blocks(&self) -> u32 {
|
||||||
let data_blocks = Self::_data_blocks(size) as usize;
|
Self::total_blocks_inner(self.size)
|
||||||
|
}
|
||||||
|
fn total_blocks_inner(size: u32) -> u32 {
|
||||||
|
let data_blocks = Self::data_blocks_inner(size) as usize;
|
||||||
let mut total = data_blocks as usize;
|
let mut total = data_blocks as usize;
|
||||||
// indirect1
|
// indirect1
|
||||||
if data_blocks > INODE_DIRECT_COUNT {
|
if data_blocks > INODE_DIRECT_COUNT {
|
||||||
@ -134,40 +139,46 @@ impl DiskInode {
|
|||||||
/// Get the number of data blocks that have to be allocated given the new size of data
|
/// Get the number of data blocks that have to be allocated given the new size of data
|
||||||
pub fn blocks_num_needed(&self, new_size: u32) -> u32 {
|
pub fn blocks_num_needed(&self, new_size: u32) -> u32 {
|
||||||
assert!(new_size >= self.size);
|
assert!(new_size >= self.size);
|
||||||
Self::total_blocks(new_size) - Self::total_blocks(self.size)
|
Self::total_blocks_inner(new_size) - Self::total_blocks_inner(self.size)
|
||||||
}
|
}
|
||||||
/// Get id of block given inner id
|
/// Get id of block given inner id
|
||||||
pub fn get_block_id(&self, inner_id: u32, block_device: &Arc<dyn BlockDevice>) -> u32 {
|
pub fn get_block_id<const N: usize, R>(
|
||||||
|
&self,
|
||||||
|
inner_id: u32,
|
||||||
|
bcache_mgr: &mut BlockCacheMgr<N, R>,
|
||||||
|
) -> u32
|
||||||
|
where
|
||||||
|
R: lock_api::RawMutex,
|
||||||
|
{
|
||||||
let inner_id = inner_id as usize;
|
let inner_id = inner_id as usize;
|
||||||
if inner_id < INODE_DIRECT_COUNT {
|
if inner_id < INODE_DIRECT_COUNT {
|
||||||
self.direct[inner_id]
|
self.direct[inner_id]
|
||||||
} else if inner_id < INDIRECT1_BOUND {
|
} else if inner_id < INDIRECT1_BOUND {
|
||||||
get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
|
bcache_mgr.read_block(
|
||||||
.lock()
|
self.indirect1 as usize,
|
||||||
.read(0, |indirect_block: &IndirectBlock| {
|
0,
|
||||||
indirect_block[inner_id - INODE_DIRECT_COUNT]
|
|indirect_block: &IndirectBlock| indirect_block[inner_id - INODE_DIRECT_COUNT],
|
||||||
})
|
)
|
||||||
} else {
|
} else {
|
||||||
let last = inner_id - INDIRECT1_BOUND;
|
let last = inner_id - INDIRECT1_BOUND;
|
||||||
let indirect1 = get_block_cache(self.indirect2 as usize, Arc::clone(block_device))
|
let indirect1 =
|
||||||
.lock()
|
bcache_mgr.read_block(self.indirect2 as usize, 0, |indirect2: &IndirectBlock| {
|
||||||
.read(0, |indirect2: &IndirectBlock| {
|
|
||||||
indirect2[last / INODE_INDIRECT1_COUNT]
|
indirect2[last / INODE_INDIRECT1_COUNT]
|
||||||
});
|
});
|
||||||
get_block_cache(indirect1 as usize, Arc::clone(block_device))
|
bcache_mgr.read_block(indirect1 as usize, 0, |indirect1: &IndirectBlock| {
|
||||||
.lock()
|
indirect1[last % INODE_INDIRECT1_COUNT]
|
||||||
.read(0, |indirect1: &IndirectBlock| {
|
})
|
||||||
indirect1[last % INODE_INDIRECT1_COUNT]
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
     /// Increase the size of current disk inode
-    pub fn increase_size(
+    pub fn increase_size<const N: usize, R>(
         &mut self,
         new_size: u32,
         new_blocks: Vec<u32>,
-        block_device: &Arc<dyn BlockDevice>,
-    ) {
+        bcache_mgr: &mut BlockCacheMgr<N, R>,
+    ) where
+        R: lock_api::RawMutex,
+    {
         let mut current_blocks = self.data_blocks();
         self.size = new_size;
         let mut total_blocks = self.data_blocks();
@@ -188,14 +199,16 @@ impl DiskInode {
             return;
         }
         // fill indirect1
-        get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
-            .lock()
-            .modify(0, |indirect1: &mut IndirectBlock| {
+        bcache_mgr.write_block(
+            self.indirect1 as usize,
+            0,
+            |indirect1: &mut IndirectBlock| {
                 while current_blocks < total_blocks.min(INODE_INDIRECT1_COUNT as u32) {
                     indirect1[current_blocks as usize] = new_blocks.next().unwrap();
                     current_blocks += 1;
                 }
-            });
+            },
+        );
         // alloc indirect2
         if total_blocks > INODE_INDIRECT1_COUNT as u32 {
             if current_blocks == INODE_INDIRECT1_COUNT as u32 {
@@ -212,19 +225,22 @@ impl DiskInode {
         let a1 = total_blocks as usize / INODE_INDIRECT1_COUNT;
         let b1 = total_blocks as usize % INODE_INDIRECT1_COUNT;
         // alloc low-level indirect1
-        get_block_cache(self.indirect2 as usize, Arc::clone(block_device))
+        let indirect2_blk = bcache_mgr.get_block_cache(self.indirect2 as usize);
+        indirect2_blk
             .lock()
-            .modify(0, |indirect2: &mut IndirectBlock| {
+            .write(0, |indirect2: &mut IndirectBlock| {
                 while (a0 < a1) || (a0 == a1 && b0 < b1) {
                     if b0 == 0 {
                         indirect2[a0] = new_blocks.next().unwrap();
                     }
                     // fill current
-                    get_block_cache(indirect2[a0] as usize, Arc::clone(block_device))
-                        .lock()
-                        .modify(0, |indirect1: &mut IndirectBlock| {
+                    bcache_mgr.write_block(
+                        indirect2[a0] as usize,
+                        0,
+                        |indirect1: &mut IndirectBlock| {
                             indirect1[b0] = new_blocks.next().unwrap();
-                        });
+                        },
+                    );
                     // move to next
                     b0 += 1;
                     if b0 == INODE_INDIRECT1_COUNT {
@@ -237,7 +253,13 @@ impl DiskInode {

     /// Clear size to zero and return blocks that should be deallocated.
     /// We will clear the block contents to zero later.
-    pub fn clear_size(&mut self, block_device: &Arc<dyn BlockDevice>) -> Vec<u32> {
+    pub fn clear_size<const N: usize, R>(
+        &mut self,
+        bcache_mgr: &mut BlockCacheMgr<N, R>,
+    ) -> Vec<u32>
+    where
+        R: lock_api::RawMutex,
+    {
         let mut v: Vec<u32> = Vec::new();
         let mut data_blocks = self.data_blocks() as usize;
         self.size = 0;
@@ -257,15 +279,17 @@ impl DiskInode {
             return v;
         }
         // indirect1
-        get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
-            .lock()
-            .modify(0, |indirect1: &mut IndirectBlock| {
+        bcache_mgr.write_block(
+            self.indirect1 as usize,
+            0,
+            |indirect1: &mut IndirectBlock| {
                 while current_blocks < data_blocks.min(INODE_INDIRECT1_COUNT) {
                     v.push(indirect1[current_blocks]);
-                    //indirect1[current_blocks] = 0;
+                    indirect1[current_blocks] = 0;
                     current_blocks += 1;
                 }
-            });
+            },
+        );
         self.indirect1 = 0;
         // indirect2 block
         if data_blocks > INODE_INDIRECT1_COUNT {
@@ -278,30 +302,32 @@ impl DiskInode {
         assert!(data_blocks <= INODE_INDIRECT2_COUNT);
         let a1 = data_blocks / INODE_INDIRECT1_COUNT;
         let b1 = data_blocks % INODE_INDIRECT1_COUNT;
-        get_block_cache(self.indirect2 as usize, Arc::clone(block_device))
+        let indirect2_blk = bcache_mgr.get_block_cache(self.indirect2 as usize);
+
+        indirect2_blk
             .lock()
-            .modify(0, |indirect2: &mut IndirectBlock| {
+            .write(0, |indirect2: &mut IndirectBlock| {
                 // full indirect1 blocks
                 for entry in indirect2.iter_mut().take(a1) {
                     v.push(*entry);
-                    get_block_cache(*entry as usize, Arc::clone(block_device))
-                        .lock()
-                        .modify(0, |indirect1: &mut IndirectBlock| {
-                            for entry in indirect1.iter() {
-                                v.push(*entry);
-                            }
-                        });
+                    bcache_mgr.write_block(*entry as usize, 0, |indirect1: &mut IndirectBlock| {
+                        for entry in indirect1.iter() {
+                            v.push(*entry);
+                        }
+                    });
                 }
                 // last indirect1 block
                 if b1 > 0 {
                     v.push(indirect2[a1]);
-                    get_block_cache(indirect2[a1] as usize, Arc::clone(block_device))
-                        .lock()
-                        .modify(0, |indirect1: &mut IndirectBlock| {
+                    bcache_mgr.write_block(
+                        indirect2[a1] as usize,
+                        0,
+                        |indirect1: &mut IndirectBlock| {
                             for entry in indirect1.iter().take(b1) {
                                 v.push(*entry);
                             }
-                        });
+                        },
+                    );
                     //indirect2[a1] = 0;
                 }
             });
@@ -309,12 +335,15 @@ impl DiskInode {
         v
     }
     /// Read data from current disk inode
-    pub fn read_at(
+    pub fn read_at<const N: usize, R>(
         &self,
         offset: usize,
         buf: &mut [u8],
-        block_device: &Arc<dyn BlockDevice>,
-    ) -> usize {
+        bcache_mgr: &mut BlockCacheMgr<N, R>,
+    ) -> usize
+    where
+        R: lock_api::RawMutex,
+    {
         let mut start = offset;
         let end = (offset + buf.len()).min(self.size as usize);
         if start >= end {
@@ -329,12 +358,8 @@ impl DiskInode {
             // read and update read size
             let block_read_size = end_current_block - start;
             let dst = &mut buf[read_size..read_size + block_read_size];
-            get_block_cache(
-                self.get_block_id(start_block as u32, block_device) as usize,
-                Arc::clone(block_device),
-            )
-            .lock()
-            .read(0, |data_block: &DataBlock| {
+            let block_id = self.get_block_id(start_block as u32, bcache_mgr);
+            bcache_mgr.read_block(block_id as usize, 0, |data_block: &DataBlock| {
                 let src = &data_block[start % BLOCK_SZ..start % BLOCK_SZ + block_read_size];
                 dst.copy_from_slice(src);
             });
@@ -350,12 +375,15 @@ impl DiskInode {
     }

     /// Write data into current disk inode
     /// size must be adjusted properly beforehand
-    pub fn write_at(
+    pub fn write_at<const N: usize, R>(
         &mut self,
         offset: usize,
         buf: &[u8],
-        block_device: &Arc<dyn BlockDevice>,
-    ) -> usize {
+        bcache_mgr: &mut BlockCacheMgr<N, R>,
+    ) -> usize
+    where
+        R: lock_api::RawMutex,
+    {
         let mut start = offset;
         let end = (offset + buf.len()).min(self.size as usize);
         assert!(start <= end);
@@ -367,12 +395,8 @@ impl DiskInode {
             end_current_block = end_current_block.min(end);
             // write and update write size
             let block_write_size = end_current_block - start;
-            get_block_cache(
-                self.get_block_id(start_block as u32, block_device) as usize,
-                Arc::clone(block_device),
-            )
-            .lock()
-            .modify(0, |data_block: &mut DataBlock| {
+            let block_id = self.get_block_id(start_block as u32, bcache_mgr) as usize;
+            bcache_mgr.write_block(block_id, 0, |data_block: &mut DataBlock| {
                 let src = &buf[write_size..write_size + block_write_size];
                 let dst = &mut data_block[start % BLOCK_SZ..start % BLOCK_SZ + block_write_size];
                 dst.copy_from_slice(src);
@@ -387,8 +411,33 @@ impl DiskInode {
         }
         write_size
     }
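The same constants bound the largest file this layout can address: the direct entries, one indirect1 block, and a two-level indirect2 tree. A back-of-envelope sketch, again treating INODE_DIRECT_COUNT = 28 as an assumption:

// Sketch: maximum addressable file size under the assumed constants.
const BLOCK_SZ: u64 = 512;
const INODE_DIRECT_COUNT: u64 = 28; // assumed, not confirmed by this diff
const INODE_INDIRECT1_COUNT: u64 = BLOCK_SZ / 4; // 128

fn main() {
    let direct = INODE_DIRECT_COUNT;
    let indirect1 = INODE_INDIRECT1_COUNT;
    let indirect2 = INODE_INDIRECT1_COUNT * INODE_INDIRECT1_COUNT; // 16384
    let max_bytes = (direct + indirect1 + indirect2) * BLOCK_SZ;
    // 28 + 128 + 16384 = 16540 data blocks, roughly 8.1 MiB
    println!("max file size: {} bytes", max_bytes);
}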
+
+    /// Find inode under a disk inode by name
+    pub fn inode_id_by_name<const N: usize, R>(
+        &self,
+        name: &str,
+        bcache_mgr: &mut BlockCacheMgr<N, R>,
+    ) -> Option<u32>
+    where
+        R: lock_api::RawMutex,
+    {
+        // assert it is a directory
+        assert!(self.is_dir());
+        let file_count = (self.size as usize) / DIRENT_SZ;
+        let mut dirent = DirEntry::empty();
+        for i in 0..file_count {
+            assert_eq!(
+                self.read_at(DIRENT_SZ * i, dirent.as_bytes_mut(), bcache_mgr),
+                DIRENT_SZ,
+            );
+            if dirent.name() == name {
+                return Some(dirent.inode_number() as u32);
+            }
+        }
+        None
+    }
 }
-/// A directory entry
 #[repr(C)]
 pub struct DirEntry {
     name: [u8; NAME_LENGTH_LIMIT + 1],
@@ -405,10 +454,11 @@ impl DirEntry {
             inode_number: 0,
         }
     }

     /// Create a directory entry from name and inode number
     pub fn new(name: &str, inode_number: u32) -> Self {
         let mut bytes = [0u8; NAME_LENGTH_LIMIT + 1];
-        bytes[..name.len()].copy_from_slice(name.as_bytes());
+        let len = core::cmp::min(name.len(), NAME_LENGTH_LIMIT);
+        bytes[..len].copy_from_slice(&name.as_bytes()[..len]);
         Self {
             name: bytes,
             inode_number,
@@ -416,11 +466,13 @@ impl DirEntry {
     }

     /// Serialize into bytes
     pub fn as_bytes(&self) -> &[u8] {
-        unsafe { core::slice::from_raw_parts(self as *const _ as usize as *const u8, DIRENT_SZ) }
+        // Safety: We can guarantee that size_of::<Self>() == DIRENT_SZ
+        unsafe { core::mem::transmute::<&Self, &[u8; DIRENT_SZ]>(self) }
     }

     /// Serialize into mutable bytes
     pub fn as_bytes_mut(&mut self) -> &mut [u8] {
-        unsafe { core::slice::from_raw_parts_mut(self as *mut _ as usize as *mut u8, DIRENT_SZ) }
+        // Safety: same as Self::as_bytes
+        unsafe { core::mem::transmute::<&mut Self, &mut [u8; DIRENT_SZ]>(self) }
     }

     /// Get name of the entry
     pub fn name(&self) -> &str {
@@ -432,3 +484,107 @@ impl DirEntry {
         self.inode_number
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::test_helper::*;
+    use std::sync::Arc;
+
+    #[test]
+    pub fn test_static_size() {
+        use core::mem::size_of;
+        assert!(size_of::<SuperBlock>() <= BLOCK_SZ);
+        assert_eq!(size_of::<DiskInode>(), BLOCK_SZ / 4);
+        assert_eq!(size_of::<DirEntry>(), DIRENT_SZ);
+    }
+
+    pub mod disk_inode {
+        use super::*;
+
+        #[test]
+        pub fn test_init() {
+            let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<4096>::new());
+            let mut bcache_mgr: BlockCacheMgr<256, RawSpinlock> = BlockCacheMgr::new(&block_dev);
+            let mut blk_allocator = StackAllocator::<usize>::new();
+            let inode_blk_id = blk_allocator.alloc(1)[0];
+
+            bcache_mgr.write_block(inode_blk_id, 0, |disk_inode: &mut DiskInode| {
+                disk_inode.initialize(DiskInodeType::File);
+                assert_eq!(disk_inode.data_blocks(), 0);
+                assert_eq!(disk_inode.total_blocks(), 0);
+            });
+        }
+
+        #[test]
+        pub fn test_increase_clear_size() {
+            let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<4096>::new());
+            let mut bcache_mgr: BlockCacheMgr<16, RawSpinlock> = BlockCacheMgr::new(&block_dev);
+            let mut blk_allocator = StackAllocator::<u32>::new();
+            let inode_blk_id = blk_allocator.alloc(1)[0] as usize;
+            let mut allocated = 0usize;
+
+            let inode_blk = bcache_mgr.get_block_cache(inode_blk_id);
+            let mut inode_blk_guard = inode_blk.lock();
+            let disk_inode = inode_blk_guard.value_mut_at_offset::<DiskInode>(0);
+
+            disk_inode.initialize(DiskInodeType::File);
+            let new_size = 20 * BLOCK_SZ;
+            let needed_blk_num = disk_inode.blocks_num_needed(new_size as u32);
+            allocated += needed_blk_num as usize;
+            assert_eq!(needed_blk_num, 20);
+            let new_blks = blk_allocator.alloc(needed_blk_num as usize);
+            disk_inode.increase_size(new_size as u32, new_blks, &mut bcache_mgr);
+            assert_eq!(disk_inode.size, new_size as u32);
+
+            let new_size = INDIRECT1_BOUND * BLOCK_SZ;
+            let needed_blk_num = disk_inode.blocks_num_needed(new_size as u32);
+            allocated += needed_blk_num as usize;
+            assert_eq!(needed_blk_num, INDIRECT1_BOUND as u32 - 20 + 1);
+            let new_blks = blk_allocator.alloc(needed_blk_num as usize);
+            disk_inode.increase_size(new_size as u32, new_blks, &mut bcache_mgr);
+            assert_eq!(disk_inode.size, new_size as u32);
+
+            let blks = disk_inode.clear_size(&mut bcache_mgr);
+            assert_eq!(disk_inode.size, 0);
+            assert_eq!(blks.len(), allocated);
+        }
+
+        #[test]
+        pub fn test_read_write() {
+            let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<4096>::new());
+            let mut bcache_mgr: BlockCacheMgr<16, RawSpinlock> = BlockCacheMgr::new(&block_dev);
+            let mut blk_allocator = StackAllocator::<u32>::new();
+            let inode_blk_id = blk_allocator.alloc(1)[0] as usize;
+
+            let inode_blk = bcache_mgr.get_block_cache(inode_blk_id);
+            let mut inode_blk_guard = inode_blk.lock();
+            let disk_inode = inode_blk_guard.value_mut_at_offset::<DiskInode>(0);
+            let new_size = 200 * BLOCK_SZ as u32;
+            let needed_blk_num = disk_inode.blocks_num_needed(new_size);
+            let new_blks = blk_allocator.alloc(needed_blk_num as usize);
+            disk_inode.increase_size(new_size, new_blks, &mut bcache_mgr);
+
+            let test_ops = 1000;
+            let mut fake_file = FakeFile::new(new_size as usize);
+            for _ in 0..test_ops {
+                let file_op = FileOpGenerator::generate(new_size as usize);
+                match file_op {
+                    FileOp::FileRead { offset, len } => {
+                        let mut buf_fake: Vec<u8> = Vec::new();
+                        let mut buf: Vec<u8> = Vec::new();
+                        buf_fake.resize(len, 0);
+                        buf.resize(len, 0);
+                        disk_inode.read_at(offset, buf.as_mut_slice(), &mut bcache_mgr);
+                        fake_file.read_at(offset, buf_fake.as_mut_slice());
+                        assert_eq!(buf.as_slice(), buf_fake.as_slice());
+                    }
+                    FileOp::FileWrite { offset, data } => {
+                        disk_inode.write_at(offset, data.as_slice(), &mut bcache_mgr);
+                        fake_file.write_at(offset, data.as_slice());
+                    }
+                }
+            }
+        }
+    }
+}
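These tests drive DiskInode through a BlockCacheMgr backed by the in-memory TestBlockDevice defined in test_helper.rs below. A further minimal round-trip test in the same style could look like this sketch; it assumes DataBlock is the [u8; BLOCK_SZ] alias that read_at/write_at use above and is visible via `use super::*`:

// Sketch: write one byte through the cache, then read it back.
#[test]
pub fn test_cache_round_trip() {
    let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<16>::new());
    let mut bcache_mgr: BlockCacheMgr<4, RawSpinlock> = BlockCacheMgr::new(&block_dev);
    bcache_mgr.write_block(3, 0, |data_block: &mut DataBlock| {
        data_block[7] = 0xA5;
    });
    let byte = bcache_mgr.read_block(3, 0, |data_block: &DataBlock| data_block[7]);
    assert_eq!(byte, 0xA5);
}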
@@ -1,18 +1,22 @@
-//! An easy file system isolated from the kernel
-#![no_std]
+#![cfg_attr(not(test), no_std)]
 #![deny(missing_docs)]
+
+//! EasyFileSystem
+
 extern crate alloc;
+
 mod bitmap;
 mod block_cache;
 mod block_dev;
 mod efs;
 mod layout;
+#[cfg(test)]
+mod test_helper;
 mod vfs;
-/// Use a block size of 512 bytes
+
+/// Each block is 512 bytes.
 pub const BLOCK_SZ: usize = 512;
-use bitmap::Bitmap;
-use block_cache::{block_cache_sync_all, get_block_cache};
+pub use block_cache::{BlockCache, BlockCacheMgr};
 pub use block_dev::BlockDevice;
 pub use efs::EasyFileSystem;
-use layout::*;
 pub use vfs::Inode;
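The `#![cfg_attr(not(test), no_std)]` switch is what makes the new unit tests possible: the crate stays no_std when built into the kernel, but links std under `cargo test` so the tests and test_helper can use std::sync::Mutex, rand, and VecDeque. The same pattern in a minimal stand-alone crate (hypothetical crate, for illustration only):

// lib.rs of a hypothetical crate: no_std for normal builds, std under test.
#![cfg_attr(not(test), no_std)]

extern crate alloc;
use alloc::vec::Vec;

pub fn double_all(v: &mut Vec<u32>) {
    for x in v.iter_mut() {
        *x *= 2;
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn it_doubles() {
        let mut v = vec![1, 2, 3]; // std's vec! macro is available here
        double_all(&mut v);
        assert_eq!(v, vec![2, 4, 6]);
    }
}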
easy-fs/src/test_helper.rs (new file, 199 lines)
@@ -0,0 +1,199 @@
+pub use crate::{BlockDevice, BLOCK_SZ};
+use core::mem::swap;
+use lock_api::{GuardSend, RawMutex};
+use rand::Rng;
+use std::collections::VecDeque;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::Mutex;
+
+pub struct MockBlockDevice;
+
+impl BlockDevice for MockBlockDevice {
+    fn read_block(&self, _block_id: usize, _buf: &mut [u8]) {}
+    fn write_block(&self, _block_id: usize, _buf: &[u8]) {}
+}
+
+pub struct TestBlockDevice<const N: usize> {
+    pub blocks: Box<Mutex<Vec<[u8; BLOCK_SZ]>>>,
+}
+
+impl<const N: usize> TestBlockDevice<N> {
+    pub fn new() -> Self {
+        let mut v: Vec<[u8; BLOCK_SZ]> = Vec::new();
+        for _ in 0..N {
+            v.push([0u8; BLOCK_SZ]);
+        }
+        Self {
+            blocks: Box::new(Mutex::new(v)),
+        }
+    }
+}
+
+impl<const N: usize> BlockDevice for TestBlockDevice<N> {
+    fn read_block(&self, block_id: usize, buf: &mut [u8]) {
+        assert!(block_id < N);
+        let blocks = self.blocks.as_ref().lock().unwrap();
+        buf.copy_from_slice(&blocks[block_id]);
+    }
+    fn write_block(&self, block_id: usize, buf: &[u8]) {
+        assert!(block_id < N);
+        let mut blocks = self.blocks.as_ref().lock().unwrap();
+        blocks[block_id].copy_from_slice(buf);
+    }
+}
+
+/// Define our raw lock type
+///
+/// From [lock_api](https://docs.rs/lock_api/latest/lock_api/index.html)
+pub struct RawSpinlock(AtomicBool);
+
+// Implement RawMutex for this type
+unsafe impl RawMutex for RawSpinlock {
+    const INIT: RawSpinlock = RawSpinlock(AtomicBool::new(false));
+
+    // A spinlock guard can be sent to another thread and unlocked there
+    type GuardMarker = GuardSend;
+
+    fn lock(&self) {
+        // Note: This isn't the best way of implementing a spinlock, but it
+        // suffices for the sake of this example.
+        while !self.try_lock() {}
+    }
+
+    fn try_lock(&self) -> bool {
+        self.0
+            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
+            .is_ok()
+    }
+
+    unsafe fn unlock(&self) {
+        self.0.store(false, Ordering::Release);
+    }
+}
+
+pub struct StackAllocator<U> {
+    next_id: U,
+    free_list: VecDeque<U>,
+}
+
+impl StackAllocator<u32> {
+    pub fn new() -> Self {
+        Self {
+            next_id: 0,
+            free_list: VecDeque::new(),
+        }
+    }
+    fn alloc_one(&mut self) -> u32 {
+        if let Some(id) = self.free_list.pop_back() {
+            id
+        } else {
+            let id = self.next_id;
+            self.next_id += 1;
+            id
+        }
+    }
+    pub fn alloc(&mut self, n: usize) -> Vec<u32> {
+        let mut v: Vec<u32> = Vec::new();
+        for _ in 0..n {
+            v.push(self.alloc_one());
+        }
+        v
+    }
+}
+
+impl StackAllocator<usize> {
+    pub fn new() -> Self {
+        Self {
+            next_id: 0,
+            free_list: VecDeque::new(),
+        }
+    }
+    fn alloc_one(&mut self) -> usize {
+        if let Some(id) = self.free_list.pop_back() {
+            id
+        } else {
+            let id = self.next_id;
+            self.next_id += 1;
+            id
+        }
+    }
+    pub fn alloc(&mut self, n: usize) -> Vec<usize> {
+        let mut v: Vec<usize> = Vec::new();
+        for _ in 0..n {
+            v.push(self.alloc_one());
+        }
+        v
+    }
+}
+
+impl<U> StackAllocator<U> {
+    #[allow(unused)]
+    fn free_one(&mut self, id: U) {
+        self.free_list.push_back(id);
+    }
+
+    #[allow(unused)]
+    pub fn free(&mut self, ids: Vec<U>) {
+        for id in ids.into_iter() {
+            self.free_one(id);
+        }
+    }
+}
+
+pub struct FakeFile {
+    v: Vec<u8>,
+}
+
+impl FakeFile {
+    pub fn new(size: usize) -> Self {
+        let mut v: Vec<u8> = Vec::new();
+        for _ in 0..size {
+            v.push(0u8);
+        }
+        Self { v }
+    }
+    pub fn read_at(&self, offset: usize, dst: &mut [u8]) {
+        dst.copy_from_slice(&self.v.as_slice()[offset..offset + dst.len()]);
+    }
+    pub fn write_at(&mut self, offset: usize, src: &[u8]) {
+        let dst = &mut self.v.as_mut_slice()[offset..offset + src.len()];
+        dst.copy_from_slice(src);
+    }
+}
+
+pub struct FileOpGenerator;
+
+#[derive(Debug)]
+pub enum FileOp {
+    FileRead { offset: usize, len: usize },
+    FileWrite { offset: usize, data: Vec<u8> },
+}
+
+impl FileOpGenerator {
+    pub fn generate(file_len: usize) -> FileOp {
+        let offset: usize;
+        let len: usize;
+        let mut rng = rand::thread_rng();
+        loop {
+            let mut offset0 = rng.gen::<u64>() % (file_len as u64 - 1);
+            let mut offset1 = rng.gen::<u64>() % (file_len as u64 - 1);
+            if offset0 != offset1 {
+                if offset0 > offset1 {
+                    swap(&mut offset0, &mut offset1);
+                }
+                offset = offset0 as usize;
+                len = (offset1 - offset0) as usize;
+                break;
+            }
+        }
+        if rand::random() {
+            FileOp::FileRead { offset, len }
+        } else {
+            let mut data: Vec<u8> = Vec::new();
+            for _ in 0..len {
+                data.push(rand::random::<u8>());
+            }
+            FileOp::FileWrite { offset, data }
+        }
+    }
+}
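Everything test_helper defines exists so that lock_api can derive a full Mutex/MutexGuard API from a user-supplied RawMutex; this is the extension point the commit introduces. A small sketch using the RawSpinlock defined above (assumes it is in scope):

// Sketch: lock_api::Mutex is generic over any RawMutex implementation, so
// the same data structure works with RawSpinlock here or with the kernel's
// RawExclusiveLock later in this commit.
type Spinlock<T> = lock_api::Mutex<RawSpinlock, T>;

fn demo() {
    let counter: Spinlock<u32> = Spinlock::new(0);
    *counter.lock() += 1; // the guard releases the lock on drop
    assert_eq!(*counter.lock(), 1);
}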
@@ -1,186 +1,295 @@
-use super::{
-    block_cache_sync_all, get_block_cache, BlockDevice, DirEntry, DiskInode, DiskInodeType,
-    EasyFileSystem, DIRENT_SZ,
-};
+use crate::efs::EasyFileSystem;
+use crate::layout::{DirEntry, DiskInode, DiskInodeType, DIRENT_SZ};
+use crate::BlockCacheMgr;
 use alloc::string::String;
 use alloc::sync::Arc;
 use alloc::vec::Vec;
-use spin::{Mutex, MutexGuard};
+use core::str::FromStr;

 /// Virtual filesystem layer over easy-fs
-pub struct Inode {
+pub struct Inode<REasyFS, const N: usize, RCacheMgr, RCache>
+where
+    REasyFS: lock_api::RawMutex,
+    RCacheMgr: lock_api::RawMutex,
+    RCache: lock_api::RawMutex,
+{
     block_id: usize,
     block_offset: usize,
-    fs: Arc<Mutex<EasyFileSystem>>,
-    block_device: Arc<dyn BlockDevice>,
+    fs: Arc<lock_api::Mutex<REasyFS, EasyFileSystem<N, RCacheMgr, RCache>>>,
 }

-impl Inode {
-    /// Create a vfs inode
-    pub fn new(
-        block_id: u32,
-        block_offset: usize,
-        fs: Arc<Mutex<EasyFileSystem>>,
-        block_device: Arc<dyn BlockDevice>,
+impl<REasyFS, const N: usize, RCacheMgr, RCache> Inode<REasyFS, N, RCacheMgr, RCache>
+where
+    REasyFS: lock_api::RawMutex,
+    RCacheMgr: lock_api::RawMutex,
+    RCache: lock_api::RawMutex,
+{
+    /// Get the root inode of an EasyFileSystem.
+    pub fn root_inode(
+        efs: &Arc<lock_api::Mutex<REasyFS, EasyFileSystem<N, RCacheMgr, RCache>>>,
     ) -> Self {
+        let (block_id, block_offset) = efs.lock().get_disk_inode_pos(0);
         Self {
             block_id: block_id as usize,
             block_offset,
-            fs,
-            block_device,
+            fs: Arc::clone(efs),
         }
     }
-    /// Call a function over a disk inode to read it
-    fn read_disk_inode<V>(&self, f: impl FnOnce(&DiskInode) -> V) -> V {
-        get_block_cache(self.block_id, Arc::clone(&self.block_device))
-            .lock()
-            .read(self.block_offset, f)
-    }
-    /// Call a function over a disk inode to modify it
-    fn modify_disk_inode<V>(&self, f: impl FnOnce(&mut DiskInode) -> V) -> V {
-        get_block_cache(self.block_id, Arc::clone(&self.block_device))
-            .lock()
-            .modify(self.block_offset, f)
-    }
-    /// Find inode under a disk inode by name
-    fn find_inode_id(&self, name: &str, disk_inode: &DiskInode) -> Option<u32> {
-        // assert it is a directory
-        assert!(disk_inode.is_dir());
-        let file_count = (disk_inode.size as usize) / DIRENT_SZ;
-        let mut dirent = DirEntry::empty();
-        for i in 0..file_count {
-            assert_eq!(
-                disk_inode.read_at(DIRENT_SZ * i, dirent.as_bytes_mut(), &self.block_device,),
-                DIRENT_SZ,
-            );
-            if dirent.name() == name {
-                return Some(dirent.inode_number() as u32);
-            }
-        }
-        None
-    }
-    /// Find inode under current inode by name
-    pub fn find(&self, name: &str) -> Option<Arc<Inode>> {
-        let fs = self.fs.lock();
-        self.read_disk_inode(|disk_inode| {
-            self.find_inode_id(name, disk_inode).map(|inode_id| {
-                let (block_id, block_offset) = fs.get_disk_inode_pos(inode_id);
-                Arc::new(Self::new(
-                    block_id,
-                    block_offset,
-                    self.fs.clone(),
-                    self.block_device.clone(),
-                ))
-            })
-        })
-    }
-    /// Increase the size of a disk inode
-    fn increase_size(
-        &self,
-        new_size: u32,
-        disk_inode: &mut DiskInode,
-        fs: &mut MutexGuard<EasyFileSystem>,
-    ) {
-        if new_size < disk_inode.size {
-            return;
-        }
-        let blocks_needed = disk_inode.blocks_num_needed(new_size);
-        let mut v: Vec<u32> = Vec::new();
-        for _ in 0..blocks_needed {
-            v.push(fs.alloc_data());
-        }
-        disk_inode.increase_size(new_size, v, &self.block_device);
-    }
+    /// Apply a read operation on the DiskInode the current Inode refers to.
+    ///
+    /// This requires the mutable reference of the bcache_mgr.
+    pub fn read_disk_inode<V>(
+        &self,
+        bcache_mgr: &mut BlockCacheMgr<N, RCache>,
+        op: impl FnOnce(&DiskInode) -> V,
+    ) -> V {
+        bcache_mgr.read_block(self.block_id, self.block_offset, op)
+    }
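read_disk_inode (and write_disk_inode just below) now thread the cache manager explicitly instead of reaching for a global cache. A hedged usage sketch built only from the signatures shown here; the helper name file_size_of is made up for illustration, and the sketch assumes it is written inside the easy-fs crate where DiskInode is visible:

// Sketch: a caller supplies the cache manager; lock discipline is its job.
fn file_size_of<REasyFS, const N: usize, RCacheMgr, RCache>(
    inode: &Inode<REasyFS, N, RCacheMgr, RCache>,
    bcache_mgr: &mut BlockCacheMgr<N, RCache>,
) -> u32
where
    REasyFS: lock_api::RawMutex,
    RCacheMgr: lock_api::RawMutex,
    RCache: lock_api::RawMutex,
{
    inode.read_disk_inode(bcache_mgr, |disk_inode| disk_inode.size)
}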
-    /// Create inode under current inode by name
-    pub fn create(&self, name: &str) -> Option<Arc<Inode>> {
-        let mut fs = self.fs.lock();
-        let op = |root_inode: &DiskInode| {
-            // assert it is a directory
-            assert!(root_inode.is_dir());
-            // has the file been created?
-            self.find_inode_id(name, root_inode)
-        };
-        if self.read_disk_inode(op).is_some() {
+    /// Apply a write operation on the DiskInode the current Inode refers to.
+    ///
+    /// This requires the mutable reference of the bcache_mgr.
+    pub fn write_disk_inode<V>(
+        &self,
+        bcache_mgr: &mut BlockCacheMgr<N, RCache>,
+        op: impl FnOnce(&mut DiskInode) -> V,
+    ) -> V {
+        bcache_mgr.write_block(self.block_id, self.block_offset, op)
+    }
+
+    /// Return true if the current Inode is a directory.
+    pub fn is_dir(&self) -> bool {
+        let fs_guard = self.fs.lock();
+        let mut bcache_mgr = fs_guard.bcache_mgr.lock();
+        bcache_mgr.read_block(
+            self.block_id,
+            self.block_offset,
+            |disk_inode: &DiskInode| disk_inode.is_dir(),
+        )
+    }
+
+    /// Return true if the current Inode is a file.
+    pub fn is_file(&self) -> bool {
+        let fs_guard = self.fs.lock();
+        let mut bcache_mgr = fs_guard.bcache_mgr.lock();
+        bcache_mgr.read_block(
+            self.block_id,
+            self.block_offset,
+            |disk_inode: &DiskInode| disk_inode.is_file(),
+        )
+    }
+
+    /// Return the size in bytes of the content the current Inode refers to.
+    pub fn size(&self) -> u32 {
+        let fs_guard = self.fs.lock();
+        let mut bcache_mgr = fs_guard.bcache_mgr.lock();
+        bcache_mgr.read_block(
+            self.block_id,
+            self.block_offset,
+            |disk_inode: &DiskInode| disk_inode.size,
+        )
+    }
+
+    /// Create a new file under the root directory.
+    ///
+    /// If the file with the given name exists, return None. Otherwise,
+    /// return the created Inode.
+    pub fn create(&self, name: &str) -> Option<Arc<Self>> {
+        assert!(self.is_dir());
+        if self.find(name).is_some() {
             return None;
         }
-        // create a new file
-        // alloc a inode with an indirect block
-        let new_inode_id = fs.alloc_inode();
-        // initialize inode
-        let (new_inode_block_id, new_inode_block_offset) = fs.get_disk_inode_pos(new_inode_id);
-        get_block_cache(new_inode_block_id as usize, Arc::clone(&self.block_device))
-            .lock()
-            .modify(new_inode_block_offset, |new_inode: &mut DiskInode| {
-                new_inode.initialize(DiskInodeType::File);
-            });
-        self.modify_disk_inode(|root_inode| {
-            // append file in the dirent
-            let file_count = (root_inode.size as usize) / DIRENT_SZ;
-            let new_size = (file_count + 1) * DIRENT_SZ;
-            // increase size
-            self.increase_size(new_size as u32, root_inode, &mut fs);
-            // write dirent
-            let dirent = DirEntry::new(name, new_inode_id);
-            root_inode.write_at(
-                file_count * DIRENT_SZ,
-                dirent.as_bytes(),
-                &self.block_device,
-            );
-        });
-        let (block_id, block_offset) = fs.get_disk_inode_pos(new_inode_id);
-        block_cache_sync_all();
-        // return inode
-        Some(Arc::new(Self::new(
-            block_id,
-            block_offset,
-            self.fs.clone(),
-            self.block_device.clone(),
-        )))
-        // release efs lock automatically by compiler
-    }
-    /// List inodes under current inode
-    pub fn ls(&self) -> Vec<String> {
-        let _fs = self.fs.lock();
-        self.read_disk_inode(|disk_inode| {
-            let file_count = (disk_inode.size as usize) / DIRENT_SZ;
-            let mut v: Vec<String> = Vec::new();
-            for i in 0..file_count {
-                let mut dirent = DirEntry::empty();
-                assert_eq!(
-                    disk_inode.read_at(i * DIRENT_SZ, dirent.as_bytes_mut(), &self.block_device,),
-                    DIRENT_SZ,
-                );
-                v.push(String::from(dirent.name()));
-            }
-            v
+        let mut fs_guard = self.fs.lock();
+        fs_guard.sync_transaction(|fs| {
+            let mut bcache_mgr = fs.bcache_mgr.lock();
+            let new_inode_id = fs.new_inode_nolock(&mut bcache_mgr, DiskInodeType::File);
+            let (new_inode_blk_id, new_inode_blk_offset) = fs.get_disk_inode_pos(new_inode_id);
+            let root_inode_blk = bcache_mgr.get_block_cache(self.block_id);
+            root_inode_blk
+                .lock()
+                .write(self.block_offset, |root_inode: &mut DiskInode| {
+                    // append file in the dirent
+                    let file_count = (root_inode.size as usize) / DIRENT_SZ;
+                    let new_size = (file_count + 1) * DIRENT_SZ;
+                    // increase size
+                    fs.increase_size_nolock(new_size as u32, root_inode, &mut bcache_mgr);
+                    // write dirent
+                    let dirent = DirEntry::new(name, new_inode_id);
+                    root_inode.write_at(file_count * DIRENT_SZ, dirent.as_bytes(), &mut bcache_mgr);
+                });
+            Some(Arc::new(Self {
+                block_id: new_inode_blk_id as usize,
+                block_offset: new_inode_blk_offset,
+                fs: Arc::clone(&self.fs),
+            }))
         })
     }
-    /// Read data from current inode
+
+    /// Search a file with the given name under the root directory and return the
+    /// corresponding Inode.
+    pub fn find(&self, name: &str) -> Option<Arc<Self>> {
+        assert!(self.is_dir());
+        let mut fs_guard = self.fs.lock();
+
+        fs_guard.sync_transaction(|fs| {
+            let mut bcache_mgr = fs.bcache_mgr.lock();
+            let block_cache = bcache_mgr.get_block_cache(self.block_id);
+            let disk_inode_op = |disk_inode: &DiskInode| {
+                disk_inode
+                    .inode_id_by_name(name, &mut bcache_mgr)
+                    .map(|inode_id: u32| {
+                        let (block_id, block_offset) = fs.get_disk_inode_pos(inode_id);
+                        Arc::new(Self {
+                            block_id: block_id as usize,
+                            block_offset,
+                            fs: self.fs.clone(),
+                        })
+                    })
+            };
+            let block_cache = block_cache.lock();
+            block_cache.read(self.block_offset, disk_inode_op)
+        })
+    }
+
+    /// Read the file's contents at the specified offset into the given buffer.
     pub fn read_at(&self, offset: usize, buf: &mut [u8]) -> usize {
-        let _fs = self.fs.lock();
-        self.read_disk_inode(|disk_inode| disk_inode.read_at(offset, buf, &self.block_device))
+        let mut fs_guard = self.fs.lock();
+        fs_guard.sync_transaction(|fs| {
+            let mut bcache_mgr = fs.bcache_mgr.lock();
+            let inode_blk = bcache_mgr.get_block_cache(self.block_id);
+            let inode_blk_guard = inode_blk.lock();
+            let disk_inode: &DiskInode = inode_blk_guard.value_ref_at_offset(self.block_offset);
+            disk_inode.read_at(offset, buf, &mut bcache_mgr)
+        })
     }
-    /// Write data to current inode
+
+    /// Write the contents of the buffer to the file at the specified offset.
     pub fn write_at(&self, offset: usize, buf: &[u8]) -> usize {
-        let mut fs = self.fs.lock();
-        let size = self.modify_disk_inode(|disk_inode| {
-            self.increase_size((offset + buf.len()) as u32, disk_inode, &mut fs);
-            disk_inode.write_at(offset, buf, &self.block_device)
-        });
-        block_cache_sync_all();
-        size
+        let mut fs_guard = self.fs.lock();
+        fs_guard.sync_transaction(|fs| {
+            let mut bcache_mgr = fs.bcache_mgr.lock();
+            let inode_blk = bcache_mgr.get_block_cache(self.block_id);
+            let mut inode_blk_guard = inode_blk.lock();
+            let disk_inode: &mut DiskInode = inode_blk_guard.value_mut_at_offset(self.block_offset);
+            fs.increase_size_nolock((offset + buf.len()) as u32, disk_inode, &mut bcache_mgr);
+            disk_inode.write_at(offset, buf, &mut bcache_mgr)
+        })
     }
-    /// Clear the data in current inode
+
+    /// Clear the size of the file to zero.
     pub fn clear(&self) {
-        let mut fs = self.fs.lock();
-        self.modify_disk_inode(|disk_inode| {
-            let size = disk_inode.size;
-            let data_blocks_dealloc = disk_inode.clear_size(&self.block_device);
-            assert!(data_blocks_dealloc.len() == DiskInode::total_blocks(size) as usize);
-            for data_block in data_blocks_dealloc.into_iter() {
-                fs.dealloc_data(data_block);
-            }
-        });
-        block_cache_sync_all();
+        let mut fs_guard = self.fs.lock();
+        fs_guard.sync_transaction(|fs| {
+            let mut bcache_mgr = fs.bcache_mgr.lock();
+            let inode_blk = bcache_mgr.get_block_cache(self.block_id);
+            let mut inode_blk_guard = inode_blk.lock();
+            let disk_inode: &mut DiskInode = inode_blk_guard.value_mut_at_offset(self.block_offset);
+            let total_blks = disk_inode.total_blocks();
+            let blocks_dealloc = disk_inode.clear_size(&mut bcache_mgr);
+            assert_eq!(blocks_dealloc.len(), total_blks as usize);
+            for block_id in blocks_dealloc.into_iter() {
+                fs.dealloc_data(&mut bcache_mgr, block_id);
+            }
+        });
     }
+
+    /// Return a vector containing all the file names, as Strings, under the directory.
+    pub fn listdir(&self) -> Vec<String> {
+        assert!(self.is_dir());
+        let mut list: Vec<String> = Vec::new();
+        assert_eq!(self.size() % (DIRENT_SZ as u32), 0);
+        let mut dirent = DirEntry::empty();
+        let mut offset = 0usize;
+        while offset < self.size() as usize {
+            self.read_at(offset, dirent.as_bytes_mut());
+            list.push(String::from_str(dirent.name()).unwrap());
+            offset += DIRENT_SZ;
+        }
+        list
+    }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::test_helper::*;
+
+    const BLK_DEV_SZ: usize = 4096;
+    const BCACHE_SZ: usize = 256;
+    const TEST_FILENAME: &str = "test_file0";
+
+    type EasyFileSystemType = EasyFileSystem<256, RawSpinlock, RawSpinlock>;
+
+    fn test_prepare_efs() -> Arc<lock_api::Mutex<RawSpinlock, EasyFileSystemType>> {
+        let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<BLK_DEV_SZ>::new());
+        let bcache_mgr: BlockCacheMgr<BCACHE_SZ, RawSpinlock> = BlockCacheMgr::new(&block_dev);
+        let efs: EasyFileSystem<BCACHE_SZ, RawSpinlock, _> =
+            EasyFileSystem::create(BLK_DEV_SZ as u32, 1, bcache_mgr);
+        let efs: lock_api::Mutex<RawSpinlock, EasyFileSystem<BCACHE_SZ, _, _>> =
+            lock_api::Mutex::new(efs);
+        Arc::new(efs)
+    }
+
+    #[test]
+    pub fn test_file_create_and_find() {
+        let efs = test_prepare_efs();
+        let root_inode = Inode::root_inode(&efs);
+        assert!(root_inode.is_dir());
+        root_inode.create(TEST_FILENAME);
+        let file_inode = root_inode.find(TEST_FILENAME);
+        assert!(file_inode.is_some());
+        let file_inode = file_inode.unwrap();
+        assert!(file_inode.is_file());
+        assert_eq!(root_inode.size(), DIRENT_SZ as u32);
+        assert_eq!(file_inode.size(), 0);
+
+        let mut dirent = DirEntry::empty();
+        root_inode.read_at(0, dirent.as_bytes_mut());
+        assert_eq!(dirent.name(), TEST_FILENAME);
+    }
+
+    #[test]
+    pub fn test_listdir() {
+        let efs = test_prepare_efs();
+        let root_inode = Inode::root_inode(&efs);
+        let filenames: Vec<String> = (0..1000).map(|id| format!("test_file{}", id)).collect();
+        for filename in filenames.iter() {
+            root_inode.create(filename.as_str());
+        }
+        let filenames_from_fs = root_inode.listdir();
+        assert_eq!(filenames, filenames_from_fs);
+    }
+
+    #[test]
+    pub fn test_file_read_write() {
+        let efs = test_prepare_efs();
+        let root_inode = Inode::root_inode(&efs);
+        let file_inode = root_inode.create(TEST_FILENAME).unwrap();
+
+        let file_size = 200 * BLOCK_SZ;
+        let mut fake_file = FakeFile::new(file_size);
+        for _ in 0..1000 {
+            match FileOpGenerator::generate(file_size) {
+                FileOp::FileRead { offset, len } => {
+                    let mut buf: Vec<u8> = Vec::new();
+                    buf.resize(len, 0);
+                    let mut fake_buf: Vec<u8> = Vec::new();
+                    fake_buf.resize(len, 0);
+                    fake_file.read_at(offset, fake_buf.as_mut_slice());
+                    file_inode.read_at(offset, buf.as_mut_slice());
+                    assert_eq!(buf.as_slice(), fake_buf.as_slice());
+                }
+                FileOp::FileWrite { offset, data } => {
+                    fake_file.write_at(offset, data.as_slice());
+                    file_inode.write_at(offset, data.as_slice());
+                }
+            }
+        }
+
+        file_inode.clear();
+        assert_eq!(file_inode.size(), 0);
+    }
+}
@@ -16,6 +16,7 @@ virtio-drivers = { git = "https://github.com/rcore-os/virtio-drivers", rev = "4e
 easy-fs = { path = "../easy-fs" }
 log = "0.4"
 sbi-rt = { version = "0.0.2", features = ["legacy"] }
+lock_api = "0.4.11"

 [profile.release]
 debug = true
@@ -44,7 +44,7 @@ $(KERNEL_BIN): kernel
 fs-img: $(APPS)
 	@cd ../user && make build TEST=$(TEST)
 	@rm -f $(FS_IMG)
-	@cd ../easy-fs-fuse && cargo run --release -- -s ../user/src/bin/ -t ../user/target/riscv64gc-unknown-none-elf/release/
+	@cd ../easy-fs && cargo run --example fuse -- -s ../user/src/bin/ -t ../user/target/riscv64gc-unknown-none-elf/release/

 $(APPS):
@@ -7,6 +7,6 @@ _start:
     .section .bss.stack
     .globl boot_stack_lower_bound
 boot_stack_lower_bound:
-    .space 4096 * 16
+    .space 4096 * 256
     .globl boot_stack_top
 boot_stack_top:
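This is the change named in the commit message: the boot stack grows from 4096 * 16 = 64 KiB to 4096 * 256 = 1 MiB so that the BlockCacheMgr can be initialized successfully. A plausible reading, stated here as an assumption, is that the cache manager is now constructed by value before landing in its lazy_static, so the temporary lives on the boot stack. A quick check of the numbers:

// Sketch: rough stack budget. A BlockCacheMgr<64, _> holds 64 buffers of
// 512 bytes plus bookkeeping, so a by-value temporary is at least ~32 KiB,
// uncomfortably close to the old 64 KiB stack. (Exact layout is assumed.)
const PAGE: usize = 4096;
const OLD_STACK: usize = PAGE * 16; // 64 KiB
const NEW_STACK: usize = PAGE * 256; // 1 MiB

fn main() {
    let cache_payload = 64 * 512; // BLOCK_CACHE_SIZE buffers of BLOCK_SZ bytes
    println!("old stack: {} KiB", OLD_STACK / 1024);
    println!("new stack: {} KiB", NEW_STACK / 1024);
    println!("cache payload alone: {} KiB", cache_payload / 1024);
}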
@@ -7,12 +7,16 @@
 use super::File;
 use crate::drivers::BLOCK_DEVICE;
 use crate::mm::UserBuffer;
-use crate::sync::UPSafeCell;
+use crate::sync::{RawExclusiveLock, UPSafeCell};
 use alloc::sync::Arc;
 use alloc::vec::Vec;
 use bitflags::*;
-use easy_fs::{EasyFileSystem, Inode};
+use easy_fs::{BlockCacheMgr, EasyFileSystem, Inode};
 use lazy_static::*;

+const BLOCK_CACHE_SIZE: usize = 64;
+type InodeType = Inode<RawExclusiveLock, BLOCK_CACHE_SIZE, RawExclusiveLock, RawExclusiveLock>;
+
 /// A wrapper around a filesystem inode
 /// to implement File trait atop
 pub struct OSInode {
@@ -23,12 +27,12 @@ pub struct OSInode {
 /// The OS inode inner in 'UPSafeCell'
 pub struct OSInodeInner {
     offset: usize,
-    inode: Arc<Inode>,
+    inode: Arc<InodeType>,
 }

 impl OSInode {
     /// Construct an OS inode from an inode
-    pub fn new(readable: bool, writable: bool, inode: Arc<Inode>) -> Self {
+    pub fn new(readable: bool, writable: bool, inode: Arc<InodeType>) -> Self {
         Self {
             readable,
             writable,
@@ -53,15 +57,21 @@ impl OSInode {
 }

 lazy_static! {
-    pub static ref ROOT_INODE: Arc<Inode> = {
-        let efs = EasyFileSystem::open(BLOCK_DEVICE.clone());
-        Arc::new(EasyFileSystem::root_inode(&efs))
+    pub static ref ROOT_INODE: Arc<InodeType> = {
+        let block_dev = BLOCK_DEVICE.clone();
+        let bcache_mgr: BlockCacheMgr<BLOCK_CACHE_SIZE, RawExclusiveLock> =
+            BlockCacheMgr::new(&block_dev);
+        let efs: EasyFileSystem<BLOCK_CACHE_SIZE, RawExclusiveLock, _> =
+            EasyFileSystem::open(bcache_mgr);
+        let efs = Arc::new(lock_api::Mutex::<RawExclusiveLock, _>::new(efs));
+        Arc::new(Inode::root_inode(&efs))
     };
 }
 /// List all files in the filesystems
+#[allow(warnings)]
 pub fn list_apps() {
     println!("/**** APPS ****");
-    for app in ROOT_INODE.ls() {
+    for app in ROOT_INODE.listdir() {
         println!("{}", app);
     }
     println!("**************/");
@@ -1,4 +1,4 @@
 //! Synchronization and interior mutability primitives
 mod up;

-pub use up::UPSafeCell;
+pub use up::{RawExclusiveLock, UPSafeCell};
@@ -1,5 +1,6 @@
 //! Uniprocessor interior mutability primitives
 use core::cell::{RefCell, RefMut};
+use core::sync::atomic::{AtomicBool, Ordering};

 /// Wrap a static data structure inside it so that we are
 /// able to access it without any `unsafe`.
@@ -28,3 +29,23 @@ impl<T> UPSafeCell<T> {
         self.inner.borrow_mut()
     }
 }
+
+/// The sync primitive used by easy-fs.
+pub struct RawExclusiveLock(AtomicBool);
+
+unsafe impl lock_api::RawMutex for RawExclusiveLock {
+    const INIT: Self = Self(AtomicBool::new(false));
+    type GuardMarker = lock_api::GuardNoSend;
+    fn lock(&self) {
+        assert_eq!(self.0.load(Ordering::Relaxed), false);
+        self.0.store(true, Ordering::Relaxed);
+    }
+    fn try_lock(&self) -> bool {
+        self.0
+            .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
+            .is_ok()
+    }
+    unsafe fn unlock(&self) {
+        self.0.store(false, Ordering::Relaxed);
+    }
+}
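Note the contrast with the test-side RawSpinlock: on this uniprocessor kernel the lock can never be legitimately contended, so lock() asserts the flag is clear instead of spinning, turning any reentrant acquisition into an immediate panic, and GuardNoSend keeps guards from migrating. A hedged sketch of what that buys, assuming RawExclusiveLock above is in scope:

// Sketch: with RawExclusiveLock, a double lock is a loud bug report
// rather than a silent deadlock (hypothetical stand-alone demo).
type ExclusiveMutex<T> = lock_api::Mutex<RawExclusiveLock, T>;

fn demo() {
    let m: ExclusiveMutex<u32> = ExclusiveMutex::new(0);
    let guard = m.lock();
    // A second m.lock() here would trip the assert_eq! in lock() and
    // panic immediately, pinpointing the reentrancy instead of hanging.
    drop(guard);
    assert_eq!(*m.lock(), 0);
}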