easy-fs: Refactor easy-fs

--------

Users of easy-fs can now specify the synchronization primitive it uses by supplying any implementation of lock_api::RawMutex.
Add unit tests for easy-fs.
Expand the boot stack to 1 MiB so that the BlockCacheMgr can be initialized successfully.
This commit is contained in:
Yifan Wu 2024-01-05 20:26:28 +08:00
parent e9c42c8a7d
commit 85e2f4d3b6
18 changed files with 1470 additions and 596 deletions

View File

@ -1,3 +0,0 @@
.idea/
target/
Cargo.lock

View File

@ -1,16 +0,0 @@
[package]
name = "easy-fs-fuse"
version = "0.1.0"
authors = ["Yifan Wu <shinbokuow@163.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
clap = "2.33.3"
easy-fs = { path = "../easy-fs" }
rand = "0.8.0"
# [features]
# board_qemu = []
# board_k210 = []

View File

@ -1,18 +1,13 @@
[package]
name = "easy-fs"
version = "0.1.0"
authors = ["Yifan Wu <shinbokuow@163.com>"]
edition = "2018"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
spin = "0.7.0"
lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
lock_api = "0.4.11"
[profile.release]
debug = true
[features]
board_qemu = []
board_k210 = []
[dev-dependencies]
rand = "0.8.5"
clap = "2.33.3"

View File

@ -1,12 +1,39 @@
use clap::{App, Arg};
use easy_fs::{BlockDevice, EasyFileSystem};
use easy_fs::{BlockCacheMgr, BlockDevice, EasyFileSystem, Inode};
use std::fs::{read_dir, File, OpenOptions};
use std::io::{Read, Seek, SeekFrom, Write};
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
const BLOCK_SZ: usize = 512;
const FS_BLOCKS: usize = 32768;
const BLOCK_CACHE_BLKS: usize = 256;
type EasyFileSystemType = EasyFileSystem<BLOCK_CACHE_BLKS, RawSpinlock, RawSpinlock>;
/// A minimal test-and-set spinlock exposing the `lock_api::RawMutex`
/// interface. `false` means unlocked, `true` means locked.
pub struct RawSpinlock(AtomicBool);

unsafe impl lock_api::RawMutex for RawSpinlock {
    const INIT: RawSpinlock = RawSpinlock(AtomicBool::new(false));

    // A guard produced by this lock may be sent to another thread
    // and released there.
    type GuardMarker = lock_api::GuardSend;

    fn lock(&self) {
        // Busy-wait until the compare-exchange in `try_lock` succeeds.
        // Not an efficient spinlock, but sufficient here.
        loop {
            if self.try_lock() {
                break;
            }
        }
    }

    fn try_lock(&self) -> bool {
        // Atomically flip unlocked -> locked; success means we own the lock.
        self.0
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    unsafe fn unlock(&self) {
        self.0.store(false, Ordering::Release);
    }
}
/// A block device backed by a regular host file; the `Mutex` serializes
/// accesses to the shared `File` handle across threads.
struct BlockFile(Mutex<File>);
impl BlockDevice for BlockFile {
@ -49,20 +76,22 @@ fn easy_fs_pack() -> std::io::Result<()> {
let src_path = matches.value_of("source").unwrap();
let target_path = matches.value_of("target").unwrap();
println!("src_path = {}\ntarget_path = {}", src_path, target_path);
let block_file = Arc::new(BlockFile(Mutex::new({
let block_file: Arc<dyn BlockDevice> = Arc::new(BlockFile(Mutex::new({
let f = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(format!("{}{}", target_path, "fs.img"))?;
f.set_len(16 * 2048 * 512).unwrap();
f.set_len((FS_BLOCKS * BLOCK_SZ) as u64).unwrap();
f
})));
let bcache_mgr: BlockCacheMgr<BLOCK_CACHE_BLKS, RawSpinlock> = BlockCacheMgr::new(&block_file);
// 16MiB, at most 4095 files
let efs = EasyFileSystem::create(block_file, 16 * 2048, 1);
let root_inode = Arc::new(EasyFileSystem::root_inode(&efs));
let apps: Vec<_> = read_dir(src_path)
.unwrap()
let efs: EasyFileSystemType = EasyFileSystem::create(FS_BLOCKS as u32, 1, bcache_mgr);
let efs: lock_api::Mutex<RawSpinlock, EasyFileSystemType> = lock_api::Mutex::new(efs);
let efs = Arc::new(efs);
let root_inode = Inode::root_inode(&efs);
let apps: Vec<_> = read_dir(src_path)?
.into_iter()
.map(|dir_entry| {
let mut name_with_ext = dir_entry.unwrap().file_name().into_string().unwrap();
@ -80,72 +109,5 @@ fn easy_fs_pack() -> std::io::Result<()> {
// write data to easy-fs
inode.write_at(0, all_data.as_slice());
}
// list apps
// for app in root_inode.ls() {
// println!("{}", app);
// }
Ok(())
}
/// End-to-end test of the easy-fs API: creates a file-backed image,
/// formats it, creates files in the root directory, and verifies
/// reads and writes of various sizes round-trip correctly.
#[test]
fn efs_test() -> std::io::Result<()> {
    // Back the file system with a host file of 8192 blocks * 512 bytes.
    let block_file = Arc::new(BlockFile(Mutex::new({
        let f = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .open("target/fs.img")?;
        f.set_len(8192 * 512).unwrap();
        f
    })));
    // Format with 4096 total blocks and 1 inode-bitmap block, then reopen
    // the same image to exercise the open path.
    EasyFileSystem::create(block_file.clone(), 4096, 1);
    let efs = EasyFileSystem::open(block_file.clone());
    let root_inode = EasyFileSystem::root_inode(&efs);
    root_inode.create("filea");
    root_inode.create("fileb");
    for name in root_inode.ls() {
        println!("{}", name);
    }
    let filea = root_inode.find("filea").unwrap();
    let greet_str = "Hello, world!";
    filea.write_at(0, greet_str.as_bytes());
    //let mut buffer = [0u8; 512];
    // Deliberately odd-sized buffer to exercise partial-block reads.
    let mut buffer = [0u8; 233];
    let len = filea.read_at(0, &mut buffer);
    assert_eq!(greet_str, core::str::from_utf8(&buffer[..len]).unwrap(),);
    // Write a random digit string of `len` bytes, then read it back in
    // 127-byte chunks and check the round trip.
    let mut random_str_test = |len: usize| {
        filea.clear();
        // After clear() the file must read back empty.
        assert_eq!(filea.read_at(0, &mut buffer), 0,);
        let mut str = String::new();
        use rand;
        // random digit
        for _ in 0..len {
            str.push(char::from('0' as u8 + rand::random::<u8>() % 10));
        }
        filea.write_at(0, str.as_bytes());
        let mut read_buffer = [0u8; 127];
        let mut offset = 0usize;
        let mut read_str = String::new();
        loop {
            let len = filea.read_at(offset, &mut read_buffer);
            if len == 0 {
                break;
            }
            offset += len;
            read_str.push_str(core::str::from_utf8(&read_buffer[..len]).unwrap());
        }
        assert_eq!(str, read_str);
    };
    // Sizes with non-block-aligned remainders; (12 + 128) blocks
    // presumably targets the direct + one-indirect capacity boundary
    // of DiskInode — confirm against the inode layout.
    random_str_test(4 * BLOCK_SZ);
    random_str_test(8 * BLOCK_SZ + BLOCK_SZ / 2);
    random_str_test(100 * BLOCK_SZ);
    random_str_test(70 * BLOCK_SZ + BLOCK_SZ / 7);
    random_str_test((12 + 128) * BLOCK_SZ);
    random_str_test(400 * BLOCK_SZ);
    random_str_test(1000 * BLOCK_SZ);
    random_str_test(2000 * BLOCK_SZ);
    Ok(())
}

View File

@ -1,10 +1,11 @@
use super::{get_block_cache, BlockDevice, BLOCK_SZ};
use alloc::sync::Arc;
use super::{BlockCacheMgr, BLOCK_SZ};
/// A bitmap block
type BitmapBlock = [u64; 64];
/// Number of bits in a block
const BLOCK_BITS: usize = BLOCK_SZ * 8;
/// A bitmap
#[derive(Default)]
pub struct Bitmap {
start_block_id: usize,
blocks: usize,
@ -26,14 +27,13 @@ impl Bitmap {
}
}
/// Allocate a new block from a block device
pub fn alloc(&self, block_device: &Arc<dyn BlockDevice>) -> Option<usize> {
pub fn alloc<const N: usize, R>(&self, bcache_mgr: &mut BlockCacheMgr<N, R>) -> Option<usize>
where
R: lock_api::RawMutex,
{
for block_id in 0..self.blocks {
let pos = get_block_cache(
block_id + self.start_block_id as usize,
Arc::clone(block_device),
)
.lock()
.modify(0, |bitmap_block: &mut BitmapBlock| {
let real_block_id = block_id + self.start_block_id as usize;
let pos = bcache_mgr.write_block(real_block_id, 0, |bitmap_block: &mut BitmapBlock| {
if let Some((bits64_pos, inner_pos)) = bitmap_block
.iter()
.enumerate()
@ -54,17 +54,88 @@ impl Bitmap {
None
}
/// Deallocate a block
pub fn dealloc(&self, block_device: &Arc<dyn BlockDevice>, bit: usize) {
pub fn dealloc<const N: usize, R>(&self, bcache_mgr: &mut BlockCacheMgr<N, R>, bit: usize)
where
R: lock_api::RawMutex,
{
let (block_pos, bits64_pos, inner_pos) = decomposition(bit);
get_block_cache(block_pos + self.start_block_id, Arc::clone(block_device))
.lock()
.modify(0, |bitmap_block: &mut BitmapBlock| {
assert!(bitmap_block[bits64_pos] & (1u64 << inner_pos) > 0);
bitmap_block[bits64_pos] -= 1u64 << inner_pos;
});
let real_block_id = block_pos + self.start_block_id;
bcache_mgr.write_block(real_block_id, 0, |bitmap_block: &mut BitmapBlock| {
assert!(bitmap_block[bits64_pos] & (1u64 << inner_pos) > 0);
bitmap_block[bits64_pos] -= 1u64 << inner_pos;
});
}
/// Get the max number of allocatable blocks: one bit per block,
/// `BLOCK_BITS` bits per bitmap block.
pub fn maximum(&self) -> usize {
    self.blocks * BLOCK_BITS
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{test_helper::*, BlockCacheMgr};
    use std::sync::Arc;

    /// Allocate and free individual bits, syncing after each step and
    /// checking the raw bitmap bytes on the backing device.
    #[test]
    pub fn test_bitmap() {
        let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<16>::new());
        let mut bcache_mgr: BlockCacheMgr<4, RawSpinlock> = BlockCacheMgr::new(&block_dev);
        let bitmap = Bitmap::new(0, 16);
        assert_eq!(bitmap.maximum(), BLOCK_BITS * 16);
        let mut buf = [0u8; BLOCK_SZ];
        // The first two allocations must hand out bits 0 and 1.
        assert_eq!(bitmap.alloc(&mut bcache_mgr), Some(0));
        assert_eq!(bitmap.alloc(&mut bcache_mgr), Some(1));
        bcache_mgr.sync_all();
        block_dev.read_block(0, &mut buf);
        // Bits 0 and 1 set -> first u64 equals 0b11 == 3.
        assert_eq!(buf[..8], 3u64.to_ne_bytes());
        assert!(buf[8..].iter().all(|byte| *byte == 0));
        bitmap.dealloc(&mut bcache_mgr, 0);
        bcache_mgr.sync_all();
        block_dev.read_block(0, &mut buf);
        // Only bit 1 remains set.
        assert_eq!(buf[..8], 2u64.to_ne_bytes());
        bitmap.dealloc(&mut bcache_mgr, 1);
        bcache_mgr.sync_all();
        // NOTE(review): bit 1 lives in bitmap block 0, yet block 1 is read
        // here, so this assertion is trivially true — confirm whether
        // block 0 was intended.
        block_dev.read_block(1, &mut buf);
        assert!(buf.iter().all(|byte| *byte == 0));
    }

    /// Deallocating a bit that was never allocated must panic
    /// (the assert inside `dealloc` fires).
    #[test]
    #[should_panic]
    pub fn test_bitmap_panic() {
        let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<16>::new());
        let mut bcache_mgr: BlockCacheMgr<4, RawSpinlock> = BlockCacheMgr::new(&block_dev);
        let bitmap = Bitmap::new(0, 1);
        assert_eq!(bitmap.alloc(&mut bcache_mgr), Some(0));
        bitmap.dealloc(&mut bcache_mgr, 1);
    }

    /// Exhaust the whole bitmap, then free everything, checking the raw
    /// bytes are all-ones and then all-zeros on the device.
    #[test]
    pub fn test_bitmap_large() {
        let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<16>::new());
        let mut bcache_mgr: BlockCacheMgr<4, RawSpinlock> = BlockCacheMgr::new(&block_dev);
        let bitmap = Bitmap::new(0, 16);
        let mut buf = [0u8; BLOCK_SZ];
        for i in 0..16 * BLOCK_BITS {
            assert_eq!(bitmap.alloc(&mut bcache_mgr), Some(i));
        }
        bcache_mgr.sync_all();
        for block_id in 0..16usize {
            block_dev.read_block(block_id, &mut buf);
            assert!(buf.iter().all(|byte| *byte == u8::MAX));
        }
        for i in 0..16 * BLOCK_BITS {
            bitmap.dealloc(&mut bcache_mgr, i);
        }
        bcache_mgr.sync_all();
        for block_id in 0..16usize {
            block_dev.read_block(block_id, &mut buf);
            assert!(buf.iter().all(|byte| *byte == 0));
        }
    }
}

View File

@ -1,144 +1,426 @@
use super::{BlockDevice, BLOCK_SZ};
use alloc::collections::VecDeque;
use crate::{BlockDevice, BLOCK_SZ};
use alloc::sync::Arc;
use lazy_static::*;
use spin::Mutex;
use core::ops::{Deref, DerefMut};
use lock_api;
/// A 512-byte-aligned buffer holding the raw contents of one disk block.
/// The alignment guarantees that any type whose alignment is at most 512
/// can be safely referenced inside the buffer.
#[derive(Copy, Clone)]
#[repr(align(512))]
pub struct CacheBlock([u8; BLOCK_SZ]);

impl Default for CacheBlock {
    /// A zero-filled block buffer.
    fn default() -> Self {
        CacheBlock([0u8; BLOCK_SZ])
    }
}

impl Deref for CacheBlock {
    type Target = [u8; BLOCK_SZ];

    fn deref(&self) -> &Self::Target {
        let CacheBlock(bytes) = self;
        bytes
    }
}

impl DerefMut for CacheBlock {
    fn deref_mut(&mut self) -> &mut Self::Target {
        let CacheBlock(bytes) = self;
        bytes
    }
}
/// Cached block inside memory
pub struct BlockCache {
/// cached block data
cache: [u8; BLOCK_SZ],
/// cached block data, provided by BlockCacheMgr
/// We use 'static lifetime here since:
/// 1. We do not want BlockCacheMgr to be a self-referential struct.
/// 2. We can guarantee that the BlockCacheMgr outlives BlockCache.
/// 3. We can guarantee that only one BlockCache can access this region
/// at a time.
cache: &'static mut CacheBlock,
/// underlying block id
block_id: usize,
/// underlying block device
block_device: Arc<dyn BlockDevice>,
/// whether the block is dirty
modified: bool,
pub dirty: bool,
/// The timestamp of the last access of the block.
/// It is provided by the BlockCacheMgr.
last_access_time: usize,
}
impl BlockCache {
/// Load a new BlockCache from disk.
pub fn new(block_id: usize, block_device: Arc<dyn BlockDevice>) -> Self {
let mut cache = [0u8; BLOCK_SZ];
block_device.read_block(block_id, &mut cache);
pub fn new(
block_id: usize,
block_device: &Arc<dyn BlockDevice>,
cache: &mut CacheBlock,
) -> Self {
block_device.read_block(block_id, &mut cache.0);
// Safety: See the definition of BlockCache.
let cache: &'static mut CacheBlock = unsafe {
let cache_ptr: *mut CacheBlock = core::mem::transmute(cache.as_mut_ptr());
&mut *(cache_ptr)
};
Self {
cache,
block_id,
block_device,
modified: false,
block_device: Arc::clone(block_device),
dirty: false,
last_access_time: 0,
}
}
/// Get the address of an offset inside the cached block data
fn addr_of_offset(&self, offset: usize) -> usize {
&self.cache[offset] as *const _ as usize
&self.cache.0[offset] as *const _ as usize
}
pub fn get_ref<T>(&self, offset: usize) -> &T
where
T: Sized,
{
let type_size = core::mem::size_of::<T>();
assert!(offset + type_size <= BLOCK_SZ);
let addr = self.addr_of_offset(offset);
unsafe { &*(addr as *const T) }
/// Panic unless a value of type `T` placed at `offset` would be both
/// properly aligned and fully contained within the cached block.
fn check_value_validity<T>(&self, offset: usize) {
    let start_addr = self.addr_of_offset(offset);
    // We guarantee that the input arguments are correct. Thus,
    // if we find that the arguments are not valid, just panic.
    //
    // Alignment: the byte address must be a multiple of align_of::<T>().
    assert_eq!(start_addr % core::mem::align_of::<T>(), 0);
    // Bounds: the value must not run past the end of the block.
    assert!(offset + core::mem::size_of::<T>() <= BLOCK_SZ);
}
pub fn get_mut<T>(&mut self, offset: usize) -> &mut T
where
T: Sized,
{
let type_size = core::mem::size_of::<T>();
assert!(offset + type_size <= BLOCK_SZ);
self.modified = true;
let addr = self.addr_of_offset(offset);
unsafe { &mut *(addr as *mut T) }
/// Get a reference to a value of generic type T at a given
/// offset on the block cache.
pub fn value_ref_at_offset<T>(&self, offset: usize) -> &T {
    self.check_value_validity::<T>(offset);
    // Safety:
    // we have checked that the address at the given offset is
    // aligned to type T and the value of type T is in the boundary
    // of the cache array.
    unsafe { core::mem::transmute::<&u8, &T>(&self.cache.0[offset]) }
}
/// Get a mutable reference to a value of generic type T at a given
/// offset on the block cache. Marks the cache dirty, since the caller
/// may write through the returned reference.
pub fn value_mut_at_offset<T>(&mut self, offset: usize) -> &mut T {
    self.check_value_validity::<T>(offset);
    self.dirty = true;
    // Safety: See value_ref_at_offset.
    unsafe { core::mem::transmute::<&mut u8, &mut T>(&mut self.cache.0[offset]) }
}
/// Apply a read operation on this BlockCache at the given offset.
pub fn read<T, V>(&self, offset: usize, f: impl FnOnce(&T) -> V) -> V {
f(self.get_ref(offset))
f(self.value_ref_at_offset::<T>(offset))
}
pub fn modify<T, V>(&mut self, offset: usize, f: impl FnOnce(&mut T) -> V) -> V {
f(self.get_mut(offset))
/// Apply a write operation on this BlockCache at the given offset.
pub fn write<T, V>(&mut self, offset: usize, f: impl FnOnce(&mut T) -> V) -> V {
f(self.value_mut_at_offset::<T>(offset))
}
pub fn sync(&mut self) {
if self.modified {
self.modified = false;
self.block_device.write_block(self.block_id, &self.cache);
/// Sync the changes on this block cache in memory to the underlying
/// block device.
pub fn sync_to_device(&mut self) {
if self.dirty {
self.dirty = false;
self.block_device.write_block(self.block_id, &self.cache.0);
}
}
/// Update the access time used for LRU when this block is
/// accessed. `new_time` is the logical timestamp provided by the
/// BlockCacheMgr, not wall-clock time.
pub fn update_access_time(&mut self, new_time: usize) {
    self.last_access_time = new_time;
}
/// Get the logical timestamp of the last access of this block
/// (used by the BlockCacheMgr to pick an LRU eviction victim).
pub fn access_time(&self) -> usize {
    self.last_access_time
}
}
impl Drop for BlockCache {
fn drop(&mut self) {
self.sync()
self.sync_to_device();
}
}
/// Use a block cache of 16 blocks
const BLOCK_CACHE_SIZE: usize = 16;
pub struct BlockCacheManager {
queue: VecDeque<(usize, Arc<Mutex<BlockCache>>)>,
/// The block cache manager.
///
/// The user can control the sync primitive used by each block cache via
/// R which implements the `lock_api::RawMutex` trait. At the same time, user can
/// control the capacity of the block cache pool via the const generic parameter N.
pub struct BlockCacheMgr<const N: usize, R>
where
    R: lock_api::RawMutex,
{
    /// Backing storage for the N cached blocks; each `BlockCache`
    /// borrows into its slot of this array.
    cache_block_arr: [CacheBlock; N],
    /// The block id cached in each slot, or `None` if the slot is free.
    block_id_arr: [Option<usize>; N],
    /// Per-slot cache handles; slot i caches block_id_arr[i]
    /// using the bytes of cache_block_arr[i].
    cache_arr: [Option<Arc<lock_api::Mutex<R, BlockCache>>>; N],
    /// The underlying block device shared by all caches.
    block_dev: Arc<dyn BlockDevice>,
    /// Logical clock, incremented on every cache access; drives LRU.
    current_time: usize,
}
impl BlockCacheManager {
pub fn new() -> Self {
impl<const N: usize, R> BlockCacheMgr<N, R>
where
R: lock_api::RawMutex,
{
/// Initialize a block cache manager.
pub fn new(block_dev: &Arc<dyn BlockDevice>) -> Self {
let mut cache_arr: [Option<Arc<lock_api::Mutex<R, BlockCache>>>; N] =
unsafe { core::mem::zeroed() };
for cache in cache_arr.iter_mut() {
*cache = None;
}
Self {
queue: VecDeque::new(),
cache_block_arr: [CacheBlock::default(); N],
block_id_arr: [None; N],
cache_arr,
block_dev: Arc::clone(block_dev),
current_time: 0,
}
}
pub fn get_block_cache(
fn get_slot_id(&self, block_id: usize) -> Option<usize> {
(0..N).find(|slot_id| {
if let Some(block_id_t) = self.block_id_arr[*slot_id].as_ref() {
*block_id_t == block_id
} else {
false
}
})
}
fn get_empty_slot_id(&self) -> Option<usize> {
(0..N).find(|slot_id| self.block_id_arr[*slot_id].is_none())
}
/// Get a block cache from block cache manager. Caller need to provide the block id.
///
/// Lookup order: (1) the block is already cached — reuse its slot;
/// (2) a free slot exists — load the block into it; (3) otherwise evict
/// the least-recently-used unreferenced slot.
///
/// # Panics
///
/// This function will panic if all caches cannot be swapped out since they are
/// still referenced.
pub fn get_block_cache(&mut self, block_id: usize) -> Arc<lock_api::Mutex<R, BlockCache>> {
    let slot_id = if let Some(slot_id) = self.get_slot_id(block_id) {
        slot_id
    } else if let Some(slot_id) = self.get_empty_slot_id() {
        // Free slot: record the block id and load the block from disk
        // into this slot's backing buffer.
        self.block_id_arr[slot_id] = Some(block_id);
        self.cache_arr[slot_id] = Some(Arc::new(lock_api::Mutex::new(BlockCache::new(
            block_id,
            &self.block_dev,
            &mut self.cache_block_arr[slot_id],
        ))));
        slot_id
    } else {
        // select a block:
        // 1. with minimum last access time
        // 2. it is not referenced by any thread
        let invalid_slot_id = usize::MAX;
        let (slot_id, _) = (0..N).fold(
            (invalid_slot_id, usize::MAX),
            |(target_slot_id, min_last_time), slot_id| {
                let cache = self.cache_arr[slot_id].as_ref().unwrap();
                // strong_count > 1 means some caller still holds the Arc;
                // that slot must not be evicted.
                if Arc::strong_count(cache) > 1 {
                    return (target_slot_id, min_last_time);
                }
                // Only acquire the cache lock if it has not been acquired(through the refcnt)
                // This can avoid the AA deadlock.
                let last_time = cache.lock().access_time();
                if last_time < min_last_time {
                    (slot_id, last_time)
                } else {
                    (target_slot_id, min_last_time)
                }
            },
        );
        // If all caches are still being used, just panic
        assert_ne!(slot_id, invalid_slot_id);
        assert_eq!(
            Arc::strong_count(self.cache_arr[slot_id].as_ref().unwrap()),
            1
        );
        // manually substitute it with a cache of a new block
        // the drop of the older value is later than the init of the new value, thus
        // we cannot put them in one line
        // (dropping the old BlockCache here also syncs it to disk via Drop,
        // before its backing buffer is handed to the new BlockCache)
        self.cache_arr[slot_id] = None;
        self.block_id_arr[slot_id] = Some(block_id);
        self.cache_arr[slot_id] = Some(Arc::new(lock_api::Mutex::new(BlockCache::new(
            block_id,
            &self.block_dev,
            &mut self.cache_block_arr[slot_id],
        ))));
        slot_id
    };
    // update the access time
    self.current_time += 1;
    let cache = self.cache_arr[slot_id].as_ref().unwrap();
    cache.lock().update_access_time(self.current_time);
    Arc::clone(cache)
}
/// Flush every resident block cache of this manager to the
/// underlying block device.
pub fn sync_all(&self) {
    self.cache_arr
        .iter()
        .flatten()
        .for_each(|cache| cache.lock().sync_to_device());
}
/// Apply a read operation on a block at the given offset.
pub fn read_block<T, V>(
&mut self,
block_id: usize,
block_device: Arc<dyn BlockDevice>,
) -> Arc<Mutex<BlockCache>> {
if let Some(pair) = self.queue.iter().find(|pair| pair.0 == block_id) {
Arc::clone(&pair.1)
} else {
// substitute
if self.queue.len() == BLOCK_CACHE_SIZE {
// from front to tail
if let Some((idx, _)) = self
.queue
.iter()
.enumerate()
.find(|(_, pair)| Arc::strong_count(&pair.1) == 1)
{
self.queue.drain(idx..=idx);
} else {
panic!("Run out of BlockCache!");
}
}
// load block into mem and push back
let block_cache = Arc::new(Mutex::new(BlockCache::new(
block_id,
Arc::clone(&block_device),
)));
self.queue.push_back((block_id, Arc::clone(&block_cache)));
block_cache
}
offset: usize,
op: impl FnOnce(&T) -> V,
) -> V {
let bcache = self.get_block_cache(block_id);
let bcache_guard = bcache.lock();
bcache_guard.read(offset, op)
}
/// Apply a write operation `op` to the value of type `T` located at
/// `offset` inside block `block_id`, returning whatever `op` returns.
pub fn write_block<T, V>(
    &mut self,
    block_id: usize,
    offset: usize,
    op: impl FnOnce(&mut T) -> V,
) -> V {
    // The Arc temporary lives until the end of the statement, so the
    // lock guard it yields stays valid for the whole write.
    self.get_block_cache(block_id).lock().write(offset, op)
}
}
lazy_static! {
/// The global block cache manager
pub static ref BLOCK_CACHE_MANAGER: Mutex<BlockCacheManager> =
Mutex::new(BlockCacheManager::new());
}
/// Get the block cache corresponding to the given block id and block device
pub fn get_block_cache(
block_id: usize,
block_device: Arc<dyn BlockDevice>,
) -> Arc<Mutex<BlockCache>> {
BLOCK_CACHE_MANAGER
.lock()
.get_block_cache(block_id, block_device)
}
/// Sync all block cache to block device
pub fn block_cache_sync_all() {
let manager = BLOCK_CACHE_MANAGER.lock();
for (_, cache) in manager.queue.iter() {
cache.lock().sync();
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_helper::*;
    use std::sync::Arc;

    mod block_cache {
        use super::*;

        /// A freshly loaded cache starts clean (not dirty).
        #[test]
        pub fn test_init() {
            let block_dev: Arc<dyn BlockDevice> = Arc::new(MockBlockDevice);
            let mut cache_block: CacheBlock = Default::default();
            let block_cache = BlockCache::new(0, &block_dev, &mut cache_block);
            // Plain boolean assertion instead of `assert_eq!(_, false)`.
            assert!(!block_cache.dirty);
        }

        /// A value written through the cache can be read back from it.
        #[test]
        pub fn test_read_write() {
            let block_dev: Arc<dyn BlockDevice> = Arc::new(MockBlockDevice);
            let mut cache_block: CacheBlock = Default::default();
            let mut block_cache = BlockCache::new(0, &block_dev, &mut cache_block);
            let test_value: u32 = 0x11223344;
            {
                let u32_mut = block_cache.value_mut_at_offset::<u32>(0);
                *u32_mut = test_value;
            }
            let u32_ref = block_cache.value_ref_at_offset::<u32>(0);
            assert_eq!(*u32_ref, test_value);
        }

        /// Changes reach the device both on explicit sync and on drop.
        ///
        /// Note: the original asserts here were written as
        /// `assert!(&a == b)` / `assert!(&expr)`, asserting on a reference;
        /// they are normalized to `assert_eq!`/plain `assert!` for clearer
        /// failure output.
        #[test]
        pub fn test_sync() {
            let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<4>::new());
            let mut cache_block: CacheBlock = Default::default();
            let mut block_cache = BlockCache::new(0, &block_dev, &mut cache_block);
            let mut buf = [0xffu8; BLOCK_SZ];
            block_dev.read_block(0, &mut buf);
            assert!(buf.iter().all(|byte| *byte == 0));
            // write and sync manually
            let test_value: u32 = 0x11223344;
            {
                let u32_mut = block_cache.value_mut_at_offset::<u32>(0);
                *u32_mut = test_value;
            }
            block_cache.sync_to_device();
            block_dev.read_block(0, &mut buf);
            assert_eq!(buf[..4], test_value.to_ne_bytes());
            assert!(buf[4..].iter().all(|byte| *byte == 0));
            // write and sync automatically after block_cache is dropped
            let test_value2: u32 = 0x55667788;
            {
                let u32_mut = block_cache.value_mut_at_offset::<u32>(4);
                *u32_mut = test_value2;
            }
            drop(block_cache);
            block_dev.read_block(0, &mut buf);
            assert_eq!(buf[..4], test_value.to_ne_bytes());
            assert_eq!(buf[4..8], test_value2.to_ne_bytes());
            assert!(buf[8..].iter().all(|byte| *byte == 0));
        }
    }

    mod block_cache_mgr {
        use super::*;

        /// Writes land in the cache first and reach the device only on sync.
        #[test]
        pub fn test_init() {
            let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<4>::new());
            let mut block_cache_mgr: BlockCacheMgr<16, RawSpinlock> =
                BlockCacheMgr::new(&block_dev);
            let block_cache = block_cache_mgr.get_block_cache(0);
            // write a value
            let test_value: u32 = 0x11223344;
            let mut locked_cache = block_cache.lock();
            locked_cache.write(0, |v: &mut u32| {
                *v = test_value;
            });
            // check block_dev, it won't be changed since we've not sync the changes yet
            let mut buf = [0u8; BLOCK_SZ];
            block_dev.read_block(0, &mut buf);
            assert!(buf.iter().all(|byte| *byte == 0));
            // sync changes and check again
            locked_cache.sync_to_device();
            block_dev.read_block(0, &mut buf);
            assert_eq!(buf[..4], test_value.to_ne_bytes());
            assert!(buf[4..].iter().all(|byte| *byte == 0));
        }

        /// An evicted dirty block is synced to the device on swap-out.
        #[test]
        pub fn test_swap_and_sync() {
            let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<16>::new());
            let mut block_cache_mgr: BlockCacheMgr<4, RawSpinlock> = BlockCacheMgr::new(&block_dev);
            let block_cache_list: Vec<_> = (0..4)
                .map(|block_id: usize| block_cache_mgr.get_block_cache(block_id))
                .collect();
            let test_value: u32 = 0x11223344;
            {
                // write the block of id 0
                let block_cache = &block_cache_list[0];
                let mut locked_cache = block_cache.lock();
                locked_cache.write(0, |v: &mut u32| {
                    *v = test_value;
                });
            }
            // Access block caches other than block 0.
            // In this way, the block 0 will be swapped out when a new block comes in.
            for block_id in 1..4usize {
                let _ = block_cache_mgr.get_block_cache(block_id);
            }
            // all block caches will not be referenced
            drop(block_cache_list);
            // access a new block
            let _ = block_cache_mgr.get_block_cache(5);
            // block 0 should be swapped out, thus the changes on it should be synced to device
            let mut buf = [0u8; BLOCK_SZ];
            block_dev.read_block(0, &mut buf);
            assert_eq!(buf[0..4], test_value.to_ne_bytes());
            assert!(buf[4..].iter().all(|byte| *byte == 0));
        }

        /// When every slot is pinned by an outstanding Arc, requesting a
        /// new block must panic.
        #[test]
        #[should_panic]
        pub fn test_all_caches_referenced() {
            let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<16>::new());
            let mut block_cache_mgr: BlockCacheMgr<4, RawSpinlock> = BlockCacheMgr::new(&block_dev);
            let _block_cache_list: Vec<_> = (0..4)
                .map(|block_id| block_cache_mgr.get_block_cache(block_id))
                .collect();
            let _ = block_cache_mgr.get_block_cache(5);
        }
    }
}

View File

@ -1,9 +1,9 @@
use core::any::Any;
/// Trait for block devices
/// which reads and writes data in the unit of blocks
pub trait BlockDevice: Send + Sync + Any {
    /// Read data from the block `block_id` on the device into `buf`.
    fn read_block(&self, block_id: usize, buf: &mut [u8]);
    /// Write data from `buf` to the block `block_id` on the device.
    fn write_block(&self, block_id: usize, buf: &[u8]);
}
use core::any::Any;
/// Trait for block devices
/// which reads and writes data in the unit of blocks
pub trait BlockDevice: Send + Sync + Any {
    /// Read data from the block `block_id` on the device into `buf`.
    fn read_block(&self, block_id: usize, buf: &mut [u8]);
    /// Write data from `buf` to the block `block_id` on the device.
    fn write_block(&self, block_id: usize, buf: &[u8]);
}

View File

@ -1,121 +1,50 @@
use super::{
block_cache_sync_all, get_block_cache, Bitmap, BlockDevice, DiskInode, DiskInodeType, Inode,
SuperBlock,
};
use crate::bitmap::Bitmap;
use crate::layout::{DiskInode, DiskInodeType, SuperBlock};
use crate::BlockCacheMgr;
use crate::BLOCK_SZ;
use alloc::sync::Arc;
use spin::Mutex;
///An easy file system on block
pub struct EasyFileSystem {
///Real device
pub block_device: Arc<dyn BlockDevice>,
///Inode bitmap
pub inode_bitmap: Bitmap,
///Data bitmap
pub data_bitmap: Bitmap,
inode_area_start_block: u32,
data_area_start_block: u32,
use alloc::vec::Vec;
const SUPER_BLOCK_ID: usize = 0;
const ROOT_INODE_ID: usize = 0;
/// The EasyFileSystem aggregates the underlying BlockCacheMgr and
/// describes the layout of the file system, including the positions
/// of different regions.
///
/// Disk layout: SuperBlock | InodeBitmap | InodeArea | DataBitmap | DataArea.
pub struct EasyFileSystem<const N: usize, RCacheMgr, RCache>
where
    RCacheMgr: lock_api::RawMutex,
    RCache: lock_api::RawMutex,
{
    /// The EasyFileSystem apply read/write operation on disk through the bcache_mgr.
    pub bcache_mgr: lock_api::Mutex<RCacheMgr, BlockCacheMgr<N, RCache>>,
    /// Bitmap tracking which inodes are allocated.
    inode_bitmap: Bitmap,
    /// Bitmap tracking which data blocks are allocated.
    data_bitmap: Bitmap,
    /// Block id of the first block of the inode area.
    inode_area_start_block_id: u32,
    /// Block id of the first block of the data area.
    data_area_start_block_id: u32,
}
type DataBlock = [u8; BLOCK_SZ];
/// An easy fs over a block device
impl EasyFileSystem {
/// A data block of block size
pub fn create(
block_device: Arc<dyn BlockDevice>,
total_blocks: u32,
inode_bitmap_blocks: u32,
) -> Arc<Mutex<Self>> {
// calculate block size of areas & create bitmaps
let inode_bitmap = Bitmap::new(1, inode_bitmap_blocks as usize);
let inode_num = inode_bitmap.maximum();
let inode_area_blocks =
((inode_num * core::mem::size_of::<DiskInode>() + BLOCK_SZ - 1) / BLOCK_SZ) as u32;
let inode_total_blocks = inode_bitmap_blocks + inode_area_blocks;
let data_total_blocks = total_blocks - 1 - inode_total_blocks;
let data_bitmap_blocks = (data_total_blocks + 4096) / 4097;
let data_area_blocks = data_total_blocks - data_bitmap_blocks;
let data_bitmap = Bitmap::new(
(1 + inode_bitmap_blocks + inode_area_blocks) as usize,
data_bitmap_blocks as usize,
);
let mut efs = Self {
block_device: Arc::clone(&block_device),
inode_bitmap,
data_bitmap,
inode_area_start_block: 1 + inode_bitmap_blocks,
data_area_start_block: 1 + inode_total_blocks + data_bitmap_blocks,
};
// clear all blocks
for i in 0..total_blocks {
get_block_cache(i as usize, Arc::clone(&block_device))
.lock()
.modify(0, |data_block: &mut DataBlock| {
for byte in data_block.iter_mut() {
*byte = 0;
}
});
impl<const N: usize, RCacheMgr, RCache> EasyFileSystem<N, RCacheMgr, RCache>
where
RCacheMgr: lock_api::RawMutex,
RCache: lock_api::RawMutex,
{
/// Build a filesystem value with empty bitmaps and zeroed layout
/// fields; `create` fills in the real layout afterwards.
fn new_bare(bcache_mgr: BlockCacheMgr<N, RCache>) -> Self {
    Self {
        bcache_mgr: lock_api::Mutex::new(bcache_mgr),
        inode_bitmap: Bitmap::default(),
        data_bitmap: Bitmap::default(),
        inode_area_start_block_id: 0,
        data_area_start_block_id: 0,
    }
}
// initialize SuperBlock
get_block_cache(0, Arc::clone(&block_device)).lock().modify(
0,
|super_block: &mut SuperBlock| {
super_block.initialize(
total_blocks,
inode_bitmap_blocks,
inode_area_blocks,
data_bitmap_blocks,
data_area_blocks,
);
},
);
// write back immediately
// create a inode for root node "/"
assert_eq!(efs.alloc_inode(), 0);
let (root_inode_block_id, root_inode_offset) = efs.get_disk_inode_pos(0);
get_block_cache(root_inode_block_id as usize, Arc::clone(&block_device))
.lock()
.modify(root_inode_offset, |disk_inode: &mut DiskInode| {
disk_inode.initialize(DiskInodeType::Directory);
});
block_cache_sync_all();
Arc::new(Mutex::new(efs))
}
/// Open a block device as a filesystem
pub fn open(block_device: Arc<dyn BlockDevice>) -> Arc<Mutex<Self>> {
// read SuperBlock
get_block_cache(0, Arc::clone(&block_device))
.lock()
.read(0, |super_block: &SuperBlock| {
assert!(super_block.is_valid(), "Error loading EFS!");
let inode_total_blocks =
super_block.inode_bitmap_blocks + super_block.inode_area_blocks;
let efs = Self {
block_device,
inode_bitmap: Bitmap::new(1, super_block.inode_bitmap_blocks as usize),
data_bitmap: Bitmap::new(
(1 + inode_total_blocks) as usize,
super_block.data_bitmap_blocks as usize,
),
inode_area_start_block: 1 + super_block.inode_bitmap_blocks,
data_area_start_block: 1 + inode_total_blocks + super_block.data_bitmap_blocks,
};
Arc::new(Mutex::new(efs))
})
}
/// Get the root inode of the filesystem
pub fn root_inode(efs: &Arc<Mutex<Self>>) -> Inode {
let block_device = Arc::clone(&efs.lock().block_device);
// acquire efs lock temporarily
let (block_id, block_offset) = efs.lock().get_disk_inode_pos(0);
// release efs lock
Inode::new(block_id, block_offset, Arc::clone(efs), block_device)
}
/// Get inode by id
pub fn get_disk_inode_pos(&self, inode_id: u32) -> (u32, usize) {
let inode_size = core::mem::size_of::<DiskInode>();
let inodes_per_block = (BLOCK_SZ / inode_size) as u32;
let block_id = self.inode_area_start_block + inode_id / inodes_per_block;
let block_id = self.inode_area_start_block_id + inode_id / inodes_per_block;
(
block_id,
(inode_id % inodes_per_block) as usize * inode_size,
@ -123,29 +52,183 @@ impl EasyFileSystem {
}
/// Get data block by id
pub fn get_data_block_id(&self, data_block_id: u32) -> u32 {
self.data_area_start_block + data_block_id
self.data_area_start_block_id + data_block_id
}
/// Allocate a new inode
pub fn alloc_inode(&mut self) -> u32 {
self.inode_bitmap.alloc(&self.block_device).unwrap() as u32
/// Allocate a new inode. This function should be called with BlockCacheMgr locked.
///
/// # Panics
///
/// Panics if the inode bitmap is exhausted (alloc returns None).
fn alloc_inode(&self, bcache_mgr: &mut BlockCacheMgr<N, RCache>) -> u32 {
    self.inode_bitmap.alloc(bcache_mgr).unwrap() as u32
}
/// Allocate a data block
pub fn alloc_data(&mut self) -> u32 {
self.data_bitmap.alloc(&self.block_device).unwrap() as u32 + self.data_area_start_block
pub fn alloc_data(&self, bcache_mgr: &mut BlockCacheMgr<N, RCache>) -> u32 {
self.data_bitmap.alloc(bcache_mgr).unwrap() as u32 + self.data_area_start_block_id
}
/// Deallocate a data block
pub fn dealloc_data(&mut self, block_id: u32) {
get_block_cache(block_id as usize, Arc::clone(&self.block_device))
.lock()
.modify(0, |data_block: &mut DataBlock| {
data_block.iter_mut().for_each(|p| {
*p = 0;
})
});
pub fn dealloc_data(&self, bcache_mgr: &mut BlockCacheMgr<N, RCache>, block_id: u32) {
bcache_mgr.write_block(block_id as usize, 0, |data_block: &mut DataBlock| {
data_block.iter_mut().for_each(|p| {
*p = 0;
})
});
self.data_bitmap.dealloc(
&self.block_device,
(block_id - self.data_area_start_block) as usize,
bcache_mgr,
(block_id - self.data_area_start_block_id) as usize,
)
}
/// Run `op` against this filesystem, then flush all dirty block
/// caches to the block device before returning `op`'s result.
pub fn sync_transaction<T>(&mut self, op: impl FnOnce(&Self) -> T) -> T {
    let result = op(self);
    self.bcache_mgr.lock().sync_all();
    result
}
/// Create a new file system on a bcache_mgr.
///
/// `total_blocks` is the device size in blocks; `inode_bitmap_blocks`
/// is the number of blocks reserved for the inode bitmap. All other
/// region sizes are derived from these two values.
pub fn create(
    total_blocks: u32,
    inode_bitmap_blocks: u32,
    bcache_mgr: BlockCacheMgr<N, RCache>,
) -> Self {
    let mut efs = Self::new_bare(bcache_mgr);
    // calculate block size of areas & create bitmaps
    // layout: SuperBlock | InodeBitmap | InodeArea | DataBitmap | DataArea
    efs.inode_bitmap = Bitmap::new(1, inode_bitmap_blocks as usize);
    let inode_num = efs.inode_bitmap.maximum();
    // Enough blocks to hold `inode_num` on-disk inodes, rounded up.
    let inode_area_blocks =
        ((inode_num * core::mem::size_of::<DiskInode>() + BLOCK_SZ - 1) / BLOCK_SZ) as u32;
    let inode_total_blocks = inode_bitmap_blocks + inode_area_blocks;
    let data_total_blocks = total_blocks - 1 - inode_total_blocks;
    // One data-bitmap block tracks 4096 data blocks (BLOCK_BITS for
    // 512-byte blocks); dividing by 4097 rounds up while accounting
    // for the bitmap block itself.
    let data_bitmap_blocks = (data_total_blocks + 4096) / 4097;
    let data_area_blocks = data_total_blocks - data_bitmap_blocks;
    efs.data_bitmap = Bitmap::new(
        (1 + inode_bitmap_blocks + inode_area_blocks) as usize,
        data_bitmap_blocks as usize,
    );
    efs.inode_area_start_block_id = inode_bitmap_blocks + 1;
    efs.data_area_start_block_id = inode_total_blocks + data_bitmap_blocks + 1;
    efs.sync_transaction(|efs_self| {
        // clear all blocks
        let mut bcache_mgr = efs_self.bcache_mgr.lock();
        for i in 0..total_blocks {
            bcache_mgr.write_block(i as usize, 0, |data_block: &mut DataBlock| {
                data_block.iter_mut().for_each(|byte| *byte = 0);
            });
        }
        // initialize SuperBlock
        bcache_mgr.write_block(SUPER_BLOCK_ID, 0, |super_block: &mut SuperBlock| {
            super_block.initialize(
                total_blocks,
                inode_bitmap_blocks,
                inode_area_blocks,
                data_bitmap_blocks,
                data_area_blocks,
            );
        });
        // The very first inode allocated must be the root inode.
        assert_eq!(
            efs_self.inode_bitmap.alloc(&mut bcache_mgr),
            Some(ROOT_INODE_ID)
        );
        let (root_inode_block_id, root_inode_offset) =
            efs_self.get_disk_inode_pos(ROOT_INODE_ID as u32);
        // Initialize the root inode as a directory.
        bcache_mgr.write_block(
            root_inode_block_id as usize,
            root_inode_offset,
            |disk_inode: &mut DiskInode| {
                disk_inode.initialize(DiskInodeType::Directory);
            },
        );
    });
    efs
}
/// Open an existing easy-fs image through the given block cache manager.
///
/// Reads and validates the SuperBlock, then rebuilds the in-memory bitmap
/// descriptors and area boundaries from it.
///
/// # Panics
/// Panics with "Error loading EFS!" if the SuperBlock magic is invalid.
pub fn open(mut bcache_mgr: BlockCacheMgr<N, RCache>) -> Self {
    // SuperBlock is Copy, so we can return it straight out of the closure.
    let super_block =
        bcache_mgr.read_block(SUPER_BLOCK_ID, 0, |sb: &SuperBlock| *sb);
    assert!(super_block.is_valid(), "Error loading EFS!");
    let inode_total_blocks = super_block.inode_bitmap_blocks + super_block.inode_area_blocks;
    Self {
        bcache_mgr: lock_api::Mutex::new(bcache_mgr),
        inode_bitmap: Bitmap::new(1, super_block.inode_bitmap_blocks as usize),
        data_bitmap: Bitmap::new(
            (1 + inode_total_blocks) as usize,
            super_block.data_bitmap_blocks as usize,
        ),
        inode_area_start_block_id: 1 + super_block.inode_bitmap_blocks,
        data_area_start_block_id: 1 + inode_total_blocks + super_block.data_bitmap_blocks,
    }
}
/// Allocate and initialize a fresh on-disk inode of the given type,
/// returning its inode id.
///
/// This function should be called with bcache_mgr locked.
pub fn new_inode_nolock(
    &self,
    bcache_mgr: &mut BlockCacheMgr<N, RCache>,
    inode_type: DiskInodeType,
) -> u32 {
    let inode_id = self.alloc_inode(bcache_mgr);
    let (blk_id, blk_offset) = self.get_disk_inode_pos(inode_id);
    bcache_mgr.write_block(blk_id as usize, blk_offset, |disk_inode: &mut DiskInode| {
        disk_inode.initialize(inode_type);
    });
    inode_id
}
/// Grow a disk inode to `new_size`, allocating any extra data blocks it
/// needs. A `new_size` not larger than the current size is a no-op.
///
/// This function should be called with bcache_mgr locked.
pub fn increase_size_nolock(
    &self,
    new_size: u32,
    disk_inode: &mut DiskInode,
    bcache_mgr: &mut BlockCacheMgr<N, RCache>,
) {
    if new_size < disk_inode.size {
        return;
    }
    let needed = disk_inode.blocks_num_needed(new_size);
    let new_blocks: Vec<u32> = (0..needed).map(|_| self.alloc_data(bcache_mgr)).collect();
    disk_inode.increase_size(new_size, new_blocks, bcache_mgr);
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_helper::*;
    use std::sync::Arc;

    // Device size (in blocks) and block-cache capacity used by the tests.
    const EFS_BLK_NUM: usize = 2048;
    const BCACHE_NUM: usize = 256;

    /// Create a filesystem on an in-memory device, drop it (flushing via the
    /// cache), then re-open the same device and check the derived layout.
    #[test]
    pub fn test_efs_create_open() {
        let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<EFS_BLK_NUM>::new());
        {
            let bcache_mgr: BlockCacheMgr<BCACHE_NUM, RawSpinlock> = BlockCacheMgr::new(&block_dev);
            let _efs: EasyFileSystem<BCACHE_NUM, RawSpinlock, _> =
                EasyFileSystem::create(EFS_BLK_NUM as u32, 1, bcache_mgr);
        }
        {
            let bcache_mgr: BlockCacheMgr<BCACHE_NUM, RawSpinlock> = BlockCacheMgr::new(&block_dev);
            let efs: EasyFileSystem<BCACHE_NUM, RawSpinlock, _> = EasyFileSystem::open(bcache_mgr);
            // Expected layout with 1 inode-bitmap block:
            // SuperBlock 1
            // InodeBitmap 1
            // InodeArea 1024
            // last=1022, DataBitmap 1
            assert_eq!(efs.inode_area_start_block_id, 2);
            assert_eq!(efs.data_area_start_block_id, 1027);
        }
    }
}

View File

@ -1,5 +1,4 @@
use super::{get_block_cache, BlockDevice, BLOCK_SZ};
use alloc::sync::Arc;
use super::{BlockCacheMgr, BLOCK_SZ};
use alloc::vec::Vec;
use core::fmt::{Debug, Formatter, Result};
@ -22,6 +21,7 @@ const INDIRECT1_BOUND: usize = DIRECT_BOUND + INODE_INDIRECT1_COUNT;
const INDIRECT2_BOUND: usize = INDIRECT1_BOUND + INODE_INDIRECT2_COUNT;
/// Super block of a filesystem
#[repr(C)]
#[derive(Default, Copy, Clone)]
pub struct SuperBlock {
magic: u32,
pub total_blocks: u32,
@ -67,8 +67,10 @@ impl SuperBlock {
self.magic == EFS_MAGIC
}
}
/// Type of a disk inode
#[derive(PartialEq)]
#[repr(u32)]
pub enum DiskInodeType {
File,
Directory,
@ -109,14 +111,17 @@ impl DiskInode {
}
/// Return block number correspond to size.
pub fn data_blocks(&self) -> u32 {
Self::_data_blocks(self.size)
Self::data_blocks_inner(self.size)
}
fn _data_blocks(size: u32) -> u32 {
fn data_blocks_inner(size: u32) -> u32 {
(size + BLOCK_SZ as u32 - 1) / BLOCK_SZ as u32
}
/// Return number of blocks needed include indirect1/2.
pub fn total_blocks(size: u32) -> u32 {
let data_blocks = Self::_data_blocks(size) as usize;
pub fn total_blocks(&self) -> u32 {
Self::total_blocks_inner(self.size)
}
fn total_blocks_inner(size: u32) -> u32 {
let data_blocks = Self::data_blocks_inner(size) as usize;
let mut total = data_blocks as usize;
// indirect1
if data_blocks > INODE_DIRECT_COUNT {
@ -134,40 +139,46 @@ impl DiskInode {
/// Get the number of data blocks that have to be allocated given the new size of data
pub fn blocks_num_needed(&self, new_size: u32) -> u32 {
assert!(new_size >= self.size);
Self::total_blocks(new_size) - Self::total_blocks(self.size)
Self::total_blocks_inner(new_size) - Self::total_blocks_inner(self.size)
}
/// Get id of block given inner id
pub fn get_block_id(&self, inner_id: u32, block_device: &Arc<dyn BlockDevice>) -> u32 {
pub fn get_block_id<const N: usize, R>(
&self,
inner_id: u32,
bcache_mgr: &mut BlockCacheMgr<N, R>,
) -> u32
where
R: lock_api::RawMutex,
{
let inner_id = inner_id as usize;
if inner_id < INODE_DIRECT_COUNT {
self.direct[inner_id]
} else if inner_id < INDIRECT1_BOUND {
get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
.lock()
.read(0, |indirect_block: &IndirectBlock| {
indirect_block[inner_id - INODE_DIRECT_COUNT]
})
bcache_mgr.read_block(
self.indirect1 as usize,
0,
|indirect_block: &IndirectBlock| indirect_block[inner_id - INODE_DIRECT_COUNT],
)
} else {
let last = inner_id - INDIRECT1_BOUND;
let indirect1 = get_block_cache(self.indirect2 as usize, Arc::clone(block_device))
.lock()
.read(0, |indirect2: &IndirectBlock| {
let indirect1 =
bcache_mgr.read_block(self.indirect2 as usize, 0, |indirect2: &IndirectBlock| {
indirect2[last / INODE_INDIRECT1_COUNT]
});
get_block_cache(indirect1 as usize, Arc::clone(block_device))
.lock()
.read(0, |indirect1: &IndirectBlock| {
indirect1[last % INODE_INDIRECT1_COUNT]
})
bcache_mgr.read_block(indirect1 as usize, 0, |indirect1: &IndirectBlock| {
indirect1[last % INODE_INDIRECT1_COUNT]
})
}
}
/// Inncrease the size of current disk inode
pub fn increase_size(
pub fn increase_size<const N: usize, R>(
&mut self,
new_size: u32,
new_blocks: Vec<u32>,
block_device: &Arc<dyn BlockDevice>,
) {
bcache_mgr: &mut BlockCacheMgr<N, R>,
) where
R: lock_api::RawMutex,
{
let mut current_blocks = self.data_blocks();
self.size = new_size;
let mut total_blocks = self.data_blocks();
@ -188,14 +199,16 @@ impl DiskInode {
return;
}
// fill indirect1
get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
bcache_mgr.write_block(
self.indirect1 as usize,
0,
|indirect1: &mut IndirectBlock| {
while current_blocks < total_blocks.min(INODE_INDIRECT1_COUNT as u32) {
indirect1[current_blocks as usize] = new_blocks.next().unwrap();
current_blocks += 1;
}
});
},
);
// alloc indirect2
if total_blocks > INODE_INDIRECT1_COUNT as u32 {
if current_blocks == INODE_INDIRECT1_COUNT as u32 {
@ -212,19 +225,22 @@ impl DiskInode {
let a1 = total_blocks as usize / INODE_INDIRECT1_COUNT;
let b1 = total_blocks as usize % INODE_INDIRECT1_COUNT;
// alloc low-level indirect1
get_block_cache(self.indirect2 as usize, Arc::clone(block_device))
let indirect2_blk = bcache_mgr.get_block_cache(self.indirect2 as usize);
indirect2_blk
.lock()
.modify(0, |indirect2: &mut IndirectBlock| {
.write(0, |indirect2: &mut IndirectBlock| {
while (a0 < a1) || (a0 == a1 && b0 < b1) {
if b0 == 0 {
indirect2[a0] = new_blocks.next().unwrap();
}
// fill current
get_block_cache(indirect2[a0] as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
bcache_mgr.write_block(
indirect2[a0] as usize,
0,
|indirect1: &mut IndirectBlock| {
indirect1[b0] = new_blocks.next().unwrap();
});
},
);
// move to next
b0 += 1;
if b0 == INODE_INDIRECT1_COUNT {
@ -237,7 +253,13 @@ impl DiskInode {
/// Clear size to zero and return blocks that should be deallocated.
/// We will clear the block contents to zero later.
pub fn clear_size(&mut self, block_device: &Arc<dyn BlockDevice>) -> Vec<u32> {
pub fn clear_size<const N: usize, R>(
&mut self,
bcache_mgr: &mut BlockCacheMgr<N, R>,
) -> Vec<u32>
where
R: lock_api::RawMutex,
{
let mut v: Vec<u32> = Vec::new();
let mut data_blocks = self.data_blocks() as usize;
self.size = 0;
@ -257,15 +279,17 @@ impl DiskInode {
return v;
}
// indirect1
get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
bcache_mgr.write_block(
self.indirect1 as usize,
0,
|indirect1: &mut IndirectBlock| {
while current_blocks < data_blocks.min(INODE_INDIRECT1_COUNT) {
v.push(indirect1[current_blocks]);
//indirect1[current_blocks] = 0;
indirect1[current_blocks] = 0;
current_blocks += 1;
}
});
},
);
self.indirect1 = 0;
// indirect2 block
if data_blocks > INODE_INDIRECT1_COUNT {
@ -278,30 +302,32 @@ impl DiskInode {
assert!(data_blocks <= INODE_INDIRECT2_COUNT);
let a1 = data_blocks / INODE_INDIRECT1_COUNT;
let b1 = data_blocks % INODE_INDIRECT1_COUNT;
get_block_cache(self.indirect2 as usize, Arc::clone(block_device))
let indirect2_blk = bcache_mgr.get_block_cache(self.indirect2 as usize);
indirect2_blk
.lock()
.modify(0, |indirect2: &mut IndirectBlock| {
.write(0, |indirect2: &mut IndirectBlock| {
// full indirect1 blocks
for entry in indirect2.iter_mut().take(a1) {
v.push(*entry);
get_block_cache(*entry as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
for entry in indirect1.iter() {
v.push(*entry);
}
});
bcache_mgr.write_block(*entry as usize, 0, |indirect1: &mut IndirectBlock| {
for entry in indirect1.iter() {
v.push(*entry);
}
});
}
// last indirect1 block
if b1 > 0 {
v.push(indirect2[a1]);
get_block_cache(indirect2[a1] as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
bcache_mgr.write_block(
indirect2[a1] as usize,
0,
|indirect1: &mut IndirectBlock| {
for entry in indirect1.iter().take(b1) {
v.push(*entry);
}
});
},
);
//indirect2[a1] = 0;
}
});
@ -309,12 +335,15 @@ impl DiskInode {
v
}
/// Read data from current disk inode
pub fn read_at(
pub fn read_at<const N: usize, R>(
&self,
offset: usize,
buf: &mut [u8],
block_device: &Arc<dyn BlockDevice>,
) -> usize {
bcache_mgr: &mut BlockCacheMgr<N, R>,
) -> usize
where
R: lock_api::RawMutex,
{
let mut start = offset;
let end = (offset + buf.len()).min(self.size as usize);
if start >= end {
@ -329,12 +358,8 @@ impl DiskInode {
// read and update read size
let block_read_size = end_current_block - start;
let dst = &mut buf[read_size..read_size + block_read_size];
get_block_cache(
self.get_block_id(start_block as u32, block_device) as usize,
Arc::clone(block_device),
)
.lock()
.read(0, |data_block: &DataBlock| {
let block_id = self.get_block_id(start_block as u32, bcache_mgr);
bcache_mgr.read_block(block_id as usize, 0, |data_block: &DataBlock| {
let src = &data_block[start % BLOCK_SZ..start % BLOCK_SZ + block_read_size];
dst.copy_from_slice(src);
});
@ -350,12 +375,15 @@ impl DiskInode {
}
/// Write data into current disk inode
/// size must be adjusted properly beforehand
pub fn write_at(
pub fn write_at<const N: usize, R>(
&mut self,
offset: usize,
buf: &[u8],
block_device: &Arc<dyn BlockDevice>,
) -> usize {
bcache_mgr: &mut BlockCacheMgr<N, R>,
) -> usize
where
R: lock_api::RawMutex,
{
let mut start = offset;
let end = (offset + buf.len()).min(self.size as usize);
assert!(start <= end);
@ -367,12 +395,8 @@ impl DiskInode {
end_current_block = end_current_block.min(end);
// write and update write size
let block_write_size = end_current_block - start;
get_block_cache(
self.get_block_id(start_block as u32, block_device) as usize,
Arc::clone(block_device),
)
.lock()
.modify(0, |data_block: &mut DataBlock| {
let block_id = self.get_block_id(start_block as u32, bcache_mgr) as usize;
bcache_mgr.write_block(block_id, 0, |data_block: &mut DataBlock| {
let src = &buf[write_size..write_size + block_write_size];
let dst = &mut data_block[start % BLOCK_SZ..start % BLOCK_SZ + block_write_size];
dst.copy_from_slice(src);
@ -387,8 +411,33 @@ impl DiskInode {
}
write_size
}
/// Find inode under a disk inode by name.
///
/// Scans the directory entries of this inode and returns the inode number
/// of the first entry whose name matches, or `None` if absent.
///
/// # Panics
/// Panics if this inode is not a directory, or if a directory entry cannot
/// be read in full.
pub fn inode_id_by_name<const N: usize, R>(
    &self,
    name: &str,
    bcache_mgr: &mut BlockCacheMgr<N, R>,
) -> Option<u32>
where
    R: lock_api::RawMutex,
{
    // assert it is a directory
    assert!(self.is_dir());
    let entry_count = (self.size as usize) / DIRENT_SZ;
    let mut entry = DirEntry::empty();
    (0..entry_count).find_map(|idx| {
        let read = self.read_at(DIRENT_SZ * idx, entry.as_bytes_mut(), bcache_mgr);
        assert_eq!(read, DIRENT_SZ);
        (entry.name() == name).then(|| entry.inode_number() as u32)
    })
}
}
/// A directory entry
#[repr(C)]
pub struct DirEntry {
name: [u8; NAME_LENGTH_LIMIT + 1],
@ -405,10 +454,11 @@ impl DirEntry {
inode_number: 0,
}
}
/// Crate a directory entry from name and inode number
/// Create a directory entry from name and inode number
pub fn new(name: &str, inode_number: u32) -> Self {
let mut bytes = [0u8; NAME_LENGTH_LIMIT + 1];
bytes[..name.len()].copy_from_slice(name.as_bytes());
let len = core::cmp::min(name.len(), NAME_LENGTH_LIMIT);
bytes[..len].copy_from_slice(&name.as_bytes()[..len]);
Self {
name: bytes,
inode_number,
@ -416,11 +466,13 @@ impl DirEntry {
}
/// Serialize into bytes
pub fn as_bytes(&self) -> &[u8] {
unsafe { core::slice::from_raw_parts(self as *const _ as usize as *const u8, DIRENT_SZ) }
// Safety: We can guarantee that size_of::<Self>() == DIRENT_SZ
unsafe { core::mem::transmute::<&Self, &[u8; DIRENT_SZ]>(self) }
}
/// Serialize into mutable bytes
pub fn as_bytes_mut(&mut self) -> &mut [u8] {
unsafe { core::slice::from_raw_parts_mut(self as *mut _ as usize as *mut u8, DIRENT_SZ) }
// Safety: The same of Self::as_bytes
unsafe { core::mem::transmute::<&mut Self, &mut [u8; DIRENT_SZ]>(self) }
}
/// Get name of the entry
pub fn name(&self) -> &str {
@ -432,3 +484,107 @@ impl DirEntry {
self.inode_number
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_helper::*;
    use std::sync::Arc;

    /// On-disk structures must keep their fixed sizes: the SuperBlock fits
    /// in one block, four DiskInodes fill a block, and DirEntry is DIRENT_SZ.
    #[test]
    pub fn test_static_size() {
        use core::mem::size_of;
        assert!(size_of::<SuperBlock>() <= BLOCK_SZ);
        assert_eq!(size_of::<DiskInode>(), BLOCK_SZ / 4);
        assert_eq!(size_of::<DirEntry>(), DIRENT_SZ);
    }

    pub mod disk_inode {
        use super::*;

        /// A freshly initialized inode owns no data blocks.
        #[test]
        pub fn test_init() {
            let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<4096>::new());
            let mut bcache_mgr: BlockCacheMgr<256, RawSpinlock> = BlockCacheMgr::new(&block_dev);
            let mut blk_allocator = StackAllocator::<usize>::new();
            let inode_blk_id = blk_allocator.alloc(1)[0];
            bcache_mgr.write_block(inode_blk_id, 0, |disk_inode: &mut DiskInode| {
                disk_inode.initialize(DiskInodeType::File);
                assert_eq!(disk_inode.data_blocks(), 0);
                assert_eq!(disk_inode.total_blocks(), 0);
            });
        }

        /// Grow an inode in two steps (direct-only, then past the indirect1
        /// boundary) and check that clear_size returns every allocated block.
        #[test]
        pub fn test_increase_clear_size() {
            let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<4096>::new());
            let mut bcache_mgr: BlockCacheMgr<16, RawSpinlock> = BlockCacheMgr::new(&block_dev);
            let mut blk_allocator = StackAllocator::<u32>::new();
            let inode_blk_id = blk_allocator.alloc(1)[0] as usize;
            // Running tally of blocks handed to the inode (data + indirect).
            let mut allocated = 0usize;
            let inode_blk = bcache_mgr.get_block_cache(inode_blk_id);
            let mut inode_blk_guard = inode_blk.lock();
            let disk_inode = inode_blk_guard.value_mut_at_offset::<DiskInode>(0);
            disk_inode.initialize(DiskInodeType::File);
            let new_size = 20 * BLOCK_SZ;
            let needed_blk_num = disk_inode.blocks_num_needed(new_size as u32);
            allocated += needed_blk_num as usize;
            assert_eq!(needed_blk_num, 20);
            let new_blks = blk_allocator.alloc(needed_blk_num as usize);
            disk_inode.increase_size(new_size as u32, new_blks, &mut bcache_mgr);
            assert_eq!(disk_inode.size, new_size as u32);
            // Crossing INDIRECT1_BOUND costs one extra block for indirect1.
            let new_size = INDIRECT1_BOUND * BLOCK_SZ;
            let needed_blk_num = disk_inode.blocks_num_needed(new_size as u32);
            allocated += needed_blk_num as usize;
            assert_eq!(needed_blk_num, INDIRECT1_BOUND as u32 - 20 + 1);
            let new_blks = blk_allocator.alloc(needed_blk_num as usize);
            disk_inode.increase_size(new_size as u32, new_blks, &mut bcache_mgr);
            assert_eq!(disk_inode.size, new_size as u32);
            let blks = disk_inode.clear_size(&mut bcache_mgr);
            assert_eq!(disk_inode.size, 0);
            assert_eq!(blks.len(), allocated);
        }

        /// Randomized read/write ops mirrored against an in-memory reference
        /// file; every read must match the reference byte-for-byte.
        #[test]
        pub fn test_read_write() {
            let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<4096>::new());
            let mut bcache_mgr: BlockCacheMgr<16, RawSpinlock> = BlockCacheMgr::new(&block_dev);
            let mut blk_allocator = StackAllocator::<u32>::new();
            let inode_blk_id = blk_allocator.alloc(1)[0] as usize;
            let inode_blk = bcache_mgr.get_block_cache(inode_blk_id);
            let mut inode_blk_guard = inode_blk.lock();
            let disk_inode = inode_blk_guard.value_mut_at_offset::<DiskInode>(0);
            let new_size = 200 * BLOCK_SZ as u32;
            let needed_blk_num = disk_inode.blocks_num_needed(new_size);
            let new_blks = blk_allocator.alloc(needed_blk_num as usize);
            disk_inode.increase_size(new_size, new_blks, &mut bcache_mgr);
            let test_ops = 1000;
            let mut fake_file = FakeFile::new(new_size as usize);
            for _ in 0..test_ops {
                let file_op = FileOpGenerator::generate(new_size as usize);
                match file_op {
                    FileOp::FileRead { offset, len } => {
                        let mut buf_fake: Vec<u8> = Vec::new();
                        let mut buf: Vec<u8> = Vec::new();
                        buf_fake.resize(len, 0);
                        buf.resize(len, 0);
                        disk_inode.read_at(offset, buf.as_mut_slice(), &mut bcache_mgr);
                        fake_file.read_at(offset, buf_fake.as_mut_slice());
                        assert_eq!(buf.as_slice(), buf_fake.as_slice());
                    }
                    FileOp::FileWrite { offset, data } => {
                        disk_inode.write_at(offset, data.as_slice(), &mut bcache_mgr);
                        fake_file.write_at(offset, data.as_slice());
                    }
                }
            }
        }
    }
}

View File

@ -1,18 +1,22 @@
//!An easy file system isolated from the kernel
#![no_std]
#![cfg_attr(not(test), no_std)]
#![deny(missing_docs)]
//! EasyFileSystem
extern crate alloc;
mod bitmap;
mod block_cache;
mod block_dev;
mod efs;
mod layout;
#[cfg(test)]
mod test_helper;
mod vfs;
/// Use a block size of 512 bytes
/// Each block is of 512 bytes.
pub const BLOCK_SZ: usize = 512;
use bitmap::Bitmap;
use block_cache::{block_cache_sync_all, get_block_cache};
pub use block_cache::{BlockCache, BlockCacheMgr};
pub use block_dev::BlockDevice;
pub use efs::EasyFileSystem;
use layout::*;
pub use vfs::Inode;

199
easy-fs/src/test_helper.rs Normal file
View File

@ -0,0 +1,199 @@
pub use crate::{BlockDevice, BLOCK_SZ};
use core::mem::swap;
use lock_api::{GuardSend, RawMutex};
use rand::Rng;
use std::collections::VecDeque;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Mutex;
/// A no-op block device for tests that only need a device to exist:
/// reads leave `buf` untouched and writes are discarded.
pub struct MockBlockDevice;

impl BlockDevice for MockBlockDevice {
    fn read_block(&self, _block_id: usize, _buf: &mut [u8]) {}
    fn write_block(&self, _block_id: usize, _buf: &[u8]) {}
}
/// An in-memory block device of `N` sectors, each `BLOCK_SZ` bytes,
/// protected by a mutex so it can be shared between test threads.
pub struct TestBlockDevice<const N: usize> {
    pub blocks: Box<Mutex<Vec<[u8; BLOCK_SZ]>>>,
}

impl<const N: usize> TestBlockDevice<N> {
    /// Build a device with all `N` blocks zero-filled.
    pub fn new() -> Self {
        let zeroed = vec![[0u8; BLOCK_SZ]; N];
        Self {
            blocks: Box::new(Mutex::new(zeroed)),
        }
    }
}

impl<const N: usize> BlockDevice for TestBlockDevice<N> {
    fn read_block(&self, block_id: usize, buf: &mut [u8]) {
        assert!(block_id < N);
        let guard = self.blocks.lock().unwrap();
        buf.copy_from_slice(&guard[block_id]);
    }

    fn write_block(&self, block_id: usize, buf: &[u8]) {
        assert!(block_id < N);
        let mut guard = self.blocks.lock().unwrap();
        guard[block_id].copy_from_slice(buf);
    }
}
/// Define our raw lock type
///
/// From [lock_api](https://docs.rs/lock_api/latest/lock_api/index.html)
pub struct RawSpinlock(AtomicBool);

// Implement RawMutex for this type
unsafe impl RawMutex for RawSpinlock {
    const INIT: RawSpinlock = RawSpinlock(AtomicBool::new(false));

    // A spinlock guard can be sent to another thread and unlocked there
    type GuardMarker = GuardSend;

    fn lock(&self) {
        // Busy-wait until the CAS succeeds. The spin_loop hint tells the CPU
        // we are spinning, which is friendlier to SMT siblings and power.
        // Note: This isn't the best way of implementing a spinlock, but it
        // suffices for the sake of this example.
        while !self.try_lock() {
            core::hint::spin_loop();
        }
    }

    fn try_lock(&self) -> bool {
        // Acquire on success pairs with the Release store in `unlock`.
        self.0
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    unsafe fn unlock(&self) {
        self.0.store(false, Ordering::Release);
    }
}
/// A simple id allocator that hands out consecutive ids starting from 0 and
/// reuses freed ids in LIFO order.
pub struct StackAllocator<U> {
    next_id: U,
    free_list: VecDeque<U>,
}

// A single generic impl replaces the previous duplicated monomorphic impls
// for `u32` and `usize`; both satisfy these bounds, so all existing callers
// (`StackAllocator::<u32>::new()`, `StackAllocator::<usize>::new()`) keep
// working unchanged.
impl<U> StackAllocator<U>
where
    U: Copy + From<u8> + core::ops::AddAssign,
{
    /// Create an allocator whose first fresh id is 0.
    pub fn new() -> Self {
        Self {
            next_id: U::from(0u8),
            free_list: VecDeque::new(),
        }
    }

    /// Hand out one id, preferring the most recently freed one.
    fn alloc_one(&mut self) -> U {
        if let Some(id) = self.free_list.pop_back() {
            id
        } else {
            let id = self.next_id;
            self.next_id += U::from(1u8);
            id
        }
    }

    /// Allocate `n` ids.
    pub fn alloc(&mut self, n: usize) -> Vec<U> {
        (0..n).map(|_| self.alloc_one()).collect()
    }

    /// Return one id to the free list.
    #[allow(unused)]
    fn free_one(&mut self, id: U) {
        self.free_list.push_back(id);
    }

    /// Return a batch of ids to the free list.
    #[allow(unused)]
    pub fn free(&mut self, ids: Vec<U>) {
        for id in ids.into_iter() {
            self.free_one(id);
        }
    }
}
/// An in-memory reference "file": a zero-initialized byte vector used as the
/// ground truth when checking DiskInode reads and writes.
pub struct FakeFile {
    v: Vec<u8>,
}

impl FakeFile {
    /// Create a file of `size` zero bytes.
    pub fn new(size: usize) -> Self {
        Self {
            v: vec![0u8; size],
        }
    }

    /// Copy `dst.len()` bytes starting at `offset` into `dst`.
    /// Panics if the range runs past the end of the file.
    pub fn read_at(&self, offset: usize, dst: &mut [u8]) {
        let src = &self.v[offset..offset + dst.len()];
        dst.copy_from_slice(src);
    }

    /// Overwrite `src.len()` bytes starting at `offset` with `src`.
    /// Panics if the range runs past the end of the file.
    pub fn write_at(&mut self, offset: usize, src: &[u8]) {
        self.v[offset..offset + src.len()].copy_from_slice(src);
    }
}
/// Generator of random file operations for the read/write fuzz test.
pub struct FileOpGenerator;

/// A single randomized operation against a file of known length.
#[derive(Debug)]
pub enum FileOp {
    FileRead { offset: usize, len: usize },
    FileWrite { offset: usize, data: Vec<u8> },
}

impl FileOpGenerator {
    /// Generate a random read or write whose range lies inside a file of
    /// `file_len` bytes: two distinct offsets in [0, file_len-2] are drawn
    /// and ordered, giving `offset` and `len = offset1 - offset0`.
    ///
    /// NOTE(review): `file_len <= 1` causes a modulo-by-zero panic and
    /// `file_len == 2` makes the loop spin forever (only one possible
    /// offset) — callers must pass file_len > 2; confirm intended.
    pub fn generate(file_len: usize) -> FileOp {
        let offset: usize;
        let len: usize;
        let mut rng = rand::thread_rng();
        loop {
            let mut offset0 = rng.gen::<u64>() % (file_len as u64 - 1);
            let mut offset1 = rng.gen::<u64>() % (file_len as u64 - 1);
            if offset0 != offset1 {
                if offset0 > offset1 {
                    // Ensure offset0 <= offset1 so the length is positive.
                    swap(&mut offset0, &mut offset1);
                }
                offset = offset0 as usize;
                len = (offset1 - offset0) as usize;
                break;
            }
        }
        // Coin flip: half reads, half writes with random payload.
        if rand::random() {
            FileOp::FileRead { offset, len }
        } else {
            let mut data: Vec<u8> = Vec::new();
            for _ in 0..len {
                data.push(rand::random::<u8>());
            }
            FileOp::FileWrite { offset, data }
        }
    }
}

View File

@ -1,186 +1,295 @@
use super::{
block_cache_sync_all, get_block_cache, BlockDevice, DirEntry, DiskInode, DiskInodeType,
EasyFileSystem, DIRENT_SZ,
};
use crate::efs::EasyFileSystem;
use crate::layout::{DirEntry, DiskInode, DiskInodeType, DIRENT_SZ};
use crate::BlockCacheMgr;
use alloc::string::String;
use alloc::sync::Arc;
use alloc::vec::Vec;
use spin::{Mutex, MutexGuard};
use core::str::FromStr;
/// Virtual filesystem layer over easy-fs
pub struct Inode {
pub struct Inode<REasyFS, const N: usize, RCacheMgr, RCache>
where
REasyFS: lock_api::RawMutex,
RCacheMgr: lock_api::RawMutex,
RCache: lock_api::RawMutex,
{
block_id: usize,
block_offset: usize,
fs: Arc<Mutex<EasyFileSystem>>,
block_device: Arc<dyn BlockDevice>,
fs: Arc<lock_api::Mutex<REasyFS, EasyFileSystem<N, RCacheMgr, RCache>>>,
}
impl Inode {
/// Create a vfs inode
pub fn new(
block_id: u32,
block_offset: usize,
fs: Arc<Mutex<EasyFileSystem>>,
block_device: Arc<dyn BlockDevice>,
impl<REasyFS, const N: usize, RCacheMgr, RCache> Inode<REasyFS, N, RCacheMgr, RCache>
where
REasyFS: lock_api::RawMutex,
RCacheMgr: lock_api::RawMutex,
RCache: lock_api::RawMutex,
{
/// Get the root inode of an EasyFileSystem.
pub fn root_inode(
efs: &Arc<lock_api::Mutex<REasyFS, EasyFileSystem<N, RCacheMgr, RCache>>>,
) -> Self {
let (block_id, block_offset) = efs.lock().get_disk_inode_pos(0);
Self {
block_id: block_id as usize,
block_offset,
fs,
block_device,
fs: Arc::clone(efs),
}
}
/// Call a function over a disk inode to read it
fn read_disk_inode<V>(&self, f: impl FnOnce(&DiskInode) -> V) -> V {
get_block_cache(self.block_id, Arc::clone(&self.block_device))
.lock()
.read(self.block_offset, f)
}
/// Call a function over a disk inode to modify it
fn modify_disk_inode<V>(&self, f: impl FnOnce(&mut DiskInode) -> V) -> V {
get_block_cache(self.block_id, Arc::clone(&self.block_device))
.lock()
.modify(self.block_offset, f)
}
/// Find inode under a disk inode by name
fn find_inode_id(&self, name: &str, disk_inode: &DiskInode) -> Option<u32> {
// assert it is a directory
assert!(disk_inode.is_dir());
let file_count = (disk_inode.size as usize) / DIRENT_SZ;
let mut dirent = DirEntry::empty();
for i in 0..file_count {
assert_eq!(
disk_inode.read_at(DIRENT_SZ * i, dirent.as_bytes_mut(), &self.block_device,),
DIRENT_SZ,
);
if dirent.name() == name {
return Some(dirent.inode_number() as u32);
}
}
None
}
/// Find inode under current inode by name
pub fn find(&self, name: &str) -> Option<Arc<Inode>> {
let fs = self.fs.lock();
self.read_disk_inode(|disk_inode| {
self.find_inode_id(name, disk_inode).map(|inode_id| {
let (block_id, block_offset) = fs.get_disk_inode_pos(inode_id);
Arc::new(Self::new(
block_id,
block_offset,
self.fs.clone(),
self.block_device.clone(),
))
})
})
}
/// Increase the size of a disk inode
fn increase_size(
/// Apply a read operation on the DiskInode the current Inode refers to.
///
/// This requires the mutable reference of the bcache_mgr.
pub fn read_disk_inode<V>(
&self,
new_size: u32,
disk_inode: &mut DiskInode,
fs: &mut MutexGuard<EasyFileSystem>,
) {
if new_size < disk_inode.size {
return;
}
let blocks_needed = disk_inode.blocks_num_needed(new_size);
let mut v: Vec<u32> = Vec::new();
for _ in 0..blocks_needed {
v.push(fs.alloc_data());
}
disk_inode.increase_size(new_size, v, &self.block_device);
bcache_mgr: &mut BlockCacheMgr<N, RCache>,
op: impl FnOnce(&DiskInode) -> V,
) -> V {
bcache_mgr.read_block(self.block_id, self.block_offset, op)
}
/// Create inode under current inode by name
pub fn create(&self, name: &str) -> Option<Arc<Inode>> {
let mut fs = self.fs.lock();
let op = |root_inode: &DiskInode| {
// assert it is a directory
assert!(root_inode.is_dir());
// has the file been created?
self.find_inode_id(name, root_inode)
};
if self.read_disk_inode(op).is_some() {
/// Apply a write operation on the DiskInode the current Inode refers to.
///
/// This requires the mutable reference of the bcache_mgr.
///
/// `op` receives a mutable reference to the on-disk inode located at
/// (`self.block_id`, `self.block_offset`) and its result is returned.
pub fn write_disk_inode<V>(
    &self,
    bcache_mgr: &mut BlockCacheMgr<N, RCache>,
    op: impl FnOnce(&mut DiskInode) -> V,
) -> V {
    bcache_mgr.write_block(self.block_id, self.block_offset, op)
}
/// Return true if the current Inode is a directory.
pub fn is_dir(&self) -> bool {
    // Lock order: filesystem first, then its block cache manager.
    let fs = self.fs.lock();
    let mut bcache_mgr = fs.bcache_mgr.lock();
    let check = |disk_inode: &DiskInode| disk_inode.is_dir();
    bcache_mgr.read_block(self.block_id, self.block_offset, check)
}
/// Return true if the current Inode is a file.
pub fn is_file(&self) -> bool {
    // Lock order: filesystem first, then its block cache manager.
    let fs = self.fs.lock();
    let mut bcache_mgr = fs.bcache_mgr.lock();
    let check = |disk_inode: &DiskInode| disk_inode.is_file();
    bcache_mgr.read_block(self.block_id, self.block_offset, check)
}
/// Return the size in bytes of the content the current Inode refers to.
pub fn size(&self) -> u32 {
    // Lock order: filesystem first, then its block cache manager.
    let fs = self.fs.lock();
    let mut bcache_mgr = fs.bcache_mgr.lock();
    let get_size = |disk_inode: &DiskInode| disk_inode.size;
    bcache_mgr.read_block(self.block_id, self.block_offset, get_size)
}
/// Create a new file under the root directory.
///
/// If the file with the given name exists, return None. Otherwise,
/// return the created Inode.
pub fn create(&self, name: &str) -> Option<Arc<Self>> {
assert!(self.is_dir());
if self.find(name).is_some() {
return None;
}
// create a new file
// alloc a inode with an indirect block
let new_inode_id = fs.alloc_inode();
// initialize inode
let (new_inode_block_id, new_inode_block_offset) = fs.get_disk_inode_pos(new_inode_id);
get_block_cache(new_inode_block_id as usize, Arc::clone(&self.block_device))
.lock()
.modify(new_inode_block_offset, |new_inode: &mut DiskInode| {
new_inode.initialize(DiskInodeType::File);
});
self.modify_disk_inode(|root_inode| {
// append file in the dirent
let file_count = (root_inode.size as usize) / DIRENT_SZ;
let new_size = (file_count + 1) * DIRENT_SZ;
// increase size
self.increase_size(new_size as u32, root_inode, &mut fs);
// write dirent
let dirent = DirEntry::new(name, new_inode_id);
root_inode.write_at(
file_count * DIRENT_SZ,
dirent.as_bytes(),
&self.block_device,
);
});
let mut fs_guard = self.fs.lock();
let (block_id, block_offset) = fs.get_disk_inode_pos(new_inode_id);
block_cache_sync_all();
// return inode
Some(Arc::new(Self::new(
block_id,
block_offset,
self.fs.clone(),
self.block_device.clone(),
)))
// release efs lock automatically by compiler
}
/// List inodes under current inode
pub fn ls(&self) -> Vec<String> {
let _fs = self.fs.lock();
self.read_disk_inode(|disk_inode| {
let file_count = (disk_inode.size as usize) / DIRENT_SZ;
let mut v: Vec<String> = Vec::new();
for i in 0..file_count {
let mut dirent = DirEntry::empty();
assert_eq!(
disk_inode.read_at(i * DIRENT_SZ, dirent.as_bytes_mut(), &self.block_device,),
DIRENT_SZ,
);
v.push(String::from(dirent.name()));
}
v
fs_guard.sync_transaction(|fs| {
let mut bcache_mgr = fs.bcache_mgr.lock();
let new_inode_id = fs.new_inode_nolock(&mut bcache_mgr, DiskInodeType::File);
let (new_inode_blk_id, new_inode_blk_offset) = fs.get_disk_inode_pos(new_inode_id);
let root_inode_blk = bcache_mgr.get_block_cache(self.block_id);
root_inode_blk
.lock()
.write(self.block_offset, |root_inode: &mut DiskInode| {
// append file in the dirent
let file_count = (root_inode.size as usize) / DIRENT_SZ;
let new_size = (file_count + 1) * DIRENT_SZ;
// increase size
fs.increase_size_nolock(new_size as u32, root_inode, &mut bcache_mgr);
// write dirent
let dirent = DirEntry::new(name, new_inode_id);
root_inode.write_at(file_count * DIRENT_SZ, dirent.as_bytes(), &mut bcache_mgr);
});
Some(Arc::new(Self {
block_id: new_inode_blk_id as usize,
block_offset: new_inode_blk_offset,
fs: Arc::clone(&self.fs),
}))
})
}
/// Read data from current inode
/// Search a file with the given name under the root directory and return the
/// corresponding Inode.
///
/// Returns `None` when no directory entry matches `name`.
pub fn find(&self, name: &str) -> Option<Arc<Self>> {
    assert!(self.is_dir());
    let mut fs_guard = self.fs.lock();
    fs_guard.sync_transaction(|fs| {
        let mut bcache_mgr = fs.bcache_mgr.lock();
        // Take a handle to the cache entry holding this directory's inode
        // before building the closure, so the closure is free to borrow
        // `bcache_mgr` mutably for the entry scan.
        let block_cache = bcache_mgr.get_block_cache(self.block_id);
        let disk_inode_op = |disk_inode: &DiskInode| {
            disk_inode
                .inode_id_by_name(name, &mut bcache_mgr)
                .map(|inode_id: u32| {
                    // Translate the inode id into its on-disk location and
                    // wrap it as a new vfs Inode sharing the same fs handle.
                    let (block_id, block_offset) = fs.get_disk_inode_pos(inode_id);
                    Arc::new(Self {
                        block_id: block_id as usize,
                        block_offset,
                        fs: self.fs.clone(),
                    })
                })
        };
        let block_cache = block_cache.lock();
        block_cache.read(self.block_offset, disk_inode_op)
    })
}
/// Read the file's contents at the specified offset into the given buffer.
///
/// Returns the number of bytes actually read, which is less than
/// `buf.len()` when the read reaches the end of the file.
pub fn read_at(&self, offset: usize, buf: &mut [u8]) -> usize {
    let mut fs_guard = self.fs.lock();
    fs_guard.sync_transaction(|fs| {
        let mut bcache_mgr = fs.bcache_mgr.lock();
        // Pin the cache entry holding this on-disk inode and borrow the
        // DiskInode in place, keeping `bcache_mgr` free for the data-block
        // reads performed by `DiskInode::read_at`.
        let inode_blk = bcache_mgr.get_block_cache(self.block_id);
        let inode_blk_guard = inode_blk.lock();
        let disk_inode: &DiskInode = inode_blk_guard.value_ref_at_offset(self.block_offset);
        disk_inode.read_at(offset, buf, &mut bcache_mgr)
    })
}
/// Write data to current inode
/// Write the contents of the buffer to the file at the specified offset.
pub fn write_at(&self, offset: usize, buf: &[u8]) -> usize {
let mut fs = self.fs.lock();
let size = self.modify_disk_inode(|disk_inode| {
self.increase_size((offset + buf.len()) as u32, disk_inode, &mut fs);
disk_inode.write_at(offset, buf, &self.block_device)
});
block_cache_sync_all();
size
let mut fs_guard = self.fs.lock();
fs_guard.sync_transaction(|fs| {
let mut bcache_mgr = fs.bcache_mgr.lock();
let inode_blk = bcache_mgr.get_block_cache(self.block_id);
let mut inode_blk_guard = inode_blk.lock();
let disk_inode: &mut DiskInode = inode_blk_guard.value_mut_at_offset(self.block_offset);
fs.increase_size_nolock((offset + buf.len()) as u32, disk_inode, &mut bcache_mgr);
disk_inode.write_at(offset, buf, &mut bcache_mgr)
})
}
/// Clear the data in current inode
/// Clear the size of the file to zero.
pub fn clear(&self) {
let mut fs = self.fs.lock();
self.modify_disk_inode(|disk_inode| {
let size = disk_inode.size;
let data_blocks_dealloc = disk_inode.clear_size(&self.block_device);
assert!(data_blocks_dealloc.len() == DiskInode::total_blocks(size) as usize);
for data_block in data_blocks_dealloc.into_iter() {
fs.dealloc_data(data_block);
let mut fs_guard = self.fs.lock();
fs_guard.sync_transaction(|fs| {
let mut bcache_mgr = fs.bcache_mgr.lock();
let inode_blk = bcache_mgr.get_block_cache(self.block_id);
let mut inode_blk_guard = inode_blk.lock();
let disk_inode: &mut DiskInode = inode_blk_guard.value_mut_at_offset(self.block_offset);
let total_blks = disk_inode.total_blocks();
let blocks_dealloc = disk_inode.clear_size(&mut bcache_mgr);
assert_eq!(blocks_dealloc.len(), total_blks as usize);
for block_id in blocks_dealloc.into_iter() {
fs.dealloc_data(&mut bcache_mgr, block_id);
}
});
block_cache_sync_all();
}
/// Return a vector containing all the file names under the directory as
/// `String`s, in directory-entry order.
///
/// Panics if `self` is not a directory or if the directory's size is not a
/// whole number of directory entries.
pub fn listdir(&self) -> Vec<String> {
    assert!(self.is_dir());
    let mut list: Vec<String> = Vec::new();
    // A well-formed directory is an exact multiple of the entry size.
    assert_eq!(self.size() % (DIRENT_SZ as u32), 0);
    let mut dirent = DirEntry::empty();
    let mut offset = 0usize;
    while offset < self.size() as usize {
        // Read one on-disk entry at a time into the scratch dirent.
        self.read_at(offset, dirent.as_bytes_mut());
        // `DirEntry::name` yields a &str, so the conversion is infallible;
        // String::from avoids the needless FromStr/unwrap roundtrip.
        list.push(String::from(dirent.name()));
        offset += DIRENT_SZ;
    }
    list
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_helper::*;

    // Total number of blocks provided by the in-memory test block device.
    const BLK_DEV_SZ: usize = 4096;
    // Capacity (in blocks) of the block cache used by the tests.
    const BCACHE_SZ: usize = 256;
    const TEST_FILENAME: &str = "test_file0";

    type EasyFileSystemType = EasyFileSystem<256, RawSpinlock, RawSpinlock>;

    /// Build a fresh easy-fs instance backed by an in-memory block device,
    /// wrapped in the mutex the `Inode` API expects to share.
    fn test_prepare_efs() -> Arc<lock_api::Mutex<RawSpinlock, EasyFileSystemType>> {
        let block_dev: Arc<dyn BlockDevice> = Arc::new(TestBlockDevice::<BLK_DEV_SZ>::new());
        let bcache_mgr: BlockCacheMgr<BCACHE_SZ, RawSpinlock> = BlockCacheMgr::new(&block_dev);
        // One inode bitmap block is enough for these small tests.
        let efs: EasyFileSystem<BCACHE_SZ, RawSpinlock, _> =
            EasyFileSystem::create(BLK_DEV_SZ as u32, 1, bcache_mgr);
        let efs: lock_api::Mutex<RawSpinlock, EasyFileSystem<BCACHE_SZ, _, _>> =
            lock_api::Mutex::new(efs);
        Arc::new(efs)
    }

    /// Creating a file under the root directory makes it findable, typed as
    /// a file, empty, and recorded as the first directory entry.
    #[test]
    pub fn test_file_create_and_find() {
        let efs = test_prepare_efs();
        let root_inode = Inode::root_inode(&efs);
        assert!(root_inode.is_dir());
        root_inode.create(TEST_FILENAME);
        let file_inode = root_inode.find(TEST_FILENAME);
        assert!(file_inode.is_some());
        let file_inode = file_inode.unwrap();
        assert!(file_inode.is_file());
        // Root grew by exactly one dirent; the new file itself is empty.
        assert_eq!(root_inode.size(), DIRENT_SZ as u32);
        assert_eq!(file_inode.size(), 0);
        let mut dirent = DirEntry::empty();
        root_inode.read_at(0, dirent.as_bytes_mut());
        assert_eq!(dirent.name(), TEST_FILENAME);
    }

    /// listdir returns every created file name in creation order.
    #[test]
    pub fn test_listdir() {
        let efs = test_prepare_efs();
        let root_inode = Inode::root_inode(&efs);
        let filenames: Vec<String> = (0..1000).map(|id| format!("test_file{}", id)).collect();
        for filename in filenames.iter() {
            root_inode.create(filename.as_str());
        }
        let filenames_from_fs = root_inode.listdir();
        assert_eq!(filenames, filenames_from_fs);
    }

    /// Randomized read/write fuzzing: every read from the fs file must match
    /// the shadow in-memory FakeFile after an identical sequence of ops.
    #[test]
    pub fn test_file_read_write() {
        let efs = test_prepare_efs();
        let root_inode = Inode::root_inode(&efs);
        let file_inode = root_inode.create(TEST_FILENAME).unwrap();
        let file_size = 200 * BLOCK_SZ;
        let mut fake_file = FakeFile::new(file_size);
        for _ in 0..1000 {
            match FileOpGenerator::generate(file_size) {
                FileOp::FileRead { offset, len } => {
                    let mut buf: Vec<u8> = Vec::new();
                    buf.resize(len, 0);
                    let mut fake_buf: Vec<u8> = Vec::new();
                    fake_buf.resize(len, 0);
                    // Apply the same read to both the shadow and the fs.
                    fake_file.read_at(offset, fake_buf.as_mut_slice());
                    file_inode.read_at(offset, buf.as_mut_slice());
                    assert_eq!(buf.as_slice(), fake_buf.as_slice());
                }
                FileOp::FileWrite { offset, data } => {
                    fake_file.write_at(offset, data.as_slice());
                    file_inode.write_at(offset, data.as_slice());
                }
            }
        }
        // Clearing must reclaim everything and leave the file empty.
        file_inode.clear();
        assert_eq!(file_inode.size(), 0);
    }
}

View File

@ -16,6 +16,7 @@ virtio-drivers = { git = "https://github.com/rcore-os/virtio-drivers", rev = "4e
easy-fs = { path = "../easy-fs" }
log = "0.4"
sbi-rt = { version = "0.0.2", features = ["legacy"] }
lock_api = "0.4.11"
[profile.release]
debug = true

View File

@ -44,7 +44,7 @@ $(KERNEL_BIN): kernel
fs-img: $(APPS)
@cd ../user && make build TEST=$(TEST)
@rm -f $(FS_IMG)
@cd ../easy-fs-fuse && cargo run --release -- -s ../user/src/bin/ -t ../user/target/riscv64gc-unknown-none-elf/release/
@cd ../easy-fs && cargo run --example fuse -- -s ../user/src/bin/ -t ../user/target/riscv64gc-unknown-none-elf/release/
$(APPS):

View File

@ -7,6 +7,6 @@ _start:
.section .bss.stack
.globl boot_stack_lower_bound
boot_stack_lower_bound:
.space 4096 * 16
.space 4096 * 256
.globl boot_stack_top
boot_stack_top:

View File

@ -7,12 +7,16 @@
use super::File;
use crate::drivers::BLOCK_DEVICE;
use crate::mm::UserBuffer;
use crate::sync::UPSafeCell;
use crate::sync::{RawExclusiveLock, UPSafeCell};
use alloc::sync::Arc;
use alloc::vec::Vec;
use bitflags::*;
use easy_fs::{EasyFileSystem, Inode};
use easy_fs::{BlockCacheMgr, EasyFileSystem, Inode};
use lazy_static::*;
const BLOCK_CACHE_SIZE: usize = 64;
type InodeType = Inode<RawExclusiveLock, BLOCK_CACHE_SIZE, RawExclusiveLock, RawExclusiveLock>;
/// A wrapper around a filesystem inode
/// to implement File trait atop
pub struct OSInode {
@ -23,12 +27,12 @@ pub struct OSInode {
/// The OS inode inner in 'UPSafeCell'
pub struct OSInodeInner {
offset: usize,
inode: Arc<Inode>,
inode: Arc<InodeType>,
}
impl OSInode {
/// Construct an OS inode from a inode
pub fn new(readable: bool, writable: bool, inode: Arc<Inode>) -> Self {
pub fn new(readable: bool, writable: bool, inode: Arc<InodeType>) -> Self {
Self {
readable,
writable,
@ -53,15 +57,21 @@ impl OSInode {
}
lazy_static! {
pub static ref ROOT_INODE: Arc<Inode> = {
let efs = EasyFileSystem::open(BLOCK_DEVICE.clone());
Arc::new(EasyFileSystem::root_inode(&efs))
pub static ref ROOT_INODE: Arc<InodeType> = {
let block_dev = BLOCK_DEVICE.clone();
let bcache_mgr: BlockCacheMgr<BLOCK_CACHE_SIZE, RawExclusiveLock> =
BlockCacheMgr::new(&block_dev);
let efs: EasyFileSystem<BLOCK_CACHE_SIZE, RawExclusiveLock, _> =
EasyFileSystem::open(bcache_mgr);
let efs = Arc::new(lock_api::Mutex::<RawExclusiveLock, _>::new(efs));
Arc::new(Inode::root_inode(&efs))
};
}
/// List all files in the filesystems
#[allow(warnings)]
pub fn list_apps() {
println!("/**** APPS ****");
for app in ROOT_INODE.ls() {
for app in ROOT_INODE.listdir() {
println!("{}", app);
}
println!("**************/");

View File

@ -1,4 +1,4 @@
//! Synchronization and interior mutability primitives
mod up;
pub use up::UPSafeCell;
pub use up::{RawExclusiveLock, UPSafeCell};

View File

@ -1,5 +1,6 @@
//! Uniprocessor interior mutability primitives
use core::cell::{RefCell, RefMut};
use core::sync::atomic::{AtomicBool, Ordering};
/// Wrap a static data structure inside it so that we are
/// able to access it without any `unsafe`.
@ -28,3 +29,23 @@ impl<T> UPSafeCell<T> {
self.inner.borrow_mut()
}
}
/// The sync primitive used by easy-fs.
///
/// A minimal exclusive lock over a single `AtomicBool` flag (`true` = held).
/// The enclosing module describes itself as uniprocessor interior-mutability
/// primitives, so no spinning is implemented: a contended `lock` is treated
/// as a reentrancy bug and panics.
pub struct RawExclusiveLock(AtomicBool);

unsafe impl lock_api::RawMutex for RawExclusiveLock {
    const INIT: Self = Self(AtomicBool::new(false));
    // Guards must not be sent across threads.
    type GuardMarker = lock_api::GuardNoSend;
    fn lock(&self) {
        // Panic instead of blocking: if the flag is already set, a second
        // acquisition on the same CPU would deadlock anyway.
        // NOTE(review): load-then-store is not an atomic acquire, and all
        // orderings here are Relaxed, so this provides no cross-CPU
        // synchronization — sound only on a single hart; confirm if SMP is
        // ever enabled.
        assert_eq!(self.0.load(Ordering::Relaxed), false);
        self.0.store(true, Ordering::Relaxed);
    }
    fn try_lock(&self) -> bool {
        // Atomically flip false -> true; fails (returns false) if held.
        self.0
            .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
    }
    unsafe fn unlock(&self) {
        self.0.store(false, Ordering::Relaxed);
    }
}