Refactor easy-fs

Yifan Wu 2021-02-24 03:42:45 +08:00
parent 12c6c53af5
commit 1346fb1a1f
14 changed files with 329 additions and 278 deletions

.gitignore

@@ -6,4 +6,8 @@ os/Cargo.lock
 user/target/*
 user/.idea/*
 user/Cargo.lock
+easy-fs/Cargo.lock
+easy-fs/target/*
+easy-fs-fuse/Cargo.lock
+easy-fs-fuse/target/*
 tools/

easy-fs-fuse/Cargo.toml (new file)

@@ -0,0 +1,12 @@
+[package]
+name = "easy-fs-fuse"
+version = "0.1.0"
+authors = ["Yifan Wu <shinbokuow@163.com>"]
+edition = "2018"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+clap = "2.33.3"
+easy-fs = { path = "../easy-fs" }
+rand = "0.8.0"

easy-fs-fuse/src/main.rs

@@ -1,6 +1,3 @@
-extern crate easy_fs;
-extern crate alloc;
-
 use easy_fs::{
     BlockDevice,
     EasyFileSystem,
@@ -8,7 +5,8 @@ use easy_fs::{
 use std::fs::{File, OpenOptions, read_dir};
 use std::io::{Read, Write, Seek, SeekFrom};
 use std::sync::Mutex;
-use alloc::sync::Arc;
+use std::sync::Arc;
+use clap::{Arg, App};
 
 const BLOCK_SZ: usize = 512;
@@ -34,15 +32,30 @@ fn main() {
     easy_fs_pack().expect("Error when packing easy-fs!");
 }
 
-static TARGET_PATH: &str = "../user/target/riscv64gc-unknown-none-elf/release/";
-
 fn easy_fs_pack() -> std::io::Result<()> {
+    let matches = App::new("EasyFileSystem packer")
+        .arg(Arg::with_name("source")
+            .short("s")
+            .long("source")
+            .takes_value(true)
+            .help("Executable source dir(with backslash)")
+        )
+        .arg(Arg::with_name("target")
+            .short("t")
+            .long("target")
+            .takes_value(true)
+            .help("Executable target dir(with backslash)")
+        )
+        .get_matches();
+    let src_path = matches.value_of("source").unwrap();
+    let target_path = matches.value_of("target").unwrap();
+    println!("src_path = {}\ntarget_path = {}", src_path, target_path);
     let block_file = Arc::new(BlockFile(Mutex::new({
         let f = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
-            .open(format!("{}{}", TARGET_PATH, "fs.img"))?;
+            .open(format!("{}{}", target_path, "fs.img"))?;
         f.set_len(8192 * 512).unwrap();
         f
     })));
@@ -53,7 +66,7 @@ fn easy_fs_pack() -> std::io::Result<()> {
         1,
     );
     let root_inode = Arc::new(EasyFileSystem::root_inode(&efs));
-    let apps: Vec<_> = read_dir("../user/src/bin")
+    let apps: Vec<_> = read_dir(src_path)
         .unwrap()
         .into_iter()
         .map(|dir_entry| {
@@ -64,7 +77,7 @@ fn easy_fs_pack() -> std::io::Result<()> {
         .collect();
     for app in apps {
         // load app data from host file system
-        let mut host_file = File::open(format!("{}{}", TARGET_PATH, app)).unwrap();
+        let mut host_file = File::open(format!("{}{}", target_path, app)).unwrap();
         let mut all_data: Vec<u8> = Vec::new();
         host_file.read_to_end(&mut all_data).unwrap();
         // create a file in easy-fs
@@ -79,22 +92,24 @@ fn easy_fs_pack() -> std::io::Result<()> {
     Ok(())
 }
 
-/*
 #[test]
 fn efs_test() -> std::io::Result<()> {
-    let block_file = Arc::new(BlockFile(Mutex::new(
-        OpenOptions::new()
+    let block_file = Arc::new(BlockFile(Mutex::new({
+        let f = OpenOptions::new()
            .read(true)
            .write(true)
-            .open("target/fs.img")?
-    )));
+            .create(true)
+            .open("target/fs.img")?;
+        f.set_len(8192 * 512).unwrap();
+        f
+    })));
     EasyFileSystem::create(
         block_file.clone(),
         4096,
         1,
     );
     let efs = EasyFileSystem::open(block_file.clone());
-    let mut root_inode = EasyFileSystem::root_inode(&efs);
+    let root_inode = EasyFileSystem::root_inode(&efs);
     root_inode.create("filea");
     root_inode.create("fileb");
     for name in root_inode.ls() {
@@ -146,5 +161,4 @@ fn efs_test() -> std::io::Result<()> {
     random_str_test((12 + 128) * BLOCK_SZ);
     Ok(())
 }
-*/

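The packer drives easy-fs through the BlockFile wrapper it constructs above. That wrapper lives elsewhere in main.rs and is not part of this diff; a minimal sketch of such a file-backed BlockDevice, assuming only the read_block/write_block trait methods used elsewhere in this commit, could look like this:

use easy_fs::BlockDevice;
use std::fs::File;
use std::io::{Read, Write, Seek, SeekFrom};
use std::sync::Mutex;

const BLOCK_SZ: usize = 512;

// Back the easy-fs block device with an ordinary host file,
// addressed in BLOCK_SZ-sized chunks.
struct BlockFile(Mutex<File>);

impl BlockDevice for BlockFile {
    fn read_block(&self, block_id: usize, buf: &mut [u8]) {
        let mut file = self.0.lock().unwrap();
        file.seek(SeekFrom::Start((block_id * BLOCK_SZ) as u64)).unwrap();
        file.read_exact(buf).unwrap();
    }
    fn write_block(&self, block_id: usize, buf: &[u8]) {
        let mut file = self.0.lock().unwrap();
        file.seek(SeekFrom::Start((block_id * BLOCK_SZ) as u64)).unwrap();
        file.write_all(buf).unwrap();
    }
}

Each block maps to one BLOCK_SZ-sized slice of the image file, which is why the packer sizes the image with f.set_len(8192 * 512), i.e. 8192 blocks.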
easy-fs/Cargo.toml

@@ -8,5 +8,4 @@ edition = "2018"
 [dependencies]
 spin = "0.7.0"
 lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
-#rand = "0.8.0"

easy-fs/build.rs (deleted)

@@ -1,6 +0,0 @@
-static TARGET_PATH: &str = "../user/target/riscv64gc-unknown-none-elf/release/";
-
-fn main() {
-    println!("cargo:rerun-if-changed=../user/src/");
-    println!("cargo:rerun-if-changed={}", TARGET_PATH);
-}

easy-fs/src/bitmap.rs

@@ -1,7 +1,9 @@
 use alloc::sync::Arc;
-use super::BlockDevice;
-use super::Dirty;
-use super::BLOCK_SZ;
+use super::{
+    BlockDevice,
+    BLOCK_SZ,
+    get_block_cache,
+};
 
 type BitmapBlock = [u64; 64];
@@ -26,41 +28,45 @@ impl Bitmap {
             blocks,
         }
     }
 
     pub fn alloc(&self, block_device: &Arc<dyn BlockDevice>) -> Option<usize> {
         for block_id in 0..self.blocks {
-            let mut dirty_bitmap_block: Dirty<BitmapBlock> = Dirty::new(
+            let pos = get_block_cache(
                 block_id + self.start_block_id as usize,
-                0,
-                block_device.clone()
-            );
-            let bitmap_block = dirty_bitmap_block.get_mut();
-            if let Some((bits64_pos, inner_pos)) = bitmap_block
-                .iter()
-                .enumerate()
-                .find(|(_, bits64)| **bits64 != u64::MAX)
-                .map(|(bits64_pos, bits64)| {
-                    (bits64_pos, bits64.trailing_ones() as usize)
-                }) {
-                // modify cache
-                bitmap_block[bits64_pos] |= 1u64 << inner_pos;
-                return Some(block_id * BLOCK_BITS + bits64_pos * 64 + inner_pos as usize);
-                // after dirty is dropped, data will be written back automatically
+                Arc::clone(block_device),
+            ).lock().modify(0, |bitmap_block: &mut BitmapBlock| {
+                if let Some((bits64_pos, inner_pos)) = bitmap_block
+                    .iter()
+                    .enumerate()
+                    .find(|(_, bits64)| **bits64 != u64::MAX)
+                    .map(|(bits64_pos, bits64)| {
+                        (bits64_pos, bits64.trailing_ones() as usize)
+                    }) {
+                    // modify cache
+                    bitmap_block[bits64_pos] |= 1u64 << inner_pos;
+                    Some(block_id * BLOCK_BITS + bits64_pos * 64 + inner_pos as usize)
+                } else {
+                    None
+                }
+            });
+            if pos.is_some() {
+                return pos;
             }
         }
         None
     }
 
     pub fn dealloc(&self, block_device: &Arc<dyn BlockDevice>, bit: usize) {
         let (block_pos, bits64_pos, inner_pos) = decomposition(bit);
-        let mut dirty_bitmap_block: Dirty<BitmapBlock> = Dirty::new(
+        get_block_cache(
             block_pos + self.start_block_id,
-            0,
-            block_device.clone(),
-        );
-        dirty_bitmap_block.modify(|bitmap_block| {
+            Arc::clone(block_device)
+        ).lock().modify(0, |bitmap_block: &mut BitmapBlock| {
             assert!(bitmap_block[bits64_pos] & (1u64 << inner_pos) > 0);
             bitmap_block[bits64_pos] -= 1u64 << inner_pos;
         });
     }
 
     pub fn maximum(&self) -> usize {
         self.blocks * BLOCK_BITS
     }

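dealloc above leans on a decomposition helper that is outside this diff. Assuming BLOCK_BITS = BLOCK_SZ * 8, a sketch of what it has to compute (bitmap block index, u64 slot inside that block, bit inside the u64) is:

const BLOCK_SZ: usize = 512;
const BLOCK_BITS: usize = BLOCK_SZ * 8;

/// Split a global bit index into (bitmap block, u64 slot in that block, bit in that u64).
fn decomposition(mut bit: usize) -> (usize, usize, usize) {
    let block_pos = bit / BLOCK_BITS;
    bit %= BLOCK_BITS;
    (block_pos, bit / 64, bit % 64)
}

// e.g. bit 4100 -> block 1, u64 slot 0, bit 4   (4100 = 4096 + 4)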
easy-fs/src/block_cache.rs

@@ -1,9 +1,9 @@
 use super::{
+    BlockDevice,
     BLOCK_SZ,
-    BlockDevice,
 };
-use alloc::sync::{Arc, Weak};
-use alloc::collections::BTreeMap;
+use alloc::collections::VecDeque;
+use alloc::sync::Arc;
 use lazy_static::*;
 use spin::Mutex;
@@ -11,76 +11,118 @@ pub struct BlockCache {
     cache: [u8; BLOCK_SZ],
     block_id: usize,
     block_device: Arc<dyn BlockDevice>,
+    modified: bool,
 }
 
 impl BlockCache {
-    pub fn new(block_id: usize, block_device: Arc<dyn BlockDevice>) -> Self {
+    /// Load a new BlockCache from disk.
+    pub fn new(
+        block_id: usize,
+        block_device: Arc<dyn BlockDevice>
+    ) -> Self {
         let mut cache = [0u8; BLOCK_SZ];
         block_device.read_block(block_id, &mut cache);
         Self {
             cache,
             block_id,
             block_device,
+            modified: false,
         }
     }
-    pub fn start_addr(&self, offset: usize) -> usize {
+
+    fn addr_of_offset(&self, offset: usize) -> usize {
         &self.cache[offset] as *const _ as usize
     }
+
+    pub fn get_ref<T>(&self, offset: usize) -> &T where T: Sized {
+        let type_size = core::mem::size_of::<T>();
+        assert!(offset + type_size <= BLOCK_SZ);
+        let addr = self.addr_of_offset(offset);
+        unsafe { &*(addr as *const T) }
+    }
+
+    pub fn get_mut<T>(&mut self, offset: usize) -> &mut T where T: Sized {
+        let type_size = core::mem::size_of::<T>();
+        assert!(offset + type_size <= BLOCK_SZ);
+        self.modified = true;
+        let addr = self.addr_of_offset(offset);
+        unsafe { &mut *(addr as *mut T) }
+    }
+
+    pub fn read<T, V>(&self, offset: usize, f: impl FnOnce(&T) -> V) -> V {
+        f(self.get_ref(offset))
+    }
+
+    pub fn modify<T, V>(&mut self, offset:usize, f: impl FnOnce(&mut T) -> V) -> V {
+        f(self.get_mut(offset))
+    }
+
+    pub fn sync(&mut self) {
+        if self.modified {
+            self.modified = false;
+            self.block_device.write_block(self.block_id, &self.cache);
+        }
+    }
 }
 
 impl Drop for BlockCache {
     fn drop(&mut self) {
-        // write back
-        self.block_device.write_block(self.block_id, &self.cache);
-        // invalid in block cache manager
-        BLOCK_CACHE_MANAGER.lock().invalid(self.block_id);
+        self.sync()
     }
 }
 
-pub struct BlockCacheManager {
-    map: BTreeMap<usize, Weak<BlockCache>>,
-}
+const BLOCK_CACHE_SIZE: usize = 16;
 
-lazy_static! {
-    static ref BLOCK_CACHE_MANAGER: Mutex<BlockCacheManager> = Mutex::new(
-        BlockCacheManager::new()
-    );
+pub struct BlockCacheManager {
+    queue: VecDeque<(usize, Arc<Mutex<BlockCache>>)>,
 }
 
 impl BlockCacheManager {
     pub fn new() -> Self {
-        Self { map: BTreeMap::new() }
+        Self { queue: VecDeque::new() }
     }
-    pub fn get(
+
+    pub fn get_block_cache(
         &mut self,
         block_id: usize,
-        block_device: Arc<dyn BlockDevice>
-    ) -> Arc<BlockCache> {
-        if let Some(block_cache) = self.map.get(&block_id) {
-            // return cloned
-            block_cache.upgrade().unwrap().clone()
+        block_device: Arc<dyn BlockDevice>,
+    ) -> Arc<Mutex<BlockCache>> {
+        if let Some(pair) = self.queue
+            .iter()
+            .find(|pair| pair.0 == block_id) {
+                Arc::clone(&pair.1)
         } else {
-            // fetch from disk
-            let block_cache = Arc::new(BlockCache::new(
-                block_id,
-                block_device.clone()
+            // substitute
+            if self.queue.len() == BLOCK_CACHE_SIZE {
+                // from front to tail
+                if let Some((idx, _)) = self.queue
+                    .iter()
+                    .enumerate()
+                    .find(|(_, pair)| Arc::strong_count(&pair.1) == 1) {
+                    self.queue.drain(idx..=idx);
+                } else {
+                    panic!("Run out of BlockCache!");
+                }
+            }
+            // load block into mem and push back
+            let block_cache = Arc::new(Mutex::new(
+                BlockCache::new(block_id, Arc::clone(&block_device))
            ));
-            self.map.insert(
-                block_id,
-                Arc::downgrade(&block_cache),
-            );
-            // return
+            self.queue.push_back((block_id, Arc::clone(&block_cache)));
             block_cache
         }
     }
-    pub fn invalid(&mut self, block_id: usize) {
-        assert!(self.map.remove(&block_id).is_some());
-    }
+}
+
+lazy_static! {
+    pub static ref BLOCK_CACHE_MANAGER: Mutex<BlockCacheManager> = Mutex::new(
+        BlockCacheManager::new()
+    );
 }
 
 pub fn get_block_cache(
     block_id: usize,
     block_device: Arc<dyn BlockDevice>
-) -> Arc<BlockCache> {
-    BLOCK_CACHE_MANAGER.lock().get(block_id, block_device)
+) -> Arc<Mutex<BlockCache>> {
+    BLOCK_CACHE_MANAGER.lock().get_block_cache(block_id, block_device)
 }

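With this change every other module goes through get_block_cache(...).lock() and the typed read/modify accessors instead of holding a Dirty<T> handle. A sketch of the calling pattern, using a made-up in-crate helper (bump_counter is illustrative only and treats the first 8 bytes of a block as a u64):

use alloc::sync::Arc;

// Illustrative in-crate caller of the new interface
// (BlockDevice and get_block_cache as defined above).
fn bump_counter(block_id: usize, block_device: &Arc<dyn BlockDevice>) -> u64 {
    get_block_cache(block_id, Arc::clone(block_device))
        .lock()                        // Arc<Mutex<BlockCache>> -> MutexGuard<BlockCache>
        .modify(0, |counter: &mut u64| {
            *counter += 1;             // get_mut::<u64> marks the block as modified
            *counter
        })
    // write-back happens in sync(), called from Drop when the entry is evicted
}

Compared with the old Weak-based map, the fixed-size queue caps resident blocks at BLOCK_CACHE_SIZE and only evicts entries whose strong count has dropped back to one.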
easy-fs/src/dirty.rs (deleted)

@@ -1,44 +0,0 @@
-use super::{
-    BlockDevice,
-    BLOCK_SZ,
-    BlockCache,
-    get_block_cache,
-};
-use alloc::sync::Arc;
-use core::marker::PhantomData;
-
-pub struct Dirty<T> {
-    block_cache: Arc<BlockCache>,
-    offset: usize,
-    phantom: PhantomData<T>,
-}
-
-impl<T> Dirty<T> where T: Sized {
-    pub fn new(block_id: usize, offset: usize, block_device: Arc<dyn BlockDevice>) -> Self {
-        Self {
-            block_cache: get_block_cache(block_id, block_device.clone()),
-            offset,
-            phantom: PhantomData,
-        }
-    }
-    pub fn get_mut(&mut self) -> &mut T {
-        let type_size = core::mem::size_of::<T>();
-        // assert that the struct is inside a block
-        assert!(self.offset + type_size <= BLOCK_SZ);
-        let start_addr = self.block_cache.start_addr(self.offset);
-        unsafe { &mut *(start_addr as *mut T) }
-    }
-    pub fn get_ref(&self) -> &T {
-        let type_size = core::mem::size_of::<T>();
-        // assert that the struct is inside a block
-        assert!(self.offset + type_size <= BLOCK_SZ);
-        let start_addr = self.block_cache.start_addr(self.offset);
-        unsafe { &*(start_addr as *const T) }
-    }
-    pub fn read<V>(&self, f: impl FnOnce(&T) -> V) -> V {
-        f(self.get_ref())
-    }
-    pub fn modify<V>(&mut self, f: impl FnOnce(&mut T) -> V) -> V {
-        f(self.get_mut())
-    }
-}

easy-fs/src/efs.rs

@@ -6,8 +6,8 @@ use super::{
     SuperBlock,
     DiskInode,
     DiskInodeType,
-    Dirty,
     Inode,
+    get_block_cache,
 };
 use crate::BLOCK_SZ;
@@ -41,7 +41,7 @@ impl EasyFileSystem {
             data_bitmap_blocks as usize,
         );
         let mut efs = Self {
-            block_device,
+            block_device: Arc::clone(&block_device),
             inode_bitmap,
             data_bitmap,
             inode_area_start_block: 1 + inode_bitmap_blocks,
@@ -49,14 +49,19 @@ impl EasyFileSystem {
         };
         // clear all blocks
         for i in 0..total_blocks {
-            efs.get_block(i).modify(|data_block| {
-                for byte in data_block.iter_mut() {
-                    *byte = 0;
-                }
+            get_block_cache(
+                i as usize,
+                Arc::clone(&block_device)
+            )
+            .lock()
+            .modify(0, |data_block: &mut DataBlock| {
+                for byte in data_block.iter_mut() { *byte = 0; }
             });
         }
         // initialize SuperBlock
-        efs.get_super_block().modify(|super_block| {
+        get_block_cache(0, Arc::clone(&block_device))
+        .lock()
+        .modify(0, |super_block: &mut SuperBlock| {
             super_block.initialize(
                 total_blocks,
                 inode_bitmap_blocks,
@@ -68,10 +73,15 @@ impl EasyFileSystem {
         // write back immediately
         // create a inode for root node "/"
         assert_eq!(efs.alloc_inode(), 0);
-        efs.get_disk_inode(0).modify(|disk_inode| {
+        let (root_inode_block_id, root_inode_offset) = efs.get_disk_inode_pos(0);
+        get_block_cache(
+            root_inode_block_id as usize,
+            Arc::clone(&block_device)
+        )
+        .lock()
+        .modify(root_inode_offset, |disk_inode: &mut DiskInode| {
             disk_inode.initialize(
-                DiskInodeType::Directory,
-                efs.alloc_data(),
+                DiskInodeType::Directory,efs.alloc_data()
             );
         });
         Arc::new(Mutex::new(efs))
@@ -79,54 +89,56 @@ impl EasyFileSystem {
 
     pub fn open(block_device: Arc<dyn BlockDevice>) -> Arc<Mutex<Self>> {
         // read SuperBlock
-        let super_block_dirty: Dirty<SuperBlock> = Dirty::new(0, 0, block_device.clone());
-        let super_block = super_block_dirty.get_ref();
-        assert!(super_block.is_valid(), "Error loading EFS!");
-        let inode_total_blocks =
-            super_block.inode_bitmap_blocks + super_block.inode_area_blocks;
-        let efs = Self {
-            block_device,
-            inode_bitmap: Bitmap::new(
-                1,
-                super_block.inode_bitmap_blocks as usize
-            ),
-            data_bitmap: Bitmap::new(
-                (1 + inode_total_blocks) as usize,
-                super_block.data_bitmap_blocks as usize,
-            ),
-            inode_area_start_block: 1 + super_block.inode_bitmap_blocks,
-            data_area_start_block: 1 + inode_total_blocks + super_block.data_bitmap_blocks,
-        };
-        Arc::new(Mutex::new(efs))
+        get_block_cache(0, Arc::clone(&block_device))
+            .lock()
+            .read(0, |super_block: &SuperBlock| {
+                assert!(super_block.is_valid(), "Error loading EFS!");
+                let inode_total_blocks =
+                    super_block.inode_bitmap_blocks + super_block.inode_area_blocks;
+                let efs = Self {
+                    block_device,
+                    inode_bitmap: Bitmap::new(
+                        1,
+                        super_block.inode_bitmap_blocks as usize
+                    ),
+                    data_bitmap: Bitmap::new(
+                        (1 + inode_total_blocks) as usize,
+                        super_block.data_bitmap_blocks as usize,
+                    ),
+                    inode_area_start_block: 1 + super_block.inode_bitmap_blocks,
+                    data_area_start_block: 1 + inode_total_blocks + super_block.data_bitmap_blocks,
+                };
+                Arc::new(Mutex::new(efs))
+            })
     }
 
     pub fn root_inode(efs: &Arc<Mutex<Self>>) -> Inode {
+        let block_device = Arc::clone(&efs.lock().block_device);
         Inode::new(
             0,
-            efs.clone(),
-            efs.lock().block_device.clone(),
+            Arc::clone(efs),
+            block_device,
         )
     }
 
+    /*
     fn get_super_block(&self) -> Dirty<SuperBlock> {
         Dirty::new(0, 0, self.block_device.clone())
     }
+    */
 
-    pub fn get_disk_inode(&self, inode_id: u32) -> Dirty<DiskInode> {
+    pub fn get_disk_inode_pos(&self, inode_id: u32) -> (u32, usize) {
         let inode_size = core::mem::size_of::<DiskInode>();
         let inodes_per_block = (BLOCK_SZ / inode_size) as u32;
         let block_id = self.inode_area_start_block + inode_id / inodes_per_block;
-        Dirty::new(
-            block_id as usize,
-            (inode_id % inodes_per_block) as usize * inode_size,
-            self.block_device.clone(),
-        )
+        (block_id, (inode_id % inodes_per_block) as usize * inode_size)
     }
 
-    pub fn get_data_block(&self, data_block_id: u32) -> Dirty<DataBlock> {
-        self.get_block(self.data_area_start_block + data_block_id)
+    pub fn get_data_block_id(&self, data_block_id: u32) -> u32 {
+        self.data_area_start_block + data_block_id
     }
 
+    /*
     fn get_block(&self, block_id: u32) -> Dirty<DataBlock> {
         Dirty::new(
             block_id as usize,
@@ -134,6 +146,7 @@ impl EasyFileSystem {
             self.block_device.clone(),
         )
     }
+    */
 
     pub fn alloc_inode(&mut self) -> u32 {
         self.inode_bitmap.alloc(&self.block_device).unwrap() as u32

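get_disk_inode_pos replaces the old Dirty<DiskInode> handle with plain (block, offset) arithmetic. A worked example of that mapping, pulled out into a standalone function and assuming a hypothetical 128-byte DiskInode (the real size comes from core::mem::size_of):

const BLOCK_SZ: usize = 512;

// Same arithmetic as get_disk_inode_pos, isolated for illustration.
fn disk_inode_pos(inode_area_start_block: u32, inode_id: u32, inode_size: usize) -> (u32, usize) {
    let inodes_per_block = (BLOCK_SZ / inode_size) as u32;
    (
        inode_area_start_block + inode_id / inodes_per_block,
        (inode_id % inodes_per_block) as usize * inode_size,
    )
}

// With 128-byte inodes there are 4 per block, so inode 5 lands in the second
// inode-area block at byte offset 128:
// disk_inode_pos(2, 5, 128) == (3, 128)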
easy-fs/src/layout.rs

@@ -2,7 +2,7 @@ use core::fmt::{Debug, Formatter, Result};
 use super::{
     BLOCK_SZ,
     BlockDevice,
-    Dirty,
+    get_block_cache,
 };
 use alloc::sync::Arc;
 use alloc::vec::Vec;
@@ -87,6 +87,7 @@ impl DiskInode {
     pub fn is_dir(&self) -> bool {
         self.type_ == DiskInodeType::Directory
     }
+    #[allow(unused)]
     pub fn is_file(&self) -> bool {
         self.type_ == DiskInodeType::File
     }
@@ -102,14 +103,11 @@ impl DiskInode {
             self.direct[inner_id]
         } else {
             // only support indirect1 now
-            Dirty::<IndirectBlock>::new(
-                self.indirect1 as usize,
-                0,
-                block_device.clone()
-            ).read(|indirect_block| {
-                // it will panic if file is too large
-                indirect_block[inner_id - INODE_DIRECT_COUNT]
-            })
+            get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
+                .lock()
+                .read(0, |indirect_block: &IndirectBlock| {
+                    indirect_block[inner_id - INODE_DIRECT_COUNT]
+                })
         }
     }
     pub fn blocks_num_needed(&self, new_size: u32) -> u32 {
@@ -126,11 +124,12 @@ impl DiskInode {
         let last_blocks = self.blocks();
         self.size = new_size;
         let current_blocks = self.blocks();
-        Dirty::<IndirectBlock>::new(
+        get_block_cache(
             self.indirect1 as usize,
-            0,
-            block_device.clone()
-        ).modify(|indirect_block| {
+            Arc::clone(block_device)
+        )
+        .lock()
+        .modify(0, |indirect_block: &mut IndirectBlock| {
             for i in 0..current_blocks - last_blocks {
                 let inner_id = (last_blocks + i) as usize;
                 let new_block = new_blocks[i as usize];
@@ -152,11 +151,12 @@ impl DiskInode {
             self.direct[i] = 0;
         }
         if blocks > INODE_DIRECT_COUNT {
-            Dirty::<IndirectBlock>::new(
+            get_block_cache(
                 self.indirect1 as usize,
-                0,
-                block_device.clone(),
-            ).modify(|indirect_block| {
+                Arc::clone(block_device),
+            )
+            .lock()
+            .modify(0, |indirect_block: &mut IndirectBlock| {
                 for i in 0..blocks - INODE_DIRECT_COUNT {
                     v.push(indirect_block[i]);
                     indirect_block[i] = 0;
@@ -185,11 +185,12 @@ impl DiskInode {
             // read and update read size
             let block_read_size = end_current_block - start;
             let dst = &mut buf[read_size..read_size + block_read_size];
-            Dirty::<DataBlock>::new(
+            get_block_cache(
                 self.get_block_id(start_block as u32, block_device) as usize,
-                0,
-                block_device.clone()
-            ).read(|data_block| {
+                Arc::clone(block_device),
+            )
+            .lock()
+            .read(0, |data_block: &DataBlock| {
                 let src = &data_block[start % BLOCK_SZ..start % BLOCK_SZ + block_read_size];
                 dst.copy_from_slice(src);
             });
@@ -219,11 +220,12 @@ impl DiskInode {
             end_current_block = end_current_block.min(end);
             // write and update write size
             let block_write_size = end_current_block - start;
-            Dirty::<DataBlock>::new(
+            get_block_cache(
                 self.get_block_id(start_block as u32, block_device) as usize,
-                0,
-                block_device.clone()
-            ).modify(|data_block| {
+                Arc::clone(block_device)
+            )
+            .lock()
+            .modify(0, |data_block: &mut DataBlock| {
                 let src = &buf[write_size..write_size + block_write_size];
                 let dst = &mut data_block[start % BLOCK_SZ..start % BLOCK_SZ + block_write_size];
                 dst.copy_from_slice(src);

easy-fs/src/lib.rs

@@ -5,7 +5,6 @@ extern crate alloc;
 mod block_dev;
 mod layout;
 mod efs;
-mod dirty;
 mod bitmap;
 mod vfs;
 mod block_cache;
@@ -15,6 +14,5 @@ pub use block_dev::BlockDevice;
 pub use efs::EasyFileSystem;
 pub use vfs::Inode;
 use layout::*;
-use dirty::Dirty;
 use bitmap::Bitmap;
-use block_cache::{BlockCache, get_block_cache};
+use block_cache::get_block_cache;

easy-fs/src/vfs.rs

@@ -1,12 +1,12 @@
 use super::{
     BlockDevice,
-    Dirty,
     DiskInode,
     DiskInodeType,
     DirEntry,
     DirentBytes,
     EasyFileSystem,
     DIRENT_SZ,
+    get_block_cache,
 };
 use alloc::sync::Arc;
 use alloc::string::String;
@@ -14,7 +14,8 @@ use alloc::vec::Vec;
 use spin::{Mutex, MutexGuard};
 
 pub struct Inode {
-    inode_id: u32,
+    block_id: usize,
+    block_offset: usize,
     fs: Arc<Mutex<EasyFileSystem>>,
     block_device: Arc<dyn BlockDevice>,
 }
@@ -25,37 +26,51 @@ impl Inode {
         fs: Arc<Mutex<EasyFileSystem>>,
         block_device: Arc<dyn BlockDevice>,
     ) -> Self {
+        let (block_id, block_offset) = fs.lock().get_disk_inode_pos(inode_id);
         Self {
-            inode_id,
+            block_id: block_id as usize,
+            block_offset,
             fs,
             block_device,
         }
     }
 
+    fn read_disk_inode<V>(&self, f: impl FnOnce(&DiskInode) -> V) -> V {
+        get_block_cache(
+            self.block_id,
+            Arc::clone(&self.block_device)
+        ).lock().read(self.block_offset, f)
+    }
+
+    fn modify_disk_inode<V>(&self, f: impl FnOnce(&mut DiskInode) -> V) -> V {
+        get_block_cache(
+            self.block_id,
+            Arc::clone(&self.block_device)
+        ).lock().modify(self.block_offset, f)
+    }
+
+    /*
     fn get_disk_inode(&self, fs: &mut MutexGuard<EasyFileSystem>) -> Dirty<DiskInode> {
         fs.get_disk_inode(self.inode_id)
     }
+    */
 
     fn find_inode_id(
         &self,
         name: &str,
-        inode: &Dirty<DiskInode>,
+        disk_inode: &DiskInode,
     ) -> Option<u32> {
         // assert it is a directory
-        assert!(inode.read(|inode| inode.is_dir()));
-        let file_count = inode.read(|inode| {
-            inode.size as usize
-        }) / DIRENT_SZ;
+        assert!(disk_inode.is_dir());
+        let file_count = (disk_inode.size as usize) / DIRENT_SZ;
         let mut dirent_space: DirentBytes = Default::default();
         for i in 0..file_count {
             assert_eq!(
-                inode.read(|inode| {
-                    inode.read_at(
-                        DIRENT_SZ * i,
-                        &mut dirent_space,
-                        &self.block_device,
-                    )
-                }),
+                disk_inode.read_at(
+                    DIRENT_SZ * i,
+                    &mut dirent_space,
+                    &self.block_device,
+                ),
                 DIRENT_SZ,
             );
             let dirent = DirEntry::from_bytes(&dirent_space);
@@ -67,9 +82,9 @@ impl Inode {
     }
 
     pub fn find(&self, name: &str) -> Option<Arc<Inode>> {
-        let mut fs = self.fs.lock();
-        let inode = self.get_disk_inode(&mut fs);
-        self.find_inode_id(name, &inode)
+        let _ = self.fs.lock();
+        self.read_disk_inode(|disk_inode| {
+            self.find_inode_id(name, disk_inode)
             .map(|inode_id| {
                 Arc::new(Self::new(
                     inode_id,
@@ -77,68 +92,69 @@ impl Inode {
                     self.block_device.clone(),
                 ))
             })
+        })
     }
 
     fn increase_size(
         &self,
         new_size: u32,
-        inode: &mut Dirty<DiskInode>,
+        disk_inode: &mut DiskInode,
         fs: &mut MutexGuard<EasyFileSystem>,
     ) {
-        let size = inode.read(|inode| inode.size);
-        if new_size < size {
+        if new_size < disk_inode.size {
             return;
         }
-        let blocks_needed = inode.read(|inode| {
-            inode.blocks_num_needed(new_size)
-        });
+        let blocks_needed = disk_inode.blocks_num_needed(new_size);
         let mut v: Vec<u32> = Vec::new();
         for _ in 0..blocks_needed {
             v.push(fs.alloc_data());
         }
-        inode.modify(|inode| {
-            inode.increase_size(new_size, v, &self.block_device);
-        });
+        disk_inode.increase_size(new_size, v, &self.block_device);
     }
 
     pub fn create(&self, name: &str) -> Option<Arc<Inode>> {
         let mut fs = self.fs.lock();
-        let mut inode = self.get_disk_inode(&mut fs);
-        // assert it is a directory
-        assert!(inode.read(|inode| inode.is_dir()));
-        // has the file been created?
-        if let Some(_) = self.find_inode_id(name, &inode) {
+        if self.modify_disk_inode(|root_inode| {
+            // assert it is a directory
+            assert!(root_inode.is_dir());
+            // has the file been created?
+            self.find_inode_id(name, root_inode)
+        }).is_some() {
             return None;
         }
+        //println!("same file does not exist in Inode::create.");
         // create a new file
         // alloc a inode with an indirect block
         let new_inode_id = fs.alloc_inode();
        let indirect1 = fs.alloc_data();
         // initialize inode
-        fs.get_disk_inode(new_inode_id).modify(|inode| {
-            inode.initialize(
-                DiskInodeType::File,
-                indirect1,
-            )
+        let (new_inode_block_id, new_inode_block_offset)
+            = fs.get_disk_inode_pos(new_inode_id);
+        //println!("new_inode_id={} ({},{})", new_inode_id, new_inode_block_id, new_inode_block_offset);
+        get_block_cache(
+            new_inode_block_id as usize,
+            Arc::clone(&self.block_device)
+        ).lock().modify(new_inode_block_offset, |new_inode: &mut DiskInode| {
+            new_inode.initialize(DiskInodeType::File, indirect1);
         });
-        // append file in the dirent
-        let file_count =
-            inode.read(|inode| inode.size as usize) / DIRENT_SZ;
-        let new_size = (file_count + 1) * DIRENT_SZ;
-        // increase size
-        self.increase_size(new_size as u32, &mut inode, &mut fs);
-        // write dirent
-        let dirent = DirEntry::new(name, new_inode_id);
-        inode.modify(|inode| {
-            inode.write_at(
+        //println!("new inode has been initialized.");
+        self.modify_disk_inode(|root_inode| {
+            // append file in the dirent
+            let file_count = (root_inode.size as usize) / DIRENT_SZ;
+            let new_size = (file_count + 1) * DIRENT_SZ;
+            // increase size
+            self.increase_size(new_size as u32, root_inode, &mut fs);
+            // write dirent
+            let dirent = DirEntry::new(name, new_inode_id);
+            root_inode.write_at(
                 file_count * DIRENT_SZ,
                 dirent.into_bytes(),
                 &self.block_device,
             );
         });
+        //println!("new file has been inserted into root inode.");
+        // release efs lock manually because we will acquire it again in Inode::new
+        drop(fs);
         // return inode
         Some(Arc::new(Self::new(
             new_inode_id,
@@ -148,53 +164,48 @@ impl Inode {
     }
 
     pub fn ls(&self) -> Vec<String> {
-        let mut fs = self.fs.lock();
-        let inode = self.get_disk_inode(&mut fs);
-        let file_count = inode.read(|inode| {
-            (inode.size as usize) / DIRENT_SZ
-        });
-        let mut v: Vec<String> = Vec::new();
-        for i in 0..file_count {
-            let mut dirent_bytes: DirentBytes = Default::default();
-            assert_eq!(
-                inode.read(|inode| {
-                    inode.read_at(
-                        i * DIRENT_SZ,
-                        &mut dirent_bytes,
-                        &self.block_device,
-                    )
-                }),
-                DIRENT_SZ,
-            );
-            v.push(String::from(DirEntry::from_bytes(&dirent_bytes).name()));
-        }
-        v
+        let _ = self.fs.lock();
+        self.read_disk_inode(|disk_inode| {
+            let file_count = (disk_inode.size as usize) / DIRENT_SZ;
+            let mut v: Vec<String> = Vec::new();
+            for i in 0..file_count {
+                let mut dirent_bytes: DirentBytes = Default::default();
+                assert_eq!(
+                    disk_inode.read_at(
                        i * DIRENT_SZ,
                        &mut dirent_bytes,
                        &self.block_device,
+                    ),
+                    DIRENT_SZ,
+                );
+                v.push(String::from(DirEntry::from_bytes(&dirent_bytes).name()));
+            }
+            v
+        })
     }
 
     pub fn read_at(&self, offset: usize, buf: &mut [u8]) -> usize {
-        let mut fs = self.fs.lock();
-        self.get_disk_inode(&mut fs).modify(|disk_inode| {
+        let _ = self.fs.lock();
+        self.read_disk_inode(|disk_inode| {
             disk_inode.read_at(offset, buf, &self.block_device)
         })
     }
 
     pub fn write_at(&self, offset: usize, buf: &[u8]) -> usize {
         let mut fs = self.fs.lock();
-        let mut inode = self.get_disk_inode(&mut fs);
-        self.increase_size((offset + buf.len()) as u32, &mut inode, &mut fs);
-        inode.modify(|disk_inode| {
+        self.modify_disk_inode(|disk_inode| {
+            self.increase_size((offset + buf.len()) as u32, disk_inode, &mut fs);
             disk_inode.write_at(offset, buf, &self.block_device)
         })
     }
 
     pub fn clear(&self) {
         let mut fs = self.fs.lock();
-        let mut inode = self.get_disk_inode(&mut fs);
-        let data_blocks_dealloc = inode.modify(|disk_inode| {
-            disk_inode.clear_size(&self.block_device)
+        self.modify_disk_inode(|disk_inode| {
+            let data_blocks_dealloc = disk_inode.clear_size(&self.block_device);
+            for data_block in data_blocks_dealloc.into_iter() {
+                fs.dealloc_data(data_block);
+            }
         });
-        for data_block in data_blocks_dealloc.into_iter() {
-            fs.dealloc_data(data_block);
-        }
     }
 }

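The explicit drop(fs) in create() matters because Inode::new() now locks the file system again to call get_disk_inode_pos, and spin::Mutex is not reentrant. A stripped-down illustration of that hazard (not code from the crate; Fs stands in for EasyFileSystem):

use spin::Mutex;

struct Fs; // stand-in for EasyFileSystem

fn create_like(fs: &Mutex<Fs>) {
    let guard = fs.lock();     // efs lock taken, as at the top of Inode::create
    // ... allocate the inode, write the directory entry ...
    drop(guard);               // without this drop, the lock below spins forever
    let _again = fs.lock();    // what Inode::new effectively does via get_disk_inode_pos
}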
os/Makefile

@@ -50,7 +50,7 @@ $(KERNEL_BIN): kernel
 $(FS_IMG): $(APPS)
 	@cd ../user && make build
-	@cd ../easy-fs && cargo run --release
+	@cd ../easy-fs-fuse && cargo run --release -- -s ../user/src/bin/ -t ../user/target/riscv64gc-unknown-none-elf/release/
 
 $(APPS):

user/Makefile

@@ -20,4 +20,4 @@ build: binary
 clean:
 	@cargo clean
 
 .PHONY: elf binary build clean