Refactor easy-fs

Yifan Wu 2021-02-24 03:42:45 +08:00
parent 12c6c53af5
commit 1346fb1a1f
14 changed files with 329 additions and 278 deletions

4
.gitignore vendored
View File

@@ -6,4 +6,8 @@ os/Cargo.lock
user/target/*
user/.idea/*
user/Cargo.lock
easy-fs/Cargo.lock
easy-fs/target/*
easy-fs-fuse/Cargo.lock
easy-fs-fuse/target/*
tools/

12
easy-fs-fuse/Cargo.toml Normal file
View File

@@ -0,0 +1,12 @@
[package]
name = "easy-fs-fuse"
version = "0.1.0"
authors = ["Yifan Wu <shinbokuow@163.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
clap = "2.33.3"
easy-fs = { path = "../easy-fs" }
rand = "0.8.0"

View File

@@ -1,6 +1,3 @@
extern crate easy_fs;
extern crate alloc;
use easy_fs::{
BlockDevice,
EasyFileSystem,
@@ -8,7 +5,8 @@ use easy_fs::{
use std::fs::{File, OpenOptions, read_dir};
use std::io::{Read, Write, Seek, SeekFrom};
use std::sync::Mutex;
use alloc::sync::Arc;
use std::sync::Arc;
use clap::{Arg, App};
const BLOCK_SZ: usize = 512;
@@ -34,15 +32,30 @@ fn main() {
easy_fs_pack().expect("Error when packing easy-fs!");
}
static TARGET_PATH: &str = "../user/target/riscv64gc-unknown-none-elf/release/";
fn easy_fs_pack() -> std::io::Result<()> {
let matches = App::new("EasyFileSystem packer")
.arg(Arg::with_name("source")
.short("s")
.long("source")
.takes_value(true)
.help("Executable source dir(with backslash)")
)
.arg(Arg::with_name("target")
.short("t")
.long("target")
.takes_value(true)
.help("Executable target dir(with backslash)")
)
.get_matches();
let src_path = matches.value_of("source").unwrap();
let target_path = matches.value_of("target").unwrap();
println!("src_path = {}\ntarget_path = {}", src_path, target_path);
let block_file = Arc::new(BlockFile(Mutex::new({
let f = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(format!("{}{}", TARGET_PATH, "fs.img"))?;
.open(format!("{}{}", target_path, "fs.img"))?;
f.set_len(8192 * 512).unwrap();
f
})));
@@ -53,7 +66,7 @@ fn easy_fs_pack() -> std::io::Result<()> {
1,
);
let root_inode = Arc::new(EasyFileSystem::root_inode(&efs));
let apps: Vec<_> = read_dir("../user/src/bin")
let apps: Vec<_> = read_dir(src_path)
.unwrap()
.into_iter()
.map(|dir_entry| {
@@ -64,7 +77,7 @@ fn easy_fs_pack() -> std::io::Result<()> {
.collect();
for app in apps {
// load app data from host file system
let mut host_file = File::open(format!("{}{}", TARGET_PATH, app)).unwrap();
let mut host_file = File::open(format!("{}{}", target_path, app)).unwrap();
let mut all_data: Vec<u8> = Vec::new();
host_file.read_to_end(&mut all_data).unwrap();
// create a file in easy-fs
@@ -79,22 +92,24 @@ fn easy_fs_pack() -> std::io::Result<()> {
Ok(())
}
/*
#[test]
fn efs_test() -> std::io::Result<()> {
let block_file = Arc::new(BlockFile(Mutex::new(
OpenOptions::new()
let block_file = Arc::new(BlockFile(Mutex::new({
let f = OpenOptions::new()
.read(true)
.write(true)
.open("target/fs.img")?
)));
.create(true)
.open("target/fs.img")?;
f.set_len(8192 * 512).unwrap();
f
})));
EasyFileSystem::create(
block_file.clone(),
4096,
1,
);
let efs = EasyFileSystem::open(block_file.clone());
let mut root_inode = EasyFileSystem::root_inode(&efs);
let root_inode = EasyFileSystem::root_inode(&efs);
root_inode.create("filea");
root_inode.create("fileb");
for name in root_inode.ls() {
@@ -146,5 +161,4 @@ fn efs_test() -> std::io::Result<()> {
random_str_test((12 + 128) * BLOCK_SZ);
Ok(())
}
*/
}

View File

@@ -8,5 +8,4 @@ edition = "2018"
[dependencies]
spin = "0.7.0"
lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
#rand = "0.8.0"
lazy_static = { version = "1.4.0", features = ["spin_no_std"] }

View File

@@ -1,6 +0,0 @@
static TARGET_PATH: &str = "../user/target/riscv64gc-unknown-none-elf/release/";
fn main() {
println!("cargo:rerun-if-changed=../user/src/");
println!("cargo:rerun-if-changed={}", TARGET_PATH);
}

View File

@@ -1,7 +1,9 @@
use alloc::sync::Arc;
use super::BlockDevice;
use super::Dirty;
use super::BLOCK_SZ;
use super::{
BlockDevice,
BLOCK_SZ,
get_block_cache,
};
type BitmapBlock = [u64; 64];
@@ -26,41 +28,45 @@ impl Bitmap {
blocks,
}
}
pub fn alloc(&self, block_device: &Arc<dyn BlockDevice>) -> Option<usize> {
for block_id in 0..self.blocks {
let mut dirty_bitmap_block: Dirty<BitmapBlock> = Dirty::new(
let pos = get_block_cache(
block_id + self.start_block_id as usize,
0,
block_device.clone()
);
let bitmap_block = dirty_bitmap_block.get_mut();
if let Some((bits64_pos, inner_pos)) = bitmap_block
.iter()
.enumerate()
.find(|(_, bits64)| **bits64 != u64::MAX)
.map(|(bits64_pos, bits64)| {
(bits64_pos, bits64.trailing_ones() as usize)
}) {
// modify cache
bitmap_block[bits64_pos] |= 1u64 << inner_pos;
return Some(block_id * BLOCK_BITS + bits64_pos * 64 + inner_pos as usize);
// after dirty is dropped, data will be written back automatically
Arc::clone(block_device),
).lock().modify(0, |bitmap_block: &mut BitmapBlock| {
if let Some((bits64_pos, inner_pos)) = bitmap_block
.iter()
.enumerate()
.find(|(_, bits64)| **bits64 != u64::MAX)
.map(|(bits64_pos, bits64)| {
(bits64_pos, bits64.trailing_ones() as usize)
}) {
// modify cache
bitmap_block[bits64_pos] |= 1u64 << inner_pos;
Some(block_id * BLOCK_BITS + bits64_pos * 64 + inner_pos as usize)
} else {
None
}
});
if pos.is_some() {
return pos;
}
}
None
}
pub fn dealloc(&self, block_device: &Arc<dyn BlockDevice>, bit: usize) {
let (block_pos, bits64_pos, inner_pos) = decomposition(bit);
let mut dirty_bitmap_block: Dirty<BitmapBlock> = Dirty::new(
get_block_cache(
block_pos + self.start_block_id,
0,
block_device.clone(),
);
dirty_bitmap_block.modify(|bitmap_block| {
Arc::clone(block_device)
).lock().modify(0, |bitmap_block: &mut BitmapBlock| {
assert!(bitmap_block[bits64_pos] & (1u64 << inner_pos) > 0);
bitmap_block[bits64_pos] -= 1u64 << inner_pos;
});
}
pub fn maximum(&self) -> usize {
self.blocks * BLOCK_BITS
}
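The `dealloc` path above relies on a `decomposition` helper that this hunk does not show. A minimal sketch of what it presumably computes, assuming it splits a global bit index into a (bitmap block, u64 word, bit-in-word) triple, with `BLOCK_BITS` being the `BLOCK_SZ * 8` bits covered by one bitmap block:

```rust
// Assumed shape of the helper called by Bitmap::dealloc (definition not shown
// in this diff): split a global bit index into (bitmap block index, u64 word
// index inside that block, bit index inside that word).
const BLOCK_BITS: usize = 512 * 8; // mirrors the crate's BLOCK_SZ * 8

fn decomposition(mut bit: usize) -> (usize, usize, usize) {
    let block_pos = bit / BLOCK_BITS; // which bitmap block holds the bit
    bit %= BLOCK_BITS;                // remaining offset inside that block
    (block_pos, bit / 64, bit % 64)   // (block, u64 word, bit within word)
}
```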

View File

@@ -1,9 +1,9 @@
use super::{
BlockDevice,
BLOCK_SZ,
BlockDevice,
};
use alloc::sync::{Arc, Weak};
use alloc::collections::BTreeMap;
use alloc::collections::VecDeque;
use alloc::sync::Arc;
use lazy_static::*;
use spin::Mutex;
@@ -11,76 +11,118 @@ pub struct BlockCache {
cache: [u8; BLOCK_SZ],
block_id: usize,
block_device: Arc<dyn BlockDevice>,
modified: bool,
}
impl BlockCache {
pub fn new(block_id: usize, block_device: Arc<dyn BlockDevice>) -> Self {
/// Load a new BlockCache from disk.
pub fn new(
block_id: usize,
block_device: Arc<dyn BlockDevice>
) -> Self {
let mut cache = [0u8; BLOCK_SZ];
block_device.read_block(block_id, &mut cache);
Self {
cache,
block_id,
block_device,
modified: false,
}
}
pub fn start_addr(&self, offset: usize) -> usize {
fn addr_of_offset(&self, offset: usize) -> usize {
&self.cache[offset] as *const _ as usize
}
pub fn get_ref<T>(&self, offset: usize) -> &T where T: Sized {
let type_size = core::mem::size_of::<T>();
assert!(offset + type_size <= BLOCK_SZ);
let addr = self.addr_of_offset(offset);
unsafe { &*(addr as *const T) }
}
pub fn get_mut<T>(&mut self, offset: usize) -> &mut T where T: Sized {
let type_size = core::mem::size_of::<T>();
assert!(offset + type_size <= BLOCK_SZ);
self.modified = true;
let addr = self.addr_of_offset(offset);
unsafe { &mut *(addr as *mut T) }
}
pub fn read<T, V>(&self, offset: usize, f: impl FnOnce(&T) -> V) -> V {
f(self.get_ref(offset))
}
pub fn modify<T, V>(&mut self, offset:usize, f: impl FnOnce(&mut T) -> V) -> V {
f(self.get_mut(offset))
}
pub fn sync(&mut self) {
if self.modified {
self.modified = false;
self.block_device.write_block(self.block_id, &self.cache);
}
}
}
impl Drop for BlockCache {
fn drop(&mut self) {
// write back
self.block_device.write_block(self.block_id, &self.cache);
// invalid in block cache manager
BLOCK_CACHE_MANAGER.lock().invalid(self.block_id);
self.sync()
}
}
pub struct BlockCacheManager {
map: BTreeMap<usize, Weak<BlockCache>>,
}
const BLOCK_CACHE_SIZE: usize = 16;
lazy_static! {
static ref BLOCK_CACHE_MANAGER: Mutex<BlockCacheManager> = Mutex::new(
BlockCacheManager::new()
);
pub struct BlockCacheManager {
queue: VecDeque<(usize, Arc<Mutex<BlockCache>>)>,
}
impl BlockCacheManager {
pub fn new() -> Self {
Self { map: BTreeMap::new() }
Self { queue: VecDeque::new() }
}
pub fn get(
pub fn get_block_cache(
&mut self,
block_id: usize,
block_device: Arc<dyn BlockDevice>
) -> Arc<BlockCache> {
if let Some(block_cache) = self.map.get(&block_id) {
// return cloned
block_cache.upgrade().unwrap().clone()
block_device: Arc<dyn BlockDevice>,
) -> Arc<Mutex<BlockCache>> {
if let Some(pair) = self.queue
.iter()
.find(|pair| pair.0 == block_id) {
Arc::clone(&pair.1)
} else {
// fetch from disk
let block_cache = Arc::new(BlockCache::new(
block_id,
block_device.clone()
// substitute
if self.queue.len() == BLOCK_CACHE_SIZE {
// from front to tail
if let Some((idx, _)) = self.queue
.iter()
.enumerate()
.find(|(_, pair)| Arc::strong_count(&pair.1) == 1) {
self.queue.drain(idx..=idx);
} else {
panic!("Run out of BlockCache!");
}
}
// load block into mem and push back
let block_cache = Arc::new(Mutex::new(
BlockCache::new(block_id, Arc::clone(&block_device))
));
self.map.insert(
block_id,
Arc::downgrade(&block_cache),
);
// return
self.queue.push_back((block_id, Arc::clone(&block_cache)));
block_cache
}
}
pub fn invalid(&mut self, block_id: usize) {
assert!(self.map.remove(&block_id).is_some());
}
}
lazy_static! {
pub static ref BLOCK_CACHE_MANAGER: Mutex<BlockCacheManager> = Mutex::new(
BlockCacheManager::new()
);
}
pub fn get_block_cache(
block_id: usize,
block_device: Arc<dyn BlockDevice>
) -> Arc<BlockCache> {
BLOCK_CACHE_MANAGER.lock().get(block_id, block_device)
) -> Arc<Mutex<BlockCache>> {
BLOCK_CACHE_MANAGER.lock().get_block_cache(block_id, block_device)
}
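With `Dirty<T>` removed, every caller now follows one pattern: fetch an `Arc<Mutex<BlockCache>>` from `get_block_cache`, lock it, and use the closure-based `read`/`modify` accessors; `modify` sets the dirty flag and `sync` (also run from `Drop`) writes the block back. A minimal crate-internal sketch of that calling pattern, assuming a `block_device: Arc<dyn BlockDevice>` is at hand; the block id, offsets, and value types are illustrative only:

```rust
use alloc::sync::Arc;

// Sketch only: `get_block_cache` and `BlockDevice` are the crate items defined above.
fn cache_usage_sketch(block_device: Arc<dyn BlockDevice>) {
    // Shared read of a u64 at offset 0 of block 3; no dirty flag is set.
    let word: u64 = get_block_cache(3, Arc::clone(&block_device))
        .lock()
        .read(0, |w: &u64| *w);
    // Exclusive update of a byte at offset 16; marks the entry modified,
    // so it is written back on sync() or when the cache entry is dropped.
    get_block_cache(3, block_device)
        .lock()
        .modify(16, |b: &mut u8| *b = word as u8);
}
```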

View File

@@ -1,44 +0,0 @@
use super::{
BlockDevice,
BLOCK_SZ,
BlockCache,
get_block_cache,
};
use alloc::sync::Arc;
use core::marker::PhantomData;
pub struct Dirty<T> {
block_cache: Arc<BlockCache>,
offset: usize,
phantom: PhantomData<T>,
}
impl<T> Dirty<T> where T: Sized {
pub fn new(block_id: usize, offset: usize, block_device: Arc<dyn BlockDevice>) -> Self {
Self {
block_cache: get_block_cache(block_id, block_device.clone()),
offset,
phantom: PhantomData,
}
}
pub fn get_mut(&mut self) -> &mut T {
let type_size = core::mem::size_of::<T>();
// assert that the struct is inside a block
assert!(self.offset + type_size <= BLOCK_SZ);
let start_addr = self.block_cache.start_addr(self.offset);
unsafe { &mut *(start_addr as *mut T) }
}
pub fn get_ref(&self) -> &T {
let type_size = core::mem::size_of::<T>();
// assert that the struct is inside a block
assert!(self.offset + type_size <= BLOCK_SZ);
let start_addr = self.block_cache.start_addr(self.offset);
unsafe { &*(start_addr as *const T) }
}
pub fn read<V>(&self, f: impl FnOnce(&T) -> V) -> V {
f(self.get_ref())
}
pub fn modify<V>(&mut self, f: impl FnOnce(&mut T) -> V) -> V {
f(self.get_mut())
}
}

View File

@@ -6,8 +6,8 @@ use super::{
SuperBlock,
DiskInode,
DiskInodeType,
Dirty,
Inode,
get_block_cache,
};
use crate::BLOCK_SZ;
@@ -41,7 +41,7 @@ impl EasyFileSystem {
data_bitmap_blocks as usize,
);
let mut efs = Self {
block_device,
block_device: Arc::clone(&block_device),
inode_bitmap,
data_bitmap,
inode_area_start_block: 1 + inode_bitmap_blocks,
@@ -49,14 +49,19 @@ impl EasyFileSystem {
};
// clear all blocks
for i in 0..total_blocks {
efs.get_block(i).modify(|data_block| {
for byte in data_block.iter_mut() {
*byte = 0;
}
get_block_cache(
i as usize,
Arc::clone(&block_device)
)
.lock()
.modify(0, |data_block: &mut DataBlock| {
for byte in data_block.iter_mut() { *byte = 0; }
});
}
// initialize SuperBlock
efs.get_super_block().modify(|super_block| {
get_block_cache(0, Arc::clone(&block_device))
.lock()
.modify(0, |super_block: &mut SuperBlock| {
super_block.initialize(
total_blocks,
inode_bitmap_blocks,
@@ -68,10 +73,15 @@ impl EasyFileSystem {
// write back immediately
// create a inode for root node "/"
assert_eq!(efs.alloc_inode(), 0);
efs.get_disk_inode(0).modify(|disk_inode| {
let (root_inode_block_id, root_inode_offset) = efs.get_disk_inode_pos(0);
get_block_cache(
root_inode_block_id as usize,
Arc::clone(&block_device)
)
.lock()
.modify(root_inode_offset, |disk_inode: &mut DiskInode| {
disk_inode.initialize(
DiskInodeType::Directory,
efs.alloc_data(),
DiskInodeType::Directory,efs.alloc_data()
);
});
Arc::new(Mutex::new(efs))
@@ -79,54 +89,56 @@ impl EasyFileSystem {
pub fn open(block_device: Arc<dyn BlockDevice>) -> Arc<Mutex<Self>> {
// read SuperBlock
let super_block_dirty: Dirty<SuperBlock> = Dirty::new(0, 0, block_device.clone());
let super_block = super_block_dirty.get_ref();
assert!(super_block.is_valid(), "Error loading EFS!");
let inode_total_blocks =
super_block.inode_bitmap_blocks + super_block.inode_area_blocks;
let efs = Self {
block_device,
inode_bitmap: Bitmap::new(
1,
super_block.inode_bitmap_blocks as usize
),
data_bitmap: Bitmap::new(
(1 + inode_total_blocks) as usize,
super_block.data_bitmap_blocks as usize,
),
inode_area_start_block: 1 + super_block.inode_bitmap_blocks,
data_area_start_block: 1 + inode_total_blocks + super_block.data_bitmap_blocks,
};
Arc::new(Mutex::new(efs))
get_block_cache(0, Arc::clone(&block_device))
.lock()
.read(0, |super_block: &SuperBlock| {
assert!(super_block.is_valid(), "Error loading EFS!");
let inode_total_blocks =
super_block.inode_bitmap_blocks + super_block.inode_area_blocks;
let efs = Self {
block_device,
inode_bitmap: Bitmap::new(
1,
super_block.inode_bitmap_blocks as usize
),
data_bitmap: Bitmap::new(
(1 + inode_total_blocks) as usize,
super_block.data_bitmap_blocks as usize,
),
inode_area_start_block: 1 + super_block.inode_bitmap_blocks,
data_area_start_block: 1 + inode_total_blocks + super_block.data_bitmap_blocks,
};
Arc::new(Mutex::new(efs))
})
}
pub fn root_inode(efs: &Arc<Mutex<Self>>) -> Inode {
let block_device = Arc::clone(&efs.lock().block_device);
Inode::new(
0,
efs.clone(),
efs.lock().block_device.clone(),
Arc::clone(efs),
block_device,
)
}
/*
fn get_super_block(&self) -> Dirty<SuperBlock> {
Dirty::new(0, 0, self.block_device.clone())
}
*/
pub fn get_disk_inode(&self, inode_id: u32) -> Dirty<DiskInode> {
pub fn get_disk_inode_pos(&self, inode_id: u32) -> (u32, usize) {
let inode_size = core::mem::size_of::<DiskInode>();
let inodes_per_block = (BLOCK_SZ / inode_size) as u32;
let block_id = self.inode_area_start_block + inode_id / inodes_per_block;
Dirty::new(
block_id as usize,
(inode_id % inodes_per_block) as usize * inode_size,
self.block_device.clone(),
)
(block_id, (inode_id % inodes_per_block) as usize * inode_size)
}
pub fn get_data_block(&self, data_block_id: u32) -> Dirty<DataBlock> {
self.get_block(self.data_area_start_block + data_block_id)
pub fn get_data_block_id(&self, data_block_id: u32) -> u32 {
self.data_area_start_block + data_block_id
}
/*
fn get_block(&self, block_id: u32) -> Dirty<DataBlock> {
Dirty::new(
block_id as usize,
@@ -134,6 +146,7 @@ impl EasyFileSystem {
self.block_device.clone(),
)
}
*/
pub fn alloc_inode(&mut self) -> u32 {
self.inode_bitmap.alloc(&self.block_device).unwrap() as u32

View File

@@ -2,7 +2,7 @@ use core::fmt::{Debug, Formatter, Result};
use super::{
BLOCK_SZ,
BlockDevice,
Dirty,
get_block_cache,
};
use alloc::sync::Arc;
use alloc::vec::Vec;
@@ -87,6 +87,7 @@ impl DiskInode {
pub fn is_dir(&self) -> bool {
self.type_ == DiskInodeType::Directory
}
#[allow(unused)]
pub fn is_file(&self) -> bool {
self.type_ == DiskInodeType::File
}
@@ -102,14 +103,11 @@ impl DiskInode {
self.direct[inner_id]
} else {
// only support indirect1 now
Dirty::<IndirectBlock>::new(
self.indirect1 as usize,
0,
block_device.clone()
).read(|indirect_block| {
// it will panic if file is too large
indirect_block[inner_id - INODE_DIRECT_COUNT]
})
get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
.lock()
.read(0, |indirect_block: &IndirectBlock| {
indirect_block[inner_id - INODE_DIRECT_COUNT]
})
}
}
pub fn blocks_num_needed(&self, new_size: u32) -> u32 {
@@ -126,11 +124,12 @@ impl DiskInode {
let last_blocks = self.blocks();
self.size = new_size;
let current_blocks = self.blocks();
Dirty::<IndirectBlock>::new(
get_block_cache(
self.indirect1 as usize,
0,
block_device.clone()
).modify(|indirect_block| {
Arc::clone(block_device)
)
.lock()
.modify(0, |indirect_block: &mut IndirectBlock| {
for i in 0..current_blocks - last_blocks {
let inner_id = (last_blocks + i) as usize;
let new_block = new_blocks[i as usize];
@@ -152,11 +151,12 @@ impl DiskInode {
self.direct[i] = 0;
}
if blocks > INODE_DIRECT_COUNT {
Dirty::<IndirectBlock>::new(
get_block_cache(
self.indirect1 as usize,
0,
block_device.clone(),
).modify(|indirect_block| {
Arc::clone(block_device),
)
.lock()
.modify(0, |indirect_block: &mut IndirectBlock| {
for i in 0..blocks - INODE_DIRECT_COUNT {
v.push(indirect_block[i]);
indirect_block[i] = 0;
@@ -185,11 +185,12 @@ impl DiskInode {
// read and update read size
let block_read_size = end_current_block - start;
let dst = &mut buf[read_size..read_size + block_read_size];
Dirty::<DataBlock>::new(
get_block_cache(
self.get_block_id(start_block as u32, block_device) as usize,
0,
block_device.clone()
).read(|data_block| {
Arc::clone(block_device),
)
.lock()
.read(0, |data_block: &DataBlock| {
let src = &data_block[start % BLOCK_SZ..start % BLOCK_SZ + block_read_size];
dst.copy_from_slice(src);
});
@@ -219,11 +220,12 @@ impl DiskInode {
end_current_block = end_current_block.min(end);
// write and update write size
let block_write_size = end_current_block - start;
Dirty::<DataBlock>::new(
get_block_cache(
self.get_block_id(start_block as u32, block_device) as usize,
0,
block_device.clone()
).modify(|data_block| {
Arc::clone(block_device)
)
.lock()
.modify(0, |data_block: &mut DataBlock| {
let src = &buf[write_size..write_size + block_write_size];
let dst = &mut data_block[start % BLOCK_SZ..start % BLOCK_SZ + block_write_size];
dst.copy_from_slice(src);

View File

@@ -5,7 +5,6 @@ extern crate alloc;
mod block_dev;
mod layout;
mod efs;
mod dirty;
mod bitmap;
mod vfs;
mod block_cache;
@@ -15,6 +14,5 @@ pub use block_dev::BlockDevice;
pub use efs::EasyFileSystem;
pub use vfs::Inode;
use layout::*;
use dirty::Dirty;
use bitmap::Bitmap;
use block_cache::{BlockCache, get_block_cache};
use block_cache::get_block_cache;

View File

@@ -1,12 +1,12 @@
use super::{
BlockDevice,
Dirty,
DiskInode,
DiskInodeType,
DirEntry,
DirentBytes,
EasyFileSystem,
DIRENT_SZ,
get_block_cache,
};
use alloc::sync::Arc;
use alloc::string::String;
@@ -14,7 +14,8 @@ use alloc::vec::Vec;
use spin::{Mutex, MutexGuard};
pub struct Inode {
inode_id: u32,
block_id: usize,
block_offset: usize,
fs: Arc<Mutex<EasyFileSystem>>,
block_device: Arc<dyn BlockDevice>,
}
@@ -25,37 +26,51 @@ impl Inode {
fs: Arc<Mutex<EasyFileSystem>>,
block_device: Arc<dyn BlockDevice>,
) -> Self {
let (block_id, block_offset) = fs.lock().get_disk_inode_pos(inode_id);
Self {
inode_id,
block_id: block_id as usize,
block_offset,
fs,
block_device,
}
}
fn read_disk_inode<V>(&self, f: impl FnOnce(&DiskInode) -> V) -> V {
get_block_cache(
self.block_id,
Arc::clone(&self.block_device)
).lock().read(self.block_offset, f)
}
fn modify_disk_inode<V>(&self, f: impl FnOnce(&mut DiskInode) -> V) -> V {
get_block_cache(
self.block_id,
Arc::clone(&self.block_device)
).lock().modify(self.block_offset, f)
}
/*
fn get_disk_inode(&self, fs: &mut MutexGuard<EasyFileSystem>) -> Dirty<DiskInode> {
fs.get_disk_inode(self.inode_id)
}
*/
fn find_inode_id(
&self,
name: &str,
inode: &Dirty<DiskInode>,
disk_inode: &DiskInode,
) -> Option<u32> {
// assert it is a directory
assert!(inode.read(|inode| inode.is_dir()));
let file_count = inode.read(|inode| {
inode.size as usize
}) / DIRENT_SZ;
assert!(disk_inode.is_dir());
let file_count = (disk_inode.size as usize) / DIRENT_SZ;
let mut dirent_space: DirentBytes = Default::default();
for i in 0..file_count {
assert_eq!(
inode.read(|inode| {
inode.read_at(
DIRENT_SZ * i,
&mut dirent_space,
&self.block_device,
)
}),
disk_inode.read_at(
DIRENT_SZ * i,
&mut dirent_space,
&self.block_device,
),
DIRENT_SZ,
);
let dirent = DirEntry::from_bytes(&dirent_space);
@@ -67,9 +82,9 @@ impl Inode {
}
pub fn find(&self, name: &str) -> Option<Arc<Inode>> {
let mut fs = self.fs.lock();
let inode = self.get_disk_inode(&mut fs);
self.find_inode_id(name, &inode)
let _ = self.fs.lock();
self.read_disk_inode(|disk_inode| {
self.find_inode_id(name, disk_inode)
.map(|inode_id| {
Arc::new(Self::new(
inode_id,
@@ -77,68 +92,69 @@ impl Inode {
self.block_device.clone(),
))
})
})
}
fn increase_size(
&self,
new_size: u32,
inode: &mut Dirty<DiskInode>,
disk_inode: &mut DiskInode,
fs: &mut MutexGuard<EasyFileSystem>,
) {
let size = inode.read(|inode| inode.size);
if new_size < size {
if new_size < disk_inode.size {
return;
}
let blocks_needed = inode.read(|inode| {
inode.blocks_num_needed(new_size)
});
let blocks_needed = disk_inode.blocks_num_needed(new_size);
let mut v: Vec<u32> = Vec::new();
for _ in 0..blocks_needed {
v.push(fs.alloc_data());
}
inode.modify(|inode| {
inode.increase_size(new_size, v, &self.block_device);
});
disk_inode.increase_size(new_size, v, &self.block_device);
}
pub fn create(&self, name: &str) -> Option<Arc<Inode>> {
let mut fs = self.fs.lock();
let mut inode = self.get_disk_inode(&mut fs);
// assert it is a directory
assert!(inode.read(|inode| inode.is_dir()));
// has the file been created?
if let Some(_) = self.find_inode_id(name, &inode) {
if self.modify_disk_inode(|root_inode| {
// assert it is a directory
assert!(root_inode.is_dir());
// has the file been created?
self.find_inode_id(name, root_inode)
}).is_some() {
return None;
}
//println!("same file does not exist in Inode::create.");
// create a new file
// alloc a inode with an indirect block
let new_inode_id = fs.alloc_inode();
let indirect1 = fs.alloc_data();
// initialize inode
fs.get_disk_inode(new_inode_id).modify(|inode| {
inode.initialize(
DiskInodeType::File,
indirect1,
)
let (new_inode_block_id, new_inode_block_offset)
= fs.get_disk_inode_pos(new_inode_id);
//println!("new_inode_id={} ({},{})", new_inode_id, new_inode_block_id, new_inode_block_offset);
get_block_cache(
new_inode_block_id as usize,
Arc::clone(&self.block_device)
).lock().modify(new_inode_block_offset, |new_inode: &mut DiskInode| {
new_inode.initialize(DiskInodeType::File, indirect1);
});
// append file in the dirent
let file_count =
inode.read(|inode| inode.size as usize) / DIRENT_SZ;
let new_size = (file_count + 1) * DIRENT_SZ;
// increase size
self.increase_size(new_size as u32, &mut inode, &mut fs);
// write dirent
let dirent = DirEntry::new(name, new_inode_id);
inode.modify(|inode| {
inode.write_at(
//println!("new inode has been initialized.");
self.modify_disk_inode(|root_inode| {
// append file in the dirent
let file_count = (root_inode.size as usize) / DIRENT_SZ;
let new_size = (file_count + 1) * DIRENT_SZ;
// increase size
self.increase_size(new_size as u32, root_inode, &mut fs);
// write dirent
let dirent = DirEntry::new(name, new_inode_id);
root_inode.write_at(
file_count * DIRENT_SZ,
dirent.into_bytes(),
&self.block_device,
);
});
//println!("new file has been inserted into root inode.");
// release efs lock manually because we will acquire it again in Inode::new
drop(fs);
// return inode
Some(Arc::new(Self::new(
new_inode_id,
@@ -148,53 +164,48 @@ impl Inode {
}
pub fn ls(&self) -> Vec<String> {
let mut fs = self.fs.lock();
let inode = self.get_disk_inode(&mut fs);
let file_count = inode.read(|inode| {
(inode.size as usize) / DIRENT_SZ
});
let mut v: Vec<String> = Vec::new();
for i in 0..file_count {
let mut dirent_bytes: DirentBytes = Default::default();
assert_eq!(
inode.read(|inode| {
inode.read_at(
let _ = self.fs.lock();
self.read_disk_inode(|disk_inode| {
let file_count = (disk_inode.size as usize) / DIRENT_SZ;
let mut v: Vec<String> = Vec::new();
for i in 0..file_count {
let mut dirent_bytes: DirentBytes = Default::default();
assert_eq!(
disk_inode.read_at(
i * DIRENT_SZ,
&mut dirent_bytes,
&self.block_device,
)
}),
DIRENT_SZ,
);
v.push(String::from(DirEntry::from_bytes(&dirent_bytes).name()));
}
v
),
DIRENT_SZ,
);
v.push(String::from(DirEntry::from_bytes(&dirent_bytes).name()));
}
v
})
}
pub fn read_at(&self, offset: usize, buf: &mut [u8]) -> usize {
let mut fs = self.fs.lock();
self.get_disk_inode(&mut fs).modify(|disk_inode| {
let _ = self.fs.lock();
self.read_disk_inode(|disk_inode| {
disk_inode.read_at(offset, buf, &self.block_device)
})
}
pub fn write_at(&self, offset: usize, buf: &[u8]) -> usize {
let mut fs = self.fs.lock();
let mut inode = self.get_disk_inode(&mut fs);
self.increase_size((offset + buf.len()) as u32, &mut inode, &mut fs);
inode.modify(|disk_inode| {
self.modify_disk_inode(|disk_inode| {
self.increase_size((offset + buf.len()) as u32, disk_inode, &mut fs);
disk_inode.write_at(offset, buf, &self.block_device)
})
}
pub fn clear(&self) {
let mut fs = self.fs.lock();
let mut inode = self.get_disk_inode(&mut fs);
let data_blocks_dealloc = inode.modify(|disk_inode| {
disk_inode.clear_size(&self.block_device)
self.modify_disk_inode(|disk_inode| {
let data_blocks_dealloc = disk_inode.clear_size(&self.block_device);
for data_block in data_blocks_dealloc.into_iter() {
fs.dealloc_data(data_block);
}
});
for data_block in data_blocks_dealloc.into_iter() {
fs.dealloc_data(data_block);
}
}
}
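Internally each `Inode` method now takes the file-system lock and reaches the on-disk inode through `read_disk_inode`/`modify_disk_inode`, but the public API is unchanged. A short sketch of driving it, mirroring the commented-out `efs_test` above; the file name and contents are illustrative:

```rust
use alloc::sync::Arc;
use spin::Mutex;

// Assumes `efs` came from EasyFileSystem::open on an already-created image.
fn vfs_usage_sketch(efs: &Arc<Mutex<EasyFileSystem>>) {
    let root_inode = EasyFileSystem::root_inode(efs);
    let filea = root_inode.create("filea").unwrap(); // allocates an inode plus its indirect1 block
    filea.write_at(0, b"Hello, world!");             // grows the file to 13 bytes
    let mut buffer = [0u8; 32];
    let len = filea.read_at(0, &mut buffer);         // reads back at most the current file size
    assert_eq!(&buffer[..len], b"Hello, world!");
}
```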

View File

@@ -50,7 +50,7 @@ $(KERNEL_BIN): kernel
$(FS_IMG): $(APPS)
@cd ../user && make build
@cd ../easy-fs && cargo run --release
@cd ../easy-fs-fuse && cargo run --release -- -s ../user/src/bin/ -t ../user/target/riscv64gc-unknown-none-elf/release/
$(APPS):

View File

@@ -20,4 +20,4 @@ build: binary
clean:
@cargo clean
.PHONY: elf binary build clean
.PHONY: elf binary build clean