mirror of https://github.com/rcore-os/rCore.git
synced 2024-11-22 08:06:17 +04:00

add support for mmap shared memory. passed compilation.

This commit is contained in:
parent 0b05a58e3b
commit b5c08c2825
@@ -6,3 +6,4 @@ edition = "2018"
 
 [dependencies]
 log = "0.4"
+spin = "0.5"
@@ -40,9 +40,11 @@ mod byframe;
 mod delay;
 mod file;
 mod linear;
+mod shared;
 //mod swap;
 
 pub use self::byframe::ByFrame;
 pub use self::delay::Delay;
 pub use self::file::{File, Read};
 pub use self::linear::Linear;
+pub use self::shared::Shared;
crate/memory/src/memory_set/handler/shared.rs (new file, 130 lines)
@@ -0,0 +1,130 @@
use super::*;
use alloc::collections::BTreeMap;
use alloc::sync::Arc;
use alloc::vec::Vec;
use spin::Mutex;

#[derive(Debug)]
struct SharedGuard<T: FrameAllocator> {
    allocator: T,
    // Direct virt -> phys mapping for now; only used by mmap.
    target: BTreeMap<usize, usize>,
}

impl<T: FrameAllocator> SharedGuard<T> {
    pub fn new(allocator: T) -> Self {
        // Frames are allocated lazily, on the first page fault.
        SharedGuard {
            allocator,
            target: BTreeMap::new(),
        }
    }

    pub fn alloc(&mut self, virt_addr: usize) -> Option<usize> {
        let phys_addr = self.allocator.alloc().expect("failed to allocate frame");
        self.target.insert(virt_addr, phys_addr);
        Some(phys_addr)
    }

    pub fn dealloc(&mut self, virt_addr: usize) {
        let phys_addr = *self.target.get(&virt_addr).unwrap();
        self.allocator.dealloc(phys_addr);
        self.target.remove(&virt_addr);
    }

    pub fn get(&self, addr: usize) -> Option<usize> {
        // Returns None when the page has not been allocated yet.
        self.target.get(&addr).cloned()
    }
}

impl<T: FrameAllocator> Drop for SharedGuard<T> {
    fn drop(&mut self) {
        let free_list: Vec<usize> = self.target.keys().cloned().collect();
        for virt_addr in free_list {
            self.dealloc(virt_addr);
        }
    }
}

#[derive(Debug, Clone)]
pub struct Shared<T: FrameAllocator> {
    allocator: T,
    guard: Option<Arc<Mutex<SharedGuard<T>>>>,
}

impl<T: FrameAllocator> MemoryHandler for Shared<T> {
    fn box_clone(&self) -> Box<dyn MemoryHandler> {
        Box::new(self.clone())
    }

    fn map(&self, pt: &mut dyn PageTable, addr: VirtAddr, attr: &MemoryAttr) {
        let guard = self.guard.clone().expect("mapping an unbound shared area");
        match guard.lock().get(addr) {
            None => {
                // Not allocated yet: install a non-present entry and delay the
                // frame allocation until the first page fault.
                let entry = pt.map(addr, 0);
                entry.set_present(false);
                attr.apply(entry);
            }
            Some(phys_addr) => {
                // Physical memory already allocated by another process.
                let entry = pt.map(addr, phys_addr);
                attr.apply(entry);
            }
        }
    }

    fn unmap(&self, pt: &mut dyn PageTable, addr: VirtAddr) {
        // Freeing the physical memory is done when the guard is dropped.
        pt.unmap(addr);
    }

    fn clone_map(
        &self,
        pt: &mut dyn PageTable,
        _src_pt: &mut dyn PageTable,
        addr: VirtAddr,
        attr: &MemoryAttr,
    ) {
        // The actual mapping is done when handling the page fault,
        // since the guard (and its frame table) is shared between clones.
        let entry = pt.map(addr, 0);
        entry.set_present(false);
        attr.apply(entry);
    }

    fn handle_page_fault(&self, pt: &mut dyn PageTable, addr: VirtAddr) -> bool {
        let entry = pt.get_entry(addr).expect("failed to get entry");
        let guard = self.guard.clone().expect("page fault on an unbound shared area");
        let phys_addr_opt = guard.lock().get(addr);
        if entry.present() {
            // Not a delayed-allocation fault.
            return false;
        } else if phys_addr_opt.is_none() {
            // Physical memory not allocated yet: grab a frame and record it.
            let frame = guard.lock().alloc(addr).unwrap();
            entry.set_target(frame);
            entry.set_present(true);
            entry.update();

            // Initialize with zero for the delayed mmap case.
            let data = pt.get_page_slice_mut(addr);
            let len = data.len();
            for x in data {
                *x = 0;
            }
            pt.flush_cache_copy_user(addr, addr + len, false);
        } else {
            // Physical memory already allocated: just update the page table.
            let frame = phys_addr_opt.unwrap();
            entry.set_target(frame);
            entry.set_present(true);
            entry.update();
        }
        true
    }
}

impl<T: FrameAllocator> Shared<T> {
    pub fn new(allocator: T) -> Self {
        Shared {
            allocator: allocator.clone(),
            guard: Some(Arc::new(Mutex::new(SharedGuard::new(allocator)))),
        }
    }
}
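How the handler is meant to be wired up, for orientation: a short sketch assembled from the sys_mmap hunk later in this diff, not new code in this commit. self.vm(), prot.to_attr() and GlobalFrameAlloc are the existing rCore kernel pieces and are assumed here.

// Sketch only (mirrors the sys_mmap change below).
self.vm().push(
    addr,                           // page-aligned start of the region
    addr + len,                     // exclusive end
    prot.to_attr(),                 // MemoryAttr built from the mmap prot bits
    Shared::new(GlobalFrameAlloc),  // one SharedGuard shared by every clone of this handler
    "mmap_anon_shared",
);
// Nothing is allocated here: Shared::map installs a non-present PTE, the first touch
// faults into handle_page_fault, which allocates a zeroed frame and records it in
// SharedGuard::target, and every later fault on the same page (from any process that
// cloned the handler) maps that same physical frame.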
kernel/Cargo.lock (generated, 1 line changed)
@@ -429,6 +429,7 @@ name = "rcore-memory"
 version = "0.1.0"
 dependencies = [
  "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "spin 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
kernel/src/ipc/shared_mem.rs (new file, 81 lines)
@@ -0,0 +1,81 @@
use crate::sync::Semaphore;
use crate::sync::SpinLock as Mutex;
use alloc::{boxed::Box, collections::BTreeMap, string::String, sync::Arc, sync::Weak, vec::Vec};
use core::cell::UnsafeCell;
use lazy_static::lazy_static;
use spin::RwLock;
use rcore_memory::{VirtAddr, PhysAddr};

pub struct shmid {
    key: usize,
    size: usize,
    target: PhysAddr,
}

pub struct shmid_local {
    key: usize,
    size: usize,
    addr: VirtAddr,
    target: PhysAddr,
}

impl shmid {
    pub fn new(key: usize, size: usize, target: PhysAddr) -> shmid {
        shmid { key, size, target }
    }
}

impl shmid_local {
    pub fn new(key: usize, size: usize, addr: VirtAddr, target: PhysAddr) -> shmid_local {
        shmid_local { key, size, addr, target }
    }
}

lazy_static! {
    // Global key -> shared-memory-segment table. Stored as Arc for now; the draft
    // code below assumes Weak references ("between ARC & WEAK").
    pub static ref KEY2SHM: RwLock<BTreeMap<usize, Arc<shmid>>> =
        RwLock::new(BTreeMap::new());
}

/* Draft, not yet finished:
pub fn new_shm(key: usize, size: usize, shmflg: usize) -> shmid_local {
    let mut key2shm_table = KEY2SHM.write();
    let mut shmid_ref: shmid;
    let mut key_shmid_ref = key2shm_table.get(&key);
    if key_shmid_ref.is_none() || key_shmid_ref.unwrap().upgrade().is_none() {
        proc.
    } else {
        shmid_ref = key2shm_table.get(&key).unwrap().unwrap();
    }

    shmid_ref
}

// The semaphore counterpart, kept here as the model to follow:
pub fn new_semary(key: usize, nsems: usize, semflg: usize) -> Arc<SemArray> {
    let mut key2sem_table = KEY2SEM.write();
    let mut sem_array_ref: Arc<SemArray>;

    let mut key_sem_array_ref = key2sem_table.get(&key);
    if key_sem_array_ref.is_none() || key_sem_array_ref.unwrap().upgrade().is_none() {
        let mut semaphores: Vec<Semaphore> = Vec::new();
        for i in 0..nsems {
            semaphores.push(Semaphore::new(0));
        }

        let mut sem_array = SemArray::new(key, semaphores);
        sem_array_ref = Arc::new(sem_array);
        key2sem_table.insert(key, Arc::downgrade(&sem_array_ref));
    } else {
        sem_array_ref = key2sem_table.get(&key).unwrap().upgrade().unwrap(); // no security check
    }

    sem_array_ref
}*/
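To make the intent of KEY2SHM concrete, here is a minimal sketch of the lookup-or-create step the commented-out new_shm is working toward, written against the Arc-based table as committed (the draft's .upgrade() calls would instead require a Weak-based table, as in the semaphore code quoted above). The function name and the alloc_target closure are hypothetical, not part of this commit.

// Hypothetical helper in the same module as KEY2SHM. `alloc_target` stands in for
// however the backing physical region is obtained (e.g. through a Shared mapping).
pub fn get_or_create_shm(key: usize, size: usize, alloc_target: impl FnOnce() -> PhysAddr) -> Arc<shmid> {
    let mut key2shm_table = KEY2SHM.write();
    if let Some(existing) = key2shm_table.get(&key) {
        // Another process already created this segment; share it.
        return existing.clone();
    }
    let segment = Arc::new(shmid::new(key, size, alloc_target()));
    key2shm_table.insert(key, segment.clone()); // later shmget calls with the same key find it
    segment
}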
@@ -11,6 +11,10 @@ pub use crate::ipc::SemArray;
 pub use crate::ipc::SemBuf;
 pub use crate::ipc::SemctlUnion;
 
+use rcore_memory::memory_set::handler::{Delay, File, Linear, Shared};
+use rcore_memory::memory_set::MemoryAttr;
+use rcore_memory::PAGE_SIZE;
+
 use super::*;
 
 impl Syscall<'_> {
@@ -98,6 +102,60 @@ impl Syscall<'_> {
             unimplemented!("Semaphore: Semctl.(Not setval)");
         }
     }
 
+    /*pub fn sys_shmget(&self, key: usize, size: usize, shmflg: usize) -> SysResult {
+        info!("sys_shmget: key: {}", key);
+
+        let mut size = size;
+        if (size & (PAGE_SIZE - 1)) != 0 {
+            // round the request up to a whole number of pages
+            size = (size & !(PAGE_SIZE - 1)) + PAGE_SIZE;
+        }
+
+        let mut proc = self.process();
+
+        let mut key2shm_table = KEY2SHM.write();
+        let mut shmid_ref: shmid;
+        let mut shmid_local_ref: shmid_local;
+
+        let mut key_shmid_ref = key2shm_table.get(&key);
+        if key_shmid_ref.is_none() || key_shmid_ref.unwrap().upgrade().is_none() {
+            let addr = proc.vm().find_free_area(PAGE_SIZE, size);
+            proc.vm().push(
+                addr,
+                addr + size,
+                MemoryAttr {
+                    user: true,
+                    readonly: false,
+                    execute: true,
+                    mmio: 0,
+                },
+                Shared::new(GlobalFrameAlloc),
+                "shmget",
+            );
+            let target = proc.vm().translate(addr);
+            shmid_ref = shmid::new(key, size, target);
+            shmid_local_ref = shmid_local::new(key, size, addr, target);
+        } else {
+            shmid_ref = key2shm_table.get(&key).unwrap().unwrap();
+        }
+
+        shmid_ref
+
+        /*let sem_id = (0..)
+            .find(|i| match semarray_table.get(i) {
+                Some(p) => false,
+                _ => true,
+            })
+            .unwrap();
+
+        let mut sem_array: Arc<SemArray> = new_semary(key, nsems, semflg);
+
+        semarray_table.insert(sem_id, sem_array);
+        Ok(sem_id)*/
+    }*/
 }
 
 bitflags! {
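One detail in the draft above: the requested size is rounded up to a whole number of pages before anything is mapped. The same rounding in its usual branch-free form, as a sketch (PAGE_SIZE is the constant imported in this hunk):

// Round `size` up to the next multiple of PAGE_SIZE (a power of two).
// Equivalent to the if/mask sequence in the draft sys_shmget.
fn round_up_to_page(size: usize) -> usize {
    (size + PAGE_SIZE - 1) & !(PAGE_SIZE - 1)
}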
@@ -1,4 +1,4 @@
-use rcore_memory::memory_set::handler::{Delay, File, Linear};
+use rcore_memory::memory_set::handler::{Delay, File, Linear, Shared};
 use rcore_memory::memory_set::MemoryAttr;
 use rcore_memory::PAGE_SIZE;

@@ -40,16 +40,25 @@ impl Syscall<'_> {
 
         if flags.contains(MmapFlags::ANONYMOUS) {
             if flags.contains(MmapFlags::SHARED) {
-                return Err(SysError::EINVAL);
+                self.vm().push(
+                    addr,
+                    addr + len,
+                    prot.to_attr(),
+                    Shared::new(GlobalFrameAlloc),
+                    "mmap_anon_shared",
+                );
+                return Ok(addr);
+            } else {
+                self.vm().push(
+                    addr,
+                    addr + len,
+                    prot.to_attr(),
+                    Delay::new(GlobalFrameAlloc),
+                    "mmap_anon",
+                );
+                return Ok(addr);
             }
-            self.vm().push(
-                addr,
-                addr + len,
-                prot.to_attr(),
-                Delay::new(GlobalFrameAlloc),
-                "mmap_anon",
-            );
-            return Ok(addr);
         } else {
             let file = proc.get_file(fd)?;
             info!("mmap path is {} ", &*file.path);
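The user-visible effect of this hunk: mmap with MAP_ANONYMOUS | MAP_SHARED no longer fails with EINVAL, and pages faulted in through the Shared handler are the same physical frames in every process that inherits the mapping. A hypothetical user-space check, assuming a libc-style runtime running on rCore (not part of this diff):

use libc::{fork, mmap, MAP_ANONYMOUS, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
use std::ptr;

fn main() {
    unsafe {
        // Previously this returned EINVAL; now it maps a delay-allocated shared region.
        let p = mmap(
            ptr::null_mut(),
            4096,
            PROT_READ | PROT_WRITE,
            MAP_SHARED | MAP_ANONYMOUS,
            -1,
            0,
        );
        assert_ne!(p, MAP_FAILED);
        let p = p as *mut u8;
        if fork() == 0 {
            p.write_volatile(42); // child writes through the shared frame
        } else {
            // The parent eventually observes 42: both page tables point at the frame
            // recorded in the shared guard. (Synchronization omitted for brevity.)
        }
    }
}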
@@ -268,9 +268,19 @@ impl Syscall<'_> {
             args[0],
             args[1],
             args[2],
-            args[3] as isize, /* as SemctlUnion*/
+            args[3] as isize, /* as SemctlUnion */
         ),
 
+        // shm
+        /*SYS_SHMGET => self.sys_shmget(args[0], args[1], args[2]),
+        SYS_SHMAT => self.sys_shmat(args[0], args[1], args[2]),
+        SYS_SHMDT => self.sys_shmdt(args[0], args[1], args[2]),
+        SYS_SHMCTL => self.sys_shmctl(
+            args[0],
+            args[1],
+            args[2] /* should be shmid_ds *buf */
+        ),*/
+
         // system
         SYS_GETPID => self.sys_getpid(),
         SYS_GETTID => self.sys_gettid(),