// rCore/kernel/src/process/context.rs

use alloc::{boxed::Box, collections::BTreeMap, string::String, sync::Arc, vec::Vec};
use log::*;
use simple_filesystem::file::File;
use spin::Mutex;
use rcore_thread::Context;
use xmas_elf::{ElfFile, header, program::{Flags, Type}};

use crate::arch::interrupt::{Context as ArchContext, TrapFrame};
use crate::memory::{ByFrame, GlobalFrameAlloc, KernelStack, MemoryAttr, MemorySet};

// TODO: avoid pub
pub struct Process {
    pub arch: ArchContext,
    pub memory_set: MemorySet,
    pub kstack: KernelStack,
    pub files: BTreeMap<usize, Arc<Mutex<File>>>,
    pub cwd: String,
}

impl Context for Process {
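    /// Switch from this context to `target`.
    ///
    /// The scheduler only ever stores `Process` behind the `Context` trait
    /// object, so the `transmute` below is a downcast by convention before
    /// delegating to the architecture-specific register switch.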
    unsafe fn switch_to(&mut self, target: &mut Context) {
        use core::mem::transmute;
        let (target, _): (&mut Process, *const ()) = transmute(target);
        self.arch.switch(&mut target.arch);
    }
}

impl Process {
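    /// Make a context object for the boot flow that is already running.
    /// Its arch context starts out empty (`null`) and is filled in when the
    /// kernel first switches away from it.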
    pub unsafe fn new_init() -> Box<Context> {
        Box::new(Process {
            arch: ArchContext::null(),
            memory_set: MemorySet::new(),
            kstack: KernelStack::new(),
            files: BTreeMap::default(),
            cwd: String::new(),
        })
    }
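
    /// Make a new kernel thread context that starts executing `entry` with
    /// the single argument `arg`, on a freshly allocated kernel stack.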
    pub fn new_kernel(entry: extern fn(usize) -> !, arg: usize) -> Box<Context> {
        let memory_set = MemorySet::new();
        let kstack = KernelStack::new();
        Box::new(Process {
            arch: unsafe { ArchContext::new_kernel_thread(entry, arg, kstack.top(), memory_set.token()) },
            memory_set,
            kstack,
            files: BTreeMap::default(),
            cwd: String::new(),
        })
    }

    /// Make a new user thread from ELF data
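    ///
    /// Parses the ELF image, builds a `MemorySet` with all LOAD segments
    /// mapped, maps a user stack and pushes `args` onto it, then creates a
    /// kernel stack and an arch-level user-thread context.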
    pub fn new_user<'a, Iter>(data: &[u8], args: Iter) -> Box<Process>
        where Iter: Iterator<Item=&'a str>
    {
        // Parse elf
        let elf = ElfFile::new(data).expect("failed to read elf");
        let is32 = match elf.header.pt2 {
            header::HeaderPt2::Header32(_) => true,
            header::HeaderPt2::Header64(_) => false,
        };
        match elf.header.pt2.type_().as_type() {
            header::Type::Executable => {
                // #[cfg(feature = "no_mmu")]
                // panic!("ELF is not shared object");
            },
            header::Type::SharedObject => {},
            _ => panic!("ELF is not executable or shared object"),
        }

        // Make page table
        let (mut memory_set, entry_addr) = memory_set_from(&elf);

        // User stack
        use crate::consts::{USER_STACK_OFFSET, USER_STACK_SIZE, USER32_STACK_OFFSET};
        #[cfg(not(feature = "no_mmu"))]
        let mut ustack_top = {
            let (ustack_bottom, ustack_top) = match is32 {
                true => (USER32_STACK_OFFSET, USER32_STACK_OFFSET + USER_STACK_SIZE),
                false => (USER_STACK_OFFSET, USER_STACK_OFFSET + USER_STACK_SIZE),
            };
            memory_set.push(ustack_bottom, ustack_top, ByFrame::new(MemoryAttr::default().user(), GlobalFrameAlloc), "user_stack");
            ustack_top
        };
        #[cfg(feature = "no_mmu")]
        let mut ustack_top = memory_set.push(USER_STACK_SIZE).as_ptr() as usize + USER_STACK_SIZE;
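
        // Push argc/argv onto the new user stack; `with` temporarily activates
        // the new memory set so its pages can be written directly.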
        unsafe {
            memory_set.with(|| { ustack_top = push_args_at_stack(args, ustack_top) });
        }

        trace!("{:#x?}", memory_set);

        let kstack = KernelStack::new();

        Box::new(Process {
            arch: unsafe {
                ArchContext::new_user_thread(
                    entry_addr, ustack_top, kstack.top(), is32, memory_set.token())
            },
            memory_set,
            kstack,
            files: BTreeMap::default(),
            cwd: String::new(),
        })
    }

    /// Fork
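    ///
    /// Clone the memory set into a new page table, copy the data of every
    /// mapped area into the child's address space, and build a child context
    /// that resumes from the parent's trap frame `tf`.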
    pub fn fork(&self, tf: &TrapFrame) -> Box<Context> {
        info!("come into fork!");
        // Clone memory set, make a new page table
        let memory_set = self.memory_set.clone();
        info!("finished memory set clone in fork!");
        // MMU: copy data to the new space
        // NoMMU: copying data has been done in `memory_set.clone()`
        #[cfg(not(feature = "no_mmu"))]
        for area in memory_set.iter() {
            let data = Vec::<u8>::from(unsafe { area.as_slice() });
            unsafe { memory_set.with(|| {
                area.as_slice_mut().copy_from_slice(data.as_slice())
            }) }
        }
        info!("copied data to the new space in fork!");

        let kstack = KernelStack::new();

        Box::new(Process {
            arch: unsafe { ArchContext::new_fork(tf, kstack.top(), memory_set.token()) },
            memory_set,
            kstack,
            files: BTreeMap::default(),
            cwd: String::new(),
        })
    }
}
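
// A minimal usage sketch (hypothetical caller, e.g. an `exec`-style syscall
// handler). `read_file_to_vec` and the scheduler call are assumptions made
// for illustration; only `Process::new_user` is defined in this file:
//
//     let data: Vec<u8> = read_file_to_vec(&inode)?;                     // hypothetical helper
//     let context = Process::new_user(&data, args.iter().map(|s| s.as_str()));
//     processor().manager().add(context, 0);                             // assumed scheduler API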

/// Push a slice at the stack. Return the new sp.
unsafe fn push_slice<T: Copy>(mut sp: usize, vs: &[T]) -> usize {
    use core::{mem::{size_of, align_of}, slice};
    sp -= vs.len() * size_of::<T>();
    sp -= sp % align_of::<T>();
    slice::from_raw_parts_mut(sp as *mut T, vs.len())
        .copy_from_slice(vs);
    sp
}
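
/// Push `args` onto the user stack and return the new stack pointer.
///
/// Growing downwards from `stack_top`, each argument is written as a
/// NUL-terminated string, then the array of string pointers (`argv`), and
/// finally the argument count (`argc`), so the returned sp points at `argc`.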
unsafe fn push_args_at_stack<'a, Iter>(args: Iter, stack_top: usize) -> usize
    where Iter: Iterator<Item=&'a str>
{
    let mut sp = stack_top;
    let mut argv = Vec::new();
    for arg in args {
        sp = push_slice(sp, &[0u8]);
        sp = push_slice(sp, arg.as_bytes());
        argv.push(sp);
    }
    sp = push_slice(sp, argv.as_slice());
    sp = push_slice(sp, &[argv.len()]);
    sp
}

/// Generate a MemorySet according to the ELF file.
/// Also return the real entry point address.
fn memory_set_from(elf: &ElfFile<'_>) -> (MemorySet, usize) {
    debug!("come into memory_set_from");
    let mut ms = MemorySet::new();
    let mut entry = elf.header.pt2.entry_point() as usize;

    // [NoMMU] Get total memory size and alloc space
    let va_begin = elf.program_iter()
        .filter(|ph| ph.get_type() == Ok(Type::Load))
        .map(|ph| ph.virtual_addr()).min().unwrap() as usize;
    let va_end = elf.program_iter()
        .filter(|ph| ph.get_type() == Ok(Type::Load))
        .map(|ph| ph.virtual_addr() + ph.mem_size()).max().unwrap() as usize;
    let va_size = va_end - va_begin;
    #[cfg(feature = "no_mmu")]
    let target = ms.push(va_size);
    #[cfg(feature = "no_mmu")]
    { entry = entry - va_begin + target.as_ptr() as usize; }
    #[cfg(feature = "board_k210")]
    { entry += 0x40000000; }
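
    // Map every LOAD segment, copy its file-backed bytes, and zero the rest
    // (the BSS part of the segment).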
    for ph in elf.program_iter() {
        if ph.get_type() != Ok(Type::Load) {
            continue;
        }
        let virt_addr = ph.virtual_addr() as usize;
        let offset = ph.offset() as usize;
        let file_size = ph.file_size() as usize;
        let mem_size = ph.mem_size() as usize;

        #[cfg(target_arch = "aarch64")]
        assert_eq!((virt_addr >> 48), 0xffff, "Segment Fault");

        // Get target slice
        #[cfg(feature = "no_mmu")]
        let target = &mut target[virt_addr - va_begin..virt_addr - va_begin + mem_size];
        #[cfg(feature = "no_mmu")]
        info!("area @ {:?}, size = {:#x}", target.as_ptr(), mem_size);
        #[cfg(not(feature = "no_mmu"))]
        let target = {
            ms.push(virt_addr, virt_addr + mem_size, ByFrame::new(memory_attr_from(ph.flags()), GlobalFrameAlloc), "");
            unsafe { ::core::slice::from_raw_parts_mut(virt_addr as *mut u8, mem_size) }
        };
        // Copy data
        unsafe {
            ms.with(|| {
                if file_size != 0 {
                    target[..file_size].copy_from_slice(&elf.input[offset..offset + file_size]);
                }
                target[file_size..].iter_mut().for_each(|x| *x = 0);
            });
        }
    }
    (ms, entry)
}
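
/// Convert the ELF segment flags into memory attributes for the user mapping.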
fn memory_attr_from(elf_flags: Flags) -> MemoryAttr {
    let mut flags = MemoryAttr::default().user();
    // TODO: handle readonly
    if elf_flags.is_execute() { flags = flags.execute(); }
    flags
}