// rCore/kernel/src/process/context.rs

use arch::interrupt::{TrapFrame, Context as ArchContext};
use memory::{MemoryArea, MemoryAttr, MemorySet, KernelStack, active_table_swap, alloc_frame};
use xmas_elf::{ElfFile, header, program::{Flags, ProgramHeader, Type}};
use core::fmt::{Debug, Error, Formatter};
use ucore_process::Context;
use alloc::boxed::Box;
use ucore_memory::Page;
use ::memory::{InactivePageTable0, memory_set_record};
use ucore_memory::memory_set::*;

pub struct ContextImpl {
    arch: ArchContext,
    memory_set: MemorySet,
    kstack: KernelStack,
}

impl Context for ContextImpl {
    unsafe fn switch_to(&mut self, target: &mut Context) {
        use core::mem::transmute;
        // Downcast the trait object: split the fat pointer into its data and
        // vtable parts, keeping only the data pointer as a concrete ContextImpl.
        let (target, _): (&mut ContextImpl, *const ()) = transmute(target);
        self.arch.switch(&mut target.arch);
    }
}
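
// A minimal usage sketch (hypothetical; `current` and `next` are assumed to be
// live contexts owned by a scheduler, not defined in this file):
//
//     unsafe { current.switch_to(&mut *next); }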

impl ContextImpl {
    pub unsafe fn new_init() -> Box<Context> {
        Box::new(ContextImpl {
            arch: ArchContext::null(),
            memory_set: MemorySet::new(),
            kstack: KernelStack::new(),
        })
    }

    pub fn new_kernel(entry: extern fn(usize) -> !, arg: usize) -> Box<Context> {
        let memory_set = MemorySet::new();
        let kstack = KernelStack::new();
        Box::new(ContextImpl {
            arch: unsafe { ArchContext::new_kernel_thread(entry, arg, kstack.top(), memory_set.token()) },
            memory_set,
            kstack,
        })
    }
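
    // A minimal usage sketch (hypothetical; `idle` is an assumed kernel entry
    // function, not part of this file):
    //
    //     extern fn idle(arg: usize) -> ! { loop {} }
    //     let ctx = ContextImpl::new_kernel(idle, 0);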

    /// Make a new user thread from ELF data.
    ///
    /// * `data`: the ELF data stream.
    ///
    /// Returns the new user thread `Context`.
    pub fn new_user(data: &[u8]) -> Box<Context> {
        // Parse ELF
        let elf = ElfFile::new(data).expect("failed to read elf");

        let is32 = match elf.header.pt2 {
            header::HeaderPt2::Header32(_) => true,
            header::HeaderPt2::Header64(_) => false,
        };
        assert_eq!(elf.header.pt2.type_().as_type(), header::Type::Executable, "ELF is not executable");

        // User stack
        use consts::{USER_STACK_OFFSET, USER_STACK_SIZE, USER32_STACK_OFFSET};
        let (user_stack_bottom, user_stack_top) = match is32 {
            true => (USER32_STACK_OFFSET, USER32_STACK_OFFSET + USER_STACK_SIZE),
            false => (USER_STACK_OFFSET, USER_STACK_OFFSET + USER_STACK_SIZE),
        };

        // Make page table
        let mut memory_set = memory_set_from(&elf);
        // Add the new memory set to the recorder
        let mmset_ptr = ((&mut memory_set) as *mut MemorySet) as usize;
        memory_set_record().push_back(mmset_ptr);
        memory_set.push(MemoryArea::new(user_stack_bottom, user_stack_top, MemoryAttr::default().user(), "user_stack"));
        trace!("{:#x?}", memory_set);

        let entry_addr = elf.header.pt2.entry_point() as usize;

        // Temporarily switch to the new page table, in order to copy data
        unsafe {
            memory_set.with(|| {
                for ph in elf.program_iter() {
                    let virt_addr = ph.virtual_addr() as usize;
                    let offset = ph.offset() as usize;
                    let file_size = ph.file_size() as usize;
                    if file_size == 0 {
                        // Nothing to copy for this segment (e.g. .bss)
                        continue;
                    }
                    use core::slice;
                    let target = unsafe { slice::from_raw_parts_mut(virt_addr as *mut u8, file_size) };
                    target.copy_from_slice(&data[offset..offset + file_size]);
                }
                if is32 {
                    unsafe {
                        // TODO: full argc & argv
                        *(user_stack_top as *mut u32).offset(-1) = 0; // argv
                        *(user_stack_top as *mut u32).offset(-2) = 0; // argc
                    }
                }
            });
        }

        let kstack = KernelStack::new();
        // Map the user pages in the memory set swappable (currently disabled)
        //memory_set_map_swappable(&mut memory_set);
        // Remove the raw pointer from the recorder: the memory set is about to
        // be moved into the new context, which owns it from now on.
        let id = memory_set_record().iter()
            .position(|x| x.clone() == mmset_ptr).unwrap();
        memory_set_record().remove(id);

        Box::new(ContextImpl {
            arch: unsafe {
                ArchContext::new_user_thread(
                    entry_addr, user_stack_top - 8, kstack.top(), is32, memory_set.token())
            },
            memory_set,
            kstack,
        })
    }
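
    // A minimal usage sketch (hypothetical; the ELF bytes and their source are
    // assumptions, not part of this file):
    //
    //     let data: &[u8] = /* ELF bytes loaded by the caller */;
    //     let ctx = ContextImpl::new_user(data);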

    /// Fork
    pub fn fork(&self, tf: &TrapFrame) -> Box<Context> {
        // Clone the memory set and make a new page table
        let mut memory_set = self.memory_set.clone();
        // Add the new memory set to the recorder
        debug!("fork! new page table token: {:x?}", memory_set.token());
        let mmset_ptr = ((&mut memory_set) as *mut MemorySet) as usize;
        memory_set_record().push_back(mmset_ptr);

        // Copy data to temp space
        use alloc::vec::Vec;
        let datas: Vec<Vec<u8>> = memory_set.iter().map(|area| {
            Vec::from(unsafe { area.as_slice() })
        }).collect();

        // Temporarily switch to the new page table, in order to copy data
        unsafe {
            memory_set.with(|| {
                for (area, data) in memory_set.iter().zip(datas.iter()) {
                    unsafe { area.as_slice_mut() }.copy_from_slice(data.as_slice())
                }
            });
        }

        let kstack = KernelStack::new();
        // Map the memory set swappable (currently disabled)
        //memory_set_map_swappable(&mut memory_set);
        // Remove the raw pointer from the recorder, since the memory set will
        // be owned by the new context from now on.
        let id = memory_set_record().iter()
            .position(|x| x.clone() == mmset_ptr).unwrap();
        memory_set_record().remove(id);

        Box::new(ContextImpl {
            arch: unsafe { ArchContext::new_fork(tf, kstack.top(), memory_set.token()) },
            memory_set,
            kstack,
        })
    }
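
    // A minimal usage sketch (hypothetical; `tf` stands for the trap frame
    // captured on entry to the fork syscall, not constructed here):
    //
    //     let child = parent_context.fork(&tf);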

    pub fn get_memory_set_mut(&mut self) -> &mut MemorySet {
        &mut self.memory_set
    }
}

impl Drop for ContextImpl {
    fn drop(&mut self) {
        // Remove the memory set from the recorder (deprecated in the latest version)
        /*
        let id = memory_set_record().iter()
            .position(|x| unsafe { (*(x.clone() as *mut MemorySet)).token() == self.memory_set.token() });
        if id.is_some() {
            info!("remove id {:x?}", id.unwrap());
            memory_set_record().remove(id.unwrap());
        }
        */
        // Make the user pages in the memory set unswappable
        let Self { ref mut arch, ref mut memory_set, ref mut kstack } = self;
        let pt = {
            memory_set.get_page_table_mut() as *mut InactivePageTable0
        };
        for area in memory_set.iter() {
            for page in Page::range_of(area.get_start_addr(), area.get_end_addr()) {
                let addr = page.start_address();
                unsafe {
                    active_table_swap().remove_from_swappable(pt, addr, || alloc_frame().unwrap());
                }
            }
        }
        debug!("Finished setting pages unswappable");
    }
}

impl Debug for ContextImpl {
    fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
        write!(f, "{:x?}", self.arch)
    }
}

/// Generate a memory set according to the ELF file.
///
/// * `elf`: the source ELF file.
///
/// Returns the new memory set covering all of the file's LOAD segments.
fn memory_set_from<'a>(elf: &'a ElfFile<'a>) -> MemorySet {
    debug!("creating memory set from ELF");
    let mut set = MemorySet::new();
    for ph in elf.program_iter() {
        if ph.get_type() != Ok(Type::Load) {
            continue;
        }
        let (virt_addr, mem_size, flags) = match ph {
            ProgramHeader::Ph32(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),
            ProgramHeader::Ph64(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),
        };
        set.push(MemoryArea::new(virt_addr, virt_addr + mem_size, memory_attr_from(flags), ""));
    }
    set
}

fn memory_attr_from(elf_flags: Flags) -> MemoryAttr {
    let mut flags = MemoryAttr::default().user();
    // TODO: handle readonly
    if elf_flags.is_execute() { flags = flags.execute(); }
    flags
}

/// Mark all pages of the areas in `memory_set` swappable, intended
/// specifically for user processes.
///
/// * `memory_set`: the target MemorySet to make swappable.
pub fn memory_set_map_swappable(memory_set: &mut MemorySet) {
    let pt = unsafe {
        memory_set.get_page_table_mut() as *mut InactivePageTable0
    };
    for area in memory_set.iter() {
        for page in Page::range_of(area.get_start_addr(), area.get_end_addr()) {
            let addr = page.start_address();
            unsafe { active_table_swap().set_swappable(pt, addr); }
        }
    }
    info!("Finished setting pages swappable");
}
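
// A minimal usage sketch (hypothetical): the commented-out call sites in
// new_user and fork above could re-enable swapping like this:
//
//     memory_set_map_swappable(&mut memory_set);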