// rCore/src/lib.rs
// (mirror of https://github.com/rcore-os/rCore.git)
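// Crate root of the rCore kernel: nightly feature gates, external crates,
// module declarations, and the `rust_main` entry point.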

#![feature(ptr_internals)]
#![feature(lang_items)]
#![feature(const_fn)]
#![feature(alloc)]
#![feature(const_unique_new, const_atomic_usize_new)]
#![feature(unique)]
#![feature(allocator_api)]
#![feature(global_allocator)]
#![feature(abi_x86_interrupt)]
#![feature(iterator_step_by)]
#![feature(unboxed_closures)]
#![no_std]

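// External crates; `alloc` (with `#[macro_use]`) provides `format!`, `vec!`,
// and the collection types in `no_std`.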
#[macro_use]
extern crate alloc;
extern crate rlibc;
extern crate volatile;
extern crate spin;
extern crate multiboot2;
#[macro_use]
extern crate bitflags;
extern crate x86_64;
#[macro_use]
extern crate once;
extern crate linked_list_allocator;
#[macro_use]
extern crate lazy_static;
extern crate bit_field;
extern crate syscall;

#[macro_use] // print!
mod io;
mod memory;
mod lang;
mod util;
#[macro_use] // test!
mod test_util;
mod consts;

#[allow(dead_code)]
#[cfg(target_arch = "x86_64")]
#[path = "arch/x86_64/mod.rs"]
mod arch;

// The entry point of the Rust kernel
#[no_mangle]
pub extern "C" fn rust_main(multiboot_information_address: usize) {
    // ATTENTION: we have a very small stack and no guard page
    println!("Hello World{}", "!");

    let boot_info = unsafe { multiboot2::load(multiboot_information_address) };
    arch::init();

    // set up guard page and map the heap pages
    let mut memory_controller = memory::init(boot_info);
    unsafe {
        // Give the global allocator its fixed virtual address range.
        use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
        HEAP_ALLOCATOR.lock().init(KERNEL_HEAP_OFFSET, KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE);
    }

    // A dedicated stack for the double fault handler, installed via the GDT/TSS.
    let double_fault_stack = memory_controller.alloc_stack(1)
        .expect("could not allocate double fault stack");
    arch::gdt::init(double_fault_stack.top());
    arch::idt::init();

    // Run the in-kernel self tests defined in the `test` module below.
    test!(global_allocator);
    test!(guard_page);
    test!(find_mp);

    // Initialize platform drivers; ACPI/MP tables are identity-mapped on demand.
    let acpi = arch::driver::init(
        |addr: usize| memory_controller.map_page_identity(addr));
    // memory_controller.print_page_table();
    arch::smp::start_other_cores(&acpi, &mut memory_controller);

    unsafe { arch::interrupt::enable(); }

    // Idle forever; note that `test_end!()` below is unreachable.
    loop {}

    test_end!();
}

use linked_list_allocator::LockedHeap;

// The kernel heap allocator; its memory range is set up in `rust_main`.
#[global_allocator]
static HEAP_ALLOCATOR: LockedHeap = LockedHeap::empty();

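// In-kernel self tests, run from `rust_main` via the `test!` macro.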
mod test {
    pub fn global_allocator() {
        // Stress the heap allocator with many short-lived allocations.
        for _ in 0..10000 {
            format!("Some String");
        }
    }

    pub fn find_mp() {
        use arch;
        let mp = arch::driver::mp::find_mp();
        assert!(mp.is_some());
    }

    pub fn guard_page() {
        use x86_64;
        // invoke a breakpoint exception
        x86_64::instructions::interrupts::int3();

        fn stack_overflow() {
            stack_overflow(); // for each recursion, the return address is pushed
        }

        // trigger a stack overflow
        stack_overflow();

        println!("It did not crash!");
    }
}