1
0
mirror of https://github.com/rcore-os/rCore.git synced 2024-11-22 08:06:17 +04:00

Merge branch 'master' of github.com:rcore-os/rCore

This commit is contained in:
function2-llx 2020-05-22 23:10:54 +08:00
commit ff54128273
8 changed files with 65 additions and 22 deletions

View File

@ -33,6 +33,7 @@ impl Clone for Box<dyn MemoryHandler> {
/// Allocator of physical memory frames.
///
/// Implementations must be cheap to `Clone` and shareable across
/// threads/harts (`Send + Sync + 'static`), since a single global
/// allocator instance is handed out by value.
pub trait FrameAllocator: Debug + Clone + Send + Sync + 'static {
/// Allocate one frame; returns its physical address, or `None` when no
/// frame is free.
fn alloc(&self) -> Option<PhysAddr>;
/// Allocate `size` physically contiguous frames, aligned to
/// 2^`align_log2` (units as interpreted by the underlying bitmap
/// allocator — frames, not bytes). Returns the first frame's physical
/// address, or `None` on failure.
fn alloc_contiguous(&self, size: usize, align_log2: usize) -> Option<PhysAddr>;
/// Free a frame previously returned by `alloc`/`alloc_contiguous`,
/// identified by its physical address.
fn dealloc(&self, target: PhysAddr);
}

2
kernel/Cargo.lock generated
View File

@ -91,7 +91,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "bitmap-allocator"
version = "0.1.0"
source = "git+https://github.com/rcore-os/bitmap-allocator#ede748530a026f89b15f1b162abb78be44deec44"
source = "git+https://github.com/rcore-os/bitmap-allocator#03bd9909d0dc85e99f5559b97a163ab81073df83"
dependencies = [
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
]

View File

@ -9,6 +9,12 @@ pub unsafe fn init_external_interrupt() {
// Hart 0 S-mode interrupt-enable word: set the enable bit for the serial
// IRQ so UART interrupts are delivered to S-mode.
// NOTE(review): the 0x0C00_xxxx addresses match the QEMU `virt` PLIC
// layout (see the QEMU comment below) — confirm before reusing on other
// boards.
const HART0_S_MODE_INTERRUPT_ENABLES: *mut u32 = phys_to_virt(0x0C00_2080) as *mut u32;
const SERIAL: u32 = 0xa;
HART0_S_MODE_INTERRUPT_ENABLES.write_volatile(1 << SERIAL);
// Per-source priority register: base + irq * 4. Priority must be non-zero
// or the source is effectively masked.
const SERIAL_PRIO: *mut u32 = phys_to_virt(0x0C000000 + (SERIAL as usize) * 4) as *mut u32;
SERIAL_PRIO.write_volatile(7); // QEMU: priority[irq] <- value & 0x7, hence the 7 here.
// Hart 0 S-mode priority threshold: sources with priority > threshold may
// interrupt, so 0 lets every enabled source through.
const HART0_S_MODE_PRIO_THRESH: *mut u32 = phys_to_virt(0x0C00_0000 + 0x20_1000) as *mut u32;
HART0_S_MODE_PRIO_THRESH.write_volatile(0); // Permits everything
}
pub unsafe fn enable_serial_interrupt() {

View File

@ -1,5 +1,5 @@
pub use crate::arch::paging::PageTableImpl;
use crate::memory::{alloc_frame, dealloc_frame, phys_to_virt, virt_to_phys};
use crate::memory::{alloc_frame_contiguous, dealloc_frame, phys_to_virt, virt_to_phys};
use isomorphic_drivers::provider;
use rcore_memory::PAGE_SIZE;
@ -24,13 +24,7 @@ impl provider::Provider for Provider {
#[no_mangle]
/// Allocate `pages` physically contiguous frames for a virtio DMA buffer
/// and return the physical address of the first one.
///
/// Panics (`unwrap`) when the frame allocator cannot satisfy the request —
/// DMA setup has no fallback path here.
extern "C" fn virtio_dma_alloc(pages: usize) -> PhysAddr {
    // A single contiguous allocation replaces the old frame-by-frame loop,
    // which relied on the allocator handing back adjacent frames.
    let paddr = alloc_frame_contiguous(pages, 0).unwrap();
    trace!("alloc DMA: paddr={:#x}, pages={}", paddr, pages);
    paddr
}

View File

@ -5,7 +5,9 @@
#![feature(optin_builtin_traits)]
#![feature(panic_info_message)]
#![feature(global_asm)]
#![feature(negative_impls)]
#![feature(alloc_prelude)]
#![feature(const_fn)]
#![deny(unused_must_use)]
#![deny(stable_features)]
#![deny(unused_unsafe)]

View File

@ -48,10 +48,7 @@ pub type FrameAlloc = bitmap_allocator::BitAlloc1M;
#[cfg(feature = "board_k210")]
pub type FrameAlloc = bitmap_allocator::BitAlloc4K;
/// Global physical-frame allocator, protected by a no-IRQ spinlock.
///
/// A plain `static` (rather than `lazy_static!`) is possible because both
/// `SpinNoIrqLock::new` is `const` and the allocator exposes a `const`
/// `DEFAULT` value, so no runtime initialization is needed.
pub static FRAME_ALLOCATOR: SpinNoIrqLock<FrameAlloc> = SpinNoIrqLock::new(FrameAlloc::DEFAULT);
/// Convert physical address to virtual address
#[inline]
@ -85,6 +82,16 @@ impl FrameAllocator for GlobalFrameAlloc {
ret
// TODO: try to swap out when alloc failed
}
/// Allocate `size` physically contiguous frames aligned to 2^`align_log2`,
/// returning the physical address of the first frame, or `None` when the
/// underlying bitmap allocator cannot satisfy the request.
fn alloc_contiguous(&self, size: usize, align_log2: usize) -> Option<PhysAddr> {
// The bitmap allocator hands back a frame index; convert it to a
// physical address: id * PAGE_SIZE + MEMORY_OFFSET.
let ret = FRAME_ALLOCATOR
.lock()
.alloc_contiguous(size, align_log2)
.map(|id| id * PAGE_SIZE + MEMORY_OFFSET);
trace!("Allocate frame: {:x?}", ret);
ret
// TODO: try to swap out when alloc failed
}
fn dealloc(&self, target: usize) {
trace!("Deallocate frame: {:x}", target);
FRAME_ALLOCATOR
@ -99,6 +106,9 @@ pub fn alloc_frame() -> Option<usize> {
/// Free a single frame previously obtained from the global allocator,
/// identified by its physical address. Thin wrapper over
/// `GlobalFrameAlloc::dealloc`.
pub fn dealloc_frame(target: usize) {
GlobalFrameAlloc.dealloc(target);
}
/// Allocate `size` physically contiguous frames aligned to 2^`align_log2`;
/// returns the first frame's physical address, or `None` on failure.
/// Thin wrapper over `GlobalFrameAlloc::alloc_contiguous`.
pub fn alloc_frame_contiguous(size: usize, align_log2: usize) -> Option<usize> {
GlobalFrameAlloc.alloc_contiguous(size, align_log2)
}
pub struct KernelStack(usize);
const KSTACK_SIZE: usize = 0x4000; //16KB

View File

@ -31,8 +31,9 @@ use crate::arch::interrupt;
use crate::processor;
use core::cell::UnsafeCell;
use core::fmt;
use core::mem::MaybeUninit;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::{AtomicBool, Ordering};
use core::sync::atomic::{AtomicBool, AtomicU8, Ordering};
use rcore_thread::std_thread::yield_now;
pub type SpinLock<T> = Mutex<T, Spin>;
@ -41,7 +42,8 @@ pub type SleepLock<T> = Mutex<T, Condvar>;
pub struct Mutex<T: ?Sized, S: MutexSupport> {
lock: AtomicBool,
support: S,
support: MaybeUninit<S>,
support_initialization: AtomicU8, // 0 = uninitialized, 1 = initializing, 2 = initialized
user: UnsafeCell<(usize, usize)>, // (cid, tid)
data: UnsafeCell<T>,
}
@ -76,11 +78,12 @@ impl<T, S: MutexSupport> Mutex<T, S> {
/// drop(lock);
/// }
/// ```
pub fn new(user_data: T) -> Mutex<T, S> {
pub const fn new(user_data: T) -> Mutex<T, S> {
Mutex {
lock: AtomicBool::new(false),
data: UnsafeCell::new(user_data),
support: S::new(),
support: MaybeUninit::uninit(),
support_initialization: AtomicU8::new(0),
user: UnsafeCell::new((0, 0)),
}
}
@ -100,7 +103,7 @@ impl<T: ?Sized, S: MutexSupport> Mutex<T, S> {
let mut try_count = 0;
// Wait until the lock looks unlocked before retrying
while self.lock.load(Ordering::Relaxed) {
self.support.cpu_relax();
unsafe { &*self.support.as_ptr() }.cpu_relax();
try_count += 1;
if try_count == 0x100000 {
let (cid, tid) = unsafe { *self.user.get() };
@ -133,6 +136,9 @@ impl<T: ?Sized, S: MutexSupport> Mutex<T, S> {
/// ```
pub fn lock(&self) -> MutexGuard<T, S> {
let support_guard = S::before_lock();
self.ensure_support();
self.obtain_lock();
MutexGuard {
mutex: self,
@ -148,6 +154,28 @@ impl<T: ?Sized, S: MutexSupport> Mutex<T, S> {
}
yield_now();
}
}
pub fn ensure_support(&self) {
let initialization = self.support_initialization.load(Ordering::Relaxed);
if (initialization == 2) {
return;
};
if (initialization == 1
|| self
.support_initialization
.compare_and_swap(0, 1, Ordering::Acquire)
!= 0)
{
// Wait for another thread to initialize
while self.support_initialization.load(Ordering::Acquire) == 1 {
core::sync::atomic::spin_loop_hint();
}
} else {
// My turn to initialize
(unsafe { core::ptr::write(self.support.as_ptr() as *mut _, S::new()) });
self.support_initialization.store(2, Ordering::Release);
}
}
/// Force unlock the spinlock.
@ -212,7 +240,7 @@ impl<'a, T: ?Sized, S: MutexSupport> Drop for MutexGuard<'a, T, S> {
/// The dropping of the MutexGuard will release the lock it was created from.
fn drop(&mut self) {
    // Release the lock first, then let the support policy run its
    // post-unlock hook (e.g. re-enable IRQs, wake sleepers).
    self.mutex.lock.store(false, Ordering::Release);
    // SAFETY: a guard only exists after `lock()`, which calls
    // `ensure_support()`, so `support` is initialized by now.
    unsafe { &*self.mutex.support.as_ptr() }.after_unlock();
}
}

View File

@ -47,9 +47,9 @@ impl Syscall<'_> {
let slice = unsafe { self.vm().check_read_array(base, len)? };
let file_like = proc.get_file_like(fd)?;
let len = file_like.write(slice)?;
// if len == 1 && !proc.pid.is_init() {
// println!("write content: {}", slice[0] as char);
// }
if len == 1 && !proc.pid.is_init() {
println!("write content: {}", slice[0] as char);
}
Ok(len)
}
@ -1825,6 +1825,8 @@ impl IoVecs {
if readv {
vm.check_write_array(iov.base, iov.len)?;
} else {
vm.check_read_array(iov.base, iov.len)?;
}
slices.push(slice::from_raw_parts_mut(iov.base, iov.len));