1
0
mirror of https://github.com/rcore-os/rCore.git synced 2024-11-22 08:06:17 +04:00

Fixed stack overflow caused by large struct initialization on stack

This commit is contained in:
Liu Xiaoyi 2020-05-13 04:56:24 +08:00
parent f807c951d6
commit 54504eb317
No known key found for this signature in database
GPG Key ID: A04E02BF7E977471
4 changed files with 34 additions and 11 deletions

View File

@ -62,7 +62,7 @@ isomorphic_drivers = { git = "https://github.com/rcore-os/isomorphic_drivers", f
virtio-drivers = { git = "https://github.com/rcore-os/virtio-drivers", rev = "dfa70e14" }
lazy_static = { version = "1.4", features = ["spin_no_std"] }
smoltcp = { git = "https://github.com/rcore-os/smoltcp", rev = "5bd87c7c", default-features = false, features = ["alloc", "log", "ethernet", "proto-ipv4", "proto-igmp", "socket-icmp", "socket-udp", "socket-tcp", "socket-raw"] }
bitmap-allocator = { git = "https://github.com/rcore-os/bitmap-allocator" }
bitmap-allocator = { git = "https://github.com/rcore-os/bitmap-allocator", rev="03bd990" }
rcore-console = { git = "https://github.com/rcore-os/rcore-console", rev = "b7bacf9", default-features = false }
rcore-memory = { path = "../crate/memory" }
rcore-thread = { git = "https://github.com/rcore-os/rcore-thread", rev = "d727949b" }

View File

@ -5,7 +5,9 @@
#![feature(optin_builtin_traits)]
#![feature(panic_info_message)]
#![feature(global_asm)]
#![feature(negative_impls)]
#![feature(alloc_prelude)]
#![feature(const_fn)]
#![deny(unused_must_use)]
#![deny(stable_features)]
#![deny(unused_unsafe)]

View File

@ -48,10 +48,7 @@ pub type FrameAlloc = bitmap_allocator::BitAlloc1M;
#[cfg(feature = "board_k210")]
pub type FrameAlloc = bitmap_allocator::BitAlloc4K;
lazy_static! {
pub static ref FRAME_ALLOCATOR: SpinNoIrqLock<FrameAlloc> =
SpinNoIrqLock::new(FrameAlloc::default());
}
pub static FRAME_ALLOCATOR: SpinNoIrqLock<FrameAlloc> = SpinNoIrqLock::new(FrameAlloc::DEFAULT);
/// Convert physical address to virtual address
#[inline]

View File

@ -32,7 +32,8 @@ use crate::processor;
use core::cell::UnsafeCell;
use core::fmt;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::{AtomicBool, Ordering};
use core::sync::atomic::{AtomicBool, Ordering, AtomicU8};
use core::mem::MaybeUninit;
pub type SpinLock<T> = Mutex<T, Spin>;
pub type SpinNoIrqLock<T> = Mutex<T, SpinNoIrq>;
@ -40,7 +41,8 @@ pub type SleepLock<T> = Mutex<T, Condvar>;
pub struct Mutex<T: ?Sized, S: MutexSupport> {
lock: AtomicBool,
support: S,
support: MaybeUninit<S>,
support_initialization: AtomicU8, // 0 = uninitialized, 1 = initializing, 2 = initialized
user: UnsafeCell<(usize, usize)>, // (cid, tid)
data: UnsafeCell<T>,
}
@ -75,11 +77,12 @@ impl<T, S: MutexSupport> Mutex<T, S> {
/// drop(lock);
/// }
/// ```
pub fn new(user_data: T) -> Mutex<T, S> {
pub const fn new(user_data: T) -> Mutex<T, S> {
Mutex {
lock: AtomicBool::new(false),
data: UnsafeCell::new(user_data),
support: S::new(),
support: MaybeUninit::uninit(),
support_initialization: AtomicU8::new(0),
user: UnsafeCell::new((0, 0)),
}
}
@ -99,7 +102,7 @@ impl<T: ?Sized, S: MutexSupport> Mutex<T, S> {
let mut try_count = 0;
// Wait until the lock looks unlocked before retrying
while self.lock.load(Ordering::Relaxed) {
self.support.cpu_relax();
unsafe { &*self.support.as_ptr() }.cpu_relax();
try_count += 1;
if try_count == 0x100000 {
let (cid, tid) = unsafe { *self.user.get() };
@ -132,6 +135,9 @@ impl<T: ?Sized, S: MutexSupport> Mutex<T, S> {
/// ```
pub fn lock(&self) -> MutexGuard<T, S> {
let support_guard = S::before_lock();
self.ensure_support();
self.obtain_lock();
MutexGuard {
mutex: self,
@ -139,6 +145,24 @@ impl<T: ?Sized, S: MutexSupport> Mutex<T, S> {
}
}
pub fn ensure_support(&self) {
let initialization = self.support_initialization.load(Ordering::Relaxed);
if(initialization == 2) { return };
if(
initialization == 1
|| self.support_initialization.compare_and_swap(0, 1, Ordering::Acquire) != 0
) {
// Wait for another thread to initialize
while self.support_initialization.load(Ordering::Acquire) == 1 {
core::sync::atomic::spin_loop_hint();
}
} else {
// My turn to initialize
(unsafe { core::ptr::write(self.support.as_ptr() as *mut _, S::new()) });
self.support_initialization.store(2, Ordering::Release);
}
}
/// Force unlock the spinlock.
///
/// This is *extremely* unsafe if the lock is not held by the current
@ -201,7 +225,7 @@ impl<'a, T: ?Sized, S: MutexSupport> Drop for MutexGuard<'a, T, S> {
/// The dropping of the MutexGuard will release the lock it was created from.
/// Releases the lock, then lets the support policy run its post-unlock
/// hook (e.g. re-enable interrupts for `SpinNoIrq`).
///
/// The diff residue in this view shows both the old direct call and the new
/// `MaybeUninit` form; only the latter belongs after this change.
fn drop(&mut self) {
    self.mutex.lock.store(false, Ordering::Release);
    // SAFETY: a guard can only be created by `lock()`, which calls
    // `ensure_support()` first, so `support` is initialized here.
    unsafe { &*self.mutex.support.as_ptr() }.after_unlock();
}
}