diff --git a/library/std/src/backtrace.rs b/library/std/src/backtrace.rs
index 5cf6ec8178928..ef4fa5fcd1035 100644
--- a/library/std/src/backtrace.rs
+++ b/library/std/src/backtrace.rs
@@ -325,8 +325,7 @@ impl Backtrace {
     // Capture a backtrace which starts just before the function addressed by
     // `ip`
     fn create(ip: usize) -> Backtrace {
-        // SAFETY: We don't attempt to lock this reentrantly.
-        let _lock = unsafe { lock() };
+        let _lock = lock();
         let mut frames = Vec::new();
         let mut actual_start = None;
         unsafe {
@@ -469,8 +468,7 @@ impl Capture {
         // Use the global backtrace lock to synchronize this as it's a
         // requirement of the `backtrace` crate, and then actually resolve
         // everything.
-        // SAFETY: We don't attempt to lock this reentrantly.
-        let _lock = unsafe { lock() };
+        let _lock = lock();
         for frame in self.frames.iter_mut() {
             let symbols = &mut frame.symbols;
             let frame = match &frame.frame {
diff --git a/library/std/src/io/stdio.rs b/library/std/src/io/stdio.rs
index 4d3736f79146c..8a771b41a6576 100644
--- a/library/std/src/io/stdio.rs
+++ b/library/std/src/io/stdio.rs
@@ -8,7 +8,6 @@ use crate::io::prelude::*;
 use crate::cell::{Cell, RefCell};
 use crate::fmt;
 use crate::io::{self, BufReader, IoSlice, IoSliceMut, LineWriter, Lines};
-use crate::pin::Pin;
 use crate::sync::atomic::{AtomicBool, Ordering};
 use crate::sync::{Arc, Mutex, MutexGuard, OnceLock};
 use crate::sys::stdio;
@@ -526,7 +525,7 @@ pub struct Stdout {
     // FIXME: this should be LineWriter or BufWriter depending on the state of
     //        stdout (tty or not). Note that if this is not line buffered it
     //        should also flush-on-panic or some form of flush-on-abort.
-    inner: Pin<&'static ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>>,
+    inner: &'static ReentrantMutex<RefCell<Option<LineWriter<StdoutRaw>>>>,
 }
 
 /// A locked reference to the [`Stdout`] handle.
@@ -548,10 +547,11 @@ pub struct Stdout {
 #[must_use = "if unused stdout will immediately unlock"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct StdoutLock<'a> {
-    inner: ReentrantMutexGuard<'a, RefCell<LineWriter<StdoutRaw>>>,
+    inner: ReentrantMutexGuard<'a, RefCell<Option<LineWriter<StdoutRaw>>>>,
 }
 
-static STDOUT: OnceLock<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> = OnceLock::new();
+static STDOUT: ReentrantMutex<RefCell<Option<LineWriter<StdoutRaw>>>> =
+    ReentrantMutex::new(RefCell::new(None));
 
 /// Constructs a new handle to the standard output of the current process.
 ///
@@ -602,25 +602,18 @@ static STDOUT: OnceLock<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> = OnceLock::new();
 #[must_use]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub fn stdout() -> Stdout {
-    Stdout {
-        inner: Pin::static_ref(&STDOUT).get_or_init_pin(
-            || unsafe { ReentrantMutex::new(RefCell::new(LineWriter::new(stdout_raw()))) },
-            |mutex| unsafe { mutex.init() },
-        ),
-    }
+    Stdout { inner: &STDOUT }
 }
 
 pub fn cleanup() {
-    if let Some(instance) = STDOUT.get() {
-        // Flush the data and disable buffering during shutdown
-        // by replacing the line writer by one with zero
-        // buffering capacity.
-        // We use try_lock() instead of lock(), because someone
-        // might have leaked a StdoutLock, which would
-        // otherwise cause a deadlock here.
-        if let Some(lock) = Pin::static_ref(instance).try_lock() {
-            *lock.borrow_mut() = LineWriter::with_capacity(0, stdout_raw());
-        }
+    // Flush the data and disable buffering during shutdown
+    // by replacing the line writer by one with zero
+    // buffering capacity.
+    // We use try_lock() instead of lock(), because someone
+    // might have leaked a StdoutLock, which would
+    // otherwise cause a deadlock here.
+    if let Some(lock) = STDOUT.try_lock() {
+        *lock.borrow_mut() = Some(LineWriter::with_capacity(0, stdout_raw()));
+    }
 }
 
@@ -670,7 +663,7 @@ impl Write for Stdout {
     }
     #[inline]
     fn is_write_vectored(&self) -> bool {
-        io::Write::is_write_vectored(&&*self)
+        stdout_raw().is_write_vectored()
     }
     fn flush(&mut self) -> io::Result<()> {
         (&*self).flush()
@@ -696,7 +689,7 @@ impl Write for &Stdout {
     }
     #[inline]
     fn is_write_vectored(&self) -> bool {
-        self.lock().is_write_vectored()
+        stdout_raw().is_write_vectored()
     }
     fn flush(&mut self) -> io::Result<()> {
         self.lock().flush()
@@ -712,26 +705,37 @@ impl Write for &Stdout {
     }
 }
 
+impl StdoutLock<'_> {
+    #[inline]
+    fn with_inner<F, R>(&mut self, f: F) -> R
+    where
+        F: FnOnce(&mut LineWriter<StdoutRaw>) -> R,
+    {
+        let mut inner = self.inner.borrow_mut();
+        f(inner.get_or_insert_with(|| LineWriter::new(stdout_raw())))
+    }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl Write for StdoutLock<'_> {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
-        self.inner.borrow_mut().write(buf)
+        self.with_inner(|stdout| stdout.write(buf))
     }
     fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
-        self.inner.borrow_mut().write_vectored(bufs)
+        self.with_inner(|stdout| stdout.write_vectored(bufs))
     }
     #[inline]
     fn is_write_vectored(&self) -> bool {
-        self.inner.borrow_mut().is_write_vectored()
+        stdout_raw().is_write_vectored()
     }
     fn flush(&mut self) -> io::Result<()> {
-        self.inner.borrow_mut().flush()
+        self.with_inner(|stdout| stdout.flush())
     }
     fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
-        self.inner.borrow_mut().write_all(buf)
+        self.with_inner(|stdout| stdout.write_all(buf))
     }
     fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
-        self.inner.borrow_mut().write_all_vectored(bufs)
+        self.with_inner(|stdout| stdout.write_all_vectored(bufs))
     }
 }
 
@@ -761,7 +765,7 @@ impl fmt::Debug for StdoutLock<'_> {
 /// standard library or via raw Windows API calls, will fail.
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Stderr {
-    inner: Pin<&'static ReentrantMutex<RefCell<StderrRaw>>>,
+    inner: &'static ReentrantMutex<RefCell<StderrRaw>>,
 }
 
 /// A locked reference to the [`Stderr`] handle.
@@ -834,16 +838,12 @@ pub struct StderrLock<'a> {
 #[stable(feature = "rust1", since = "1.0.0")]
 pub fn stderr() -> Stderr {
     // Note that unlike `stdout()` we don't use `at_exit` here to register a
-    // destructor. Stderr is not buffered , so there's no need to run a
+    // destructor. Stderr is not buffered, so there's no need to run a
     // destructor for flushing the buffer
-    static INSTANCE: OnceLock<ReentrantMutex<RefCell<StderrRaw>>> = OnceLock::new();
+    static INSTANCE: ReentrantMutex<RefCell<StderrRaw>> =
+        ReentrantMutex::new(RefCell::new(stderr_raw()));
 
-    Stderr {
-        inner: Pin::static_ref(&INSTANCE).get_or_init_pin(
-            || unsafe { ReentrantMutex::new(RefCell::new(stderr_raw())) },
-            |mutex| unsafe { mutex.init() },
-        ),
-    }
+    Stderr { inner: &INSTANCE }
 }
 
 impl Stderr {
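The stdout change works because the `LineWriter` is now built lazily, inside the already-held lock, which is what lets `STDOUT` be a const-initialized static with no `Pin`/`OnceLock` dance. A minimal sketch of the same pattern using the public `Mutex` in place of the internal `ReentrantMutex` (names here are illustrative, not std internals):

```rust
use std::io::{LineWriter, Write};
use std::sync::Mutex;

// Stand-in for `ReentrantMutex<RefCell<Option<LineWriter<StdoutRaw>>>>`:
// const-initialized with `None`, writer constructed on first use.
static WRITER: Mutex<Option<LineWriter<Vec<u8>>>> = Mutex::new(None);

fn with_writer<R>(f: impl FnOnce(&mut LineWriter<Vec<u8>>) -> R) -> R {
    let mut guard = WRITER.lock().unwrap();
    // Mirrors `StdoutLock::with_inner`: lazy construction under the lock.
    f(guard.get_or_insert_with(|| LineWriter::new(Vec::new())))
}

fn main() {
    with_writer(|w| w.write_all(b"hello\n")).unwrap();
}
```

`cleanup` can then swap in a zero-capacity writer with `try_lock`, exactly as the diff does, without ever racing an in-flight initialization.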
diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs
index 25c9201f2ed3a..66dd5ff2a40fe 100644
--- a/library/std/src/panicking.rs
+++ b/library/std/src/panicking.rs
@@ -18,9 +18,9 @@ use crate::intrinsics;
 use crate::mem::{self, ManuallyDrop};
 use crate::process;
 use crate::sync::atomic::{AtomicBool, Ordering};
+use crate::sync::{PoisonError, RwLock};
 use crate::sys::stdio::panic_output;
 use crate::sys_common::backtrace;
-use crate::sys_common::rwlock::StaticRwLock;
 use crate::sys_common::thread_info;
 use crate::thread;
 
@@ -71,20 +71,31 @@ extern "C" fn __rust_foreign_exception() -> ! {
     rtabort!("Rust cannot catch foreign exceptions");
 }
 
-#[derive(Copy, Clone)]
 enum Hook {
     Default,
-    Custom(*mut (dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send)),
+    Custom(Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>),
 }
 
 impl Hook {
     fn custom(f: impl Fn(&PanicInfo<'_>) + 'static + Sync + Send) -> Self {
-        Self::Custom(Box::into_raw(Box::new(f)))
+        Self::Custom(Box::new(f))
+    }
+
+    fn into_box(self) -> Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send> {
+        match self {
+            Hook::Default => Box::new(default_hook),
+            Hook::Custom(hook) => hook,
+        }
     }
 }
 
-static HOOK_LOCK: StaticRwLock = StaticRwLock::new();
-static mut HOOK: Hook = Hook::Default;
+impl Default for Hook {
+    fn default() -> Hook {
+        Hook::Default
+    }
+}
+
+static HOOK: RwLock<Hook> = RwLock::new(Hook::Default);
 
 /// Registers a custom panic hook, replacing any that was previously registered.
 ///
@@ -125,24 +136,7 @@ pub fn set_hook(hook: Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>) {
         panic!("cannot modify the panic hook from a panicking thread");
     }
 
-    // SAFETY:
-    //
-    // - `HOOK` can only be modified while holding write access to `HOOK_LOCK`.
-    // - The argument of `Box::from_raw` is always a valid pointer that was created using
-    //   `Box::into_raw`.
-    unsafe {
-        let guard = HOOK_LOCK.write();
-        let old_hook = HOOK;
-        HOOK = Hook::Custom(Box::into_raw(hook));
-        drop(guard);
-
-        if let Hook::Custom(ptr) = old_hook {
-            #[allow(unused_must_use)]
-            {
-                Box::from_raw(ptr);
-            }
-        }
-    }
+    *HOOK.write().unwrap_or_else(PoisonError::into_inner) = Hook::custom(hook);
 }
 
 /// Unregisters the current panic hook, returning it.
///
@@ -179,22 +173,11 @@ pub fn take_hook() -> Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send> {
         panic!("cannot modify the panic hook from a panicking thread");
     }
 
-    // SAFETY:
-    //
-    // - `HOOK` can only be modified while holding write access to `HOOK_LOCK`.
-    // - The argument of `Box::from_raw` is always a valid pointer that was created using
-    //   `Box::into_raw`.
-    unsafe {
-        let guard = HOOK_LOCK.write();
-        let hook = HOOK;
-        HOOK = Hook::Default;
-        drop(guard);
+    let mut hook = HOOK.write().unwrap_or_else(PoisonError::into_inner);
+    let old_hook = mem::take(&mut *hook);
+    drop(hook);
 
-        match hook {
-            Hook::Default => Box::new(default_hook),
-            Hook::Custom(ptr) => Box::from_raw(ptr),
-        }
-    }
+    old_hook.into_box()
 }
 
 /// Atomic combination of [`take_hook`] and [`set_hook`]. Use this to replace the panic handler with
@@ -240,24 +223,10 @@ where
         panic!("cannot modify the panic hook from a panicking thread");
     }
 
-    // SAFETY:
-    //
-    // - `HOOK` can only be modified while holding write access to `HOOK_LOCK`.
-    // - The argument of `Box::from_raw` is always a valid pointer that was created using
-    //   `Box::into_raw`.
-    unsafe {
-        let guard = HOOK_LOCK.write();
-        let old_hook = HOOK;
-        HOOK = Hook::Default;
-
-        let prev = match old_hook {
-            Hook::Default => Box::new(default_hook),
-            Hook::Custom(ptr) => Box::from_raw(ptr),
-        };
-
-        HOOK = Hook::custom(move |info| hook_fn(&prev, info));
-        drop(guard);
-    }
+    let mut hook = HOOK.write().unwrap_or_else(PoisonError::into_inner);
+    let old_hook = mem::take(&mut *hook);
+    let prev = old_hook.into_box();
+    *hook = Hook::custom(move |info| hook_fn(&prev, info));
 }
 
 fn default_hook(info: &PanicInfo<'_>) {
@@ -682,27 +651,26 @@ fn rust_panic_with_hook(
         crate::sys::abort_internal();
     }
 
-    unsafe {
-        let mut info = PanicInfo::internal_constructor(message, location, can_unwind);
-        let _guard = HOOK_LOCK.read();
-        match HOOK {
-            // Some platforms (like wasm) know that printing to stderr won't ever actually
-            // print anything, and if that's the case we can skip the default
-            // hook. Since string formatting happens lazily when calling `payload`
-            // methods, this means we avoid formatting the string at all!
-            // (The panic runtime might still call `payload.take_box()` though and trigger
-            // formatting.)
-            Hook::Default if panic_output().is_none() => {}
-            Hook::Default => {
-                info.set_payload(payload.get());
-                default_hook(&info);
-            }
-            Hook::Custom(ptr) => {
-                info.set_payload(payload.get());
-                (*ptr)(&info);
-            }
-        };
-    }
+    let mut info = PanicInfo::internal_constructor(message, location, can_unwind);
+    let hook = HOOK.read().unwrap_or_else(PoisonError::into_inner);
+    match *hook {
+        // Some platforms (like wasm) know that printing to stderr won't ever actually
+        // print anything, and if that's the case we can skip the default
+        // hook. Since string formatting happens lazily when calling `payload`
+        // methods, this means we avoid formatting the string at all!
+        // (The panic runtime might still call `payload.take_box()` though and trigger
+        // formatting.)
+        Hook::Default if panic_output().is_none() => {}
+        Hook::Default => {
+            info.set_payload(payload.get());
+            default_hook(&info);
+        }
+        Hook::Custom(ref hook) => {
+            info.set_payload(payload.get());
+            hook(&info);
+        }
+    };
+    drop(hook);
 
     if panics > 1 || !can_unwind {
         // If a thread panics while it's already unwinding then we
diff --git a/library/std/src/sync/condvar.rs b/library/std/src/sync/condvar.rs
index eb1e7135a6e41..76a1b4a2a86cd 100644
--- a/library/std/src/sync/condvar.rs
+++ b/library/std/src/sync/condvar.rs
@@ -3,7 +3,7 @@ mod tests;
 
 use crate::fmt;
 use crate::sync::{mutex, poison, LockResult, MutexGuard, PoisonError};
-use crate::sys_common::condvar as sys;
+use crate::sys::locks as sys;
 use crate::time::{Duration, Instant};
 
 /// A type indicating whether a timed wait on a condition variable returned
diff --git a/library/std/src/sync/mutex.rs b/library/std/src/sync/mutex.rs
index e0d13cd648c64..3c2ae544b6284 100644
--- a/library/std/src/sync/mutex.rs
+++ b/library/std/src/sync/mutex.rs
@@ -5,7 +5,7 @@ use crate::cell::UnsafeCell;
 use crate::fmt;
 use crate::ops::{Deref, DerefMut};
 use crate::sync::{poison, LockResult, TryLockError, TryLockResult};
-use crate::sys_common::mutex as sys;
+use crate::sys::locks as sys;
 
 /// A mutual exclusion primitive useful for protecting shared data
 ///
@@ -163,7 +163,7 @@ use crate::sys_common::mutex as sys;
 #[stable(feature = "rust1", since = "1.0.0")]
 #[cfg_attr(not(test), rustc_diagnostic_item = "Mutex")]
 pub struct Mutex<T: ?Sized> {
-    inner: sys::MovableMutex,
+    inner: sys::Mutex,
     poison: poison::Flag,
     data: UnsafeCell<T>,
 }
@@ -216,11 +216,7 @@ impl<T> Mutex<T> {
     #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
     #[inline]
     pub const fn new(t: T) -> Mutex<T> {
-        Mutex {
-            inner: sys::MovableMutex::new(),
-            poison: poison::Flag::new(),
-            data: UnsafeCell::new(t),
-        }
+        Mutex { inner: sys::Mutex::new(), poison: poison::Flag::new(), data: UnsafeCell::new(t) }
     }
 }
 
@@ -263,7 +259,7 @@ impl<T: ?Sized> Mutex<T> {
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn lock(&self) -> LockResult<MutexGuard<'_, T>> {
         unsafe {
-            self.inner.raw_lock();
+            self.inner.lock();
             MutexGuard::new(self)
         }
     }
@@ -525,7 +521,7 @@ impl<T: ?Sized> Drop for MutexGuard<'_, T> {
     fn drop(&mut self) {
         unsafe {
             self.lock.poison.done(&self.poison);
-            self.lock.inner.raw_unlock();
+            self.lock.inner.unlock();
         }
     }
 }
@@ -544,7 +540,7 @@ impl<T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'_, T> {
     }
 }
 
-pub fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a sys::MovableMutex {
+pub fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a sys::Mutex {
     &guard.lock.inner
 }
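Two idioms from the hunks above are worth seeing in user code. With `sys::Mutex` const-constructible, `Mutex::new` works directly in a `static` with no lazy wrapper or `init` step, and the poison-ignoring pattern used for `HOOK` applies to any lock whose data remains valid across a panic. A minimal sketch:

```rust
use std::sync::{Mutex, PoisonError};

// `Mutex::new` is a const fn (since 1.63), so no OnceLock/lazy_static needed.
static COUNTER: Mutex<u64> = Mutex::new(0);

fn bump() -> u64 {
    // As in the panic-hook code: a poisoned counter is still usable,
    // so recover the guard instead of propagating the poison error.
    let mut n = COUNTER.lock().unwrap_or_else(PoisonError::into_inner);
    *n += 1;
    *n
}

fn main() {
    assert_eq!(bump(), 1);
    assert_eq!(bump(), 2);
}
```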
diff --git a/library/std/src/sync/once_lock.rs b/library/std/src/sync/once_lock.rs
index 813516040cdb6..37413ec62a717 100644
--- a/library/std/src/sync/once_lock.rs
+++ b/library/std/src/sync/once_lock.rs
@@ -3,7 +3,6 @@ use crate::fmt;
 use crate::marker::PhantomData;
 use crate::mem::MaybeUninit;
 use crate::panic::{RefUnwindSafe, UnwindSafe};
-use crate::pin::Pin;
 use crate::sync::Once;
 
 /// A synchronization primitive which can be written to only once.
@@ -223,60 +222,6 @@ impl<T> OnceLock<T> {
         Ok(unsafe { self.get_unchecked() })
     }
 
-    /// Internal-only API that gets the contents of the cell, initializing it
-    /// in two steps with `f` and `g` if the cell was empty.
-    ///
-    /// `f` is called to construct the value, which is then moved into the cell
-    /// and given as a (pinned) mutable reference to `g` to finish
-    /// initialization.
-    ///
-    /// This allows `g` to inspect and manipulate the value after it has been
-    /// moved into its final place in the cell, but before the cell is
-    /// considered initialized.
-    ///
-    /// # Panics
-    ///
-    /// If `f` or `g` panics, the panic is propagated to the caller, and the
-    /// cell remains uninitialized.
-    ///
-    /// With the current implementation, if `g` panics, the value from `f` will
-    /// not be dropped. This should probably be fixed if this is ever used for
-    /// a type where this matters.
-    ///
-    /// It is an error to reentrantly initialize the cell from `f`. The exact
-    /// outcome is unspecified. Current implementation deadlocks, but this may
-    /// be changed to a panic in the future.
-    pub(crate) fn get_or_init_pin<F, G>(self: Pin<&Self>, f: F, g: G) -> Pin<&T>
-    where
-        F: FnOnce() -> T,
-        G: FnOnce(Pin<&mut T>),
-    {
-        if let Some(value) = self.get_ref().get() {
-            // SAFETY: The inner value was already initialized, and will not be
-            // moved anymore.
-            return unsafe { Pin::new_unchecked(value) };
-        }
-
-        let slot = &self.value;
-
-        // Ignore poisoning from other threads
-        // If another thread panics, then we'll be able to run our closure
-        self.once.call_once_force(|_| {
-            let value = f();
-            // SAFETY: We use the Once (self.once) to guarantee unique access
-            // to the UnsafeCell (slot).
-            let value: &mut T = unsafe { (&mut *slot.get()).write(value) };
-            // SAFETY: The value has been written to its final place in
-            // self.value. We do not move it anymore, which we promise here
-            // with a Pin<&mut T>.
-            g(unsafe { Pin::new_unchecked(value) });
-        });
-
-        // SAFETY: The inner value has been initialized, and will not be moved
-        // anymore.
-        unsafe { Pin::new_unchecked(self.get_ref().get_unchecked()) }
-    }
-
     /// Consumes the `OnceLock`, returning the wrapped value. Returns
     /// `None` if the cell was empty.
     ///
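`get_or_init_pin` appears to have existed only so the pinned stdio statics could run the lock's `init` step after moving the value into place; with const-constructible locks that second step is gone, and the surviving public API is plain `get_or_init`. A small usage sketch (names illustrative):

```rust
use std::sync::OnceLock;

static CONFIG: OnceLock<String> = OnceLock::new();

fn config() -> &'static str {
    // One-step initialization: no pinning, no post-move fixup required.
    CONFIG.get_or_init(|| {
        std::env::var("APP_CONFIG").unwrap_or_else(|_| "default".to_owned())
    })
}

fn main() {
    println!("{}", config());
}
```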
diff --git a/library/std/src/sync/rwlock.rs b/library/std/src/sync/rwlock.rs
index 6e4a2cfc82afd..acb1b284bb2df 100644
--- a/library/std/src/sync/rwlock.rs
+++ b/library/std/src/sync/rwlock.rs
@@ -6,7 +6,7 @@ use crate::fmt;
 use crate::ops::{Deref, DerefMut};
 use crate::ptr::NonNull;
 use crate::sync::{poison, LockResult, TryLockError, TryLockResult};
-use crate::sys_common::rwlock as sys;
+use crate::sys::locks as sys;
 
 /// A reader-writer lock
 ///
@@ -77,7 +77,7 @@ use crate::sys_common::rwlock as sys;
 /// [`Mutex`]: super::Mutex
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct RwLock<T: ?Sized> {
-    inner: sys::MovableRwLock,
+    inner: sys::RwLock,
     poison: poison::Flag,
     data: UnsafeCell<T>,
 }
@@ -107,7 +107,7 @@ pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
     // `NonNull` is also covariant over `T`, just like we would have with `&T`. `NonNull`
     // is preferable over `*const T` to allow for niche optimization.
     data: NonNull<T>,
-    inner_lock: &'a sys::MovableRwLock,
+    inner_lock: &'a sys::RwLock,
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -155,11 +155,7 @@ impl<T> RwLock<T> {
     #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
     #[inline]
     pub const fn new(t: T) -> RwLock<T> {
-        RwLock {
-            inner: sys::MovableRwLock::new(),
-            poison: poison::Flag::new(),
-            data: UnsafeCell::new(t),
-        }
+        RwLock { inner: sys::RwLock::new(), poison: poison::Flag::new(), data: UnsafeCell::new(t) }
     }
 }
diff --git a/library/std/src/sys/hermit/args.rs b/library/std/src/sys/hermit/args.rs
index 1c7e1dd8d5778..a63c0a7150224 100644
--- a/library/std/src/sys/hermit/args.rs
+++ b/library/std/src/sys/hermit/args.rs
@@ -57,12 +57,11 @@ mod imp {
     use crate::ffi::{CStr, OsString};
     use crate::os::unix::ffi::OsStringExt;
     use crate::ptr;
-
-    use crate::sys_common::mutex::StaticMutex;
+    use crate::sync::Mutex;
 
     static mut ARGC: isize = 0;
     static mut ARGV: *const *const u8 = ptr::null();
-    static LOCK: StaticMutex = StaticMutex::new();
+    static LOCK: Mutex<()> = Mutex::new(());
 
     pub unsafe fn init(argc: isize, argv: *const *const u8) {
         let _guard = LOCK.lock();
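With `Mutex::new` const, a `Mutex<()>` now serves directly as a standalone lock guarding C-style mutable statics, replacing the special-purpose `sys_common::mutex::StaticMutex`. The same shape in isolation (a sketch; real code would also hold the lock while reading):

```rust
use std::sync::Mutex;

// Plain unit mutex as a lock, in the style of hermit's `args` module above.
static LOCK: Mutex<()> = Mutex::new(());
static mut ARGC: isize = 0;

unsafe fn init(argc: isize) {
    // The guard serializes writers to the unsynchronized static.
    let _guard = LOCK.lock().unwrap();
    ARGC = argc;
}

fn main() {
    unsafe { init(3) };
}
```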
diff --git a/library/std/src/sys/hermit/condvar.rs b/library/std/src/sys/hermit/condvar.rs
index 22059ca0dbe10..9d09c40b22d3e 100644
--- a/library/std/src/sys/hermit/condvar.rs
+++ b/library/std/src/sys/hermit/condvar.rs
@@ -1,9 +1,12 @@
 use crate::ffi::c_void;
+use crate::mem::MaybeUninit;
 use crate::ptr;
-use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst};
+use crate::sync::atomic::{
+    AtomicPtr, AtomicUsize,
+    Ordering::{Acquire, Release, SeqCst},
+};
 use crate::sys::hermit::abi;
 use crate::sys::locks::Mutex;
-use crate::sys_common::lazy_box::{LazyBox, LazyInit};
 use crate::time::Duration;
 
 // The implementation is inspired by Andrew D. Birrell's paper
@@ -11,70 +14,93 @@ use crate::time::Duration;
 
 pub struct Condvar {
     counter: AtomicUsize,
-    sem1: *const c_void,
-    sem2: *const c_void,
+    sem1: AtomicPtr<c_void>,
+    sem2: AtomicPtr<c_void>,
 }
 
-pub(crate) type MovableCondvar = LazyBox<Condvar>;
+#[cold]
+fn init_semaphore(sem: &AtomicPtr<c_void>) -> *mut c_void {
+    let new = unsafe {
+        let mut new = MaybeUninit::uninit();
+        let _ = abi::sem_init(new.as_mut_ptr(), 0);
+        new.assume_init() as *mut c_void
+    };
 
-impl LazyInit for Condvar {
-    fn init() -> Box<Self> {
-        Box::new(Self::new())
+    match sem.compare_exchange(ptr::null_mut(), new, Release, Acquire) {
+        Ok(_) => new,
+        Err(sem) => unsafe {
+            let _ = abi::sem_destroy(new);
+            sem
+        },
     }
 }
 
-unsafe impl Send for Condvar {}
-unsafe impl Sync for Condvar {}
-
 impl Condvar {
-    pub fn new() -> Self {
-        let mut condvar =
-            Self { counter: AtomicUsize::new(0), sem1: ptr::null(), sem2: ptr::null() };
-        unsafe {
-            let _ = abi::sem_init(&mut condvar.sem1, 0);
-            let _ = abi::sem_init(&mut condvar.sem2, 0);
+    #[inline]
+    pub const fn new() -> Self {
+        Self {
+            counter: AtomicUsize::new(0),
+            sem1: AtomicPtr::new(ptr::null_mut()),
+            sem2: AtomicPtr::new(ptr::null_mut()),
+        }
+    }
+
+    #[inline]
+    fn semaphores(&self) -> (*const c_void, *const c_void) {
+        let mut sem1 = self.sem1.load(Acquire);
+        if sem1.is_null() {
+            sem1 = init_semaphore(&self.sem1);
         }
-        condvar
+
+        let mut sem2 = self.sem2.load(Acquire);
+        if sem2.is_null() {
+            sem2 = init_semaphore(&self.sem2);
+        }
+
+        (sem1, sem2)
     }
 
-    pub unsafe fn notify_one(&self) {
+    pub fn notify_one(&self) {
         if self.counter.load(SeqCst) > 0 {
             self.counter.fetch_sub(1, SeqCst);
-            abi::sem_post(self.sem1);
-            abi::sem_timedwait(self.sem2, 0);
+            let (sem1, sem2) = self.semaphores();
+            unsafe {
+                abi::sem_post(sem1);
+                abi::sem_timedwait(sem2, 0);
+            }
         }
     }
 
-    pub unsafe fn notify_all(&self) {
+    pub fn notify_all(&self) {
         let counter = self.counter.swap(0, SeqCst);
+        let (sem1, sem2) = self.semaphores();
         for _ in 0..counter {
-            abi::sem_post(self.sem1);
+            unsafe { abi::sem_post(sem1) };
         }
         for _ in 0..counter {
-            abi::sem_timedwait(self.sem2, 0);
+            unsafe { abi::sem_timedwait(sem2, 0) };
         }
     }
 
     pub unsafe fn wait(&self, mutex: &Mutex) {
         self.counter.fetch_add(1, SeqCst);
+        let (sem1, sem2) = self.semaphores();
         mutex.unlock();
-        abi::sem_timedwait(self.sem1, 0);
-        abi::sem_post(self.sem2);
+        abi::sem_timedwait(sem1, 0);
+        abi::sem_post(sem2);
         mutex.lock();
     }
 
     pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
         self.counter.fetch_add(1, SeqCst);
+        let (sem1, sem2) = self.semaphores();
         mutex.unlock();
-        let millis = dur.as_millis().min(u32::MAX as u128) as u32;
-        let res = if millis > 0 {
-            abi::sem_timedwait(self.sem1, millis)
-        } else {
-            abi::sem_trywait(self.sem1)
-        };
+        let millis = dur.as_millis().min(u32::MAX as u128) as u32;
+        let res =
+            if millis > 0 { abi::sem_timedwait(sem1, millis) } else { abi::sem_trywait(sem1) };
 
-        abi::sem_post(self.sem2);
+        abi::sem_post(sem2);
         mutex.lock();
         res == 0
     }
@@ -83,8 +109,14 @@ impl Condvar {
 impl Drop for Condvar {
     fn drop(&mut self) {
         unsafe {
-            let _ = abi::sem_destroy(self.sem1);
-            let _ = abi::sem_destroy(self.sem2);
+            let sem1 = *self.sem1.get_mut();
+            let sem2 = *self.sem2.get_mut();
+            if !sem1.is_null() {
+                let _ = abi::sem_destroy(sem1);
+            }
+            if !sem2.is_null() {
+                let _ = abi::sem_destroy(sem2);
+            }
        }
    }
}
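`init_semaphore` races a freshly created handle into an `AtomicPtr` and destroys the loser, the standard lock-free lazy-initialization scheme. The same idea with a plain heap allocation in place of a hermit semaphore handle (illustrative):

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::{Acquire, Release}};

fn get_or_init(slot: &AtomicPtr<u64>) -> *mut u64 {
    let mut p = slot.load(Acquire);
    if p.is_null() {
        let new = Box::into_raw(Box::new(42u64));
        p = match slot.compare_exchange(ptr::null_mut(), new, Release, Acquire) {
            // We won the race; our allocation is now the shared one.
            Ok(_) => new,
            // Another thread beat us: free ours, use theirs.
            Err(existing) => {
                drop(unsafe { Box::from_raw(new) });
                existing
            }
        };
    }
    p
}

fn main() {
    static SLOT: AtomicPtr<u64> = AtomicPtr::new(ptr::null_mut());
    let p = get_or_init(&SLOT);
    assert_eq!(unsafe { *p }, 42);
}
```

The `Release`-on-success / `Acquire`-on-failure pairing publishes the initialized handle to the thread that loses the race, exactly as in the hermit code above.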
diff --git a/library/std/src/sys/hermit/mutex.rs b/library/std/src/sys/hermit/mutex.rs
index eb15a04ffcffb..4f8ef3512864a 100644
--- a/library/std/src/sys/hermit/mutex.rs
+++ b/library/std/src/sys/hermit/mutex.rs
@@ -60,9 +60,11 @@ impl<T> Spinlock<T> {
     }
 
     #[inline]
-    pub unsafe fn lock(&self) -> SpinlockGuard<'_, T> {
-        self.obtain_lock();
-        SpinlockGuard { dequeue: &self.dequeue, data: &mut *self.data.get() }
+    pub fn lock(&self) -> SpinlockGuard<'_, T> {
+        unsafe {
+            self.obtain_lock();
+            SpinlockGuard { dequeue: &self.dequeue, data: &mut *self.data.get() }
+        }
     }
 }
 
@@ -164,34 +166,28 @@ pub struct Mutex {
     inner: Spinlock<MutexInner>,
 }
 
-pub type MovableMutex = Mutex;
-
-unsafe impl Send for Mutex {}
-unsafe impl Sync for Mutex {}
-
 impl Mutex {
     pub const fn new() -> Mutex {
         Mutex { inner: Spinlock::new(MutexInner::new()) }
     }
 
     #[inline]
-    pub unsafe fn init(&mut self) {}
-
-    #[inline]
-    pub unsafe fn lock(&self) {
+    pub fn lock(&self) {
         loop {
             let mut guard = self.inner.lock();
             if guard.locked == false {
                 guard.locked = true;
                 return;
             } else {
-                let prio = abi::get_priority();
-                let id = abi::getpid();
+                unsafe {
+                    let prio = abi::get_priority();
+                    let id = abi::getpid();
 
-                guard.blocked_task.push(prio, id);
-                abi::block_current_task();
-                drop(guard);
-                abi::yield_now();
+                    guard.blocked_task.push(prio, id);
+                    abi::block_current_task();
+                    drop(guard);
+                    abi::yield_now();
+                }
             }
         }
     }
@@ -206,7 +202,7 @@ impl Mutex {
     }
 
     #[inline]
-    pub unsafe fn try_lock(&self) -> bool {
+    pub fn try_lock(&self) -> bool {
         let mut guard = self.inner.lock();
         if guard.locked == false {
             guard.locked = true;
diff --git a/library/std/src/sys/hermit/rwlock.rs b/library/std/src/sys/hermit/rwlock.rs
index 9701bab1f660b..0e4d4f2e03eb0 100644
--- a/library/std/src/sys/hermit/rwlock.rs
+++ b/library/std/src/sys/hermit/rwlock.rs
@@ -1,15 +1,12 @@
 use crate::cell::UnsafeCell;
-use crate::sys::locks::{MovableCondvar, Mutex};
-use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+use crate::sys::locks::{Condvar, Mutex};
 
 pub struct RwLock {
     lock: Mutex,
-    cond: MovableCondvar,
+    cond: Condvar,
     state: UnsafeCell<State>,
 }
 
-pub type MovableRwLock = RwLock;
-
 enum State {
     Unlocked,
     Reading(usize),
@@ -29,45 +26,49 @@ unsafe impl Sync for RwLock {}
 
 impl RwLock {
     pub const fn new() -> RwLock {
-        RwLock {
-            lock: Mutex::new(),
-            cond: MovableCondvar::new(),
-            state: UnsafeCell::new(State::Unlocked),
-        }
+        RwLock { lock: Mutex::new(), cond: Condvar::new(), state: UnsafeCell::new(State::Unlocked) }
     }
 
     #[inline]
-    pub unsafe fn read(&self) {
-        self.lock.lock();
-        while !(*self.state.get()).inc_readers() {
-            self.cond.wait(&self.lock);
+    pub fn read(&self) {
+        unsafe {
+            self.lock.lock();
+            while !(*self.state.get()).inc_readers() {
+                self.cond.wait(&self.lock);
+            }
+            self.lock.unlock();
         }
-        self.lock.unlock();
     }
 
     #[inline]
-    pub unsafe fn try_read(&self) -> bool {
-        self.lock.lock();
-        let ok = (*self.state.get()).inc_readers();
-        self.lock.unlock();
-        return ok;
+    pub fn try_read(&self) -> bool {
+        unsafe {
+            self.lock.lock();
+            let ok = (*self.state.get()).inc_readers();
+            self.lock.unlock();
+            ok
+        }
     }
 
     #[inline]
-    pub unsafe fn write(&self) {
-        self.lock.lock();
-        while !(*self.state.get()).inc_writers() {
-            self.cond.wait(&self.lock);
+    pub fn write(&self) {
+        unsafe {
+            self.lock.lock();
+            while !(*self.state.get()).inc_writers() {
+                self.cond.wait(&self.lock);
+            }
+            self.lock.unlock();
         }
-        self.lock.unlock();
     }
 
     #[inline]
-    pub unsafe fn try_write(&self) -> bool {
-        self.lock.lock();
-        let ok = (*self.state.get()).inc_writers();
-        self.lock.unlock();
-        return ok;
+    pub fn try_write(&self) -> bool {
+        unsafe {
+            self.lock.lock();
+            let ok = (*self.state.get()).inc_writers();
+            self.lock.unlock();
+            ok
+        }
     }
 
     #[inline]
diff --git a/library/std/src/sys/itron/condvar.rs b/library/std/src/sys/itron/condvar.rs
index 008cd8fb1e392..7a47cc6696a34 100644
--- a/library/std/src/sys/itron/condvar.rs
+++ b/library/std/src/sys/itron/condvar.rs
@@ -12,18 +12,13 @@ pub struct Condvar {
 unsafe impl Send for Condvar {}
 unsafe impl Sync for Condvar {}
 
-pub type MovableCondvar = Condvar;
-
 impl Condvar {
     #[inline]
     pub const fn new() -> Condvar {
         Condvar { waiters: SpinMutex::new(waiter_queue::WaiterQueue::new()) }
     }
 
-    #[inline]
-    pub unsafe fn init(&mut self) {}
-
-    pub unsafe fn notify_one(&self) {
+    pub fn notify_one(&self) {
         self.waiters.with_locked(|waiters| {
             if let Some(task) = waiters.pop_front() {
                 // Unpark the task
@@ -39,7 +34,7 @@ impl Condvar {
         });
     }
 
-    pub unsafe fn notify_all(&self) {
+    pub fn notify_all(&self) {
         self.waiters.with_locked(|waiters| {
             while let Some(task) = waiters.pop_front() {
                 // Unpark the task
@@ -76,7 +71,7 @@ impl Condvar {
             }
         }
 
-        unsafe { mutex.lock() };
+        mutex.lock();
     }
 
     pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
@@ -114,7 +109,7 @@ impl Condvar {
         // we woke up because of `notify_*`.
         let success = self.waiters.with_locked(|waiters| unsafe { !waiters.remove(waiter) });
 
-        unsafe { mutex.lock() };
+        mutex.lock();
         success
     }
 }
diff --git a/library/std/src/sys/itron/mutex.rs b/library/std/src/sys/itron/mutex.rs
index 715e94c3b3d6a..f2eed8e771c40 100644
--- a/library/std/src/sys/itron/mutex.rs
+++ b/library/std/src/sys/itron/mutex.rs
@@ -11,8 +11,6 @@ pub struct Mutex {
     mtx: SpinIdOnceCell<()>,
 }
 
-pub type MovableMutex = Mutex;
-
 /// Create a mutex object. This function never panics.
 fn new_mtx() -> Result<abi::ID, ItronError> {
     ItronError::err_if_negative(unsafe {
@@ -31,12 +29,6 @@ impl Mutex {
         Mutex { mtx: SpinIdOnceCell::new() }
     }
 
-    pub unsafe fn init(&mut self) {
-        // Initialize `self.mtx` eagerly
-        let id = new_mtx().unwrap_or_else(|e| fail(e, &"acre_mtx"));
-        unsafe { self.mtx.set_unchecked((id, ())) };
-    }
-
     /// Get the inner mutex's ID, which is lazily created.
     fn raw(&self) -> abi::ID {
         match self.mtx.get_or_try_init(|| new_mtx().map(|id| (id, ()))) {
@@ -45,7 +37,7 @@
         }
     }
 
-    pub unsafe fn lock(&self) {
+    pub fn lock(&self) {
         let mtx = self.raw();
         expect_success(unsafe { abi::loc_mtx(mtx) }, &"loc_mtx");
     }
@@ -55,7 +47,7 @@
         expect_success_aborting(unsafe { abi::unl_mtx(mtx) }, &"unl_mtx");
     }
 
-    pub unsafe fn try_lock(&self) -> bool {
+    pub fn try_lock(&self) -> bool {
         let mtx = self.raw();
         match unsafe { abi::ploc_mtx(mtx) } {
             abi::E_TMOUT => false,
diff --git a/library/std/src/sys/sgx/condvar.rs b/library/std/src/sys/sgx/condvar.rs
index 36534e0eff3fd..e46487b5dcde4 100644
--- a/library/std/src/sys/sgx/condvar.rs
+++ b/library/std/src/sys/sgx/condvar.rs
@@ -4,42 +4,41 @@ use crate::time::Duration;
 
 use super::waitqueue::{SpinMutex, WaitQueue, WaitVariable};
 
-pub struct Condvar {
-    inner: SpinMutex<WaitVariable<()>>,
-}
+pub struct Condvar(LazyBox<StaticCondvar>);
 
-pub(crate) type MovableCondvar = LazyBox<Condvar>;
+struct StaticCondvar(SpinMutex<WaitVariable<()>>);
 
-impl LazyInit for Condvar {
-    fn init() -> Box<Self> {
-        Box::new(Self::new())
+impl LazyInit for StaticCondvar {
+    fn init() -> Box<Self> {
+        Box::new(StaticCondvar(SpinMutex::new(WaitVariable::new(()))))
     }
 }
 
 impl Condvar {
+    #[inline]
     pub const fn new() -> Condvar {
-        Condvar { inner: SpinMutex::new(WaitVariable::new(())) }
+        Condvar(LazyBox::new())
     }
 
     #[inline]
-    pub unsafe fn notify_one(&self) {
-        let _ = WaitQueue::notify_one(self.inner.lock());
+    pub fn notify_one(&self) {
+        let _ = WaitQueue::notify_one(self.0.0.lock());
     }
 
     #[inline]
-    pub unsafe fn notify_all(&self) {
-        let _ = WaitQueue::notify_all(self.inner.lock());
+    pub fn notify_all(&self) {
+        let _ = WaitQueue::notify_all(self.0.0.lock());
     }
 
     pub unsafe fn wait(&self, mutex: &Mutex) {
-        let guard = self.inner.lock();
+        let guard = self.0.0.lock();
         WaitQueue::wait(guard, || unsafe { mutex.unlock() });
-        unsafe { mutex.lock() }
+        mutex.lock()
     }
 
     pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
-        let success = WaitQueue::wait_timeout(&self.inner, dur, || unsafe { mutex.unlock() });
-        unsafe { mutex.lock() };
+        let success = WaitQueue::wait_timeout(&self.0.0, dur, || unsafe { mutex.unlock() });
+        mutex.lock();
         success
     }
 }
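The internal `LazyBox<T>` used throughout these ports is a const-constructible, movable handle whose state is heap-allocated on first use, keeping the inner address stable for the OS primitive. `OnceLock<Box<T>>` is a rough public approximation; the `Lazy` type below is hypothetical, not std's implementation:

```rust
use std::sync::OnceLock;

struct Lazy<T> {
    slot: OnceLock<Box<T>>,
    init: fn() -> T,
}

impl<T> Lazy<T> {
    const fn new(init: fn() -> T) -> Self {
        Lazy { slot: OnceLock::new(), init }
    }

    fn get(&self) -> &T {
        // Heap-allocate on first touch; the Box keeps the address stable
        // even though the outer `Lazy` handle itself can be moved.
        self.slot.get_or_init(|| Box::new((self.init)()))
    }
}

fn main() {
    static NAMES: Lazy<Vec<&'static str>> = Lazy::new(|| vec!["a", "b"]);
    assert_eq!(NAMES.get().len(), 2);
}
```

(The real `LazyBox` also runs a destroy/cancel protocol on drop, which the SGX and pthread diffs rely on.)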
diff --git a/library/std/src/sys/sgx/mutex.rs b/library/std/src/sys/sgx/mutex.rs
index 513cd77fd2aad..e4e2e29b77a08 100644
--- a/library/std/src/sys/sgx/mutex.rs
+++ b/library/std/src/sys/sgx/mutex.rs
@@ -1,31 +1,25 @@
 use super::waitqueue::{try_lock_or_false, SpinMutex, WaitQueue, WaitVariable};
 use crate::sys_common::lazy_box::{LazyBox, LazyInit};
 
-pub struct Mutex {
-    inner: SpinMutex<WaitVariable<bool>>,
-}
+pub struct Mutex(LazyBox<StaticMutex>);
 
-// not movable: see UnsafeList implementation
-pub(crate) type MovableMutex = LazyBox<Mutex>;
+struct StaticMutex(SpinMutex<WaitVariable<bool>>);
 
-impl LazyInit for Mutex {
+impl LazyInit for StaticMutex {
     fn init() -> Box<Self> {
-        Box::new(Self::new())
+        Box::new(StaticMutex(SpinMutex::new(WaitVariable::new(false))))
     }
 }
 
 // Implementation according to “Operating Systems: Three Easy Pieces”, chapter 28
 impl Mutex {
     pub const fn new() -> Mutex {
-        Mutex { inner: SpinMutex::new(WaitVariable::new(false)) }
+        Mutex(LazyBox::new())
     }
 
     #[inline]
-    pub unsafe fn init(&mut self) {}
-
-    #[inline]
-    pub unsafe fn lock(&self) {
-        let mut guard = self.inner.lock();
+    pub fn lock(&self) {
+        let mut guard = self.0.0.lock();
         if *guard.lock_var() {
             // Another thread has the lock, wait
             WaitQueue::wait(guard, || {})
@@ -38,7 +32,7 @@ impl Mutex {
 
     #[inline]
     pub unsafe fn unlock(&self) {
-        let guard = self.inner.lock();
+        let guard = self.0.0.lock();
         if let Err(mut guard) = WaitQueue::notify_one(guard) {
             // No other waiters, unlock
             *guard.lock_var_mut() = false;
@@ -48,8 +42,8 @@ impl Mutex {
     }
 
     #[inline]
-    pub unsafe fn try_lock(&self) -> bool {
-        let mut guard = try_lock_or_false!(self.inner);
+    pub fn try_lock(&self) -> bool {
+        let mut guard = try_lock_or_false!(self.0.0);
         if *guard.lock_var() {
             // Another thread has the lock
             false
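The `rw_lock_size_assert` kept below relies on `mem::transmute` only compiling when the two types have identical sizes, which makes it a zero-cost layout check against the C definition. The technique in isolation (the sizes here are illustrative):

```rust
// `transmute::<A, B>` is rejected at compile time unless
// size_of::<A>() == size_of::<B>(), so this function compiles
// only while the layout assumption holds. It is never called.
#[allow(dead_code)]
unsafe fn size_assert(x: [u32; 4]) {
    unsafe { std::mem::transmute::<[u32; 4], [u8; 16]>(x) };
}

fn main() {}
```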
diff --git a/library/std/src/sys/sgx/rwlock.rs b/library/std/src/sys/sgx/rwlock.rs
index a97fb9ab026f0..ec76e4f023e4a 100644
--- a/library/std/src/sys/sgx/rwlock.rs
+++ b/library/std/src/sys/sgx/rwlock.rs
@@ -9,38 +9,72 @@ use super::waitqueue::{
 };
 use crate::mem;
 
-pub struct RwLock {
+pub struct RwLock(LazyBox<StaticRwLock>);
+
+impl RwLock {
+    #[inline]
+    pub const fn new() -> RwLock {
+        RwLock(LazyBox::new())
+    }
+
+    #[inline]
+    pub fn try_read(&self) -> bool {
+        self.0.try_read()
+    }
+
+    #[inline]
+    pub fn read(&self) {
+        self.0.read()
+    }
+
+    #[inline]
+    pub fn try_write(&self) -> bool {
+        self.0.try_write()
+    }
+
+    #[inline]
+    pub fn write(&self) {
+        self.0.write()
+    }
+
+    #[inline]
+    pub unsafe fn read_unlock(&self) {
+        unsafe { self.0.read_unlock() }
+    }
+
+    #[inline]
+    pub unsafe fn write_unlock(&self) {
+        unsafe { self.0.write_unlock() }
+    }
+}
+
+pub struct StaticRwLock {
     readers: SpinMutex<WaitVariable<Option<NonZeroUsize>>>,
     writer: SpinMutex<WaitVariable<bool>>,
 }
 
-pub(crate) type MovableRwLock = LazyBox<RwLock>;
-
-impl LazyInit for RwLock {
+impl LazyInit for StaticRwLock {
     fn init() -> Box<Self> {
-        Box::new(Self::new())
+        // Keep in sync with C definition.
+        Box::new(StaticRwLock {
+            readers: SpinMutex::new(WaitVariable::new(None)),
+            writer: SpinMutex::new(WaitVariable::new(false)),
+        })
     }
 }
 
-// Check at compile time that RwLock size matches C definition (see test_c_rwlock_initializer below)
+// Check at compile time that RwLock size matches C definition.
 //
 // # Safety
 // Never called, as it is a compile time check.
 #[allow(dead_code)]
-unsafe fn rw_lock_size_assert(r: RwLock) {
-    unsafe { mem::transmute::<RwLock, [u8; 144]>(r) };
+unsafe fn rw_lock_size_assert(r: StaticRwLock) {
+    unsafe { mem::transmute::<StaticRwLock, [u8; 144]>(r) };
 }
 
-impl RwLock {
-    pub const fn new() -> RwLock {
-        RwLock {
-            readers: SpinMutex::new(WaitVariable::new(None)),
-            writer: SpinMutex::new(WaitVariable::new(false)),
-        }
-    }
-
+impl StaticRwLock {
     #[inline]
-    pub unsafe fn read(&self) {
+    fn read(&self) {
         let mut rguard = self.readers.lock();
         let wguard = self.writer.lock();
         if *wguard.lock_var() || !wguard.queue_empty() {
@@ -56,7 +90,7 @@ impl RwLock {
     }
 
     #[inline]
-    pub unsafe fn try_read(&self) -> bool {
+    fn try_read(&self) -> bool {
         let mut rguard = try_lock_or_false!(self.readers);
         let wguard = try_lock_or_false!(self.writer);
         if *wguard.lock_var() || !wguard.queue_empty() {
@@ -71,7 +105,7 @@ impl RwLock {
     }
 
     #[inline]
-    pub unsafe fn write(&self) {
+    fn write(&self) {
         let rguard = self.readers.lock();
         let mut wguard = self.writer.lock();
         if *wguard.lock_var() || rguard.lock_var().is_some() {
@@ -86,7 +120,7 @@ impl RwLock {
     }
 
     #[inline]
-    pub unsafe fn try_write(&self) -> bool {
+    fn try_write(&self) -> bool {
         let rguard = try_lock_or_false!(self.readers);
         let mut wguard = try_lock_or_false!(self.writer);
         if *wguard.lock_var() || rguard.lock_var().is_some() {
@@ -121,7 +155,7 @@ impl RwLock {
     }
 
     #[inline]
-    pub unsafe fn read_unlock(&self) {
+    unsafe fn read_unlock(&self) {
         let rguard = self.readers.lock();
         let wguard = self.writer.lock();
         unsafe { self.__read_unlock(rguard, wguard) };
@@ -157,7 +191,7 @@ impl RwLock {
     }
 
     #[inline]
-    pub unsafe fn write_unlock(&self) {
+    unsafe fn write_unlock(&self) {
         let rguard = self.readers.lock();
         let wguard = self.writer.lock();
         unsafe { self.__write_unlock(rguard, wguard) };
@@ -184,7 +218,7 @@ const EINVAL: i32 = 22;
 
 #[cfg(not(test))]
 #[no_mangle]
-pub unsafe extern "C" fn __rust_rwlock_rdlock(p: *mut RwLock) -> i32 {
+pub unsafe extern "C" fn __rust_rwlock_rdlock(p: *mut StaticRwLock) -> i32 {
     if p.is_null() {
         return EINVAL;
     }
@@ -194,16 +228,17 @@
 
 #[cfg(not(test))]
 #[no_mangle]
-pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut RwLock) -> i32 {
+pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut StaticRwLock) -> i32 {
     if p.is_null() {
         return EINVAL;
     }
     unsafe { (*p).write() };
     return 0;
 }
+
 #[cfg(not(test))]
 #[no_mangle]
-pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut RwLock) -> i32 {
+pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut StaticRwLock) -> i32 {
     if p.is_null() {
         return EINVAL;
     }
diff --git a/library/std/src/sys/sgx/thread_local_key.rs b/library/std/src/sys/sgx/thread_local_key.rs
index b21784475f0d2..c7a57d3a3d47e 100644
--- a/library/std/src/sys/sgx/thread_local_key.rs
+++ b/library/std/src/sys/sgx/thread_local_key.rs
@@ -21,8 +21,3 @@ pub unsafe fn get(key: Key) -> *mut u8 {
 pub unsafe fn destroy(key: Key) {
     Tls::destroy(AbiKey::from_usize(key))
 }
-
-#[inline]
-pub fn requires_synchronized_create() -> bool {
-    false
-}
diff --git a/library/std/src/sys/solid/rwlock.rs b/library/std/src/sys/solid/rwlock.rs
index 0a770cf03f2f5..ecb4eb83b9b05 100644
--- a/library/std/src/sys/solid/rwlock.rs
+++ b/library/std/src/sys/solid/rwlock.rs
@@ -12,8 +12,6 @@ pub struct RwLock {
     rwl: SpinIdOnceCell<()>,
 }
 
-pub type MovableRwLock = RwLock;
-
 // Safety: `num_readers` is protected by `mtx_num_readers`
 unsafe impl Send for RwLock {}
 unsafe impl Sync for RwLock {}
@@ -37,13 +35,13 @@
     }
 
     #[inline]
-    pub unsafe fn read(&self) {
+    pub fn read(&self) {
         let rwl = self.raw();
         expect_success(unsafe { abi::rwl_loc_rdl(rwl) }, &"rwl_loc_rdl");
     }
 
     #[inline]
-    pub unsafe fn try_read(&self) -> bool {
+    pub fn try_read(&self) -> bool {
         let rwl = self.raw();
         match unsafe { abi::rwl_ploc_rdl(rwl) } {
             abi::E_TMOUT => false,
@@ -55,13 +53,13 @@
     }
 
     #[inline]
-    pub unsafe fn write(&self) {
+    pub fn write(&self) {
         let rwl = self.raw();
         expect_success(unsafe { abi::rwl_loc_wrl(rwl) }, &"rwl_loc_wrl");
     }
 
     #[inline]
-    pub unsafe fn try_write(&self) -> bool {
+    pub fn try_write(&self) -> bool {
         let rwl = self.raw();
         match unsafe { abi::rwl_ploc_wrl(rwl) } {
             abi::E_TMOUT => false,
diff --git a/library/std/src/sys/solid/thread_local_key.rs b/library/std/src/sys/solid/thread_local_key.rs
index b17521f701daf..b37bf99969887 100644
--- a/library/std/src/sys/solid/thread_local_key.rs
+++ b/library/std/src/sys/solid/thread_local_key.rs
@@ -19,8 +19,3 @@ pub unsafe fn get(_key: Key) -> *mut u8 {
 pub unsafe fn destroy(_key: Key) {
     panic!("should not be used on the solid target");
 }
-
-#[inline]
-pub fn requires_synchronized_create() -> bool {
-    panic!("should not be used on the solid target");
-}
diff --git a/library/std/src/sys/unix/locks/fuchsia_mutex.rs b/library/std/src/sys/unix/locks/fuchsia_mutex.rs
index ce427599c3bdd..b30ba4582ec3c 100644
--- a/library/std/src/sys/unix/locks/fuchsia_mutex.rs
+++ b/library/std/src/sys/unix/locks/fuchsia_mutex.rs
@@ -53,8 +53,6 @@ const CONTESTED_BIT: u32 = 1;
 // This can never be a valid `zx_handle_t`.
 const UNLOCKED: u32 = 0;
 
-pub type MovableMutex = Mutex;
-
 pub struct Mutex {
     futex: AtomicU32,
 }
@@ -86,17 +84,14 @@ impl Mutex {
     }
 
     #[inline]
-    pub unsafe fn init(&mut self) {}
-
-    #[inline]
-    pub unsafe fn try_lock(&self) -> bool {
-        let thread_self = zx_thread_self();
+    pub fn try_lock(&self) -> bool {
+        let thread_self = unsafe { zx_thread_self() };
         self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed).is_ok()
     }
 
     #[inline]
-    pub unsafe fn lock(&self) {
-        let thread_self = zx_thread_self();
+    pub fn lock(&self) {
+        let thread_self = unsafe { zx_thread_self() };
         if let Err(state) =
             self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed)
         {
diff --git a/library/std/src/sys/unix/locks/futex_condvar.rs b/library/std/src/sys/unix/locks/futex_condvar.rs
index c0576c17880e1..4bd65dd25c292 100644
--- a/library/std/src/sys/unix/locks/futex_condvar.rs
+++ b/library/std/src/sys/unix/locks/futex_condvar.rs
@@ -3,8 +3,6 @@ use crate::sync::atomic::{AtomicU32, Ordering::Relaxed};
 use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
 use crate::time::Duration;
 
-pub type MovableCondvar = Condvar;
-
 pub struct Condvar {
     // The value of this atomic is simply incremented on every notification.
     // This is used by `.wait()` to not miss any notifications after
@@ -21,12 +19,12 @@ impl Condvar {
     // All the memory orderings here are `Relaxed`,
     // because synchronization is done by unlocking and locking the mutex.
-    pub unsafe fn notify_one(&self) {
+    pub fn notify_one(&self) {
         self.futex.fetch_add(1, Relaxed);
         futex_wake(&self.futex);
     }
 
-    pub unsafe fn notify_all(&self) {
+    pub fn notify_all(&self) {
         self.futex.fetch_add(1, Relaxed);
         futex_wake_all(&self.futex);
     }
diff --git a/library/std/src/sys/unix/locks/futex_mutex.rs b/library/std/src/sys/unix/locks/futex_mutex.rs
index 99ba86e5f996d..c0d80b845fa4a 100644
--- a/library/std/src/sys/unix/locks/futex_mutex.rs
+++ b/library/std/src/sys/unix/locks/futex_mutex.rs
@@ -4,8 +4,6 @@ use crate::sync::atomic::{
 };
 use crate::sys::futex::{futex_wait, futex_wake};
 
-pub type MovableMutex = Mutex;
-
 pub struct Mutex {
     /// 0: unlocked
     /// 1: locked, no other threads waiting
@@ -20,15 +18,12 @@ impl Mutex {
     }
 
     #[inline]
-    pub unsafe fn init(&mut self) {}
-
-    #[inline]
-    pub unsafe fn try_lock(&self) -> bool {
+    pub fn try_lock(&self) -> bool {
         self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_ok()
     }
 
     #[inline]
-    pub unsafe fn lock(&self) {
+    pub fn lock(&self) {
         if self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_err() {
             self.lock_contended();
         }
diff --git a/library/std/src/sys/unix/locks/futex_rwlock.rs b/library/std/src/sys/unix/locks/futex_rwlock.rs
index b3bbbf743f84c..d69e66a073709 100644
--- a/library/std/src/sys/unix/locks/futex_rwlock.rs
+++ b/library/std/src/sys/unix/locks/futex_rwlock.rs
@@ -4,8 +4,6 @@ use crate::sync::atomic::{
 };
 use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
 
-pub type MovableRwLock = RwLock;
-
 pub struct RwLock {
     // The state consists of a 30-bit reader counter, a 'readers waiting' flag, and a 'writers waiting' flag.
     // Bits 0..30:
@@ -70,14 +68,14 @@ impl RwLock {
     }
 
     #[inline]
-    pub unsafe fn try_read(&self) -> bool {
+    pub fn try_read(&self) -> bool {
         self.state
             .fetch_update(Acquire, Relaxed, |s| is_read_lockable(s).then(|| s + READ_LOCKED))
             .is_ok()
     }
 
     #[inline]
-    pub unsafe fn read(&self) {
+    pub fn read(&self) {
         let state = self.state.load(Relaxed);
         if !is_read_lockable(state)
             || self
@@ -144,14 +142,14 @@ impl RwLock {
     }
 
     #[inline]
-    pub unsafe fn try_write(&self) -> bool {
+    pub fn try_write(&self) -> bool {
         self.state
             .fetch_update(Acquire, Relaxed, |s| is_unlocked(s).then(|| s + WRITE_LOCKED))
             .is_ok()
     }
 
     #[inline]
-    pub unsafe fn write(&self) {
+    pub fn write(&self) {
         if self.state.compare_exchange_weak(0, WRITE_LOCKED, Acquire, Relaxed).is_err() {
             self.write_contended();
         }
diff --git a/library/std/src/sys/unix/locks/mod.rs b/library/std/src/sys/unix/locks/mod.rs
index f5f92f6935830..b2e0e49ad736d 100644
--- a/library/std/src/sys/unix/locks/mod.rs
+++ b/library/std/src/sys/unix/locks/mod.rs
@@ -10,22 +10,22 @@ cfg_if::cfg_if! {
         mod futex_mutex;
         mod futex_rwlock;
         mod futex_condvar;
-        pub(crate) use futex_mutex::{Mutex, MovableMutex};
-        pub(crate) use futex_rwlock::{RwLock, MovableRwLock};
-        pub(crate) use futex_condvar::MovableCondvar;
+        pub(crate) use futex_mutex::Mutex;
+        pub(crate) use futex_rwlock::RwLock;
+        pub(crate) use futex_condvar::Condvar;
     } else if #[cfg(target_os = "fuchsia")] {
         mod fuchsia_mutex;
         mod futex_rwlock;
         mod futex_condvar;
-        pub(crate) use fuchsia_mutex::{Mutex, MovableMutex};
-        pub(crate) use futex_rwlock::{RwLock, MovableRwLock};
-        pub(crate) use futex_condvar::MovableCondvar;
+        pub(crate) use fuchsia_mutex::Mutex;
+        pub(crate) use futex_rwlock::RwLock;
+        pub(crate) use futex_condvar::Condvar;
     } else {
         mod pthread_mutex;
         mod pthread_rwlock;
         mod pthread_condvar;
-        pub(crate) use pthread_mutex::{Mutex, MovableMutex};
-        pub(crate) use pthread_rwlock::{RwLock, MovableRwLock};
-        pub(crate) use pthread_condvar::MovableCondvar;
+        pub(crate) use pthread_mutex::Mutex;
+        pub(crate) use pthread_rwlock::RwLock;
+        pub(crate) use pthread_condvar::Condvar;
     }
 }
diff --git a/library/std/src/sys/unix/locks/pthread_condvar.rs b/library/std/src/sys/unix/locks/pthread_condvar.rs
index 4741c0c6736e3..fe26d47b00cc2 100644
--- a/library/std/src/sys/unix/locks/pthread_condvar.rs
+++ b/library/std/src/sys/unix/locks/pthread_condvar.rs
@@ -1,39 +1,132 @@
 use crate::cell::UnsafeCell;
+use crate::ptr;
+use crate::sync::atomic::{AtomicPtr, Ordering::Relaxed};
 use crate::sys::locks::{pthread_mutex, Mutex};
 use crate::sys_common::lazy_box::{LazyBox, LazyInit};
 use crate::time::Duration;
 
+const TIMESPEC_MAX: libc::timespec =
+    libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
+
 pub struct Condvar {
-    inner: UnsafeCell<libc::pthread_cond_t>,
+    boxed: LazyBox<StaticCondvar>,
 }
 
-pub(crate) type MovableCondvar = LazyBox<Condvar>;
+#[inline]
+fn raw(condvar: &Condvar) -> *mut libc::pthread_cond_t {
+    condvar.boxed.inner.get()
+}
 
-unsafe impl Send for Condvar {}
-unsafe impl Sync for Condvar {}
+impl Condvar {
+    #[inline]
+    pub const fn new() -> Condvar {
+        Condvar { boxed: LazyBox::new() }
+    }
 
-const TIMESPEC_MAX: libc::timespec =
-    libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
+    #[inline]
+    fn verify(&self, mutex: &Mutex) {
+        let addr = pthread_mutex::raw(mutex);
+        match self.boxed.mutex_addr.compare_exchange(ptr::null_mut(), addr, Relaxed, Relaxed) {
+            // Stored the address
+            Ok(_) => {}
+            // Lost a race to store the same address
+            Err(n) if n == addr => {}
+            _ => panic!("attempted to use a condition variable with two mutexes"),
+        }
+    }
+
+    #[inline]
+    pub fn notify_one(&self) {
+        let r = unsafe { libc::pthread_cond_signal(raw(self)) };
+        debug_assert_eq!(r, 0);
+    }
+
+    #[inline]
+    pub fn notify_all(&self) {
+        let r = unsafe { libc::pthread_cond_broadcast(raw(self)) };
+        debug_assert_eq!(r, 0);
+    }
+
+    #[inline]
+    pub unsafe fn wait(&self, mutex: &Mutex) {
+        self.verify(mutex);
+        let r = libc::pthread_cond_wait(raw(self), pthread_mutex::raw(mutex));
+        debug_assert_eq!(r, 0);
+    }
+
+    pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+        // Use the system clock on systems that do not support pthread_condattr_setclock.
+        // This unfortunately results in problems when the system time changes.
+        #[cfg(any(
+            target_os = "macos",
+            target_os = "ios",
+            target_os = "watchos",
+            target_os = "espidf",
+            target_os = "horizon",
+        ))]
+        let (now, dur) = {
+            use crate::cmp::min;
+            use crate::sys::time::SystemTime;
+
+            // OSX implementation of `pthread_cond_timedwait` is buggy
+            // with super long durations. When duration is greater than
+            // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait`
+            // in macOS Sierra return error 316.
+            //
+            // This program demonstrates the issue:
+            // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c
+            //
+            // To work around this issue, and possible bugs of other OSes, timeout
+            // is clamped to 1000 years, which is allowable per the API of `park_timeout`
+            // because of spurious wakeups.
+            let dur = min(dur, Duration::from_secs(1000 * 365 * 86400));
+            let now = SystemTime::now().t;
+            (now, dur)
+        };
+        // Use the monotonic clock on other systems.
+        #[cfg(not(any(
+            target_os = "macos",
+            target_os = "ios",
+            target_os = "watchos",
+            target_os = "espidf",
+            target_os = "horizon",
+        )))]
+        let (now, dur) = {
+            use crate::sys::time::Timespec;
+
+            (Timespec::now(libc::CLOCK_MONOTONIC), dur)
+        };
+
+        let timeout =
+            now.checked_add_duration(&dur).and_then(|t| t.to_timespec()).unwrap_or(TIMESPEC_MAX);
+        self.verify(mutex);
+        let r = libc::pthread_cond_timedwait(raw(self), pthread_mutex::raw(mutex), &timeout);
+        // The mutex will be locked no matter the error, so only check it in debug mode.
+        debug_assert!(r == libc::ETIMEDOUT || r == 0);
+        r == 0
+    }
+}
 
-fn saturating_cast_to_time_t(value: u64) -> libc::time_t {
-    if value > <libc::time_t>::MAX as u64 { <libc::time_t>::MAX } else { value as libc::time_t }
+struct StaticCondvar {
+    inner: UnsafeCell<libc::pthread_cond_t>,
+    mutex_addr: AtomicPtr<libc::pthread_mutex_t>,
 }
 
-impl LazyInit for Condvar {
+unsafe impl Send for StaticCondvar {}
+unsafe impl Sync for StaticCondvar {}
+
+impl LazyInit for StaticCondvar {
     fn init() -> Box<Self> {
-        let mut condvar = Box::new(Self::new());
+        let mut condvar = Box::new(StaticCondvar {
+            inner: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER),
+            mutex_addr: AtomicPtr::new(ptr::null_mut()),
+        });
         unsafe { condvar.init() };
         condvar
     }
 }
 
-impl Condvar {
-    pub const fn new() -> Condvar {
-        // Might be moved, and the address may change; it is better to avoid
-        // initialization of potentially opaque OS data before it has landed
-        Condvar { inner: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER) }
-    }
-
+impl StaticCondvar {
     #[cfg(any(
         target_os = "macos",
         target_os = "ios",
@@ -79,122 +172,6 @@ impl Condvar {
         assert_eq!(r, 0);
     }
 
-    #[inline]
-    pub unsafe fn notify_one(&self) {
-        let r = libc::pthread_cond_signal(self.inner.get());
-        debug_assert_eq!(r, 0);
-    }
-
-    #[inline]
-    pub unsafe fn notify_all(&self) {
-        let r = libc::pthread_cond_broadcast(self.inner.get());
-        debug_assert_eq!(r, 0);
-    }
-
-    #[inline]
-    pub unsafe fn wait(&self, mutex: &Mutex) {
-        let r = libc::pthread_cond_wait(self.inner.get(), pthread_mutex::raw(mutex));
-        debug_assert_eq!(r, 0);
-    }
-
-    // This implementation is used on systems that support pthread_condattr_setclock
-    // where we configure condition variable to use monotonic clock (instead of
-    // default system clock). This approach avoids all problems that result
-    // from changes made to the system time.
-    #[cfg(not(any(
-        target_os = "macos",
-        target_os = "ios",
-        target_os = "watchos",
-        target_os = "android",
-        target_os = "espidf",
-        target_os = "horizon"
-    )))]
-    pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
-        use crate::mem;
-
-        let mut now: libc::timespec = mem::zeroed();
-        let r = libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now);
-        assert_eq!(r, 0);
-
-        // Nanosecond calculations can't overflow because both values are below 1e9.
-        let nsec = dur.subsec_nanos() + now.tv_nsec as u32;
-
-        let sec = saturating_cast_to_time_t(dur.as_secs())
-            .checked_add((nsec / 1_000_000_000) as libc::time_t)
-            .and_then(|s| s.checked_add(now.tv_sec));
-        let nsec = nsec % 1_000_000_000;
-
-        let timeout =
-            sec.map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec as _ }).unwrap_or(TIMESPEC_MAX);
-
-        let r = libc::pthread_cond_timedwait(self.inner.get(), pthread_mutex::raw(mutex), &timeout);
-        assert!(r == libc::ETIMEDOUT || r == 0);
-        r == 0
-    }
-
-    // This implementation is modeled after libcxx's condition_variable
-    // https://github.com/llvm-mirror/libcxx/blob/release_35/src/condition_variable.cpp#L46
-    // https://github.com/llvm-mirror/libcxx/blob/release_35/include/__mutex_base#L367
-    #[cfg(any(
-        target_os = "macos",
-        target_os = "ios",
-        target_os = "watchos",
-        target_os = "android",
-        target_os = "espidf",
-        target_os = "horizon"
-    ))]
-    pub unsafe fn wait_timeout(&self, mutex: &Mutex, mut dur: Duration) -> bool {
-        use crate::ptr;
-        use crate::time::Instant;
-
-        // 1000 years
-        let max_dur = Duration::from_secs(1000 * 365 * 86400);
-
-        if dur > max_dur {
-            // OSX implementation of `pthread_cond_timedwait` is buggy
-            // with super long durations. When duration is greater than
-            // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait`
-            // in macOS Sierra return error 316.
-            //
-            // This program demonstrates the issue:
-            // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c
-            //
-            // To work around this issue, and possible bugs of other OSes, timeout
-            // is clamped to 1000 years, which is allowable per the API of `wait_timeout`
-            // because of spurious wakeups.
-
-            dur = max_dur;
-        }
-
-        // First, figure out what time it currently is, in both system and
-        // stable time. pthread_cond_timedwait uses system time, but we want to
-        // report timeout based on stable time.
-        let mut sys_now = libc::timeval { tv_sec: 0, tv_usec: 0 };
-        let stable_now = Instant::now();
-        let r = libc::gettimeofday(&mut sys_now, ptr::null_mut());
-        assert_eq!(r, 0, "unexpected error: {:?}", crate::io::Error::last_os_error());
-
-        let nsec = dur.subsec_nanos() as libc::c_long + (sys_now.tv_usec * 1000) as libc::c_long;
-        let extra = (nsec / 1_000_000_000) as libc::time_t;
-        let nsec = nsec % 1_000_000_000;
-        let seconds = saturating_cast_to_time_t(dur.as_secs());
-
-        let timeout = sys_now
-            .tv_sec
-            .checked_add(extra)
-            .and_then(|s| s.checked_add(seconds))
-            .map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec })
-            .unwrap_or(TIMESPEC_MAX);
-
-        // And wait!
-        let r = libc::pthread_cond_timedwait(self.inner.get(), pthread_mutex::raw(mutex), &timeout);
-        debug_assert!(r == libc::ETIMEDOUT || r == 0);
-
-        // ETIMEDOUT is not a totally reliable method of determining timeout due
-        // to clock shifts, so do the check ourselves
-        stable_now.elapsed() < dur
-    }
-
     #[inline]
     #[cfg(not(target_os = "dragonfly"))]
     unsafe fn destroy(&mut self) {
@@ -214,7 +191,7 @@
     }
 }
 
-impl Drop for Condvar {
+impl Drop for StaticCondvar {
     #[inline]
     fn drop(&mut self) {
         unsafe { self.destroy() };
     }
 }
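The new `verify` step pins a condvar to the first mutex it is used with, since POSIX forbids waiting on one `pthread_cond_t` with two different mutexes. Its core, reduced to address bookkeeping (illustrative, not the std implementation):

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::Relaxed};

// Remember the first mutex address this condvar is paired with,
// and panic on any later mismatch.
struct MutexAddr(AtomicPtr<u8>);

impl MutexAddr {
    const fn new() -> Self {
        MutexAddr(AtomicPtr::new(ptr::null_mut()))
    }

    fn verify(&self, addr: *mut u8) {
        match self.0.compare_exchange(ptr::null_mut(), addr, Relaxed, Relaxed) {
            Ok(_) => {}               // stored the address on first use
            Err(p) if p == addr => {} // lost a race to store the same address
            _ => panic!("attempted to use a condition variable with two mutexes"),
        }
    }
}

fn main() {
    let mut a = 0u8;
    let binding = MutexAddr::new();
    binding.verify(&mut a as *mut u8);
    binding.verify(&mut a as *mut u8); // ok: same mutex
}
```

`Relaxed` suffices here because the check only guards against API misuse; the condvar and mutex themselves provide the synchronization.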
diff --git a/library/std/src/sys/unix/locks/pthread_mutex.rs b/library/std/src/sys/unix/locks/pthread_mutex.rs
index 98afee69ba622..2d15472e169a5 100644
--- a/library/std/src/sys/unix/locks/pthread_mutex.rs
+++ b/library/std/src/sys/unix/locks/pthread_mutex.rs
@@ -4,22 +4,47 @@ use crate::sys::cvt_nz;
 use crate::sys_common::lazy_box::{LazyBox, LazyInit};
 
 pub struct Mutex {
-    inner: UnsafeCell<libc::pthread_mutex_t>,
+    boxed: LazyBox<StaticMutex>,
 }
 
-pub(crate) type MovableMutex = LazyBox<Mutex>;
+impl Mutex {
+    #[inline]
+    pub const fn new() -> Mutex {
+        Mutex { boxed: LazyBox::new() }
+    }
+
+    #[inline]
+    pub fn try_lock(&self) -> bool {
+        unsafe { self.boxed.try_lock() }
+    }
+
+    #[inline]
+    pub fn lock(&self) {
+        unsafe { self.boxed.lock() }
+    }
+
+    #[inline]
+    pub unsafe fn unlock(&self) {
+        self.boxed.unlock()
+    }
+}
 
 #[inline]
-pub unsafe fn raw(m: &Mutex) -> *mut libc::pthread_mutex_t {
-    m.inner.get()
+pub(super) fn raw(m: &Mutex) -> *mut libc::pthread_mutex_t {
+    m.boxed.inner.get()
 }
 
-unsafe impl Send for Mutex {}
-unsafe impl Sync for Mutex {}
+struct StaticMutex {
+    inner: UnsafeCell<libc::pthread_mutex_t>,
+}
 
-impl LazyInit for Mutex {
+unsafe impl Send for StaticMutex {}
+unsafe impl Sync for StaticMutex {}
+
+impl LazyInit for StaticMutex {
     fn init() -> Box<Self> {
-        let mut mutex = Box::new(Self::new());
+        let mut mutex =
+            Box::new(StaticMutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) });
         unsafe { mutex.init() };
         mutex
     }
@@ -43,16 +68,9 @@
     }
 }
 
-impl Mutex {
-    pub const fn new() -> Mutex {
-        // Might be moved to a different address, so it is better to avoid
-        // initialization of potentially opaque OS data before it landed.
-        // Be very careful using this newly constructed `Mutex`, reentrant
-        // locking is undefined behavior until `init` is called!
-        Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
-    }
+impl StaticMutex {
     #[inline]
-    pub unsafe fn init(&mut self) {
+    unsafe fn init(&mut self) {
         // Issue #33770
         //
         // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
@@ -84,26 +102,31 @@
             .unwrap();
         cvt_nz(libc::pthread_mutex_init(self.inner.get(), attr.0.as_ptr())).unwrap();
     }
+
     #[inline]
     pub unsafe fn lock(&self) {
         let r = libc::pthread_mutex_lock(self.inner.get());
         debug_assert_eq!(r, 0);
     }
+
     #[inline]
     pub unsafe fn unlock(&self) {
         let r = libc::pthread_mutex_unlock(self.inner.get());
         debug_assert_eq!(r, 0);
     }
+
     #[inline]
     pub unsafe fn try_lock(&self) -> bool {
         libc::pthread_mutex_trylock(self.inner.get()) == 0
     }
+
     #[inline]
     #[cfg(not(target_os = "dragonfly"))]
     unsafe fn destroy(&mut self) {
         let r = libc::pthread_mutex_destroy(self.inner.get());
         debug_assert_eq!(r, 0);
     }
+
     #[inline]
     #[cfg(target_os = "dragonfly")]
     unsafe fn destroy(&mut self) {
@@ -116,14 +139,14 @@
     }
 }
 
-impl Drop for Mutex {
+impl Drop for StaticMutex {
     #[inline]
     fn drop(&mut self) {
         unsafe { self.destroy() };
     }
 }
 
-pub(super) struct PthreadMutexAttr<'a>(pub &'a mut MaybeUninit<libc::pthread_mutexattr_t>);
+struct PthreadMutexAttr<'a>(pub &'a mut MaybeUninit<libc::pthread_mutexattr_t>);
 
 impl Drop for PthreadMutexAttr<'_> {
     fn drop(&mut self) {
diff --git a/library/std/src/sys/unix/locks/pthread_rwlock.rs b/library/std/src/sys/unix/locks/pthread_rwlock.rs
index adfe2a88338f5..d5d1a30c91ea4 100644
--- a/library/std/src/sys/unix/locks/pthread_rwlock.rs
+++ b/library/std/src/sys/unix/locks/pthread_rwlock.rs
@@ -4,155 +4,178 @@ use crate::sync::atomic::{AtomicUsize, Ordering};
 use crate::sys_common::lazy_box::{LazyBox, LazyInit};
 
 pub struct RwLock {
-    inner: UnsafeCell<libc::pthread_rwlock_t>,
-    write_locked: UnsafeCell<bool>, // guarded by the `inner` RwLock
-    num_readers: AtomicUsize,
-}
-
-pub(crate) type MovableRwLock = LazyBox<RwLock>;
-
-unsafe impl Send for RwLock {}
-unsafe impl Sync for RwLock {}
-
-impl LazyInit for RwLock {
-    fn init() -> Box<Self> {
-        Box::new(Self::new())
-    }
-
-    fn destroy(mut rwlock: Box<Self>) {
-        // We're not allowed to pthread_rwlock_destroy a locked rwlock,
-        // so check first if it's unlocked.
-        if *rwlock.write_locked.get_mut() || *rwlock.num_readers.get_mut() != 0 {
-            // The rwlock is locked. This happens if a RwLock{Read,Write}Guard is leaked.
-            // In this case, we just leak the RwLock too.
-            forget(rwlock);
-        }
-    }
-
-    fn cancel_init(_: Box<Self>) {
-        // In this case, we can just drop it without any checks,
-        // since it cannot have been locked yet.
-    }
+    boxed: LazyBox<StaticRwLock>,
 }
 
 impl RwLock {
+    #[inline]
     pub const fn new() -> RwLock {
-        RwLock {
-            inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
-            write_locked: UnsafeCell::new(false),
-            num_readers: AtomicUsize::new(0),
-        }
+        RwLock { boxed: LazyBox::new() }
     }
+
     #[inline]
-    pub unsafe fn read(&self) {
-        let r = libc::pthread_rwlock_rdlock(self.inner.get());
-
-        // According to POSIX, when a thread tries to acquire this read lock
-        // while it already holds the write lock
-        // (or vice versa, or tries to acquire the write lock twice),
-        // "the call shall either deadlock or return [EDEADLK]"
-        // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html,
-        // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_rdlock.html).
-        // So, in principle, all we have to do here is check `r == 0` to be sure we properly
-        // got the lock.
- // - // However, (at least) glibc before version 2.25 does not conform to this spec, - // and can return `r == 0` even when this thread already holds the write lock. - // We thus check for this situation ourselves and panic when detecting that a thread - // got the write lock more than once, or got a read and a write lock. - if r == libc::EAGAIN { - panic!("rwlock maximum reader count exceeded"); - } else if r == libc::EDEADLK || (r == 0 && *self.write_locked.get()) { - // Above, we make sure to only access `write_locked` when `r == 0` to avoid - // data races. - if r == 0 { - // `pthread_rwlock_rdlock` succeeded when it should not have. - self.raw_unlock(); + pub fn read(&self) { + unsafe { + let r = libc::pthread_rwlock_rdlock(self.boxed.inner.get()); + + // According to POSIX, when a thread tries to acquire this read lock + // while it already holds the write lock + // (or vice versa, or tries to acquire the write lock twice), + // "the call shall either deadlock or return [EDEADLK]" + // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html, + // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_rdlock.html). + // So, in principle, all we have to do here is check `r == 0` to be sure we properly + // got the lock. + // + // However, (at least) glibc before version 2.25 does not conform to this spec, + // and can return `r == 0` even when this thread already holds the write lock. + // We thus check for this situation ourselves and panic when detecting that a thread + // got the write lock more than once, or got a read and a write lock. + if r == libc::EAGAIN { + panic!("rwlock maximum reader count exceeded"); + } else if r == libc::EDEADLK || (r == 0 && *self.boxed.write_locked.get()) { + // Above, we make sure to only access `write_locked` when `r == 0` to avoid + // data races. + if r == 0 { + // `pthread_rwlock_rdlock` succeeded when it should not have. + self.raw_unlock(); + } + panic!("rwlock read lock would result in deadlock"); + } else { + // POSIX does not make guarantees about all the errors that may be returned. + // See issue #94705 for more details. + assert_eq!(r, 0, "unexpected error during rwlock read lock: {:?}", r); + self.boxed.num_readers.fetch_add(1, Ordering::Relaxed); } - panic!("rwlock read lock would result in deadlock"); - } else { - // POSIX does not make guarantees about all the errors that may be returned. - // See issue #94705 for more details. - assert_eq!(r, 0, "unexpected error during rwlock read lock: {:?}", r); - self.num_readers.fetch_add(1, Ordering::Relaxed); } } + #[inline] - pub unsafe fn try_read(&self) -> bool { - let r = libc::pthread_rwlock_tryrdlock(self.inner.get()); - if r == 0 { - if *self.write_locked.get() { - // `pthread_rwlock_tryrdlock` succeeded when it should not have. - self.raw_unlock(); - false + pub fn try_read(&self) -> bool { + unsafe { + let r = libc::pthread_rwlock_tryrdlock(self.boxed.inner.get()); + if r == 0 { + if *self.boxed.write_locked.get() { + // `pthread_rwlock_tryrdlock` succeeded when it should not have. + self.raw_unlock(); + false + } else { + self.boxed.num_readers.fetch_add(1, Ordering::Relaxed); + true + } } else { - self.num_readers.fetch_add(1, Ordering::Relaxed); - true + false } - } else { - false } } + #[inline] - pub unsafe fn write(&self) { - let r = libc::pthread_rwlock_wrlock(self.inner.get()); - // See comments above for why we check for EDEADLK and write_locked. 
For the same reason, - // we also need to check that there are no readers (tracked in `num_readers`). - if r == libc::EDEADLK - || (r == 0 && *self.write_locked.get()) - || self.num_readers.load(Ordering::Relaxed) != 0 - { - // Above, we make sure to only access `write_locked` when `r == 0` to avoid - // data races. - if r == 0 { - // `pthread_rwlock_wrlock` succeeded when it should not have. - self.raw_unlock(); + pub fn write(&self) { + unsafe { + let r = libc::pthread_rwlock_wrlock(self.boxed.inner.get()); + // See comments above for why we check for EDEADLK and write_locked. For the same reason, + // we also need to check that there are no readers (tracked in `num_readers`). + if r == libc::EDEADLK + || (r == 0 && *self.boxed.write_locked.get()) + || self.boxed.num_readers.load(Ordering::Relaxed) != 0 + { + // Above, we make sure to only access `write_locked` when `r == 0` to avoid + // data races. + if r == 0 { + // `pthread_rwlock_wrlock` succeeded when it should not have. + self.raw_unlock(); + } + panic!("rwlock write lock would result in deadlock"); + } else { + // According to POSIX, for a properly initialized rwlock this can only + // return EDEADLK or 0. We rely on that. + debug_assert_eq!(r, 0); } - panic!("rwlock write lock would result in deadlock"); - } else { - // According to POSIX, for a properly initialized rwlock this can only - // return EDEADLK or 0. We rely on that. - debug_assert_eq!(r, 0); + *self.boxed.write_locked.get() = true; } - *self.write_locked.get() = true; } + #[inline] - pub unsafe fn try_write(&self) -> bool { - let r = libc::pthread_rwlock_trywrlock(self.inner.get()); - if r == 0 { - if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 { - // `pthread_rwlock_trywrlock` succeeded when it should not have. - self.raw_unlock(); - false + pub fn try_write(&self) -> bool { + unsafe { + let r = libc::pthread_rwlock_trywrlock(self.boxed.inner.get()); + if r == 0 { + if *self.boxed.write_locked.get() + || self.boxed.num_readers.load(Ordering::Relaxed) != 0 + { + // `pthread_rwlock_trywrlock` succeeded when it should not have. 
+ self.raw_unlock(); + false + } else { + *self.boxed.write_locked.get() = true; + true + } } else { - *self.write_locked.get() = true; - true + false } - } else { - false } } + #[inline] unsafe fn raw_unlock(&self) { - let r = libc::pthread_rwlock_unlock(self.inner.get()); + let r = libc::pthread_rwlock_unlock(self.boxed.inner.get()); debug_assert_eq!(r, 0); } + #[inline] pub unsafe fn read_unlock(&self) { - debug_assert!(!*self.write_locked.get()); - self.num_readers.fetch_sub(1, Ordering::Relaxed); + debug_assert!(!*self.boxed.write_locked.get()); + self.boxed.num_readers.fetch_sub(1, Ordering::Relaxed); self.raw_unlock(); } + #[inline] pub unsafe fn write_unlock(&self) { - debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0); - debug_assert!(*self.write_locked.get()); - *self.write_locked.get() = false; + debug_assert_eq!(self.boxed.num_readers.load(Ordering::Relaxed), 0); + debug_assert!(*self.boxed.write_locked.get()); + *self.boxed.write_locked.get() = false; self.raw_unlock(); } +} + +struct StaticRwLock { + inner: UnsafeCell, + write_locked: UnsafeCell, // guarded by the `inner` RwLock + num_readers: AtomicUsize, +} + +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} + +impl LazyInit for StaticRwLock { + fn init() -> Box { + Box::new(StaticRwLock { + inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER), + write_locked: UnsafeCell::new(false), + num_readers: AtomicUsize::new(0), + }) + } + + fn destroy(mut rwlock: Box) { + // We're not allowed to pthread_rwlock_destroy a locked rwlock, + // so check first if it's unlocked. + if *rwlock.write_locked.get_mut() || *rwlock.num_readers.get_mut() != 0 { + // The rwlock is locked. This happens if a RwLock{Read,Write}Guard is leaked. + // In this case, we just leak the RwLock too. + forget(rwlock); + } + } + + fn cancel_init(_: Box) { + // In this case, we can just drop it without any checks, + // since it cannot have been locked yet. + } +} + +impl Drop for StaticRwLock { #[inline] - unsafe fn destroy(&mut self) { - let r = libc::pthread_rwlock_destroy(self.inner.get()); + fn drop(&mut self) { + let r = unsafe { libc::pthread_rwlock_destroy(self.inner.get()) }; // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a // rwlock that was just initialized with // libc::PTHREAD_RWLOCK_INITIALIZER. 
Once it is used (locked/unlocked) @@ -164,10 +187,3 @@ impl RwLock { } } } - -impl Drop for RwLock { - #[inline] - fn drop(&mut self) { - unsafe { self.destroy() }; - } -} diff --git a/library/std/src/sys/unix/os.rs b/library/std/src/sys/unix/os.rs index 46545a0839fe8..b6ef1f0977419 100644 --- a/library/std/src/sys/unix/os.rs +++ b/library/std/src/sys/unix/os.rs @@ -17,10 +17,10 @@ use crate::path::{self, PathBuf}; use crate::ptr; use crate::slice; use crate::str; +use crate::sync::{PoisonError, RwLock}; use crate::sys::cvt; use crate::sys::fd; use crate::sys::memchr; -use crate::sys_common::rwlock::{StaticRwLock, StaticRwLockReadGuard}; use crate::vec; #[cfg(all(target_env = "gnu", not(target_os = "vxworks")))] @@ -501,10 +501,11 @@ pub unsafe fn environ() -> *mut *const *const c_char { ptr::addr_of_mut!(environ) } -static ENV_LOCK: StaticRwLock = StaticRwLock::new(); +static ENV_LOCK: RwLock<()> = RwLock::new(()); -pub fn env_read_lock() -> StaticRwLockReadGuard { - ENV_LOCK.read() +#[inline] +pub fn env_read_lock() -> impl Drop { + ENV_LOCK.read().unwrap_or_else(PoisonError::into_inner) } /// Returns a vector of (variable, value) byte-vector pairs for all the diff --git a/library/std/src/sys/unix/thread_local_key.rs b/library/std/src/sys/unix/thread_local_key.rs index 2c5b94b1e61e5..2b2d079ee4d01 100644 --- a/library/std/src/sys/unix/thread_local_key.rs +++ b/library/std/src/sys/unix/thread_local_key.rs @@ -27,8 +27,3 @@ pub unsafe fn destroy(key: Key) { let r = libc::pthread_key_delete(key); debug_assert_eq!(r, 0); } - -#[inline] -pub fn requires_synchronized_create() -> bool { - false -} diff --git a/library/std/src/sys/unix/thread_parker.rs b/library/std/src/sys/unix/thread_parker.rs index ca1a7138fded2..922fad2bcaae0 100644 --- a/library/std/src/sys/unix/thread_parker.rs +++ b/library/std/src/sys/unix/thread_parker.rs @@ -56,7 +56,8 @@ unsafe fn wait_timeout( target_os = "macos", target_os = "ios", target_os = "watchos", - target_os = "espidf" + target_os = "espidf", + target_os = "horizon", ))] let (now, dur) = { use super::time::SystemTime; diff --git a/library/std/src/sys/unsupported/locks/condvar.rs b/library/std/src/sys/unsupported/locks/condvar.rs index e703fd0d26993..3cc5e9cccf142 100644 --- a/library/std/src/sys/unsupported/locks/condvar.rs +++ b/library/std/src/sys/unsupported/locks/condvar.rs @@ -3,8 +3,6 @@ use crate::time::Duration; pub struct Condvar {} -pub type MovableCondvar = Condvar; - impl Condvar { #[inline] pub const fn new() -> Condvar { @@ -12,10 +10,10 @@ impl Condvar { } #[inline] - pub unsafe fn notify_one(&self) {} + pub fn notify_one(&self) {} #[inline] - pub unsafe fn notify_all(&self) {} + pub fn notify_all(&self) {} pub unsafe fn wait(&self, _mutex: &Mutex) { panic!("condvar wait not supported") diff --git a/library/std/src/sys/unsupported/locks/mod.rs b/library/std/src/sys/unsupported/locks/mod.rs index d412ff152a047..0e0f9eccb2137 100644 --- a/library/std/src/sys/unsupported/locks/mod.rs +++ b/library/std/src/sys/unsupported/locks/mod.rs @@ -1,6 +1,6 @@ mod condvar; mod mutex; mod rwlock; -pub use condvar::{Condvar, MovableCondvar}; -pub use mutex::{MovableMutex, Mutex}; -pub use rwlock::{MovableRwLock, RwLock}; +pub use condvar::Condvar; +pub use mutex::Mutex; +pub use rwlock::RwLock; diff --git a/library/std/src/sys/unsupported/locks/mutex.rs b/library/std/src/sys/unsupported/locks/mutex.rs index d7cb12e0cf9a4..57c78f454c57c 100644 --- a/library/std/src/sys/unsupported/locks/mutex.rs +++ b/library/std/src/sys/unsupported/locks/mutex.rs @@ -5,8 +5,6 
@@ pub struct Mutex { locked: Cell, } -pub type MovableMutex = Mutex; - unsafe impl Send for Mutex {} unsafe impl Sync for Mutex {} // no threads on this platform @@ -17,10 +15,7 @@ impl Mutex { } #[inline] - pub unsafe fn init(&mut self) {} - - #[inline] - pub unsafe fn lock(&self) { + pub fn lock(&self) { assert_eq!(self.locked.replace(true), false, "cannot recursively acquire mutex"); } @@ -30,7 +25,7 @@ impl Mutex { } #[inline] - pub unsafe fn try_lock(&self) -> bool { + pub fn try_lock(&self) -> bool { self.locked.replace(true) == false } } diff --git a/library/std/src/sys/unsupported/locks/rwlock.rs b/library/std/src/sys/unsupported/locks/rwlock.rs index aca5fb7152c99..ef2af82cbfbe3 100644 --- a/library/std/src/sys/unsupported/locks/rwlock.rs +++ b/library/std/src/sys/unsupported/locks/rwlock.rs @@ -5,8 +5,6 @@ pub struct RwLock { mode: Cell, } -pub type MovableRwLock = RwLock; - unsafe impl Send for RwLock {} unsafe impl Sync for RwLock {} // no threads on this platform @@ -17,7 +15,7 @@ impl RwLock { } #[inline] - pub unsafe fn read(&self) { + pub fn read(&self) { let m = self.mode.get(); if m >= 0 { self.mode.set(m + 1); @@ -27,7 +25,7 @@ impl RwLock { } #[inline] - pub unsafe fn try_read(&self) -> bool { + pub fn try_read(&self) -> bool { let m = self.mode.get(); if m >= 0 { self.mode.set(m + 1); @@ -38,14 +36,14 @@ impl RwLock { } #[inline] - pub unsafe fn write(&self) { + pub fn write(&self) { if self.mode.replace(-1) != 0 { rtabort!("rwlock locked for reading") } } #[inline] - pub unsafe fn try_write(&self) -> bool { + pub fn try_write(&self) -> bool { if self.mode.get() == 0 { self.mode.set(-1); true diff --git a/library/std/src/sys/unsupported/thread_local_key.rs b/library/std/src/sys/unsupported/thread_local_key.rs index c31b61cbf56d3..b6e5e4cd2e197 100644 --- a/library/std/src/sys/unsupported/thread_local_key.rs +++ b/library/std/src/sys/unsupported/thread_local_key.rs @@ -19,8 +19,3 @@ pub unsafe fn get(_key: Key) -> *mut u8 { pub unsafe fn destroy(_key: Key) { panic!("should not be used on this target"); } - -#[inline] -pub fn requires_synchronized_create() -> bool { - panic!("should not be used on this target"); -} diff --git a/library/std/src/sys/windows/locks/condvar.rs b/library/std/src/sys/windows/locks/condvar.rs index be9a2abbe35d9..66fafa2c00b00 100644 --- a/library/std/src/sys/windows/locks/condvar.rs +++ b/library/std/src/sys/windows/locks/condvar.rs @@ -8,8 +8,6 @@ pub struct Condvar { inner: UnsafeCell, } -pub type MovableCondvar = Condvar; - unsafe impl Send for Condvar {} unsafe impl Sync for Condvar {} @@ -41,12 +39,12 @@ impl Condvar { } #[inline] - pub unsafe fn notify_one(&self) { - c::WakeConditionVariable(self.inner.get()) + pub fn notify_one(&self) { + unsafe { c::WakeConditionVariable(self.inner.get()) } } #[inline] - pub unsafe fn notify_all(&self) { - c::WakeAllConditionVariable(self.inner.get()) + pub fn notify_all(&self) { + unsafe { c::WakeAllConditionVariable(self.inner.get()) } } } diff --git a/library/std/src/sys/windows/locks/mod.rs b/library/std/src/sys/windows/locks/mod.rs index d412ff152a047..0e0f9eccb2137 100644 --- a/library/std/src/sys/windows/locks/mod.rs +++ b/library/std/src/sys/windows/locks/mod.rs @@ -1,6 +1,6 @@ mod condvar; mod mutex; mod rwlock; -pub use condvar::{Condvar, MovableCondvar}; -pub use mutex::{MovableMutex, Mutex}; -pub use rwlock::{MovableRwLock, RwLock}; +pub use condvar::Condvar; +pub use mutex::Mutex; +pub use rwlock::RwLock; diff --git a/library/std/src/sys/windows/locks/mutex.rs 
b/library/std/src/sys/windows/locks/mutex.rs index f91e8f9f59a14..e15f766092f1f 100644 --- a/library/std/src/sys/windows/locks/mutex.rs +++ b/library/std/src/sys/windows/locks/mutex.rs @@ -21,14 +21,11 @@ pub struct Mutex { srwlock: UnsafeCell, } -// Windows SRW Locks are movable (while not borrowed). -pub type MovableMutex = Mutex; - unsafe impl Send for Mutex {} unsafe impl Sync for Mutex {} #[inline] -pub unsafe fn raw(m: &Mutex) -> c::PSRWLOCK { +pub fn raw(m: &Mutex) -> c::PSRWLOCK { m.srwlock.get() } @@ -37,17 +34,17 @@ impl Mutex { pub const fn new() -> Mutex { Mutex { srwlock: UnsafeCell::new(c::SRWLOCK_INIT) } } - #[inline] - pub unsafe fn init(&mut self) {} #[inline] - pub unsafe fn lock(&self) { - c::AcquireSRWLockExclusive(raw(self)); + pub fn lock(&self) { + unsafe { + c::AcquireSRWLockExclusive(raw(self)); + } } #[inline] - pub unsafe fn try_lock(&self) -> bool { - c::TryAcquireSRWLockExclusive(raw(self)) != 0 + pub fn try_lock(&self) -> bool { + unsafe { c::TryAcquireSRWLockExclusive(raw(self)) != 0 } } #[inline] diff --git a/library/std/src/sys/windows/locks/rwlock.rs b/library/std/src/sys/windows/locks/rwlock.rs index fa5ffe5749f25..9381e12414b42 100644 --- a/library/std/src/sys/windows/locks/rwlock.rs +++ b/library/std/src/sys/windows/locks/rwlock.rs @@ -5,8 +5,6 @@ pub struct RwLock { inner: UnsafeCell, } -pub type MovableRwLock = RwLock; - unsafe impl Send for RwLock {} unsafe impl Sync for RwLock {} @@ -15,26 +13,32 @@ impl RwLock { pub const fn new() -> RwLock { RwLock { inner: UnsafeCell::new(c::SRWLOCK_INIT) } } + #[inline] - pub unsafe fn read(&self) { - c::AcquireSRWLockShared(self.inner.get()) + pub fn read(&self) { + unsafe { c::AcquireSRWLockShared(self.inner.get()) } } + #[inline] - pub unsafe fn try_read(&self) -> bool { - c::TryAcquireSRWLockShared(self.inner.get()) != 0 + pub fn try_read(&self) -> bool { + unsafe { c::TryAcquireSRWLockShared(self.inner.get()) != 0 } } + #[inline] - pub unsafe fn write(&self) { - c::AcquireSRWLockExclusive(self.inner.get()) + pub fn write(&self) { + unsafe { c::AcquireSRWLockExclusive(self.inner.get()) } } + #[inline] - pub unsafe fn try_write(&self) -> bool { - c::TryAcquireSRWLockExclusive(self.inner.get()) != 0 + pub fn try_write(&self) -> bool { + unsafe { c::TryAcquireSRWLockExclusive(self.inner.get()) != 0 } } + #[inline] pub unsafe fn read_unlock(&self) { c::ReleaseSRWLockShared(self.inner.get()) } + #[inline] pub unsafe fn write_unlock(&self) { c::ReleaseSRWLockExclusive(self.inner.get()) diff --git a/library/std/src/sys/windows/process.rs b/library/std/src/sys/windows/process.rs index 02d5af4719ae8..9cbb4ef19e9b7 100644 --- a/library/std/src/sys/windows/process.rs +++ b/library/std/src/sys/windows/process.rs @@ -16,6 +16,7 @@ use crate::os::windows::ffi::{OsStrExt, OsStringExt}; use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle, FromRawHandle, IntoRawHandle}; use crate::path::{Path, PathBuf}; use crate::ptr; +use crate::sync::Mutex; use crate::sys::args::{self, Arg}; use crate::sys::c; use crate::sys::c::NonZeroDWORD; @@ -25,7 +26,6 @@ use crate::sys::handle::Handle; use crate::sys::path; use crate::sys::pipe::{self, AnonPipe}; use crate::sys::stdio; -use crate::sys_common::mutex::StaticMutex; use crate::sys_common::process::{CommandEnv, CommandEnvs}; use crate::sys_common::IntoInner; @@ -301,9 +301,9 @@ impl Command { // // For more information, msdn also has an article about this race: // https://support.microsoft.com/kb/315939 - static CREATE_PROCESS_LOCK: StaticMutex = StaticMutex::new(); + 
static CREATE_PROCESS_LOCK: Mutex<()> = Mutex::new(()); - let _guard = unsafe { CREATE_PROCESS_LOCK.lock() }; + let _guard = CREATE_PROCESS_LOCK.lock(); let mut pipes = StdioPipes { stdin: None, stdout: None, stderr: None }; let null = Stdio::Null; diff --git a/library/std/src/sys/windows/thread_local_key.rs b/library/std/src/sys/windows/thread_local_key.rs index ec670238e6f0e..3796fe856574a 100644 --- a/library/std/src/sys/windows/thread_local_key.rs +++ b/library/std/src/sys/windows/thread_local_key.rs @@ -66,16 +66,6 @@ pub unsafe fn get(key: Key) -> *mut u8 { c::TlsGetValue(key) as *mut u8 } -#[inline] -pub unsafe fn destroy(_key: Key) { - rtabort!("can't destroy tls keys on windows") -} - -#[inline] -pub fn requires_synchronized_create() -> bool { - true -} - // ------------------------------------------------------------------------- // Dtor registration // diff --git a/library/std/src/sys_common/backtrace.rs b/library/std/src/sys_common/backtrace.rs index 31164afdc7b54..8807077cb4927 100644 --- a/library/std/src/sys_common/backtrace.rs +++ b/library/std/src/sys_common/backtrace.rs @@ -7,15 +7,14 @@ use crate::fmt; use crate::io; use crate::io::prelude::*; use crate::path::{self, Path, PathBuf}; -use crate::sys_common::mutex::StaticMutex; +use crate::sync::{Mutex, PoisonError}; /// Max number of frames to print. const MAX_NB_FRAMES: usize = 100; -// SAFETY: Don't attempt to lock this reentrantly. -pub unsafe fn lock() -> impl Drop { - static LOCK: StaticMutex = StaticMutex::new(); - LOCK.lock() +pub fn lock() -> impl Drop { + static LOCK: Mutex<()> = Mutex::new(()); + LOCK.lock().unwrap_or_else(PoisonError::into_inner) } /// Prints the current backtrace. diff --git a/library/std/src/sys_common/condvar.rs b/library/std/src/sys_common/condvar.rs deleted file mode 100644 index f3ac1061b8935..0000000000000 --- a/library/std/src/sys_common/condvar.rs +++ /dev/null @@ -1,56 +0,0 @@ -use crate::sys::locks as imp; -use crate::sys_common::mutex::MovableMutex; -use crate::time::Duration; - -mod check; - -type CondvarCheck = ::Check; - -/// An OS-based condition variable. -pub struct Condvar { - inner: imp::MovableCondvar, - check: CondvarCheck, -} - -impl Condvar { - /// Creates a new condition variable for use. - #[inline] - pub const fn new() -> Self { - Self { inner: imp::MovableCondvar::new(), check: CondvarCheck::new() } - } - - /// Signals one waiter on this condition variable to wake up. - #[inline] - pub fn notify_one(&self) { - unsafe { self.inner.notify_one() }; - } - - /// Awakens all current waiters on this condition variable. - #[inline] - pub fn notify_all(&self) { - unsafe { self.inner.notify_all() }; - } - - /// Waits for a signal on the specified mutex. - /// - /// Behavior is undefined if the mutex is not locked by the current thread. - /// - /// May panic if used with more than one mutex. - #[inline] - pub unsafe fn wait(&self, mutex: &MovableMutex) { - self.check.verify(mutex); - self.inner.wait(mutex.raw()) - } - - /// Waits for a signal on the specified mutex with a timeout duration - /// specified by `dur` (a relative time into the future). - /// - /// Behavior is undefined if the mutex is not locked by the current thread. - /// - /// May panic if used with more than one mutex. 
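The doc comments above state the contract the deleted wrapper enforced at runtime: one condition variable, one mutex. That contract survives unchanged at the `std::sync` level, where `Condvar` may panic if paired with two different mutexes. A usage sketch with illustrative names:

use std::sync::{Condvar, Mutex};
use std::time::Duration;

// Keep the Condvar permanently paired with a single Mutex.
fn wait_briefly(pair: &(Mutex<bool>, Condvar)) -> bool {
    let (lock, cvar) = pair;
    let guard = lock.lock().unwrap();
    let (_guard, result) = cvar.wait_timeout(guard, Duration::from_millis(10)).unwrap();
    !result.timed_out()
}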
- #[inline] - pub unsafe fn wait_timeout(&self, mutex: &MovableMutex, dur: Duration) -> bool { - self.check.verify(mutex); - self.inner.wait_timeout(mutex.raw(), dur) - } -} diff --git a/library/std/src/sys_common/condvar/check.rs b/library/std/src/sys_common/condvar/check.rs deleted file mode 100644 index ce8f36704877f..0000000000000 --- a/library/std/src/sys_common/condvar/check.rs +++ /dev/null @@ -1,57 +0,0 @@ -use crate::ptr; -use crate::sync::atomic::{AtomicPtr, Ordering}; -use crate::sys::locks as imp; -use crate::sys_common::lazy_box::{LazyBox, LazyInit}; -use crate::sys_common::mutex::MovableMutex; - -pub trait CondvarCheck { - type Check; -} - -/// For boxed mutexes, a `Condvar` will check it's only ever used with the same -/// mutex, based on its (stable) address. -impl CondvarCheck for LazyBox { - type Check = SameMutexCheck; -} - -pub struct SameMutexCheck { - addr: AtomicPtr<()>, -} - -#[allow(dead_code)] -impl SameMutexCheck { - pub const fn new() -> Self { - Self { addr: AtomicPtr::new(ptr::null_mut()) } - } - pub fn verify(&self, mutex: &MovableMutex) { - let addr = mutex.raw() as *const imp::Mutex as *const () as *mut _; - // Relaxed is okay here because we never read through `self.addr`, and only use it to - // compare addresses. - match self.addr.compare_exchange( - ptr::null_mut(), - addr, - Ordering::Relaxed, - Ordering::Relaxed, - ) { - Ok(_) => {} // Stored the address - Err(n) if n == addr => {} // Lost a race to store the same address - _ => panic!("attempted to use a condition variable with two mutexes"), - } - } -} - -/// Unboxed mutexes may move, so `Condvar` can not require its address to stay -/// constant. -impl CondvarCheck for imp::Mutex { - type Check = NoCheck; -} - -pub struct NoCheck; - -#[allow(dead_code)] -impl NoCheck { - pub const fn new() -> Self { - Self - } - pub fn verify(&self, _: &MovableMutex) {} -} diff --git a/library/std/src/sys_common/mod.rs b/library/std/src/sys_common/mod.rs index 80f56bf7522b6..b1fc3c3073d2a 100644 --- a/library/std/src/sys_common/mod.rs +++ b/library/std/src/sys_common/mod.rs @@ -21,15 +21,12 @@ mod tests; pub mod backtrace; -pub mod condvar; pub mod fs; pub mod io; pub mod lazy_box; pub mod memchr; -pub mod mutex; pub mod process; pub mod remutex; -pub mod rwlock; pub mod thread; pub mod thread_info; pub mod thread_local_dtor; diff --git a/library/std/src/sys_common/mutex.rs b/library/std/src/sys_common/mutex.rs deleted file mode 100644 index 48479f5bdb3f7..0000000000000 --- a/library/std/src/sys_common/mutex.rs +++ /dev/null @@ -1,93 +0,0 @@ -use crate::sys::locks as imp; - -/// An OS-based mutual exclusion lock, meant for use in static variables. -/// -/// This mutex has a const constructor ([`StaticMutex::new`]), does not -/// implement `Drop` to cleanup resources, and causes UB when used reentrantly. -/// -/// This mutex does not implement poisoning. -/// -/// This is a wrapper around `imp::Mutex` that does *not* call `init()` and -/// `destroy()`. -pub struct StaticMutex(imp::Mutex); - -unsafe impl Sync for StaticMutex {} - -impl StaticMutex { - /// Creates a new mutex for use. - #[inline] - pub const fn new() -> Self { - Self(imp::Mutex::new()) - } - - /// Calls raw_lock() and then returns an RAII guard to guarantee the mutex - /// will be unlocked. - /// - /// It is undefined behaviour to call this function while locked by the - /// same thread. 
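The `SameMutexCheck` deleted in `check.rs` above reduces to a lock-free "remember the first address" cell. Distilled into a standalone sketch (hypothetical names, but the same compare_exchange pattern as the removed code):

use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

struct SameObjectCheck {
    addr: AtomicPtr<()>,
}

impl SameObjectCheck {
    const fn new() -> Self {
        Self { addr: AtomicPtr::new(ptr::null_mut()) }
    }

    fn verify(&self, object: *const ()) {
        let object = object as *mut ();
        // Relaxed is enough: the pointer is only compared, never dereferenced.
        match self.addr.compare_exchange(
            ptr::null_mut(),
            object,
            Ordering::Relaxed,
            Ordering::Relaxed,
        ) {
            Ok(_) => {}                       // first use: address recorded
            Err(prev) if prev == object => {} // same object as before
            _ => panic!("attempted to use a condition variable with two mutexes"),
        }
    }
}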
- #[inline] - pub unsafe fn lock(&'static self) -> StaticMutexGuard { - self.0.lock(); - StaticMutexGuard(&self.0) - } -} - -#[must_use] -pub struct StaticMutexGuard(&'static imp::Mutex); - -impl Drop for StaticMutexGuard { - #[inline] - fn drop(&mut self) { - unsafe { - self.0.unlock(); - } - } -} - -/// An OS-based mutual exclusion lock. -/// -/// This mutex cleans up its resources in its `Drop` implementation, may safely -/// be moved (when not borrowed), and does not cause UB when used reentrantly. -/// -/// This mutex does not implement poisoning. -/// -/// This is either a wrapper around `LazyBox` or `imp::Mutex`, -/// depending on the platform. It is boxed on platforms where `imp::Mutex` may -/// not be moved. -pub struct MovableMutex(imp::MovableMutex); - -unsafe impl Sync for MovableMutex {} - -impl MovableMutex { - /// Creates a new mutex. - #[inline] - pub const fn new() -> Self { - Self(imp::MovableMutex::new()) - } - - pub(super) fn raw(&self) -> &imp::Mutex { - &self.0 - } - - /// Locks the mutex blocking the current thread until it is available. - #[inline] - pub fn raw_lock(&self) { - unsafe { self.0.lock() } - } - - /// Attempts to lock the mutex without blocking, returning whether it was - /// successfully acquired or not. - #[inline] - pub fn try_lock(&self) -> bool { - unsafe { self.0.try_lock() } - } - - /// Unlocks the mutex. - /// - /// Behavior is undefined if the current thread does not actually hold the - /// mutex. - #[inline] - pub unsafe fn raw_unlock(&self) { - self.0.unlock() - } -} diff --git a/library/std/src/sys_common/remutex.rs b/library/std/src/sys_common/remutex.rs index 8921af311d415..4c054da64714c 100644 --- a/library/std/src/sys_common/remutex.rs +++ b/library/std/src/sys_common/remutex.rs @@ -2,10 +2,8 @@ mod tests; use crate::cell::UnsafeCell; -use crate::marker::PhantomPinned; use crate::ops::Deref; use crate::panic::{RefUnwindSafe, UnwindSafe}; -use crate::pin::Pin; use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed}; use crate::sys::locks as sys; @@ -45,7 +43,6 @@ pub struct ReentrantMutex { owner: AtomicUsize, lock_count: UnsafeCell, data: T, - _pinned: PhantomPinned, } unsafe impl Send for ReentrantMutex {} @@ -68,39 +65,22 @@ impl RefUnwindSafe for ReentrantMutex {} /// guarded data. #[must_use = "if unused the ReentrantMutex will immediately unlock"] pub struct ReentrantMutexGuard<'a, T: 'a> { - lock: Pin<&'a ReentrantMutex>, + lock: &'a ReentrantMutex, } impl !Send for ReentrantMutexGuard<'_, T> {} impl ReentrantMutex { /// Creates a new reentrant mutex in an unlocked state. - /// - /// # Unsafety - /// - /// This function is unsafe because it is required that `init` is called - /// once this mutex is in its final resting place, and only then are the - /// lock/unlock methods safe. - pub const unsafe fn new(t: T) -> ReentrantMutex { + pub const fn new(t: T) -> ReentrantMutex { ReentrantMutex { mutex: sys::Mutex::new(), owner: AtomicUsize::new(0), lock_count: UnsafeCell::new(0), data: t, - _pinned: PhantomPinned, } } - /// Initializes this mutex so it's ready for use. - /// - /// # Unsafety - /// - /// Unsafe to call more than once, and must be called after this will no - /// longer move in memory. - pub unsafe fn init(self: Pin<&mut Self>) { - self.get_unchecked_mut().mutex.init() - } - /// Acquires a mutex, blocking the current thread until it is able to do so. /// /// This function will block the caller until it is available to acquire the mutex. 
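Every user of the `sys_common::mutex::StaticMutex` deleted just above migrates to the same safe shape elsewhere in this patch: a `static` `std::sync::Mutex` whose poison flag is deliberately discarded, since these locks guard nothing beyond mutual exclusion. A minimal sketch of the pattern:

use std::sync::{Mutex, PoisonError};

static LOCK: Mutex<()> = Mutex::new(());

// Acquire the lock even if a previous holder panicked: a `()` payload
// carries no invariant, so the poison state is meaningless here.
fn with_lock<R>(f: impl FnOnce() -> R) -> R {
    let _guard = LOCK.lock().unwrap_or_else(PoisonError::into_inner);
    f()
}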
@@ -113,10 +93,9 @@ impl ReentrantMutex { /// If another user of this mutex panicked while holding the mutex, then /// this call will return failure if the mutex would otherwise be /// acquired. - pub fn lock(self: Pin<&Self>) -> ReentrantMutexGuard<'_, T> { + pub fn lock(&self) -> ReentrantMutexGuard<'_, T> { let this_thread = current_thread_unique_ptr(); - // Safety: We only touch lock_count when we own the lock, - // and since self is pinned we can safely call the lock() on the mutex. + // Safety: We only touch lock_count when we own the lock. unsafe { if self.owner.load(Relaxed) == this_thread { self.increment_lock_count(); @@ -142,10 +121,9 @@ impl ReentrantMutex { /// If another user of this mutex panicked while holding the mutex, then /// this call will return failure if the mutex would otherwise be /// acquired. - pub fn try_lock(self: Pin<&Self>) -> Option> { + pub fn try_lock(&self) -> Option> { let this_thread = current_thread_unique_ptr(); - // Safety: We only touch lock_count when we own the lock, - // and since self is pinned we can safely call the try_lock on the mutex. + // Safety: We only touch lock_count when we own the lock. unsafe { if self.owner.load(Relaxed) == this_thread { self.increment_lock_count(); @@ -179,7 +157,7 @@ impl Deref for ReentrantMutexGuard<'_, T> { impl Drop for ReentrantMutexGuard<'_, T> { #[inline] fn drop(&mut self) { - // Safety: We own the lock, and the lock is pinned. + // Safety: We own the lock. unsafe { *self.lock.lock_count.get() -= 1; if *self.lock.lock_count.get() == 0 { diff --git a/library/std/src/sys_common/remutex/tests.rs b/library/std/src/sys_common/remutex/tests.rs index 64873b850d3fa..8e97ce11c34f5 100644 --- a/library/std/src/sys_common/remutex/tests.rs +++ b/library/std/src/sys_common/remutex/tests.rs @@ -1,18 +1,11 @@ -use crate::boxed::Box; use crate::cell::RefCell; -use crate::pin::Pin; use crate::sync::Arc; use crate::sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard}; use crate::thread; #[test] fn smoke() { - let m = unsafe { - let mut m = Box::pin(ReentrantMutex::new(())); - m.as_mut().init(); - m - }; - let m = m.as_ref(); + let m = ReentrantMutex::new(()); { let a = m.lock(); { @@ -29,20 +22,15 @@ fn smoke() { #[test] fn is_mutex() { - let m = unsafe { - // FIXME: Simplify this if Arc gets an Arc::get_pin_mut. - let mut m = Arc::new(ReentrantMutex::new(RefCell::new(0))); - Pin::new_unchecked(Arc::get_mut_unchecked(&mut m)).init(); - Pin::new_unchecked(m) - }; + let m = Arc::new(ReentrantMutex::new(RefCell::new(0))); let m2 = m.clone(); - let lock = m.as_ref().lock(); + let lock = m.lock(); let child = thread::spawn(move || { - let lock = m2.as_ref().lock(); + let lock = m2.lock(); assert_eq!(*lock.borrow(), 4950); }); for i in 0..100 { - let lock = m.as_ref().lock(); + let lock = m.lock(); *lock.borrow_mut() += i; } drop(lock); @@ -51,22 +39,17 @@ fn is_mutex() { #[test] fn trylock_works() { - let m = unsafe { - // FIXME: Simplify this if Arc gets an Arc::get_pin_mut. 
- let mut m = Arc::new(ReentrantMutex::new(())); - Pin::new_unchecked(Arc::get_mut_unchecked(&mut m)).init(); - Pin::new_unchecked(m) - }; + let m = Arc::new(ReentrantMutex::new(())); let m2 = m.clone(); - let _lock = m.as_ref().try_lock(); - let _lock2 = m.as_ref().try_lock(); + let _lock = m.try_lock(); + let _lock2 = m.try_lock(); thread::spawn(move || { - let lock = m2.as_ref().try_lock(); + let lock = m2.try_lock(); assert!(lock.is_none()); }) .join() .unwrap(); - let _lock3 = m.as_ref().try_lock(); + let _lock3 = m.try_lock(); } pub struct Answer<'a>(pub ReentrantMutexGuard<'a, RefCell>); diff --git a/library/std/src/sys_common/rwlock.rs b/library/std/src/sys_common/rwlock.rs deleted file mode 100644 index ba56f3a8f1b52..0000000000000 --- a/library/std/src/sys_common/rwlock.rs +++ /dev/null @@ -1,130 +0,0 @@ -use crate::sys::locks as imp; - -/// An OS-based reader-writer lock, meant for use in static variables. -/// -/// This rwlock does not implement poisoning. -/// -/// This rwlock has a const constructor ([`StaticRwLock::new`]), does not -/// implement `Drop` to cleanup resources. -pub struct StaticRwLock(imp::RwLock); - -impl StaticRwLock { - /// Creates a new rwlock for use. - #[inline] - pub const fn new() -> Self { - Self(imp::RwLock::new()) - } - - /// Acquires shared access to the underlying lock, blocking the current - /// thread to do so. - /// - /// The lock is automatically unlocked when the returned guard is dropped. - #[inline] - pub fn read(&'static self) -> StaticRwLockReadGuard { - unsafe { self.0.read() }; - StaticRwLockReadGuard(&self.0) - } - - /// Acquires write access to the underlying lock, blocking the current thread - /// to do so. - /// - /// The lock is automatically unlocked when the returned guard is dropped. - #[inline] - pub fn write(&'static self) -> StaticRwLockWriteGuard { - unsafe { self.0.write() }; - StaticRwLockWriteGuard(&self.0) - } -} - -#[must_use] -pub struct StaticRwLockReadGuard(&'static imp::RwLock); - -impl Drop for StaticRwLockReadGuard { - #[inline] - fn drop(&mut self) { - unsafe { - self.0.read_unlock(); - } - } -} - -#[must_use] -pub struct StaticRwLockWriteGuard(&'static imp::RwLock); - -impl Drop for StaticRwLockWriteGuard { - #[inline] - fn drop(&mut self) { - unsafe { - self.0.write_unlock(); - } - } -} - -/// An OS-based reader-writer lock. -/// -/// This rwlock cleans up its resources in its `Drop` implementation and may -/// safely be moved (when not borrowed). -/// -/// This rwlock does not implement poisoning. -/// -/// This is either a wrapper around `LazyBox` or `imp::RwLock`, -/// depending on the platform. It is boxed on platforms where `imp::RwLock` may -/// not be moved. -pub struct MovableRwLock(imp::MovableRwLock); - -impl MovableRwLock { - /// Creates a new reader-writer lock for use. - #[inline] - pub const fn new() -> Self { - Self(imp::MovableRwLock::new()) - } - - /// Acquires shared access to the underlying lock, blocking the current - /// thread to do so. - #[inline] - pub fn read(&self) { - unsafe { self.0.read() } - } - - /// Attempts to acquire shared access to this lock, returning whether it - /// succeeded or not. - /// - /// This function does not block the current thread. - #[inline] - pub fn try_read(&self) -> bool { - unsafe { self.0.try_read() } - } - - /// Acquires write access to the underlying lock, blocking the current thread - /// to do so. 
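With `StaticRwLock` gone, callers such as `env_read_lock` in `sys/unix/os.rs` earlier in this patch keep their narrow interface by returning `impl Drop`, exposing nothing about the guard except that releasing it unlocks. The same trick in isolation (static and function names here are made up):

use std::sync::{PoisonError, RwLock};

static STATE_LOCK: RwLock<()> = RwLock::new(());

// Callers only learn that dropping the return value releases the lock;
// the concrete RwLockReadGuard type stays an implementation detail.
fn state_read_lock() -> impl Drop {
    STATE_LOCK.read().unwrap_or_else(PoisonError::into_inner)
}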
- #[inline] - pub fn write(&self) { - unsafe { self.0.write() } - } - - /// Attempts to acquire exclusive access to this lock, returning whether it - /// succeeded or not. - /// - /// This function does not block the current thread. - #[inline] - pub fn try_write(&self) -> bool { - unsafe { self.0.try_write() } - } - - /// Unlocks previously acquired shared access to this lock. - /// - /// Behavior is undefined if the current thread does not have shared access. - #[inline] - pub unsafe fn read_unlock(&self) { - self.0.read_unlock() - } - - /// Unlocks previously acquired exclusive access to this lock. - /// - /// Behavior is undefined if the current thread does not currently have - /// exclusive access. - #[inline] - pub unsafe fn write_unlock(&self) { - self.0.write_unlock() - } -} diff --git a/library/std/src/sys_common/thread_local_key.rs b/library/std/src/sys_common/thread_local_key.rs index 70beebe86d20b..0c4f162882cd5 100644 --- a/library/std/src/sys_common/thread_local_key.rs +++ b/library/std/src/sys_common/thread_local_key.rs @@ -53,7 +53,6 @@ mod tests; use crate::sync::atomic::{self, AtomicUsize, Ordering}; use crate::sys::thread_local_key as imp; -use crate::sys_common::mutex::StaticMutex; /// A type for TLS keys that are statically allocated. /// @@ -149,50 +148,53 @@ impl StaticKey { } unsafe fn lazy_init(&self) -> usize { - // Currently the Windows implementation of TLS is pretty hairy, and - // it greatly simplifies creation if we just synchronize everything. - // - // Additionally a 0-index of a tls key hasn't been seen on windows, so - // we just simplify the whole branch. - if imp::requires_synchronized_create() { - // We never call `INIT_LOCK.init()`, so it is UB to attempt to - // acquire this mutex reentrantly! - static INIT_LOCK: StaticMutex = StaticMutex::new(); - let _guard = INIT_LOCK.lock(); - let mut key = self.key.load(Ordering::SeqCst); - if key == 0 { - key = imp::create(self.dtor) as usize; - self.key.store(key, Ordering::SeqCst); - } - rtassert!(key != 0); - return key; - } - - // POSIX allows the key created here to be 0, but the compare_exchange - // below relies on using 0 as a sentinel value to check who won the - // race to set the shared TLS key. As far as I know, there is no - // guaranteed value that cannot be returned as a posix_key_create key, - // so there is no value we can initialize the inner key with to - // prove that it has not yet been set. As such, we'll continue using a - // value of 0, but with some gyrations to make sure we have a non-0 - // value returned from the creation routine. - // FIXME: this is clearly a hack, and should be cleaned up. - let key1 = imp::create(self.dtor); - let key = if key1 != 0 { - key1 - } else { - let key2 = imp::create(self.dtor); - imp::destroy(key1); - key2 - }; - rtassert!(key != 0); - match self.key.compare_exchange(0, key as usize, Ordering::SeqCst, Ordering::SeqCst) { - // The CAS succeeded, so we've created the actual key - Ok(_) => key as usize, - // If someone beat us to the punch, use their key instead - Err(n) => { - imp::destroy(key); - n + cfg_if::cfg_if! { + if #[cfg(windows)] { + // Currently the Windows implementation of TLS is pretty hairy, and + // it greatly simplifies creation if we just synchronize everything. + // + // Additionally a 0-index of a tls key hasn't been seen on windows, so + // we just simplify the whole branch. 
+ use crate::sync::Mutex; + + static INIT_LOCK: Mutex<()> = Mutex::new(()); + + let _guard = INIT_LOCK.lock(); + let mut key = self.key.load(Ordering::Acquire); + if key == 0 { + key = imp::create(self.dtor) as usize; + self.key.store(key, Ordering::Release); + } + rtassert!(key != 0); + key + } else { + // POSIX allows the key created here to be 0, but the compare_exchange + // below relies on using 0 as a sentinel value to check who won the + // race to set the shared TLS key. As far as I know, there is no + // guaranteed value that cannot be returned as a posix_key_create key, + // so there is no value we can initialize the inner key with to + // prove that it has not yet been set. As such, we'll continue using a + // value of 0, but with some gyrations to make sure we have a non-0 + // value returned from the creation routine. + // FIXME: this is clearly a hack, and should be cleaned up. + let key1 = imp::create(self.dtor); + let key = if key1 != 0 { + key1 + } else { + let key2 = imp::create(self.dtor); + imp::destroy(key1); + key2 + }; + rtassert!(key != 0); + match self.key.compare_exchange(0, key as usize, Ordering::Release, Ordering::Acquire) { + // The CAS succeeded, so we've created the actual key + Ok(_) => key as usize, + // If someone beat us to the punch, use their key instead + Err(n) => { + imp::destroy(key); + n + } + } } } } diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs index 479669647c128..902ed1c6f278e 100644 --- a/library/std/src/thread/mod.rs +++ b/library/std/src/thread/mod.rs @@ -1055,24 +1055,21 @@ impl ThreadId { } } } else { - use crate::sys_common::mutex::StaticMutex; + use crate::sync::{Mutex, PoisonError}; - // It is UB to attempt to acquire this mutex reentrantly! - static GUARD: StaticMutex = StaticMutex::new(); - static mut COUNTER: u64 = 0; + static COUNTER: Mutex = Mutex::new(0); - unsafe { - let guard = GUARD.lock(); + let mut counter = COUNTER.lock().unwrap_or_else(PoisonError::into_inner); + let Some(id) = counter.checked_add(1) else { + // in case the panic handler ends up calling `ThreadId::new()`, + // avoid reentrant lock acquire. + drop(counter); + exhausted(); + }; - let Some(id) = COUNTER.checked_add(1) else { - drop(guard); // in case the panic handler ends up calling `ThreadId::new()`, avoid reentrant lock acquire. - exhausted(); - }; - - COUNTER = id; - drop(guard); - ThreadId(NonZeroU64::new(id).unwrap()) - } + *counter = id; + drop(counter); + ThreadId(NonZeroU64::new(id).unwrap()) } } }
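The closing hunk trades a `static mut` counter guarded by an unsafe `StaticMutex` for a safe `Mutex<u64>`. Note the explicit `drop(counter)` before the panic path: a std mutex is not reentrant, and the panic handler may itself request a `ThreadId`. The same shape as a freestanding sketch (`exhausted()` replaced here by a plain panic):

use std::num::NonZeroU64;
use std::sync::{Mutex, PoisonError};

static COUNTER: Mutex<u64> = Mutex::new(0);

fn next_id() -> NonZeroU64 {
    let mut counter = COUNTER.lock().unwrap_or_else(PoisonError::into_inner);
    let Some(id) = counter.checked_add(1) else {
        // Release the lock before panicking so a panic hook that also
        // asks for an id does not deadlock on reacquisition.
        drop(counter);
        panic!("failed to generate unique thread ID: bitspace exhausted");
    };
    *counter = id;
    drop(counter);
    NonZeroU64::new(id).unwrap()
}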