std: remove lock wrappers in sys_common
parent 534ddc6166
commit 98815742cf
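This commit deletes the `MovableMutex`/`MovableRwLock`/`MovableCondvar` wrapper layer and the `sys_common::{condvar, mutex, rwlock}` modules, and uses the `sys::locks` types directly: platforms whose primitives are trivially movable (futex, SRW locks, ITRON, unsupported) export them as plain `Mutex`/`RwLock`/`Condvar`, while platforms with address-sensitive state (pthread, SGX) now box that state internally behind a `LazyBox<Allocated*>` field. Operations that are sound to call at any time also lose the `unsafe` on their signatures. A minimal sketch of the lazy-boxing idea, using `OnceLock` from the public standard library instead of std's internal `LazyBox` (which additionally has `destroy`/`cancel_init` hooks, visible in the pthread hunks below):

```rust
use std::sync::OnceLock;

// Stand-in for address-sensitive OS state (e.g. a pthread_cond_t).
struct AllocatedState(i32);

// The outer type stays const-constructible and freely movable; the
// state gets a stable heap address on first use.
struct Lazy {
    inner: OnceLock<Box<AllocatedState>>,
}

impl Lazy {
    const fn new() -> Self {
        Lazy { inner: OnceLock::new() }
    }

    fn get(&self) -> &AllocatedState {
        // The first call allocates; the Box pins the state's address.
        self.inner.get_or_init(|| Box::new(AllocatedState(0)))
    }
}

fn main() {
    let lazy = Lazy::new();
    let first = lazy.get() as *const AllocatedState;
    // Repeated calls observe the same stable address.
    assert_eq!(first, lazy.get() as *const AllocatedState);
}
```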
@@ -3,7 +3,7 @@
 use crate::fmt;
 use crate::sync::{mutex, poison, LockResult, MutexGuard, PoisonError};
-use crate::sys_common::condvar as sys;
+use crate::sys::locks as sys;
 use crate::time::{Duration, Instant};
 
 /// A type indicating whether a timed wait on a condition variable returned
@@ -5,7 +5,7 @@
 use crate::fmt;
 use crate::ops::{Deref, DerefMut};
 use crate::sync::{poison, LockResult, TryLockError, TryLockResult};
-use crate::sys_common::mutex as sys;
+use crate::sys::locks as sys;
 
 /// A mutual exclusion primitive useful for protecting shared data
 ///
@@ -163,7 +163,7 @@
 #[stable(feature = "rust1", since = "1.0.0")]
 #[cfg_attr(not(test), rustc_diagnostic_item = "Mutex")]
 pub struct Mutex<T: ?Sized> {
-    inner: sys::MovableMutex,
+    inner: sys::Mutex,
     poison: poison::Flag,
     data: UnsafeCell<T>,
 }
@@ -217,11 +217,7 @@ impl<T> Mutex<T> {
     #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
     #[inline]
     pub const fn new(t: T) -> Mutex<T> {
-        Mutex {
-            inner: sys::MovableMutex::new(),
-            poison: poison::Flag::new(),
-            data: UnsafeCell::new(t),
-        }
+        Mutex { inner: sys::Mutex::new(), poison: poison::Flag::new(), data: UnsafeCell::new(t) }
     }
 }
@@ -264,7 +260,7 @@ impl<T: ?Sized> Mutex<T> {
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn lock(&self) -> LockResult<MutexGuard<'_, T>> {
         unsafe {
-            self.inner.raw_lock();
+            self.inner.lock();
             MutexGuard::new(self)
         }
     }
@@ -526,7 +522,7 @@ impl<T: ?Sized> Drop for MutexGuard<'_, T> {
     fn drop(&mut self) {
         unsafe {
             self.lock.poison.done(&self.poison);
-            self.lock.inner.raw_unlock();
+            self.lock.inner.unlock();
         }
     }
 }
@@ -545,7 +541,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
     }
 }
 
-pub fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a sys::MovableMutex {
+pub fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a sys::Mutex {
     &guard.lock.inner
 }
 
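The `sync::Mutex` changes above are internal only: the stable API and its poisoning behaviour are unchanged, only the private field type and the raw method names move. For reference, user code is unaffected:

```rust
use std::sync::Mutex;

fn main() {
    let counter = Mutex::new(0);
    *counter.lock().unwrap() += 1;
    assert_eq!(*counter.lock().unwrap(), 1);
}
```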
@@ -6,7 +6,7 @@
 use crate::ops::{Deref, DerefMut};
 use crate::ptr::NonNull;
 use crate::sync::{poison, LockResult, TryLockError, TryLockResult};
-use crate::sys_common::rwlock as sys;
+use crate::sys::locks as sys;
 
 /// A reader-writer lock
 ///
@@ -78,7 +78,7 @@
 #[stable(feature = "rust1", since = "1.0.0")]
 #[cfg_attr(not(test), rustc_diagnostic_item = "RwLock")]
 pub struct RwLock<T: ?Sized> {
-    inner: sys::MovableRwLock,
+    inner: sys::RwLock,
     poison: poison::Flag,
     data: UnsafeCell<T>,
 }
@@ -109,7 +109,7 @@ pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
     // `NonNull` is also covariant over `T`, just like we would have with `&T`. `NonNull`
    // is preferable over `const* T` to allow for niche optimization.
     data: NonNull<T>,
-    inner_lock: &'a sys::MovableRwLock,
+    inner_lock: &'a sys::RwLock,
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -158,11 +158,7 @@ impl<T> RwLock<T> {
     #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
     #[inline]
     pub const fn new(t: T) -> RwLock<T> {
-        RwLock {
-            inner: sys::MovableRwLock::new(),
-            poison: poison::Flag::new(),
-            data: UnsafeCell::new(t),
-        }
+        RwLock { inner: sys::RwLock::new(), poison: poison::Flag::new(), data: UnsafeCell::new(t) }
     }
 }
 
@@ -51,9 +51,9 @@ pub mod locks {
     mod futex_condvar;
     mod futex_mutex;
    mod futex_rwlock;
-    pub(crate) use futex_condvar::MovableCondvar;
-    pub(crate) use futex_mutex::{MovableMutex, Mutex};
-    pub(crate) use futex_rwlock::{MovableRwLock, RwLock};
+    pub(crate) use futex_condvar::Condvar;
+    pub(crate) use futex_mutex::Mutex;
+    pub(crate) use futex_rwlock::RwLock;
 }
 
 use crate::io::ErrorKind;
@@ -12,18 +12,13 @@ pub struct Condvar {
 unsafe impl Send for Condvar {}
 unsafe impl Sync for Condvar {}
 
-pub type MovableCondvar = Condvar;
-
 impl Condvar {
     #[inline]
     pub const fn new() -> Condvar {
         Condvar { waiters: SpinMutex::new(waiter_queue::WaiterQueue::new()) }
     }
 
-    #[inline]
-    pub unsafe fn init(&mut self) {}
-
-    pub unsafe fn notify_one(&self) {
+    pub fn notify_one(&self) {
         self.waiters.with_locked(|waiters| {
             if let Some(task) = waiters.pop_front() {
                 // Unpark the task
@@ -39,7 +34,7 @@ pub unsafe fn notify_one(&self) {
         });
     }
 
-    pub unsafe fn notify_all(&self) {
+    pub fn notify_all(&self) {
         self.waiters.with_locked(|waiters| {
             while let Some(task) = waiters.pop_front() {
                 // Unpark the task
@@ -11,8 +11,6 @@ pub struct Mutex {
     mtx: SpinIdOnceCell<()>,
 }
 
-pub type MovableMutex = Mutex;
-
 /// Create a mutex object. This function never panics.
 fn new_mtx() -> Result<abi::ID, ItronError> {
     ItronError::err_if_negative(unsafe {
@@ -39,7 +37,7 @@ fn raw(&self) -> abi::ID {
         }
     }
 
-    pub unsafe fn lock(&self) {
+    pub fn lock(&self) {
         let mtx = self.raw();
         expect_success(unsafe { abi::loc_mtx(mtx) }, &"loc_mtx");
     }
@@ -49,7 +47,7 @@ pub unsafe fn unlock(&self) {
         expect_success_aborting(unsafe { abi::unl_mtx(mtx) }, &"unl_mtx");
     }
 
-    pub unsafe fn try_lock(&self) -> bool {
+    pub fn try_lock(&self) -> bool {
         let mtx = self.raw();
         match unsafe { abi::ploc_mtx(mtx) } {
             abi::E_TMOUT => false,
@@ -4,42 +4,43 @@
 
 use super::waitqueue::{SpinMutex, WaitQueue, WaitVariable};
 
+/// FIXME: `UnsafeList` is not movable.
+struct AllocatedCondvar(SpinMutex<WaitVariable<()>>);
+
 pub struct Condvar {
-    inner: SpinMutex<WaitVariable<()>>,
+    inner: LazyBox<AllocatedCondvar>,
 }
 
-pub(crate) type MovableCondvar = LazyBox<Condvar>;
-
-impl LazyInit for Condvar {
+impl LazyInit for AllocatedCondvar {
     fn init() -> Box<Self> {
-        Box::new(Self::new())
+        Box::new(AllocatedCondvar(SpinMutex::new(WaitVariable::new(()))))
     }
 }
 
 impl Condvar {
     pub const fn new() -> Condvar {
-        Condvar { inner: SpinMutex::new(WaitVariable::new(())) }
+        Condvar { inner: LazyBox::new() }
     }
 
     #[inline]
-    pub unsafe fn notify_one(&self) {
-        let _ = WaitQueue::notify_one(self.inner.lock());
+    pub fn notify_one(&self) {
+        let _ = WaitQueue::notify_one(self.inner.0.lock());
     }
 
     #[inline]
-    pub unsafe fn notify_all(&self) {
-        let _ = WaitQueue::notify_all(self.inner.lock());
+    pub fn notify_all(&self) {
+        let _ = WaitQueue::notify_all(self.inner.0.lock());
     }
 
     pub unsafe fn wait(&self, mutex: &Mutex) {
-        let guard = self.inner.lock();
+        let guard = self.inner.0.lock();
         WaitQueue::wait(guard, || unsafe { mutex.unlock() });
-        unsafe { mutex.lock() }
+        mutex.lock()
     }
 
     pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
-        let success = WaitQueue::wait_timeout(&self.inner, dur, || unsafe { mutex.unlock() });
-        unsafe { mutex.lock() };
+        let success = WaitQueue::wait_timeout(&self.inner.0, dur, || unsafe { mutex.unlock() });
+        mutex.lock();
         success
     }
 }
@@ -1,28 +1,28 @@
 use super::waitqueue::{try_lock_or_false, SpinMutex, WaitQueue, WaitVariable};
 use crate::sys_common::lazy_box::{LazyBox, LazyInit};
 
+/// FIXME: `UnsafeList` is not movable.
+struct AllocatedMutex(SpinMutex<WaitVariable<bool>>);
+
 pub struct Mutex {
-    inner: SpinMutex<WaitVariable<bool>>,
+    inner: LazyBox<AllocatedMutex>,
 }
 
-// not movable: see UnsafeList implementation
-pub(crate) type MovableMutex = LazyBox<Mutex>;
-
-impl LazyInit for Mutex {
+impl LazyInit for AllocatedMutex {
     fn init() -> Box<Self> {
-        Box::new(Self::new())
+        Box::new(AllocatedMutex(SpinMutex::new(WaitVariable::new(false))))
     }
 }
 
 // Implementation according to “Operating Systems: Three Easy Pieces”, chapter 28
 impl Mutex {
     pub const fn new() -> Mutex {
-        Mutex { inner: SpinMutex::new(WaitVariable::new(false)) }
+        Mutex { inner: LazyBox::new() }
     }
 
     #[inline]
-    pub unsafe fn lock(&self) {
-        let mut guard = self.inner.lock();
+    pub fn lock(&self) {
+        let mut guard = self.inner.0.lock();
         if *guard.lock_var() {
             // Another thread has the lock, wait
             WaitQueue::wait(guard, || {})
@@ -35,7 +35,7 @@ pub unsafe fn lock(&self) {
 
     #[inline]
     pub unsafe fn unlock(&self) {
-        let guard = self.inner.lock();
+        let guard = self.inner.0.lock();
         if let Err(mut guard) = WaitQueue::notify_one(guard) {
             // No other waiters, unlock
             *guard.lock_var_mut() = false;
@@ -45,8 +45,8 @@ pub unsafe fn unlock(&self) {
     }
 
     #[inline]
-    pub unsafe fn try_lock(&self) -> bool {
-        let mut guard = try_lock_or_false!(self.inner);
+    pub fn try_lock(&self) -> bool {
+        let mut guard = try_lock_or_false!(self.inner.0);
         if *guard.lock_var() {
             // Another thread has the lock
             false
@@ -7,42 +7,45 @@
 use super::waitqueue::{
     try_lock_or_false, NotifiedTcs, SpinMutex, SpinMutexGuard, WaitQueue, WaitVariable,
 };
-use crate::mem;
+use crate::alloc::Layout;
 
-pub struct RwLock {
+struct AllocatedRwLock {
     readers: SpinMutex<WaitVariable<Option<NonZeroUsize>>>,
     writer: SpinMutex<WaitVariable<bool>>,
 }
 
-pub(crate) type MovableRwLock = LazyBox<RwLock>;
+pub struct RwLock {
+    inner: LazyBox<AllocatedRwLock>,
+}
 
-impl LazyInit for RwLock {
+impl LazyInit for AllocatedRwLock {
     fn init() -> Box<Self> {
-        Box::new(Self::new())
+        Box::new(AllocatedRwLock {
+            readers: SpinMutex::new(WaitVariable::new(None)),
+            writer: SpinMutex::new(WaitVariable::new(false)),
+        })
     }
 }
 
-// Check at compile time that RwLock size matches C definition (see test_c_rwlock_initializer below)
-//
-// # Safety
-// Never called, as it is a compile time check.
-#[allow(dead_code)]
-unsafe fn rw_lock_size_assert(r: RwLock) {
-    unsafe { mem::transmute::<RwLock, [u8; 144]>(r) };
-}
+// Check at compile time that RwLock's size and alignment matches the C definition
+// in libunwind (see also `test_c_rwlock_initializer` in `tests`).
+const _: () = {
+    let rust = Layout::new::<RwLock>();
+    let c = Layout::new::<*mut ()>();
+    assert!(rust.size() == c.size());
+    assert!(rust.align() == c.align());
+};
 
 impl RwLock {
     pub const fn new() -> RwLock {
-        RwLock {
-            readers: SpinMutex::new(WaitVariable::new(None)),
-            writer: SpinMutex::new(WaitVariable::new(false)),
-        }
+        RwLock { inner: LazyBox::new() }
     }
 
     #[inline]
-    pub unsafe fn read(&self) {
-        let mut rguard = self.readers.lock();
-        let wguard = self.writer.lock();
+    pub fn read(&self) {
+        let lock = &*self.inner;
+        let mut rguard = lock.readers.lock();
+        let wguard = lock.writer.lock();
         if *wguard.lock_var() || !wguard.queue_empty() {
             // Another thread has or is waiting for the write lock, wait
             drop(wguard);
@@ -57,8 +60,9 @@ pub unsafe fn read(&self) {
 
     #[inline]
     pub unsafe fn try_read(&self) -> bool {
-        let mut rguard = try_lock_or_false!(self.readers);
-        let wguard = try_lock_or_false!(self.writer);
+        let lock = &*self.inner;
+        let mut rguard = try_lock_or_false!(lock.readers);
+        let wguard = try_lock_or_false!(lock.writer);
         if *wguard.lock_var() || !wguard.queue_empty() {
             // Another thread has or is waiting for the write lock
             false
@@ -71,9 +75,10 @@ pub unsafe fn try_read(&self) -> bool {
     }
 
     #[inline]
-    pub unsafe fn write(&self) {
-        let rguard = self.readers.lock();
-        let mut wguard = self.writer.lock();
+    pub fn write(&self) {
+        let lock = &*self.inner;
+        let rguard = lock.readers.lock();
+        let mut wguard = lock.writer.lock();
         if *wguard.lock_var() || rguard.lock_var().is_some() {
             // Another thread has the lock, wait
             drop(rguard);
@@ -86,9 +91,10 @@ pub unsafe fn write(&self) {
     }
 
     #[inline]
-    pub unsafe fn try_write(&self) -> bool {
-        let rguard = try_lock_or_false!(self.readers);
-        let mut wguard = try_lock_or_false!(self.writer);
+    pub fn try_write(&self) -> bool {
+        let lock = &*self.inner;
+        let rguard = try_lock_or_false!(lock.readers);
+        let mut wguard = try_lock_or_false!(lock.writer);
         if *wguard.lock_var() || rguard.lock_var().is_some() {
             // Another thread has the lock
             false
@@ -122,8 +128,9 @@ unsafe fn __read_unlock(
 
     #[inline]
     pub unsafe fn read_unlock(&self) {
-        let rguard = self.readers.lock();
-        let wguard = self.writer.lock();
+        let lock = &*self.inner;
+        let rguard = lock.readers.lock();
+        let wguard = lock.writer.lock();
         unsafe { self.__read_unlock(rguard, wguard) };
     }
 
@@ -158,8 +165,9 @@ unsafe fn __write_unlock(
 
     #[inline]
     pub unsafe fn write_unlock(&self) {
-        let rguard = self.readers.lock();
-        let wguard = self.writer.lock();
+        let lock = &*self.inner;
+        let rguard = lock.readers.lock();
+        let wguard = lock.writer.lock();
         unsafe { self.__write_unlock(rguard, wguard) };
     }
 
@@ -167,8 +175,9 @@ pub unsafe fn write_unlock(&self) {
     #[inline]
     #[cfg_attr(test, allow(dead_code))]
     unsafe fn unlock(&self) {
-        let rguard = self.readers.lock();
-        let wguard = self.writer.lock();
+        let lock = &*self.inner;
+        let rguard = lock.readers.lock();
+        let wguard = lock.writer.lock();
         if *wguard.lock_var() == true {
             unsafe { self.__write_unlock(rguard, wguard) };
         } else {
@@ -201,6 +210,7 @@ unsafe fn unlock(&self) {
     unsafe { (*p).write() };
     return 0;
 }
 
+#[cfg(not(test))]
 #[no_mangle]
 pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut RwLock) -> i32 {
@@ -1,22 +1,12 @@
 use super::*;
+use crate::ptr;
 
 // Verify that the byte pattern libunwind uses to initialize an RwLock is
 // equivalent to the value of RwLock::new(). If the value changes,
 // `src/UnwindRustSgx.h` in libunwind needs to be changed too.
 #[test]
 fn test_c_rwlock_initializer() {
-    #[rustfmt::skip]
-    const C_RWLOCK_INIT: &[u8] = &[
-        /* 0x00 */ 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
-        /* 0x10 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
-        /* 0x20 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
-        /* 0x30 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
-        /* 0x40 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
-        /* 0x50 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
-        /* 0x60 */ 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
-        /* 0x70 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
-        /* 0x80 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
-    ];
+    const C_RWLOCK_INIT: *mut () = ptr::null_mut();
 
     // For the test to work, we need the padding/unused bytes in RwLock to be
     // initialized as 0. In practice, this is the case with statics.
@@ -26,6 +16,6 @@ fn test_c_rwlock_initializer() {
         // If the assertion fails, that not necessarily an issue with the value
        // of C_RWLOCK_INIT. It might just be an issue with the way padding
         // bytes are initialized in the test code.
-        assert_eq!(&crate::mem::transmute_copy::<_, [u8; 144]>(&RUST_RWLOCK_INIT), C_RWLOCK_INIT);
+        assert_eq!(crate::mem::transmute_copy::<_, *mut ()>(&RUST_RWLOCK_INIT), C_RWLOCK_INIT);
     };
 }
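The deleted runtime test compared a 144-byte pattern; the boxed SGX `RwLock` is now a single pointer, so the check shrinks to a pointer comparison and the size/alignment assertion moves to compile time (the `const _: () = { ... }` block in the previous hunk). A self-contained version of that compile-time pattern, with `RwLockRepr` as a stand-in for the real type:

```rust
use std::alloc::Layout;

// Stand-in with the pointer-sized representation the test expects.
struct RwLockRepr(*mut ());

// Evaluated at compile time; a mismatch is a build error, not a test failure.
const _: () = {
    let rust = Layout::new::<RwLockRepr>();
    let c = Layout::new::<*mut ()>();
    assert!(rust.size() == c.size());
    assert!(rust.align() == c.align());
};

fn main() {}
```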
@@ -12,8 +12,6 @@ pub struct RwLock {
     rwl: SpinIdOnceCell<()>,
 }
 
-pub type MovableRwLock = RwLock;
-
 // Safety: `num_readers` is protected by `mtx_num_readers`
 unsafe impl Send for RwLock {}
 unsafe impl Sync for RwLock {}
@@ -37,13 +35,13 @@ fn raw(&self) -> abi::ID {
     }
 
     #[inline]
-    pub unsafe fn read(&self) {
+    pub fn read(&self) {
         let rwl = self.raw();
         expect_success(unsafe { abi::rwl_loc_rdl(rwl) }, &"rwl_loc_rdl");
     }
 
     #[inline]
-    pub unsafe fn try_read(&self) -> bool {
+    pub fn try_read(&self) -> bool {
         let rwl = self.raw();
         match unsafe { abi::rwl_ploc_rdl(rwl) } {
             abi::E_TMOUT => false,
@@ -55,13 +53,13 @@ pub unsafe fn try_read(&self) -> bool {
     }
 
     #[inline]
-    pub unsafe fn write(&self) {
+    pub fn write(&self) {
         let rwl = self.raw();
         expect_success(unsafe { abi::rwl_loc_wrl(rwl) }, &"rwl_loc_wrl");
     }
 
     #[inline]
-    pub unsafe fn try_write(&self) -> bool {
+    pub fn try_write(&self) -> bool {
         let rwl = self.raw();
         match unsafe { abi::rwl_ploc_wrl(rwl) } {
             abi::E_TMOUT => false,
@@ -53,8 +53,6 @@
 // This can never be a valid `zx_handle_t`.
 const UNLOCKED: u32 = 0;
 
-pub type MovableMutex = Mutex;
-
 pub struct Mutex {
     futex: AtomicU32,
 }
@@ -86,23 +84,27 @@ pub const fn new() -> Mutex {
     }
 
     #[inline]
-    pub unsafe fn try_lock(&self) -> bool {
-        let thread_self = zx_thread_self();
+    pub fn try_lock(&self) -> bool {
+        let thread_self = unsafe { zx_thread_self() };
         self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed).is_ok()
     }
 
     #[inline]
-    pub unsafe fn lock(&self) {
-        let thread_self = zx_thread_self();
+    pub fn lock(&self) {
+        let thread_self = unsafe { zx_thread_self() };
         if let Err(state) =
             self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed)
         {
-            self.lock_contested(state, thread_self);
+            unsafe {
+                self.lock_contested(state, thread_self);
+            }
         }
     }
 
+    /// # Safety
+    /// `thread_self` must be the handle for the current thread.
     #[cold]
-    fn lock_contested(&self, mut state: u32, thread_self: zx_handle_t) {
+    unsafe fn lock_contested(&self, mut state: u32, thread_self: zx_handle_t) {
         let owned_state = mark_contested(to_state(thread_self));
         loop {
             // Mark the mutex as contested if it is not already.
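For context on the Fuchsia hunks: the mutex word stores the owning thread's handle (`UNLOCKED` is zero), which is why acquiring needs `zx_thread_self()`; after this change only the contended slow path keeps an unsafe contract (the handle must really belong to the current thread). A reduced, hedged model with a plain `u32` standing in for `zx_handle_t`:

```rust
use std::sync::atomic::{
    AtomicU32,
    Ordering::{Acquire, Relaxed},
};

const UNLOCKED: u32 = 0;

struct OwnerMutex {
    futex: AtomicU32,
}

impl OwnerMutex {
    const fn new() -> Self {
        OwnerMutex { futex: AtomicU32::new(UNLOCKED) }
    }

    // Safe fn, as in the diff above: failing to acquire is well-defined.
    fn try_lock(&self, thread_self: u32) -> bool {
        self.futex.compare_exchange(UNLOCKED, thread_self, Acquire, Relaxed).is_ok()
    }
}

fn main() {
    let m = OwnerMutex::new();
    assert!(m.try_lock(42));
    assert!(!m.try_lock(43)); // already owned by "thread" 42
}
```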
@@ -3,8 +3,6 @@
 use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
 use crate::time::Duration;
 
-pub type MovableCondvar = Condvar;
-
 pub struct Condvar {
     // The value of this atomic is simply incremented on every notification.
     // This is used by `.wait()` to not miss any notifications after
@@ -21,12 +19,12 @@ pub const fn new() -> Self {
     // All the memory orderings here are `Relaxed`,
     // because synchronization is done by unlocking and locking the mutex.
 
-    pub unsafe fn notify_one(&self) {
+    pub fn notify_one(&self) {
         self.futex.fetch_add(1, Relaxed);
         futex_wake(&self.futex);
     }
 
-    pub unsafe fn notify_all(&self) {
+    pub fn notify_all(&self) {
         self.futex.fetch_add(1, Relaxed);
         futex_wake_all(&self.futex);
     }
@@ -4,8 +4,6 @@
 };
 use crate::sys::futex::{futex_wait, futex_wake};
 
-pub type MovableMutex = Mutex;
-
 pub struct Mutex {
     /// 0: unlocked
     /// 1: locked, no other threads waiting
@@ -20,12 +18,12 @@ pub const fn new() -> Self {
     }
 
     #[inline]
-    pub unsafe fn try_lock(&self) -> bool {
+    pub fn try_lock(&self) -> bool {
         self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_ok()
     }
 
     #[inline]
-    pub unsafe fn lock(&self) {
+    pub fn lock(&self) {
         if self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_err() {
             self.lock_contended();
         }
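A minimal sketch (not the std code) of the futex mutex fast path above. The real implementation parks on `futex_wait` instead of spinning, but the signature split this commit makes is the same: `try_lock` and `lock` are safe because failing or blocking is well-defined, while `unlock` keeps an unsafe contract because releasing a mutex you do not hold would break mutual exclusion:

```rust
use std::sync::atomic::{
    AtomicU32,
    Ordering::{Acquire, Relaxed, Release},
};

pub struct SpinMutex {
    // 0: unlocked, 1: locked
    state: AtomicU32,
}

impl SpinMutex {
    pub const fn new() -> Self {
        SpinMutex { state: AtomicU32::new(0) }
    }

    pub fn try_lock(&self) -> bool {
        self.state.compare_exchange(0, 1, Acquire, Relaxed).is_ok()
    }

    pub fn lock(&self) {
        // std would futex_wait here; spinning keeps the sketch self-contained.
        while !self.try_lock() {
            std::hint::spin_loop();
        }
    }

    pub unsafe fn unlock(&self) {
        self.state.store(0, Release);
    }
}

fn main() {
    let m = SpinMutex::new();
    m.lock();
    assert!(!m.try_lock());
    unsafe { m.unlock() };
    assert!(m.try_lock());
}
```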
@@ -4,8 +4,6 @@
 };
 use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
 
-pub type MovableRwLock = RwLock;
-
 pub struct RwLock {
     // The state consists of a 30-bit reader counter, a 'readers waiting' flag, and a 'writers waiting' flag.
     // Bits 0..30:
@@ -70,14 +68,14 @@ pub const fn new() -> Self {
     }
 
     #[inline]
-    pub unsafe fn try_read(&self) -> bool {
+    pub fn try_read(&self) -> bool {
         self.state
             .fetch_update(Acquire, Relaxed, |s| is_read_lockable(s).then(|| s + READ_LOCKED))
             .is_ok()
     }
 
     #[inline]
-    pub unsafe fn read(&self) {
+    pub fn read(&self) {
         let state = self.state.load(Relaxed);
         if !is_read_lockable(state)
             || self
@@ -144,14 +142,14 @@ fn read_contended(&self) {
     }
 
     #[inline]
-    pub unsafe fn try_write(&self) -> bool {
+    pub fn try_write(&self) -> bool {
         self.state
             .fetch_update(Acquire, Relaxed, |s| is_unlocked(s).then(|| s + WRITE_LOCKED))
             .is_ok()
     }
 
     #[inline]
-    pub unsafe fn write(&self) {
+    pub fn write(&self) {
         if self.state.compare_exchange_weak(0, WRITE_LOCKED, Acquire, Relaxed).is_err() {
             self.write_contended();
         }
@@ -10,22 +10,22 @@
         mod futex_mutex;
         mod futex_rwlock;
         mod futex_condvar;
-        pub(crate) use futex_mutex::{Mutex, MovableMutex};
-        pub(crate) use futex_rwlock::MovableRwLock;
-        pub(crate) use futex_condvar::MovableCondvar;
+        pub(crate) use futex_mutex::Mutex;
+        pub(crate) use futex_rwlock::RwLock;
+        pub(crate) use futex_condvar::Condvar;
     } else if #[cfg(target_os = "fuchsia")] {
         mod fuchsia_mutex;
         mod futex_rwlock;
         mod futex_condvar;
-        pub(crate) use fuchsia_mutex::{Mutex, MovableMutex};
-        pub(crate) use futex_rwlock::MovableRwLock;
-        pub(crate) use futex_condvar::MovableCondvar;
+        pub(crate) use fuchsia_mutex::Mutex;
+        pub(crate) use futex_rwlock::RwLock;
+        pub(crate) use futex_condvar::Condvar;
     } else {
         mod pthread_mutex;
         mod pthread_rwlock;
         mod pthread_condvar;
-        pub(crate) use pthread_mutex::{Mutex, MovableMutex};
-        pub(crate) use pthread_rwlock::MovableRwLock;
-        pub(crate) use pthread_condvar::MovableCondvar;
+        pub(crate) use pthread_mutex::Mutex;
+        pub(crate) use pthread_rwlock::RwLock;
+        pub(crate) use pthread_condvar::Condvar;
     }
 }
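These `mod.rs` hunks only trim the re-export lists; the platform selection itself is the surrounding `cfg_if!` chain shown in context. A self-contained sketch of that selection pattern, assuming the `cfg-if` crate and with illustrative inline modules in place of std's one-file-per-module layout:

```rust
// Requires `cfg-if` as a dependency; module names here are illustrative.
cfg_if::cfg_if! {
    if #[cfg(target_os = "linux")] {
        mod futex_mutex {
            pub struct Mutex;
        }
        pub(crate) use futex_mutex::Mutex;
    } else {
        mod fallback_mutex {
            pub struct Mutex;
        }
        pub(crate) use fallback_mutex::Mutex;
    }
}

fn main() {
    // Whichever branch was compiled in, the crate sees one `Mutex` name.
    let _lock: Mutex = Mutex;
}
```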
@@ -1,17 +1,17 @@
 use crate::cell::UnsafeCell;
+use crate::ptr;
+use crate::sync::atomic::{AtomicPtr, Ordering::Relaxed};
 use crate::sys::locks::{pthread_mutex, Mutex};
 use crate::sys_common::lazy_box::{LazyBox, LazyInit};
 use crate::time::Duration;
 
+struct AllocatedCondvar(UnsafeCell<libc::pthread_cond_t>);
+
 pub struct Condvar {
-    inner: UnsafeCell<libc::pthread_cond_t>,
+    inner: LazyBox<AllocatedCondvar>,
+    mutex: AtomicPtr<libc::pthread_mutex_t>,
 }
 
-pub(crate) type MovableCondvar = LazyBox<Condvar>;
-
-unsafe impl Send for Condvar {}
-unsafe impl Sync for Condvar {}
-
 const TIMESPEC_MAX: libc::timespec =
     libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
 
@@ -19,81 +19,104 @@ fn saturating_cast_to_time_t(value: u64) -> libc::time_t {
     if value > <libc::time_t>::MAX as u64 { <libc::time_t>::MAX } else { value as libc::time_t }
 }
 
-impl LazyInit for Condvar {
+#[inline]
+fn raw(c: &Condvar) -> *mut libc::pthread_cond_t {
+    c.inner.0.get()
+}
+
+unsafe impl Send for AllocatedCondvar {}
+unsafe impl Sync for AllocatedCondvar {}
+
+impl LazyInit for AllocatedCondvar {
     fn init() -> Box<Self> {
-        let mut condvar = Box::new(Self::new());
-        unsafe { condvar.init() };
+        let condvar = Box::new(AllocatedCondvar(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER)));
+
+        cfg_if::cfg_if! {
+            if #[cfg(any(
+                target_os = "macos",
+                target_os = "ios",
+                target_os = "watchos",
+                target_os = "l4re",
+                target_os = "android",
+                target_os = "redox"
+            ))] {
+                // `pthread_condattr_setclock` is unfortunately not supported on these platforms.
+            } else if #[cfg(any(target_os = "espidf", target_os = "horizon"))] {
+                // NOTE: ESP-IDF's PTHREAD_COND_INITIALIZER support is not released yet
+                // So on that platform, init() should always be called
+                // Moreover, that platform does not have pthread_condattr_setclock support,
+                // hence that initialization should be skipped as well
+                //
+                // Similar story for the 3DS (horizon).
+                let r = unsafe { libc::pthread_cond_init(condvar.0.get(), crate::ptr::null()) };
+                assert_eq!(r, 0);
+            } else {
+                use crate::mem::MaybeUninit;
+                let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
+                let r = unsafe { libc::pthread_condattr_init(attr.as_mut_ptr()) };
+                assert_eq!(r, 0);
+                let r = unsafe { libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC) };
+                assert_eq!(r, 0);
+                let r = unsafe { libc::pthread_cond_init(condvar.0.get(), attr.as_ptr()) };
+                assert_eq!(r, 0);
+                let r = unsafe { libc::pthread_condattr_destroy(attr.as_mut_ptr()) };
+                assert_eq!(r, 0);
+            }
+        }
+
         condvar
     }
 }
 
+impl Drop for AllocatedCondvar {
+    #[inline]
+    fn drop(&mut self) {
+        let r = unsafe { libc::pthread_cond_destroy(self.0.get()) };
+        if cfg!(target_os = "dragonfly") {
+            // On DragonFly pthread_cond_destroy() returns EINVAL if called on
+            // a condvar that was just initialized with
+            // libc::PTHREAD_COND_INITIALIZER. Once it is used or
+            // pthread_cond_init() is called, this behaviour no longer occurs.
+            debug_assert!(r == 0 || r == libc::EINVAL);
+        } else {
+            debug_assert_eq!(r, 0);
+        }
+    }
+}
+
 impl Condvar {
     pub const fn new() -> Condvar {
-        // Might be moved and address is changing it is better to avoid
-        // initialization of potentially opaque OS data before it landed
-        Condvar { inner: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER) }
-    }
-
-    #[cfg(any(
-        target_os = "macos",
-        target_os = "ios",
-        target_os = "watchos",
-        target_os = "l4re",
-        target_os = "android",
-        target_os = "redox"
-    ))]
-    unsafe fn init(&mut self) {}
-
-    // NOTE: ESP-IDF's PTHREAD_COND_INITIALIZER support is not released yet
-    // So on that platform, init() should always be called
-    // Moreover, that platform does not have pthread_condattr_setclock support,
-    // hence that initialization should be skipped as well
-    //
-    // Similar story for the 3DS (horizon).
-    #[cfg(any(target_os = "espidf", target_os = "horizon"))]
-    unsafe fn init(&mut self) {
-        let r = libc::pthread_cond_init(self.inner.get(), crate::ptr::null());
-        assert_eq!(r, 0);
-    }
-
-    #[cfg(not(any(
-        target_os = "macos",
-        target_os = "ios",
-        target_os = "watchos",
-        target_os = "l4re",
-        target_os = "android",
-        target_os = "redox",
-        target_os = "espidf",
-        target_os = "horizon"
-    )))]
-    unsafe fn init(&mut self) {
-        use crate::mem::MaybeUninit;
-        let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
-        let r = libc::pthread_condattr_init(attr.as_mut_ptr());
-        assert_eq!(r, 0);
-        let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
-        assert_eq!(r, 0);
-        let r = libc::pthread_cond_init(self.inner.get(), attr.as_ptr());
-        assert_eq!(r, 0);
-        let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
-        assert_eq!(r, 0);
+        Condvar { inner: LazyBox::new(), mutex: AtomicPtr::new(ptr::null_mut()) }
     }
 
     #[inline]
-    pub unsafe fn notify_one(&self) {
-        let r = libc::pthread_cond_signal(self.inner.get());
+    fn verify(&self, mutex: *mut libc::pthread_mutex_t) {
+        // Relaxed is okay here because we never read through `self.addr`, and only use it to
+        // compare addresses.
+        match self.mutex.compare_exchange(ptr::null_mut(), mutex, Relaxed, Relaxed) {
+            Ok(_) => {}                // Stored the address
+            Err(n) if n == mutex => {} // Lost a race to store the same address
+            _ => panic!("attempted to use a condition variable with two mutexes"),
+        }
+    }
+
+    #[inline]
+    pub fn notify_one(&self) {
+        let r = unsafe { libc::pthread_cond_signal(raw(self)) };
         debug_assert_eq!(r, 0);
     }
 
     #[inline]
-    pub unsafe fn notify_all(&self) {
-        let r = libc::pthread_cond_broadcast(self.inner.get());
+    pub fn notify_all(&self) {
+        let r = unsafe { libc::pthread_cond_broadcast(raw(self)) };
         debug_assert_eq!(r, 0);
     }
 
     #[inline]
     pub unsafe fn wait(&self, mutex: &Mutex) {
-        let r = libc::pthread_cond_wait(self.inner.get(), pthread_mutex::raw(mutex));
+        let mutex = pthread_mutex::raw(mutex);
+        self.verify(mutex);
+        let r = libc::pthread_cond_wait(raw(self), mutex);
         debug_assert_eq!(r, 0);
     }
 
@@ -112,6 +135,9 @@ pub unsafe fn wait(&self, mutex: &Mutex) {
     pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
         use crate::mem;
 
+        let mutex = pthread_mutex::raw(mutex);
+        self.verify(mutex);
+
         let mut now: libc::timespec = mem::zeroed();
         let r = libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now);
         assert_eq!(r, 0);
@@ -127,7 +153,7 @@ pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
         let timeout =
             sec.map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec as _ }).unwrap_or(TIMESPEC_MAX);
 
-        let r = libc::pthread_cond_timedwait(self.inner.get(), pthread_mutex::raw(mutex), &timeout);
+        let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout);
         assert!(r == libc::ETIMEDOUT || r == 0);
         r == 0
     }
@@ -144,9 +170,11 @@ pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
         target_os = "horizon"
     ))]
     pub unsafe fn wait_timeout(&self, mutex: &Mutex, mut dur: Duration) -> bool {
-        use crate::ptr;
         use crate::time::Instant;
 
+        let mutex = pthread_mutex::raw(mutex);
+        self.verify(mutex);
+
         // 1000 years
         let max_dur = Duration::from_secs(1000 * 365 * 86400);
 
@@ -187,36 +215,11 @@ pub unsafe fn wait_timeout(&self, mutex: &Mutex, mut dur: Duration) -> bool {
             .unwrap_or(TIMESPEC_MAX);
 
         // And wait!
-        let r = libc::pthread_cond_timedwait(self.inner.get(), pthread_mutex::raw(mutex), &timeout);
+        let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout);
         debug_assert!(r == libc::ETIMEDOUT || r == 0);
 
         // ETIMEDOUT is not a totally reliable method of determining timeout due
         // to clock shifts, so do the check ourselves
         stable_now.elapsed() < dur
     }
-
-    #[inline]
-    #[cfg(not(target_os = "dragonfly"))]
-    unsafe fn destroy(&mut self) {
-        let r = libc::pthread_cond_destroy(self.inner.get());
-        debug_assert_eq!(r, 0);
-    }
-
-    #[inline]
-    #[cfg(target_os = "dragonfly")]
-    unsafe fn destroy(&mut self) {
-        let r = libc::pthread_cond_destroy(self.inner.get());
-        // On DragonFly pthread_cond_destroy() returns EINVAL if called on
-        // a condvar that was just initialized with
-        // libc::PTHREAD_COND_INITIALIZER. Once it is used or
-        // pthread_cond_init() is called, this behaviour no longer occurs.
-        debug_assert!(r == 0 || r == libc::EINVAL);
-    }
-}
-
-impl Drop for Condvar {
-    #[inline]
-    fn drop(&mut self) {
-        unsafe { self.destroy() };
-    }
 }
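The new `verify` method replays, inside the pthread condvar itself, the same-mutex check that previously lived in `sys_common/condvar/check.rs` (deleted later in this diff): the first mutex address seen is stored in an `AtomicPtr`, and any different address panics. A standalone sketch of that mechanism:

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::Relaxed};

pub struct MutexBinding {
    mutex: AtomicPtr<()>,
}

impl MutexBinding {
    pub const fn new() -> Self {
        MutexBinding { mutex: AtomicPtr::new(ptr::null_mut()) }
    }

    pub fn verify(&self, mutex: *mut ()) {
        // Relaxed suffices: the pointer is only compared, never dereferenced.
        match self.mutex.compare_exchange(ptr::null_mut(), mutex, Relaxed, Relaxed) {
            Ok(_) => {}                // first use: remember the address
            Err(p) if p == mutex => {} // same mutex as before
            _ => panic!("attempted to use a condition variable with two mutexes"),
        }
    }
}

fn main() {
    let binding = MutexBinding::new();
    let mut word = 0u8;
    let addr = &mut word as *mut u8 as *mut ();
    binding.verify(addr);
    binding.verify(addr); // same mutex: fine; a different address would panic
}
```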
@@ -3,56 +3,24 @@
 use crate::sys::cvt_nz;
 use crate::sys_common::lazy_box::{LazyBox, LazyInit};
 
-pub struct Mutex {
-    inner: UnsafeCell<libc::pthread_mutex_t>,
-}
+struct AllocatedMutex(UnsafeCell<libc::pthread_mutex_t>);
 
-pub(crate) type MovableMutex = LazyBox<Mutex>;
+pub struct Mutex {
+    inner: LazyBox<AllocatedMutex>,
+}
 
 #[inline]
 pub unsafe fn raw(m: &Mutex) -> *mut libc::pthread_mutex_t {
-    m.inner.get()
+    m.inner.0.get()
 }
 
-unsafe impl Send for Mutex {}
-unsafe impl Sync for Mutex {}
+unsafe impl Send for AllocatedMutex {}
+unsafe impl Sync for AllocatedMutex {}
 
-impl LazyInit for Mutex {
+impl LazyInit for AllocatedMutex {
     fn init() -> Box<Self> {
-        let mut mutex = Box::new(Self::new());
-        unsafe { mutex.init() };
-        mutex
-    }
-
-    fn destroy(mutex: Box<Self>) {
-        // We're not allowed to pthread_mutex_destroy a locked mutex,
-        // so check first if it's unlocked.
-        if unsafe { mutex.try_lock() } {
-            unsafe { mutex.unlock() };
-            drop(mutex);
-        } else {
-            // The mutex is locked. This happens if a MutexGuard is leaked.
-            // In this case, we just leak the Mutex too.
-            forget(mutex);
-        }
-    }
-
-    fn cancel_init(_: Box<Self>) {
-        // In this case, we can just drop it without any checks,
-        // since it cannot have been locked yet.
-    }
-}
-
-impl Mutex {
-    pub const fn new() -> Mutex {
-        // Might be moved to a different address, so it is better to avoid
-        // initialization of potentially opaque OS data before it landed.
-        // Be very careful using this newly constructed `Mutex`, reentrant
-        // locking is undefined behavior until `init` is called!
-        Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
-    }
-
-    #[inline]
-    unsafe fn init(&mut self) {
+        let mutex = Box::new(AllocatedMutex(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER)));
+
         // Issue #33770
         //
         // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
@@ -77,49 +45,77 @@ unsafe fn init(&mut self) {
         // references, we instead create the mutex with type
         // PTHREAD_MUTEX_NORMAL which is guaranteed to deadlock if we try to
         // re-lock it from the same thread, thus avoiding undefined behavior.
-        let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
-        cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap();
-        let attr = PthreadMutexAttr(&mut attr);
-        cvt_nz(libc::pthread_mutexattr_settype(attr.0.as_mut_ptr(), libc::PTHREAD_MUTEX_NORMAL))
+        unsafe {
+            let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
+            cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap();
+            let attr = PthreadMutexAttr(&mut attr);
+            cvt_nz(libc::pthread_mutexattr_settype(
+                attr.0.as_mut_ptr(),
+                libc::PTHREAD_MUTEX_NORMAL,
+            ))
             .unwrap();
-        cvt_nz(libc::pthread_mutex_init(self.inner.get(), attr.0.as_ptr())).unwrap();
-    }
+            cvt_nz(libc::pthread_mutex_init(mutex.0.get(), attr.0.as_ptr())).unwrap();
+        }
 
-    #[inline]
-    pub unsafe fn lock(&self) {
-        let r = libc::pthread_mutex_lock(self.inner.get());
-        debug_assert_eq!(r, 0);
+        mutex
     }
 
-    #[inline]
-    pub unsafe fn unlock(&self) {
-        let r = libc::pthread_mutex_unlock(self.inner.get());
-        debug_assert_eq!(r, 0);
-    }
+    fn destroy(mutex: Box<Self>) {
+        // We're not allowed to pthread_mutex_destroy a locked mutex,
+        // so check first if it's unlocked.
+        if unsafe { libc::pthread_mutex_trylock(mutex.0.get()) == 0 } {
+            unsafe { libc::pthread_mutex_destroy(mutex.0.get()) };
+            drop(mutex);
+        } else {
+            // The mutex is locked. This happens if a MutexGuard is leaked.
+            // In this case, we just leak the Mutex too.
+            forget(mutex);
+        }
+    }
 
-    #[inline]
-    pub unsafe fn try_lock(&self) -> bool {
-        libc::pthread_mutex_trylock(self.inner.get()) == 0
-    }
-
-    #[inline]
-    #[cfg(not(target_os = "dragonfly"))]
-    unsafe fn destroy(&mut self) {
-        let r = libc::pthread_mutex_destroy(self.inner.get());
-        debug_assert_eq!(r, 0);
-    }
-
-    #[inline]
-    #[cfg(target_os = "dragonfly")]
-    unsafe fn destroy(&mut self) {
-        let r = libc::pthread_mutex_destroy(self.inner.get());
-        // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
-        // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
-        // Once it is used (locked/unlocked) or pthread_mutex_init() is called,
-        // this behaviour no longer occurs.
-        debug_assert!(r == 0 || r == libc::EINVAL);
+    fn cancel_init(_: Box<Self>) {
+        // In this case, we can just drop it without any checks,
+        // since it cannot have been locked yet.
     }
 }
 
-impl Drop for Mutex {
+impl Drop for AllocatedMutex {
     #[inline]
     fn drop(&mut self) {
-        unsafe { self.destroy() };
+        let r = unsafe { libc::pthread_mutex_destroy(self.0.get()) };
+        if cfg!(target_os = "dragonfly") {
+            // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
+            // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
+            // Once it is used (locked/unlocked) or pthread_mutex_init() is called,
+            // this behaviour no longer occurs.
+            debug_assert!(r == 0 || r == libc::EINVAL);
+        } else {
+            debug_assert_eq!(r, 0);
+        }
     }
 }
 
+impl Mutex {
+    #[inline]
+    pub const fn new() -> Mutex {
+        Mutex { inner: LazyBox::new() }
+    }
+
+    #[inline]
+    pub unsafe fn lock(&self) {
+        let r = libc::pthread_mutex_lock(raw(self));
+        debug_assert_eq!(r, 0);
+    }
+
+    #[inline]
+    pub unsafe fn unlock(&self) {
+        let r = libc::pthread_mutex_unlock(raw(self));
+        debug_assert_eq!(r, 0);
+    }
+
+    #[inline]
+    pub unsafe fn try_lock(&self) -> bool {
+        libc::pthread_mutex_trylock(raw(self)) == 0
+    }
+}
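On the destroy path above: POSIX forbids destroying a locked mutex, so `LazyInit::destroy` first try-locks; if that fails, a guard was leaked (e.g. via `mem::forget`) and the heap allocation is deliberately leaked too rather than risk undefined behaviour. An illustrative reduction of that decision, with a `Cell<bool>` standing in for the OS lock:

```rust
use std::cell::Cell;
use std::mem::forget;

struct RawLock {
    locked: Cell<bool>,
}

fn destroy(lock: Box<RawLock>) {
    if !lock.locked.get() {
        // Nobody holds it: safe to free.
        drop(lock);
    } else {
        // A guard was leaked, so the lock may be "held" forever:
        // leak the allocation instead of freeing a live lock.
        forget(lock);
    }
}

fn main() {
    destroy(Box::new(RawLock { locked: Cell::new(false) })); // freed
    destroy(Box::new(RawLock { locked: Cell::new(true) })); // leaked
}
```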
@@ -3,20 +3,26 @@
 use crate::sync::atomic::{AtomicUsize, Ordering};
 use crate::sys_common::lazy_box::{LazyBox, LazyInit};
 
-pub struct RwLock {
+struct AllocatedRwLock {
     inner: UnsafeCell<libc::pthread_rwlock_t>,
     write_locked: UnsafeCell<bool>, // guarded by the `inner` RwLock
     num_readers: AtomicUsize,
 }
 
-pub(crate) type MovableRwLock = LazyBox<RwLock>;
+unsafe impl Send for AllocatedRwLock {}
+unsafe impl Sync for AllocatedRwLock {}
 
-unsafe impl Send for RwLock {}
-unsafe impl Sync for RwLock {}
+pub struct RwLock {
+    inner: LazyBox<AllocatedRwLock>,
+}
 
-impl LazyInit for RwLock {
+impl LazyInit for AllocatedRwLock {
     fn init() -> Box<Self> {
-        Box::new(Self::new())
+        Box::new(AllocatedRwLock {
+            inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
+            write_locked: UnsafeCell::new(false),
+            num_readers: AtomicUsize::new(0),
+        })
     }
 
     fn destroy(mut rwlock: Box<Self>) {
@@ -35,17 +41,39 @@ fn cancel_init(_: Box<Self>) {
     }
 }
 
-impl RwLock {
-    pub const fn new() -> RwLock {
-        RwLock {
-            inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
-            write_locked: UnsafeCell::new(false),
-            num_readers: AtomicUsize::new(0),
+impl AllocatedRwLock {
+    #[inline]
+    unsafe fn raw_unlock(&self) {
+        let r = libc::pthread_rwlock_unlock(self.inner.get());
+        debug_assert_eq!(r, 0);
+    }
+}
+
+impl Drop for AllocatedRwLock {
+    fn drop(&mut self) {
+        let r = unsafe { libc::pthread_rwlock_destroy(self.inner.get()) };
+        // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
+        // rwlock that was just initialized with
+        // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
+        // or pthread_rwlock_init() is called, this behaviour no longer occurs.
+        if cfg!(target_os = "dragonfly") {
+            debug_assert!(r == 0 || r == libc::EINVAL);
+        } else {
+            debug_assert_eq!(r, 0);
         }
     }
+}
 
+impl RwLock {
     #[inline]
-    pub unsafe fn read(&self) {
-        let r = libc::pthread_rwlock_rdlock(self.inner.get());
+    pub const fn new() -> RwLock {
+        RwLock { inner: LazyBox::new() }
+    }
+
+    #[inline]
+    pub fn read(&self) {
+        let lock = &*self.inner;
+        let r = unsafe { libc::pthread_rwlock_rdlock(lock.inner.get()) };
 
         // According to POSIX, when a thread tries to acquire this read lock
         // while it already holds the write lock
@@ -62,51 +90,61 @@ pub unsafe fn read(&self) {
         // got the write lock more than once, or got a read and a write lock.
         if r == libc::EAGAIN {
             panic!("rwlock maximum reader count exceeded");
-        } else if r == libc::EDEADLK || (r == 0 && *self.write_locked.get()) {
+        } else if r == libc::EDEADLK || (r == 0 && unsafe { *lock.write_locked.get() }) {
             // Above, we make sure to only access `write_locked` when `r == 0` to avoid
             // data races.
             if r == 0 {
                 // `pthread_rwlock_rdlock` succeeded when it should not have.
-                self.raw_unlock();
+                unsafe {
+                    lock.raw_unlock();
+                }
             }
             panic!("rwlock read lock would result in deadlock");
         } else {
             // POSIX does not make guarantees about all the errors that may be returned.
             // See issue #94705 for more details.
             assert_eq!(r, 0, "unexpected error during rwlock read lock: {:?}", r);
-            self.num_readers.fetch_add(1, Ordering::Relaxed);
+            lock.num_readers.fetch_add(1, Ordering::Relaxed);
         }
     }
 
     #[inline]
-    pub unsafe fn try_read(&self) -> bool {
-        let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
+    pub fn try_read(&self) -> bool {
+        let lock = &*self.inner;
+        let r = unsafe { libc::pthread_rwlock_tryrdlock(lock.inner.get()) };
         if r == 0 {
-            if *self.write_locked.get() {
+            if unsafe { *lock.write_locked.get() } {
                 // `pthread_rwlock_tryrdlock` succeeded when it should not have.
-                self.raw_unlock();
+                unsafe {
+                    lock.raw_unlock();
+                }
                 false
             } else {
-                self.num_readers.fetch_add(1, Ordering::Relaxed);
+                lock.num_readers.fetch_add(1, Ordering::Relaxed);
                 true
             }
         } else {
             false
         }
     }
 
     #[inline]
-    pub unsafe fn write(&self) {
-        let r = libc::pthread_rwlock_wrlock(self.inner.get());
+    pub fn write(&self) {
+        let lock = &*self.inner;
+        let r = unsafe { libc::pthread_rwlock_wrlock(lock.inner.get()) };
         // See comments above for why we check for EDEADLK and write_locked. For the same reason,
         // we also need to check that there are no readers (tracked in `num_readers`).
         if r == libc::EDEADLK
-            || (r == 0 && *self.write_locked.get())
-            || self.num_readers.load(Ordering::Relaxed) != 0
+            || (r == 0 && unsafe { *lock.write_locked.get() })
+            || lock.num_readers.load(Ordering::Relaxed) != 0
         {
             // Above, we make sure to only access `write_locked` when `r == 0` to avoid
             // data races.
             if r == 0 {
                 // `pthread_rwlock_wrlock` succeeded when it should not have.
-                self.raw_unlock();
+                unsafe {
+                    lock.raw_unlock();
+                }
             }
             panic!("rwlock write lock would result in deadlock");
         } else {
@@ -114,60 +152,44 @@ pub unsafe fn write(&self) {
             // return EDEADLK or 0. We rely on that.
             debug_assert_eq!(r, 0);
         }
-        *self.write_locked.get() = true;
+
+        unsafe {
+            *lock.write_locked.get() = true;
+        }
     }
 
     #[inline]
     pub unsafe fn try_write(&self) -> bool {
-        let r = libc::pthread_rwlock_trywrlock(self.inner.get());
+        let lock = &*self.inner;
+        let r = libc::pthread_rwlock_trywrlock(lock.inner.get());
         if r == 0 {
-            if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
+            if *lock.write_locked.get() || lock.num_readers.load(Ordering::Relaxed) != 0 {
                 // `pthread_rwlock_trywrlock` succeeded when it should not have.
-                self.raw_unlock();
+                lock.raw_unlock();
                 false
             } else {
-                *self.write_locked.get() = true;
+                *lock.write_locked.get() = true;
                 true
             }
         } else {
             false
         }
     }
-    #[inline]
-    unsafe fn raw_unlock(&self) {
-        let r = libc::pthread_rwlock_unlock(self.inner.get());
-        debug_assert_eq!(r, 0);
-    }
 
     #[inline]
     pub unsafe fn read_unlock(&self) {
-        debug_assert!(!*self.write_locked.get());
-        self.num_readers.fetch_sub(1, Ordering::Relaxed);
-        self.raw_unlock();
+        let lock = &*self.inner;
+        debug_assert!(!*lock.write_locked.get());
+        lock.num_readers.fetch_sub(1, Ordering::Relaxed);
+        lock.raw_unlock();
     }
 
     #[inline]
     pub unsafe fn write_unlock(&self) {
-        debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
-        debug_assert!(*self.write_locked.get());
-        *self.write_locked.get() = false;
-        self.raw_unlock();
-    }
-    #[inline]
-    unsafe fn destroy(&mut self) {
-        let r = libc::pthread_rwlock_destroy(self.inner.get());
-        // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
-        // rwlock that was just initialized with
-        // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
-        // or pthread_rwlock_init() is called, this behaviour no longer occurs.
-        if cfg!(target_os = "dragonfly") {
-            debug_assert!(r == 0 || r == libc::EINVAL);
-        } else {
-            debug_assert_eq!(r, 0);
-        }
-    }
-}
-
-impl Drop for RwLock {
-    #[inline]
-    fn drop(&mut self) {
-        unsafe { self.destroy() };
+        let lock = &*self.inner;
+        debug_assert_eq!(lock.num_readers.load(Ordering::Relaxed), 0);
+        debug_assert!(*lock.write_locked.get());
+        *lock.write_locked.get() = false;
+        lock.raw_unlock();
     }
 }
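The pthread rwlock keeps its own `write_locked`/`num_readers` bookkeeping so that a re-lock the OS wrongly allows is detected and turned into a panic instead of undefined behaviour. A hedged reduction of the read-lock decision above (the real `libc::EDEADLK` value is platform-specific; 35 is only for illustration):

```rust
const EDEADLK: i32 = 35; // illustrative value; varies by platform

fn check_read_lock(r: i32, write_locked: bool, raw_unlock: impl FnOnce()) {
    if r == EDEADLK || (r == 0 && write_locked) {
        if r == 0 {
            // The OS call succeeded when it should not have: undo it
            // before panicking so the lock is not left in a broken state.
            raw_unlock();
        }
        panic!("rwlock read lock would result in deadlock");
    }
}

fn main() {
    // A successful acquisition with no outstanding write lock passes.
    check_read_lock(0, false, || {});
}
```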
@@ -3,8 +3,6 @@
 
 pub struct Condvar {}
 
-pub type MovableCondvar = Condvar;
-
 impl Condvar {
     #[inline]
     #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
@@ -13,10 +11,10 @@ pub const fn new() -> Condvar {
     }
 
     #[inline]
-    pub unsafe fn notify_one(&self) {}
+    pub fn notify_one(&self) {}
 
     #[inline]
-    pub unsafe fn notify_all(&self) {}
+    pub fn notify_all(&self) {}
 
     pub unsafe fn wait(&self, _mutex: &Mutex) {
         panic!("condvar wait not supported")
@@ -1,6 +1,6 @@
 mod condvar;
 mod mutex;
 mod rwlock;
-pub use condvar::{Condvar, MovableCondvar};
-pub use mutex::{MovableMutex, Mutex};
-pub use rwlock::MovableRwLock;
+pub use condvar::Condvar;
+pub use mutex::Mutex;
+pub use rwlock::RwLock;
@@ -5,8 +5,6 @@ pub struct Mutex {
     locked: Cell<bool>,
 }
 
-pub type MovableMutex = Mutex;
-
 unsafe impl Send for Mutex {}
 unsafe impl Sync for Mutex {} // no threads on this platform
 
@@ -18,7 +16,7 @@ pub const fn new() -> Mutex {
     }
 
     #[inline]
-    pub unsafe fn lock(&self) {
+    pub fn lock(&self) {
         assert_eq!(self.locked.replace(true), false, "cannot recursively acquire mutex");
     }
 
@@ -28,7 +26,7 @@ pub unsafe fn unlock(&self) {
     }
 
     #[inline]
-    pub unsafe fn try_lock(&self) -> bool {
+    pub fn try_lock(&self) -> bool {
         self.locked.replace(true) == false
     }
 }
@@ -5,8 +5,6 @@ pub struct RwLock {
     mode: Cell<isize>,
 }
 
-pub type MovableRwLock = RwLock;
-
 unsafe impl Send for RwLock {}
 unsafe impl Sync for RwLock {} // no threads on this platform
 
@@ -18,7 +16,7 @@ pub const fn new() -> RwLock {
     }
 
     #[inline]
-    pub unsafe fn read(&self) {
+    pub fn read(&self) {
         let m = self.mode.get();
         if m >= 0 {
             self.mode.set(m + 1);
@@ -28,7 +26,7 @@ pub unsafe fn read(&self) {
     }
 
     #[inline]
-    pub unsafe fn try_read(&self) -> bool {
+    pub fn try_read(&self) -> bool {
         let m = self.mode.get();
         if m >= 0 {
             self.mode.set(m + 1);
@@ -39,14 +37,14 @@ pub unsafe fn try_read(&self) -> bool {
     }
 
     #[inline]
-    pub unsafe fn write(&self) {
+    pub fn write(&self) {
         if self.mode.replace(-1) != 0 {
             rtabort!("rwlock locked for reading")
         }
     }
 
     #[inline]
-    pub unsafe fn try_write(&self) -> bool {
+    pub fn try_write(&self) -> bool {
         if self.mode.get() == 0 {
             self.mode.set(-1);
             true
@@ -55,9 +55,9 @@ pub mod locks {
         mod futex_condvar;
         mod futex_mutex;
         mod futex_rwlock;
-        pub(crate) use futex_condvar::{Condvar, MovableCondvar};
-        pub(crate) use futex_mutex::{Mutex, MovableMutex};
-        pub(crate) use futex_rwlock::MovableRwLock;
+        pub(crate) use futex_condvar::Condvar;
+        pub(crate) use futex_mutex::Mutex;
+        pub(crate) use futex_rwlock::RwLock;
     }
     #[path = "atomics/futex.rs"]
     pub mod futex;
@@ -8,8 +8,6 @@ pub struct Condvar {
     inner: UnsafeCell<c::CONDITION_VARIABLE>,
 }
 
-pub type MovableCondvar = Condvar;
-
 unsafe impl Send for Condvar {}
 unsafe impl Sync for Condvar {}
 
@@ -41,12 +39,12 @@ pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
     }
 
     #[inline]
-    pub unsafe fn notify_one(&self) {
-        c::WakeConditionVariable(self.inner.get())
+    pub fn notify_one(&self) {
+        unsafe { c::WakeConditionVariable(self.inner.get()) }
     }
 
     #[inline]
-    pub unsafe fn notify_all(&self) {
-        c::WakeAllConditionVariable(self.inner.get())
+    pub fn notify_all(&self) {
+        unsafe { c::WakeAllConditionVariable(self.inner.get()) }
     }
 }
@@ -1,6 +1,6 @@
 mod condvar;
 mod mutex;
 mod rwlock;
-pub use condvar::{Condvar, MovableCondvar};
-pub use mutex::{MovableMutex, Mutex};
-pub use rwlock::MovableRwLock;
+pub use condvar::Condvar;
+pub use mutex::Mutex;
+pub use rwlock::RwLock;
@@ -21,9 +21,6 @@ pub struct Mutex {
     srwlock: UnsafeCell<c::SRWLOCK>,
 }
 
-// Windows SRW Locks are movable (while not borrowed).
-pub type MovableMutex = Mutex;
-
 unsafe impl Send for Mutex {}
 unsafe impl Sync for Mutex {}
 
@@ -39,13 +36,15 @@ pub const fn new() -> Mutex {
     }
 
     #[inline]
-    pub unsafe fn lock(&self) {
-        c::AcquireSRWLockExclusive(raw(self));
+    pub fn lock(&self) {
+        unsafe {
+            c::AcquireSRWLockExclusive(raw(self));
+        }
     }
 
     #[inline]
-    pub unsafe fn try_lock(&self) -> bool {
-        c::TryAcquireSRWLockExclusive(raw(self)) != 0
+    pub fn try_lock(&self) -> bool {
+        unsafe { c::TryAcquireSRWLockExclusive(raw(self)) != 0 }
     }
 
     #[inline]
@@ -5,8 +5,6 @@ pub struct RwLock {
     inner: UnsafeCell<c::SRWLOCK>,
 }
 
-pub type MovableRwLock = RwLock;
-
 unsafe impl Send for RwLock {}
 unsafe impl Sync for RwLock {}
 
@@ -16,20 +14,20 @@ pub const fn new() -> RwLock {
         RwLock { inner: UnsafeCell::new(c::SRWLOCK_INIT) }
     }
     #[inline]
-    pub unsafe fn read(&self) {
-        c::AcquireSRWLockShared(self.inner.get())
+    pub fn read(&self) {
+        unsafe { c::AcquireSRWLockShared(self.inner.get()) }
     }
     #[inline]
-    pub unsafe fn try_read(&self) -> bool {
-        c::TryAcquireSRWLockShared(self.inner.get()) != 0
+    pub fn try_read(&self) -> bool {
+        unsafe { c::TryAcquireSRWLockShared(self.inner.get()) != 0 }
    }
     #[inline]
-    pub unsafe fn write(&self) {
-        c::AcquireSRWLockExclusive(self.inner.get())
+    pub fn write(&self) {
+        unsafe { c::AcquireSRWLockExclusive(self.inner.get()) }
     }
     #[inline]
-    pub unsafe fn try_write(&self) -> bool {
-        c::TryAcquireSRWLockExclusive(self.inner.get()) != 0
+    pub fn try_write(&self) -> bool {
+        unsafe { c::TryAcquireSRWLockExclusive(self.inner.get()) != 0 }
     }
     #[inline]
     pub unsafe fn read_unlock(&self) {
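The Windows hunks are a pure encapsulation shift: SRW locks are movable and these operations are always sound, so the `unsafe` moves from the function signatures into `unsafe { }` blocks around the FFI calls. The general pattern, illustrated with a dummy stand-in for the FFI function:

```rust
// Stand-in for an FFI lock call such as AcquireSRWLockExclusive.
unsafe fn os_acquire(_handle: *mut u8) {}

pub struct Lock {
    handle: *mut u8,
}

impl Lock {
    pub fn lock(&self) {
        // SAFETY: the handle is valid for the lifetime of `Lock`, so the
        // call is sound for any caller; the unsafe obligation is
        // discharged here instead of being passed up the signature.
        unsafe { os_acquire(self.handle) }
    }
}

fn main() {
    let mut word = 0u8;
    let lock = Lock { handle: &mut word };
    lock.lock();
}
```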
@ -1,57 +0,0 @@
use crate::sys::locks as imp;
use crate::sys_common::mutex::MovableMutex;
use crate::time::Duration;

mod check;

type CondvarCheck = <imp::MovableMutex as check::CondvarCheck>::Check;

/// An OS-based condition variable.
pub struct Condvar {
inner: imp::MovableCondvar,
check: CondvarCheck,
}

impl Condvar {
/// Creates a new condition variable for use.
#[inline]
#[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> Self {
Self { inner: imp::MovableCondvar::new(), check: CondvarCheck::new() }
}

/// Signals one waiter on this condition variable to wake up.
#[inline]
pub fn notify_one(&self) {
unsafe { self.inner.notify_one() };
}

/// Awakens all current waiters on this condition variable.
#[inline]
pub fn notify_all(&self) {
unsafe { self.inner.notify_all() };
}

/// Waits for a signal on the specified mutex.
///
/// Behavior is undefined if the mutex is not locked by the current thread.
///
/// May panic if used with more than one mutex.
#[inline]
pub unsafe fn wait(&self, mutex: &MovableMutex) {
self.check.verify(mutex);
self.inner.wait(mutex.raw())
}

/// Waits for a signal on the specified mutex with a timeout duration
/// specified by `dur` (a relative time into the future).
///
/// Behavior is undefined if the mutex is not locked by the current thread.
///
/// May panic if used with more than one mutex.
#[inline]
pub unsafe fn wait_timeout(&self, mutex: &MovableMutex, dur: Duration) -> bool {
self.check.verify(mutex);
self.inner.wait_timeout(mutex.raw(), dur)
}
}
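
The deleted wrapper spelled out its contract in comments ("Behavior is undefined if the mutex is not locked by the current thread"); the public `std::sync::Condvar` encodes the same contract in types: `wait` consumes a `MutexGuard`, proving the mutex is held, and callers re-check the condition in a loop because wakeups can be spurious. The canonical usage pattern, for reference:

use std::sync::{Arc, Condvar, Mutex};
use std::thread;

fn main() {
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = Arc::clone(&pair);

    thread::spawn(move || {
        let (lock, cvar) = &*pair2;
        *lock.lock().unwrap() = true; // set the condition...
        cvar.notify_one(); // ...then wake one waiter
    });

    let (lock, cvar) = &*pair;
    let mut started = lock.lock().unwrap();
    while !*started {
        // Handing the guard to `wait` proves this thread holds the mutex,
        // exactly the contract the deleted `unsafe fn wait` documented.
        started = cvar.wait(started).unwrap();
    }
}
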
@ -1,58 +0,0 @@
use crate::ptr;
use crate::sync::atomic::{AtomicPtr, Ordering};
use crate::sys::locks as imp;
use crate::sys_common::lazy_box::{LazyBox, LazyInit};
use crate::sys_common::mutex::MovableMutex;

pub trait CondvarCheck {
type Check;
}

/// For boxed mutexes, a `Condvar` will check it's only ever used with the same
/// mutex, based on its (stable) address.
impl<T: LazyInit> CondvarCheck for LazyBox<T> {
type Check = SameMutexCheck;
}

pub struct SameMutexCheck {
addr: AtomicPtr<()>,
}

#[allow(dead_code)]
impl SameMutexCheck {
pub const fn new() -> Self {
Self { addr: AtomicPtr::new(ptr::null_mut()) }
}
pub fn verify(&self, mutex: &MovableMutex) {
let addr = mutex.raw() as *const imp::Mutex as *const () as *mut _;
// Relaxed is okay here because we never read through `self.addr`, and only use it to
// compare addresses.
match self.addr.compare_exchange(
ptr::null_mut(),
addr,
Ordering::Relaxed,
Ordering::Relaxed,
) {
Ok(_) => {} // Stored the address
Err(n) if n == addr => {} // Lost a race to store the same address
_ => panic!("attempted to use a condition variable with two mutexes"),
}
}
}

/// Unboxed mutexes may move, so `Condvar` can not require its address to stay
/// constant.
impl CondvarCheck for imp::Mutex {
type Check = NoCheck;
}

pub struct NoCheck;

#[allow(dead_code)]
impl NoCheck {
#[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> Self {
Self
}
pub fn verify(&self, _: &MovableMutex) {}
}
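
The load-bearing trick in the deleted file is the `AtomicPtr` compare-exchange that pins the first mutex address a `Condvar` is used with. A self-contained sketch of that technique (the type and function names here are mine, not std's):

use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

// Standalone version of the SameMutexCheck idea: remember the first address
// this checker is used with, and panic if a different one shows up later.
pub struct SameAddrCheck {
    addr: AtomicPtr<()>,
}

impl SameAddrCheck {
    pub const fn new() -> Self {
        Self { addr: AtomicPtr::new(ptr::null_mut()) }
    }

    pub fn verify<T>(&self, object: &T) {
        let addr = object as *const T as *const () as *mut ();
        // Relaxed is enough: the pointer is only compared, never dereferenced.
        match self.addr.compare_exchange(ptr::null_mut(), addr, Ordering::Relaxed, Ordering::Relaxed) {
            Ok(_) => {}                     // first use: address recorded
            Err(prev) if prev == addr => {} // lost a race to store the same address
            _ => panic!("used with two different objects"),
        }
    }
}

fn main() {
    let check = SameAddrCheck::new();
    let x = 1u32;
    check.verify(&x);
    check.verify(&x); // same address, so this passes
}
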
@ -21,16 +21,13 @@
mod tests;

pub mod backtrace;
pub mod condvar;
pub mod fs;
pub mod io;
pub mod lazy_box;
pub mod memchr;
pub mod mutex;
pub mod once;
pub mod process;
pub mod remutex;
pub mod rwlock;
pub mod thread;
pub mod thread_info;
pub mod thread_local_dtor;

@ -1,50 +0,0 @@
use crate::sys::locks as imp;

/// An OS-based mutual exclusion lock.
///
/// This mutex cleans up its resources in its `Drop` implementation, may safely
/// be moved (when not borrowed), and does not cause UB when used reentrantly.
///
/// This mutex does not implement poisoning.
///
/// This is either a wrapper around `LazyBox<imp::Mutex>` or `imp::Mutex`,
/// depending on the platform. It is boxed on platforms where `imp::Mutex` may
/// not be moved.
pub struct MovableMutex(imp::MovableMutex);

unsafe impl Sync for MovableMutex {}

impl MovableMutex {
/// Creates a new mutex.
#[inline]
#[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> Self {
Self(imp::MovableMutex::new())
}

pub(super) fn raw(&self) -> &imp::Mutex {
&self.0
}

/// Locks the mutex blocking the current thread until it is available.
#[inline]
pub fn raw_lock(&self) {
unsafe { self.0.lock() }
}

/// Attempts to lock the mutex without blocking, returning whether it was
/// successfully acquired or not.
#[inline]
pub fn try_lock(&self) -> bool {
unsafe { self.0.try_lock() }
}

/// Unlocks the mutex.
///
/// Behavior is undefined if the current thread does not actually hold the
/// mutex.
#[inline]
pub unsafe fn raw_unlock(&self) {
self.0.unlock()
}
}
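
The doc comment above explains why these wrappers existed at all: on platforms whose native lock is address-sensitive, the inner value went behind `LazyBox` so that its heap address stayed stable while the wrapper moved freely. A rough sketch of that idea, in spirit only (this is not the actual `sys_common::lazy_box` code):

use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

// Rough sketch of the LazyBox idea: allocate the inner value on first use so
// its heap address is stable even if this wrapper moves. Not std's actual
// implementation, just the shape of it.
pub struct LazyBox<T> {
    ptr: AtomicPtr<T>,
}

impl<T: Default> LazyBox<T> {
    pub const fn new() -> Self {
        Self { ptr: AtomicPtr::new(ptr::null_mut()) }
    }

    pub fn get(&self) -> &T {
        let mut p = self.ptr.load(Ordering::Acquire);
        if p.is_null() {
            let fresh = Box::into_raw(Box::new(T::default()));
            match self.ptr.compare_exchange(ptr::null_mut(), fresh, Ordering::AcqRel, Ordering::Acquire) {
                Ok(_) => p = fresh,
                Err(winner) => {
                    // Another thread initialized first; free our allocation.
                    unsafe { drop(Box::from_raw(fresh)) };
                    p = winner;
                }
            }
        }
        unsafe { &*p }
    }
}

impl<T> Drop for LazyBox<T> {
    fn drop(&mut self) {
        let p = *self.ptr.get_mut();
        if !p.is_null() {
            unsafe { drop(Box::from_raw(p)) };
        }
    }
}
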
@ -1,11 +1,11 @@
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests;

use super::mutex as sys;
use crate::cell::UnsafeCell;
use crate::ops::Deref;
use crate::panic::{RefUnwindSafe, UnwindSafe};
use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use crate::sys::locks as sys;

/// A re-entrant mutual exclusion
///
@ -39,7 +39,7 @@
/// synchronization is left to the mutex, making relaxed memory ordering for
/// the `owner` field fine in all cases.
pub struct ReentrantMutex<T> {
mutex: sys::MovableMutex,
mutex: sys::Mutex,
owner: AtomicUsize,
lock_count: UnsafeCell<u32>,
data: T,
@ -74,7 +74,7 @@ impl<T> ReentrantMutex<T> {
/// Creates a new reentrant mutex in an unlocked state.
pub const fn new(t: T) -> ReentrantMutex<T> {
ReentrantMutex {
mutex: sys::MovableMutex::new(),
mutex: sys::Mutex::new(),
owner: AtomicUsize::new(0),
lock_count: UnsafeCell::new(0),
data: t,
@ -100,7 +100,7 @@ pub fn lock(&self) -> ReentrantMutexGuard<'_, T> {
if self.owner.load(Relaxed) == this_thread {
self.increment_lock_count();
} else {
self.mutex.raw_lock();
self.mutex.lock();
self.owner.store(this_thread, Relaxed);
debug_assert_eq!(*self.lock_count.get(), 0);
*self.lock_count.get() = 1;
@ -162,7 +162,7 @@ fn drop(&mut self) {
*self.lock.lock_count.get() -= 1;
if *self.lock.lock_count.get() == 0 {
self.lock.owner.store(0, Relaxed);
self.lock.mutex.raw_unlock();
self.lock.mutex.unlock();
}
}
}
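
Renames aside, this hunk displays the entire reentrancy algorithm: compare `owner` against the current thread, bump `lock_count` on reentry, otherwise take the underlying mutex and claim ownership; the Relaxed loads are fine because the mutex itself provides the synchronization. A compilable toy version (a spin flag replaces the OS mutex, and a thread-local's address stands in for std's internal thread id):

use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

pub struct ReentrantLock {
    locked: AtomicBool, // toy spin flag in place of the OS mutex
    owner: AtomicUsize, // 0 = unowned, otherwise an id of the owning thread
    lock_count: UnsafeCell<u32>,
}

unsafe impl Sync for ReentrantLock {}

// Stand-in for std's internal thread id: the address of a thread-local.
fn current_thread_id() -> usize {
    thread_local!(static KEY: u8 = 0);
    KEY.with(|k| k as *const u8 as usize)
}

impl ReentrantLock {
    pub const fn new() -> Self {
        Self {
            locked: AtomicBool::new(false),
            owner: AtomicUsize::new(0),
            lock_count: UnsafeCell::new(0),
        }
    }

    pub fn lock(&self) {
        let this_thread = current_thread_id();
        if self.owner.load(Ordering::Relaxed) == this_thread {
            // Reentrant acquisition: we already hold the flag, so only the
            // owning thread ever touches `lock_count` here.
            unsafe { *self.lock_count.get() += 1 };
        } else {
            while self.locked.swap(true, Ordering::Acquire) {
                std::hint::spin_loop();
            }
            self.owner.store(this_thread, Ordering::Relaxed);
            unsafe { *self.lock_count.get() = 1 };
        }
    }

    // SAFETY: the caller must hold the lock; std only unlocks from the
    // guard's Drop, which guarantees exactly that.
    pub unsafe fn unlock(&self) {
        *self.lock_count.get() -= 1;
        if *self.lock_count.get() == 0 {
            self.owner.store(0, Ordering::Relaxed);
            self.locked.store(false, Ordering::Release);
        }
    }
}
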
@ -1,71 +0,0 @@
use crate::sys::locks as imp;

/// An OS-based reader-writer lock.
///
/// This rwlock cleans up its resources in its `Drop` implementation and may
/// safely be moved (when not borrowed).
///
/// This rwlock does not implement poisoning.
///
/// This is either a wrapper around `LazyBox<imp::RwLock>` or `imp::RwLock`,
/// depending on the platform. It is boxed on platforms where `imp::RwLock` may
/// not be moved.
pub struct MovableRwLock(imp::MovableRwLock);

impl MovableRwLock {
/// Creates a new reader-writer lock for use.
#[inline]
#[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> Self {
Self(imp::MovableRwLock::new())
}

/// Acquires shared access to the underlying lock, blocking the current
/// thread to do so.
#[inline]
pub fn read(&self) {
unsafe { self.0.read() }
}

/// Attempts to acquire shared access to this lock, returning whether it
/// succeeded or not.
///
/// This function does not block the current thread.
#[inline]
pub fn try_read(&self) -> bool {
unsafe { self.0.try_read() }
}

/// Acquires write access to the underlying lock, blocking the current thread
/// to do so.
#[inline]
pub fn write(&self) {
unsafe { self.0.write() }
}

/// Attempts to acquire exclusive access to this lock, returning whether it
/// succeeded or not.
///
/// This function does not block the current thread.
#[inline]
pub fn try_write(&self) -> bool {
unsafe { self.0.try_write() }
}

/// Unlocks previously acquired shared access to this lock.
///
/// Behavior is undefined if the current thread does not have shared access.
#[inline]
pub unsafe fn read_unlock(&self) {
self.0.read_unlock()
}

/// Unlocks previously acquired exclusive access to this lock.
///
/// Behavior is undefined if the current thread does not currently have
/// exclusive access.
#[inline]
pub unsafe fn write_unlock(&self) {
self.0.write_unlock()
}
}
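
With this last wrapper gone, every `sys` lock now exposes the same shape directly: safe acquire, unsafe release, made sound again by RAII guards one layer up. A self-contained toy showing that shape for the reader side (a reader-counting spin lock, not std's implementation):

use std::sync::atomic::{AtomicI32, Ordering};

// Toy reader-counting lock: state is -1 for a writer, 0 for free, n > 0 for
// n readers. Safe acquire, unsafe release, guard-mediated, like the sys API.
pub struct RwLock {
    state: AtomicI32,
}

pub struct ReadGuard<'a> {
    lock: &'a RwLock,
}

impl RwLock {
    pub const fn new() -> Self {
        Self { state: AtomicI32::new(0) }
    }

    pub fn read(&self) -> ReadGuard<'_> {
        loop {
            let s = self.state.load(Ordering::Relaxed);
            if s >= 0
                && self
                    .state
                    .compare_exchange_weak(s, s + 1, Ordering::Acquire, Ordering::Relaxed)
                    .is_ok()
            {
                return ReadGuard { lock: self };
            }
            std::hint::spin_loop();
        }
    }

    // Unsafe on its own: releasing without a matching `read` would corrupt
    // the reader count. The guard below is what makes it sound.
    unsafe fn read_unlock(&self) {
        self.state.fetch_sub(1, Ordering::Release);
    }
}

impl Drop for ReadGuard<'_> {
    fn drop(&mut self) {
        // SAFETY: a ReadGuard exists only after a successful `read`.
        unsafe { self.lock.read_unlock() };
    }
}

fn main() {
    let lock = RwLock::new();
    let g1 = lock.read();
    let g2 = lock.read(); // multiple readers may coexist
    drop(g1);
    drop(g2); // the count returns to 0 and a writer could now enter
}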