Auto merge of #3973 - RalfJung:os-unfair-lock, r=RalfJung

ensure that a macOS os_unfair_lock that is moved while being held is not implicitly unlocked

Fixes https://github.com/rust-lang/miri/issues/3859

We mark an os_unfair_lock that is moved while being held as "poisoned", which means it is now considered forever locked. That's not quite what the real implementation does, but allowing arbitrary moves-while-locked would likely expose a ton of implementation details, so hopefully this is good enough. A sketch of the resulting user-visible behavior is included below.
bors 2024-10-14 20:47:13 +00:00
commit b7c06b40e1
10 changed files with 297 additions and 122 deletions
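To make the new semantics concrete, here is a sketch (not one of this PR's test files) of what the change means from user code on macOS under Miri. The libc calls are the real os_unfair_lock API, also used by the tests below; the comments describe the behavior the diffs in this commit implement:

    use std::cell::UnsafeCell;

    fn main() {
        let lock = UnsafeCell::new(libc::OS_UNFAIR_LOCK_INIT);
        unsafe { libc::os_unfair_lock_lock(lock.get()) };
        // Moving the lock while it is held "poisons" it under Miri:
        let moved = lock;
        // From now on the lock counts as locked forever by someone else:
        // `os_unfair_lock_lock` blocks forever (reported as a deadlock),
        // `os_unfair_lock_trylock` always fails, and
        // `os_unfair_lock_unlock` aborts the program.
        let locked = unsafe { libc::os_unfair_lock_trylock(moved.get()) };
        assert!(!locked);
    }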


@@ -193,75 +193,109 @@ pub fn get_sync<T: 'static>(&self, offset: Size) -> Option<&T> {
 /// If `init` is set to this, we consider the primitive initialized.
 pub const LAZY_INIT_COOKIE: u32 = 0xcafe_affe;

-/// Helper for lazily initialized `alloc_extra.sync` data:
-/// this forces an immediate init.
-pub fn lazy_sync_init<'tcx, T: 'static + Copy>(
-    ecx: &mut MiriInterpCx<'tcx>,
-    primitive: &MPlaceTy<'tcx>,
-    init_offset: Size,
-    data: T,
-) -> InterpResult<'tcx> {
-    let (alloc, offset, _) = ecx.ptr_get_alloc_id(primitive.ptr(), 0)?;
-    let (alloc_extra, _machine) = ecx.get_alloc_extra_mut(alloc)?;
-    alloc_extra.sync.insert(offset, Box::new(data));
-    // Mark this as "initialized".
-    let init_field = primitive.offset(init_offset, ecx.machine.layouts.u32, ecx)?;
-    ecx.write_scalar_atomic(
-        Scalar::from_u32(LAZY_INIT_COOKIE),
-        &init_field,
-        AtomicWriteOrd::Relaxed,
-    )?;
-    interp_ok(())
-}
-
-/// Helper for lazily initialized `alloc_extra.sync` data:
-/// Checks if the primitive is initialized, and returns its associated data if so.
-/// Otherwise, calls `new_data` to initialize the primitive.
-pub fn lazy_sync_get_data<'tcx, T: 'static + Copy>(
-    ecx: &mut MiriInterpCx<'tcx>,
-    primitive: &MPlaceTy<'tcx>,
-    init_offset: Size,
-    name: &str,
-    new_data: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
-) -> InterpResult<'tcx, T> {
-    // Check if this is already initialized. Needs to be atomic because we can race with another
-    // thread initializing. Needs to be an RMW operation to ensure we read the *latest* value.
-    // So we just try to replace LAZY_INIT_COOKIE with itself.
-    let init_cookie = Scalar::from_u32(LAZY_INIT_COOKIE);
-    let init_field = primitive.offset(init_offset, ecx.machine.layouts.u32, ecx)?;
-    let (_init, success) = ecx
-        .atomic_compare_exchange_scalar(
-            &init_field,
-            &ImmTy::from_scalar(init_cookie, ecx.machine.layouts.u32),
-            init_cookie,
-            AtomicRwOrd::Relaxed,
-            AtomicReadOrd::Relaxed,
-            /* can_fail_spuriously */ false,
-        )?
-        .to_scalar_pair();
-    if success.to_bool()? {
-        // If it is initialized, it must be found in the "sync primitive" table,
-        // or else it has been moved illegally.
-        let (alloc, offset, _) = ecx.ptr_get_alloc_id(primitive.ptr(), 0)?;
-        let alloc_extra = ecx.get_alloc_extra(alloc)?;
-        let data = alloc_extra
-            .get_sync::<T>(offset)
-            .ok_or_else(|| err_ub_format!("`{name}` can't be moved after first use"))?;
-        interp_ok(*data)
-    } else {
-        let data = new_data(ecx)?;
-        lazy_sync_init(ecx, primitive, init_offset, data)?;
-        interp_ok(data)
-    }
-}
-
 // Public interface to synchronization primitives. Please note that in most
 // cases, the function calls are infallible and it is the client's (shim
 // implementation's) responsibility to detect and deal with erroneous
 // situations.
 impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
 pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
+    /// Helper for lazily initialized `alloc_extra.sync` data:
+    /// this forces an immediate init.
+    fn lazy_sync_init<T: 'static + Copy>(
+        &mut self,
+        primitive: &MPlaceTy<'tcx>,
+        init_offset: Size,
+        data: T,
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_mut();
+        let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
+        let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
+        alloc_extra.sync.insert(offset, Box::new(data));
+        // Mark this as "initialized".
+        let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
+        this.write_scalar_atomic(
+            Scalar::from_u32(LAZY_INIT_COOKIE),
+            &init_field,
+            AtomicWriteOrd::Relaxed,
+        )?;
+        interp_ok(())
+    }
+
+    /// Helper for lazily initialized `alloc_extra.sync` data:
+    /// Checks if the primitive is initialized:
+    /// - If yes, fetches the data from `alloc_extra.sync`, or calls `missing_data` if that fails
+    ///   and stores that in `alloc_extra.sync`.
+    /// - Otherwise, calls `new_data` to initialize the primitive.
+    fn lazy_sync_get_data<T: 'static + Copy>(
+        &mut self,
+        primitive: &MPlaceTy<'tcx>,
+        init_offset: Size,
+        missing_data: impl FnOnce() -> InterpResult<'tcx, T>,
+        new_data: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
+    ) -> InterpResult<'tcx, T> {
+        let this = self.eval_context_mut();
+        // Check if this is already initialized. Needs to be atomic because we can race with another
+        // thread initializing. Needs to be an RMW operation to ensure we read the *latest* value.
+        // So we just try to replace LAZY_INIT_COOKIE with itself.
+        let init_cookie = Scalar::from_u32(LAZY_INIT_COOKIE);
+        let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
+        let (_init, success) = this
+            .atomic_compare_exchange_scalar(
+                &init_field,
+                &ImmTy::from_scalar(init_cookie, this.machine.layouts.u32),
+                init_cookie,
+                AtomicRwOrd::Relaxed,
+                AtomicReadOrd::Relaxed,
+                /* can_fail_spuriously */ false,
+            )?
+            .to_scalar_pair();
+        if success.to_bool()? {
+            // If it is initialized, it must be found in the "sync primitive" table,
+            // or else it has been moved illegally.
+            let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
+            let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
+            if let Some(data) = alloc_extra.get_sync::<T>(offset) {
+                interp_ok(*data)
+            } else {
+                let data = missing_data()?;
+                alloc_extra.sync.insert(offset, Box::new(data));
+                interp_ok(data)
+            }
+        } else {
+            let data = new_data(this)?;
+            this.lazy_sync_init(primitive, init_offset, data)?;
+            interp_ok(data)
+        }
+    }
+
+    /// Get the synchronization primitive associated with the given pointer,
+    /// or initialize a new one.
+    fn get_sync_or_init<'a, T: 'static>(
+        &'a mut self,
+        ptr: Pointer,
+        new: impl FnOnce(&'a mut MiriMachine<'tcx>) -> InterpResult<'tcx, T>,
+    ) -> InterpResult<'tcx, &'a T>
+    where
+        'tcx: 'a,
+    {
+        let this = self.eval_context_mut();
+        // Ensure there is memory behind this pointer, so that this allocation
+        // is truly the only place where the data could be stored.
+        this.check_ptr_access(ptr, Size::from_bytes(1), CheckInAllocMsg::InboundsTest)?;
+        let (alloc, offset, _) = this.ptr_get_alloc_id(ptr, 0)?;
+        let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc)?;
+        // Due to borrow checker reasons, we have to do the lookup twice.
+        if alloc_extra.get_sync::<T>(offset).is_none() {
+            let new = new(machine)?;
+            alloc_extra.sync.insert(offset, Box::new(new));
+        }
+        interp_ok(alloc_extra.get_sync::<T>(offset).unwrap())
+    }
+
     #[inline]
     /// Get the id of the thread that currently owns this lock.
     fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId {
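The init check in `lazy_sync_get_data` deliberately uses a compare-exchange that swaps the cookie for itself rather than a plain load: a read-modify-write operation is guaranteed to observe the latest value in the location's modification order. The same trick can be shown with ordinary std atomics; this is an illustrative sketch, not code from this PR:

    use std::sync::atomic::{AtomicU32, Ordering};

    const LAZY_INIT_COOKIE: u32 = 0xcafe_affe;

    /// Returns `true` if `init_field` currently holds the cookie.
    /// Replacing the cookie with itself is an RMW, so unlike a plain
    /// relaxed load it cannot observe a stale value.
    fn is_initialized(init_field: &AtomicU32) -> bool {
        init_field
            .compare_exchange(
                LAZY_INIT_COOKIE,
                LAZY_INIT_COOKIE,
                Ordering::Relaxed,
                Ordering::Relaxed,
            )
            .is_ok()
    }

    fn main() {
        let field = AtomicU32::new(0);
        assert!(!is_initialized(&field));
        field.store(LAZY_INIT_COOKIE, Ordering::Relaxed);
        assert!(is_initialized(&field));
    }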


@@ -59,7 +59,7 @@ macro_rules! callback {
         @unblock = |$this:ident| $unblock:block
     ) => {
         callback!(
-            @capture<$tcx, $($lft),*> { $($name: $type),+ }
+            @capture<$tcx, $($lft),*> { $($name: $type),* }
             @unblock = |$this| $unblock
             @timeout = |_this| {
                 unreachable!(

@@ -10,28 +10,42 @@
 //! and we do not detect copying of the lock, but macOS doesn't guarantee anything
 //! in that case either.

+use rustc_target::abi::Size;
+
 use crate::*;

-struct MacOsUnfairLock {
-    id: MutexId,
+#[derive(Copy, Clone)]
+enum MacOsUnfairLock {
+    Poisoned,
+    Active { id: MutexId },
 }

 impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
 trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
-    fn os_unfair_lock_getid(&mut self, lock_ptr: &OpTy<'tcx>) -> InterpResult<'tcx, MutexId> {
+    fn os_unfair_lock_get_data(
+        &mut self,
+        lock_ptr: &OpTy<'tcx>,
+    ) -> InterpResult<'tcx, MacOsUnfairLock> {
         let this = self.eval_context_mut();
         let lock = this.deref_pointer(lock_ptr)?;
-        // We store the mutex ID in the `sync` metadata. This means that when the lock is moved,
-        // that's just implicitly creating a new lock at the new location.
-        let (alloc, offset, _) = this.ptr_get_alloc_id(lock.ptr(), 0)?;
-        let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc)?;
-        if let Some(data) = alloc_extra.get_sync::<MacOsUnfairLock>(offset) {
-            interp_ok(data.id)
-        } else {
-            let id = machine.sync.mutex_create();
-            alloc_extra.sync.insert(offset, Box::new(MacOsUnfairLock { id }));
-            interp_ok(id)
-        }
+        this.lazy_sync_get_data(
+            &lock,
+            Size::ZERO, // offset for init tracking
+            || {
+                // If we get here, due to how we reset things to zero in `os_unfair_lock_unlock`,
+                // this means the lock was moved while locked. This can happen with a `std` lock,
+                // but then any future attempt to unlock will just deadlock. In practice, terrible
+                // things can probably happen if you swap two locked locks, since they'd wake up
+                // from the wrong queue... we just won't catch all UB of this library API then (we
+                // would need to store some unique identifier in-memory for this, instead of a
+                // static LAZY_INIT_COOKIE). This can't be hit via `std::sync::Mutex`.
+                interp_ok(MacOsUnfairLock::Poisoned)
+            },
+            |ecx| {
+                let id = ecx.machine.sync.mutex_create();
+                interp_ok(MacOsUnfairLock::Active { id })
+            },
+        )
     }
 }

@@ -40,7 +54,21 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     fn os_unfair_lock_lock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();

-        let id = this.os_unfair_lock_getid(lock_op)?;
+        let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+            // Trying to get a poisoned lock. Just block forever...
+            this.block_thread(
+                BlockReason::Sleep,
+                None,
+                callback!(
+                    @capture<'tcx> {}
+                    @unblock = |_this| {
+                        panic!("we shouldn't wake up ever")
+                    }
+                ),
+            );
+            return interp_ok(());
+        };
         if this.mutex_is_locked(id) {
             if this.mutex_get_owner(id) == this.active_thread() {
                 // Matching the current macOS implementation: abort on reentrant locking.

@@ -64,7 +92,12 @@ fn os_unfair_lock_trylock(
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();

-        let id = this.os_unfair_lock_getid(lock_op)?;
+        let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+            // Trying to get a poisoned lock. That never works.
+            this.write_scalar(Scalar::from_bool(false), dest)?;
+            return interp_ok(());
+        };
         if this.mutex_is_locked(id) {
             // Contrary to the blocking lock function, this does not check for
             // reentrancy.

@@ -80,7 +113,14 @@ fn os_unfair_lock_trylock(
     fn os_unfair_lock_unlock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();

-        let id = this.os_unfair_lock_getid(lock_op)?;
+        let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+            // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
+            throw_machine_stop!(TerminationInfo::Abort(
+                "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
+            ));
+        };
+
+        // Now, unlock.
         if this.mutex_unlock(id)?.is_none() {
             // Matching the current macOS implementation: abort.
             throw_machine_stop!(TerminationInfo::Abort(

@@ -88,32 +128,56 @@ fn os_unfair_lock_unlock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx>
             ));
         }

+        // If the lock is not locked by anyone now, it went quiet.
+        // Reset to zero so that it can be moved and initialized again for the next phase.
+        if !this.mutex_is_locked(id) {
+            let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
+            this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
+        }
+
         interp_ok(())
     }

     fn os_unfair_lock_assert_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();

-        let id = this.os_unfair_lock_getid(lock_op)?;
+        let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+            // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
+            throw_machine_stop!(TerminationInfo::Abort(
+                "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
+            ));
+        };
         if !this.mutex_is_locked(id) || this.mutex_get_owner(id) != this.active_thread() {
             throw_machine_stop!(TerminationInfo::Abort(
                 "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
             ));
         }

+        // The lock is definitely not quiet since we are the owner.
+
         interp_ok(())
     }

     fn os_unfair_lock_assert_not_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();

-        let id = this.os_unfair_lock_getid(lock_op)?;
+        let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+            // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
+            return interp_ok(());
+        };
         if this.mutex_is_locked(id) && this.mutex_get_owner(id) == this.active_thread() {
             throw_machine_stop!(TerminationInfo::Abort(
                 "called os_unfair_lock_assert_not_owner on an os_unfair_lock owned by the current thread".to_owned()
             ));
         }

+        // If the lock is not locked by anyone now, it went quiet.
+        // Reset to zero so that it can be moved and initialized again for the next phase.
+        if !this.mutex_is_locked(id) {
+            let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
+            this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
+        }
+
         interp_ok(())
     }
 }
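Putting the pieces together, the shim implements a small lifecycle: a lock whose memory is all zeroes is "quiet" and may be moved freely; first use writes the init cookie and creates a side-table entry; unlocking such that no owner remains resets the memory to zero; and finding the cookie in memory without a side-table entry means the lock was moved while in use, i.e. poisoned. A toy model of that state machine (illustrative types only, not Miri's internals):

    #[derive(Copy, Clone)]
    enum LockState {
        /// In-memory bytes are zero: not tracked yet, moving is harmless.
        Quiet,
        /// Init cookie written and side-table entry present.
        Active { locked: bool },
        /// Cookie present but side-table entry missing: moved while in use,
        /// so the lock counts as locked forever.
        Poisoned,
    }

    /// An unlock that leaves no owner goes back to `Quiet`: the shim writes
    /// zero to the lock's memory so it can be moved and re-initialized.
    fn on_unlock(state: &mut LockState) {
        if matches!(state, LockState::Active { .. }) {
            *state = LockState::Quiet;
        }
    }

    /// Moving the bytes leaves the side-table entry behind at the old location.
    fn on_move(state: LockState) -> LockState {
        match state {
            LockState::Quiet => LockState::Quiet, // fresh init at the new place
            LockState::Active { .. } | LockState::Poisoned => LockState::Poisoned,
        }
    }

    fn main() {
        // Unlock first, then move: fine, the lock re-initializes.
        let mut lock = LockState::Active { locked: true };
        on_unlock(&mut lock);
        assert!(matches!(on_move(lock), LockState::Quiet));

        // Move while held: poisoned at the new location.
        let held = LockState::Active { locked: true };
        if let LockState::Active { locked } = held {
            assert!(locked); // we really are moving a held lock
        }
        assert!(matches!(on_move(held), LockState::Poisoned));
    }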


@@ -2,7 +2,7 @@
 use rustc_target::abi::Size;

-use crate::concurrency::sync::{LAZY_INIT_COOKIE, lazy_sync_get_data, lazy_sync_init};
+use crate::concurrency::sync::LAZY_INIT_COOKIE;
 use crate::*;

 /// Do a bytewise comparison of the two places, using relaxed atomic reads. This is used to check if

@@ -176,7 +176,7 @@ fn mutex_create<'tcx>(
     let mutex = ecx.deref_pointer(mutex_ptr)?;
     let id = ecx.machine.sync.mutex_create();
     let data = PthreadMutex { id, kind };
-    lazy_sync_init(ecx, &mutex, mutex_init_offset(ecx)?, data)?;
+    ecx.lazy_sync_init(&mutex, mutex_init_offset(ecx)?, data)?;
     interp_ok(data)
 }

@@ -189,11 +189,16 @@ fn mutex_get_data<'tcx, 'a>(
     mutex_ptr: &OpTy<'tcx>,
 ) -> InterpResult<'tcx, PthreadMutex> {
     let mutex = ecx.deref_pointer(mutex_ptr)?;
-    lazy_sync_get_data(ecx, &mutex, mutex_init_offset(ecx)?, "pthread_mutex_t", |ecx| {
-        let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
-        let id = ecx.machine.sync.mutex_create();
-        interp_ok(PthreadMutex { id, kind })
-    })
+    ecx.lazy_sync_get_data(
+        &mutex,
+        mutex_init_offset(ecx)?,
+        || throw_ub_format!("`pthread_mutex_t` can't be moved after first use"),
+        |ecx| {
+            let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
+            let id = ecx.machine.sync.mutex_create();
+            interp_ok(PthreadMutex { id, kind })
+        },
+    )
 }

 /// Returns the kind of a static initializer.

@@ -261,17 +266,22 @@ fn rwlock_get_data<'tcx>(
     rwlock_ptr: &OpTy<'tcx>,
 ) -> InterpResult<'tcx, PthreadRwLock> {
     let rwlock = ecx.deref_pointer(rwlock_ptr)?;
-    lazy_sync_get_data(ecx, &rwlock, rwlock_init_offset(ecx)?, "pthread_rwlock_t", |ecx| {
-        if !bytewise_equal_atomic_relaxed(
-            ecx,
-            &rwlock,
-            &ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]),
-        )? {
-            throw_unsup_format!("unsupported static initializer used for `pthread_rwlock_t`");
-        }
-        let id = ecx.machine.sync.rwlock_create();
-        interp_ok(PthreadRwLock { id })
-    })
+    ecx.lazy_sync_get_data(
+        &rwlock,
+        rwlock_init_offset(ecx)?,
+        || throw_ub_format!("`pthread_rwlock_t` can't be moved after first use"),
+        |ecx| {
+            if !bytewise_equal_atomic_relaxed(
+                ecx,
+                &rwlock,
+                &ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]),
+            )? {
+                throw_unsup_format!("unsupported static initializer used for `pthread_rwlock_t`");
+            }
+            let id = ecx.machine.sync.rwlock_create();
+            interp_ok(PthreadRwLock { id })
+        },
+    )
 }

 // # pthread_condattr_t

@@ -377,7 +387,7 @@ fn cond_create<'tcx>(
     let cond = ecx.deref_pointer(cond_ptr)?;
     let id = ecx.machine.sync.condvar_create();
     let data = PthreadCondvar { id, clock };
-    lazy_sync_init(ecx, &cond, cond_init_offset(ecx)?, data)?;
+    ecx.lazy_sync_init(&cond, cond_init_offset(ecx)?, data)?;
     interp_ok(data)
 }

@@ -386,18 +396,23 @@ fn cond_get_data<'tcx>(
     cond_ptr: &OpTy<'tcx>,
 ) -> InterpResult<'tcx, PthreadCondvar> {
     let cond = ecx.deref_pointer(cond_ptr)?;
-    lazy_sync_get_data(ecx, &cond, cond_init_offset(ecx)?, "pthread_cond_t", |ecx| {
-        if !bytewise_equal_atomic_relaxed(
-            ecx,
-            &cond,
-            &ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]),
-        )? {
-            throw_unsup_format!("unsupported static initializer used for `pthread_cond_t`");
-        }
-        // This used the static initializer. The clock there is always CLOCK_REALTIME.
-        let id = ecx.machine.sync.condvar_create();
-        interp_ok(PthreadCondvar { id, clock: ClockId::Realtime })
-    })
+    ecx.lazy_sync_get_data(
+        &cond,
+        cond_init_offset(ecx)?,
+        || throw_ub_format!("`pthread_cond_t` can't be moved after first use"),
+        |ecx| {
+            if !bytewise_equal_atomic_relaxed(
+                ecx,
+                &cond,
+                &ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]),
+            )? {
+                throw_unsup_format!("unsupported static initializer used for `pthread_cond_t`");
+            }
+            // This used the static initializer. The clock there is always CLOCK_REALTIME.
+            let id = ecx.machine.sync.condvar_create();
+            interp_ok(PthreadCondvar { id, clock: ClockId::Realtime })
+        },
+    )
 }

 impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
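The recurring refactor in this file follows the new `lazy_sync_get_data` signature: instead of a `name` string used for a hard-coded UB error, each call site now passes a `missing_data` closure (what to do when the init cookie is set but the side-table entry is gone, i.e. after a move) alongside the `new_data` closure (first-time initialization). The shape of that API can be sketched with hypothetical stand-in types; the real helper works on interpreter memory, not a `HashMap`:

    use std::collections::HashMap;

    /// Stand-in for Miri's per-allocation `sync` side table.
    struct SideTable<T> {
        cookie_set: bool,      // plays the role of the LAZY_INIT_COOKIE check
        data: HashMap<u64, T>, // keyed by offset into the allocation
    }

    fn lazy_get_data<T: Copy, E>(
        table: &mut SideTable<T>,
        offset: u64,
        missing_data: impl FnOnce() -> Result<T, E>,
        new_data: impl FnOnce(&mut SideTable<T>) -> Result<T, E>,
    ) -> Result<T, E> {
        if table.cookie_set {
            match table.data.get(&offset) {
                Some(data) => Ok(*data),
                None => {
                    // Cookie set but entry gone: the primitive was moved.
                    let data = missing_data()?;
                    table.data.insert(offset, data);
                    Ok(data)
                }
            }
        } else {
            // First use: initialize.
            let data = new_data(table)?;
            table.cookie_set = true;
            table.data.insert(offset, data);
            Ok(data)
        }
    }

    fn main() {
        let mut table = SideTable { cookie_set: false, data: HashMap::new() };
        let v: Result<u32, &str> = lazy_get_data(&mut table, 0, || Err("moved"), |_| Ok(42));
        assert_eq!(v, Ok(42));
        // Simulate a move: the cookie survives in memory, the entry does not.
        table.data.clear();
        let v: Result<u32, &str> = lazy_get_data(&mut table, 0, || Err("moved"), |_| Ok(42));
        assert_eq!(v, Err("moved"));
    }

The pthread shims pass a `missing_data` that raises a UB error ("can't be moved after first use"), while the macOS shim instead substitutes the `Poisoned` state.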


@@ -3,7 +3,6 @@
 use rustc_target::abi::Size;

 use crate::concurrency::init_once::InitOnceStatus;
-use crate::concurrency::sync::lazy_sync_get_data;
 use crate::*;

 #[derive(Copy, Clone)]

@@ -25,11 +24,16 @@ fn init_once_get_data(
         let init_once = this.deref_pointer(init_once_ptr)?;
         let init_offset = Size::ZERO;

-        lazy_sync_get_data(this, &init_once, init_offset, "INIT_ONCE", |this| {
-            // TODO: check that this is still all-zero.
-            let id = this.machine.sync.init_once_create();
-            interp_ok(WindowsInitOnce { id })
-        })
+        this.lazy_sync_get_data(
+            &init_once,
+            init_offset,
+            || throw_ub_format!("`INIT_ONCE` can't be moved after first use"),
+            |this| {
+                // TODO: check that this is still all-zero.
+                let id = this.machine.sync.init_once_create();
+                interp_ok(WindowsInitOnce { id })
+            },
+        )
     }

     /// Returns `true` if we were successful, `false` if we would block.


@@ -0,0 +1,13 @@
+//@only-target: darwin
+
+use std::cell::UnsafeCell;
+
+fn main() {
+    let lock = UnsafeCell::new(libc::OS_UNFAIR_LOCK_INIT);
+    unsafe { libc::os_unfair_lock_lock(lock.get()) };
+
+    let lock = lock;
+    // This needs to either error or deadlock.
+    unsafe { libc::os_unfair_lock_lock(lock.get()) };
+    //~^ error: deadlock
+}


@@ -0,0 +1,13 @@
+error: deadlock: the evaluated program deadlocked
+  --> tests/fail-dep/concurrency/apple_os_unfair_lock_move_deadlock.rs:LL:CC
+   |
+LL |     unsafe { libc::os_unfair_lock_lock(lock.get()) };
+   | ^ the evaluated program deadlocked
+   |
+   = note: BACKTRACE:
+   = note: inside `main` at tests/fail-dep/concurrency/apple_os_unfair_lock_move_deadlock.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error


@@ -0,0 +1,16 @@
+//@error-in-other-file: deadlock
+//@normalize-stderr-test: "src/sys/.*\.rs" -> "$$FILE"
+//@normalize-stderr-test: "LL \| .*" -> "LL | $$CODE"
+//@normalize-stderr-test: "\| +\^+" -> "| ^"
+//@normalize-stderr-test: "\n *= note:.*" -> ""
+use std::mem;
+use std::sync::Mutex;
+
+fn main() {
+    let m = Mutex::new(0);
+    mem::forget(m.lock());
+    // Move the lock while it is "held" (really: leaked)
+    let m2 = m;
+    // Now try to acquire the lock again.
+    let _guard = m2.lock();
+}


@@ -0,0 +1,16 @@
+error: deadlock: the evaluated program deadlocked
+  --> RUSTLIB/std/$FILE:LL:CC
+   |
+LL | $CODE
+   | ^ the evaluated program deadlocked
+   |
+note: inside `main`
+  --> tests/fail/concurrency/mutex-leak-move-deadlock.rs:LL:CC
+   |
+LL | $CODE
+   | ^
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error


@@ -16,8 +16,8 @@ fn main() {
     // `os_unfair_lock`s can be moved and leaked.
     // In the real implementation, even moving it while locked is possible
-    // (and "forks" the lock, i.e. old and new location have independent wait queues);
-    // Miri behavior differs here and anyway none of this is documented.
+    // (and "forks" the lock, i.e. old and new location have independent wait queues).
+    // We only test the somewhat sane case of moving while unlocked that `std` plans to rely on.
     let lock = lock;
     let locked = unsafe { libc::os_unfair_lock_trylock(lock.get()) };
     assert!(locked);