Auto merge of #1441 - RalfJung:sync-cleanup, r=RalfJung
Synchronization primitive cleanup: make some methods infallible, move a bit more work into the platform-independent `sync.rs`, and fix a bug in rwlock unlocking.
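The recurring refactor in this diff is dropping the `InterpResult` wrapper from helper methods that can never actually fail. As a rough, self-contained sketch of that before/after shape (the `ThreadManager`/`ThreadId` stand-ins below are simplified placeholders for illustration, not Miri's real types from `src/thread.rs`):

```rust
// Hypothetical stand-ins for Miri's thread bookkeeping, used only to show the pattern.
#[derive(Clone, Copy, Debug, PartialEq)]
struct ThreadId(u32);

struct ThreadManager {
    active: ThreadId,
}

impl ThreadManager {
    // Before the cleanup: the getter was wrapped in a Result even though it cannot fail,
    // so every caller had to add `?` (or `.unwrap()`).
    fn get_active_thread_fallible(&self) -> Result<ThreadId, String> {
        Ok(self.active)
    }

    // After the cleanup: the same helper returns the value directly.
    fn get_active_thread(&self) -> ThreadId {
        self.active
    }
}

fn main() {
    let mgr = ThreadManager { active: ThreadId(0) };
    let t1 = mgr.get_active_thread_fallible().unwrap(); // old style: error plumbing for nothing
    let t2 = mgr.get_active_thread(); // new style: infallible
    assert_eq!(t1, t2);
}
```

The payoff shows up throughout the diff: call sites like `this.get_active_thread()?` lose their `?`, and trivial `Ok(())` / `Ok(...)` wrappers disappear.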
Commit: 4fd0aa316e
@@ -112,7 +112,7 @@ There is no way to list all the infinite things Miri cannot do, but the
 interpreter will explicitly tell you when it finds something unsupported:
 
 ```
-error: unsupported operation: Miri does not support threading
+error: unsupported operation: can't call foreign function: bind
 ...
 = help: this is likely not a bug in the program; it indicates that the program \
         performed an operation that the interpreter does not support
@@ -254,14 +254,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         "pthread_getspecific" => {
             let &[key] = check_arg_count(args)?;
             let key = this.force_bits(this.read_scalar(key)?.not_undef()?, key.layout.size)?;
-            let active_thread = this.get_active_thread()?;
+            let active_thread = this.get_active_thread();
             let ptr = this.machine.tls.load_tls(key, active_thread, this)?;
             this.write_scalar(ptr, dest)?;
         }
         "pthread_setspecific" => {
             let &[key, new_ptr] = check_arg_count(args)?;
             let key = this.force_bits(this.read_scalar(key)?.not_undef()?, key.layout.size)?;
-            let active_thread = this.get_active_thread()?;
+            let active_thread = this.get_active_thread();
             let new_ptr = this.read_scalar(new_ptr)?.not_undef()?;
             this.machine.tls.store_tls(key, active_thread, this.test_null(new_ptr)?)?;
@@ -98,7 +98,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             let dtor = this.read_scalar(dtor)?.not_undef()?;
             let dtor = this.memory.get_fn(dtor)?.as_instance()?;
             let data = this.read_scalar(data)?.not_undef()?;
-            let active_thread = this.get_active_thread()?;
+            let active_thread = this.get_active_thread();
             this.machine.tls.set_macos_thread_dtor(active_thread, dtor, data)?;
         }
@@ -163,14 +163,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         "TlsGetValue" => {
             let &[key] = check_arg_count(args)?;
             let key = u128::from(this.read_scalar(key)?.to_u32()?);
-            let active_thread = this.get_active_thread()?;
+            let active_thread = this.get_active_thread();
             let ptr = this.machine.tls.load_tls(key, active_thread, this)?;
             this.write_scalar(ptr, dest)?;
         }
         "TlsSetValue" => {
             let &[key, new_ptr] = check_arg_count(args)?;
             let key = u128::from(this.read_scalar(key)?.to_u32()?);
-            let active_thread = this.get_active_thread()?;
+            let active_thread = this.get_active_thread();
             let new_ptr = this.read_scalar(new_ptr)?.not_undef()?;
             this.machine.tls.store_tls(key, active_thread, this.test_null(new_ptr)?)?;
@@ -291,7 +291,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         if this.frame().instance.to_string().starts_with("std::sys::windows::") => {
             #[allow(non_snake_case)]
             let &[_lpCriticalSection] = check_arg_count(args)?;
-            assert_eq!(this.get_total_thread_count()?, 1, "concurrency on Windows not supported");
+            assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows not supported");
             // Nothing to do, not even a return value.
             // (Windows locks are reentrant, and we have only 1 thread,
             // so not doing any futher checks here is at least not incorrect.)
@@ -300,7 +300,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         if this.frame().instance.to_string().starts_with("std::sys::windows::") => {
             #[allow(non_snake_case)]
             let &[_lpCriticalSection] = check_arg_count(args)?;
-            assert_eq!(this.get_total_thread_count()?, 1, "concurrency on Windows not supported");
+            assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows not supported");
             // There is only one thread, so this always succeeds and returns TRUE
             this.write_scalar(Scalar::from_i32(1), dest)?;
         }
@@ -1,5 +1,6 @@
 use std::convert::TryInto;
 use std::time::{Duration, SystemTime};
+use std::ops::Not;
 
 use rustc_middle::ty::{layout::TyAndLayout, TyKind, TypeAndMut};
 use rustc_target::abi::{LayoutOf, Size};
@@ -284,15 +285,16 @@ fn reacquire_cond_mutex<'mir, 'tcx: 'mir>(
     thread: ThreadId,
     mutex: MutexId,
 ) -> InterpResult<'tcx> {
+    ecx.unblock_thread(thread);
     if ecx.mutex_is_locked(mutex) {
-        ecx.mutex_enqueue(mutex, thread);
+        ecx.mutex_enqueue_and_block(mutex, thread);
     } else {
         ecx.mutex_lock(mutex, thread);
-        ecx.unblock_thread(thread)?;
     }
     Ok(())
 }
 
+/// After a thread waiting on a condvar was signalled:
 /// Reacquire the conditional variable and remove the timeout callback if any
 /// was registered.
 fn post_cond_signal<'mir, 'tcx: 'mir>(
@@ -303,24 +305,25 @@ fn post_cond_signal<'mir, 'tcx: 'mir>(
     reacquire_cond_mutex(ecx, thread, mutex)?;
     // Waiting for the mutex is not included in the waiting time because we need
     // to acquire the mutex always even if we get a timeout.
-    ecx.unregister_timeout_callback_if_exists(thread)
+    ecx.unregister_timeout_callback_if_exists(thread);
+    Ok(())
 }
 
 /// Release the mutex associated with the condition variable because we are
 /// entering the waiting state.
-fn release_cond_mutex<'mir, 'tcx: 'mir>(
+fn release_cond_mutex_and_block<'mir, 'tcx: 'mir>(
     ecx: &mut MiriEvalContext<'mir, 'tcx>,
     active_thread: ThreadId,
     mutex: MutexId,
 ) -> InterpResult<'tcx> {
-    if let Some(old_locked_count) = ecx.mutex_unlock(mutex, active_thread)? {
+    if let Some(old_locked_count) = ecx.mutex_unlock(mutex, active_thread) {
         if old_locked_count != 1 {
             throw_unsup_format!("awaiting on a lock acquired multiple times is not supported");
         }
     } else {
         throw_ub_format!("awaiting on unlocked or owned by a different thread mutex");
     }
-    ecx.block_thread(active_thread)?;
+    ecx.block_thread(active_thread);
     Ok(())
 }
 
@@ -411,14 +414,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 
         let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
         let id = mutex_get_or_create_id(this, mutex_op)?;
-        let active_thread = this.get_active_thread()?;
+        let active_thread = this.get_active_thread();
 
         if this.mutex_is_locked(id) {
             let owner_thread = this.mutex_get_owner(id);
             if owner_thread != active_thread {
-                // Block the active thread.
-                this.block_thread(active_thread)?;
-                this.mutex_enqueue(id, active_thread);
+                // Enqueue the active thread.
+                this.mutex_enqueue_and_block(id, active_thread);
                 Ok(0)
             } else {
                 // Trying to acquire the same mutex again.
@@ -449,7 +451,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 
         let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
         let id = mutex_get_or_create_id(this, mutex_op)?;
-        let active_thread = this.get_active_thread()?;
+        let active_thread = this.get_active_thread();
 
         if this.mutex_is_locked(id) {
             let owner_thread = this.mutex_get_owner(id);
@@ -482,9 +484,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 
         let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
         let id = mutex_get_or_create_id(this, mutex_op)?;
-        let active_thread = this.get_active_thread()?;
+        let active_thread = this.get_active_thread();
 
-        if let Some(_old_locked_count) = this.mutex_unlock(id, active_thread)? {
+        if let Some(_old_locked_count) = this.mutex_unlock(id, active_thread) {
             // The mutex was locked by the current thread.
             Ok(0)
         } else {
@@ -528,10 +530,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         let this = self.eval_context_mut();
 
         let id = rwlock_get_or_create_id(this, rwlock_op)?;
-        let active_thread = this.get_active_thread()?;
+        let active_thread = this.get_active_thread();
 
         if this.rwlock_is_write_locked(id) {
-            this.rwlock_enqueue_and_block_reader(id, active_thread)?;
+            this.rwlock_enqueue_and_block_reader(id, active_thread);
             Ok(0)
         } else {
             this.rwlock_reader_lock(id, active_thread);
@@ -543,7 +545,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         let this = self.eval_context_mut();
 
         let id = rwlock_get_or_create_id(this, rwlock_op)?;
-        let active_thread = this.get_active_thread()?;
+        let active_thread = this.get_active_thread();
 
         if this.rwlock_is_write_locked(id) {
             this.eval_libc_i32("EBUSY")
@@ -557,7 +559,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         let this = self.eval_context_mut();
 
         let id = rwlock_get_or_create_id(this, rwlock_op)?;
-        let active_thread = this.get_active_thread()?;
+        let active_thread = this.get_active_thread();
 
         if this.rwlock_is_locked(id) {
             // Note: this will deadlock if the lock is already locked by this
@@ -565,14 +567,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             //
             // Relevant documentation:
             // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html
-            // An in depth discussion on this topic:
+            // An in-depth discussion on this topic:
             // https://github.com/rust-lang/rust/issues/53127
             //
             // FIXME: Detect and report the deadlock proactively. (We currently
             // report the deadlock only when no thread can continue execution,
             // but we could detect that this lock is already locked and report
             // an error.)
-            this.rwlock_enqueue_and_block_writer(id, active_thread)?;
+            this.rwlock_enqueue_and_block_writer(id, active_thread);
         } else {
             this.rwlock_writer_lock(id, active_thread);
         }
@@ -584,7 +586,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         let this = self.eval_context_mut();
 
         let id = rwlock_get_or_create_id(this, rwlock_op)?;
-        let active_thread = this.get_active_thread()?;
+        let active_thread = this.get_active_thread();
 
         if this.rwlock_is_locked(id) {
             this.eval_libc_i32("EBUSY")
@@ -598,17 +600,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         let this = self.eval_context_mut();
 
         let id = rwlock_get_or_create_id(this, rwlock_op)?;
-        let active_thread = this.get_active_thread()?;
+        let active_thread = this.get_active_thread();
 
         if this.rwlock_reader_unlock(id, active_thread) {
             // The thread was a reader.
-            if this.rwlock_is_locked(id) {
+            if this.rwlock_is_locked(id).not() {
                 // No more readers owning the lock. Give it to a writer if there
                 // is any.
-                if let Some(writer) = this.rwlock_dequeue_writer(id) {
-                    this.unblock_thread(writer)?;
-                    this.rwlock_writer_lock(id, writer);
-                }
+                this.rwlock_dequeue_and_lock_writer(id);
             }
             Ok(0)
         } else if Some(active_thread) == this.rwlock_writer_unlock(id) {
@@ -617,15 +616,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             // We are prioritizing writers here against the readers. As a
             // result, not only readers can starve writers, but also writers can
             // starve readers.
-            if let Some(writer) = this.rwlock_dequeue_writer(id) {
-                // Give the lock to another writer.
-                this.unblock_thread(writer)?;
-                this.rwlock_writer_lock(id, writer);
+            if this.rwlock_dequeue_and_lock_writer(id) {
+                // Someone got the write lock, nice.
             } else {
                 // Give the lock to all readers.
-                while let Some(reader) = this.rwlock_dequeue_reader(id) {
-                    this.unblock_thread(reader)?;
-                    this.rwlock_reader_lock(id, reader);
+                while this.rwlock_dequeue_and_lock_reader(id) {
+                    // Rinse and repeat.
                 }
             }
             Ok(0)
@@ -753,9 +749,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 
         let id = cond_get_or_create_id(this, cond_op)?;
         let mutex_id = mutex_get_or_create_id(this, mutex_op)?;
-        let active_thread = this.get_active_thread()?;
+        let active_thread = this.get_active_thread();
 
-        release_cond_mutex(this, active_thread, mutex_id)?;
+        release_cond_mutex_and_block(this, active_thread, mutex_id)?;
         this.condvar_wait(id, active_thread, mutex_id);
 
         Ok(0)
@@ -774,9 +770,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 
         let id = cond_get_or_create_id(this, cond_op)?;
         let mutex_id = mutex_get_or_create_id(this, mutex_op)?;
-        let active_thread = this.get_active_thread()?;
+        let active_thread = this.get_active_thread();
 
-        release_cond_mutex(this, active_thread, mutex_id)?;
+        release_cond_mutex_and_block(this, active_thread, mutex_id)?;
         this.condvar_wait(id, active_thread, mutex_id);
 
         // We return success for now and override it in the timeout callback.
@@ -823,7 +819,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 
                 Ok(())
             }),
-        )?;
+        );
 
         Ok(())
     }
@@ -833,7 +829,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 
         let id = cond_get_or_create_id(this, cond_op)?;
         if this.condvar_is_awaited(id) {
-            throw_ub_format!("destroyed an awaited conditional variable");
+            throw_ub_format!("destroying an awaited conditional variable");
         }
         cond_set_id(this, cond_op, ScalarMaybeUninit::Uninit)?;
         cond_set_clock_id(this, cond_op, ScalarMaybeUninit::Uninit)?;
@@ -19,9 +19,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             For example, Miri does not detect data races yet.",
         );
 
-        let new_thread_id = this.create_thread()?;
+        let new_thread_id = this.create_thread();
         // Also switch to new thread so that we can push the first stackframe.
-        let old_thread_id = this.set_active_thread(new_thread_id)?;
+        let old_thread_id = this.set_active_thread(new_thread_id);
 
         let thread_info_place = this.deref_operand(thread)?;
         this.write_scalar(
@@ -47,7 +47,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             StackPopCleanup::None { cleanup: true },
         )?;
 
-        this.set_active_thread(old_thread_id)?;
+        this.set_active_thread(old_thread_id);
 
         Ok(0)
     }
@@ -82,7 +82,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     fn pthread_self(&mut self, dest: PlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
 
-        let thread_id = this.get_active_thread()?;
+        let thread_id = this.get_active_thread();
         this.write_scalar(Scalar::from_uint(thread_id.to_u32(), dest.layout.size), dest)
     }
 
@@ -105,10 +105,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             // byte. Since `read_c_str` returns the string without the null
             // byte, we need to truncate to 15.
             name.truncate(15);
-            this.set_active_thread_name(name)?;
+            this.set_active_thread_name(name);
         } else if option == this.eval_libc_i32("PR_GET_NAME")? {
             let address = this.read_scalar(arg2)?.not_undef()?;
-            let mut name = this.get_active_thread_name()?.to_vec();
+            let mut name = this.get_active_thread_name().to_vec();
             name.push(0u8);
             assert!(name.len() <= 16);
             this.memory.write_bytes(address, name)?;
@@ -127,7 +127,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         this.assert_target_os("macos", "pthread_setname_np");
 
         let name = this.memory.read_c_str(name)?.to_owned();
-        this.set_active_thread_name(name)?;
+        this.set_active_thread_name(name);
 
         Ok(())
     }
@@ -135,7 +135,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     fn sched_yield(&mut self) -> InterpResult<'tcx, i32> {
         let this = self.eval_context_mut();
 
-        this.yield_active_thread()?;
+        this.yield_active_thread();
 
         Ok(0)
     }
@@ -232,8 +232,8 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     /// yet.
     fn schedule_windows_tls_dtors(&mut self) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        let active_thread = this.get_active_thread()?;
-        assert_eq!(this.get_total_thread_count()?, 1, "concurrency on Windows not supported");
+        let active_thread = this.get_active_thread();
+        assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows not supported");
         // Windows has a special magic linker section that is run on certain events.
         // Instead of searching for that section and supporting arbitrary hooks in there
         // (that would be basically https://github.com/rust-lang/miri/issues/450),
@@ -252,7 +252,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             StackPopCleanup::None { cleanup: true },
         )?;
 
-        this.enable_thread(active_thread)?;
+        this.enable_thread(active_thread);
         Ok(())
     }
 
@@ -262,7 +262,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     /// Note: It is safe to call this function also on other Unixes.
     fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
         let this = self.eval_context_mut();
-        let thread_id = this.get_active_thread()?;
+        let thread_id = this.get_active_thread();
         if let Some((instance, data)) = this.machine.tls.macos_thread_dtors.remove(&thread_id) {
             trace!("Running macos dtor {:?} on {:?} at {:?}", instance, data, thread_id);
 
@@ -278,7 +278,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             // we just scheduled. Since we deleted the destructor, it is
             // guaranteed that we will schedule it again. The `dtors_running`
             // flag will prevent the code from adding the destructor again.
-            this.enable_thread(thread_id)?;
+            this.enable_thread(thread_id);
             Ok(true)
         } else {
             Ok(false)
@@ -289,9 +289,9 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     /// a destructor to schedule, and `false` otherwise.
     fn schedule_next_pthread_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
         let this = self.eval_context_mut();
-        let active_thread = this.get_active_thread()?;
+        let active_thread = this.get_active_thread();
 
-        assert!(this.has_terminated(active_thread)?, "running TLS dtors for non-terminated thread");
+        assert!(this.has_terminated(active_thread), "running TLS dtors for non-terminated thread");
         // Fetch next dtor after `key`.
         let last_key = this.machine.tls.dtors_running[&active_thread].last_dtor_key.clone();
         let dtor = match this.machine.tls.fetch_tls_dtor(last_key, active_thread) {
@@ -314,7 +314,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             StackPopCleanup::None { cleanup: true },
         )?;
 
-        this.enable_thread(active_thread)?;
+        this.enable_thread(active_thread);
         return Ok(true);
     }
     this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = None;
@@ -340,7 +340,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     /// https://github.com/rust-lang/rust/issues/28129.
     fn schedule_next_tls_dtor_for_active_thread(&mut self) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        let active_thread = this.get_active_thread()?;
+        let active_thread = this.get_active_thread();
 
         if !this.machine.tls.set_dtors_running_for_thread(active_thread) {
             // This is the first time we got asked to schedule a destructor. The
src/sync.rs (74 changed lines)
@@ -145,25 +145,22 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         mutex.lock_count = mutex.lock_count.checked_add(1).unwrap();
     }
 
-    /// Try unlocking by decreasing the lock count and returning the old owner
-    /// and the old lock count. If the lock count reaches 0, release the lock
-    /// and potentially give to a new owner. If the lock was not locked, return
-    /// `None`.
-    ///
-    /// Note: It is the caller's responsibility to check that the thread that
-    /// unlocked the lock actually is the same one, which owned it.
+    /// Try unlocking by decreasing the lock count and returning the old lock
+    /// count. If the lock count reaches 0, release the lock and potentially
+    /// give to a new owner. If the lock was not locked by `expected_owner`,
+    /// return `None`.
     fn mutex_unlock(
         &mut self,
         id: MutexId,
         expected_owner: ThreadId,
-    ) -> InterpResult<'tcx, Option<usize>> {
+    ) -> Option<usize> {
         let this = self.eval_context_mut();
         let mutex = &mut this.machine.threads.sync.mutexes[id];
         if let Some(current_owner) = mutex.owner {
             // Mutex is locked.
             if current_owner != expected_owner {
                 // Only the owner can unlock the mutex.
-                return Ok(None);
+                return None;
             }
             let old_lock_count = mutex.lock_count;
             mutex.lock_count = old_lock_count
@@ -173,31 +170,36 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
                 mutex.owner = None;
                 // The mutex is completely unlocked. Try transfering ownership
                 // to another thread.
-                if let Some(new_owner) = this.mutex_dequeue(id) {
-                    this.mutex_lock(id, new_owner);
-                    this.unblock_thread(new_owner)?;
-                }
+                this.mutex_dequeue_and_lock(id);
             }
-            Ok(Some(old_lock_count))
+            Some(old_lock_count)
         } else {
             // Mutex is unlocked.
-            Ok(None)
+            None
         }
     }
 
     #[inline]
-    /// Put the thread into the queue waiting for the lock.
-    fn mutex_enqueue(&mut self, id: MutexId, thread: ThreadId) {
+    /// Put the thread into the queue waiting for the mutex.
+    fn mutex_enqueue_and_block(&mut self, id: MutexId, thread: ThreadId) {
         let this = self.eval_context_mut();
         assert!(this.mutex_is_locked(id), "queing on unlocked mutex");
         this.machine.threads.sync.mutexes[id].queue.push_back(thread);
+        this.block_thread(thread);
     }
 
     #[inline]
-    /// Take a thread out of the queue waiting for the lock.
-    fn mutex_dequeue(&mut self, id: MutexId) -> Option<ThreadId> {
+    /// Take a thread out of the queue waiting for the mutex, and lock
+    /// the mutex for it. Returns `true` if some thread has the mutex now.
+    fn mutex_dequeue_and_lock(&mut self, id: MutexId) -> bool {
        let this = self.eval_context_mut();
-        this.machine.threads.sync.mutexes[id].queue.pop_front()
+        if let Some(thread) = this.machine.threads.sync.mutexes[id].queue.pop_front() {
+            this.unblock_thread(thread);
+            this.mutex_lock(id, thread);
+            true
+        } else {
+            false
+        }
     }
 
     #[inline]
@@ -255,25 +257,32 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         &mut self,
         id: RwLockId,
         reader: ThreadId,
-    ) -> InterpResult<'tcx> {
+    ) {
         let this = self.eval_context_mut();
         assert!(this.rwlock_is_write_locked(id), "queueing on not write locked lock");
         this.machine.threads.sync.rwlocks[id].reader_queue.push_back(reader);
-        this.block_thread(reader)
+        this.block_thread(reader);
     }
 
     #[inline]
-    /// Take a reader out the queue waiting for the lock.
-    fn rwlock_dequeue_reader(&mut self, id: RwLockId) -> Option<ThreadId> {
+    /// Returns `true` if some thread got the rwlock.
+    fn rwlock_dequeue_and_lock_reader(&mut self, id: RwLockId) -> bool {
         let this = self.eval_context_mut();
-        this.machine.threads.sync.rwlocks[id].reader_queue.pop_front()
+        if let Some(reader) = this.machine.threads.sync.rwlocks[id].reader_queue.pop_front() {
+            this.unblock_thread(reader);
+            this.rwlock_reader_lock(id, reader);
+            true
+        } else {
+            false
+        }
     }
 
     #[inline]
     /// Lock by setting the writer that owns the lock.
     fn rwlock_writer_lock(&mut self, id: RwLockId, writer: ThreadId) {
         let this = self.eval_context_mut();
-        assert!(!this.rwlock_is_locked(id), "the lock is already locked");
+        assert!(!this.rwlock_is_locked(id), "the rwlock is already locked");
         this.machine.threads.sync.rwlocks[id].writer = Some(writer);
     }
 
@@ -290,18 +299,25 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         &mut self,
         id: RwLockId,
         writer: ThreadId,
-    ) -> InterpResult<'tcx> {
+    ) {
         let this = self.eval_context_mut();
         assert!(this.rwlock_is_locked(id), "queueing on unlocked lock");
         this.machine.threads.sync.rwlocks[id].writer_queue.push_back(writer);
-        this.block_thread(writer)
+        this.block_thread(writer);
     }
 
     #[inline]
-    /// Take the writer out the queue waiting for the lock.
-    fn rwlock_dequeue_writer(&mut self, id: RwLockId) -> Option<ThreadId> {
+    /// Returns `true` if some thread got the rwlock.
+    fn rwlock_dequeue_and_lock_writer(&mut self, id: RwLockId) -> bool {
        let this = self.eval_context_mut();
-        this.machine.threads.sync.rwlocks[id].writer_queue.pop_front()
+        if let Some(writer) = this.machine.threads.sync.rwlocks[id].writer_queue.pop_front() {
+            this.unblock_thread(writer);
+            this.rwlock_writer_lock(id, writer);
+            true
+        } else {
+            false
+        }
     }
 
     #[inline]
@@ -581,9 +581,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     }
 
     #[inline]
-    fn create_thread(&mut self) -> InterpResult<'tcx, ThreadId> {
+    fn create_thread(&mut self) -> ThreadId {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.create_thread())
+        this.machine.threads.create_thread()
     }
 
     #[inline]
@@ -599,34 +599,33 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     }
 
     #[inline]
-    fn set_active_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx, ThreadId> {
+    fn set_active_thread(&mut self, thread_id: ThreadId) -> ThreadId {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.set_active_thread_id(thread_id))
+        this.machine.threads.set_active_thread_id(thread_id)
     }
 
     #[inline]
-    fn get_active_thread(&self) -> InterpResult<'tcx, ThreadId> {
+    fn get_active_thread(&self) -> ThreadId {
         let this = self.eval_context_ref();
-        Ok(this.machine.threads.get_active_thread_id())
+        this.machine.threads.get_active_thread_id()
     }
 
     #[inline]
-    fn get_total_thread_count(&self) -> InterpResult<'tcx, usize> {
+    fn get_total_thread_count(&self) -> usize {
         let this = self.eval_context_ref();
-        Ok(this.machine.threads.get_total_thread_count())
+        this.machine.threads.get_total_thread_count()
    }
 
     #[inline]
-    fn has_terminated(&self, thread_id: ThreadId) -> InterpResult<'tcx, bool> {
+    fn has_terminated(&self, thread_id: ThreadId) -> bool {
         let this = self.eval_context_ref();
-        Ok(this.machine.threads.has_terminated(thread_id))
+        this.machine.threads.has_terminated(thread_id)
     }
 
     #[inline]
-    fn enable_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx> {
+    fn enable_thread(&mut self, thread_id: ThreadId) {
         let this = self.eval_context_mut();
         this.machine.threads.enable_thread(thread_id);
-        Ok(())
     }
 
     #[inline]
@@ -642,37 +641,36 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     }
 
     #[inline]
-    fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) -> InterpResult<'tcx, ()> {
+    fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.set_thread_name(new_thread_name))
+        this.machine.threads.set_thread_name(new_thread_name);
     }
 
     #[inline]
-    fn get_active_thread_name<'c>(&'c self) -> InterpResult<'tcx, &'c [u8]>
+    fn get_active_thread_name<'c>(&'c self) -> &'c [u8]
     where
         'mir: 'c,
     {
         let this = self.eval_context_ref();
-        Ok(this.machine.threads.get_thread_name())
+        this.machine.threads.get_thread_name()
     }
 
     #[inline]
-    fn block_thread(&mut self, thread: ThreadId) -> InterpResult<'tcx> {
+    fn block_thread(&mut self, thread: ThreadId) {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.block_thread(thread))
+        this.machine.threads.block_thread(thread);
    }
 
     #[inline]
-    fn unblock_thread(&mut self, thread: ThreadId) -> InterpResult<'tcx> {
+    fn unblock_thread(&mut self, thread: ThreadId) {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.unblock_thread(thread))
+        this.machine.threads.unblock_thread(thread);
     }
 
     #[inline]
-    fn yield_active_thread(&mut self) -> InterpResult<'tcx> {
+    fn yield_active_thread(&mut self) {
         let this = self.eval_context_mut();
         this.machine.threads.yield_active_thread();
-        Ok(())
     }
 
     #[inline]
@@ -681,17 +679,15 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         thread: ThreadId,
         call_time: Time,
         callback: TimeoutCallback<'mir, 'tcx>,
-    ) -> InterpResult<'tcx> {
+    ) {
         let this = self.eval_context_mut();
         this.machine.threads.register_timeout_callback(thread, call_time, callback);
-        Ok(())
     }
 
     #[inline]
-    fn unregister_timeout_callback_if_exists(&mut self, thread: ThreadId) -> InterpResult<'tcx> {
+    fn unregister_timeout_callback_if_exists(&mut self, thread: ThreadId) {
         let this = self.eval_context_mut();
         this.machine.threads.unregister_timeout_callback_if_exists(thread);
-        Ok(())
     }
 
     /// Execute a timeout callback on the callback's thread.
@@ -706,9 +702,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         //    thread.
         // 2. Make the scheduler the only place that can change the active
         //    thread.
-        let old_thread = this.set_active_thread(thread)?;
+        let old_thread = this.set_active_thread(thread);
         callback(this)?;
-        this.set_active_thread(old_thread)?;
+        this.set_active_thread(old_thread);
         Ok(())
     }
 
@@ -267,6 +267,51 @@ fn check_once() {
     }
 }
 
+fn check_rwlock_unlock_bug1() {
+    // There was a bug where when un-read-locking an rwlock that still has other
+    // readers waiting, we'd accidentally also let a writer in.
+    // That caused an ICE.
+    let l = Arc::new(RwLock::new(0));
+
+    let r1 = l.read().unwrap();
+    let r2 = l.read().unwrap();
+
+    // Make a waiting writer.
+    let l2 = l.clone();
+    thread::spawn(move || {
+        let mut w = l2.write().unwrap();
+        *w += 1;
+    });
+    thread::yield_now();
+
+    drop(r1);
+    assert_eq!(*r2, 0);
+    thread::yield_now();
+    thread::yield_now();
+    thread::yield_now();
+    assert_eq!(*r2, 0);
+    drop(r2);
+}
+
+fn check_rwlock_unlock_bug2() {
+    // There was a bug where when un-read-locking an rwlock by letting the last reader leave,
+    // we'd forget to wake up a writer.
+    // That meant the writer thread could never run again.
+    let l = Arc::new(RwLock::new(0));
+
+    let r = l.read().unwrap();
+
+    // Make a waiting writer.
+    let l2 = l.clone();
+    let h = thread::spawn(move || {
+        let _w = l2.write().unwrap();
+    });
+    thread::yield_now();
+
+    drop(r);
+    h.join().unwrap();
+}
+
 fn main() {
     check_barriers();
     check_conditional_variables_notify_one();
@@ -280,4 +325,6 @@ fn main() {
     multiple_send();
     send_on_sync();
     check_once();
+    check_rwlock_unlock_bug1();
+    check_rwlock_unlock_bug2();
 }