sync cleanup: mark infallible ops as such; consistently combine en/dequeue with (un)block; comments
parent 508371ac5a
commit 7589bc7ba9
@@ -254,14 +254,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 "pthread_getspecific" => {
     let &[key] = check_arg_count(args)?;
     let key = this.force_bits(this.read_scalar(key)?.not_undef()?, key.layout.size)?;
-    let active_thread = this.get_active_thread()?;
+    let active_thread = this.get_active_thread();
     let ptr = this.machine.tls.load_tls(key, active_thread, this)?;
     this.write_scalar(ptr, dest)?;
 }
 "pthread_setspecific" => {
     let &[key, new_ptr] = check_arg_count(args)?;
     let key = this.force_bits(this.read_scalar(key)?.not_undef()?, key.layout.size)?;
-    let active_thread = this.get_active_thread()?;
+    let active_thread = this.get_active_thread();
     let new_ptr = this.read_scalar(new_ptr)?.not_undef()?;
     this.machine.tls.store_tls(key, active_thread, this.test_null(new_ptr)?)?;
@@ -98,7 +98,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     let dtor = this.read_scalar(dtor)?.not_undef()?;
     let dtor = this.memory.get_fn(dtor)?.as_instance()?;
     let data = this.read_scalar(data)?.not_undef()?;
-    let active_thread = this.get_active_thread()?;
+    let active_thread = this.get_active_thread();
     this.machine.tls.set_macos_thread_dtor(active_thread, dtor, data)?;
 }
@@ -163,14 +163,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 "TlsGetValue" => {
     let &[key] = check_arg_count(args)?;
     let key = u128::from(this.read_scalar(key)?.to_u32()?);
-    let active_thread = this.get_active_thread()?;
+    let active_thread = this.get_active_thread();
     let ptr = this.machine.tls.load_tls(key, active_thread, this)?;
     this.write_scalar(ptr, dest)?;
 }
 "TlsSetValue" => {
     let &[key, new_ptr] = check_arg_count(args)?;
     let key = u128::from(this.read_scalar(key)?.to_u32()?);
-    let active_thread = this.get_active_thread()?;
+    let active_thread = this.get_active_thread();
     let new_ptr = this.read_scalar(new_ptr)?.not_undef()?;
     this.machine.tls.store_tls(key, active_thread, this.test_null(new_ptr)?)?;
@@ -291,7 +291,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 if this.frame().instance.to_string().starts_with("std::sys::windows::") => {
     #[allow(non_snake_case)]
     let &[_lpCriticalSection] = check_arg_count(args)?;
-    assert_eq!(this.get_total_thread_count()?, 1, "concurrency on Windows not supported");
+    assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows not supported");
     // Nothing to do, not even a return value.
     // (Windows locks are reentrant, and we have only 1 thread,
     // so not doing any further checks here is at least not incorrect.)
@@ -300,7 +300,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 if this.frame().instance.to_string().starts_with("std::sys::windows::") => {
     #[allow(non_snake_case)]
     let &[_lpCriticalSection] = check_arg_count(args)?;
-    assert_eq!(this.get_total_thread_count()?, 1, "concurrency on Windows not supported");
+    assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows not supported");
     // There is only one thread, so this always succeeds and returns TRUE
     this.write_scalar(Scalar::from_i32(1), dest)?;
 }
@@ -284,15 +284,16 @@ fn reacquire_cond_mutex<'mir, 'tcx: 'mir>(
     thread: ThreadId,
     mutex: MutexId,
 ) -> InterpResult<'tcx> {
+    ecx.unblock_thread(thread);
     if ecx.mutex_is_locked(mutex) {
-        ecx.mutex_enqueue(mutex, thread);
+        ecx.mutex_enqueue_and_block(mutex, thread);
     } else {
         ecx.mutex_lock(mutex, thread);
-        ecx.unblock_thread(thread)?;
     }
     Ok(())
 }

+/// After a thread waiting on a condvar was signalled:
 /// Reacquire the conditional variable and remove the timeout callback if any
 /// was registered.
 fn post_cond_signal<'mir, 'tcx: 'mir>(
@@ -303,12 +304,13 @@ fn post_cond_signal<'mir, 'tcx: 'mir>(
     reacquire_cond_mutex(ecx, thread, mutex)?;
     // Waiting for the mutex is not included in the waiting time because we need
     // to acquire the mutex always even if we get a timeout.
-    ecx.unregister_timeout_callback_if_exists(thread)
+    ecx.unregister_timeout_callback_if_exists(thread);
+    Ok(())
 }

 /// Release the mutex associated with the condition variable because we are
 /// entering the waiting state.
-fn release_cond_mutex<'mir, 'tcx: 'mir>(
+fn release_cond_mutex_and_block<'mir, 'tcx: 'mir>(
     ecx: &mut MiriEvalContext<'mir, 'tcx>,
     active_thread: ThreadId,
     mutex: MutexId,
@@ -320,7 +322,7 @@ fn release_cond_mutex<'mir, 'tcx: 'mir>(
     } else {
         throw_ub_format!("awaiting on unlocked or owned by a different thread mutex");
     }
-    ecx.block_thread(active_thread)?;
+    ecx.block_thread(active_thread);
     Ok(())
 }
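Together, release_cond_mutex_and_block and reacquire_cond_mutex implement the usual condition-variable discipline: give up the mutex and block before waiting, and after the signal reacquire the mutex, possibly blocking again in the mutex queue. A standalone sketch of that ordering, with simplified stand-in state rather than Miri's types:

use std::collections::VecDeque;

// Simplified stand-ins, for illustration only (not Miri's types).
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct ThreadId(u32);

#[derive(Default)]
struct Sim {
    mutex_owner: Option<ThreadId>,
    mutex_queue: VecDeque<ThreadId>,
    condvar_waiters: VecDeque<ThreadId>,
    blocked: Vec<ThreadId>,
}

impl Sim {
    // Step 1 of a cond wait: give up the mutex and block.
    fn release_cond_mutex_and_block(&mut self, thread: ThreadId) {
        assert_eq!(self.mutex_owner, Some(thread), "waiting without owning the mutex");
        self.mutex_owner = None;
        self.blocked.push(thread);
        self.condvar_waiters.push_back(thread);
    }

    // Step 2, after a signal: wake up, then reacquire the mutex,
    // re-blocking in the mutex queue if someone else holds it.
    fn reacquire_cond_mutex(&mut self, thread: ThreadId) {
        self.blocked.retain(|&t| t != thread); // unblock_thread
        if self.mutex_owner.is_some() {
            self.mutex_queue.push_back(thread); // mutex_enqueue_and_block
            self.blocked.push(thread);
        } else {
            self.mutex_owner = Some(thread); // mutex_lock
        }
    }
}

fn main() {
    let (t1, t2) = (ThreadId(1), ThreadId(2));
    let mut sim = Sim { mutex_owner: Some(t1), ..Default::default() };
    sim.release_cond_mutex_and_block(t1);
    sim.mutex_owner = Some(t2); // another thread grabs the mutex and signals
    let woken = sim.condvar_waiters.pop_front().unwrap();
    sim.reacquire_cond_mutex(woken);
    // t1 is queued on the mutex again until t2 unlocks.
    assert_eq!(sim.mutex_queue.front(), Some(&t1));
}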
@@ -411,14 +413,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx

     let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
     let id = mutex_get_or_create_id(this, mutex_op)?;
-    let active_thread = this.get_active_thread()?;
+    let active_thread = this.get_active_thread();

     if this.mutex_is_locked(id) {
         let owner_thread = this.mutex_get_owner(id);
         if owner_thread != active_thread {
-            // Block the active thread.
-            this.block_thread(active_thread)?;
-            this.mutex_enqueue(id, active_thread);
+            // Enqueue the active thread.
+            this.mutex_enqueue_and_block(id, active_thread);
             Ok(0)
         } else {
             // Trying to acquire the same mutex again.
@@ -449,7 +450,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx

     let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
     let id = mutex_get_or_create_id(this, mutex_op)?;
-    let active_thread = this.get_active_thread()?;
+    let active_thread = this.get_active_thread();

     if this.mutex_is_locked(id) {
         let owner_thread = this.mutex_get_owner(id);
@@ -482,7 +483,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx

     let kind = mutex_get_kind(this, mutex_op)?.not_undef()?;
     let id = mutex_get_or_create_id(this, mutex_op)?;
-    let active_thread = this.get_active_thread()?;
+    let active_thread = this.get_active_thread();

     if let Some(_old_locked_count) = this.mutex_unlock(id, active_thread)? {
         // The mutex was locked by the current thread.
@@ -528,10 +529,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     let this = self.eval_context_mut();

     let id = rwlock_get_or_create_id(this, rwlock_op)?;
-    let active_thread = this.get_active_thread()?;
+    let active_thread = this.get_active_thread();

     if this.rwlock_is_write_locked(id) {
-        this.rwlock_enqueue_and_block_reader(id, active_thread)?;
+        this.rwlock_enqueue_and_block_reader(id, active_thread);
         Ok(0)
     } else {
         this.rwlock_reader_lock(id, active_thread);
@@ -543,7 +544,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     let this = self.eval_context_mut();

     let id = rwlock_get_or_create_id(this, rwlock_op)?;
-    let active_thread = this.get_active_thread()?;
+    let active_thread = this.get_active_thread();

     if this.rwlock_is_write_locked(id) {
         this.eval_libc_i32("EBUSY")
@@ -557,7 +558,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     let this = self.eval_context_mut();

     let id = rwlock_get_or_create_id(this, rwlock_op)?;
-    let active_thread = this.get_active_thread()?;
+    let active_thread = this.get_active_thread();

     if this.rwlock_is_locked(id) {
         // Note: this will deadlock if the lock is already locked by this
@@ -565,14 +566,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         //
         // Relevant documentation:
         // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html
-        // An in depth discussion on this topic:
+        // An in-depth discussion on this topic:
         // https://github.com/rust-lang/rust/issues/53127
         //
         // FIXME: Detect and report the deadlock proactively. (We currently
         // report the deadlock only when no thread can continue execution,
         // but we could detect that this lock is already locked and report
         // an error.)
-        this.rwlock_enqueue_and_block_writer(id, active_thread)?;
+        this.rwlock_enqueue_and_block_writer(id, active_thread);
     } else {
         this.rwlock_writer_lock(id, active_thread);
     }
@@ -584,7 +585,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     let this = self.eval_context_mut();

     let id = rwlock_get_or_create_id(this, rwlock_op)?;
-    let active_thread = this.get_active_thread()?;
+    let active_thread = this.get_active_thread();

     if this.rwlock_is_locked(id) {
         this.eval_libc_i32("EBUSY")
@@ -598,15 +599,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     let this = self.eval_context_mut();

     let id = rwlock_get_or_create_id(this, rwlock_op)?;
-    let active_thread = this.get_active_thread()?;
+    let active_thread = this.get_active_thread();

     if this.rwlock_reader_unlock(id, active_thread) {
         // The thread was a reader.
         if this.rwlock_is_locked(id) {
             // No more readers owning the lock. Give it to a writer if there
             // is any.
-            if let Some(writer) = this.rwlock_dequeue_writer(id) {
-                this.unblock_thread(writer)?;
+            if let Some(writer) = this.rwlock_dequeue_and_unblock_writer(id) {
                 this.rwlock_writer_lock(id, writer);
             }
         }
@@ -617,14 +617,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         // We are prioritizing writers here against the readers. As a
         // result, not only readers can starve writers, but also writers can
         // starve readers.
-        if let Some(writer) = this.rwlock_dequeue_writer(id) {
+        if let Some(writer) = this.rwlock_dequeue_and_unblock_writer(id) {
             // Give the lock to another writer.
-            this.unblock_thread(writer)?;
             this.rwlock_writer_lock(id, writer);
         } else {
             // Give the lock to all readers.
-            while let Some(reader) = this.rwlock_dequeue_reader(id) {
-                this.unblock_thread(reader)?;
+            while let Some(reader) = this.rwlock_dequeue_and_unblock_reader(id) {
                 this.rwlock_reader_lock(id, reader);
             }
         }
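The handoff policy in this hunk prefers a queued writer over any queued readers; only if no writer is waiting are all queued readers admitted at once. As the comment in the diff notes, either side can starve the other. A standalone sketch of just that handoff decision, with simplified stand-in types rather than Miri's:

use std::collections::VecDeque;

// Simplified stand-ins for illustration; not Miri's types.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct ThreadId(u32);

#[derive(Default)]
struct RwLockState {
    writer: Option<ThreadId>,
    readers: Vec<ThreadId>,
    writer_queue: VecDeque<ThreadId>,
    reader_queue: VecDeque<ThreadId>,
}

impl RwLockState {
    /// Handoff on full unlock: writers are preferred, which means readers
    /// can starve writers and writers can starve readers.
    fn hand_off(&mut self) {
        if let Some(writer) = self.writer_queue.pop_front() {
            // Give the lock to one writer.
            self.writer = Some(writer);
        } else {
            // Give the lock to all queued readers at once.
            while let Some(reader) = self.reader_queue.pop_front() {
                self.readers.push(reader);
            }
        }
    }
}

fn main() {
    let mut lock = RwLockState::default();
    lock.reader_queue.extend([ThreadId(1), ThreadId(2)]);
    lock.writer_queue.push_back(ThreadId(3));
    lock.hand_off();
    // The writer wins even though the readers queued first.
    assert_eq!(lock.writer, Some(ThreadId(3)));
    assert!(lock.readers.is_empty());
}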
@@ -753,9 +751,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx

     let id = cond_get_or_create_id(this, cond_op)?;
     let mutex_id = mutex_get_or_create_id(this, mutex_op)?;
-    let active_thread = this.get_active_thread()?;
+    let active_thread = this.get_active_thread();

-    release_cond_mutex(this, active_thread, mutex_id)?;
+    release_cond_mutex_and_block(this, active_thread, mutex_id)?;
     this.condvar_wait(id, active_thread, mutex_id);

     Ok(0)
@@ -774,9 +772,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx

     let id = cond_get_or_create_id(this, cond_op)?;
     let mutex_id = mutex_get_or_create_id(this, mutex_op)?;
-    let active_thread = this.get_active_thread()?;
+    let active_thread = this.get_active_thread();

-    release_cond_mutex(this, active_thread, mutex_id)?;
+    release_cond_mutex_and_block(this, active_thread, mutex_id)?;
     this.condvar_wait(id, active_thread, mutex_id);

     // We return success for now and override it in the timeout callback.
@@ -823,7 +821,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             Ok(())
         }),
-    )?;
+    );

     Ok(())
 }
@@ -833,7 +831,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx

     let id = cond_get_or_create_id(this, cond_op)?;
     if this.condvar_is_awaited(id) {
-        throw_ub_format!("destroyed an awaited conditional variable");
+        throw_ub_format!("destroying an awaited conditional variable");
     }
     cond_set_id(this, cond_op, ScalarMaybeUninit::Uninit)?;
     cond_set_clock_id(this, cond_op, ScalarMaybeUninit::Uninit)?;
@@ -19,9 +19,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         For example, Miri does not detect data races yet.",
     );

-    let new_thread_id = this.create_thread()?;
+    let new_thread_id = this.create_thread();
     // Also switch to new thread so that we can push the first stackframe.
-    let old_thread_id = this.set_active_thread(new_thread_id)?;
+    let old_thread_id = this.set_active_thread(new_thread_id);

     let thread_info_place = this.deref_operand(thread)?;
     this.write_scalar(
@@ -47,7 +47,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         StackPopCleanup::None { cleanup: true },
     )?;

-    this.set_active_thread(old_thread_id)?;
+    this.set_active_thread(old_thread_id);

     Ok(0)
 }
@@ -82,7 +82,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 fn pthread_self(&mut self, dest: PlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
     let this = self.eval_context_mut();

-    let thread_id = this.get_active_thread()?;
+    let thread_id = this.get_active_thread();
     this.write_scalar(Scalar::from_uint(thread_id.to_u32(), dest.layout.size), dest)
 }
@@ -105,10 +105,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     // byte. Since `read_c_str` returns the string without the null
     // byte, we need to truncate to 15.
     name.truncate(15);
-    this.set_active_thread_name(name)?;
+    this.set_active_thread_name(name);
 } else if option == this.eval_libc_i32("PR_GET_NAME")? {
     let address = this.read_scalar(arg2)?.not_undef()?;
-    let mut name = this.get_active_thread_name()?.to_vec();
+    let mut name = this.get_active_thread_name().to_vec();
     name.push(0u8);
     assert!(name.len() <= 16);
     this.memory.write_bytes(address, name)?;
@@ -127,7 +127,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     this.assert_target_os("macos", "pthread_setname_np");

     let name = this.memory.read_c_str(name)?.to_owned();
-    this.set_active_thread_name(name)?;
+    this.set_active_thread_name(name);

     Ok(())
 }
@@ -135,7 +135,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 fn sched_yield(&mut self) -> InterpResult<'tcx, i32> {
     let this = self.eval_context_mut();

-    this.yield_active_thread()?;
+    this.yield_active_thread();

     Ok(0)
 }
@@ -232,8 +232,8 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     /// yet.
     fn schedule_windows_tls_dtors(&mut self) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        let active_thread = this.get_active_thread()?;
-        assert_eq!(this.get_total_thread_count()?, 1, "concurrency on Windows not supported");
+        let active_thread = this.get_active_thread();
+        assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows not supported");
         // Windows has a special magic linker section that is run on certain events.
         // Instead of searching for that section and supporting arbitrary hooks in there
         // (that would be basically https://github.com/rust-lang/miri/issues/450),
@@ -252,7 +252,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             StackPopCleanup::None { cleanup: true },
         )?;

-        this.enable_thread(active_thread)?;
+        this.enable_thread(active_thread);
         Ok(())
     }
@@ -262,7 +262,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     /// Note: It is safe to call this function also on other Unixes.
     fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
         let this = self.eval_context_mut();
-        let thread_id = this.get_active_thread()?;
+        let thread_id = this.get_active_thread();
         if let Some((instance, data)) = this.machine.tls.macos_thread_dtors.remove(&thread_id) {
             trace!("Running macos dtor {:?} on {:?} at {:?}", instance, data, thread_id);
@@ -278,7 +278,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         // we just scheduled. Since we deleted the destructor, it is
         // guaranteed that we will schedule it again. The `dtors_running`
         // flag will prevent the code from adding the destructor again.
-        this.enable_thread(thread_id)?;
+        this.enable_thread(thread_id);
         Ok(true)
     } else {
         Ok(false)
@@ -289,9 +289,9 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     /// a destructor to schedule, and `false` otherwise.
     fn schedule_next_pthread_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
         let this = self.eval_context_mut();
-        let active_thread = this.get_active_thread()?;
+        let active_thread = this.get_active_thread();

-        assert!(this.has_terminated(active_thread)?, "running TLS dtors for non-terminated thread");
+        assert!(this.has_terminated(active_thread), "running TLS dtors for non-terminated thread");
         // Fetch next dtor after `key`.
         let last_key = this.machine.tls.dtors_running[&active_thread].last_dtor_key.clone();
         let dtor = match this.machine.tls.fetch_tls_dtor(last_key, active_thread) {
@@ -314,7 +314,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             StackPopCleanup::None { cleanup: true },
         )?;

-        this.enable_thread(active_thread)?;
+        this.enable_thread(active_thread);
         return Ok(true);
     }
     this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = None;
@@ -340,7 +340,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     /// https://github.com/rust-lang/rust/issues/28129.
     fn schedule_next_tls_dtor_for_active_thread(&mut self) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        let active_thread = this.get_active_thread()?;
+        let active_thread = this.get_active_thread();

         if !this.machine.tls.set_dtors_running_for_thread(active_thread) {
             // This is the first time we got asked to schedule a destructor. The
src/sync.rs
@@ -145,13 +145,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         mutex.lock_count = mutex.lock_count.checked_add(1).unwrap();
     }

-    /// Try unlocking by decreasing the lock count and returning the old owner
-    /// and the old lock count. If the lock count reaches 0, release the lock
-    /// and potentially give to a new owner. If the lock was not locked, return
-    /// `None`.
-    ///
-    /// Note: It is the caller's responsibility to check that the thread that
-    /// unlocked the lock actually is the same one, which owned it.
+    /// Try unlocking by decreasing the lock count and returning the old lock
+    /// count. If the lock count reaches 0, release the lock and potentially
+    /// give to a new owner. If the lock was not locked by `expected_owner`,
+    /// return `None`.
     fn mutex_unlock(
         &mut self,
         id: MutexId,
@@ -173,9 +170,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             mutex.owner = None;
             // The mutex is completely unlocked. Try transferring ownership
             // to another thread.
-            if let Some(new_owner) = this.mutex_dequeue(id) {
+            if let Some(new_owner) = this.mutex_dequeue_and_unblock(id) {
                 this.mutex_lock(id, new_owner);
-                this.unblock_thread(new_owner)?;
             }
         }
         Ok(Some(old_lock_count))
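The rewritten doc comment pins down the contract: unlocking is checked against `expected_owner`, and an unlock by a thread that does not hold the mutex returns `None` instead of touching the state. A standalone sketch of that checked-unlock contract; ThreadId and Mutex here are simplified stand-ins, not Miri's types:

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct ThreadId(u32);

struct Mutex {
    owner: Option<ThreadId>,
    lock_count: usize,
}

impl Mutex {
    /// Returns the old lock count, or `None` if `expected_owner` does not
    /// hold the lock (not locked at all, or locked by someone else).
    fn unlock(&mut self, expected_owner: ThreadId) -> Option<usize> {
        if self.owner != Some(expected_owner) {
            return None;
        }
        let old_lock_count = self.lock_count;
        self.lock_count -= 1;
        if self.lock_count == 0 {
            // Fully unlocked: ownership can now be handed to a queued thread.
            self.owner = None;
        }
        Some(old_lock_count)
    }
}

fn main() {
    let mut m = Mutex { owner: Some(ThreadId(1)), lock_count: 2 };
    assert_eq!(m.unlock(ThreadId(2)), None); // wrong thread: rejected
    assert_eq!(m.unlock(ThreadId(1)), Some(2)); // recursive unlock
    assert_eq!(m.unlock(ThreadId(1)), Some(1)); // fully releases
    assert_eq!(m.owner, None);
}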
@@ -187,17 +183,23 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx

     #[inline]
     /// Put the thread into the queue waiting for the lock.
-    fn mutex_enqueue(&mut self, id: MutexId, thread: ThreadId) {
+    fn mutex_enqueue_and_block(&mut self, id: MutexId, thread: ThreadId) {
         let this = self.eval_context_mut();
         assert!(this.mutex_is_locked(id), "queuing on unlocked mutex");
         this.machine.threads.sync.mutexes[id].queue.push_back(thread);
+        this.block_thread(thread);
     }

     #[inline]
     /// Take a thread out of the queue waiting for the lock.
-    fn mutex_dequeue(&mut self, id: MutexId) -> Option<ThreadId> {
+    fn mutex_dequeue_and_unblock(&mut self, id: MutexId) -> Option<ThreadId> {
         let this = self.eval_context_mut();
-        this.machine.threads.sync.mutexes[id].queue.pop_front()
+        if let Some(thread) = this.machine.threads.sync.mutexes[id].queue.pop_front() {
+            this.unblock_thread(thread);
+            Some(thread)
+        } else {
+            None
+        }
     }

     #[inline]
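These two helpers are the heart of the "combine en/dequeue with (un)block" part of the commit: a thread is blocked at the exact moment it is enqueued and unblocked at the exact moment it is dequeued, so the wait queue and the set of blocked threads cannot drift apart. A standalone sketch of that invariant, using simplified stand-in types (ThreadId, Mutex, Machine are hypothetical, not Miri's definitions):

use std::collections::{HashSet, VecDeque};

// Simplified stand-ins for Miri's ThreadId and per-mutex state.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct ThreadId(u32);

#[derive(Default)]
struct Mutex {
    owner: Option<ThreadId>,
    queue: VecDeque<ThreadId>,
}

#[derive(Default)]
struct Machine {
    mutex: Mutex,
    blocked: HashSet<ThreadId>,
}

impl Machine {
    /// Enqueue and block in one step, so the queue and the blocked set
    /// cannot disagree (the invariant the commit enforces).
    fn mutex_enqueue_and_block(&mut self, thread: ThreadId) {
        assert!(self.mutex.owner.is_some(), "queuing on unlocked mutex");
        self.mutex.queue.push_back(thread);
        assert!(self.blocked.insert(thread), "thread was already blocked");
    }

    /// Dequeue and unblock in one step; returns the thread that should
    /// now take the lock, if any.
    fn mutex_dequeue_and_unblock(&mut self) -> Option<ThreadId> {
        let thread = self.mutex.queue.pop_front()?;
        assert!(self.blocked.remove(&thread), "dequeued thread was not blocked");
        Some(thread)
    }
}

fn main() {
    let mut m = Machine::default();
    m.mutex.owner = Some(ThreadId(0));
    m.mutex_enqueue_and_block(ThreadId(1));
    // The owner unlocks: hand the mutex to the next waiter.
    if let Some(next) = m.mutex_dequeue_and_unblock() {
        m.mutex.owner = Some(next);
    }
    assert_eq!(m.mutex.owner, Some(ThreadId(1)));
    assert!(m.blocked.is_empty());
}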
@@ -255,25 +257,30 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         &mut self,
         id: RwLockId,
         reader: ThreadId,
-    ) -> InterpResult<'tcx> {
+    ) {
         let this = self.eval_context_mut();
         assert!(this.rwlock_is_write_locked(id), "queueing on not write locked lock");
         this.machine.threads.sync.rwlocks[id].reader_queue.push_back(reader);
-        this.block_thread(reader)
+        this.block_thread(reader);
     }

     #[inline]
     /// Take a reader out of the queue waiting for the lock.
-    fn rwlock_dequeue_reader(&mut self, id: RwLockId) -> Option<ThreadId> {
+    fn rwlock_dequeue_and_unblock_reader(&mut self, id: RwLockId) -> Option<ThreadId> {
         let this = self.eval_context_mut();
-        this.machine.threads.sync.rwlocks[id].reader_queue.pop_front()
+        if let Some(reader) = this.machine.threads.sync.rwlocks[id].reader_queue.pop_front() {
+            this.unblock_thread(reader);
+            Some(reader)
+        } else {
+            None
+        }
     }

     #[inline]
     /// Lock by setting the writer that owns the lock.
     fn rwlock_writer_lock(&mut self, id: RwLockId, writer: ThreadId) {
         let this = self.eval_context_mut();
-        assert!(!this.rwlock_is_locked(id), "the lock is already locked");
+        assert!(!this.rwlock_is_locked(id), "the rwlock is already locked");
         this.machine.threads.sync.rwlocks[id].writer = Some(writer);
     }
@@ -290,18 +297,23 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         &mut self,
         id: RwLockId,
         writer: ThreadId,
-    ) -> InterpResult<'tcx> {
+    ) {
         let this = self.eval_context_mut();
         assert!(this.rwlock_is_locked(id), "queueing on unlocked lock");
         this.machine.threads.sync.rwlocks[id].writer_queue.push_back(writer);
-        this.block_thread(writer)
+        this.block_thread(writer);
     }

     #[inline]
     /// Take the writer out of the queue waiting for the lock.
-    fn rwlock_dequeue_writer(&mut self, id: RwLockId) -> Option<ThreadId> {
+    fn rwlock_dequeue_and_unblock_writer(&mut self, id: RwLockId) -> Option<ThreadId> {
         let this = self.eval_context_mut();
-        this.machine.threads.sync.rwlocks[id].writer_queue.pop_front()
+        if let Some(writer) = this.machine.threads.sync.rwlocks[id].writer_queue.pop_front() {
+            this.unblock_thread(writer);
+            Some(writer)
+        } else {
+            None
+        }
     }

     #[inline]
@@ -581,9 +581,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     }

     #[inline]
-    fn create_thread(&mut self) -> InterpResult<'tcx, ThreadId> {
+    fn create_thread(&mut self) -> ThreadId {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.create_thread())
+        this.machine.threads.create_thread()
     }

     #[inline]
@@ -599,34 +599,33 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     }

     #[inline]
-    fn set_active_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx, ThreadId> {
+    fn set_active_thread(&mut self, thread_id: ThreadId) -> ThreadId {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.set_active_thread_id(thread_id))
+        this.machine.threads.set_active_thread_id(thread_id)
     }

     #[inline]
-    fn get_active_thread(&self) -> InterpResult<'tcx, ThreadId> {
+    fn get_active_thread(&self) -> ThreadId {
         let this = self.eval_context_ref();
-        Ok(this.machine.threads.get_active_thread_id())
+        this.machine.threads.get_active_thread_id()
     }

     #[inline]
-    fn get_total_thread_count(&self) -> InterpResult<'tcx, usize> {
+    fn get_total_thread_count(&self) -> usize {
         let this = self.eval_context_ref();
-        Ok(this.machine.threads.get_total_thread_count())
+        this.machine.threads.get_total_thread_count()
     }

     #[inline]
-    fn has_terminated(&self, thread_id: ThreadId) -> InterpResult<'tcx, bool> {
+    fn has_terminated(&self, thread_id: ThreadId) -> bool {
         let this = self.eval_context_ref();
-        Ok(this.machine.threads.has_terminated(thread_id))
+        this.machine.threads.has_terminated(thread_id)
     }

     #[inline]
-    fn enable_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx> {
+    fn enable_thread(&mut self, thread_id: ThreadId) {
         let this = self.eval_context_mut();
         this.machine.threads.enable_thread(thread_id);
-        Ok(())
     }

     #[inline]
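This hunk is the "mark infallible ops as such" part of the commit: helpers that can never fail stop returning InterpResult and return the value directly, so every caller drops the ? operator (as seen throughout the shim hunks above). A minimal sketch of the same refactor pattern on a toy type; the names and the InterpResult alias here are stand-ins, not Miri's actual definitions:

// A plain alias standing in for Miri's InterpResult, for illustration only.
type InterpResult<T> = Result<T, String>;

struct ThreadManager { active: u32 }

impl ThreadManager {
    // Before: an infallible operation dressed up as fallible, forcing
    // every caller to write `?` and to return a Result itself.
    fn get_active_thread_fallible(&self) -> InterpResult<u32> {
        Ok(self.active)
    }

    // After: the signature states the truth; this cannot fail.
    fn get_active_thread(&self) -> u32 {
        self.active
    }
}

fn main() -> InterpResult<()> {
    let tm = ThreadManager { active: 7 };
    let _old = tm.get_active_thread_fallible()?; // caller needs `?`
    let new = tm.get_active_thread(); // caller does not
    assert_eq!(new, 7);
    Ok(())
}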
@@ -642,37 +641,36 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     }

     #[inline]
-    fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) -> InterpResult<'tcx, ()> {
+    fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.set_thread_name(new_thread_name))
+        this.machine.threads.set_thread_name(new_thread_name);
     }

     #[inline]
-    fn get_active_thread_name<'c>(&'c self) -> InterpResult<'tcx, &'c [u8]>
+    fn get_active_thread_name<'c>(&'c self) -> &'c [u8]
     where
         'mir: 'c,
     {
         let this = self.eval_context_ref();
-        Ok(this.machine.threads.get_thread_name())
+        this.machine.threads.get_thread_name()
     }

     #[inline]
-    fn block_thread(&mut self, thread: ThreadId) -> InterpResult<'tcx> {
+    fn block_thread(&mut self, thread: ThreadId) {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.block_thread(thread))
+        this.machine.threads.block_thread(thread);
     }

     #[inline]
-    fn unblock_thread(&mut self, thread: ThreadId) -> InterpResult<'tcx> {
+    fn unblock_thread(&mut self, thread: ThreadId) {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.unblock_thread(thread))
+        this.machine.threads.unblock_thread(thread);
     }

     #[inline]
-    fn yield_active_thread(&mut self) -> InterpResult<'tcx> {
+    fn yield_active_thread(&mut self) {
         let this = self.eval_context_mut();
         this.machine.threads.yield_active_thread();
-        Ok(())
     }

     #[inline]
@@ -681,17 +679,15 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         thread: ThreadId,
         call_time: Time,
         callback: TimeoutCallback<'mir, 'tcx>,
-    ) -> InterpResult<'tcx> {
+    ) {
         let this = self.eval_context_mut();
         this.machine.threads.register_timeout_callback(thread, call_time, callback);
-        Ok(())
     }

     #[inline]
-    fn unregister_timeout_callback_if_exists(&mut self, thread: ThreadId) -> InterpResult<'tcx> {
+    fn unregister_timeout_callback_if_exists(&mut self, thread: ThreadId) {
         let this = self.eval_context_mut();
         this.machine.threads.unregister_timeout_callback_if_exists(thread);
-        Ok(())
     }

     /// Execute a timeout callback on the callback's thread.
@@ -706,9 +702,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         // thread.
         // 2. Make the scheduler the only place that can change the active
         // thread.
-        let old_thread = this.set_active_thread(thread)?;
+        let old_thread = this.set_active_thread(thread);
         callback(this)?;
-        this.set_active_thread(old_thread)?;
+        this.set_active_thread(old_thread);
         Ok(())
     }