2020-04-21 18:38:14 -05:00
|
|
|
use std::collections::{hash_map::Entry, HashMap, VecDeque};
|
|
|
|
use std::convert::TryFrom;
|
|
|
|
use std::num::NonZeroU32;
|
2020-05-18 10:18:15 -05:00
|
|
|
use std::ops::Not;
|
2020-04-21 18:38:14 -05:00
|
|
|
|
2020-06-28 02:47:20 -05:00
|
|
|
use log::trace;
|
|
|
|
|
2020-04-21 18:38:14 -05:00
|
|
|
use rustc_index::vec::{Idx, IndexVec};
|
|
|
|
|
|
|
|
use crate::*;
|
|
|
|
|
2020-05-18 09:39:19 -05:00
|
|
|
/// We cannot use the `newtype_index!` macro because we have to use 0 as a
/// sentinel value meaning that the identifier is not assigned. This is because
/// the pthreads static initializers initialize memory with zeros (see the
/// `src/shims/sync.rs` file).
macro_rules! declare_id {
    ($name: ident) => {
        /// 0 is used to indicate that the id was not yet assigned and,
        /// therefore, is not a valid identifier.
        #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
        pub struct $name(NonZeroU32);

        impl $name {
            // Construct an id directly from the raw `u32` value stored in
            // emulated memory (e.g. read back out of a pthreads object).
            // Panics if `id == 0`.
            pub fn from_u32(id: u32) -> Self {
                Self(NonZeroU32::new(id).unwrap())
            }
        }

        // Allow the id to be used as an index into an `IndexVec`. Because 0 is
        // reserved as the "unassigned" sentinel, index `i` maps to id `i + 1`.
        impl Idx for $name {
            fn new(idx: usize) -> Self {
                // We use 0 as a sentinel value (see the comment above) and,
                // therefore, need to shift by one when converting from an index
                // into a vector.
                let shifted_idx = u32::try_from(idx).unwrap().checked_add(1).unwrap();
                $name(NonZeroU32::new(shifted_idx).unwrap())
            }
            fn index(self) -> usize {
                // See the comment in `Self::new`.
                // (This cannot underflow because self is NonZeroU32.)
                usize::try_from(self.0.get() - 1).unwrap()
            }
        }

        impl $name {
            // Convert the id into a `u32` machine scalar, suitable for storing
            // in emulated memory.
            pub fn to_u32_scalar<'tcx>(&self) -> Scalar<Tag> {
                Scalar::from_u32(self.0.get())
            }
        }
    };
}
|
|
|
|
|
|
|
|
declare_id!(MutexId);

/// The mutex state.
#[derive(Default, Debug)]
struct Mutex {
    /// The thread that currently owns the lock.
    owner: Option<ThreadId>,
    /// How many times the mutex was locked by the owner.
    /// Invariant: `lock_count == 0` iff `owner` is `None`.
    lock_count: usize,
    /// The queue of threads waiting for this mutex.
    queue: VecDeque<ThreadId>,
    /// Data race handle: released into on unlock and acquired from on lock,
    /// so each unlock happens-before the next lock.
    data_race: VClock
}
|
|
|
|
|
|
|
|
declare_id!(RwLockId);

/// The read-write lock state.
#[derive(Default, Debug)]
struct RwLock {
    /// The writer thread that currently owns the lock.
    writer: Option<ThreadId>,
    /// The readers that currently own the lock and how many times they acquired
    /// the lock.
    readers: HashMap<ThreadId, usize>,
    /// The queue of writer threads waiting for this lock.
    writer_queue: VecDeque<ThreadId>,
    /// The queue of reader threads waiting for this lock.
    reader_queue: VecDeque<ThreadId>,
    /// Data race handle for writers: every new lock holder acquires from this
    /// clock; an unlocking writer releases into it.
    data_race: VClock,
    /// Data race handle for readers: unlocking readers release into this clock
    /// (shared release); once the last reader is done it is copied into
    /// `data_race` so all readers happen-before the next writer.
    data_race_reader: VClock,
}
|
|
|
|
|
|
|
|
declare_id!(CondvarId);

/// A thread waiting on a conditional variable.
#[derive(Debug)]
struct CondvarWaiter {
    /// The thread that is waiting on this variable.
    thread: ThreadId,
    /// The mutex on which the thread is waiting.
    mutex: MutexId,
}

/// The conditional variable state.
#[derive(Default, Debug)]
struct Condvar {
    /// The queue of threads waiting on this conditional variable.
    waiters: VecDeque<CondvarWaiter>,
    /// Data race handle: the signalling thread releases into this clock and the
    /// woken waiter acquires from it, so each signal happens-before the wake-up.
    data_race: VClock,
}
|
|
|
|
|
2020-10-01 13:47:31 -05:00
|
|
|
/// The futex state.
#[derive(Default, Debug)]
struct Futex {
    /// The queue of threads waiting on this futex.
    waiters: VecDeque<FutexWaiter>,
    /// Data race handle: the waking thread releases into this clock and the
    /// woken waiter acquires from it, so each wake happens-before the wake-up.
    data_race: VClock,
}

/// A thread waiting on a futex.
#[derive(Debug)]
struct FutexWaiter {
    /// The thread that is waiting on this futex.
    thread: ThreadId,
}
|
|
|
|
|
2020-04-21 18:38:14 -05:00
|
|
|
/// The state of all synchronization variables.
#[derive(Default, Debug)]
pub(super) struct SynchronizationState {
    /// All mutexes, indexed by `MutexId`.
    mutexes: IndexVec<MutexId, Mutex>,
    /// All read-write locks, indexed by `RwLockId`.
    rwlocks: IndexVec<RwLockId, RwLock>,
    /// All conditional variables, indexed by `CondvarId`.
    condvars: IndexVec<CondvarId, Condvar>,
    /// Futexes, keyed by the (untagged) address of the futex word; created
    /// lazily on first wait.
    futexes: HashMap<Pointer, Futex>,
}
|
|
|
|
|
2020-06-28 02:47:20 -05:00
|
|
|
// Private extension trait for local helper methods
|
|
|
|
impl<'mir, 'tcx: 'mir> EvalContextExtPriv<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
|
|
|
|
trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
|
|
|
|
/// Take a reader out of the queue waiting for the lock.
|
|
|
|
/// Returns `true` if some thread got the rwlock.
|
|
|
|
#[inline]
|
|
|
|
fn rwlock_dequeue_and_lock_reader(&mut self, id: RwLockId) -> bool {
|
|
|
|
let this = self.eval_context_mut();
|
|
|
|
if let Some(reader) = this.machine.threads.sync.rwlocks[id].reader_queue.pop_front() {
|
|
|
|
this.unblock_thread(reader);
|
|
|
|
this.rwlock_reader_lock(id, reader);
|
|
|
|
true
|
|
|
|
} else {
|
|
|
|
false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Take the writer out of the queue waiting for the lock.
|
|
|
|
/// Returns `true` if some thread got the rwlock.
|
|
|
|
#[inline]
|
|
|
|
fn rwlock_dequeue_and_lock_writer(&mut self, id: RwLockId) -> bool {
|
|
|
|
let this = self.eval_context_mut();
|
|
|
|
if let Some(writer) = this.machine.threads.sync.rwlocks[id].writer_queue.pop_front() {
|
|
|
|
this.unblock_thread(writer);
|
|
|
|
this.rwlock_writer_lock(id, writer);
|
|
|
|
true
|
|
|
|
} else {
|
|
|
|
false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Take a thread out of the queue waiting for the mutex, and lock
|
|
|
|
/// the mutex for it. Returns `true` if some thread has the mutex now.
|
|
|
|
#[inline]
|
|
|
|
fn mutex_dequeue_and_lock(&mut self, id: MutexId) -> bool {
|
|
|
|
let this = self.eval_context_mut();
|
|
|
|
if let Some(thread) = this.machine.threads.sync.mutexes[id].queue.pop_front() {
|
|
|
|
this.unblock_thread(thread);
|
|
|
|
this.mutex_lock(id, thread);
|
|
|
|
true
|
|
|
|
} else {
|
|
|
|
false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-21 18:38:14 -05:00
|
|
|
// Public interface to synchronization primitives. Please note that in most
|
|
|
|
// cases, the function calls are infallible and it is the client's (shim
|
|
|
|
// implementation's) responsibility to detect and deal with erroneous
|
|
|
|
// situations.
|
|
|
|
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
|
|
|
|
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
|
|
|
|
#[inline]
|
|
|
|
/// Create state for a new mutex.
|
|
|
|
fn mutex_create(&mut self) -> MutexId {
|
|
|
|
let this = self.eval_context_mut();
|
|
|
|
this.machine.threads.sync.mutexes.push(Default::default())
|
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
/// Get the id of the thread that currently owns this lock.
|
|
|
|
fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId {
|
|
|
|
let this = self.eval_context_ref();
|
|
|
|
this.machine.threads.sync.mutexes[id].owner.unwrap()
|
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
/// Check if locked.
|
2020-06-28 02:47:20 -05:00
|
|
|
fn mutex_is_locked(&self, id: MutexId) -> bool {
|
|
|
|
let this = self.eval_context_ref();
|
2020-04-21 18:38:14 -05:00
|
|
|
this.machine.threads.sync.mutexes[id].owner.is_some()
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Lock by setting the mutex owner and increasing the lock count.
|
|
|
|
fn mutex_lock(&mut self, id: MutexId, thread: ThreadId) {
|
|
|
|
let this = self.eval_context_mut();
|
|
|
|
let mutex = &mut this.machine.threads.sync.mutexes[id];
|
|
|
|
if let Some(current_owner) = mutex.owner {
|
|
|
|
assert_eq!(thread, current_owner, "mutex already locked by another thread");
|
|
|
|
assert!(
|
|
|
|
mutex.lock_count > 0,
|
|
|
|
"invariant violation: lock_count == 0 iff the thread is unlocked"
|
|
|
|
);
|
|
|
|
} else {
|
|
|
|
mutex.owner = Some(thread);
|
|
|
|
}
|
|
|
|
mutex.lock_count = mutex.lock_count.checked_add(1).unwrap();
|
2020-11-15 12:30:26 -06:00
|
|
|
if let Some(data_race) = &this.memory.extra.data_race {
|
|
|
|
data_race.validate_lock_acquire(&mutex.data_race, thread);
|
|
|
|
}
|
2020-04-21 18:38:14 -05:00
|
|
|
}
|
|
|
|
|
2020-05-30 15:29:27 -05:00
|
|
|
/// Try unlocking by decreasing the lock count and returning the old lock
|
|
|
|
/// count. If the lock count reaches 0, release the lock and potentially
|
|
|
|
/// give to a new owner. If the lock was not locked by `expected_owner`,
|
|
|
|
/// return `None`.
|
2020-05-19 11:44:32 -05:00
|
|
|
fn mutex_unlock(
|
|
|
|
&mut self,
|
|
|
|
id: MutexId,
|
|
|
|
expected_owner: ThreadId,
|
2020-05-30 15:54:37 -05:00
|
|
|
) -> Option<usize> {
|
2020-04-21 18:38:14 -05:00
|
|
|
let this = self.eval_context_mut();
|
|
|
|
let mutex = &mut this.machine.threads.sync.mutexes[id];
|
|
|
|
if let Some(current_owner) = mutex.owner {
|
2020-05-18 10:18:15 -05:00
|
|
|
// Mutex is locked.
|
2020-05-19 11:44:32 -05:00
|
|
|
if current_owner != expected_owner {
|
|
|
|
// Only the owner can unlock the mutex.
|
2020-05-30 15:54:37 -05:00
|
|
|
return None;
|
2020-05-19 11:44:32 -05:00
|
|
|
}
|
2020-05-18 10:18:15 -05:00
|
|
|
let old_lock_count = mutex.lock_count;
|
|
|
|
mutex.lock_count = old_lock_count
|
2020-04-21 18:38:14 -05:00
|
|
|
.checked_sub(1)
|
|
|
|
.expect("invariant violation: lock_count == 0 iff the thread is unlocked");
|
|
|
|
if mutex.lock_count == 0 {
|
|
|
|
mutex.owner = None;
|
2020-05-18 10:18:15 -05:00
|
|
|
// The mutex is completely unlocked. Try transfering ownership
|
|
|
|
// to another thread.
|
2020-11-15 12:30:26 -06:00
|
|
|
if let Some(data_race) = &this.memory.extra.data_race {
|
|
|
|
data_race.validate_lock_release(&mut mutex.data_race, current_owner);
|
|
|
|
}
|
2020-05-30 15:35:58 -05:00
|
|
|
this.mutex_dequeue_and_lock(id);
|
2020-04-21 18:38:14 -05:00
|
|
|
}
|
2020-05-30 15:54:37 -05:00
|
|
|
Some(old_lock_count)
|
2020-04-21 18:38:14 -05:00
|
|
|
} else {
|
2020-06-28 02:47:20 -05:00
|
|
|
// Mutex is not locked.
|
2020-05-30 15:54:37 -05:00
|
|
|
None
|
2020-04-21 18:38:14 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
2020-05-30 15:35:58 -05:00
|
|
|
/// Put the thread into the queue waiting for the mutex.
|
2020-05-30 15:29:27 -05:00
|
|
|
fn mutex_enqueue_and_block(&mut self, id: MutexId, thread: ThreadId) {
|
2020-04-21 18:38:14 -05:00
|
|
|
let this = self.eval_context_mut();
|
2020-05-19 09:47:25 -05:00
|
|
|
assert!(this.mutex_is_locked(id), "queing on unlocked mutex");
|
2020-04-21 18:38:14 -05:00
|
|
|
this.machine.threads.sync.mutexes[id].queue.push_back(thread);
|
2020-05-30 15:29:27 -05:00
|
|
|
this.block_thread(thread);
|
2020-04-21 18:38:14 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
/// Create state for a new read write lock.
|
|
|
|
fn rwlock_create(&mut self) -> RwLockId {
|
|
|
|
let this = self.eval_context_mut();
|
|
|
|
this.machine.threads.sync.rwlocks.push(Default::default())
|
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
/// Check if locked.
|
2020-06-28 02:47:20 -05:00
|
|
|
fn rwlock_is_locked(&self, id: RwLockId) -> bool {
|
|
|
|
let this = self.eval_context_ref();
|
|
|
|
let rwlock = &this.machine.threads.sync.rwlocks[id];
|
|
|
|
trace!(
|
|
|
|
"rwlock_is_locked: {:?} writer is {:?} and there are {} reader threads (some of which could hold multiple read locks)",
|
|
|
|
id, rwlock.writer, rwlock.readers.len(),
|
|
|
|
);
|
|
|
|
rwlock.writer.is_some()|| rwlock.readers.is_empty().not()
|
2020-04-21 18:38:14 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
/// Check if write locked.
|
2020-06-28 02:47:20 -05:00
|
|
|
fn rwlock_is_write_locked(&self, id: RwLockId) -> bool {
|
|
|
|
let this = self.eval_context_ref();
|
|
|
|
let rwlock = &this.machine.threads.sync.rwlocks[id];
|
|
|
|
trace!("rwlock_is_write_locked: {:?} writer is {:?}", id, rwlock.writer);
|
|
|
|
rwlock.writer.is_some()
|
2020-04-21 18:38:14 -05:00
|
|
|
}
|
|
|
|
|
2020-05-18 10:18:15 -05:00
|
|
|
/// Read-lock the lock by adding the `reader` the list of threads that own
|
|
|
|
/// this lock.
|
|
|
|
fn rwlock_reader_lock(&mut self, id: RwLockId, reader: ThreadId) {
|
2020-04-21 18:38:14 -05:00
|
|
|
let this = self.eval_context_mut();
|
|
|
|
assert!(!this.rwlock_is_write_locked(id), "the lock is write locked");
|
2020-06-28 02:47:20 -05:00
|
|
|
trace!("rwlock_reader_lock: {:?} now also held (one more time) by {:?}", id, reader);
|
2020-11-01 18:23:27 -06:00
|
|
|
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
|
|
|
|
let count = rwlock.readers.entry(reader).or_insert(0);
|
2020-05-18 10:18:15 -05:00
|
|
|
*count = count.checked_add(1).expect("the reader counter overflowed");
|
2020-11-15 12:30:26 -06:00
|
|
|
if let Some(data_race) = &this.memory.extra.data_race {
|
|
|
|
data_race.validate_lock_acquire(&rwlock.data_race, reader);
|
|
|
|
}
|
2020-04-21 18:38:14 -05:00
|
|
|
}
|
|
|
|
|
2020-06-28 02:47:20 -05:00
|
|
|
/// Try read-unlock the lock for `reader` and potentially give the lock to a new owner.
|
|
|
|
/// Returns `true` if succeeded, `false` if this `reader` did not hold the lock.
|
2020-05-18 10:18:15 -05:00
|
|
|
fn rwlock_reader_unlock(&mut self, id: RwLockId, reader: ThreadId) -> bool {
|
2020-04-21 18:38:14 -05:00
|
|
|
let this = self.eval_context_mut();
|
2020-11-01 18:23:27 -06:00
|
|
|
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
|
|
|
|
match rwlock.readers.entry(reader) {
|
2020-04-21 18:38:14 -05:00
|
|
|
Entry::Occupied(mut entry) => {
|
|
|
|
let count = entry.get_mut();
|
2020-05-25 02:45:42 -05:00
|
|
|
assert!(*count > 0, "rwlock locked with count == 0");
|
2020-04-21 18:38:14 -05:00
|
|
|
*count -= 1;
|
|
|
|
if *count == 0 {
|
2020-06-28 02:47:20 -05:00
|
|
|
trace!("rwlock_reader_unlock: {:?} no longer held by {:?}", id, reader);
|
2020-04-21 18:38:14 -05:00
|
|
|
entry.remove();
|
2020-06-28 02:47:20 -05:00
|
|
|
} else {
|
|
|
|
trace!("rwlock_reader_unlock: {:?} held one less time by {:?}", id, reader);
|
2020-04-21 18:38:14 -05:00
|
|
|
}
|
|
|
|
}
|
2020-06-28 02:47:20 -05:00
|
|
|
Entry::Vacant(_) => return false, // we did not even own this lock
|
|
|
|
}
|
2020-11-15 12:30:26 -06:00
|
|
|
if let Some(data_race) = &this.memory.extra.data_race {
|
|
|
|
data_race.validate_lock_release_shared(&mut rwlock.data_race_reader, reader);
|
|
|
|
}
|
2020-11-01 18:23:27 -06:00
|
|
|
|
2020-06-28 02:47:20 -05:00
|
|
|
// The thread was a reader. If the lock is not held any more, give it to a writer.
|
|
|
|
if this.rwlock_is_locked(id).not() {
|
2020-11-01 18:23:27 -06:00
|
|
|
|
|
|
|
// All the readers are finished, so set the writer data-race handle to the value
|
|
|
|
// of the union of all reader data race handles, since the set of readers
|
|
|
|
// happen-before the writers
|
|
|
|
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
|
2020-11-15 12:30:26 -06:00
|
|
|
rwlock.data_race.clone_from(&rwlock.data_race_reader);
|
2020-06-28 02:47:20 -05:00
|
|
|
this.rwlock_dequeue_and_lock_writer(id);
|
2020-04-21 18:38:14 -05:00
|
|
|
}
|
2020-06-28 02:47:20 -05:00
|
|
|
true
|
2020-04-21 18:38:14 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
2020-05-18 10:18:15 -05:00
|
|
|
/// Put the reader in the queue waiting for the lock and block it.
|
|
|
|
fn rwlock_enqueue_and_block_reader(
|
|
|
|
&mut self,
|
|
|
|
id: RwLockId,
|
|
|
|
reader: ThreadId,
|
2020-05-30 15:29:27 -05:00
|
|
|
) {
|
2020-04-21 18:38:14 -05:00
|
|
|
let this = self.eval_context_mut();
|
2020-06-28 02:47:20 -05:00
|
|
|
assert!(this.rwlock_is_write_locked(id), "read-queueing on not write locked rwlock");
|
2020-04-21 18:38:14 -05:00
|
|
|
this.machine.threads.sync.rwlocks[id].reader_queue.push_back(reader);
|
2020-05-30 15:29:27 -05:00
|
|
|
this.block_thread(reader);
|
2020-04-21 18:38:14 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
/// Lock by setting the writer that owns the lock.
|
2020-05-18 10:18:15 -05:00
|
|
|
fn rwlock_writer_lock(&mut self, id: RwLockId, writer: ThreadId) {
|
2020-04-21 18:38:14 -05:00
|
|
|
let this = self.eval_context_mut();
|
2020-05-30 15:48:43 -05:00
|
|
|
assert!(!this.rwlock_is_locked(id), "the rwlock is already locked");
|
2020-06-28 02:47:20 -05:00
|
|
|
trace!("rwlock_writer_lock: {:?} now held by {:?}", id, writer);
|
2020-11-01 18:23:27 -06:00
|
|
|
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
|
|
|
|
rwlock.writer = Some(writer);
|
2020-11-15 12:30:26 -06:00
|
|
|
if let Some(data_race) = &this.memory.extra.data_race {
|
|
|
|
data_race.validate_lock_acquire(&rwlock.data_race, writer);
|
|
|
|
}
|
2020-04-21 18:38:14 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
2020-05-18 10:18:15 -05:00
|
|
|
/// Try to unlock by removing the writer.
|
2020-06-28 02:47:20 -05:00
|
|
|
fn rwlock_writer_unlock(&mut self, id: RwLockId, expected_writer: ThreadId) -> bool {
|
2020-04-21 18:38:14 -05:00
|
|
|
let this = self.eval_context_mut();
|
2020-06-28 02:47:20 -05:00
|
|
|
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
|
|
|
|
if let Some(current_writer) = rwlock.writer {
|
|
|
|
if current_writer != expected_writer {
|
|
|
|
// Only the owner can unlock the rwlock.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
rwlock.writer = None;
|
|
|
|
trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, expected_writer);
|
2020-11-01 18:23:27 -06:00
|
|
|
// Release memory to both reader and writer vector clocks
|
|
|
|
// since this writer happens-before both the union of readers once they are finished
|
|
|
|
// and the next writer
|
2020-11-15 12:30:26 -06:00
|
|
|
if let Some(data_race) = &this.memory.extra.data_race {
|
|
|
|
data_race.validate_lock_release(&mut rwlock.data_race, current_writer);
|
|
|
|
data_race.validate_lock_release(&mut rwlock.data_race_reader, current_writer);
|
|
|
|
}
|
2020-06-28 02:47:20 -05:00
|
|
|
// The thread was a writer.
|
|
|
|
//
|
|
|
|
// We are prioritizing writers here against the readers. As a
|
|
|
|
// result, not only readers can starve writers, but also writers can
|
|
|
|
// starve readers.
|
|
|
|
if this.rwlock_dequeue_and_lock_writer(id) {
|
|
|
|
// Someone got the write lock, nice.
|
|
|
|
} else {
|
|
|
|
// Give the lock to all readers.
|
|
|
|
while this.rwlock_dequeue_and_lock_reader(id) {
|
|
|
|
// Rinse and repeat.
|
|
|
|
}
|
|
|
|
}
|
|
|
|
true
|
|
|
|
} else {
|
|
|
|
false
|
|
|
|
}
|
2020-04-21 18:38:14 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
/// Put the writer in the queue waiting for the lock.
|
2020-05-18 10:18:15 -05:00
|
|
|
fn rwlock_enqueue_and_block_writer(
|
|
|
|
&mut self,
|
|
|
|
id: RwLockId,
|
|
|
|
writer: ThreadId,
|
2020-05-30 15:29:27 -05:00
|
|
|
) {
|
2020-04-21 18:38:14 -05:00
|
|
|
let this = self.eval_context_mut();
|
2020-06-28 02:47:20 -05:00
|
|
|
assert!(this.rwlock_is_locked(id), "write-queueing on unlocked rwlock");
|
2020-04-21 18:38:14 -05:00
|
|
|
this.machine.threads.sync.rwlocks[id].writer_queue.push_back(writer);
|
2020-05-30 15:29:27 -05:00
|
|
|
this.block_thread(writer);
|
2020-04-21 18:38:14 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
/// Create state for a new conditional variable.
|
|
|
|
fn condvar_create(&mut self) -> CondvarId {
|
|
|
|
let this = self.eval_context_mut();
|
|
|
|
this.machine.threads.sync.condvars.push(Default::default())
|
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
/// Is the conditional variable awaited?
|
|
|
|
fn condvar_is_awaited(&mut self, id: CondvarId) -> bool {
|
|
|
|
let this = self.eval_context_mut();
|
|
|
|
!this.machine.threads.sync.condvars[id].waiters.is_empty()
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Mark that the thread is waiting on the conditional variable.
|
|
|
|
fn condvar_wait(&mut self, id: CondvarId, thread: ThreadId, mutex: MutexId) {
|
|
|
|
let this = self.eval_context_mut();
|
|
|
|
let waiters = &mut this.machine.threads.sync.condvars[id].waiters;
|
|
|
|
assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
|
2020-05-18 09:28:19 -05:00
|
|
|
waiters.push_back(CondvarWaiter { thread, mutex });
|
2020-04-21 18:38:14 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Wake up some thread (if there is any) sleeping on the conditional
|
|
|
|
/// variable.
|
|
|
|
fn condvar_signal(&mut self, id: CondvarId) -> Option<(ThreadId, MutexId)> {
|
|
|
|
let this = self.eval_context_mut();
|
2020-11-01 18:23:27 -06:00
|
|
|
let current_thread = this.get_active_thread();
|
|
|
|
let condvar = &mut this.machine.threads.sync.condvars[id];
|
2020-11-15 12:30:26 -06:00
|
|
|
let data_race = &this.memory.extra.data_race;
|
2020-11-01 18:23:27 -06:00
|
|
|
|
|
|
|
// Each condvar signal happens-before the end of the condvar wake
|
2020-11-15 12:30:26 -06:00
|
|
|
if let Some(data_race) = data_race {
|
|
|
|
data_race.validate_lock_release(&mut condvar.data_race, current_thread);
|
|
|
|
}
|
2020-11-01 18:23:27 -06:00
|
|
|
condvar.waiters
|
2020-04-21 18:38:14 -05:00
|
|
|
.pop_front()
|
2020-11-01 18:23:27 -06:00
|
|
|
.map(|waiter| {
|
2020-11-15 12:30:26 -06:00
|
|
|
if let Some(data_race) = data_race {
|
|
|
|
data_race.validate_lock_acquire(&mut condvar.data_race, waiter.thread);
|
|
|
|
}
|
2020-11-01 18:23:27 -06:00
|
|
|
(waiter.thread, waiter.mutex)
|
|
|
|
})
|
2020-04-21 18:38:14 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
/// Remove the thread from the queue of threads waiting on this conditional variable.
|
|
|
|
fn condvar_remove_waiter(&mut self, id: CondvarId, thread: ThreadId) {
|
|
|
|
let this = self.eval_context_mut();
|
|
|
|
this.machine.threads.sync.condvars[id].waiters.retain(|waiter| waiter.thread != thread);
|
|
|
|
}
|
2020-10-01 13:47:31 -05:00
|
|
|
|
2020-10-02 03:47:36 -05:00
|
|
|
fn futex_wait(&mut self, addr: Pointer<stacked_borrows::Tag>, thread: ThreadId) {
|
2020-10-01 13:47:31 -05:00
|
|
|
let this = self.eval_context_mut();
|
2020-11-01 18:23:27 -06:00
|
|
|
let futex = &mut this.machine.threads.sync.futexes.entry(addr.erase_tag()).or_default();
|
|
|
|
let waiters = &mut futex.waiters;
|
2020-10-01 13:47:31 -05:00
|
|
|
assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
|
|
|
|
waiters.push_back(FutexWaiter { thread });
|
|
|
|
}
|
|
|
|
|
2020-10-02 03:47:36 -05:00
|
|
|
fn futex_wake(&mut self, addr: Pointer<stacked_borrows::Tag>) -> Option<ThreadId> {
|
2020-10-01 13:47:31 -05:00
|
|
|
let this = self.eval_context_mut();
|
2020-11-01 18:23:27 -06:00
|
|
|
let current_thread = this.get_active_thread();
|
|
|
|
let futex = &mut this.machine.threads.sync.futexes.get_mut(&addr.erase_tag())?;
|
2020-11-15 12:30:26 -06:00
|
|
|
let data_race = &this.memory.extra.data_race;
|
2020-11-01 18:23:27 -06:00
|
|
|
|
|
|
|
// Each futex-wake happens-before the end of the futex wait
|
2020-11-15 12:30:26 -06:00
|
|
|
if let Some(data_race) = data_race {
|
|
|
|
data_race.validate_lock_release(&mut futex.data_race, current_thread);
|
|
|
|
}
|
2020-11-01 18:23:27 -06:00
|
|
|
let res = futex.waiters.pop_front().map(|waiter| {
|
2020-11-15 12:30:26 -06:00
|
|
|
if let Some(data_race) = data_race {
|
|
|
|
data_race.validate_lock_acquire(&futex.data_race, waiter.thread);
|
|
|
|
}
|
2020-11-01 18:23:27 -06:00
|
|
|
waiter.thread
|
|
|
|
});
|
|
|
|
res
|
2020-10-01 13:47:31 -05:00
|
|
|
}
|
2020-10-02 16:34:14 -05:00
|
|
|
|
|
|
|
fn futex_remove_waiter(&mut self, addr: Pointer<stacked_borrows::Tag>, thread: ThreadId) {
|
|
|
|
let this = self.eval_context_mut();
|
|
|
|
if let Some(futex) = this.machine.threads.sync.futexes.get_mut(&addr.erase_tag()) {
|
|
|
|
futex.waiters.retain(|waiter| waiter.thread != thread);
|
|
|
|
}
|
|
|
|
}
|
2020-04-21 18:38:14 -05:00
|
|
|
}
|