move rwlock dequeuing to shared code, and use that code for Windows rwlocks

Ralf Jung 2020-06-28 09:47:20 +02:00
parent a9dc2796ca
commit 3a5bcb97ed
5 changed files with 160 additions and 161 deletions

View File

@@ -1,6 +1,5 @@
use std::convert::TryInto;
use std::time::{Duration, SystemTime};
use std::ops::Not;
use crate::*;
use stacked_borrows::Tag;
@@ -548,27 +547,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let active_thread = this.get_active_thread();
if this.rwlock_reader_unlock(id, active_thread) {
// The thread was a reader.
if this.rwlock_is_locked(id).not() {
// No more readers owning the lock. Give it to a writer if there
// is any.
this.rwlock_dequeue_and_lock_writer(id);
}
Ok(0)
} else if Some(active_thread) == this.rwlock_writer_unlock(id) {
// The thread was a writer.
//
// We are prioritizing writers here against the readers. As a
// result, not only readers can starve writers, but also writers can
// starve readers.
if this.rwlock_dequeue_and_lock_writer(id) {
// Someone got the write lock, nice.
} else {
// Give the lock to all readers.
while this.rwlock_dequeue_and_lock_reader(id) {
// Rinse and repeat.
}
}
} else if this.rwlock_writer_unlock(id, active_thread) {
Ok(0)
} else {
throw_ub_format!("unlocked an rwlock that was not locked by the active thread");
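
The unlock path can shrink like this because `rwlock_reader_unlock` and `rwlock_writer_unlock` now hand the rwlock to queued threads themselves (see the shared synchronization code further down). A minimal illustration of the behavior the shim relies on, written against `std::sync::RwLock` (which bottoms out in `pthread_rwlock_*` on typical POSIX targets); this test program is illustrative only and not part of the commit:

```rust
// Illustrative only (not part of this commit): once the last reader unlocks,
// the shared unlock code dequeues and wakes the blocked writer.
use std::sync::{Arc, RwLock};
use std::thread;

fn main() {
    let lock = Arc::new(RwLock::new(0));
    let guard = lock.read().unwrap(); // the main thread holds a read lock

    let lock2 = Arc::clone(&lock);
    let writer = thread::spawn(move || {
        // May block behind the read guard; if it does, dropping the guard wakes it.
        *lock2.write().unwrap() += 1;
    });

    assert_eq!(*guard, 0); // we still hold the read lock, so the writer cannot have run
    drop(guard); // last reader unlocks; a queued writer is dequeued and given the lock

    writer.join().unwrap();
    assert_eq!(*lock.read().unwrap(), 1);
}
```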

View File

@@ -257,7 +257,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// Better error for attempts to create a thread
"CreateThread" => {
throw_unsup_format!("Miri does not support threading");
throw_unsup_format!("Miri does not support concurrency on Windows");
}
// Incomplete shims that we "stub out" just to get pre-main initialization code to work.
@@ -292,7 +292,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
if this.frame().instance.to_string().starts_with("std::sys::windows::") => {
#[allow(non_snake_case)]
let &[_lpCriticalSection] = check_arg_count(args)?;
assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows not supported");
assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows is not supported");
// Nothing to do, not even a return value.
// (Windows locks are reentrant, and we have only 1 thread,
// so not doing any further checks here is at least not incorrect.)
@@ -301,7 +301,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
if this.frame().instance.to_string().starts_with("std::sys::windows::") => {
#[allow(non_snake_case)]
let &[_lpCriticalSection] = check_arg_count(args)?;
assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows not supported");
assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows is not supported");
// There is only one thread, so this always succeeds and returns TRUE.
this.write_scalar(Scalar::from_i32(1), dest)?;
}

View File

@@ -1,19 +1,22 @@
use rustc_target::abi::Size;
use crate::*;
// Locks are pointer-sized pieces of data, initialized to 0.
// We use them to count readers, with usize::MAX representing the write-locked state.
// We use the first 4 bytes to store the RwLockId.
fn deref_lock<'mir, 'tcx: 'mir>(
fn srwlock_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
lock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, Tag>> {
// `lock` is a pointer to `void*`; cast it to a pointer to `usize`.
let lock = ecx.deref_operand(lock_op)?;
let usize = ecx.machine.layouts.usize;
assert_eq!(lock.layout.size, usize.size);
Ok(lock.offset(Size::ZERO, MemPlaceMeta::None, usize, ecx)?)
) -> InterpResult<'tcx, RwLockId> {
let id = ecx.read_scalar_at_offset(lock_op, 0, ecx.machine.layouts.u32)?.to_u32()?;
if id == 0 {
// 0 is a default value and also not a valid rwlock id. Need to allocate
// a new rwlock.
let id = ecx.rwlock_create();
ecx.write_scalar_at_offset(lock_op, 0, id.to_u32_scalar(), ecx.machine.layouts.u32)?;
Ok(id)
} else {
Ok(RwLockId::from_u32(id))
}
}
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
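
The lazy initialization above works because `SRWLOCK_INIT` is all zeroes and 0 is never a valid `RwLockId` (the shared code below moves the id types onto `NonZeroU32`, hence the new import there). A standalone sketch of the same "store the id in the lock's own memory" pattern, using hypothetical stand-in types rather than Miri's API:

```rust
// Standalone sketch of the "store the id in the lock's own memory" pattern;
// `Registry` and `LockId` are hypothetical stand-ins, not Miri types.
type LockId = u32; // 0 is reserved to mean "not yet initialized"

#[derive(Default)]
struct Registry {
    next_id: LockId, // ids handed out so far; the real code also keeps per-lock state
}

impl Registry {
    /// `stored` plays the role of the first 4 bytes of the user's SRWLOCK.
    fn get_or_create(&mut self, stored: &mut u32) -> LockId {
        if *stored == 0 {
            self.next_id += 1; // ids start at 1, so 0 keeps meaning "uninitialized"
            *stored = self.next_id; // persist the id back into the lock's bytes
        }
        *stored
    }
}

fn main() {
    let mut registry = Registry::default();
    let mut srwlock_bytes = 0u32; // SRWLOCK_INIT is all zeroes
    assert_eq!(registry.get_or_create(&mut srwlock_bytes), 1);
    // A second call on the same lock memory finds the existing id.
    assert_eq!(registry.get_or_create(&mut srwlock_bytes), 1);
    // A different lock gets a fresh id.
    let mut other_lock_bytes = 0u32;
    assert_eq!(registry.get_or_create(&mut other_lock_bytes), 2);
}
```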
@@ -24,17 +27,20 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
lock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows is not supported");
let id = srwlock_get_or_create_id(this, lock_op)?;
let active_thread = this.get_active_thread();
let lock = deref_lock(this, lock_op)?;
let lock_val = this.read_scalar(lock.into())?.to_machine_usize(this)?;
if lock_val == 0 {
// Currently not locked. Lock it.
let new_val = Scalar::from_machine_usize(this.machine_usize_max(), this);
this.write_scalar(new_val, lock.into())?;
if this.rwlock_is_locked(id) {
// Note: this will deadlock if the lock is already locked by this
// thread in any way.
//
// FIXME: Detect and report the deadlock proactively. (We currently
// report the deadlock only when no thread can continue execution,
// but we could detect that this lock is already locked and report
// an error.)
this.rwlock_enqueue_and_block_writer(id, active_thread);
} else {
// Lock is already held. This is a deadlock.
throw_machine_stop!(TerminationInfo::Deadlock);
this.rwlock_writer_lock(id, active_thread);
}
Ok(())
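
As the FIXME says, a re-entrant exclusive acquire now just parks the thread on the writer queue, and the deadlock only surfaces once no thread can make progress. A hedged illustration of that situation using `std::sync::RwLock` (which is built on an SRW lock on Windows targets; the standard library may also detect this case and panic, so treat this purely as a sketch, not as a test from this commit):

```rust
// Illustrative only: on a Windows target both write() calls go through
// AcquireSRWLockExclusive. The second call on the same thread blocks forever,
// and Miri reports a deadlock only once no thread can continue.
use std::sync::RwLock;

fn main() {
    let lock = RwLock::new(0);
    let _first = lock.write().unwrap();
    // Re-acquiring on the same thread is the self-deadlock described above;
    // this program never makes progress past this point.
    let _second = lock.write().unwrap();
}
```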
@@ -46,18 +52,15 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
lock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, u8> {
let this = self.eval_context_mut();
assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows is not supported");
let id = srwlock_get_or_create_id(this, lock_op)?;
let active_thread = this.get_active_thread();
let lock = deref_lock(this, lock_op)?;
let lock_val = this.read_scalar(lock.into())?.to_machine_usize(this)?;
if lock_val == 0 {
// Currently not locked. Lock it.
let new_val = this.machine_usize_max();
this.write_scalar(Scalar::from_machine_usize(new_val, this), lock.into())?;
Ok(1)
} else {
if this.rwlock_is_locked(id) {
// Lock is already held.
Ok(0)
} else {
this.rwlock_writer_lock(id, active_thread);
Ok(1)
}
}
@@ -67,17 +70,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
lock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows is not supported");
let id = srwlock_get_or_create_id(this, lock_op)?;
let active_thread = this.get_active_thread();
let lock = deref_lock(this, lock_op)?;
let lock_val = this.read_scalar(lock.into())?.to_machine_usize(this)?;
if lock_val == this.machine_usize_max() {
// Currently locked. Unlock it.
let new_val = 0;
this.write_scalar(Scalar::from_machine_usize(new_val, this), lock.into())?;
} else {
// Lock is not locked.
throw_ub_format!("calling ReleaseSRWLockExclusive on an SRWLock that is not exclusively locked");
if !this.rwlock_writer_unlock(id, active_thread) {
// The docs do not say anything about this case, but it seems better to not allow it.
throw_ub_format!("calling ReleaseSRWLockExclusive on an SRWLock that is not exclusively locked by the current thread");
}
Ok(())
@@ -89,21 +87,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
lock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows is not supported");
let id = srwlock_get_or_create_id(this, lock_op)?;
let active_thread = this.get_active_thread();
let lock = deref_lock(this, lock_op)?;
let lock_val = this.read_scalar(lock.into())?.to_machine_usize(this)?;
if lock_val == this.machine_usize_max() {
// Currently write locked. This is a deadlock.
throw_machine_stop!(TerminationInfo::Deadlock);
if this.rwlock_is_write_locked(id) {
this.rwlock_enqueue_and_block_reader(id, active_thread);
} else {
// Bump up read counter (cannot overflow as we just checked against usize::MAX).
let new_val = lock_val + 1;
// Make sure this does not reach the "write locked" flag.
if new_val == this.machine_usize_max() {
throw_unsup_format!("SRWLock read-acquired too many times");
}
this.write_scalar(Scalar::from_machine_usize(new_val, this), lock.into())?;
this.rwlock_reader_lock(id, active_thread);
}
Ok(())
@@ -115,21 +105,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
lock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, u8> {
let this = self.eval_context_mut();
assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows is not supported");
let id = srwlock_get_or_create_id(this, lock_op)?;
let active_thread = this.get_active_thread();
let lock = deref_lock(this, lock_op)?;
let lock_val = this.read_scalar(lock.into())?.to_machine_usize(this)?;
if lock_val == this.machine_usize_max() {
// Currently write locked.
if this.rwlock_is_write_locked(id) {
Ok(0)
} else {
// Bump up read counter (cannot overflow as we just checked against usize::MAX).
let new_val = lock_val + 1;
// Make sure this does not reach the "write locked" flag.
if new_val == this.machine_usize_max() {
throw_unsup_format!("SRWLock read-acquired too many times");
}
this.write_scalar(Scalar::from_machine_usize(new_val, this), lock.into())?;
this.rwlock_reader_lock(id, active_thread);
Ok(1)
}
}
@@ -140,20 +122,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
lock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows is not supported");
let id = srwlock_get_or_create_id(this, lock_op)?;
let active_thread = this.get_active_thread();
let lock = deref_lock(this, lock_op)?;
let lock_val = this.read_scalar(lock.into())?.to_machine_usize(this)?;
if lock_val == this.machine_usize_max() {
// Currently write locked. This is UB.
throw_ub_format!("calling ReleaseSRWLockShared on write-locked SRWLock");
} else if lock_val == 0 {
// Currently not locked at all.
throw_ub_format!("calling ReleaseSRWLockShared on unlocked SRWLock");
} else {
// Decrement read counter (cannot overflow as we just checked against 0).
let new_val = lock_val - 1;
this.write_scalar(Scalar::from_machine_usize(new_val, this), lock.into())?;
if !this.rwlock_reader_unlock(id, active_thread) {
// The docs do not say anything about this case, but it seems better to not allow it.
throw_ub_format!("calling ReleaseSRWLockShared on an SRWLock that is not locked by the current thread");
}
Ok(())
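
Taken together, the shims above let a program drive SRW locks through the usual win32 entry points. A rough, Windows-only sketch of such a program follows; the `SRWLOCK` layout and the `extern "system"` signatures follow the documented win32 API, and the program is illustrative only, not a test from this commit:

```rust
// Rough, Windows-only sketch of a program exercising the shims above.
#[cfg(windows)]
fn main() {
    use std::ffi::c_void;

    #[repr(C)]
    struct SRWLOCK {
        ptr: *mut c_void,
    }

    extern "system" {
        fn AcquireSRWLockExclusive(lock: *mut SRWLOCK);
        fn TryAcquireSRWLockExclusive(lock: *mut SRWLOCK) -> u8;
        fn ReleaseSRWLockExclusive(lock: *mut SRWLOCK);
        fn AcquireSRWLockShared(lock: *mut SRWLOCK);
        fn ReleaseSRWLockShared(lock: *mut SRWLOCK);
    }

    // SRWLOCK_INIT is all zeroes, so the stored RwLockId starts out as 0.
    let mut lock = SRWLOCK { ptr: std::ptr::null_mut() };
    unsafe {
        AcquireSRWLockExclusive(&mut lock);
        // Under the shims the lock is now write-locked, so a try-acquire fails (returns 0).
        assert_eq!(TryAcquireSRWLockExclusive(&mut lock), 0);
        ReleaseSRWLockExclusive(&mut lock);

        AcquireSRWLockShared(&mut lock);
        ReleaseSRWLockShared(&mut lock);
    }
}

#[cfg(not(windows))]
fn main() {}
```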

View File

@@ -3,6 +3,8 @@ use std::convert::TryFrom;
use std::num::NonZeroU32;
use std::ops::Not;
use log::trace;
use rustc_index::vec::{Idx, IndexVec};
use crate::*;
@@ -102,6 +104,52 @@ pub(super) struct SynchronizationState {
condvars: IndexVec<CondvarId, Condvar>,
}
// Private extension trait for local helper methods
impl<'mir, 'tcx: 'mir> EvalContextExtPriv<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Take a reader out of the queue waiting for the lock.
/// Returns `true` if some thread got the rwlock.
#[inline]
fn rwlock_dequeue_and_lock_reader(&mut self, id: RwLockId) -> bool {
let this = self.eval_context_mut();
if let Some(reader) = this.machine.threads.sync.rwlocks[id].reader_queue.pop_front() {
this.unblock_thread(reader);
this.rwlock_reader_lock(id, reader);
true
} else {
false
}
}
/// Take the writer out of the queue waiting for the lock.
/// Returns `true` if some thread got the rwlock.
#[inline]
fn rwlock_dequeue_and_lock_writer(&mut self, id: RwLockId) -> bool {
let this = self.eval_context_mut();
if let Some(writer) = this.machine.threads.sync.rwlocks[id].writer_queue.pop_front() {
this.unblock_thread(writer);
this.rwlock_writer_lock(id, writer);
true
} else {
false
}
}
/// Take a thread out of the queue waiting for the mutex, and lock
/// the mutex for it. Returns `true` if some thread has the mutex now.
#[inline]
fn mutex_dequeue_and_lock(&mut self, id: MutexId) -> bool {
let this = self.eval_context_mut();
if let Some(thread) = this.machine.threads.sync.mutexes[id].queue.pop_front() {
this.unblock_thread(thread);
this.mutex_lock(id, thread);
true
} else {
false
}
}
}
// Public interface to synchronization primitives. Please note that in most
// cases, the function calls are infallible and it is the client's (shim
// implementation's) responsibility to detect and deal with erroneous
@@ -124,8 +172,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
/// Check if locked.
fn mutex_is_locked(&mut self, id: MutexId) -> bool {
let this = self.eval_context_mut();
fn mutex_is_locked(&self, id: MutexId) -> bool {
let this = self.eval_context_ref();
this.machine.threads.sync.mutexes[id].owner.is_some()
}
@@ -174,7 +222,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
Some(old_lock_count)
} else {
// Mutex is unlocked.
// Mutex is not locked.
None
}
}
@@ -188,20 +236,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.block_thread(thread);
}
#[inline]
/// Take a thread out of the queue waiting for the mutex, and lock
/// the mutex for it. Returns `true` if some thread has the mutex now.
fn mutex_dequeue_and_lock(&mut self, id: MutexId) -> bool {
let this = self.eval_context_mut();
if let Some(thread) = this.machine.threads.sync.mutexes[id].queue.pop_front() {
this.unblock_thread(thread);
this.mutex_lock(id, thread);
true
} else {
false
}
}
#[inline]
/// Create state for a new read write lock.
fn rwlock_create(&mut self) -> RwLockId {
@@ -211,17 +245,23 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
/// Check if locked.
fn rwlock_is_locked(&mut self, id: RwLockId) -> bool {
let this = self.eval_context_mut();
this.machine.threads.sync.rwlocks[id].writer.is_some()
|| this.machine.threads.sync.rwlocks[id].readers.is_empty().not()
fn rwlock_is_locked(&self, id: RwLockId) -> bool {
let this = self.eval_context_ref();
let rwlock = &this.machine.threads.sync.rwlocks[id];
trace!(
"rwlock_is_locked: {:?} writer is {:?} and there are {} reader threads (some of which could hold multiple read locks)",
id, rwlock.writer, rwlock.readers.len(),
);
rwlock.writer.is_some() || rwlock.readers.is_empty().not()
}
#[inline]
/// Check if write locked.
fn rwlock_is_write_locked(&mut self, id: RwLockId) -> bool {
let this = self.eval_context_mut();
this.machine.threads.sync.rwlocks[id].writer.is_some()
fn rwlock_is_write_locked(&self, id: RwLockId) -> bool {
let this = self.eval_context_ref();
let rwlock = &this.machine.threads.sync.rwlocks[id];
trace!("rwlock_is_write_locked: {:?} writer is {:?}", id, rwlock.writer);
rwlock.writer.is_some()
}
/// Read-lock the lock by adding the `reader` to the list of threads that own
@@ -229,12 +269,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
fn rwlock_reader_lock(&mut self, id: RwLockId, reader: ThreadId) {
let this = self.eval_context_mut();
assert!(!this.rwlock_is_write_locked(id), "the lock is write locked");
trace!("rwlock_reader_lock: {:?} now also held (one more time) by {:?}", id, reader);
let count = this.machine.threads.sync.rwlocks[id].readers.entry(reader).or_insert(0);
*count = count.checked_add(1).expect("the reader counter overflowed");
}
/// Try read-unlock the lock for `reader`. Returns `true` if succeeded,
/// `false` if this `reader` did not hold the lock.
/// Try read-unlock the lock for `reader` and potentially give the lock to a new owner.
/// Returns `true` if succeeded, `false` if this `reader` did not hold the lock.
fn rwlock_reader_unlock(&mut self, id: RwLockId, reader: ThreadId) -> bool {
let this = self.eval_context_mut();
match this.machine.threads.sync.rwlocks[id].readers.entry(reader) {
@@ -243,12 +284,19 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
assert!(*count > 0, "rwlock locked with count == 0");
*count -= 1;
if *count == 0 {
trace!("rwlock_reader_unlock: {:?} no longer held by {:?}", id, reader);
entry.remove();
} else {
trace!("rwlock_reader_unlock: {:?} held one less time by {:?}", id, reader);
}
true
}
Entry::Vacant(_) => false,
Entry::Vacant(_) => return false, // we did not even own this lock
}
// The thread was a reader. If the lock is not held any more, give it to a writer.
if this.rwlock_is_locked(id).not() {
this.rwlock_dequeue_and_lock_writer(id);
}
true
}
#[inline]
@@ -259,38 +307,49 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
reader: ThreadId,
) {
let this = self.eval_context_mut();
assert!(this.rwlock_is_write_locked(id), "queueing on not write locked lock");
assert!(this.rwlock_is_write_locked(id), "read-queueing on not write locked rwlock");
this.machine.threads.sync.rwlocks[id].reader_queue.push_back(reader);
this.block_thread(reader);
}
#[inline]
/// Take a reader out of the queue waiting for the lock.
/// Returns `true` if some thread got the rwlock.
fn rwlock_dequeue_and_lock_reader(&mut self, id: RwLockId) -> bool {
let this = self.eval_context_mut();
if let Some(reader) = this.machine.threads.sync.rwlocks[id].reader_queue.pop_front() {
this.unblock_thread(reader);
this.rwlock_reader_lock(id, reader);
true
} else {
false
}
}
#[inline]
/// Lock by setting the writer that owns the lock.
fn rwlock_writer_lock(&mut self, id: RwLockId, writer: ThreadId) {
let this = self.eval_context_mut();
assert!(!this.rwlock_is_locked(id), "the rwlock is already locked");
trace!("rwlock_writer_lock: {:?} now held by {:?}", id, writer);
this.machine.threads.sync.rwlocks[id].writer = Some(writer);
}
#[inline]
/// Try to unlock by removing the writer.
fn rwlock_writer_unlock(&mut self, id: RwLockId) -> Option<ThreadId> {
fn rwlock_writer_unlock(&mut self, id: RwLockId, expected_writer: ThreadId) -> bool {
let this = self.eval_context_mut();
this.machine.threads.sync.rwlocks[id].writer.take()
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
if let Some(current_writer) = rwlock.writer {
if current_writer != expected_writer {
// Only the owner can unlock the rwlock.
return false;
}
rwlock.writer = None;
trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, expected_writer);
// The thread was a writer.
//
// We are prioritizing writers here against the readers. As a
// result, not only readers can starve writers, but also writers can
// starve readers.
if this.rwlock_dequeue_and_lock_writer(id) {
// Someone got the write lock, nice.
} else {
// Give the lock to all readers.
while this.rwlock_dequeue_and_lock_reader(id) {
// Rinse and repeat.
}
}
true
} else {
false
}
}
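
The handoff policy described in the comment above is easy to state in isolation: a queued writer, if any, gets the lock; only when the writer queue is empty are all queued readers admitted at once. A standalone sketch of just that decision in plain Rust (not Miri's code; thread ids are plain integers and the queues are simple `VecDeque`s):

```rust
// Standalone sketch of the handoff policy described in the comment above.
use std::collections::VecDeque;

#[derive(Debug, PartialEq)]
enum Handoff {
    Writer(u32),       // one queued writer takes the lock
    Readers(Vec<u32>), // otherwise every queued reader takes it
    Nobody,            // no one was waiting
}

fn on_unlock(writer_queue: &mut VecDeque<u32>, reader_queue: &mut VecDeque<u32>) -> Handoff {
    if let Some(writer) = writer_queue.pop_front() {
        // Writers are preferred, so a steady stream of writers can starve readers.
        Handoff::Writer(writer)
    } else if reader_queue.is_empty() {
        Handoff::Nobody
    } else {
        Handoff::Readers(reader_queue.drain(..).collect())
    }
}

fn main() {
    let mut writers = VecDeque::from(vec![7]);
    let mut readers = VecDeque::from(vec![1, 2, 3]);
    // The queued writer wins even though readers were waiting first.
    assert_eq!(on_unlock(&mut writers, &mut readers), Handoff::Writer(7));
    // With no writer left, all queued readers are admitted together.
    assert_eq!(on_unlock(&mut writers, &mut readers), Handoff::Readers(vec![1, 2, 3]));
}
```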
#[inline]
@@ -301,25 +360,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
writer: ThreadId,
) {
let this = self.eval_context_mut();
assert!(this.rwlock_is_locked(id), "queueing on unlocked lock");
assert!(this.rwlock_is_locked(id), "write-queueing on unlocked rwlock");
this.machine.threads.sync.rwlocks[id].writer_queue.push_back(writer);
this.block_thread(writer);
}
#[inline]
/// Take the writer out of the queue waiting for the lock.
/// Returns `true` if some thread got the rwlock.
fn rwlock_dequeue_and_lock_writer(&mut self, id: RwLockId) -> bool {
let this = self.eval_context_mut();
if let Some(writer) = this.machine.threads.sync.rwlocks[id].writer_queue.pop_front() {
this.unblock_thread(writer);
this.rwlock_writer_lock(id, writer);
true
} else {
false
}
}
#[inline]
/// Create state for a new condition variable.
fn condvar_create(&mut self) -> CondvarId {

View File

@@ -3,7 +3,7 @@
use std::thread;
// error-pattern: Miri does not support threading
// error-pattern: Miri does not support concurrency on Windows
fn main() {
thread::spawn(|| {});