Auto merge of #3370 - RalfJung:windows-sync, r=RalfJung

windows: remove support for slim rwlock

Since https://github.com/rust-lang/rust/pull/121956 we don't need it any more, and we are generally short on Windows staff, so reducing the amount of code we have to test and maintain sounds like a good idea.

The InitOnce shims are still used by `thread_local_key::StaticKey` on 64-bit windows-gnu.
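For context: after that rust PR, std's Windows locks are built on the futex-style wait APIs rather than SRW locks, and Miri keeps shims for those (`WaitOnAddress` etc. survive in the diff below). A minimal sketch of that wait/wake pattern, assuming the `windows-sys` crate and a Windows target — the `FLAG` one-shot flag is illustrative, not code from this PR:

```rust
// Sketch only: a one-shot event built on the futex-style APIs Miri still shims.
// Assumes the windows-sys crate; Windows-only. `FLAG` is an illustrative name.
use core::ffi::c_void;
use std::mem::size_of;
use std::sync::atomic::{AtomicU32, Ordering};
use std::thread;

use windows_sys::Win32::System::Threading::{WaitOnAddress, WakeByAddressAll, INFINITE};

static FLAG: AtomicU32 = AtomicU32::new(0);

fn wait_until_set() {
    // WaitOnAddress can wake spuriously, so re-check the value in a loop.
    while FLAG.load(Ordering::Acquire) == 0 {
        let undesired: u32 = 0;
        unsafe {
            // Blocks while the watched u32 still equals `undesired`.
            WaitOnAddress(
                FLAG.as_ptr() as *const c_void,
                &undesired as *const u32 as *const c_void,
                size_of::<u32>(),
                INFINITE,
            );
        }
    }
}

fn main() {
    let waiter = thread::spawn(wait_until_set);
    FLAG.store(1, Ordering::Release);
    // Wake every thread currently blocked on this address.
    unsafe { WakeByAddressAll(FLAG.as_ptr() as *const c_void) };
    waiter.join().unwrap();
}
```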
bors 2024-03-12 19:07:42 +00:00
commit b6f4ff580f
4 changed files with 0 additions and 602 deletions


@@ -297,32 +297,6 @@ fn emulate_foreign_item_inner(
}
// Synchronization primitives
"AcquireSRWLockExclusive" => {
let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
this.AcquireSRWLockExclusive(ptr)?;
}
"ReleaseSRWLockExclusive" => {
let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
this.ReleaseSRWLockExclusive(ptr)?;
}
"TryAcquireSRWLockExclusive" => {
let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let ret = this.TryAcquireSRWLockExclusive(ptr)?;
this.write_scalar(ret, dest)?;
}
"AcquireSRWLockShared" => {
let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
this.AcquireSRWLockShared(ptr)?;
}
"ReleaseSRWLockShared" => {
let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
this.ReleaseSRWLockShared(ptr)?;
}
"TryAcquireSRWLockShared" => {
let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let ret = this.TryAcquireSRWLockShared(ptr)?;
this.write_scalar(ret, dest)?;
}
"InitOnceBeginInitialize" => { "InitOnceBeginInitialize" => {
let [ptr, flags, pending, context] = let [ptr, flags, pending, context] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?; this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
@ -335,25 +309,6 @@ fn emulate_foreign_item_inner(
let result = this.InitOnceComplete(ptr, flags, context)?; let result = this.InitOnceComplete(ptr, flags, context)?;
this.write_scalar(result, dest)?; this.write_scalar(result, dest)?;
} }
"SleepConditionVariableSRW" => {
let [condvar, lock, timeout, flags] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let result = this.SleepConditionVariableSRW(condvar, lock, timeout, flags, dest)?;
this.write_scalar(result, dest)?;
}
"WakeConditionVariable" => {
let [condvar] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
this.WakeConditionVariable(condvar)?;
}
"WakeAllConditionVariable" => {
let [condvar] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
this.WakeAllConditionVariable(condvar)?;
}
"WaitOnAddress" => { "WaitOnAddress" => {
let [ptr_op, compare_op, size_op, timeout_op] = let [ptr_op, compare_op, size_op, timeout_op] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?; this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;


@@ -3,52 +3,14 @@
use rustc_target::abi::Size;
use crate::concurrency::init_once::InitOnceStatus;
use crate::concurrency::sync::{CondvarLock, RwLockMode};
use crate::concurrency::thread::MachineCallback;
use crate::*;
impl<'mir, 'tcx> EvalContextExtPriv<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
/// Try to reacquire the lock associated with the condition variable after we
/// were signaled.
fn reacquire_cond_lock(
&mut self,
thread: ThreadId,
lock: RwLockId,
mode: RwLockMode,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
this.unblock_thread(thread);
match mode {
RwLockMode::Read =>
if this.rwlock_is_write_locked(lock) {
this.rwlock_enqueue_and_block_reader(lock, thread);
} else {
this.rwlock_reader_lock(lock, thread);
},
RwLockMode::Write =>
if this.rwlock_is_locked(lock) {
this.rwlock_enqueue_and_block_writer(lock, thread);
} else {
this.rwlock_writer_lock(lock, thread);
},
}
Ok(())
}
// Windows sync primitives are pointer sized.
// We only use the first 4 bytes for the id.
fn srwlock_get_id(
&mut self,
rwlock_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, RwLockId> {
let this = self.eval_context_mut();
this.rwlock_get_or_create_id(rwlock_op, this.windows_ty_layout("SRWLOCK"), 0)
}
fn init_once_get_id(
&mut self,
init_once_op: &OpTy<'tcx, Provenance>,
@@ -56,117 +18,11 @@ fn init_once_get_id(
let this = self.eval_context_mut();
this.init_once_get_or_create_id(init_once_op, this.windows_ty_layout("INIT_ONCE"), 0)
}
fn condvar_get_id(
&mut self,
condvar_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, CondvarId> {
let this = self.eval_context_mut();
this.condvar_get_or_create_id(condvar_op, this.windows_ty_layout("CONDITION_VARIABLE"), 0)
}
}
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
#[allow(non_snake_case)]
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn AcquireSRWLockExclusive(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = this.srwlock_get_id(lock_op)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_locked(id) {
// Note: this will deadlock if the lock is already locked by this
// thread in any way.
//
// FIXME: Detect and report the deadlock proactively. (We currently
// report the deadlock only when no thread can continue execution,
// but we could detect that this lock is already locked and report
// an error.)
this.rwlock_enqueue_and_block_writer(id, active_thread);
} else {
this.rwlock_writer_lock(id, active_thread);
}
Ok(())
}
fn TryAcquireSRWLockExclusive(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
let id = this.srwlock_get_id(lock_op)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_locked(id) {
// Lock is already held.
Ok(Scalar::from_u8(0))
} else {
this.rwlock_writer_lock(id, active_thread);
Ok(Scalar::from_u8(1))
}
}
fn ReleaseSRWLockExclusive(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = this.srwlock_get_id(lock_op)?;
let active_thread = this.get_active_thread();
if !this.rwlock_writer_unlock(id, active_thread) {
// The docs do not say anything about this case, but it seems better to not allow it.
throw_ub_format!(
"calling ReleaseSRWLockExclusive on an SRWLock that is not exclusively locked by the current thread"
);
}
Ok(())
}
fn AcquireSRWLockShared(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = this.srwlock_get_id(lock_op)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_write_locked(id) {
this.rwlock_enqueue_and_block_reader(id, active_thread);
} else {
this.rwlock_reader_lock(id, active_thread);
}
Ok(())
}
fn TryAcquireSRWLockShared(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
let id = this.srwlock_get_id(lock_op)?;
let active_thread = this.get_active_thread();
if this.rwlock_is_write_locked(id) {
Ok(Scalar::from_u8(0))
} else {
this.rwlock_reader_lock(id, active_thread);
Ok(Scalar::from_u8(1))
}
}
fn ReleaseSRWLockShared(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = this.srwlock_get_id(lock_op)?;
let active_thread = this.get_active_thread();
if !this.rwlock_reader_unlock(id, active_thread) {
// The docs do not say anything about this case, but it seems better to not allow it.
throw_ub_format!(
"calling ReleaseSRWLockShared on an SRWLock that is not locked by the current thread"
);
}
Ok(())
}
fn InitOnceBeginInitialize(
&mut self,
init_once_op: &OpTy<'tcx, Provenance>,
@@ -399,131 +255,4 @@ fn WakeByAddressAll(&mut self, ptr_op: &OpTy<'tcx, Provenance>) -> InterpResult<
Ok(())
}
fn SleepConditionVariableSRW(
&mut self,
condvar_op: &OpTy<'tcx, Provenance>,
lock_op: &OpTy<'tcx, Provenance>,
timeout_op: &OpTy<'tcx, Provenance>,
flags_op: &OpTy<'tcx, Provenance>,
dest: &MPlaceTy<'tcx, Provenance>,
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
let condvar_id = this.condvar_get_id(condvar_op)?;
let lock_id = this.srwlock_get_id(lock_op)?;
let timeout_ms = this.read_scalar(timeout_op)?.to_u32()?;
let flags = this.read_scalar(flags_op)?.to_u32()?;
let timeout_time = if timeout_ms == this.eval_windows_u32("c", "INFINITE") {
None
} else {
let duration = Duration::from_millis(timeout_ms.into());
Some(this.machine.clock.now().checked_add(duration).unwrap())
};
let shared_mode = 0x1; // CONDITION_VARIABLE_LOCKMODE_SHARED is not in std
let mode = if flags == 0 {
RwLockMode::Write
} else if flags == shared_mode {
RwLockMode::Read
} else {
throw_unsup_format!("unsupported `Flags` {flags} in `SleepConditionVariableSRW`");
};
let active_thread = this.get_active_thread();
let was_locked = match mode {
RwLockMode::Read => this.rwlock_reader_unlock(lock_id, active_thread),
RwLockMode::Write => this.rwlock_writer_unlock(lock_id, active_thread),
};
if !was_locked {
throw_ub_format!(
"calling SleepConditionVariableSRW with an SRWLock that is not locked by the current thread"
);
}
this.block_thread(active_thread);
this.condvar_wait(condvar_id, active_thread, CondvarLock::RwLock { id: lock_id, mode });
if let Some(timeout_time) = timeout_time {
struct Callback<'tcx> {
thread: ThreadId,
condvar_id: CondvarId,
lock_id: RwLockId,
mode: RwLockMode,
dest: MPlaceTy<'tcx, Provenance>,
}
impl<'tcx> VisitProvenance for Callback<'tcx> {
fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
let Callback { thread: _, condvar_id: _, lock_id: _, mode: _, dest } = self;
dest.visit_provenance(visit);
}
}
impl<'mir, 'tcx: 'mir> MachineCallback<'mir, 'tcx> for Callback<'tcx> {
fn call(&self, this: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> {
this.reacquire_cond_lock(self.thread, self.lock_id, self.mode)?;
this.condvar_remove_waiter(self.condvar_id, self.thread);
let error_timeout = this.eval_windows("c", "ERROR_TIMEOUT");
this.set_last_error(error_timeout)?;
this.write_scalar(this.eval_windows("c", "FALSE"), &self.dest)?;
Ok(())
}
}
this.register_timeout_callback(
active_thread,
Time::Monotonic(timeout_time),
Box::new(Callback {
thread: active_thread,
condvar_id,
lock_id,
mode,
dest: dest.clone(),
}),
);
}
Ok(this.eval_windows("c", "TRUE"))
}
fn WakeConditionVariable(&mut self, condvar_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let condvar_id = this.condvar_get_id(condvar_op)?;
if let Some((thread, lock)) = this.condvar_signal(condvar_id) {
if let CondvarLock::RwLock { id, mode } = lock {
this.reacquire_cond_lock(thread, id, mode)?;
this.unregister_timeout_callback_if_exists(thread);
} else {
panic!("mutexes should not exist on windows");
}
}
Ok(())
}
fn WakeAllConditionVariable(
&mut self,
condvar_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let condvar_id = this.condvar_get_id(condvar_op)?;
while let Some((thread, lock)) = this.condvar_signal(condvar_id) {
if let CondvarLock::RwLock { id, mode } = lock {
this.reacquire_cond_lock(thread, id, mode)?;
this.unregister_timeout_callback_if_exists(thread);
} else {
panic!("mutexes should not exist on windows");
}
}
Ok(())
}
}


@@ -1,226 +0,0 @@
//@only-target-windows: Uses win32 api functions
// We are making scheduler assumptions here.
//@compile-flags: -Zmiri-preemption-rate=0
use std::ptr::null_mut;
use std::thread;
use windows_sys::Win32::System::Threading::{
AcquireSRWLockExclusive, AcquireSRWLockShared, ReleaseSRWLockExclusive, ReleaseSRWLockShared,
SleepConditionVariableSRW, WakeAllConditionVariable, CONDITION_VARIABLE,
CONDITION_VARIABLE_LOCKMODE_SHARED, INFINITE, SRWLOCK,
};
// not in windows-sys
const SRWLOCK_INIT: SRWLOCK = SRWLOCK { Ptr: null_mut() };
const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE { Ptr: null_mut() };
#[derive(Copy, Clone)]
struct SendPtr<T>(*mut T);
unsafe impl<T> Send for SendPtr<T> {}
/// threads should be able to reacquire the lock while it is locked by multiple other threads in shared mode
fn all_shared() {
println!("all_shared");
let mut lock = SRWLOCK_INIT;
let mut condvar = CONDITION_VARIABLE_INIT;
let lock_ptr = SendPtr(&mut lock);
let condvar_ptr = SendPtr(&mut condvar);
let mut handles = Vec::with_capacity(10);
// waiters
for i in 0..5 {
handles.push(thread::spawn(move || {
let condvar_ptr = condvar_ptr; // avoid field capture
let lock_ptr = lock_ptr; // avoid field capture
unsafe {
AcquireSRWLockShared(lock_ptr.0);
}
println!("exclusive waiter {i} locked");
let r = unsafe {
SleepConditionVariableSRW(
condvar_ptr.0,
lock_ptr.0,
INFINITE,
CONDITION_VARIABLE_LOCKMODE_SHARED,
)
};
assert_ne!(r, 0);
println!("exclusive waiter {i} reacquired lock");
// unlocking is unnecessary because the lock is never used again
}));
}
// ensures each waiter is waiting by this point
thread::yield_now();
// readers
for i in 0..5 {
handles.push(thread::spawn(move || {
let lock_ptr = lock_ptr; // avoid field capture
unsafe {
AcquireSRWLockShared(lock_ptr.0);
}
println!("reader {i} locked");
// switch to next reader or main thread
thread::yield_now();
unsafe {
ReleaseSRWLockShared(lock_ptr.0);
}
println!("reader {i} unlocked");
}));
}
// ensures each reader has acquired the lock
thread::yield_now();
unsafe {
WakeAllConditionVariable(condvar_ptr.0);
}
for handle in handles {
handle.join().unwrap();
}
}
// reacquiring a lock should wait until the lock is not exclusively locked
fn shared_sleep_and_exclusive_lock() {
println!("shared_sleep_and_exclusive_lock");
let mut lock = SRWLOCK_INIT;
let mut condvar = CONDITION_VARIABLE_INIT;
let lock_ptr = SendPtr(&mut lock);
let condvar_ptr = SendPtr(&mut condvar);
let mut waiters = Vec::with_capacity(5);
for i in 0..5 {
waiters.push(thread::spawn(move || {
let lock_ptr = lock_ptr; // avoid field capture
let condvar_ptr = condvar_ptr; // avoid field capture
unsafe {
AcquireSRWLockShared(lock_ptr.0);
}
println!("shared waiter {i} locked");
let r = unsafe {
SleepConditionVariableSRW(
condvar_ptr.0,
lock_ptr.0,
INFINITE,
CONDITION_VARIABLE_LOCKMODE_SHARED,
)
};
assert_ne!(r, 0);
println!("shared waiter {i} reacquired lock");
// unlocking is unnecessary because the lock is never used again
}));
}
// ensures each waiter is waiting by this point
thread::yield_now();
unsafe {
AcquireSRWLockExclusive(lock_ptr.0);
}
println!("main locked");
unsafe {
WakeAllConditionVariable(condvar_ptr.0);
}
// waiters are now waiting for the lock to be unlocked
thread::yield_now();
unsafe {
ReleaseSRWLockExclusive(lock_ptr.0);
}
println!("main unlocked");
for handle in waiters {
handle.join().unwrap();
}
}
// threads reacquiring locks should wait for all locks to be released first
fn exclusive_sleep_and_shared_lock() {
println!("exclusive_sleep_and_shared_lock");
let mut lock = SRWLOCK_INIT;
let mut condvar = CONDITION_VARIABLE_INIT;
let lock_ptr = SendPtr(&mut lock);
let condvar_ptr = SendPtr(&mut condvar);
let mut handles = Vec::with_capacity(10);
for i in 0..5 {
handles.push(thread::spawn(move || {
let lock_ptr = lock_ptr; // avoid field capture
let condvar_ptr = condvar_ptr; // avoid field capture
unsafe {
AcquireSRWLockExclusive(lock_ptr.0);
}
println!("exclusive waiter {i} locked");
let r = unsafe { SleepConditionVariableSRW(condvar_ptr.0, lock_ptr.0, INFINITE, 0) };
assert_ne!(r, 0);
println!("exclusive waiter {i} reacquired lock");
// switch to next waiter or main thread
thread::yield_now();
unsafe {
ReleaseSRWLockExclusive(lock_ptr.0);
}
println!("exclusive waiter {i} unlocked");
}));
}
for i in 0..5 {
handles.push(thread::spawn(move || {
let lock_ptr = lock_ptr; // avoid field capture
unsafe {
AcquireSRWLockShared(lock_ptr.0);
}
println!("reader {i} locked");
// switch to next reader or main thread
thread::yield_now();
unsafe {
ReleaseSRWLockShared(lock_ptr.0);
}
println!("reader {i} unlocked");
}));
}
// ensures each reader has acquired the lock
thread::yield_now();
unsafe {
WakeAllConditionVariable(condvar_ptr.0);
}
for handle in handles {
handle.join().unwrap();
}
}
fn main() {
all_shared();
shared_sleep_and_exclusive_lock();
exclusive_sleep_and_shared_lock();
}


@@ -1,60 +0,0 @@
all_shared
exclusive waiter 0 locked
exclusive waiter 1 locked
exclusive waiter 2 locked
exclusive waiter 3 locked
exclusive waiter 4 locked
reader 0 locked
reader 1 locked
reader 2 locked
reader 3 locked
reader 4 locked
exclusive waiter 0 reacquired lock
exclusive waiter 1 reacquired lock
exclusive waiter 2 reacquired lock
exclusive waiter 3 reacquired lock
exclusive waiter 4 reacquired lock
reader 0 unlocked
reader 1 unlocked
reader 2 unlocked
reader 3 unlocked
reader 4 unlocked
shared_sleep_and_exclusive_lock
shared waiter 0 locked
shared waiter 1 locked
shared waiter 2 locked
shared waiter 3 locked
shared waiter 4 locked
main locked
main unlocked
shared waiter 0 reacquired lock
shared waiter 1 reacquired lock
shared waiter 2 reacquired lock
shared waiter 3 reacquired lock
shared waiter 4 reacquired lock
exclusive_sleep_and_shared_lock
exclusive waiter 0 locked
exclusive waiter 1 locked
exclusive waiter 2 locked
exclusive waiter 3 locked
exclusive waiter 4 locked
reader 0 locked
reader 1 locked
reader 2 locked
reader 3 locked
reader 4 locked
reader 0 unlocked
reader 1 unlocked
reader 2 unlocked
reader 3 unlocked
reader 4 unlocked
exclusive waiter 0 reacquired lock
exclusive waiter 0 unlocked
exclusive waiter 1 reacquired lock
exclusive waiter 1 unlocked
exclusive waiter 2 reacquired lock
exclusive waiter 2 unlocked
exclusive waiter 3 reacquired lock
exclusive waiter 3 unlocked
exclusive waiter 4 reacquired lock
exclusive waiter 4 unlocked