Windows InitOnce: also store ID outside addressable memory
commit 0f7d321684 (parent 8f342edd7c)
src/concurrency/init_once.rs

@@ -2,7 +2,6 @@ use std::collections::VecDeque;
 
 use rustc_index::Idx;
 
-use super::sync::EvalContextExtPriv as _;
 use super::vector_clock::VClock;
 use crate::*;
 
@@ -27,22 +26,6 @@ pub(super) struct InitOnce {
 
 impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
 pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
-    fn init_once_get_or_create_id(
-        &mut self,
-        lock: &MPlaceTy<'tcx>,
-        offset: u64,
-    ) -> InterpResult<'tcx, InitOnceId> {
-        let this = self.eval_context_mut();
-        this.get_or_create_id(
-            lock,
-            offset,
-            |ecx| &mut ecx.machine.sync.init_onces,
-            |_| interp_ok(Default::default()),
-        )?
-        .ok_or_else(|| err_ub_format!("init_once has invalid ID"))
-        .into()
-    }
-
     #[inline]
     fn init_once_status(&mut self, id: InitOnceId) -> InitOnceStatus {
         let this = self.eval_context_ref();
src/concurrency/sync.rs

@@ -11,11 +11,6 @@ use super::init_once::InitOnce;
 use super::vector_clock::VClock;
 use crate::*;
 
-pub trait SyncId {
-    fn from_u32(id: u32) -> Self;
-    fn to_u32(&self) -> u32;
-}
-
 /// We cannot use the `newtype_index!` macro because we have to use 0 as a
 /// sentinel value meaning that the identifier is not assigned. This is because
 /// the pthreads static initializers initialize memory with zeros (see the
@@ -27,16 +22,6 @@ macro_rules! declare_id {
         #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
         pub struct $name(std::num::NonZero<u32>);
 
-        impl $crate::concurrency::sync::SyncId for $name {
-            // Panics if `id == 0`.
-            fn from_u32(id: u32) -> Self {
-                Self(std::num::NonZero::new(id).unwrap())
-            }
-            fn to_u32(&self) -> u32 {
-                self.0.get()
-            }
-        }
-
         impl $crate::VisitProvenance for $name {
             fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
         }
@@ -55,12 +40,6 @@ macro_rules! declare_id {
                 usize::try_from(self.0.get() - 1).unwrap()
             }
         }
-
-        impl $name {
-            pub fn to_u32_scalar(&self) -> Scalar {
-                Scalar::from_u32(self.0.get())
-            }
-        }
     };
 }
 pub(super) use declare_id;
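The doc comment above gives the reason `declare_id!` cannot reuse `newtype_index!`: the value 0 must stay available as the "unassigned" sentinel that zero-filled pthreads static initializers naturally produce. A minimal standalone sketch of the same trick; the `DemoId` type is hypothetical, not Miri's macro output:

```rust
use std::num::NonZero;

// Hypothetical stand-in for the IDs produced by `declare_id!`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct DemoId(NonZero<u32>);

impl DemoId {
    // Panics if `id == 0`: zero means "no ID assigned yet".
    fn from_u32(id: u32) -> Self {
        Self(NonZero::new(id).expect("0 is the unassigned sentinel"))
    }
    fn to_u32(self) -> u32 {
        self.0.get()
    }
}

fn main() {
    // The niche left by forbidding 0 means `Option<DemoId>` still fits in
    // 4 bytes, so representing "unassigned" costs nothing extra.
    assert_eq!(std::mem::size_of::<Option<DemoId>>(), 4);
    let id = DemoId::from_u32(1);
    assert_eq!(id.to_u32(), 1);
}
```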
@@ -166,56 +145,6 @@ pub struct SynchronizationObjects {
 
 // Private extension trait for local helper methods
 impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
 pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
-    /// Lazily initialize the ID of this Miri sync structure.
-    /// If memory stores '0', that indicates uninit and we generate a new instance.
-    /// Returns `None` if memory stores a non-zero invalid ID.
-    ///
-    /// `get_objs` must return the `IndexVec` that stores all the objects of this type.
-    /// `create_obj` must create the new object if initialization is needed.
-    #[inline]
-    fn get_or_create_id<Id: SyncId + Idx, T>(
-        &mut self,
-        lock: &MPlaceTy<'tcx>,
-        offset: u64,
-        get_objs: impl for<'a> Fn(&'a mut MiriInterpCx<'tcx>) -> &'a mut IndexVec<Id, T>,
-        create_obj: impl for<'a> FnOnce(&'a mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
-    ) -> InterpResult<'tcx, Option<Id>> {
-        let this = self.eval_context_mut();
-        let offset = Size::from_bytes(offset);
-        assert!(lock.layout.size >= offset + this.machine.layouts.u32.size);
-        let id_place = lock.offset(offset, this.machine.layouts.u32, this)?;
-        let next_index = get_objs(this).next_index();
-
-        // Since we are lazy, this update has to be atomic.
-        let (old, success) = this
-            .atomic_compare_exchange_scalar(
-                &id_place,
-                &ImmTy::from_uint(0u32, this.machine.layouts.u32),
-                Scalar::from_u32(next_index.to_u32()),
-                AtomicRwOrd::Relaxed, // deliberately *no* synchronization
-                AtomicReadOrd::Relaxed,
-                false,
-            )?
-            .to_scalar_pair();
-
-        interp_ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
-            // We set the in-memory ID to `next_index`, now also create this object in the machine
-            // state.
-            let obj = create_obj(this)?;
-            let new_index = get_objs(this).push(obj);
-            assert_eq!(next_index, new_index);
-            Some(new_index)
-        } else {
-            let id = Id::from_u32(old.to_u32().expect("layout is u32"));
-            if get_objs(this).get(id).is_none() {
-                // The in-memory ID is invalid.
-                None
-            } else {
-                Some(id)
-            }
-        })
-    }
-
     fn condvar_reacquire_mutex(
         &mut self,
         mutex: MutexId,
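The `get_or_create_id` helper deleted above is the mechanism this patch series replaces: the ID lived in guest memory and was claimed by a compare-exchange from 0. As its comment notes, the update has to be atomic because two threads can race to lazily initialize the same primitive. A freestanding sketch of that claim protocol using a plain `AtomicU32`; the names are illustrative, not Miri's interpreter API:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

/// Lazily claim an ID stored in `slot` (0 = unassigned), mirroring the
/// compare-exchange in the removed `get_or_create_id`. Returns the ID in effect.
fn get_or_create_id(slot: &AtomicU32, next_index: u32, create_obj: impl FnOnce()) -> u32 {
    // The update must be atomic: two threads may race to initialize the slot.
    match slot.compare_exchange(0, next_index, Ordering::Relaxed, Ordering::Relaxed) {
        Ok(_) => {
            // We set the in-memory ID; now also create the object itself.
            create_obj();
            next_index
        }
        // Lost the race (or it was initialized long ago): use the stored ID.
        Err(old) => old,
    }
}

fn main() {
    let slot = AtomicU32::new(0);
    assert_eq!(get_or_create_id(&slot, 1, || println!("created")), 1);
    assert_eq!(get_or_create_id(&slot, 2, || unreachable!()), 1);
}
```

The real helper additionally validates a pre-existing nonzero ID against the object table and returns `None` for a bogus value, which the callers report as UB.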
@@ -248,6 +177,80 @@ impl SynchronizationObjects {
     pub fn condvar_create(&mut self) -> CondvarId {
         self.condvars.push(Default::default())
     }
+
+    pub fn init_once_create(&mut self) -> InitOnceId {
+        self.init_onces.push(Default::default())
+    }
 }
+
+impl<'tcx> AllocExtra<'tcx> {
+    pub fn get_sync<T: 'static>(&self, offset: Size) -> Option<&T> {
+        self.sync.get(&offset).and_then(|s| s.downcast_ref::<T>())
+    }
+}
+
+/// We designate an `init` field in all primitives.
+/// If `init` is set to this, we consider the primitive initialized.
+pub const LAZY_INIT_COOKIE: u32 = 0xcafe_affe;
+
+/// Helper for lazily initialized `alloc_extra.sync` data:
+/// this forces an immediate init.
+pub fn lazy_sync_init<'tcx, T: 'static + Copy>(
+    ecx: &mut MiriInterpCx<'tcx>,
+    primitive: &MPlaceTy<'tcx>,
+    init_offset: Size,
+    data: T,
+) -> InterpResult<'tcx> {
+    let init_field = primitive.offset(init_offset, ecx.machine.layouts.u32, ecx)?;
+
+    let (alloc, offset, _) = ecx.ptr_get_alloc_id(primitive.ptr(), 0)?;
+    let (alloc_extra, _machine) = ecx.get_alloc_extra_mut(alloc)?;
+    alloc_extra.sync.insert(offset, Box::new(data));
+    // Mark this as "initialized".
+    ecx.write_scalar_atomic(
+        Scalar::from_u32(LAZY_INIT_COOKIE),
+        &init_field,
+        AtomicWriteOrd::Relaxed,
+    )?;
+    interp_ok(())
+}
+
+/// Helper for lazily initialized `alloc_extra.sync` data:
+/// Checks if the primitive is initialized, and returns its associated data if so.
+/// Otherwise, returns `None`.
+pub fn lazy_sync_get_data<'tcx, T: 'static + Copy>(
+    ecx: &mut MiriInterpCx<'tcx>,
+    primitive: &MPlaceTy<'tcx>,
+    init_offset: Size,
+    name: &str,
+) -> InterpResult<'tcx, Option<T>> {
+    let init_field = primitive.offset(init_offset, ecx.machine.layouts.u32, ecx)?;
+    // Check if this is already initialized. Needs to be atomic because we can race with another
+    // thread initializing. Needs to be an RMW operation to ensure we read the *latest* value.
+    // So we just try to replace LAZY_INIT_COOKIE with itself.
+    let init_cookie = Scalar::from_u32(LAZY_INIT_COOKIE);
+    let (_init, success) = ecx
+        .atomic_compare_exchange_scalar(
+            &init_field,
+            &ImmTy::from_scalar(init_cookie, ecx.machine.layouts.u32),
+            init_cookie,
+            AtomicRwOrd::Acquire,
+            AtomicReadOrd::Acquire,
+            /* can_fail_spuriously */ false,
+        )?
+        .to_scalar_pair();
+    if success.to_bool()? {
+        // If it is initialized, it must be found in the "sync primitive" table,
+        // or else it has been moved illegally.
+        let (alloc, offset, _) = ecx.ptr_get_alloc_id(primitive.ptr(), 0)?;
+        let alloc_extra = ecx.get_alloc_extra(alloc)?;
+        let data = alloc_extra
+            .get_sync::<T>(offset)
+            .ok_or_else(|| err_ub_format!("`{name}` can't be moved after first use"))?;
+        interp_ok(Some(*data))
+    } else {
+        interp_ok(None)
+    }
+}
 
 // Public interface to synchronization primitives. Please note that in most
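These new helpers are the core of the change: a primitive's data, including its ID, now lives in `alloc_extra.sync`, a side table keyed by offset that sits outside guest-addressable memory, while the guest-visible field only ever holds `LAZY_INIT_COOKIE`. The subtle part is the initialized-check: it is a cookie-for-cookie compare-exchange rather than a plain load, because in Miri's weak-memory model an RMW is guaranteed to observe the latest store. A rough standalone model of the pattern; the types and names here are illustrative, not Miri's:

```rust
use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, Ordering};

const LAZY_INIT_COOKIE: u32 = 0xcafe_affe;

struct SideTable {
    // Stand-in for `alloc_extra.sync`: data lives *outside* the guest field.
    data: HashMap<usize, u32>,
}

impl SideTable {
    fn lazy_init(&mut self, init_field: &AtomicU32, offset: usize, data: u32) {
        self.data.insert(offset, data);
        // Mark as initialized only after the side table is populated.
        init_field.store(LAZY_INIT_COOKIE, Ordering::Relaxed);
    }

    fn lazy_get_data(&self, init_field: &AtomicU32, offset: usize) -> Option<u32> {
        // An RMW (cookie -> cookie) rather than a load: in Miri this forces
        // reading the *latest* value, so a racing init cannot be missed.
        let initialized = init_field
            .compare_exchange(LAZY_INIT_COOKIE, LAZY_INIT_COOKIE, Ordering::Acquire, Ordering::Acquire)
            .is_ok();
        if initialized { self.data.get(&offset).copied() } else { None }
    }
}

fn main() {
    let field = AtomicU32::new(0);
    let mut table = SideTable { data: HashMap::new() };
    assert_eq!(table.lazy_get_data(&field, 8), None);
    table.lazy_init(&field, 8, 42);
    assert_eq!(table.lazy_get_data(&field, 8), Some(42));
}
```

In the real code, a present cookie with a missing side-table entry means the primitive's bytes were copied elsewhere; that is exactly the case that produces the "can't be moved after first use" error.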
src/machine.rs

@@ -362,12 +362,6 @@ impl VisitProvenance for AllocExtra<'_> {
     }
 }
 
-impl<'tcx> AllocExtra<'tcx> {
-    pub fn get_sync<T: 'static>(&self, offset: Size) -> Option<&T> {
-        self.sync.get(&offset).and_then(|s| s.downcast_ref::<T>())
-    }
-}
-
 /// Precomputed layouts of primitive types
 pub struct PrimitiveLayouts<'tcx> {
     pub unit: TyAndLayout<'tcx>,
src/shims/unix/sync.rs

@@ -2,10 +2,13 @@ use std::sync::atomic::{AtomicBool, Ordering};
 
 use rustc_target::abi::Size;
 
+use crate::concurrency::sync::{LAZY_INIT_COOKIE, lazy_sync_get_data, lazy_sync_init};
 use crate::*;
 
-/// Do a bytewise comparison of the two places, using relaxed atomic reads.
-/// This is used to check if a mutex matches its static initializer value.
+/// Do a bytewise comparison of the two places, using relaxed atomic reads. This is used to check if
+/// a synchronization primitive matches its static initializer value.
 ///
 /// The reads happen in chunks of 4, so all racing accesses must also use that access size.
 fn bytewise_equal_atomic_relaxed<'tcx>(
     ecx: &MiriInterpCx<'tcx>,
     left: &MPlaceTy<'tcx>,
@@ -33,63 +36,6 @@ fn bytewise_equal_atomic_relaxed<'tcx>(
     interp_ok(true)
 }
 
-/// We designate an `init`` field in all primitives.
-/// If `init` is set to this, we consider the primitive initialized.
-const INIT_COOKIE: u32 = 0xcafe_affe;
-
-fn sync_init<'tcx, T: 'static + Copy>(
-    ecx: &mut MiriInterpCx<'tcx>,
-    primitive: &MPlaceTy<'tcx>,
-    init_offset: Size,
-    data: T,
-) -> InterpResult<'tcx> {
-    let init_field = primitive.offset(init_offset, ecx.machine.layouts.u32, ecx)?;
-
-    let (alloc, offset, _) = ecx.ptr_get_alloc_id(primitive.ptr(), 0)?;
-    let (alloc_extra, _machine) = ecx.get_alloc_extra_mut(alloc)?;
-    alloc_extra.sync.insert(offset, Box::new(data));
-    // Mark this as "initialized".
-    ecx.write_scalar_atomic(Scalar::from_u32(INIT_COOKIE), &init_field, AtomicWriteOrd::Relaxed)?;
-    interp_ok(())
-}
-
-/// Checks if the primitive is initialized, and return its associated data if so.
-/// Otherwise, return None.
-fn sync_get_data<'tcx, T: 'static + Copy>(
-    ecx: &mut MiriInterpCx<'tcx>,
-    primitive: &MPlaceTy<'tcx>,
-    init_offset: Size,
-    name: &str,
-) -> InterpResult<'tcx, Option<T>> {
-    let init_field = primitive.offset(init_offset, ecx.machine.layouts.u32, ecx)?;
-    // Check if this is already initialized. Needs to be atomic because we can race with another
-    // thread initializing. Needs to be an RMW operation to ensure we read the *latest* value.
-    // So we just try to replace MUTEX_INIT_COOKIE with itself.
-    let init_cookie = Scalar::from_u32(INIT_COOKIE);
-    let (_init, success) = ecx
-        .atomic_compare_exchange_scalar(
-            &init_field,
-            &ImmTy::from_scalar(init_cookie, ecx.machine.layouts.u32),
-            init_cookie,
-            AtomicRwOrd::Acquire,
-            AtomicReadOrd::Acquire,
-            /* can_fail_spuriously */ false,
-        )?
-        .to_scalar_pair();
-    if success.to_bool()? {
-        // If it is initialized, it must be found in the "sync primitive" table,
-        // or else it has been moved illegally.
-        let (alloc, offset, _) = ecx.ptr_get_alloc_id(primitive.ptr(), 0)?;
-        let alloc_extra = ecx.get_alloc_extra(alloc)?;
-        let data = alloc_extra
-            .get_sync::<T>(offset)
-            .ok_or_else(|| err_ub_format!("`{name}` can't be moved after first use"))?;
-        interp_ok(Some(*data))
-    } else {
-        interp_ok(None)
-    }
-}
-
 // # pthread_mutexattr_t
 // We store some data directly inside the type, ignoring the platform layout:
 // - kind: i32
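`bytewise_equal_atomic_relaxed`, left intact above, compares a primitive against its static initializer with relaxed atomic reads done in 4-byte chunks, which constrains all racing accesses to that same size. The chunking itself, sketched over plain byte slices; the real function reads through interpreter places and Miri's atomic machinery:

```rust
/// Compare two equal-length byte buffers in 4-byte chunks, mimicking the
/// access granularity of `bytewise_equal_atomic_relaxed`. Assumes the length
/// is a multiple of 4, as the relevant pthread primitive layouts are.
fn bytewise_equal_chunked(left: &[u8], right: &[u8]) -> bool {
    assert_eq!(left.len(), right.len());
    assert_eq!(left.len() % 4, 0, "reads happen in chunks of 4");
    left.chunks_exact(4).zip(right.chunks_exact(4)).all(|(l, r)| {
        // In Miri each chunk is one relaxed atomic u32 read; here, a plain load.
        u32::from_ne_bytes(l.try_into().unwrap()) == u32::from_ne_bytes(r.try_into().unwrap())
    })
}

fn main() {
    assert!(bytewise_equal_chunked(&[0; 8], &[0; 8]));
    assert!(!bytewise_equal_chunked(&[0, 0, 0, 1], &[0; 4]));
}
```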
@@ -198,7 +144,10 @@ fn mutex_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size>
         let init_field =
             static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
         let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
-        assert_ne!(init, INIT_COOKIE, "{name} is incompatible with our initialization cookie");
+        assert_ne!(
+            init, LAZY_INIT_COOKIE,
+            "{name} is incompatible with our initialization cookie"
+        );
     };
 
     check_static_initializer("PTHREAD_MUTEX_INITIALIZER");
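The reshaped `assert_ne!` belongs to a sanity check the cookie scheme depends on: a static initializer's `init` field must never equal `LAZY_INIT_COOKIE`, or a never-initialized primitive would look initialized. The check is gated to run only once per process via `AtomicBool::swap`. The gating pattern in isolation, as a hypothetical helper mirroring the surrounding code:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

const LAZY_INIT_COOKIE: u32 = 0xcafe_affe;

/// Run the initializer check only once per process, following the
/// `SANITY.swap` gating used around the `assert_ne!` above.
fn check_static_initializer_once(init_field_of_static_initializer: u32) {
    static SANITY: AtomicBool = AtomicBool::new(false);
    // `swap` returns the previous value: only the first caller sees `false`.
    if !SANITY.swap(true, Ordering::Relaxed) {
        assert_ne!(
            init_field_of_static_initializer, LAZY_INIT_COOKIE,
            "static initializer is incompatible with our initialization cookie"
        );
    }
}

fn main() {
    // PTHREAD_MUTEX_INITIALIZER zeroes its `init` field, so this must pass.
    check_static_initializer_once(0);
    check_static_initializer_once(0); // second call is a no-op
}
```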
@@ -228,7 +177,7 @@ fn mutex_create<'tcx>(
     let mutex = ecx.deref_pointer(mutex_ptr)?;
     let id = ecx.machine.sync.mutex_create();
     let data = MutexData { id, kind };
-    sync_init(ecx, &mutex, mutex_init_offset(ecx)?, data)?;
+    lazy_sync_init(ecx, &mutex, mutex_init_offset(ecx)?, data)?;
     interp_ok(data)
 }
 
@@ -243,7 +192,7 @@ fn mutex_get_data<'tcx, 'a>(
     let mutex = ecx.deref_pointer(mutex_ptr)?;
 
     if let Some(data) =
-        sync_get_data::<MutexData>(ecx, &mutex, mutex_init_offset(ecx)?, "pthread_mutex_t")?
+        lazy_sync_get_data::<MutexData>(ecx, &mutex, mutex_init_offset(ecx)?, "pthread_mutex_t")?
     {
         interp_ok(data)
     } else {
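`mutex_get_data` now composes the two helpers: consult the side table first, and only on a miss treat the memory as a fresh mutex (for example `PTHREAD_MUTEX_INITIALIZER`), create an ID, and register it. A condensed model of that get-or-create flow, with hypothetical stand-ins for `alloc_extra.sync` and the ID allocator:

```rust
use std::collections::HashMap;

#[derive(Copy, Clone, Debug, PartialEq)]
struct MutexData {
    id: u32,
    kind: i32,
}

/// Hypothetical flow mirroring `mutex_get_data`: `table` stands in for
/// `alloc_extra.sync`, keyed by the field offset within the allocation.
fn mutex_get_data(
    table: &mut HashMap<usize, MutexData>,
    offset: usize,
    next_id: &mut u32,
) -> MutexData {
    if let Some(data) = table.get(&offset).copied() {
        // Initialized before: the ID survives in the side table.
        data
    } else {
        // First use (e.g. a static initializer): create and register.
        *next_id += 1;
        let data = MutexData { id: *next_id, kind: 0 /* default kind, illustrative */ };
        table.insert(offset, data);
        data
    }
}

fn main() {
    let mut table = HashMap::new();
    let mut next_id = 0;
    let first = mutex_get_data(&mut table, 16, &mut next_id);
    let again = mutex_get_data(&mut table, 16, &mut next_id);
    assert_eq!(first, again); // same mutex, same ID
}
```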
@@ -302,14 +251,14 @@ fn rwlock_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size
     let offset = Size::from_bytes(offset);
 
     // Sanity-check this against PTHREAD_RWLOCK_INITIALIZER (but only once):
-    // the `init` field must start out not equal to INIT_COOKIE.
+    // the `init` field must start out not equal to LAZY_INIT_COOKIE.
     static SANITY: AtomicBool = AtomicBool::new(false);
     if !SANITY.swap(true, Ordering::Relaxed) {
         let static_initializer = ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]);
         let init_field = static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
         let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
         assert_ne!(
-            init, INIT_COOKIE,
+            init, LAZY_INIT_COOKIE,
             "PTHREAD_RWLOCK_INITIALIZER is incompatible with our initialization cookie"
         );
     }
@@ -324,7 +273,8 @@ fn rwlock_get_data<'tcx>(
     let rwlock = ecx.deref_pointer(rwlock_ptr)?;
     let init_offset = rwlock_init_offset(ecx)?;
 
-    if let Some(data) = sync_get_data::<RwLockData>(ecx, &rwlock, init_offset, "pthread_rwlock_t")?
+    if let Some(data) =
+        lazy_sync_get_data::<RwLockData>(ecx, &rwlock, init_offset, "pthread_rwlock_t")?
     {
         interp_ok(data)
     } else {
@@ -337,7 +287,7 @@ fn rwlock_get_data<'tcx>(
         }
         let id = ecx.machine.sync.rwlock_create();
         let data = RwLockData { id };
-        sync_init(ecx, &rwlock, init_offset, data)?;
+        lazy_sync_init(ecx, &rwlock, init_offset, data)?;
         interp_ok(data)
     }
 }
@@ -410,14 +360,14 @@ fn cond_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size>
     let offset = Size::from_bytes(offset);
 
     // Sanity-check this against PTHREAD_COND_INITIALIZER (but only once):
-    // the `init` field must start out not equal to INIT_COOKIE.
+    // the `init` field must start out not equal to LAZY_INIT_COOKIE.
     static SANITY: AtomicBool = AtomicBool::new(false);
     if !SANITY.swap(true, Ordering::Relaxed) {
         let static_initializer = ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]);
         let init_field = static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
         let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
         assert_ne!(
-            init, INIT_COOKIE,
+            init, LAZY_INIT_COOKIE,
             "PTHREAD_COND_INITIALIZER is incompatible with our initialization cookie"
         );
     }
@@ -446,7 +396,7 @@ fn cond_create<'tcx>(
     let cond = ecx.deref_pointer(cond_ptr)?;
     let id = ecx.machine.sync.condvar_create();
     let data = CondData { id, clock };
-    sync_init(ecx, &cond, cond_init_offset(ecx)?, data)?;
+    lazy_sync_init(ecx, &cond, cond_init_offset(ecx)?, data)?;
     interp_ok(data)
 }
 
@@ -457,7 +407,7 @@ fn cond_get_data<'tcx>(
     let cond = ecx.deref_pointer(cond_ptr)?;
     let init_offset = cond_init_offset(ecx)?;
 
-    if let Some(data) = sync_get_data::<CondData>(ecx, &cond, init_offset, "pthread_cond_t")? {
+    if let Some(data) = lazy_sync_get_data::<CondData>(ecx, &cond, init_offset, "pthread_cond_t")? {
         interp_ok(data)
     } else {
         // This used the static initializer. The clock there is always CLOCK_REALTIME.
src/shims/windows/sync.rs

@@ -3,8 +3,14 @@ use std::time::Duration;
 use rustc_target::abi::Size;
 
 use crate::concurrency::init_once::InitOnceStatus;
+use crate::concurrency::sync::{lazy_sync_get_data, lazy_sync_init};
 use crate::*;
 
+#[derive(Copy, Clone)]
+struct InitOnceData {
+    id: InitOnceId,
+}
+
 impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
 trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
     // Windows sync primitives are pointer sized.
@@ -12,8 +18,21 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
 
     fn init_once_get_id(&mut self, init_once_ptr: &OpTy<'tcx>) -> InterpResult<'tcx, InitOnceId> {
         let this = self.eval_context_mut();
 
         let init_once = this.deref_pointer(init_once_ptr)?;
-        this.init_once_get_or_create_id(&init_once, 0)
+        let init_offset = Size::ZERO;
+
+        if let Some(data) =
+            lazy_sync_get_data::<InitOnceData>(this, &init_once, init_offset, "INIT_ONCE")?
+        {
+            interp_ok(data.id)
+        } else {
+            // TODO: check that this is still all-zero.
+            let id = this.machine.sync.init_once_create();
+            let data = InitOnceData { id };
+            lazy_sync_init(this, &init_once, init_offset, data)?;
+            interp_ok(id)
+        }
     }
 
     /// Returns `true` if we were successful, `false` if we would block.
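This is the commit's headline change: `init_once_get_id` no longer stores the ID in the INIT_ONCE's pointer-sized word via the removed `init_once_get_or_create_id`. That word (at offset `Size::ZERO`, since Windows sync primitives are a single pointer) now carries only the cookie, and the ID lives in the side table. A condensed model of the new lookup, using hypothetical types that mirror the control flow above:

```rust
use std::collections::HashMap;

#[derive(Copy, Clone)]
struct InitOnceData {
    id: u32,
}

/// Condensed model of the new `init_once_get_id`: look the ID up in the side
/// table, creating and registering it on first use. `offset` is always 0 here
/// because an INIT_ONCE is just one pointer-sized field.
fn init_once_get_id(
    side_table: &mut HashMap<usize, InitOnceData>,
    offset: usize,
    fresh_id: impl FnOnce() -> u32,
) -> u32 {
    if let Some(data) = side_table.get(&offset).copied() {
        data.id
    } else {
        // The real code still carries a TODO to verify the INIT_ONCE is all-zero here.
        let id = fresh_id();
        side_table.insert(offset, InitOnceData { id });
        id
    }
}

fn main() {
    let mut table = HashMap::new();
    let id = init_once_get_id(&mut table, 0, || 7);
    // A second lookup finds the same ID without creating a new one.
    assert_eq!(init_once_get_id(&mut table, 0, || unreachable!()), id);
}
```

Since the ID is no longer written into guest memory, the program cannot observe or corrupt it through ordinary reads and writes, and a moved INIT_ONCE shows up as a side-table miss.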