make some operations private to the data race detector / atomic intrinsic file

parent cd2edbfd09
commit 927ab19cfc

@@ -464,33 +464,6 @@ fn write_scalar_at_offset_atomic(
         this.write_scalar_atomic(value.into(), &value_place, atomic)
     }

-    /// Checks that an atomic access is legal at the given place.
-    fn atomic_access_check(&self, place: &MPlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
-        let this = self.eval_context_ref();
-        // Check alignment requirements. Atomics must always be aligned to their size,
-        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
-        // be 8-aligned).
-        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.check_ptr_access_align(
-            place.ptr,
-            place.layout.size,
-            align,
-            CheckInAllocMsg::MemoryAccessTest,
-        )?;
-        // Ensure the allocation is mutable. Even failing (read-only) compare_exchange operations
-        // need mutable memory on many targets (i.e., they segfault if that memory is mapped
-        // read-only), and atomic loads can be implemented via compare_exchange on some targets.
-        // See <https://github.com/rust-lang/miri/issues/2463>.
-        // We avoid `get_ptr_alloc` since we do *not* want to run the access hooks -- the actual
-        // access will happen later.
-        let (alloc_id, _offset, _prov) =
-            this.ptr_try_get_alloc_id(place.ptr).expect("there are no zero-sized atomic accesses");
-        if this.get_alloc_mutability(alloc_id)? == Mutability::Not {
-            throw_ub_format!("atomic operations cannot be performed on read-only memory");
-        }
-        Ok(())
-    }
-
     /// Perform an atomic read operation at the memory location.
     fn read_scalar_atomic(
         &self,
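
Illustrative sketch, not part of the commit: the size-based alignment rule enforced by `atomic_access_check` above, reduced to standalone Rust with a hypothetical helper name.

// An atomic access must be aligned to its own *size*, even if the wrapped type is
// less aligned. E.g. on a 32-bit target, u64 may be only 4-aligned, but AtomicU64
// must be 8-aligned. This mirrors Align::from_bytes(size), which rejects
// non-power-of-two sizes.
fn required_atomic_align(size_bytes: u64) -> Option<u64> {
    // Atomics only exist for power-of-two sizes; anything else has no valid alignment.
    size_bytes.is_power_of_two().then_some(size_bytes)
}

fn main() {
    assert_eq!(required_atomic_align(8), Some(8)); // AtomicU64: always 8-aligned
    assert_eq!(required_atomic_align(4), Some(4)); // AtomicU32: always 4-aligned
    assert_eq!(required_atomic_align(3), None);    // no 3-byte atomics
}
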
@@ -682,80 +655,8 @@ fn atomic_compare_exchange_scalar(
         Ok(res)
     }

-    /// Update the data-race detector for an atomic read occurring at the
-    /// associated memory-place and on the current thread.
-    fn validate_atomic_load(
-        &self,
-        place: &MPlaceTy<'tcx, Provenance>,
-        atomic: AtomicReadOrd,
-    ) -> InterpResult<'tcx> {
-        let this = self.eval_context_ref();
-        this.validate_overlapping_atomic(place)?;
-        this.validate_atomic_op(
-            place,
-            atomic,
-            "Atomic Load",
-            move |memory, clocks, index, atomic| {
-                if atomic == AtomicReadOrd::Relaxed {
-                    memory.load_relaxed(&mut *clocks, index)
-                } else {
-                    memory.load_acquire(&mut *clocks, index)
-                }
-            },
-        )
-    }
-
-    /// Update the data-race detector for an atomic write occurring at the
-    /// associated memory-place and on the current thread.
-    fn validate_atomic_store(
-        &mut self,
-        place: &MPlaceTy<'tcx, Provenance>,
-        atomic: AtomicWriteOrd,
-    ) -> InterpResult<'tcx> {
-        let this = self.eval_context_mut();
-        this.validate_overlapping_atomic(place)?;
-        this.validate_atomic_op(
-            place,
-            atomic,
-            "Atomic Store",
-            move |memory, clocks, index, atomic| {
-                if atomic == AtomicWriteOrd::Relaxed {
-                    memory.store_relaxed(clocks, index)
-                } else {
-                    memory.store_release(clocks, index)
-                }
-            },
-        )
-    }
-
-    /// Update the data-race detector for an atomic read-modify-write occurring
-    /// at the associated memory place and on the current thread.
-    fn validate_atomic_rmw(
-        &mut self,
-        place: &MPlaceTy<'tcx, Provenance>,
-        atomic: AtomicRwOrd,
-    ) -> InterpResult<'tcx> {
-        use AtomicRwOrd::*;
-        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
-        let release = matches!(atomic, Release | AcqRel | SeqCst);
-        let this = self.eval_context_mut();
-        this.validate_overlapping_atomic(place)?;
-        this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
-            if acquire {
-                memory.load_acquire(clocks, index)?;
-            } else {
-                memory.load_relaxed(clocks, index)?;
-            }
-            if release {
-                memory.rmw_release(clocks, index)
-            } else {
-                memory.rmw_relaxed(clocks, index)
-            }
-        })
-    }
-
     /// Update the data-race detector for an atomic fence on the current thread.
-    fn validate_atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
+    fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         if let Some(data_race) = &mut this.machine.data_race {
             data_race.maybe_perform_sync_operation(&this.machine.threads, |index, mut clocks| {
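
Illustrative sketch, not part of the commit: the acquire/release split in `validate_atomic_rmw` above, where a single read-modify-write ordering drives both the load half and the store half. A self-contained Rust version with a stand-in for Miri's `AtomicRwOrd`:

// Stand-in enum (the real AtomicRwOrd lives in Miri) showing how one RMW
// ordering decomposes into its acquire (load) and release (store) halves.
#[allow(dead_code)]
#[derive(Clone, Copy)]
enum AtomicRwOrd { Relaxed, Acquire, Release, AcqRel, SeqCst }

fn rmw_halves(ord: AtomicRwOrd) -> (bool, bool) {
    use AtomicRwOrd::*;
    // Exactly the two matches! in the diff: the load acquires for
    // Acquire/AcqRel/SeqCst, the store releases for Release/AcqRel/SeqCst.
    (matches!(ord, Acquire | AcqRel | SeqCst), matches!(ord, Release | AcqRel | SeqCst))
}

fn main() {
    assert_eq!(rmw_halves(AtomicRwOrd::Relaxed), (false, false));
    assert_eq!(rmw_halves(AtomicRwOrd::Release), (false, true));
    assert_eq!(rmw_halves(AtomicRwOrd::AcqRel), (true, true));
}
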
@@ -1081,6 +982,105 @@ fn allow_data_races_mut<R>(
         result
     }

+    /// Checks that an atomic access is legal at the given place.
+    fn atomic_access_check(&self, place: &MPlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
+        let this = self.eval_context_ref();
+        // Check alignment requirements. Atomics must always be aligned to their size,
+        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
+        // be 8-aligned).
+        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
+        this.check_ptr_access_align(
+            place.ptr,
+            place.layout.size,
+            align,
+            CheckInAllocMsg::MemoryAccessTest,
+        )?;
+        // Ensure the allocation is mutable. Even failing (read-only) compare_exchange operations
+        // need mutable memory on many targets (i.e., they segfault if that memory is mapped
+        // read-only), and atomic loads can be implemented via compare_exchange on some targets.
+        // See <https://github.com/rust-lang/miri/issues/2463>.
+        // We avoid `get_ptr_alloc` since we do *not* want to run the access hooks -- the actual
+        // access will happen later.
+        let (alloc_id, _offset, _prov) =
+            this.ptr_try_get_alloc_id(place.ptr).expect("there are no zero-sized atomic accesses");
+        if this.get_alloc_mutability(alloc_id)? == Mutability::Not {
+            throw_ub_format!("atomic operations cannot be performed on read-only memory");
+        }
+        Ok(())
+    }
+
+    /// Update the data-race detector for an atomic read occurring at the
+    /// associated memory-place and on the current thread.
+    fn validate_atomic_load(
+        &self,
+        place: &MPlaceTy<'tcx, Provenance>,
+        atomic: AtomicReadOrd,
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_ref();
+        this.validate_overlapping_atomic(place)?;
+        this.validate_atomic_op(
+            place,
+            atomic,
+            "Atomic Load",
+            move |memory, clocks, index, atomic| {
+                if atomic == AtomicReadOrd::Relaxed {
+                    memory.load_relaxed(&mut *clocks, index)
+                } else {
+                    memory.load_acquire(&mut *clocks, index)
+                }
+            },
+        )
+    }
+
+    /// Update the data-race detector for an atomic write occurring at the
+    /// associated memory-place and on the current thread.
+    fn validate_atomic_store(
+        &mut self,
+        place: &MPlaceTy<'tcx, Provenance>,
+        atomic: AtomicWriteOrd,
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_mut();
+        this.validate_overlapping_atomic(place)?;
+        this.validate_atomic_op(
+            place,
+            atomic,
+            "Atomic Store",
+            move |memory, clocks, index, atomic| {
+                if atomic == AtomicWriteOrd::Relaxed {
+                    memory.store_relaxed(clocks, index)
+                } else {
+                    memory.store_release(clocks, index)
+                }
+            },
+        )
+    }
+
+    /// Update the data-race detector for an atomic read-modify-write occurring
+    /// at the associated memory place and on the current thread.
+    fn validate_atomic_rmw(
+        &mut self,
+        place: &MPlaceTy<'tcx, Provenance>,
+        atomic: AtomicRwOrd,
+    ) -> InterpResult<'tcx> {
+        use AtomicRwOrd::*;
+        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
+        let release = matches!(atomic, Release | AcqRel | SeqCst);
+        let this = self.eval_context_mut();
+        this.validate_overlapping_atomic(place)?;
+        this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
+            if acquire {
+                memory.load_acquire(clocks, index)?;
+            } else {
+                memory.load_relaxed(clocks, index)?;
+            }
+            if release {
+                memory.rmw_release(clocks, index)
+            } else {
+                memory.rmw_relaxed(clocks, index)
+            }
+        })
+    }
+
     /// Generic atomic operation implementation
     fn validate_atomic_op<A: Debug + Copy>(
         &self,
@ -67,8 +67,8 @@ fn fence_ord<'tcx>(ord: &str) -> InterpResult<'tcx, AtomicFenceOrd> {
|
|||||||
["load", ord] => this.atomic_load(args, dest, read_ord(ord)?)?,
|
["load", ord] => this.atomic_load(args, dest, read_ord(ord)?)?,
|
||||||
["store", ord] => this.atomic_store(args, write_ord(ord)?)?,
|
["store", ord] => this.atomic_store(args, write_ord(ord)?)?,
|
||||||
|
|
||||||
["fence", ord] => this.atomic_fence(args, fence_ord(ord)?)?,
|
["fence", ord] => this.atomic_fence_intrinsic(args, fence_ord(ord)?)?,
|
||||||
["singlethreadfence", ord] => this.compiler_fence(args, fence_ord(ord)?)?,
|
["singlethreadfence", ord] => this.compiler_fence_intrinsic(args, fence_ord(ord)?)?,
|
||||||
|
|
||||||
["xchg", ord] => this.atomic_exchange(args, dest, rw_ord(ord)?)?,
|
["xchg", ord] => this.atomic_exchange(args, dest, rw_ord(ord)?)?,
|
||||||
["cxchg", ord1, ord2] =>
|
["cxchg", ord1, ord2] =>
|
||||||
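
Illustrative sketch, not part of the commit: the slice-pattern dispatch style used above, assuming intrinsic name suffixes like "fence_seqcst" (the exact name format here is an assumption for the example).

// Split an intrinsic name suffix into operation and ordering, then dispatch on
// it with slice patterns, as the match arms in the diff do.
fn dispatch(suffix: &str) -> String {
    let parts: Vec<&str> = suffix.split('_').collect();
    match &parts[..] {
        ["load", ord] => format!("atomic_load, ordering {ord}"),
        ["store", ord] => format!("atomic_store, ordering {ord}"),
        ["fence", ord] => format!("atomic_fence_intrinsic, ordering {ord}"),
        ["singlethreadfence", ord] => format!("compiler_fence_intrinsic, ordering {ord}"),
        _ => format!("unsupported intrinsic suffix: {suffix}"),
    }
}

fn main() {
    assert_eq!(dispatch("fence_seqcst"), "atomic_fence_intrinsic, ordering seqcst");
}
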
@@ -117,7 +117,10 @@ fn fence_ord<'tcx>(ord: &str) -> InterpResult<'tcx, AtomicFenceOrd> {
        }
        Ok(())
    }
+}

+impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {}
+trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
    fn atomic_load(
        &mut self,
        args: &[OpTy<'tcx, Provenance>],
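
Illustrative sketch, not part of the commit: the visibility pattern this hunk introduces, reduced to invented minimal types. A public extension trait keeps the intrinsic entry points reachable, while a private trait hides the helpers from the rest of the interpreter.

mod shim {
    pub struct Ctx;

    pub trait EvalContextExt {
        fn fence_intrinsic(&mut self);
    }
    impl EvalContextExt for Ctx {
        fn fence_intrinsic(&mut self) {
            // The public entry point defers to the private helper.
            self.fence_impl();
        }
    }

    // Not `pub`: this trait, and thus fence_impl, is unreachable outside the module.
    trait EvalContextPrivExt {
        fn fence_impl(&mut self);
    }
    impl EvalContextPrivExt for Ctx {
        fn fence_impl(&mut self) { /* the real code would update data-race clocks */ }
    }
}

fn main() {
    use shim::EvalContextExt;
    shim::Ctx.fence_intrinsic(); // fine: public entry point
    // shim::Ctx.fence_impl();   // error: `EvalContextPrivExt` is private
}
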
@@ -153,7 +156,7 @@ fn atomic_store(
        Ok(())
    }

-    fn compiler_fence(
+    fn compiler_fence_intrinsic(
        &mut self,
        args: &[OpTy<'tcx, Provenance>],
        atomic: AtomicFenceOrd,
@@ -164,14 +167,14 @@ fn compiler_fence(
        Ok(())
    }

-    fn atomic_fence(
+    fn atomic_fence_intrinsic(
        &mut self,
        args: &[OpTy<'tcx, Provenance>],
        atomic: AtomicFenceOrd,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let [] = check_arg_count(args)?;
-        this.validate_atomic_fence(atomic)?;
+        this.atomic_fence(atomic)?;
        Ok(())
    }

@@ -169,7 +169,7 @@ pub fn futex<'tcx>(
        //
        // Thankfully, preemptions cannot happen inside a Miri shim, so we do not need to
        // do anything special to guarantee fence-load-comparison atomicity.
-        this.validate_atomic_fence(AtomicFenceOrd::SeqCst)?;
+        this.atomic_fence(AtomicFenceOrd::SeqCst)?;
        // Read an `i32` through the pointer, regardless of any wrapper types.
        // It's not uncommon for `addr` to be passed as another type than `*mut i32`, such as `*const AtomicI32`.
        let futex_val = this
@@ -240,7 +240,7 @@ pub fn futex<'tcx>(
        // Together with the SeqCst fence in futex_wait, this makes sure that futex_wait
        // will see the latest value on addr which could be changed by our caller
        // before doing the syscall.
-        this.validate_atomic_fence(AtomicFenceOrd::SeqCst)?;
+        this.atomic_fence(AtomicFenceOrd::SeqCst)?;
        let mut n = 0;
        for _ in 0..val {
            if let Some(thread) = this.futex_wake(addr_usize, bitset) {
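
Illustrative sketch, not part of the commit: the userspace half of the handshake that these two renamed SeqCst fences support, using only the standard library. The fence on the wait side pairs with the one on the wake side, so the comparison observes a value at least as new as what the waker published.

use std::sync::atomic::{fence, AtomicI32, Ordering};

// The fence-load-comparison step of a futex wait: only go to sleep if the
// futex word still holds the expected value after a SeqCst fence.
fn should_sleep(futex_word: &AtomicI32, expected: i32) -> bool {
    fence(Ordering::SeqCst);
    futex_word.load(Ordering::Relaxed) == expected
}

fn main() {
    let word = AtomicI32::new(0);
    assert!(should_sleep(&word, 0));
    word.store(1, Ordering::Relaxed); // a waker publishes a new value
    assert!(!should_sleep(&word, 0)); // the waiter must not sleep now
}
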