Auto merge of #2464 - RalfJung:atomic-must-be-mutable, r=RalfJung
Atomics must be mutable

Fixes https://github.com/rust-lang/miri/issues/2463
Needs https://github.com/rust-lang/rust/pull/100181
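To illustrate the rule this change enforces, here is a minimal sketch (illustrative only, not part of the diff): an atomic stored in an interior-mutable static lives in writeable memory and is fine, while retyping a read-only static as an atomic is now reported as UB.

    use std::sync::atomic::{AtomicI32, Ordering};

    // OK: interior mutability places this static in writeable memory.
    static COUNTER: AtomicI32 = AtomicI32::new(0);

    fn main() {
        COUNTER.fetch_add(1, Ordering::Relaxed);

        // Rejected by Miri after this change: `X` is a read-only allocation,
        // and atomic operations require mutable memory.
        static X: i32 = 0;
        let x = unsafe { &*(&X as *const i32 as *const AtomicI32) };
        x.load(Ordering::Relaxed); // UB: atomic operation on read-only memory
    }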
commit 5aef34c016

@@ -1 +1 @@
-93ab13b4e894ab74258c40aaf29872db2b17b6b4
+6d3f1beae1720055e5a30f4dbe7a9e7fb810c65e
@@ -46,10 +46,11 @@ use std::{
     mem,
 };
 
+use rustc_ast::Mutability;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_index::vec::{Idx, IndexVec};
 use rustc_middle::{mir, ty::layout::TyAndLayout};
-use rustc_target::abi::Size;
+use rustc_target::abi::{Align, Size};
 
 use crate::*;
@@ -470,6 +471,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicReadOrd,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Provenance>> {
         let this = self.eval_context_ref();
+        this.atomic_access_check(place)?;
         // This will read from the last store in the modification order of this location. In case
         // weak memory emulation is enabled, this may not be the store we will pick to actually read from and return.
         // This is fine with StackedBorrow and race checks because they don't concern metadata on
@@ -490,6 +492,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicWriteOrd,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
+        this.atomic_access_check(dest)?;
+
         this.validate_overlapping_atomic(dest)?;
         this.allow_data_races_mut(move |this| this.write_scalar(val, &dest.into()))?;
         this.validate_atomic_store(dest, atomic)?;
@@ -511,6 +515,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicRwOrd,
     ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
         let this = self.eval_context_mut();
+        this.atomic_access_check(place)?;
 
         this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
@@ -540,6 +545,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicRwOrd,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Provenance>> {
         let this = self.eval_context_mut();
+        this.atomic_access_check(place)?;
 
         this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_scalar(&place.into()))?;
@@ -561,6 +567,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicRwOrd,
     ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
         let this = self.eval_context_mut();
+        this.atomic_access_check(place)?;
 
         this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
@@ -604,6 +611,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx, Immediate<Provenance>> {
         use rand::Rng as _;
         let this = self.eval_context_mut();
+        this.atomic_access_check(place)?;
 
         this.validate_overlapping_atomic(place)?;
         // Failure ordering cannot be stronger than success ordering, therefore first attempt
@@ -647,80 +655,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         Ok(res)
     }
 
-    /// Update the data-race detector for an atomic read occurring at the
-    /// associated memory-place and on the current thread.
-    fn validate_atomic_load(
-        &self,
-        place: &MPlaceTy<'tcx, Provenance>,
-        atomic: AtomicReadOrd,
-    ) -> InterpResult<'tcx> {
-        let this = self.eval_context_ref();
-        this.validate_overlapping_atomic(place)?;
-        this.validate_atomic_op(
-            place,
-            atomic,
-            "Atomic Load",
-            move |memory, clocks, index, atomic| {
-                if atomic == AtomicReadOrd::Relaxed {
-                    memory.load_relaxed(&mut *clocks, index)
-                } else {
-                    memory.load_acquire(&mut *clocks, index)
-                }
-            },
-        )
-    }
-
-    /// Update the data-race detector for an atomic write occurring at the
-    /// associated memory-place and on the current thread.
-    fn validate_atomic_store(
-        &mut self,
-        place: &MPlaceTy<'tcx, Provenance>,
-        atomic: AtomicWriteOrd,
-    ) -> InterpResult<'tcx> {
-        let this = self.eval_context_mut();
-        this.validate_overlapping_atomic(place)?;
-        this.validate_atomic_op(
-            place,
-            atomic,
-            "Atomic Store",
-            move |memory, clocks, index, atomic| {
-                if atomic == AtomicWriteOrd::Relaxed {
-                    memory.store_relaxed(clocks, index)
-                } else {
-                    memory.store_release(clocks, index)
-                }
-            },
-        )
-    }
-
-    /// Update the data-race detector for an atomic read-modify-write occurring
-    /// at the associated memory place and on the current thread.
-    fn validate_atomic_rmw(
-        &mut self,
-        place: &MPlaceTy<'tcx, Provenance>,
-        atomic: AtomicRwOrd,
-    ) -> InterpResult<'tcx> {
-        use AtomicRwOrd::*;
-        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
-        let release = matches!(atomic, Release | AcqRel | SeqCst);
-        let this = self.eval_context_mut();
-        this.validate_overlapping_atomic(place)?;
-        this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
-            if acquire {
-                memory.load_acquire(clocks, index)?;
-            } else {
-                memory.load_relaxed(clocks, index)?;
-            }
-            if release {
-                memory.rmw_release(clocks, index)
-            } else {
-                memory.rmw_relaxed(clocks, index)
-            }
-        })
-    }
-
     /// Update the data-race detector for an atomic fence on the current thread.
-    fn validate_atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
+    fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         if let Some(data_race) = &mut this.machine.data_race {
             data_race.maybe_perform_sync_operation(&this.machine.threads, |index, mut clocks| {
@@ -1016,6 +952,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriEvalContext<'mir, 'tcx>) -> R) -> R {
         let this = self.eval_context_ref();
         if let Some(data_race) = &this.machine.data_race {
+            assert!(!data_race.ongoing_action_data_race_free.get(), "cannot nest allow_data_races");
             data_race.ongoing_action_data_race_free.set(true);
         }
         let result = op(this);
@@ -1035,6 +972,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     ) -> R {
         let this = self.eval_context_mut();
        if let Some(data_race) = &this.machine.data_race {
+            assert!(!data_race.ongoing_action_data_race_free.get(), "cannot nest allow_data_races");
             data_race.ongoing_action_data_race_free.set(true);
         }
         let result = op(this);
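The two assertions added above guard against re-entrant use of `allow_data_races_*`: nesting would silently extend the window in which accesses are exempt from race detection. The underlying pattern is a plain `Cell<bool>` re-entrancy flag; a standalone sketch of the idea, with illustrative names rather than Miri's actual types:

    use std::cell::Cell;

    struct Detector {
        ongoing_action_data_race_free: Cell<bool>,
    }

    impl Detector {
        fn allow_data_races<R>(&self, op: impl FnOnce() -> R) -> R {
            // Reject nesting loudly instead of silently widening the exempt window.
            assert!(!self.ongoing_action_data_race_free.get(), "cannot nest allow_data_races");
            self.ongoing_action_data_race_free.set(true);
            let result = op();
            self.ongoing_action_data_race_free.set(false);
            result
        }
    }

    fn main() {
        let d = Detector { ongoing_action_data_race_free: Cell::new(false) };
        assert_eq!(d.allow_data_races(|| 42), 42);
    }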
@@ -1044,6 +982,114 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         result
     }
 
+    /// Checks that an atomic access is legal at the given place.
+    fn atomic_access_check(&self, place: &MPlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
+        let this = self.eval_context_ref();
+        // Check alignment requirements. Atomics must always be aligned to their size,
+        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
+        // be 8-aligned).
+        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
+        this.check_ptr_access_align(
+            place.ptr,
+            place.layout.size,
+            align,
+            CheckInAllocMsg::MemoryAccessTest,
+        )?;
+        // Ensure the allocation is mutable. Even a failing (read-only) compare_exchange needs
+        // mutable memory on many targets (i.e., they segfault if that memory is mapped
+        // read-only), and atomic loads can be implemented via compare_exchange on some targets.
+        // There could possibly be some very specific exceptions to this, see
+        // <https://github.com/rust-lang/miri/pull/2464#discussion_r939636130> for details.
+        // We avoid `get_ptr_alloc` since we do *not* want to run the access hooks -- the actual
+        // access will happen later.
+        let (alloc_id, _offset, _prov) =
+            this.ptr_try_get_alloc_id(place.ptr).expect("there are no zero-sized atomic accesses");
+        if this.get_alloc_mutability(alloc_id)? == Mutability::Not {
+            // FIXME: make this prettier, once these messages have separate title/span/help messages.
+            throw_ub_format!(
+                "atomic operations cannot be performed on read-only memory\n\
+                many platforms require atomic read-modify-write instructions to be performed on writeable memory, even if the operation fails \
+                (and is hence nominally read-only)\n\
+                some platforms implement (some) atomic loads via compare-exchange, which means they do not work on read-only memory; \
+                it is possible that we could have an exception permitting this for specific kinds of loads\n\
+                please report an issue at <https://github.com/rust-lang/miri/issues> if this is a problem for you"
+            );
+        }
+        Ok(())
+    }
+
+    /// Update the data-race detector for an atomic read occurring at the
+    /// associated memory-place and on the current thread.
+    fn validate_atomic_load(
+        &self,
+        place: &MPlaceTy<'tcx, Provenance>,
+        atomic: AtomicReadOrd,
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_ref();
+        this.validate_overlapping_atomic(place)?;
+        this.validate_atomic_op(
+            place,
+            atomic,
+            "Atomic Load",
+            move |memory, clocks, index, atomic| {
+                if atomic == AtomicReadOrd::Relaxed {
+                    memory.load_relaxed(&mut *clocks, index)
+                } else {
+                    memory.load_acquire(&mut *clocks, index)
+                }
+            },
+        )
+    }
+
+    /// Update the data-race detector for an atomic write occurring at the
+    /// associated memory-place and on the current thread.
+    fn validate_atomic_store(
+        &mut self,
+        place: &MPlaceTy<'tcx, Provenance>,
+        atomic: AtomicWriteOrd,
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_mut();
+        this.validate_overlapping_atomic(place)?;
+        this.validate_atomic_op(
+            place,
+            atomic,
+            "Atomic Store",
+            move |memory, clocks, index, atomic| {
+                if atomic == AtomicWriteOrd::Relaxed {
+                    memory.store_relaxed(clocks, index)
+                } else {
+                    memory.store_release(clocks, index)
+                }
+            },
+        )
+    }
+
+    /// Update the data-race detector for an atomic read-modify-write occurring
+    /// at the associated memory place and on the current thread.
+    fn validate_atomic_rmw(
+        &mut self,
+        place: &MPlaceTy<'tcx, Provenance>,
+        atomic: AtomicRwOrd,
+    ) -> InterpResult<'tcx> {
+        use AtomicRwOrd::*;
+        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
+        let release = matches!(atomic, Release | AcqRel | SeqCst);
+        let this = self.eval_context_mut();
+        this.validate_overlapping_atomic(place)?;
+        this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
+            if acquire {
+                memory.load_acquire(clocks, index)?;
+            } else {
+                memory.load_relaxed(clocks, index)?;
+            }
+            if release {
+                memory.rmw_release(clocks, index)
+            } else {
+                memory.rmw_relaxed(clocks, index)
+            }
+        })
+    }
+
     /// Generic atomic operation implementation
     fn validate_atomic_op<A: Debug + Copy>(
         &self,
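The size-based alignment rule enforced by `atomic_access_check` above is observable from plain Rust: an atomic type is always aligned to its own size, even when the integer it wraps may be less aligned on the target. A small sketch (illustrative only):

    use std::mem::{align_of, size_of};
    use std::sync::atomic::AtomicU64;

    fn main() {
        // Where AtomicU64 exists, it is always 8-aligned, even on 32-bit
        // targets where plain u64 may only be 4-aligned.
        assert_eq!(align_of::<AtomicU64>(), size_of::<AtomicU64>());
        println!("u64: {}, AtomicU64: {}", align_of::<u64>(), align_of::<AtomicU64>());
    }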
@@ -1,5 +1,4 @@
 use rustc_middle::{mir, mir::BinOp, ty};
-use rustc_target::abi::Align;
 
 use crate::*;
 use helpers::check_arg_count;
@@ -68,8 +67,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
             ["load", ord] => this.atomic_load(args, dest, read_ord(ord)?)?,
             ["store", ord] => this.atomic_store(args, write_ord(ord)?)?,
 
-            ["fence", ord] => this.atomic_fence(args, fence_ord(ord)?)?,
-            ["singlethreadfence", ord] => this.compiler_fence(args, fence_ord(ord)?)?,
+            ["fence", ord] => this.atomic_fence_intrinsic(args, fence_ord(ord)?)?,
+            ["singlethreadfence", ord] => this.compiler_fence_intrinsic(args, fence_ord(ord)?)?,
 
             ["xchg", ord] => this.atomic_exchange(args, dest, rw_ord(ord)?)?,
             ["cxchg", ord1, ord2] =>
@@ -118,7 +117,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
         }
         Ok(())
     }
+}
 
+impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {}
+trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     fn atomic_load(
         &mut self,
         args: &[OpTy<'tcx, Provenance>],
@@ -130,20 +132,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
         let [place] = check_arg_count(args)?;
         let place = this.deref_operand(place)?;
 
-        // make sure it fits into a scalar; otherwise it cannot be atomic
+        // Perform atomic load.
         let val = this.read_scalar_atomic(&place, atomic)?;
-
-        // Check alignment requirements. Atomics must always be aligned to their size,
-        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
-        // be 8-aligned).
-        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.check_ptr_access_align(
-            place.ptr,
-            place.layout.size,
-            align,
-            CheckInAllocMsg::MemoryAccessTest,
-        )?;
-        // Perform regular access.
+        // Perform regular store.
         this.write_scalar(val, dest)?;
         Ok(())
     }
@@ -157,25 +148,15 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
 
         let [place, val] = check_arg_count(args)?;
         let place = this.deref_operand(place)?;
-        let val = this.read_scalar(val)?; // make sure it fits into a scalar; otherwise it cannot be atomic
 
-        // Check alignment requirements. Atomics must always be aligned to their size,
-        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
-        // be 8-aligned).
-        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.check_ptr_access_align(
-            place.ptr,
-            place.layout.size,
-            align,
-            CheckInAllocMsg::MemoryAccessTest,
-        )?;
-
+        // Perform regular load.
+        let val = this.read_scalar(val)?;
+        // Perform atomic store
         this.write_scalar_atomic(val, &place, atomic)?;
         Ok(())
     }
 
-    fn compiler_fence(
+    fn compiler_fence_intrinsic(
         &mut self,
         args: &[OpTy<'tcx, Provenance>],
         atomic: AtomicFenceOrd,
@@ -186,14 +167,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
         Ok(())
     }
 
-    fn atomic_fence(
+    fn atomic_fence_intrinsic(
         &mut self,
         args: &[OpTy<'tcx, Provenance>],
         atomic: AtomicFenceOrd,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         let [] = check_arg_count(args)?;
-        this.validate_atomic_fence(atomic)?;
+        this.atomic_fence(atomic)?;
         Ok(())
     }
@@ -220,17 +201,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
             span_bug!(this.cur_span(), "atomic arithmetic operation type mismatch");
         }
 
-        // Check alignment requirements. Atomics must always be aligned to their size,
-        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
-        // be 8-aligned).
-        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.check_ptr_access_align(
-            place.ptr,
-            place.layout.size,
-            align,
-            CheckInAllocMsg::MemoryAccessTest,
-        )?;
-
         match atomic_op {
             AtomicOp::Min => {
                 let old = this.atomic_min_max_scalar(&place, rhs, true, atomic)?;
@@ -262,17 +232,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
         let place = this.deref_operand(place)?;
         let new = this.read_scalar(new)?;
 
-        // Check alignment requirements. Atomics must always be aligned to their size,
-        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
-        // be 8-aligned).
-        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.check_ptr_access_align(
-            place.ptr,
-            place.layout.size,
-            align,
-            CheckInAllocMsg::MemoryAccessTest,
-        )?;
-
         let old = this.atomic_exchange_scalar(&place, new, atomic)?;
         this.write_scalar(old, dest)?; // old value is returned
         Ok(())
@@ -293,17 +252,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
         let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
         let new = this.read_scalar(new)?;
 
-        // Check alignment requirements. Atomics must always be aligned to their size,
-        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
-        // be 8-aligned).
-        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.check_ptr_access_align(
-            place.ptr,
-            place.layout.size,
-            align,
-            CheckInAllocMsg::MemoryAccessTest,
-        )?;
-
         let old = this.atomic_compare_exchange_scalar(
             &place,
             &expect_old,
@@ -169,7 +169,7 @@ pub fn futex<'tcx>(
         //
         // Thankfully, preemptions cannot happen inside a Miri shim, so we do not need to
         // do anything special to guarantee fence-load-comparison atomicity.
-        this.validate_atomic_fence(AtomicFenceOrd::SeqCst)?;
+        this.atomic_fence(AtomicFenceOrd::SeqCst)?;
         // Read an `i32` through the pointer, regardless of any wrapper types.
         // It's not uncommon for `addr` to be passed as another type than `*mut i32`, such as `*const AtomicI32`.
         let futex_val = this
@@ -240,7 +240,7 @@ pub fn futex<'tcx>(
         // Together with the SeqCst fence in futex_wait, this makes sure that futex_wait
         // will see the latest value on addr which could be changed by our caller
         // before doing the syscall.
-        this.validate_atomic_fence(AtomicFenceOrd::SeqCst)?;
+        this.atomic_fence(AtomicFenceOrd::SeqCst)?;
         let mut n = 0;
         for _ in 0..val {
             if let Some(thread) = this.futex_wake(addr_usize, bitset) {
tests/fail/concurrency/read_only_atomic_cmpxchg.rs (new file)
@@ -0,0 +1,11 @@
+// Should not rely on the aliasing model for its failure.
+//@compile-flags: -Zmiri-disable-stacked-borrows
+
+use std::sync::atomic::{AtomicI32, Ordering};
+
+fn main() {
+    static X: i32 = 0;
+    let x = &X as *const i32 as *const AtomicI32;
+    let x = unsafe { &*x };
+    x.compare_exchange(1, 2, Ordering::Relaxed, Ordering::Relaxed).unwrap_err(); //~ERROR: atomic operations cannot be performed on read-only memory
+}
tests/fail/concurrency/read_only_atomic_cmpxchg.stderr (new file)
@@ -0,0 +1,21 @@
+error: Undefined Behavior: atomic operations cannot be performed on read-only memory
+many platforms require atomic read-modify-write instructions to be performed on writeable memory, even if the operation fails (and is hence nominally read-only)
+some platforms implement (some) atomic loads via compare-exchange, which means they do not work on read-only memory; it is possible that we could have an exception permitting this for specific kinds of loads
+please report an issue at <https://github.com/rust-lang/miri/issues> if this is a problem for you
+  --> $DIR/read_only_atomic_cmpxchg.rs:LL:CC
+   |
+LL |     x.compare_exchange(1, 2, Ordering::Relaxed, Ordering::Relaxed).unwrap_err();
+   |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ atomic operations cannot be performed on read-only memory
+many platforms require atomic read-modify-write instructions to be performed on writeable memory, even if the operation fails (and is hence nominally read-only)
+some platforms implement (some) atomic loads via compare-exchange, which means they do not work on read-only memory; it is possible that we could have an exception permitting this for specific kinds of loads
+please report an issue at <https://github.com/rust-lang/miri/issues> if this is a problem for you
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: backtrace:
+   = note: inside `main` at $DIR/read_only_atomic_cmpxchg.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to previous error
tests/fail/concurrency/read_only_atomic_load.rs (new file)
@@ -0,0 +1,13 @@
+// Should not rely on the aliasing model for its failure.
+//@compile-flags: -Zmiri-disable-stacked-borrows
+
+use std::sync::atomic::{AtomicI32, Ordering};
+
+fn main() {
+    static X: i32 = 0;
+    let x = &X as *const i32 as *const AtomicI32;
+    let x = unsafe { &*x };
+    // Some targets can implement atomic loads via compare_exchange, so we cannot allow them on
+    // read-only memory.
+    x.load(Ordering::Relaxed); //~ERROR: atomic operations cannot be performed on read-only memory
+}
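The comment in this test is the crux: a backend may lower an atomic load to a compare-exchange, and a compare-exchange needs write access to the location even when it fails. A user-level sketch of that equivalence (illustrative; not how any particular backend actually lowers loads):

    use std::sync::atomic::{AtomicU64, Ordering};

    // Emulate a load by compare-exchanging 0 for 0: whether the CAS succeeds
    // or fails, the returned value is the current one. But the operation
    // counts as a write to the memory mapping, so it faults on read-only pages.
    fn load_via_cas(a: &AtomicU64) -> u64 {
        match a.compare_exchange(0, 0, Ordering::SeqCst, Ordering::SeqCst) {
            Ok(v) | Err(v) => v,
        }
    }

    fn main() {
        let a = AtomicU64::new(42);
        assert_eq!(load_via_cas(&a), 42);
    }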
tests/fail/concurrency/read_only_atomic_load.stderr (new file)
@@ -0,0 +1,21 @@
+error: Undefined Behavior: atomic operations cannot be performed on read-only memory
+many platforms require atomic read-modify-write instructions to be performed on writeable memory, even if the operation fails (and is hence nominally read-only)
+some platforms implement (some) atomic loads via compare-exchange, which means they do not work on read-only memory; it is possible that we could have an exception permitting this for specific kinds of loads
+please report an issue at <https://github.com/rust-lang/miri/issues> if this is a problem for you
+  --> $DIR/read_only_atomic_load.rs:LL:CC
+   |
+LL |     x.load(Ordering::Relaxed);
+   |     ^^^^^^^^^^^^^^^^^^^^^^^^^ atomic operations cannot be performed on read-only memory
+many platforms require atomic read-modify-write instructions to be performed on writeable memory, even if the operation fails (and is hence nominally read-only)
+some platforms implement (some) atomic loads via compare-exchange, which means they do not work on read-only memory; it is possible that we could have an exception permitting this for specific kinds of loads
+please report an issue at <https://github.com/rust-lang/miri/issues> if this is a problem for you
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: backtrace:
+   = note: inside `main` at $DIR/read_only_atomic_load.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to previous error
@@ -130,7 +130,7 @@ fn wait_absolute_timeout() {
 fn wait_wake() {
     let start = Instant::now();
 
-    static FUTEX: i32 = 0;
+    static mut FUTEX: i32 = 0;
 
     let t = thread::spawn(move || {
         thread::sleep(Duration::from_millis(200));
@@ -167,7 +167,7 @@ fn wait_wake() {
 fn wait_wake_bitset() {
     let start = Instant::now();
 
-    static FUTEX: i32 = 0;
+    static mut FUTEX: i32 = 0;
 
     let t = thread::spawn(move || {
         thread::sleep(Duration::from_millis(200));
@@ -277,8 +277,8 @@ fn concurrent_wait_wake() {
 
     // Make sure we got the interesting case (of having woken a thread) at least once, but not *each* time.
     let woken = WOKEN.load(Ordering::Relaxed);
-    assert!(woken > 0 && woken < rounds);
     //eprintln!("waking happened {woken} times");
+    assert!(woken > 0 && woken < rounds);
 }
 
 fn main() {
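The test futexes switch from `static` to `static mut` for the same reason: a plain `static i32` is a read-only allocation, and the futex word is accessed atomically by the `futex` shim, so it must now live in mutable memory. A sketch of the pattern (assumes Linux and the `libc` crate; the wrapper function is illustrative, not from the test suite):

    use std::ptr;

    // The futex word must be in mutable memory: futex operations are atomic
    // accesses, so `static FUTEX: i32` would now be rejected by Miri.
    static mut FUTEX: i32 = 0;

    fn futex_wait_expect_zero() {
        unsafe {
            libc::syscall(
                libc::SYS_futex,
                ptr::addr_of!(FUTEX),          // futex word (mutable allocation)
                libc::FUTEX_WAIT,
                0,                             // expected value of the futex word
                ptr::null::<libc::timespec>(), // no timeout
            );
        }
    }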