Forbade all racing mixed size atomic accesses

Andy Wang 2022-05-29 15:05:07 +01:00
parent ceb173d647
commit 4a07f78dad
No known key found for this signature in database
GPG Key ID: 181B49F9F38F3374
6 changed files with 78 additions and 129 deletions

View File

@@ -287,8 +287,8 @@ impl MemoryCellClocks {
Ok(())
}
/// Checks if the memory cell write races with any prior atomic read or write
fn write_race_free_with_atomic(&mut self, clocks: &ThreadClockSet) -> bool {
/// Checks if the memory cell access is ordered with all prior atomic reads and writes
fn race_free_with_atomic(&self, clocks: &ThreadClockSet) -> bool {
if let Some(atomic) = self.atomic() {
atomic.read_vector <= clocks.clock && atomic.write_vector <= clocks.clock
} else {
@@ -296,11 +296,6 @@ impl MemoryCellClocks {
}
}
/// Checks if the memory cell read races with any prior atomic write
fn read_race_free_with_atomic(&self, clocks: &ThreadClockSet) -> bool {
if let Some(atomic) = self.atomic() { atomic.write_vector <= clocks.clock } else { true }
}
/// Update memory cell data-race tracking for atomic
/// load relaxed semantics; this is a no-op if this memory was
/// not used previously as atomic memory.
@@ -528,7 +523,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
// the *value* (including the associated provenance if this is an AtomicPtr) at this location.
// Only metadata on the location itself is used.
let scalar = this.allow_data_races_ref(move |this| this.read_scalar(&place.into()))?;
this.validate_overlapping_atomic_read(place)?;
this.validate_overlapping_atomic(place)?;
this.buffered_atomic_read(place, atomic, scalar, || {
this.validate_atomic_load(place, atomic)
})
@@ -542,7 +537,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
atomic: AtomicWriteOp,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
this.validate_overlapping_atomic_write(dest)?;
this.validate_overlapping_atomic(dest)?;
this.allow_data_races_mut(move |this| this.write_scalar(val, &(*dest).into()))?;
this.validate_atomic_store(dest, atomic)?;
// FIXME: it's not possible to get the value before write_scalar. A read_scalar will cause
@@ -563,7 +558,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
let this = self.eval_context_mut();
this.validate_overlapping_atomic_write(place)?;
this.validate_overlapping_atomic(place)?;
let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
// Atomics wrap around on overflow.
@@ -592,7 +587,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let this = self.eval_context_mut();
this.validate_overlapping_atomic_write(place)?;
this.validate_overlapping_atomic(place)?;
let old = this.allow_data_races_mut(|this| this.read_scalar(&place.into()))?;
this.allow_data_races_mut(|this| this.write_scalar(new, &(*place).into()))?;
@@ -613,7 +608,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
let this = self.eval_context_mut();
this.validate_overlapping_atomic_write(place)?;
this.validate_overlapping_atomic(place)?;
let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
let lt = this.binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar()?.to_bool()?;
@@ -656,7 +651,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
use rand::Rng as _;
let this = self.eval_context_mut();
this.validate_overlapping_atomic_write(place)?;
this.validate_overlapping_atomic(place)?;
// Failure ordering cannot be stronger than success ordering, therefore first attempt
// to read with the failure ordering and if successful then try again with the success
// read ordering and write in the success case.
@@ -706,7 +701,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
atomic: AtomicReadOp,
) -> InterpResult<'tcx> {
let this = self.eval_context_ref();
this.validate_overlapping_atomic_read(place)?;
this.validate_overlapping_atomic(place)?;
this.validate_atomic_op(
place,
atomic,
@@ -729,7 +724,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
atomic: AtomicWriteOp,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
this.validate_overlapping_atomic_write(place)?;
this.validate_overlapping_atomic(place)?;
this.validate_atomic_op(
place,
atomic,
@@ -755,7 +750,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
let release = matches!(atomic, Release | AcqRel | SeqCst);
let this = self.eval_context_mut();
this.validate_overlapping_atomic_write(place)?;
this.validate_overlapping_atomic(place)?;
this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
if acquire {
memory.load_acquire(clocks, index)?;
@@ -941,9 +936,9 @@ impl VClockAlloc {
)
}
/// Detect racing atomic writes (not data races)
/// Detect racing atomic reads and writes (not data races)
/// on every byte of the current access range
pub(super) fn read_race_free_with_atomic<'tcx>(
pub(super) fn race_free_with_atomic<'tcx>(
&self,
range: AllocRange,
global: &GlobalState,
@@ -952,26 +947,7 @@ impl VClockAlloc {
let (_, clocks) = global.current_thread_state();
let alloc_ranges = self.alloc_ranges.borrow();
for (_, range) in alloc_ranges.iter(range.start, range.size) {
if !range.read_race_free_with_atomic(&clocks) {
return false;
}
}
}
true
}
/// Detect racing atomic read and writes (not data races)
/// on every byte of the current access range
pub(super) fn write_race_free_with_atomic<'tcx>(
&mut self,
range: AllocRange,
global: &GlobalState,
) -> bool {
if global.race_detecting() {
let (_, clocks) = global.current_thread_state();
let alloc_ranges = self.alloc_ranges.get_mut();
for (_, range) in alloc_ranges.iter_mut(range.start, range.size) {
if !range.write_race_free_with_atomic(&clocks) {
if !range.race_free_with_atomic(&clocks) {
return false;
}
}
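The merged race_free_with_atomic check above boils down to a component-wise vector-clock comparison: a byte's access is ordered with all prior atomic accesses iff both the atomic read clock and the atomic write clock are <= the accessing thread's clock. A toy, self-contained sketch of that comparison (the VClock type and the function names here are illustrative stand-ins, not Miri's actual types):

// Illustrative stand-in for Miri's vector-clock check; not part of this commit.
#[derive(Clone, Default)]
struct VClock(Vec<u64>);

impl VClock {
    // Component-wise <=, treating missing entries as 0.
    fn le(&self, other: &VClock) -> bool {
        let len = self.0.len().max(other.0.len());
        (0..len).all(|i| self.0.get(i).copied().unwrap_or(0) <= other.0.get(i).copied().unwrap_or(0))
    }
}

// Mirrors the merged check: the access must be ordered with all prior atomic reads *and* writes.
fn race_free_with_atomic(atomic_read: &VClock, atomic_write: &VClock, thread: &VClock) -> bool {
    atomic_read.le(thread) && atomic_write.le(thread)
}

fn main() {
    let thread = VClock(vec![3, 1]);
    let ordered = VClock(vec![2, 1]); // <= thread on every component: ordered, no race
    let racing = VClock(vec![0, 2]);  // second component exceeds thread's: races
    assert!(race_free_with_atomic(&ordered, &ordered, &thread));
    assert!(!race_free_with_atomic(&ordered, &racing, &thread));
}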

View File

@@ -35,7 +35,8 @@
//! (such as accessing the top 16 bits of an AtomicU32). These scenarios are generally not discussed in formalisations of the C++ memory model.
//! In Rust, these operations can only be done through a `&mut AtomicFoo` reference or one derived from it, therefore these operations
//! can only happen after all previous accesses on the same locations. This implementation is adapted to allow these operations.
//! A mixed size/atomicity read that races with writes, or a write that races with reads or writes will still cause UBs to be thrown.
//! A mixed-atomicity read that races with writes, or a write that races with reads or writes, will still cause UB to be thrown.
//! Mixed-size atomic accesses must not race with any other atomic access, whether read or write, or UB will be thrown.
//! You can refer to test cases in weak_memory/extra_cpp.rs and weak_memory/extra_cpp_unsafe.rs for examples of these operations.
// Our and the author's own implementation (tsan11) of the paper have some deviations from the provided operational semantics in §5.3:
@@ -403,9 +404,9 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
crate::MiriEvalContextExt<'mir, 'tcx>
{
// If weak memory emulation is enabled, check if this atomic op imperfectly overlaps with a previous
// atomic write. If it does, then we require it to be ordered (non-racy) with all previous atomic
// writes on all the bytes in range
fn validate_overlapping_atomic_read(&self, place: &MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
// atomic read or write. If it does, then we require it to be ordered (non-racy) with all previous atomic
// accesses on all the bytes in range
fn validate_overlapping_atomic(&self, place: &MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
let this = self.eval_context_ref();
let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
if let crate::AllocExtra {
@@ -417,37 +418,9 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
let range = alloc_range(base_offset, place.layout.size);
if alloc_buffers.is_overlapping(range)
&& !alloc_clocks
.read_race_free_with_atomic(range, this.machine.data_race.as_ref().unwrap())
.race_free_with_atomic(range, this.machine.data_race.as_ref().unwrap())
{
throw_ub_format!(
"racy imperfectly overlapping atomic access is not possible in the C++20 memory model"
);
}
}
Ok(())
}
// Same as above but needs to be ordered with all previous atomic read or writes
fn validate_overlapping_atomic_write(
&mut self,
place: &MPlaceTy<'tcx, Tag>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
if let (
crate::AllocExtra {
weak_memory: Some(alloc_buffers),
data_race: Some(alloc_clocks),
..
},
crate::Evaluator { data_race: Some(global), .. },
) = this.get_alloc_extra_mut(alloc_id)?
{
let range = alloc_range(base_offset, place.layout.size);
if alloc_buffers.is_overlapping(range)
&& !alloc_clocks.write_race_free_with_atomic(range, global)
{
throw_ub_format!("racy imperfectly overlapping atomic access");
throw_ub_format!("racy imperfectly overlapping atomic access is not possible in the C++20 memory model");
}
}
Ok(())
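As the updated module documentation above spells out, mixed-size atomic accesses are still permitted when they are ordered with every other atomic access on the affected bytes; only racing ones now trip validate_overlapping_atomic. A minimal sketch of an ordered, and therefore still accepted, variant of the racing test added below, reusing that test's helpers; this program illustrates the rule and is not part of this commit:

// Illustrative sketch, not part of this commit: the join() orders the 2-byte
// load after the 4-byte atomic load, so the mixed-size accesses do not race.
#![feature(core_intrinsics)]
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering::*;
use std::thread::spawn;

fn static_atomic(val: u32) -> &'static AtomicU32 {
    Box::leak(Box::new(AtomicU32::new(val)))
}

fn split_u32_ptr(dword: *const u32) -> *const [u16; 2] {
    unsafe { std::mem::transmute::<*const u32, *const [u16; 2]>(dword) }
}

pub fn main() {
    let x = static_atomic(0);
    let j1 = spawn(move || {
        x.load(Relaxed);
    });
    // Joining the other thread first makes its 4-byte load happen-before the
    // mixed-size load below, so the imperfectly overlapping access is ordered.
    j1.join().unwrap();
    let x_ptr = x as *const AtomicU32 as *const u32;
    let x_split = split_u32_ptr(x_ptr);
    unsafe {
        let hi = &(*x_split)[0] as *const u16;
        std::intrinsics::atomic_load_relaxed(hi);
    }
}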

View File

@@ -1,5 +1,4 @@
// ignore-windows: Concurrency on Windows is not supported yet.
// compile-flags: -Zmiri-ignore-leaks
#![feature(core_intrinsics)]

View File

@@ -0,0 +1,39 @@
// ignore-windows: Concurrency on Windows is not supported yet.
#![feature(core_intrinsics)]
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering::*;
use std::thread::spawn;
fn static_atomic(val: u32) -> &'static AtomicU32 {
let ret = Box::leak(Box::new(AtomicU32::new(val)));
ret
}
fn split_u32_ptr(dword: *const u32) -> *const [u16; 2] {
unsafe { std::mem::transmute::<*const u32, *const [u16; 2]>(dword) }
}
// Racing mixed-size reads may cause two loads to read-from
// the same store but observe different values, which doesn't make
// sense under the formal model, so we forbid this.
pub fn main() {
let x = static_atomic(0);
let j1 = spawn(move || {
x.load(Relaxed);
});
let j2 = spawn(move || {
let x_ptr = x as *const AtomicU32 as *const u32;
let x_split = split_u32_ptr(x_ptr);
unsafe {
let hi = &(*x_split)[0] as *const u16;
std::intrinsics::atomic_load_relaxed(hi); //~ ERROR: imperfectly overlapping
}
});
j1.join().unwrap();
j2.join().unwrap();
}

View File

@@ -0,0 +1,18 @@
warning: thread support is experimental: the scheduler is not preemptive, and can get stuck in spin loops.
(see https://github.com/rust-lang/miri/issues/1388)
error: Undefined Behavior: racy imperfectly overlapping atomic access is not possible in the C++20 memory model
--> $DIR/racing_mixed_size_read.rs:LL:CC
|
LL | std::intrinsics::atomic_load_relaxed(hi);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ racy imperfectly overlapping atomic access is not possible in the C++20 memory model
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: inside closure at $DIR/racing_mixed_size_read.rs:LL:CC
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to previous error; 1 warning emitted

View File

@@ -18,10 +18,6 @@ fn static_atomic(val: u32) -> &'static AtomicU32 {
ret
}
fn split_u32_ptr(dword: *const u32) -> *const [u16; 2] {
unsafe { std::mem::transmute::<*const u32, *const [u16; 2]>(dword) }
}
// We allow non-atomic and atomic reads to race
fn racing_mixed_atomicity_read() {
let x = static_atomic(0);
@@ -41,58 +37,6 @@ fn racing_mixed_atomicity_read() {
assert_eq!(r2, 42);
}
// We allow mixed-size atomic reads to race
fn racing_mixed_size_read() {
let x = static_atomic(0);
let j1 = spawn(move || {
x.load(Relaxed);
});
let j2 = spawn(move || {
let x_ptr = x as *const AtomicU32 as *const u32;
let x_split = split_u32_ptr(x_ptr);
unsafe {
let hi = &(*x_split)[0] as *const u16;
std::intrinsics::atomic_load_relaxed(hi);
}
});
j1.join().unwrap();
j2.join().unwrap();
}
// And we allow the combination of both of the above.
fn racing_mixed_atomicity_and_size_read() {
let x = static_atomic(u32::from_be(0xabbafafa));
let j1 = spawn(move || {
x.load(Relaxed);
});
let j2 = spawn(move || {
let x_ptr = x as *const AtomicU32 as *const u32;
unsafe { *x_ptr };
});
let j3 = spawn(move || {
let x_ptr = x as *const AtomicU32 as *const u32;
let x_split = split_u32_ptr(x_ptr);
unsafe {
let hi = &(*x_split)[0] as *const u16;
std::intrinsics::atomic_load_relaxed(hi)
}
});
j1.join().unwrap();
j2.join().unwrap();
let r3 = j3.join().unwrap();
assert_eq!(r3, u16::from_be(0xabba));
}
pub fn main() {
racing_mixed_atomicity_read();
racing_mixed_size_read();
racing_mixed_atomicity_and_size_read();
}