Split extra_cpp tests into sound and unsafe

Andy Wang 2022-05-25 21:10:00 +01:00
parent 613d60db0b
commit bfa56454e9
4 changed files with 102 additions and 81 deletions

@@ -36,6 +36,7 @@
//! In Rust, these operations can only be done through a `&mut AtomicFoo` reference or one derived from it, so they can
//! only happen after all previous accesses to the same locations. This implementation is adapted to allow these operations.
//! A mixed-size/atomicity read that races with writes, or a write that races with reads or writes, will still cause UB to be thrown.
//! You can refer to test cases in weak_memory/extra_cpp.rs and weak_memory/extra_cpp_unsafe.rs for examples of these operations.
// Both our implementation and the paper authors' own implementation (tsan11) deviate in some ways from the operational semantics provided in §5.3:
// 1. In the operational semantics, store elements keep a copy of the atomic object's vector clock (AtomicCellClocks::sync_vector in miri),
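Below is a minimal sketch, not part of this commit, of the sound mixed-size pattern the doc comment above allows (it assumes a nightly toolchain for the `atomic_from_mut` feature, as the tests themselves do): every access to `x` is atomic, only the access sizes differ.

#![feature(atomic_from_mut)]
use std::sync::atomic::{AtomicU16, AtomicU32, Ordering::Relaxed};

// Same helper as in the tests: view a u32 as two u16 halves.
fn split_u32(dword: &mut u32) -> &mut [u16; 2] {
    unsafe { std::mem::transmute::<&mut u32, &mut [u16; 2]>(dword) }
}

pub fn main() {
    let mut x: u32 = 0;
    // Whole-word atomic store through an `&mut AtomicU32` derived from `&mut x`.
    AtomicU32::from_mut(&mut x).store(0xabab_abab, Relaxed);
    // Half-word atomic loads on the same location. No non-atomic access to `x`
    // is involved, and the `&mut` reborrow orders them after the 32-bit store.
    let [half_a, half_b] = split_u32(&mut x);
    // Both halves hold 0xabab, so the check does not depend on endianness.
    assert_eq!(AtomicU16::from_mut(half_a).load(Relaxed), 0xabab);
    assert_eq!(AtomicU16::from_mut(half_b).load(Relaxed), 0xabab);
}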

@@ -15,19 +15,10 @@ fn static_atomic_mut(val: u32) -> &'static mut AtomicU32 {
ret
}
fn static_atomic(val: u32) -> &'static AtomicU32 {
let ret = Box::leak(Box::new(AtomicU32::new(val)));
ret
}
fn split_u32(dword: &mut u32) -> &mut [u16; 2] {
unsafe { std::mem::transmute::<&mut u32, &mut [u16; 2]>(dword) }
}
fn split_u32_ptr(dword: *const u32) -> *const [u16; 2] {
unsafe { std::mem::transmute::<*const u32, *const [u16; 2]>(dword) }
}
fn mem_replace() {
let mut x = AtomicU32::new(0);
@@ -71,6 +62,8 @@ fn from_mut_split() {
x_atomic.store(u32::from_be(0xabbafafa), Relaxed);
}
// Split the `AtomicU32` into two `AtomicU16`.
// Crucially, there is no non-atomic access to `x`! All accesses are atomic, but of different size.
let (x_hi, x_lo) = split_u32(&mut x).split_at_mut(1);
let x_hi_atomic = AtomicU16::from_mut(&mut x_hi[0]);
@@ -80,81 +73,9 @@ fn from_mut_split() {
assert_eq!(x_lo_atomic.load(Relaxed), u16::from_be(0xfafa));
}
// Although not possible to do in safe Rust,
// we allow non-atomic and atomic reads to race
// as this should be sound
fn racing_mixed_atomicity_read() {
let x = static_atomic(0);
x.store(42, Relaxed);
let j1 = spawn(move || x.load(Relaxed));
let j2 = spawn(move || {
let x_ptr = x as *const AtomicU32 as *const u32;
unsafe { std::intrinsics::atomic_load_relaxed(x_ptr) }
});
let r1 = j1.join().unwrap();
let r2 = j2.join().unwrap();
assert_eq!(r1, 42);
assert_eq!(r2, 42);
}
fn racing_mixed_size_read() {
let x = static_atomic(0);
let j1 = spawn(move || {
x.load(Relaxed);
});
let j2 = spawn(move || {
let x_ptr = x as *const AtomicU32 as *const u32;
let x_split = split_u32_ptr(x_ptr);
unsafe {
let hi = &(*x_split)[0] as *const u16;
std::intrinsics::atomic_load_relaxed(hi); //~ ERROR: imperfectly overlapping
}
});
j1.join().unwrap();
j2.join().unwrap();
}
fn racing_mixed_atomicity_and_size_read() {
let x = static_atomic(u32::from_be(0xabbafafa));
let j1 = spawn(move || {
x.load(Relaxed);
});
let j2 = spawn(move || {
let x_ptr = x as *const AtomicU32 as *const u32;
unsafe { *x_ptr };
});
let j3 = spawn(move || {
let x_ptr = x as *const AtomicU32 as *const u32;
let x_split = split_u32_ptr(x_ptr);
unsafe {
let hi = &(*x_split)[0] as *const u16;
std::intrinsics::atomic_load_relaxed(hi)
}
});
j1.join().unwrap();
j2.join().unwrap();
let r3 = j3.join().unwrap();
assert_eq!(r3, u16::from_be(0xabba));
}
pub fn main() {
get_mut_write();
from_mut_split();
assign_to_mut();
mem_replace();
racing_mixed_atomicity_read();
racing_mixed_size_read();
racing_mixed_atomicity_and_size_read();
}

@@ -0,0 +1,97 @@
// compile-flags: -Zmiri-ignore-leaks
// Tests operations not performable through C++'s atomic API
// but doable in unsafe Rust, which we think *should* be fine.
// Nonetheless they may turn out to be inconsistent with the
// memory model in the future.
#![feature(atomic_from_mut)]
#![feature(core_intrinsics)]
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering::*;
use std::thread::spawn;
fn static_atomic(val: u32) -> &'static AtomicU32 {
let ret = Box::leak(Box::new(AtomicU32::new(val)));
ret
}
fn split_u32_ptr(dword: *const u32) -> *const [u16; 2] {
unsafe { std::mem::transmute::<*const u32, *const [u16; 2]>(dword) }
}
// We allow non-atomic and atomic reads to race
fn racing_mixed_atomicity_read() {
let x = static_atomic(0);
x.store(42, Relaxed);
let j1 = spawn(move || x.load(Relaxed));
let j2 = spawn(move || {
let x_ptr = x as *const AtomicU32 as *const u32;
unsafe { std::intrinsics::atomic_load_relaxed(x_ptr) }
});
let r1 = j1.join().unwrap();
let r2 = j2.join().unwrap();
assert_eq!(r1, 42);
assert_eq!(r2, 42);
}
// We allow mixed-size atomic reads to race
fn racing_mixed_size_read() {
let x = static_atomic(0);
let j1 = spawn(move || {
x.load(Relaxed);
});
let j2 = spawn(move || {
let x_ptr = x as *const AtomicU32 as *const u32;
let x_split = split_u32_ptr(x_ptr);
unsafe {
let hi = &(*x_split)[0] as *const u16;
std::intrinsics::atomic_load_relaxed(hi);
}
});
j1.join().unwrap();
j2.join().unwrap();
}
// And the combination of both of the above
fn racing_mixed_atomicity_and_size_read() {
let x = static_atomic(u32::from_be(0xabbafafa));
let j1 = spawn(move || {
x.load(Relaxed);
});
let j2 = spawn(move || {
let x_ptr = x as *const AtomicU32 as *const u32;
unsafe { *x_ptr };
});
let j3 = spawn(move || {
let x_ptr = x as *const AtomicU32 as *const u32;
let x_split = split_u32_ptr(x_ptr);
unsafe {
let hi = &(*x_split)[0] as *const u16;
std::intrinsics::atomic_load_relaxed(hi)
}
});
j1.join().unwrap();
j2.join().unwrap();
let r3 = j3.join().unwrap();
assert_eq!(r3, u16::from_be(0xabba));
}
pub fn main() {
racing_mixed_atomicity_read();
racing_mixed_size_read();
racing_mixed_atomicity_and_size_read();
}

@@ -0,0 +1,2 @@
warning: thread support is experimental: weak memory effects are not fully compatible with the Rust atomics memory model.