// Reconstruction of the post-patch state of tests/pass/0weak_memory_consistency.rs.
// The original SOURCE is a whitespace-mangled unified diff; only the definitions
// that are fully visible inside the hunks are reproduced. Context-only fragments
// (e.g. the `EvilSend` wrapper) live in parts of the file not shown here.

use std::sync::atomic::Ordering::*;
use std::sync::atomic::{fence, AtomicBool, AtomicI32};
use std::thread::spawn;

// We can't create static items because we need to run each test multiple times,
// so we leak a heap allocation instead, which gives us a `&'static` reference.

/// Returns a leaked `AtomicI32` initialised to `val`.
fn static_atomic(val: i32) -> &'static AtomicI32 {
    let ret = Box::leak(Box::new(AtomicI32::new(val)));
    // Redundant-looking store: work around https://github.com/rust-lang/miri/issues/2164
    ret.store(val, Relaxed);
    ret
}

/// Returns a leaked `AtomicBool` initialised to `val`.
fn static_atomic_bool(val: bool) -> &'static AtomicBool {
    let ret = Box::leak(Box::new(AtomicBool::new(val)));
    // Same workaround as in `static_atomic` above.
    ret.store(val, Relaxed); // work around https://github.com/rust-lang/miri/issues/2164
    ret
}
-fn acquires_value(loc: &AtomicUsize, val: usize) -> usize { +fn acquires_value(loc: &AtomicI32, val: i32) -> i32 { while loc.load(Acquire) != val { std::hint::spin_loop(); } @@ -207,7 +213,7 @@ fn test_sc_store_buffering() { } fn test_single_thread() { - let x = AtomicUsize::new(42); + let x = AtomicI32::new(42); assert_eq!(x.load(Relaxed), 42); @@ -216,6 +222,42 @@ fn test_single_thread() { assert_eq!(x.load(Relaxed), 43); } +fn test_sync_through_rmw_and_fences() { + // Example from https://github.com/llvm/llvm-project/issues/56450#issuecomment-1183695905 + #[no_mangle] + pub fn rdmw(storing: &AtomicI32, sync: &AtomicI32, loading: &AtomicI32) -> i32 { + storing.store(1, Relaxed); + fence(Release); + sync.fetch_add(0, Relaxed); + fence(Acquire); + loading.load(Relaxed) + } + + let x = static_atomic(0); + let y = static_atomic(0); + let z = static_atomic(0); + + // Since each thread is so short, we need to make sure that they truely run at the same time + // Otherwise t1 will finish before t2 even starts + let go = static_atomic_bool(false); + + let t1 = spawn(move || { + while !go.load(Relaxed) {} + rdmw(y, x, z) + }); + + let t2 = spawn(move || { + while !go.load(Relaxed) {} + rdmw(z, x, y) + }); + + go.store(true, Relaxed); + + let a = t1.join().unwrap(); + let b = t2.join().unwrap(); + assert_ne!((a, b), (0, 0)); +} + pub fn main() { for _ in 0..50 { test_single_thread(); @@ -225,5 +267,6 @@ pub fn main() { test_wrc(); test_corr(); test_sc_store_buffering(); + test_sync_through_rmw_and_fences(); } } diff --git a/tests/pass/weak_memory/weak.rs b/tests/pass/weak_memory/weak.rs index 1d82b85844f..71d57dd11ec 100644 --- a/tests/pass/weak_memory/weak.rs +++ b/tests/pass/weak_memory/weak.rs @@ -8,8 +8,8 @@ // Spurious failure is possible, if you are really unlucky with // the RNG and always read the latest value from the store buffer. 
-use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::*; +use std::sync::atomic::{fence, AtomicUsize}; use std::thread::spawn; #[derive(Copy, Clone)] @@ -70,9 +70,9 @@ fn seq_cst() -> bool { r3 == 1 } -fn initialization_write() -> bool { +fn initialization_write(add_fence: bool) -> bool { let x = static_atomic(11); - assert_eq!(x.load(Relaxed), 11); + assert_eq!(x.load(Relaxed), 11); // work around https://github.com/rust-lang/miri/issues/2164 let wait = static_atomic(0); @@ -85,6 +85,9 @@ fn initialization_write() -> bool { let j2 = spawn(move || { reads_value(wait, 1); + if add_fence { + fence(AcqRel); + } x.load(Relaxed) }); @@ -94,15 +97,55 @@ fn initialization_write() -> bool { r2 == 11 } -// Asserts that the function returns true at least once in 100 runs -macro_rules! assert_once { - ($f:ident) => { - assert!(std::iter::repeat_with(|| $f()).take(100).any(|x| x)); - }; +fn faa_replaced_by_load() -> bool { + // Example from https://github.com/llvm/llvm-project/issues/56450#issuecomment-1183695905 + #[no_mangle] + pub fn rdmw(storing: &AtomicUsize, sync: &AtomicUsize, loading: &AtomicUsize) -> usize { + storing.store(1, Relaxed); + fence(Release); + // sync.fetch_add(0, Relaxed); + sync.load(Relaxed); + fence(Acquire); + loading.load(Relaxed) + } + + let x = static_atomic(0); + assert_eq!(x.load(Relaxed), 0); // work around https://github.com/rust-lang/miri/issues/2164 + let y = static_atomic(0); + assert_eq!(y.load(Relaxed), 0); // work around https://github.com/rust-lang/miri/issues/2164 + let z = static_atomic(0); + assert_eq!(z.load(Relaxed), 0); // work around https://github.com/rust-lang/miri/issues/2164 + + // Since each thread is so short, we need to make sure that they truely run at the same time + // Otherwise t1 will finish before t2 even starts + let go = static_atomic(0); + + let t1 = spawn(move || { + while go.load(Relaxed) == 0 {} + rdmw(y, x, z) + }); + + let t2 = spawn(move || { + while go.load(Relaxed) == 0 {} + rdmw(z, x, y) + 
}); + + go.store(1, Relaxed); + + let a = t1.join().unwrap(); + let b = t2.join().unwrap(); + (a, b) == (0, 0) +} + +/// Asserts that the function returns true at least once in 100 runs +fn assert_once(f: fn() -> bool) { + assert!(std::iter::repeat_with(|| f()).take(100).any(|x| x)); } pub fn main() { - assert_once!(relaxed); - assert_once!(seq_cst); - assert_once!(initialization_write); + assert_once(relaxed); + assert_once(seq_cst); + assert_once(|| initialization_write(false)); + assert_once(|| initialization_write(true)); + assert_once(faa_replaced_by_load); }