From f729f289255bcc8d1bfad614ac74bc51d411826a Mon Sep 17 00:00:00 2001
From: Andy Wang
Date: Sat, 7 May 2022 17:34:18 +0100
Subject: [PATCH] Move cpp20_rwc_syncs into compile-fail

---
 .../weak_memory/cpp20_rwc_syncs.rs        | 85 +++++++++++++++++++
 tests/run-pass/concurrency/weak_memory.rs | 58 +------------
 2 files changed, 86 insertions(+), 57 deletions(-)
 create mode 100644 tests/compile-fail/weak_memory/cpp20_rwc_syncs.rs

diff --git a/tests/compile-fail/weak_memory/cpp20_rwc_syncs.rs b/tests/compile-fail/weak_memory/cpp20_rwc_syncs.rs
new file mode 100644
index 00000000000..b9e395fd774
--- /dev/null
+++ b/tests/compile-fail/weak_memory/cpp20_rwc_syncs.rs
@@ -0,0 +1,85 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+// compile-flags: -Zmiri-ignore-leaks
+
+// https://plv.mpi-sws.org/scfix/paper.pdf
+// 2.2 Second Problem: SC Fences are Too Weak
+// This test should pass under the C++20 model Rust is using.
+// Unfortunately, Miri's weak memory emulation only follows the C++11 model,
+// as we don't know how to correctly emulate C++20's revised SC semantics,
+// so we have to stick to C++11 emulation from existing research.
+
+use std::sync::atomic::Ordering::*;
+use std::thread::{spawn, yield_now};
+use std::sync::atomic::{fence, AtomicUsize};
+
+// Spins and yields until it reads the expected value
+fn reads_value(loc: &AtomicUsize, val: usize) -> usize {
+    while loc.load(Relaxed) != val {
+        yield_now();
+    }
+    val
+}
+
+// We can't create static items because we need to run each test
+// multiple times
+fn static_atomic(val: usize) -> &'static AtomicUsize {
+    let ret = Box::leak(Box::new(AtomicUsize::new(val)));
+    // A workaround to put the initialisation value in the store buffer
+    ret.store(val, Relaxed);
+    ret
+}
+
+fn test_cpp20_rwc_syncs() {
+    /*
+    int main() {
+        atomic_int x = 0;
+        atomic_int y = 0;
+
+        {{{ x.store(1,mo_relaxed);
+        ||| { r1=x.load(mo_relaxed).readsvalue(1);
+              fence(mo_seq_cst);
+              r2=y.load(mo_relaxed); }
+        ||| { y.store(1,mo_relaxed);
+              fence(mo_seq_cst);
+              r3=x.load(mo_relaxed); }
+        }}}
+        return 0;
+    }
+    */
+    let x = static_atomic(0);
+    let y = static_atomic(0);
+
+    let j1 = spawn(move || {
+        x.store(1, Relaxed);
+    });
+
+    let j2 = spawn(move || {
+        reads_value(&x, 1);
+        fence(SeqCst);
+        y.load(Relaxed)
+    });
+
+    let j3 = spawn(move || {
+        y.store(1, Relaxed);
+        fence(SeqCst);
+        x.load(Relaxed)
+    });
+
+    j1.join().unwrap();
+    let b = j2.join().unwrap();
+    let c = j3.join().unwrap();
+
+    if (b, c) == (0, 0) {
+        // FIXME: the standalone compiletest-rs needs to support
+        // failure-status header to allow us to write assert_ne!((b, c), (0, 0))
+        // https://rustc-dev-guide.rust-lang.org/tests/headers.html#miscellaneous-headers
+        // because panic exits with 101 but compiletest-rs expects 1
+        let _ = unsafe { std::mem::MaybeUninit::<*const u32>::uninit().assume_init() }; //~ ERROR uninitialized
+    }
+}
+
+pub fn main() {
+    for _ in 0..500 {
+        test_cpp20_rwc_syncs();
+    }
+}
\ No newline at end of file
diff --git a/tests/run-pass/concurrency/weak_memory.rs b/tests/run-pass/concurrency/weak_memory.rs
index 90820d4348d..e85c2d1960c 100644
--- a/tests/run-pass/concurrency/weak_memory.rs
+++ b/tests/run-pass/concurrency/weak_memory.rs
@@ -31,7 +31,7 @@
 #![feature(atomic_from_mut)]
 
 use std::sync::atomic::Ordering::*;
-use std::sync::atomic::{fence, AtomicU16, AtomicU32, AtomicUsize};
+use std::sync::atomic::{AtomicU16, AtomicU32, AtomicUsize};
 use std::thread::{spawn, yield_now};
 
 #[derive(Copy, Clone)]
@@ -57,13 +57,6 @@ fn acquires_value(loc: &AtomicUsize, val: usize) -> usize {
     val
 }
 
-fn reads_value(loc: &AtomicUsize, val: usize) -> usize {
-    while loc.load(Relaxed) != val {
-        yield_now();
-    }
-    val
-}
-
 fn test_corr() {
     let x = static_atomic(0);
     let y = static_atomic(0);
@@ -242,54 +235,6 @@ fn test_sc_store_buffering() {
     assert_ne!((a, b), (0, 0));
 }
 
-// 2.2 Second Problem: SC Fences are Too Weak
-// This test should pass under the C++20 model Rust is using.
-// Unfortunately, Miri's weak memory emulation only follows C++11 model
-// as we don't know how to correctly emulate C++20's revised SC semantics
-#[allow(dead_code)]
-fn test_cpp20_rwc_syncs() {
-    /*
-    int main() {
-        atomic_int x = 0;
-        atomic_int y = 0;
-
-        {{{ x.store(1,mo_relaxed);
-        ||| { r1=x.load(mo_relaxed).readsvalue(1);
-              fence(mo_seq_cst);
-              r2=y.load(mo_relaxed); }
-        ||| { y.store(1,mo_relaxed);
-              fence(mo_seq_cst);
-              r3=x.load(mo_relaxed); }
-        }}}
-        return 0;
-    }
-    */
-    let x = static_atomic(0);
-    let y = static_atomic(0);
-
-    let j1 = spawn(move || {
-        x.store(1, Relaxed);
-    });
-
-    let j2 = spawn(move || {
-        reads_value(&x, 1);
-        fence(SeqCst);
-        y.load(Relaxed)
-    });
-
-    let j3 = spawn(move || {
-        y.store(1, Relaxed);
-        fence(SeqCst);
-        x.load(Relaxed)
-    });
-
-    j1.join().unwrap();
-    let b = j2.join().unwrap();
-    let c = j3.join().unwrap();
-
-    assert_ne!((b, c), (0, 0));
-}
-
 pub fn main() {
     test_imperfectly_overlapping_access();
     // TODO: does this make chances of spurious success
@@ -303,6 +248,5 @@ pub fn main() {
         test_wrc();
         test_corr();
         test_sc_store_buffering();
-        // test_cpp20_rwc_syncs();
     }
 }