// We want to control preemption here.
//@compile-flags: -Zmiri-preemption-rate=0

use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::thread::spawn;

// Wrapper that unsafely asserts Send/Sync so a raw pointer can be moved into spawned threads.
#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);

unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}

pub fn main() {
    let mut a = AtomicUsize::new(0);
    let b = &mut a as *mut AtomicUsize;
    let c = EvilSend(b);

    unsafe {
        let j1 = spawn(move || {
            let atomic_ref = &mut *c.0;
            atomic_ref.store(64, Ordering::SeqCst);
        });

        let j2 = spawn(move || {
            let atomic_ref = &mut *c.0;
            // Non-atomic write racing with the atomic store in the other thread.
            *atomic_ref.get_mut() = 32; //~ ERROR: Data race detected between Write on thread `<unnamed>` and Atomic Store on thread `<unnamed>`
        });

        j1.join().unwrap();
        j2.join().unwrap();
    }
}