//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0
#![feature(new_uninit)]

use std::ptr::null_mut;
use std::sync::atomic::{AtomicPtr, Ordering};
use std::thread::spawn;

#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);

unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}

pub fn main() {
    // Shared atomic pointer
    let pointer = AtomicPtr::new(null_mut::<usize>());
    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);

    // Note: this test is scheduler-dependent. The operations need to occur
    // in this order, otherwise the allocation is not visible to the other
    // thread and the race is not detected:
    //  1. alloc
    //  2. write
    unsafe {
        let j1 = spawn(move || {
            // Concurrently allocate the memory.
            // Uses relaxed semantics so as not to
            // generate a release sequence.
            let pointer = &*ptr.0;
            pointer.store(
                Box::into_raw(Box::<usize>::new_uninit()) as *mut usize,
                Ordering::Relaxed,
            );
        });

        let j2 = spawn(move || {
            let pointer = &*ptr.0;
            *pointer.load(Ordering::Relaxed) = 2; //~ ERROR: Data race detected between Write on thread `<unnamed>` and Allocate on thread `<unnamed>`
        });

        j1.join().unwrap();
        j2.join().unwrap();

        // Clean up memory; this will never be executed
        // because Miri aborts on the reported race.
        drop(Box::from_raw(pointer.load(Ordering::Relaxed)));
    }
}
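
// A minimal sketch (not part of the original test) reusing the `EvilSend`
// wrapper above: the same pattern made race-free by upgrading the store to
// Release and the load to Acquire, so the allocation happens-before the
// non-atomic write and Miri should report no race. The name
// `race_free_variant` and the yielding spin loop are illustrative
// assumptions, not from the test suite.
#[allow(dead_code)]
fn race_free_variant(ptr: EvilSend<*const AtomicPtr<usize>>) {
    unsafe {
        let j1 = spawn(move || {
            let pointer = &*ptr.0;
            // Release: publishes the allocation to any thread whose
            // Acquire load observes this store.
            pointer.store(
                Box::into_raw(Box::<usize>::new_uninit()) as *mut usize,
                Ordering::Release,
            );
        });

        let j2 = spawn(move || {
            let pointer = &*ptr.0;
            // Spin (yielding so the scheduler can run the other thread)
            // until the allocation has been published.
            let mut p = pointer.load(Ordering::Acquire);
            while p.is_null() {
                std::thread::yield_now();
                p = pointer.load(Ordering::Acquire);
            }
            // The Acquire load synchronized-with the Release store, so
            // this write happens-after the allocation: no data race.
            *p = 2;
        });

        j1.join().unwrap();
        j2.join().unwrap();

        // Clean up the allocation; unlike in `main`, this is reached
        // because no race aborts execution.
        drop(Box::from_raw((*ptr.0).load(Ordering::Relaxed)));
    }
}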