diff --git a/tests/compile-fail/weak_memory/imperfectly_overlapping.rs b/tests/compile-fail/weak_memory/imperfectly_overlapping.rs deleted file mode 100644 index 6f91e147fa8..00000000000 --- a/tests/compile-fail/weak_memory/imperfectly_overlapping.rs +++ /dev/null @@ -1,35 +0,0 @@ -// ignore-windows: Concurrency on Windows is not supported yet. -#![feature(atomic_from_mut)] -#![feature(core_intrinsics)] - -use std::intrinsics::atomic_load; -use std::sync::atomic::Ordering::*; -use std::sync::atomic::{AtomicU16, AtomicU32}; - -fn split_u32(dword: &mut u32) -> &mut [u16; 2] { - unsafe { std::mem::transmute::<&mut u32, &mut [u16; 2]>(dword) } -} - -fn test_same_thread() { - let mut dword = AtomicU32::new(42); - assert_eq!(dword.load(Relaxed), 42); - dword.store(0xabbafafa, Relaxed); - - let dword_mut = dword.get_mut(); - - let words_mut = split_u32(dword_mut); - - let (hi_mut, lo_mut) = words_mut.split_at_mut(1); - - let (hi, _) = (AtomicU16::from_mut(&mut hi_mut[0]), AtomicU16::from_mut(&mut lo_mut[0])); - - unsafe { - // Equivalent to: hi.load(Ordering::SeqCst) - // We need to use intrisics to for precise error location - atomic_load(hi.get_mut() as *mut u16); //~ ERROR: mixed-size access on an existing atomic object - } -} - -pub fn main() { - test_same_thread(); -} diff --git a/tests/run-pass/weak_memory/extra_cpp.rs b/tests/run-pass/weak_memory/extra_cpp.rs new file mode 100644 index 00000000000..b20ec583499 --- /dev/null +++ b/tests/run-pass/weak_memory/extra_cpp.rs @@ -0,0 +1,79 @@ +// compile-flags: -Zmiri-ignore-leaks + +// Tests operations not performable through C++'s atomic API +// but doable in safe (at least sound) Rust. 
+ +#![feature(atomic_from_mut)] + +use std::sync::atomic::Ordering::*; +use std::sync::atomic::{AtomicU16, AtomicU32, AtomicUsize}; +use std::thread::spawn; + +fn static_atomic_mut(val: usize) -> &'static mut AtomicUsize { + let ret = Box::leak(Box::new(AtomicUsize::new(val))); + ret +} + +fn split_u32(dword: &mut u32) -> &mut [u16; 2] { + unsafe { std::mem::transmute::<&mut u32, &mut [u16; 2]>(dword) } +} + +fn mem_replace() { + let mut x = AtomicU32::new(0); + + let old_x = std::mem::replace(&mut x, AtomicU32::new(42)); + + assert_eq!(x.load(Relaxed), 42); + assert_eq!(old_x.load(Relaxed), 0); +} + +fn assign_to_mut() { + let x = static_atomic_mut(0); + x.store(1, Relaxed); + + *x = AtomicUsize::new(2); + + assert_eq!(x.load(Relaxed), 2); +} + +fn get_mut_write() { + let x = static_atomic_mut(0); + x.store(1, Relaxed); + { + let x_mut = x.get_mut(); + *x_mut = 2; + } + + let j1 = spawn(move || x.load(Relaxed)); + + let r1 = j1.join().unwrap(); + assert_eq!(r1, 2); +} + +// This is technically doable in C++ with atomic_ref +// but little literature exists atm on its involvement +// in mixed size/atomicity accesses +fn from_mut_split() { + let mut x: u32 = 0; + + { + let x_atomic = AtomicU32::from_mut(&mut x); + x_atomic.store(u32::from_be(0xabbafafa), Relaxed); + } + + let (x_hi, x_lo) = split_u32(&mut x).split_at_mut(1); + + let x_hi_atomic = AtomicU16::from_mut(&mut x_hi[0]); + let x_lo_atomic = AtomicU16::from_mut(&mut x_lo[0]); + + assert_eq!(x_hi_atomic.load(Relaxed), u16::from_be(0xabba)); + assert_eq!(x_lo_atomic.load(Relaxed), u16::from_be(0xfafa)); +} + + +pub fn main() { + get_mut_write(); + from_mut_split(); + assign_to_mut(); + mem_replace(); +} diff --git a/tests/run-pass/weak_memory/extra_cpp.stderr b/tests/run-pass/weak_memory/extra_cpp.stderr new file mode 100644 index 00000000000..1d0ce4b3853 --- /dev/null +++ b/tests/run-pass/weak_memory/extra_cpp.stderr @@ -0,0 +1,2 @@ +warning: thread support is experimental: weak memory effects are not fully 
compatible with the Rust atomics memory model. +