Rollup merge of #122729 - m-ou-se:relax, r=Amanieu

Relax SeqCst ordering in standard library.

Every single SeqCst in the standard library is unnecessary. In all cases, Relaxed or Release+Acquire was sufficient.

As I [wrote](https://marabos.nl/atomics/memory-ordering.html#common-misconceptions) in my book on atomics:

> [..] when reading code, SeqCst basically tells the reader: "this operation depends on the total order of every single SeqCst operation in the program," which is an incredibly far-reaching claim. The same code would likely be easier to review and verify if it used weaker memory ordering instead, if possible. For example, Release effectively tells the reader: "this relates to an acquire operation on the same variable," which involves far fewer considerations when forming an understanding of the code.
>
> It is advisable to see SeqCst as a warning sign. Seeing it in the wild often means that either something complicated is going on, or simply that the author did not take the time to analyze their memory ordering related assumptions, both of which are reasons for extra scrutiny.
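
For a concrete picture of the Release/Acquire pairing the quote describes, here is a minimal message-passing sketch (illustrative only, not code from this PR):

```rust
use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
use std::thread;

static DATA: AtomicU32 = AtomicU32::new(0);
static READY: AtomicBool = AtomicBool::new(false);

fn main() {
    let t = thread::spawn(|| {
        DATA.store(42, Ordering::Relaxed); // plain data write
        READY.store(true, Ordering::Release); // publish it
    });
    // The acquire load pairs with the release store on the same variable:
    // once we observe `true`, the write to DATA is guaranteed visible too.
    while !READY.load(Ordering::Acquire) {
        std::hint::spin_loop();
    }
    assert_eq!(DATA.load(Ordering::Relaxed), 42);
    t.join().unwrap();
}
```

SeqCst on `READY` would also be correct, but it would claim participation in the program-wide SeqCst order when the only thing the code relies on is the release/acquire pair on that one variable.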

r? @Amanieu @joboet
commit 43ad753adb
Jacob Pratt, 2024-03-20 20:29:44 -04:00 (committed by GitHub)
17 changed files with 70 additions and 59 deletions

View File

@@ -233,7 +233,7 @@ macro_rules! acquire {
 ///     let val = Arc::clone(&val);
 ///
 ///     thread::spawn(move || {
-///         let v = val.fetch_add(1, Ordering::SeqCst);
+///         let v = val.fetch_add(1, Ordering::Relaxed);
 ///         println!("{v:?}");
 ///     });
 /// }

View File

@@ -24,10 +24,7 @@ use crate::ptr;
 /// use std::alloc::{GlobalAlloc, Layout};
 /// use std::cell::UnsafeCell;
 /// use std::ptr::null_mut;
-/// use std::sync::atomic::{
-///     AtomicUsize,
-///     Ordering::{Acquire, SeqCst},
-/// };
+/// use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
 ///
 /// const ARENA_SIZE: usize = 128 * 1024;
 /// const MAX_SUPPORTED_ALIGN: usize = 4096;
@@ -61,7 +58,7 @@ use crate::ptr;
 ///         let mut allocated = 0;
 ///         if self
 ///             .remaining
-///             .fetch_update(SeqCst, SeqCst, |mut remaining| {
+///             .fetch_update(Relaxed, Relaxed, |mut remaining| {
 ///                 if size > remaining {
 ///                     return None;
 ///                 }
@@ -81,7 +78,7 @@ use crate::ptr;
 ///
 /// fn main() {
 ///     let _s = format!("allocating a string!");
-///     let currently = ALLOCATOR.remaining.load(Acquire);
+///     let currently = ALLOCATOR.remaining.load(Relaxed);
 ///     println!("allocated so far: {}", ARENA_SIZE - currently);
 /// }
 /// ```

View File

@@ -84,7 +84,7 @@ pub unsafe fn cleanup(ptr: *mut u8) -> Box<dyn Any + Send> {
         super::__rust_foreign_exception();
     }
-    let was_caught = (*adjusted_ptr).caught.swap(true, Ordering::SeqCst);
+    let was_caught = (*adjusted_ptr).caught.swap(true, Ordering::Relaxed);
     if was_caught {
         // Since cleanup() isn't allowed to panic, we just abort instead.
         intrinsics::abort();

View File

@@ -21,7 +21,7 @@ impl<T> OwnedStore<T> {
     pub(super) fn new(counter: &'static AtomicU32) -> Self {
         // Ensure the handle counter isn't 0, which would panic later,
         // when `NonZero::new` (aka `Handle::new`) is called in `alloc`.
-        assert_ne!(counter.load(Ordering::SeqCst), 0);
+        assert_ne!(counter.load(Ordering::Relaxed), 0);
         OwnedStore { counter, data: BTreeMap::new() }
     }
@@ -29,7 +29,7 @@ impl<T> OwnedStore<T> {
 impl<T> OwnedStore<T> {
     pub(super) fn alloc(&mut self, x: T) -> Handle {
-        let counter = self.counter.fetch_add(1, Ordering::SeqCst);
+        let counter = self.counter.fetch_add(1, Ordering::Relaxed);
         let handle = Handle::new(counter).expect("`proc_macro` handle counter overflowed");
         assert!(self.data.insert(handle, x).is_none());
         handle

View File

@@ -329,7 +329,7 @@ static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());
 /// ```
 #[unstable(feature = "alloc_error_hook", issue = "51245")]
 pub fn set_alloc_error_hook(hook: fn(Layout)) {
-    HOOK.store(hook as *mut (), Ordering::SeqCst);
+    HOOK.store(hook as *mut (), Ordering::Release);
 }

 /// Unregisters the current allocation error hook, returning it.
@@ -339,7 +339,7 @@ pub fn set_alloc_error_hook(hook: fn(Layout)) {
 /// If no custom hook is registered, the default hook will be returned.
 #[unstable(feature = "alloc_error_hook", issue = "51245")]
 pub fn take_alloc_error_hook() -> fn(Layout) {
-    let hook = HOOK.swap(ptr::null_mut(), Ordering::SeqCst);
+    let hook = HOOK.swap(ptr::null_mut(), Ordering::Acquire);
     if hook.is_null() { default_alloc_error_hook } else { unsafe { mem::transmute(hook) } }
 }
@@ -362,7 +362,7 @@ fn default_alloc_error_hook(layout: Layout) {
 #[alloc_error_handler]
 #[unstable(feature = "alloc_internals", issue = "none")]
 pub fn rust_oom(layout: Layout) -> ! {
-    let hook = HOOK.load(Ordering::SeqCst);
+    let hook = HOOK.load(Ordering::Acquire);
     let hook: fn(Layout) =
         if hook.is_null() { default_alloc_error_hook } else { unsafe { mem::transmute(hook) } };
     hook(layout);

View File

@@ -7,12 +7,12 @@ use crate::sync::atomic::{AtomicUsize, Ordering};
 static PORT: AtomicUsize = AtomicUsize::new(0);

 pub fn next_test_ip4() -> SocketAddr {
-    let port = PORT.fetch_add(1, Ordering::SeqCst) as u16 + base_port();
+    let port = PORT.fetch_add(1, Ordering::Relaxed) as u16 + base_port();
     SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), port))
 }

 pub fn next_test_ip6() -> SocketAddr {
-    let port = PORT.fetch_add(1, Ordering::SeqCst) as u16 + base_port();
+    let port = PORT.fetch_add(1, Ordering::Relaxed) as u16 + base_port();
     SocketAddr::V6(SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), port, 0, 0))
 }

View File

@@ -272,7 +272,7 @@ fn default_hook(info: &PanicInfo<'_>) {
             drop(backtrace::print(err, crate::backtrace_rs::PrintFmt::Full))
         }
         Some(BacktraceStyle::Off) => {
-            if FIRST_PANIC.swap(false, Ordering::SeqCst) {
+            if FIRST_PANIC.swap(false, Ordering::Relaxed) {
                 let _ = writeln!(
                     err,
                     "note: run with `RUST_BACKTRACE=1` environment variable to display a \

View File

@@ -170,14 +170,14 @@ fn wait_timeout_wake() {
         let t = thread::spawn(move || {
             let _g = m2.lock().unwrap();
             thread::sleep(Duration::from_millis(1));
-            notified_copy.store(true, Ordering::SeqCst);
+            notified_copy.store(true, Ordering::Relaxed);
             c2.notify_one();
         });
         let (g, timeout_res) = c.wait_timeout(g, Duration::from_millis(u64::MAX)).unwrap();
         assert!(!timeout_res.timed_out());
         // spurious wakeups mean this isn't necessarily true
         // so execute test again, if not notified
-        if !notified.load(Ordering::SeqCst) {
+        if !notified.load(Ordering::Relaxed) {
             t.join().unwrap();
             continue;
         }

View File

@@ -5,7 +5,7 @@ use crate::marker::PhantomPinned;
 use crate::pin::Pin;
 use crate::ptr::addr_of_mut;
 use crate::sync::atomic::AtomicUsize;
-use crate::sync::atomic::Ordering::SeqCst;
+use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
 #[cfg(not(target_os = "nto"))]
 use crate::sys::time::TIMESPEC_MAX;
 #[cfg(target_os = "nto")]
@@ -150,16 +150,18 @@ impl Parker {
     // This implementation doesn't require `unsafe`, but other implementations
     // may assume this is only called by the thread that owns the Parker.
+    //
+    // For memory ordering, see std/src/sys_common/thread_parking/futex.rs
     pub unsafe fn park(self: Pin<&Self>) {
         // If we were previously notified then we consume this notification and
         // return quickly.
-        if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+        if self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed).is_ok() {
             return;
         }

         // Otherwise we need to coordinate going to sleep
         lock(self.lock.get());
-        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+        match self.state.compare_exchange(EMPTY, PARKED, Relaxed, Relaxed) {
             Ok(_) => {}
             Err(NOTIFIED) => {
                 // We must read here, even though we know it will be `NOTIFIED`.
@@ -168,7 +170,7 @@ impl Parker {
                 // acquire operation that synchronizes with that `unpark` to observe
                 // any writes it made before the call to unpark. To do that we must
                 // read from the write it made to `state`.
-                let old = self.state.swap(EMPTY, SeqCst);
+                let old = self.state.swap(EMPTY, Acquire);

                 unlock(self.lock.get());
@@ -185,7 +187,7 @@ impl Parker {
         loop {
             wait(self.cvar.get(), self.lock.get());

-            match self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) {
+            match self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed) {
                 Ok(_) => break, // got a notification
                 Err(_) => {}    // spurious wakeup, go back to sleep
             }
@@ -201,16 +203,16 @@ impl Parker {
         // Like `park` above we have a fast path for an already-notified thread, and
         // afterwards we start coordinating for a sleep.
         // return quickly.
-        if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+        if self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed).is_ok() {
            return;
         }

         lock(self.lock.get());
-        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+        match self.state.compare_exchange(EMPTY, PARKED, Relaxed, Relaxed) {
             Ok(_) => {}
             Err(NOTIFIED) => {
                 // We must read again here, see `park`.
-                let old = self.state.swap(EMPTY, SeqCst);
+                let old = self.state.swap(EMPTY, Acquire);
                 unlock(self.lock.get());

                 assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
@@ -228,7 +230,7 @@ impl Parker {
             // parked.
             wait_timeout(self.cvar.get(), self.lock.get(), dur);

-            match self.state.swap(EMPTY, SeqCst) {
+            match self.state.swap(EMPTY, Acquire) {
                 NOTIFIED => unlock(self.lock.get()), // got a notification, hurray!
                 PARKED => unlock(self.lock.get()),   // no notification, alas
                 n => {
@@ -245,7 +247,7 @@ impl Parker {
         // `state` is already `NOTIFIED`. That is why this must be a swap
         // rather than a compare-and-swap that returns if it reads `NOTIFIED`
         // on failure.
-        match self.state.swap(NOTIFIED, SeqCst) {
+        match self.state.swap(NOTIFIED, Release) {
             EMPTY => return,    // no one was waiting
             NOTIFIED => return, // already unparked
             PARKED => {}        // gotta go wake someone up

View File

@@ -57,7 +57,10 @@ unsafe impl GlobalAlloc for System {
 #[cfg(target_feature = "atomics")]
 mod lock {
-    use crate::sync::atomic::{AtomicI32, Ordering::SeqCst};
+    use crate::sync::atomic::{
+        AtomicI32,
+        Ordering::{Acquire, Release},
+    };

     static LOCKED: AtomicI32 = AtomicI32::new(0);
@@ -65,7 +68,7 @@ mod lock {
     pub fn lock() -> DropLock {
         loop {
-            if LOCKED.swap(1, SeqCst) == 0 {
+            if LOCKED.swap(1, Acquire) == 0 {
                 return DropLock;
             }
             // Ok so here's where things get a little depressing. At this point
@@ -143,7 +146,7 @@ mod lock {
     impl Drop for DropLock {
         fn drop(&mut self) {
-            let r = LOCKED.swap(0, SeqCst);
+            let r = LOCKED.swap(0, Release);
             debug_assert_eq!(r, 1);

             // Note that due to the above logic we don't actually need to wake
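
For reference, the lock being relaxed here is the standard spinlock pairing: the winning `swap(1, Acquire)` synchronizes with the previous holder's `swap(0, Release)`. A self-contained sketch of that pattern (hypothetical and simplified; `std::hint::spin_loop` stands in for the real code's yield/wait logic):

```rust
use std::sync::atomic::{AtomicI32, Ordering::{Acquire, Release}};

static LOCKED: AtomicI32 = AtomicI32::new(0);

struct DropLock;

fn lock() -> DropLock {
    // Acquire on the successful swap synchronizes with the Release in drop(),
    // so this thread sees every write the previous holder made while locked.
    while LOCKED.swap(1, Acquire) != 0 {
        std::hint::spin_loop(); // stand-in for do_yield()/memory_atomic_wait32
    }
    DropLock
}

impl Drop for DropLock {
    fn drop(&mut self) {
        // Release publishes this critical section's writes to the next Acquire.
        let r = LOCKED.swap(0, Release);
        debug_assert_eq!(r, 1);
    }
}

fn main() {
    let _guard = lock();
    // ...critical section...
} // guard dropped here: lock released
```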

View File

@@ -7,7 +7,7 @@ use crate::path::Path;
 use crate::ptr;
 use crate::slice;
 use crate::sync::atomic::AtomicUsize;
-use crate::sync::atomic::Ordering::SeqCst;
+use crate::sync::atomic::Ordering::Relaxed;
 use crate::sys::c;
 use crate::sys::fs::{File, OpenOptions};
 use crate::sys::handle::Handle;
@@ -214,11 +214,11 @@ pub fn spawn_pipe_relay(
 fn random_number() -> usize {
     static N: AtomicUsize = AtomicUsize::new(0);
     loop {
-        if N.load(SeqCst) != 0 {
-            return N.fetch_add(1, SeqCst);
+        if N.load(Relaxed) != 0 {
+            return N.fetch_add(1, Relaxed);
         }

-        N.store(hashmap_random_keys().0 as usize, SeqCst);
+        N.store(hashmap_random_keys().0 as usize, Relaxed);
     }
 }

View File

@@ -46,7 +46,10 @@ unsafe impl GlobalAlloc for System {
 }

 mod lock {
-    use crate::sync::atomic::{AtomicI32, Ordering::SeqCst};
+    use crate::sync::atomic::{
+        AtomicI32,
+        Ordering::{Acquire, Release},
+    };

     static LOCKED: AtomicI32 = AtomicI32::new(0);
@@ -54,7 +57,7 @@ mod lock {
     pub fn lock() -> DropLock {
         loop {
-            if LOCKED.swap(1, SeqCst) == 0 {
+            if LOCKED.swap(1, Acquire) == 0 {
                 return DropLock;
             }
             crate::os::xous::ffi::do_yield();
@@ -63,7 +66,7 @@ mod lock {
     impl Drop for DropLock {
         fn drop(&mut self) {
-            let r = LOCKED.swap(0, SeqCst);
+            let r = LOCKED.swap(0, Release);
             debug_assert_eq!(r, 1);
         }
     }

View File

@@ -406,7 +406,7 @@ impl TcpStream {
     }

     pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
-        self.nonblocking.store(nonblocking, Ordering::SeqCst);
+        self.nonblocking.store(nonblocking, Ordering::Relaxed);
         Ok(())
     }
 }

View File

@@ -2,7 +2,7 @@ use crate::mem::ManuallyDrop;
 use crate::ptr;
 use crate::sync::atomic::AtomicPtr;
 use crate::sync::atomic::AtomicUsize;
-use crate::sync::atomic::Ordering::SeqCst;
+use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
 use core::arch::asm;

 use crate::os::xous::ffi::{map_memory, unmap_memory, MemoryFlags};
@@ -92,7 +92,7 @@ fn tls_table() -> &'static mut [*mut u8] {
 pub unsafe fn create(dtor: Option<Dtor>) -> Key {
     // Allocate a new TLS key. These keys are shared among all threads.
     #[allow(unused_unsafe)]
-    let key = unsafe { TLS_KEY_INDEX.fetch_add(1, SeqCst) };
+    let key = unsafe { TLS_KEY_INDEX.fetch_add(1, Relaxed) };
     if let Some(f) = dtor {
         unsafe { register_dtor(key, f) };
     }
@@ -154,11 +154,11 @@ unsafe fn register_dtor(key: Key, dtor: Dtor) {
     let mut node = ManuallyDrop::new(Box::new(Node { key, dtor, next: ptr::null_mut() }));
     #[allow(unused_unsafe)]
-    let mut head = unsafe { DTORS.load(SeqCst) };
+    let mut head = unsafe { DTORS.load(Acquire) };
     loop {
         node.next = head;
         #[allow(unused_unsafe)]
-        match unsafe { DTORS.compare_exchange(head, &mut **node, SeqCst, SeqCst) } {
+        match unsafe { DTORS.compare_exchange(head, &mut **node, Release, Acquire) } {
             Ok(_) => return, // nothing to drop, we successfully added the node to the list
             Err(cur) => head = cur,
         }
@@ -199,7 +199,7 @@ unsafe fn run_dtors() {
         }
         any_run = false;
         #[allow(unused_unsafe)]
-        let mut cur = unsafe { DTORS.load(SeqCst) };
+        let mut cur = unsafe { DTORS.load(Acquire) };
         while !cur.is_null() {
             let ptr = unsafe { get((*cur).key) };

View File

@@ -1,6 +1,9 @@
 use crate::os::xous::ffi::{blocking_scalar, do_yield};
 use crate::os::xous::services::{ticktimer_server, TicktimerScalar};
-use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering::Relaxed, Ordering::SeqCst};
+use crate::sync::atomic::{
+    AtomicBool, AtomicUsize,
+    Ordering::{Acquire, Relaxed, Release},
+};

 pub struct Mutex {
     /// The "locked" value indicates how many threads are waiting on this
@@ -68,7 +71,7 @@ impl Mutex {
     #[inline]
     pub unsafe fn unlock(&self) {
-        let prev = self.locked.fetch_sub(1, SeqCst);
+        let prev = self.locked.fetch_sub(1, Release);

         // If the previous value was 1, then this was a "fast path" unlock, so no
         // need to involve the Ticktimer server
@@ -89,12 +92,12 @@ impl Mutex {
     #[inline]
     pub unsafe fn try_lock(&self) -> bool {
-        self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok()
+        self.locked.compare_exchange(0, 1, Acquire, Relaxed).is_ok()
     }

     #[inline]
     pub unsafe fn try_lock_or_poison(&self) -> bool {
-        self.locked.fetch_add(1, SeqCst) == 0
+        self.locked.fetch_add(1, Acquire) == 0
     }
 }

View File

@@ -128,7 +128,7 @@ impl StaticKey {
     #[inline]
     unsafe fn key(&self) -> imp::Key {
-        match self.key.load(Ordering::Relaxed) {
+        match self.key.load(Ordering::Acquire) {
             KEY_SENTVAL => self.lazy_init() as imp::Key,
             n => n as imp::Key,
         }
@@ -156,8 +156,8 @@ impl StaticKey {
             match self.key.compare_exchange(
                 KEY_SENTVAL,
                 key as usize,
-                Ordering::SeqCst,
-                Ordering::SeqCst,
+                Ordering::Release,
+                Ordering::Acquire,
             ) {
                 // The CAS succeeded, so we've created the actual key
                 Ok(_) => key as usize,

View File

@@ -255,6 +255,9 @@ fn join_orders_after_tls_destructors() {
     // observe the channel in the `THREAD1_WAITING` state. If this does occur,
     // we switch to the “poison” state `THREAD2_JOINED` and panic all around.
     // (This is equivalent to “sending” from an alternate producer thread.)
+    //
+    // Relaxed memory ordering is fine because spawn()/join() already provide all the
+    // synchronization we need here.
     const FRESH: u8 = 0;
     const THREAD2_LAUNCHED: u8 = 1;
     const THREAD1_WAITING: u8 = 2;
@@ -263,7 +266,7 @@ fn join_orders_after_tls_destructors() {
     static SYNC_STATE: AtomicU8 = AtomicU8::new(FRESH);

     for _ in 0..10 {
-        SYNC_STATE.store(FRESH, Ordering::SeqCst);
+        SYNC_STATE.store(FRESH, Ordering::Relaxed);

         let jh = thread::Builder::new()
             .name("thread1".into())
@@ -272,7 +275,7 @@ fn join_orders_after_tls_destructors() {
                 impl Drop for TlDrop {
                     fn drop(&mut self) {
-                        let mut sync_state = SYNC_STATE.swap(THREAD1_WAITING, Ordering::SeqCst);
+                        let mut sync_state = SYNC_STATE.swap(THREAD1_WAITING, Ordering::Relaxed);
                         loop {
                             match sync_state {
                                 THREAD2_LAUNCHED | THREAD1_WAITING => thread::yield_now(),
@@ -282,7 +285,7 @@ fn join_orders_after_tls_destructors() {
                                 ),
                                 v => unreachable!("sync state: {}", v),
                             }
-                            sync_state = SYNC_STATE.load(Ordering::SeqCst);
+                            sync_state = SYNC_STATE.load(Ordering::Relaxed);
                         }
                     }
                 }
@@ -294,7 +297,7 @@ fn join_orders_after_tls_destructors() {
                 TL_DROP.with(|_| {});
                 loop {
-                    match SYNC_STATE.load(Ordering::SeqCst) {
+                    match SYNC_STATE.load(Ordering::Relaxed) {
                         FRESH => thread::yield_now(),
                         THREAD2_LAUNCHED => break,
                         v => unreachable!("sync state: {}", v),
@@ -306,9 +309,9 @@ fn join_orders_after_tls_destructors() {
         let jh2 = thread::Builder::new()
             .name("thread2".into())
             .spawn(move || {
-                assert_eq!(SYNC_STATE.swap(THREAD2_LAUNCHED, Ordering::SeqCst), FRESH);
+                assert_eq!(SYNC_STATE.swap(THREAD2_LAUNCHED, Ordering::Relaxed), FRESH);
                 jh.join().unwrap();
-                match SYNC_STATE.swap(THREAD2_JOINED, Ordering::SeqCst) {
+                match SYNC_STATE.swap(THREAD2_JOINED, Ordering::Relaxed) {
                     MAIN_THREAD_RENDEZVOUS => return,
                     THREAD2_LAUNCHED | THREAD1_WAITING => {
                         panic!("Thread 2 running after thread 1 join before main thread rendezvous")
@@ -322,8 +325,8 @@ fn join_orders_after_tls_destructors() {
         match SYNC_STATE.compare_exchange(
             THREAD1_WAITING,
             MAIN_THREAD_RENDEZVOUS,
-            Ordering::SeqCst,
-            Ordering::SeqCst,
+            Ordering::Relaxed,
+            Ordering::Relaxed,
         ) {
             Ok(_) => break,
             Err(FRESH) => thread::yield_now(),