Rollup merge of #96935 - thomcc:atomicptr-strict-prov, r=dtolnay
Allow arithmetic and certain bitwise ops on AtomicPtr

This is mainly to support migrating from `AtomicUsize`, for the strict
provenance experiment. This is a pretty dubious set of APIs, but it should
be sufficient to allow code that's using `AtomicUsize` to manipulate a
tagged pointer atomically.

It's under a new feature gate, `#![feature(strict_provenance_atomic_ptr)]`,
but I'm not sure if it needs its own tracking issue. I'm happy to make one,
but it's not clear that it's needed.

I'm unsure if it needs changes in the various non-LLVM backends. Because we
just cast things to integers anyway (and were already doing so), I doubt it.

API change proposal: https://github.com/rust-lang/libs-team/issues/60

Fixes #95492
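
As a concrete illustration of the intended migration, here is a minimal
sketch (not part of this patch; the tag layout and names are hypothetical)
of tag-bit code that previously would have round-tripped through
`AtomicUsize` and `as usize` casts, written against the new
provenance-preserving API:

    #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    use core::sync::atomic::{AtomicPtr, Ordering};

    fn main() {
        // Hypothetical scheme: the low bit of an 8-byte-aligned pointer
        // is used as a "marked" flag.
        let data = Box::leak(Box::new(0u64));
        let atom = AtomicPtr::new(data as *mut u64);

        atom.fetch_or(1, Ordering::AcqRel); // atomically set the mark bit
        let tagged = atom.load(Ordering::Acquire);
        assert_eq!(tagged.addr() & 1, 1);

        // Strip the tag before dereferencing; provenance is retained.
        let untagged = tagged.map_addr(|a| a & !1);
        unsafe { *untagged += 1 };

        atom.fetch_and(!1, Ordering::AcqRel); // atomically clear the mark bit
    }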

commit 4755173cf6

@@ -513,9 +513,7 @@ pub fn codegen_intrinsic_call(
                 };

                 let ty = substs.type_at(0);
-                if int_type_width_signed(ty, bx.tcx()).is_some()
-                    || (ty.is_unsafe_ptr() && op == "xchg")
-                {
+                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                     let mut ptr = args[0].immediate();
                     let mut val = args[1].immediate();
                     if ty.is_unsafe_ptr() {
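
The backend change just widens an existing check: pointer-typed operands were
already accepted for `xchg`, and atomic RMW lowering casts pointers to
integers anyway. Conceptually (a sketch in plain Rust, not the backend code),
a pointer-typed atomic RMW amounts to:

    use core::sync::atomic::{AtomicUsize, Ordering};

    /// Sketch of what the lowering amounts to for e.g. a pointer-typed
    /// `fetch_or`: reinterpret the pointer slot as an integer slot, do the
    /// integer RMW, and cast the old value back. (This is exactly the
    /// provenance-losing round-trip the new `AtomicPtr` methods paper over.)
    unsafe fn fetch_or_ptr(slot: *mut *mut u8, val: usize, order: Ordering) -> *mut u8 {
        let as_int: &AtomicUsize = &*(slot as *const AtomicUsize);
        as_int.fetch_or(val, order) as *mut u8
    }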

@@ -1451,6 +1451,347 @@ pub fn fetch_update<F>(
        }
        Err(prev)
    }

    /// Offsets the pointer's address by adding `val` (in units of `T`),
    /// returning the previous pointer.
    ///
    /// This is equivalent to using [`wrapping_add`] to atomically perform the
    /// equivalent of `ptr = ptr.wrapping_add(val);`.
    ///
    /// This method operates in units of `T`, which means that it cannot be used
    /// to offset the pointer by an amount which is not a multiple of
    /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
    /// work with a deliberately misaligned pointer. In such cases, you may use
    /// the [`fetch_byte_add`](Self::fetch_byte_add) method instead.
    ///
    /// `fetch_ptr_add` takes an [`Ordering`] argument which describes the
    /// memory ordering of this operation. All ordering modes are possible. Note
    /// that using [`Acquire`] makes the store part of this operation
    /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note**: This method is only available on platforms that support atomic
    /// operations on [`AtomicPtr`].
    ///
    /// [`wrapping_add`]: pointer::wrapping_add
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
    /// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).addr(), 0);
    /// // Note: units of `size_of::<i64>()`.
    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8);
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "ptr")]
    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
    pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T {
        self.fetch_byte_add(val.wrapping_mul(core::mem::size_of::<T>()), order)
    }

    /// Offsets the pointer's address by subtracting `val` (in units of `T`),
    /// returning the previous pointer.
    ///
    /// This is equivalent to using [`wrapping_sub`] to atomically perform the
    /// equivalent of `ptr = ptr.wrapping_sub(val);`.
    ///
    /// This method operates in units of `T`, which means that it cannot be used
    /// to offset the pointer by an amount which is not a multiple of
    /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
    /// work with a deliberately misaligned pointer. In such cases, you may use
    /// the [`fetch_byte_sub`](Self::fetch_byte_sub) method instead.
    ///
    /// `fetch_ptr_sub` takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation. All ordering modes are possible. Note that
    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
    /// and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note**: This method is only available on platforms that support atomic
    /// operations on [`AtomicPtr`].
    ///
    /// [`wrapping_sub`]: pointer::wrapping_sub
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let array = [1i32, 2i32];
    /// let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut _);
    ///
    /// assert!(core::ptr::eq(
    ///     atom.fetch_ptr_sub(1, Ordering::Relaxed),
    ///     &array[1],
    /// ));
    /// assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0]));
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "ptr")]
    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
    pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T {
        self.fetch_byte_sub(val.wrapping_mul(core::mem::size_of::<T>()), order)
    }

    /// Offsets the pointer's address by adding `val` *bytes*, returning the
    /// previous pointer.
    ///
    /// This is equivalent to using [`wrapping_add`] and [`cast`] to atomically
    /// perform `ptr = ptr.cast::<u8>().wrapping_add(val).cast::<T>()`.
    ///
    /// `fetch_byte_add` takes an [`Ordering`] argument which describes the
    /// memory ordering of this operation. All ordering modes are possible. Note
    /// that using [`Acquire`] makes the store part of this operation
    /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note**: This method is only available on platforms that support atomic
    /// operations on [`AtomicPtr`].
    ///
    /// [`wrapping_add`]: pointer::wrapping_add
    /// [`cast`]: pointer::cast
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
    /// assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).addr(), 0);
    /// // Note: in units of bytes, not `size_of::<i64>()`.
    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 1);
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "ptr")]
    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
    pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
        #[cfg(not(bootstrap))]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_add(self.p.get(), core::ptr::invalid_mut(val), order).cast()
        }
        #[cfg(bootstrap)]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_add(self.p.get().cast::<usize>(), val, order) as *mut T
        }
    }

    /// Offsets the pointer's address by subtracting `val` *bytes*, returning the
    /// previous pointer.
    ///
    /// This is equivalent to using [`wrapping_sub`] and [`cast`] to atomically
    /// perform `ptr = ptr.cast::<u8>().wrapping_sub(val).cast::<T>()`.
    ///
    /// `fetch_byte_sub` takes an [`Ordering`] argument which describes the
    /// memory ordering of this operation. All ordering modes are possible. Note
    /// that using [`Acquire`] makes the store part of this operation
    /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note**: This method is only available on platforms that support atomic
    /// operations on [`AtomicPtr`].
    ///
    /// [`wrapping_sub`]: pointer::wrapping_sub
    /// [`cast`]: pointer::cast
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let atom = AtomicPtr::<i64>::new(core::ptr::invalid_mut(1));
    /// assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).addr(), 1);
    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 0);
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "ptr")]
    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
    pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
        #[cfg(not(bootstrap))]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_sub(self.p.get(), core::ptr::invalid_mut(val), order).cast()
        }
        #[cfg(bootstrap)]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_sub(self.p.get().cast::<usize>(), val, order) as *mut T
        }
    }

    /// Performs a bitwise "or" operation on the address of the current
    /// pointer and the argument `val`, and stores a pointer with provenance of
    /// the current pointer and the resulting address.
    ///
    /// This is equivalent to using [`map_addr`] to atomically perform
    /// `ptr = ptr.map_addr(|a| a | val)`. This can be used in tagged
    /// pointer schemes to atomically set tag bits.
    ///
    /// **Caveat**: This operation returns the previous value. To compute the
    /// stored value without losing provenance, you may use [`map_addr`]. For
    /// example: `a.fetch_or(val, order).map_addr(|a| a | val)`.
    ///
    /// `fetch_or` takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation. All ordering modes are possible. Note that
    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
    /// and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note**: This method is only available on platforms that support atomic
    /// operations on [`AtomicPtr`].
    ///
    /// This API and its claimed semantics are part of the Strict Provenance
    /// experiment; see the [module documentation for `ptr`][crate::ptr] for
    /// details.
    ///
    /// [`map_addr`]: pointer::map_addr
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let pointer = &mut 3i64 as *mut i64;
    ///
    /// let atom = AtomicPtr::<i64>::new(pointer);
    /// // Tag the bottom bit of the pointer.
    /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 0);
    /// // Extract and untag.
    /// let tagged = atom.load(Ordering::Relaxed);
    /// assert_eq!(tagged.addr() & 1, 1);
    /// assert_eq!(tagged.map_addr(|p| p & !1), pointer);
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "ptr")]
    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
    pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
        #[cfg(not(bootstrap))]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_or(self.p.get(), core::ptr::invalid_mut(val), order).cast()
        }
        #[cfg(bootstrap)]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_or(self.p.get().cast::<usize>(), val, order) as *mut T
        }
    }
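
    // A minimal sketch (not part of this diff) of the "Caveat" above in
    // practice: recomputing the newly-stored pointer from the previous value
    // returned by `fetch_or`, without losing provenance:
    //
    //     let prev = atom.fetch_or(0b1, Ordering::Relaxed);
    //     let stored_now = prev.map_addr(|a| a | 0b1); // what the atomic holds now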

    /// Performs a bitwise "and" operation on the address of the current
    /// pointer and the argument `val`, and stores a pointer with provenance of
    /// the current pointer and the resulting address.
    ///
    /// This is equivalent to using [`map_addr`] to atomically perform
    /// `ptr = ptr.map_addr(|a| a & val)`. This can be used in tagged
    /// pointer schemes to atomically unset tag bits.
    ///
    /// **Caveat**: This operation returns the previous value. To compute the
    /// stored value without losing provenance, you may use [`map_addr`]. For
    /// example: `a.fetch_and(val, order).map_addr(|a| a & val)`.
    ///
    /// `fetch_and` takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation. All ordering modes are possible. Note that
    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
    /// and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note**: This method is only available on platforms that support atomic
    /// operations on [`AtomicPtr`].
    ///
    /// This API and its claimed semantics are part of the Strict Provenance
    /// experiment; see the [module documentation for `ptr`][crate::ptr] for
    /// details.
    ///
    /// [`map_addr`]: pointer::map_addr
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let pointer = &mut 3i64 as *mut i64;
    /// // A tagged pointer
    /// let atom = AtomicPtr::<i64>::new(pointer.map_addr(|a| a | 1));
    /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 1);
    /// // Untag, and extract the previously tagged pointer.
    /// let untagged = atom.fetch_and(!1, Ordering::Relaxed)
    ///     .map_addr(|a| a & !1);
    /// assert_eq!(untagged, pointer);
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "ptr")]
    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
    pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
        #[cfg(not(bootstrap))]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_and(self.p.get(), core::ptr::invalid_mut(val), order).cast()
        }
        #[cfg(bootstrap)]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_and(self.p.get().cast::<usize>(), val, order) as *mut T
        }
    }

    /// Performs a bitwise "xor" operation on the address of the current
    /// pointer and the argument `val`, and stores a pointer with provenance of
    /// the current pointer and the resulting address.
    ///
    /// This is equivalent to using [`map_addr`] to atomically perform
    /// `ptr = ptr.map_addr(|a| a ^ val)`. This can be used in tagged
    /// pointer schemes to atomically toggle tag bits.
    ///
    /// **Caveat**: This operation returns the previous value. To compute the
    /// stored value without losing provenance, you may use [`map_addr`]. For
    /// example: `a.fetch_xor(val, order).map_addr(|a| a ^ val)`.
    ///
    /// `fetch_xor` takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation. All ordering modes are possible. Note that
    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
    /// and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note**: This method is only available on platforms that support atomic
    /// operations on [`AtomicPtr`].
    ///
    /// This API and its claimed semantics are part of the Strict Provenance
    /// experiment; see the [module documentation for `ptr`][crate::ptr] for
    /// details.
    ///
    /// [`map_addr`]: pointer::map_addr
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    /// use core::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let pointer = &mut 3i64 as *mut i64;
    /// let atom = AtomicPtr::<i64>::new(pointer);
    ///
    /// // Toggle a tag bit on the pointer.
    /// atom.fetch_xor(1, Ordering::Relaxed);
    /// assert_eq!(atom.load(Ordering::Relaxed).addr() & 1, 1);
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "ptr")]
    #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
    pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
        #[cfg(not(bootstrap))]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_xor(self.p.get(), core::ptr::invalid_mut(val), order).cast()
        }
        #[cfg(bootstrap)]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_xor(self.p.get().cast::<usize>(), val, order) as *mut T
        }
    }
}

#[cfg(target_has_atomic_load_store = "8")]
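
Taken together, the `fetch_ptr_*` methods step in units of `size_of::<T>()`,
the `fetch_byte_*` methods step in single bytes, and the bitwise ops act on
the address bits directly. A small end-to-end sketch (assuming a nightly
toolchain with both feature gates; the addresses are illustrative):

    #![feature(strict_provenance_atomic_ptr, strict_provenance)]
    use core::sync::atomic::{AtomicPtr, Ordering};

    fn main() {
        let atom = AtomicPtr::<u64>::new(core::ptr::null_mut());

        // Moves by 2 * size_of::<u64>() = 16 bytes.
        atom.fetch_ptr_add(2, Ordering::Relaxed);
        assert_eq!(atom.load(Ordering::Relaxed).addr(), 16);

        // Moves by exactly 3 bytes.
        atom.fetch_byte_add(3, Ordering::Relaxed);
        assert_eq!(atom.load(Ordering::Relaxed).addr(), 19);

        // Clears the low three address bits: 19 & !0b111 == 16.
        atom.fetch_and(!0b111, Ordering::Relaxed);
        assert_eq!(atom.load(Ordering::Relaxed).addr(), 16);
    }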

@@ -127,6 +127,91 @@ fn int_max() {
    assert_eq!(x.load(SeqCst), 0xf731);
}

#[test]
#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
fn ptr_add_null() {
    let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
    assert_eq!(atom.fetch_ptr_add(1, SeqCst).addr(), 0);
    assert_eq!(atom.load(SeqCst).addr(), 8);

    assert_eq!(atom.fetch_byte_add(1, SeqCst).addr(), 8);
    assert_eq!(atom.load(SeqCst).addr(), 9);

    assert_eq!(atom.fetch_ptr_sub(1, SeqCst).addr(), 9);
    assert_eq!(atom.load(SeqCst).addr(), 1);

    assert_eq!(atom.fetch_byte_sub(1, SeqCst).addr(), 1);
    assert_eq!(atom.load(SeqCst).addr(), 0);
}

#[test]
#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
fn ptr_add_data() {
    let num = 0i64;
    let n = &num as *const i64 as *mut _;
    let atom = AtomicPtr::<i64>::new(n);
    assert_eq!(atom.fetch_ptr_add(1, SeqCst), n);
    assert_eq!(atom.load(SeqCst), n.wrapping_add(1));

    assert_eq!(atom.fetch_ptr_sub(1, SeqCst), n.wrapping_add(1));
    assert_eq!(atom.load(SeqCst), n);
    let bytes_from_n = |b| n.cast::<u8>().wrapping_add(b).cast::<i64>();

    assert_eq!(atom.fetch_byte_add(1, SeqCst), n);
    assert_eq!(atom.load(SeqCst), bytes_from_n(1));

    assert_eq!(atom.fetch_byte_add(5, SeqCst), bytes_from_n(1));
    assert_eq!(atom.load(SeqCst), bytes_from_n(6));

    assert_eq!(atom.fetch_byte_sub(1, SeqCst), bytes_from_n(6));
    assert_eq!(atom.load(SeqCst), bytes_from_n(5));

    assert_eq!(atom.fetch_byte_sub(5, SeqCst), bytes_from_n(5));
    assert_eq!(atom.load(SeqCst), n);
}

#[test]
#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
fn ptr_bitops() {
    let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
    assert_eq!(atom.fetch_or(0b0111, SeqCst).addr(), 0);
    assert_eq!(atom.load(SeqCst).addr(), 0b0111);

    assert_eq!(atom.fetch_and(0b1101, SeqCst).addr(), 0b0111);
    assert_eq!(atom.load(SeqCst).addr(), 0b0101);

    assert_eq!(atom.fetch_xor(0b1111, SeqCst).addr(), 0b0101);
    assert_eq!(atom.load(SeqCst).addr(), 0b1010);
}

#[test]
#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
fn ptr_bitops_tagging() {
    #[repr(align(16))]
    struct Tagme(u128);

    let tagme = Tagme(1000);
    let ptr = &tagme as *const Tagme as *mut Tagme;
    let atom: AtomicPtr<Tagme> = AtomicPtr::new(ptr);

    const MASK_TAG: usize = 0b1111;
    const MASK_PTR: usize = !MASK_TAG;

    assert_eq!(ptr.addr() & MASK_TAG, 0);

    assert_eq!(atom.fetch_or(0b0111, SeqCst), ptr);
    assert_eq!(atom.load(SeqCst), ptr.map_addr(|a| a | 0b111));

    assert_eq!(atom.fetch_and(MASK_PTR | 0b0010, SeqCst), ptr.map_addr(|a| a | 0b111));
    assert_eq!(atom.load(SeqCst), ptr.map_addr(|a| a | 0b0010));

    assert_eq!(atom.fetch_xor(0b1011, SeqCst), ptr.map_addr(|a| a | 0b0010));
    assert_eq!(atom.load(SeqCst), ptr.map_addr(|a| a | 0b1001));

    assert_eq!(atom.fetch_and(MASK_PTR, SeqCst), ptr.map_addr(|a| a | 0b1001));
    assert_eq!(atom.load(SeqCst), ptr);
}

static S_FALSE: AtomicBool = AtomicBool::new(false);
static S_TRUE: AtomicBool = AtomicBool::new(true);
static S_INT: AtomicIsize = AtomicIsize::new(0);

@@ -90,6 +90,7 @@
 #![feature(slice_group_by)]
 #![feature(split_array)]
 #![feature(strict_provenance)]
+#![feature(strict_provenance_atomic_ptr)]
 #![feature(trusted_random_access)]
 #![feature(unsize)]
 #![feature(unzip_option)]