Use `&raw` in the standard library

Since the stabilization in #127679 has reached stage0 (1.82-beta), we can start using `&raw` freely, and even the soft-deprecated `ptr::addr_of!` and `ptr::addr_of_mut!` macros no longer need to allow the unstable feature internally. I intentionally did not change any documentation or tests, but all remaining uses of those macros in the standard library now use `&raw const` or `&raw mut`.
parent 0399709cdc
commit f4d9d1a0ea
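For comparison, a minimal, self-contained sketch of the old macro spelling next to the new operator spelling (not part of this commit; the `Example` struct is made up purely for illustration, and a Rust 1.82+ toolchain is assumed):

```rust
use std::ptr;

struct Example {
    field: u64,
}

fn main() {
    let mut val = Example { field: 1 };

    // Old spelling: the macro (stable since 1.51) wrapping the raw-ref operator.
    let p_old: *mut u64 = ptr::addr_of_mut!(val.field);
    unsafe { p_old.write(2) };

    // New spelling: the `&raw` operator itself, stable as of Rust 1.82.
    let p_new: *const u64 = &raw const val.field;

    // Neither form materializes an intermediate reference, so no reference-level
    // aliasing or validity requirements are imposed on the place.
    assert_eq!(unsafe { p_new.read() }, 2);
}
```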
@@ -199,7 +199,7 @@
     DerefPure, DispatchFromDyn, Receiver,
 };
 use core::pin::{Pin, PinCoerceUnsized};
-use core::ptr::{self, NonNull, Unique, addr_of_mut};
+use core::ptr::{self, NonNull, Unique};
 use core::task::{Context, Poll};
 use core::{borrow, fmt, slice};

@@ -1277,7 +1277,7 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
     #[inline]
     pub fn into_raw(b: Self) -> *mut T {
         // Make sure Miri realizes that we transition from a noalias pointer to a raw pointer here.
-        unsafe { addr_of_mut!(*&mut *Self::into_raw_with_allocator(b).0) }
+        unsafe { &raw mut *&mut *Self::into_raw_with_allocator(b).0 }
     }

     /// Consumes the `Box`, returning a wrapped `NonNull` pointer.

@@ -1396,7 +1396,7 @@ pub fn into_raw_with_allocator(b: Self) -> (*mut T, A) {
         // want *no* aliasing requirements here!
         // In case `A` *is* `Global`, this does not quite have the right behavior; `into_raw`
         // works around that.
-        let ptr = addr_of_mut!(**b);
+        let ptr = &raw mut **b;
         let alloc = unsafe { ptr::read(&b.1) };
         (ptr, alloc)
     }

@@ -1506,7 +1506,7 @@ pub fn into_unique(b: Self) -> (Unique<T>, A) {
     pub fn as_mut_ptr(b: &mut Self) -> *mut T {
         // This is a primitive deref, not going through `DerefMut`, and therefore not materializing
         // any references.
-        ptr::addr_of_mut!(**b)
+        &raw mut **b
     }

     /// Returns a raw pointer to the `Box`'s contents.

@@ -1554,7 +1554,7 @@ pub fn as_mut_ptr(b: &mut Self) -> *mut T {
     pub fn as_ptr(b: &Self) -> *const T {
         // This is a primitive deref, not going through `DerefMut`, and therefore not materializing
         // any references.
-        ptr::addr_of!(**b)
+        &raw const **b
     }

     /// Returns a reference to the underlying allocator.
@@ -186,7 +186,7 @@ fn data(&self) -> *mut u8 {

     fn with_header(&self) -> &WithHeader<<T as Pointee>::Metadata> {
         // SAFETY: both types are transparent to `NonNull<u8>`
-        unsafe { &*(core::ptr::addr_of!(self.ptr) as *const WithHeader<_>) }
+        unsafe { &*((&raw const self.ptr) as *const WithHeader<_>) }
     }
 }
@@ -72,8 +72,8 @@ unsafe fn init(this: *mut Self) {
         // be both slightly faster and easier to track in Valgrind.
         unsafe {
             // parent_idx, keys, and vals are all MaybeUninit
-            ptr::addr_of_mut!((*this).parent).write(None);
-            ptr::addr_of_mut!((*this).len).write(0);
+            (&raw mut (*this).parent).write(None);
+            (&raw mut (*this).len).write(0);
         }
     }

@@ -114,7 +114,7 @@ unsafe fn new<A: Allocator + Clone>(alloc: A) -> Box<Self, A> {
         unsafe {
             let mut node = Box::<Self, _>::new_uninit_in(alloc);
             // We only need to initialize the data; the edges are MaybeUninit.
-            LeafNode::init(ptr::addr_of_mut!((*node.as_mut_ptr()).data));
+            LeafNode::init(&raw mut (*node.as_mut_ptr()).data);
             node.assume_init()
         }
     }

@@ -525,8 +525,8 @@ unsafe fn into_key_val_mut_at(mut self, idx: usize) -> (&'a K, &'a mut V) {
         // to avoid aliasing with outstanding references to other elements,
         // in particular, those returned to the caller in earlier iterations.
         let leaf = Self::as_leaf_ptr(&mut self);
-        let keys = unsafe { ptr::addr_of!((*leaf).keys) };
-        let vals = unsafe { ptr::addr_of_mut!((*leaf).vals) };
+        let keys = unsafe { &raw const (*leaf).keys };
+        let vals = unsafe { &raw mut (*leaf).vals };
         // We must coerce to unsized array pointers because of Rust issue #74679.
         let keys: *const [_] = keys;
         let vals: *mut [_] = vals;
@@ -787,7 +787,7 @@ pub fn new_cyclic_in<F>(data_fn: F, alloc: A) -> Rc<T, A>

         let strong = unsafe {
             let inner = init_ptr.as_ptr();
-            ptr::write(ptr::addr_of_mut!((*inner).value), data);
+            ptr::write(&raw mut (*inner).value, data);

             let prev_value = (*inner).strong.get();
             debug_assert_eq!(prev_value, 0, "No prior strong references should exist");

@@ -1442,7 +1442,7 @@ pub fn as_ptr(this: &Self) -> *const T {
         // SAFETY: This cannot go through Deref::deref or Rc::inner because
         // this is required to retain raw/mut provenance such that e.g. `get_mut` can
         // write through the pointer after the Rc is recovered through `from_raw`.
-        unsafe { ptr::addr_of_mut!((*ptr).value) }
+        unsafe { &raw mut (*ptr).value }
     }

     /// Constructs an `Rc<T, A>` from a raw pointer in the provided allocator.

@@ -2042,8 +2042,8 @@ unsafe fn try_allocate_for_layout(
         unsafe {
             debug_assert_eq!(Layout::for_value_raw(inner), layout);

-            ptr::addr_of_mut!((*inner).strong).write(Cell::new(1));
-            ptr::addr_of_mut!((*inner).weak).write(Cell::new(1));
+            (&raw mut (*inner).strong).write(Cell::new(1));
+            (&raw mut (*inner).weak).write(Cell::new(1));
         }

         Ok(inner)

@@ -2072,8 +2072,8 @@ fn from_box_in(src: Box<T, A>) -> Rc<T, A> {

             // Copy value as bytes
             ptr::copy_nonoverlapping(
-                core::ptr::addr_of!(*src) as *const u8,
-                ptr::addr_of_mut!((*ptr).value) as *mut u8,
+                (&raw const *src) as *const u8,
+                (&raw mut (*ptr).value) as *mut u8,
                 value_size,
             );

@@ -2107,11 +2107,7 @@ unsafe fn allocate_for_slice(len: usize) -> *mut RcBox<[T]> {
     unsafe fn copy_from_slice(v: &[T]) -> Rc<[T]> {
         unsafe {
             let ptr = Self::allocate_for_slice(v.len());
-            ptr::copy_nonoverlapping(
-                v.as_ptr(),
-                ptr::addr_of_mut!((*ptr).value) as *mut T,
-                v.len(),
-            );
+            ptr::copy_nonoverlapping(v.as_ptr(), (&raw mut (*ptr).value) as *mut T, v.len());
             Self::from_ptr(ptr)
         }
     }

@@ -2149,7 +2145,7 @@ fn drop(&mut self) {
                 let layout = Layout::for_value_raw(ptr);

                 // Pointer to first element
-                let elems = ptr::addr_of_mut!((*ptr).value) as *mut T;
+                let elems = (&raw mut (*ptr).value) as *mut T;

                 let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };

@@ -2577,7 +2573,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized, A: Allocator> fmt::Pointer for Rc<T, A> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        fmt::Pointer::fmt(&core::ptr::addr_of!(**self), f)
+        fmt::Pointer::fmt(&(&raw const **self), f)
     }
 }

@@ -2718,7 +2714,7 @@ fn from(v: Vec<T, A>) -> Rc<[T], A> {
         let (vec_ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();

         let rc_ptr = Self::allocate_for_slice_in(len, &alloc);
-        ptr::copy_nonoverlapping(vec_ptr, ptr::addr_of_mut!((*rc_ptr).value) as *mut T, len);
+        ptr::copy_nonoverlapping(vec_ptr, (&raw mut (*rc_ptr).value) as *mut T, len);

         // Create a `Vec<T, &A>` with length 0, to deallocate the buffer
         // without dropping its contents or the allocator

@@ -3084,7 +3080,7 @@ pub fn as_ptr(&self) -> *const T {
         // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
         // The payload may be dropped at this point, and we have to maintain provenance,
         // so use raw pointer manipulation.
-        unsafe { ptr::addr_of_mut!((*ptr).value) }
+        unsafe { &raw mut (*ptr).value }
     }
 }
@@ -797,7 +797,7 @@ pub fn new_cyclic_in<F>(data_fn: F, alloc: A) -> Arc<T, A>
         // reference into a strong reference.
         let strong = unsafe {
             let inner = init_ptr.as_ptr();
-            ptr::write(ptr::addr_of_mut!((*inner).data), data);
+            ptr::write(&raw mut (*inner).data, data);

             // The above write to the data field must be visible to any threads which
             // observe a non-zero strong count. Therefore we need at least "Release" ordering

@@ -1583,7 +1583,7 @@ pub fn as_ptr(this: &Self) -> *const T {
         // SAFETY: This cannot go through Deref::deref or RcBoxPtr::inner because
         // this is required to retain raw/mut provenance such that e.g. `get_mut` can
         // write through the pointer after the Rc is recovered through `from_raw`.
-        unsafe { ptr::addr_of_mut!((*ptr).data) }
+        unsafe { &raw mut (*ptr).data }
     }

     /// Constructs an `Arc<T, A>` from a raw pointer.

@@ -1955,8 +1955,8 @@ unsafe fn initialize_arcinner(
         debug_assert_eq!(unsafe { Layout::for_value_raw(inner) }, layout);

         unsafe {
-            ptr::addr_of_mut!((*inner).strong).write(atomic::AtomicUsize::new(1));
-            ptr::addr_of_mut!((*inner).weak).write(atomic::AtomicUsize::new(1));
+            (&raw mut (*inner).strong).write(atomic::AtomicUsize::new(1));
+            (&raw mut (*inner).weak).write(atomic::AtomicUsize::new(1));
         }

         inner

@@ -1986,8 +1986,8 @@ fn from_box_in(src: Box<T, A>) -> Arc<T, A> {

             // Copy value as bytes
             ptr::copy_nonoverlapping(
-                core::ptr::addr_of!(*src) as *const u8,
-                ptr::addr_of_mut!((*ptr).data) as *mut u8,
+                (&raw const *src) as *const u8,
+                (&raw mut (*ptr).data) as *mut u8,
                 value_size,
             );

@@ -2022,7 +2022,7 @@ unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
         unsafe {
             let ptr = Self::allocate_for_slice(v.len());

-            ptr::copy_nonoverlapping(v.as_ptr(), ptr::addr_of_mut!((*ptr).data) as *mut T, v.len());
+            ptr::copy_nonoverlapping(v.as_ptr(), (&raw mut (*ptr).data) as *mut T, v.len());

             Self::from_ptr(ptr)
         }

@@ -2061,7 +2061,7 @@ fn drop(&mut self) {
                 let layout = Layout::for_value_raw(ptr);

                 // Pointer to first element
-                let elems = ptr::addr_of_mut!((*ptr).data) as *mut T;
+                let elems = (&raw mut (*ptr).data) as *mut T;

                 let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };

@@ -2805,7 +2805,7 @@ pub fn as_ptr(&self) -> *const T {
         // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
         // The payload may be dropped at this point, and we have to maintain provenance,
         // so use raw pointer manipulation.
-        unsafe { ptr::addr_of_mut!((*ptr).data) }
+        unsafe { &raw mut (*ptr).data }
     }
 }

@@ -3428,7 +3428,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized, A: Allocator> fmt::Pointer for Arc<T, A> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        fmt::Pointer::fmt(&core::ptr::addr_of!(**self), f)
+        fmt::Pointer::fmt(&(&raw const **self), f)
     }
 }

@@ -3678,7 +3678,7 @@ fn from(v: Vec<T, A>) -> Arc<[T], A> {
         let (vec_ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();

         let rc_ptr = Self::allocate_for_slice_in(len, &alloc);
-        ptr::copy_nonoverlapping(vec_ptr, ptr::addr_of_mut!((*rc_ptr).data) as *mut T, len);
+        ptr::copy_nonoverlapping(vec_ptr, (&raw mut (*rc_ptr).data) as *mut T, len);

         // Create a `Vec<T, &A>` with length 0, to deallocate the buffer
         // without dropping its contents or the allocator
@@ -21,11 +21,11 @@
 macro non_null {
     (mut $place:expr, $t:ident) => {{
         #![allow(unused_unsafe)] // we're sometimes used within an unsafe block
-        unsafe { &mut *(ptr::addr_of_mut!($place) as *mut NonNull<$t>) }
+        unsafe { &mut *((&raw mut $place) as *mut NonNull<$t>) }
     }},
     ($place:expr, $t:ident) => {{
         #![allow(unused_unsafe)] // we're sometimes used within an unsafe block
-        unsafe { *(ptr::addr_of!($place) as *const NonNull<$t>) }
+        unsafe { *((&raw const $place) as *const NonNull<$t>) }
     }},
 }
@@ -5,7 +5,7 @@
 use crate::ffi::c_char;
 use crate::iter::FusedIterator;
 use crate::marker::PhantomData;
-use crate::ptr::{NonNull, addr_of};
+use crate::ptr::NonNull;
 use crate::slice::memchr;
 use crate::{fmt, intrinsics, ops, slice, str};

@@ -623,7 +623,7 @@ pub const fn to_bytes(&self) -> &[u8] {
     pub const fn to_bytes_with_nul(&self) -> &[u8] {
         // SAFETY: Transmuting a slice of `c_char`s to a slice of `u8`s
         // is safe on all supported targets.
-        unsafe { &*(addr_of!(self.inner) as *const [u8]) }
+        unsafe { &*((&raw const self.inner) as *const [u8]) }
     }

     /// Iterates over the bytes in this C string.
@@ -3,7 +3,6 @@
 use crate::mem::{ManuallyDrop, MaybeUninit};
 use crate::num::NonZero;
 use crate::ops::{ControlFlow, Try};
-use crate::ptr::addr_of;
 use crate::{array, fmt};

 /// An iterator that uses `f` to both filter and map elements from `iter`.

@@ -101,7 +100,7 @@ fn drop(&mut self) {

                 unsafe {
                     let opt_payload_at: *const MaybeUninit<B> =
-                        addr_of!(val).byte_add(core::mem::offset_of!(Option<B>, Some.0)).cast();
+                        (&raw const val).byte_add(core::mem::offset_of!(Option<B>, Some.0)).cast();
                     let dst = guard.array.as_mut_ptr().add(idx);
                     crate::ptr::copy_nonoverlapping(opt_payload_at, dst, 1);
                     crate::mem::forget(val);
@@ -1730,7 +1730,7 @@ macro_rules! attempt_swap_as_chunks {
     // `dst` cannot overlap `src` because the caller has mutable access
     // to `dst` while `src` is owned by this function.
     unsafe {
-        copy_nonoverlapping(addr_of!(src) as *const u8, dst as *mut u8, mem::size_of::<T>());
+        copy_nonoverlapping((&raw const src) as *const u8, dst as *mut u8, mem::size_of::<T>());
         // We are calling the intrinsic directly to avoid function calls in the generated code.
         intrinsics::forget(src);
     }

@@ -2348,7 +2348,6 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 /// no difference whether the pointer is null or dangling.)
 #[stable(feature = "raw_ref_macros", since = "1.51.0")]
 #[rustc_macro_transparency = "semitransparent"]
-#[allow_internal_unstable(raw_ref_op)]
 pub macro addr_of($place:expr) {
     &raw const $place
 }

@@ -2439,7 +2438,6 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 /// makes no difference whether the pointer is null or dangling.)
 #[stable(feature = "raw_ref_macros", since = "1.51.0")]
 #[rustc_macro_transparency = "semitransparent"]
-#[allow_internal_unstable(raw_ref_op)]
 pub macro addr_of_mut($place:expr) {
     &raw mut $place
 }
@@ -11,7 +11,7 @@
 use crate::marker::PhantomData;
 use crate::mem::{self, SizedTypeProperties};
 use crate::num::NonZero;
-use crate::ptr::{self, NonNull, without_provenance, without_provenance_mut};
+use crate::ptr::{NonNull, without_provenance, without_provenance_mut};
 use crate::{cmp, fmt};

 #[stable(feature = "boxed_slice_into_iter", since = "1.80.0")]
@@ -14,11 +14,11 @@ macro_rules! if_zst {
         if T::IS_ZST {
             // SAFETY: for ZSTs, the pointer is storing a provenance-free length,
             // so consuming and updating it as a `usize` is fine.
-            let $len = unsafe { &mut *ptr::addr_of_mut!($this.end_or_len).cast::<usize>() };
+            let $len = unsafe { &mut *(&raw mut $this.end_or_len).cast::<usize>() };
             $zst_body
         } else {
             // SAFETY: for non-ZSTs, the type invariant ensures it cannot be null
-            let $end = unsafe { &mut *ptr::addr_of_mut!($this.end_or_len).cast::<NonNull<T>>() };
+            let $end = unsafe { &mut *(&raw mut $this.end_or_len).cast::<NonNull<T>>() };
             $other_body
         }
     }};

@@ -30,7 +30,7 @@ macro_rules! if_zst {
             $zst_body
         } else {
             // SAFETY: for non-ZSTs, the type invariant ensures it cannot be null
-            let $end = unsafe { *ptr::addr_of!($this.end_or_len).cast::<NonNull<T>>() };
+            let $end = unsafe { *(&raw const $this.end_or_len).cast::<NonNull<T>>() };
             $other_body
         }
     }};
@@ -883,8 +883,8 @@ pub const fn as_mut_ptr_range(&mut self) -> Range<*mut T> {
     pub const fn swap(&mut self, a: usize, b: usize) {
         // FIXME: use swap_unchecked here (https://github.com/rust-lang/rust/pull/88540#issuecomment-944344343)
         // Can't take two mutable loans from one vector, so instead use raw pointers.
-        let pa = ptr::addr_of_mut!(self[a]);
-        let pb = ptr::addr_of_mut!(self[b]);
+        let pa = &raw mut self[a];
+        let pb = &raw mut self[b];
         // SAFETY: `pa` and `pb` have been created from safe mutable references and refer
         // to elements in the slice and therefore are guaranteed to be valid and aligned.
         // Note that accessing the elements behind `a` and `b` is checked and will
@@ -78,7 +78,7 @@ struct CatchData {
             super::__rust_foreign_exception();
         }

-        let canary = ptr::addr_of!((*adjusted_ptr).canary).read();
+        let canary = (&raw const (*adjusted_ptr).canary).read();
         if !ptr::eq(canary, &EXCEPTION_TYPE_INFO) {
             super::__rust_foreign_exception();
         }

@@ -92,7 +92,7 @@ pub unsafe fn cleanup(ptr: *mut u8) -> Box<dyn Any + Send> {
     let exception = exception.cast::<Exception>();
     // Just access the canary field, avoid accessing the entire `Exception` as
     // it can be a foreign Rust exception.
-    let canary = ptr::addr_of!((*exception).canary).read();
+    let canary = (&raw const (*exception).canary).read();
     if !ptr::eq(canary, &CANARY) {
         // A foreign Rust exception, treat it slightly differently from other
         // foreign exceptions, because call into `_Unwind_DeleteException` will
@@ -50,7 +50,6 @@
 use core::any::Any;
 use core::ffi::{c_int, c_uint, c_void};
 use core::mem::{self, ManuallyDrop};
-use core::ptr::{addr_of, addr_of_mut};

 // NOTE(nbdd0121): The `canary` field is part of stable ABI.
 #[repr(C)]

@@ -131,8 +130,6 @@ pub const fn raw(self) -> *mut u8 {

 #[cfg(not(target_arch = "x86"))]
 mod imp {
-    use core::ptr::addr_of;
-
     // On 64-bit systems, SEH represents pointers as 32-bit offsets from `__ImageBase`.
     #[repr(transparent)]
     #[derive(Copy, Clone)]

@@ -157,7 +154,7 @@ pub fn new(ptr: *mut u8) -> Self {
             // going to be cross-lang LTOed anyway. However, using expose is shorter and
             // requires less unsafe.
             let addr: usize = ptr.expose_provenance();
-            let image_base = addr_of!(__ImageBase).addr();
+            let image_base = (&raw const __ImageBase).addr();
             let offset: usize = addr - image_base;
             Self(offset as u32)
         }

@@ -250,7 +247,7 @@ pub struct _TypeDescriptor {
 // This is fine since the MSVC runtime uses string comparison on the type name
 // to match TypeDescriptors rather than pointer equality.
 static mut TYPE_DESCRIPTOR: _TypeDescriptor = _TypeDescriptor {
-    pVFTable: addr_of!(TYPE_INFO_VTABLE) as *const _,
+    pVFTable: (&raw const TYPE_INFO_VTABLE) as *const _,
     spare: core::ptr::null_mut(),
     name: TYPE_NAME,
 };

@@ -304,8 +301,8 @@ pub unsafe fn panic(data: Box<dyn Any + Send>) -> u32 {
     // dropped when unwinding. Instead it will be dropped by exception_cleanup
     // which is invoked by the C++ runtime.
     let mut exception =
-        ManuallyDrop::new(Exception { canary: addr_of!(TYPE_DESCRIPTOR), data: Some(data) });
-    let throw_ptr = addr_of_mut!(exception) as *mut _;
+        ManuallyDrop::new(Exception { canary: (&raw const TYPE_DESCRIPTOR), data: Some(data) });
+    let throw_ptr = (&raw mut exception) as *mut _;

     // This... may seems surprising, and justifiably so. On 32-bit MSVC the
     // pointers between these structure are just that, pointers. On 64-bit MSVC,

@@ -328,23 +325,23 @@ pub unsafe fn panic(data: Box<dyn Any + Send>) -> u32 {
     // In any case, we basically need to do something like this until we can
     // express more operations in statics (and we may never be able to).
     atomic_store_seqcst(
-        addr_of_mut!(THROW_INFO.pmfnUnwind).cast(),
+        (&raw mut THROW_INFO.pmfnUnwind).cast(),
         ptr_t::new(exception_cleanup as *mut u8).raw(),
     );
     atomic_store_seqcst(
-        addr_of_mut!(THROW_INFO.pCatchableTypeArray).cast(),
-        ptr_t::new(addr_of_mut!(CATCHABLE_TYPE_ARRAY).cast()).raw(),
+        (&raw mut THROW_INFO.pCatchableTypeArray).cast(),
+        ptr_t::new((&raw mut CATCHABLE_TYPE_ARRAY).cast()).raw(),
     );
     atomic_store_seqcst(
-        addr_of_mut!(CATCHABLE_TYPE_ARRAY.arrayOfCatchableTypes[0]).cast(),
-        ptr_t::new(addr_of_mut!(CATCHABLE_TYPE).cast()).raw(),
+        (&raw mut CATCHABLE_TYPE_ARRAY.arrayOfCatchableTypes[0]).cast(),
+        ptr_t::new((&raw mut CATCHABLE_TYPE).cast()).raw(),
     );
     atomic_store_seqcst(
-        addr_of_mut!(CATCHABLE_TYPE.pType).cast(),
-        ptr_t::new(addr_of_mut!(TYPE_DESCRIPTOR).cast()).raw(),
+        (&raw mut CATCHABLE_TYPE.pType).cast(),
+        ptr_t::new((&raw mut TYPE_DESCRIPTOR).cast()).raw(),
     );
     atomic_store_seqcst(
-        addr_of_mut!(CATCHABLE_TYPE.copyFunction).cast(),
+        (&raw mut CATCHABLE_TYPE.copyFunction).cast(),
         ptr_t::new(exception_copy as *mut u8).raw(),
     );

@@ -352,7 +349,7 @@ pub unsafe fn panic(data: Box<dyn Any + Send>) -> u32 {
         fn _CxxThrowException(pExceptionObject: *mut c_void, pThrowInfo: *mut u8) -> !;
     }

-    _CxxThrowException(throw_ptr, addr_of_mut!(THROW_INFO) as *mut _);
+    _CxxThrowException(throw_ptr, (&raw mut THROW_INFO) as *mut _);
 }

 pub unsafe fn cleanup(payload: *mut u8) -> Box<dyn Any + Send> {

@@ -362,8 +359,8 @@ pub unsafe fn cleanup(payload: *mut u8) -> Box<dyn Any + Send> {
         super::__rust_foreign_exception();
     }
     let exception = payload as *mut Exception;
-    let canary = addr_of!((*exception).canary).read();
-    if !core::ptr::eq(canary, addr_of!(TYPE_DESCRIPTOR)) {
+    let canary = (&raw const (*exception).canary).read();
+    if !core::ptr::eq(canary, &raw const TYPE_DESCRIPTOR) {
         // A foreign Rust exception.
         super::__rust_foreign_exception();
     }
@@ -9,7 +9,6 @@
 use crate::collections::TryReserveError;
 use crate::hash::{Hash, Hasher};
 use crate::ops::{self, Range};
-use crate::ptr::addr_of_mut;
 use crate::rc::Rc;
 use crate::str::FromStr;
 use crate::sync::Arc;

@@ -1272,7 +1271,7 @@ unsafe impl CloneToUninit for OsStr {
     #[cfg_attr(debug_assertions, track_caller)]
     unsafe fn clone_to_uninit(&self, dst: *mut Self) {
         // SAFETY: we're just a wrapper around a platform-specific Slice
-        unsafe { self.inner.clone_to_uninit(addr_of_mut!((*dst).inner)) }
+        unsafe { self.inner.clone_to_uninit(&raw mut (*dst).inner) }
     }
 }
@@ -1732,7 +1732,7 @@ fn windows_unix_socket_exists() {
         let bytes = core::slice::from_raw_parts(bytes.as_ptr().cast::<i8>(), bytes.len());
         addr.sun_path[..bytes.len()].copy_from_slice(bytes);
         let len = mem::size_of_val(&addr) as i32;
-        let result = c::bind(socket, ptr::addr_of!(addr).cast::<c::SOCKADDR>(), len);
+        let result = c::bind(socket, (&raw const addr).cast::<c::SOCKADDR>(), len);
         c::closesocket(socket);
         assert_eq!(result, 0);
     }
@@ -98,7 +98,7 @@ pub(super) fn new<F>(f: F) -> io::Result<SocketAddr>
         unsafe {
             let mut addr: libc::sockaddr_un = mem::zeroed();
             let mut len = mem::size_of::<libc::sockaddr_un>() as libc::socklen_t;
-            cvt(f(core::ptr::addr_of_mut!(addr) as *mut _, &mut len))?;
+            cvt(f((&raw mut addr) as *mut _, &mut len))?;
             SocketAddr::from_parts(addr, len)
         }
     }
@@ -37,7 +37,7 @@ pub(super) fn recv_vectored_with_ancillary_from(
     unsafe {
         let mut msg_name: libc::sockaddr_un = zeroed();
         let mut msg: libc::msghdr = zeroed();
-        msg.msg_name = core::ptr::addr_of_mut!(msg_name) as *mut _;
+        msg.msg_name = (&raw mut msg_name) as *mut _;
         msg.msg_namelen = size_of::<libc::sockaddr_un>() as libc::socklen_t;
         msg.msg_iov = bufs.as_mut_ptr().cast();
         msg.msg_iovlen = bufs.len() as _;

@@ -70,7 +70,7 @@ pub(super) fn send_vectored_with_ancillary_to(
         if let Some(path) = path { sockaddr_un(path)? } else { (zeroed(), 0) };

     let mut msg: libc::msghdr = zeroed();
-    msg.msg_name = core::ptr::addr_of_mut!(msg_name) as *mut _;
+    msg.msg_name = (&raw mut msg_name) as *mut _;
     msg.msg_namelen = msg_namelen;
     msg.msg_iov = bufs.as_ptr() as *mut _;
     msg.msg_iovlen = bufs.len() as _;
@@ -100,7 +100,7 @@ pub fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixDatagram> {
             let socket = UnixDatagram::unbound()?;
             let (addr, len) = sockaddr_un(path.as_ref())?;

-            cvt(libc::bind(socket.as_raw_fd(), core::ptr::addr_of!(addr) as *const _, len as _))?;
+            cvt(libc::bind(socket.as_raw_fd(), (&raw const addr) as *const _, len as _))?;

             Ok(socket)
         }

@@ -133,7 +133,7 @@ pub fn bind_addr(socket_addr: &SocketAddr) -> io::Result<UnixDatagram> {
             let socket = UnixDatagram::unbound()?;
             cvt(libc::bind(
                 socket.as_raw_fd(),
-                core::ptr::addr_of!(socket_addr.addr) as *const _,
+                (&raw const socket_addr.addr) as *const _,
                 socket_addr.len as _,
             ))?;
             Ok(socket)

@@ -215,7 +215,7 @@ pub fn connect<P: AsRef<Path>>(&self, path: P) -> io::Result<()> {
         unsafe {
             let (addr, len) = sockaddr_un(path.as_ref())?;

-            cvt(libc::connect(self.as_raw_fd(), core::ptr::addr_of!(addr) as *const _, len))?;
+            cvt(libc::connect(self.as_raw_fd(), (&raw const addr) as *const _, len))?;
         }
         Ok(())
     }

@@ -247,7 +247,7 @@ pub fn connect_addr(&self, socket_addr: &SocketAddr) -> io::Result<()> {
         unsafe {
             cvt(libc::connect(
                 self.as_raw_fd(),
-                core::ptr::addr_of!(socket_addr.addr) as *const _,
+                (&raw const socket_addr.addr) as *const _,
                 socket_addr.len,
             ))?;
         }

@@ -514,7 +514,7 @@ pub fn send_to<P: AsRef<Path>>(&self, buf: &[u8], path: P) -> io::Result<usize>
                 buf.as_ptr() as *const _,
                 buf.len(),
                 MSG_NOSIGNAL,
-                core::ptr::addr_of!(addr) as *const _,
+                (&raw const addr) as *const _,
                 len,
             ))?;
             Ok(count as usize)

@@ -549,7 +549,7 @@ pub fn send_to_addr(&self, buf: &[u8], socket_addr: &SocketAddr) -> io::Result<u
                 buf.as_ptr() as *const _,
                 buf.len(),
                 MSG_NOSIGNAL,
-                core::ptr::addr_of!(socket_addr.addr) as *const _,
+                (&raw const socket_addr.addr) as *const _,
                 socket_addr.len,
             ))?;
             Ok(count as usize)
@@ -103,11 +103,7 @@ pub fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixListener> {
             )))]
             const backlog: libc::c_int = libc::SOMAXCONN;

-            cvt(libc::bind(
-                inner.as_inner().as_raw_fd(),
-                core::ptr::addr_of!(addr) as *const _,
-                len as _,
-            ))?;
+            cvt(libc::bind(inner.as_inner().as_raw_fd(), (&raw const addr) as *const _, len as _))?;
             cvt(libc::listen(inner.as_inner().as_raw_fd(), backlog))?;

             Ok(UnixListener(inner))

@@ -147,7 +143,7 @@ pub fn bind_addr(socket_addr: &SocketAddr) -> io::Result<UnixListener> {
             const backlog: core::ffi::c_int = 128;
             cvt(libc::bind(
                 inner.as_raw_fd(),
-                core::ptr::addr_of!(socket_addr.addr) as *const _,
+                (&raw const socket_addr.addr) as *const _,
                 socket_addr.len as _,
             ))?;
             cvt(libc::listen(inner.as_raw_fd(), backlog))?;

@@ -182,7 +178,7 @@ pub fn bind_addr(socket_addr: &SocketAddr) -> io::Result<UnixListener> {
     pub fn accept(&self) -> io::Result<(UnixStream, SocketAddr)> {
         let mut storage: libc::sockaddr_un = unsafe { mem::zeroed() };
         let mut len = mem::size_of_val(&storage) as libc::socklen_t;
-        let sock = self.0.accept(core::ptr::addr_of_mut!(storage) as *mut _, &mut len)?;
+        let sock = self.0.accept((&raw mut storage) as *mut _, &mut len)?;
         let addr = SocketAddr::from_parts(storage, len)?;
         Ok((UnixStream(sock), addr))
     }
@@ -84,7 +84,7 @@ pub fn connect<P: AsRef<Path>>(path: P) -> io::Result<UnixStream> {
             let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_STREAM)?;
             let (addr, len) = sockaddr_un(path.as_ref())?;

-            cvt(libc::connect(inner.as_raw_fd(), core::ptr::addr_of!(addr) as *const _, len))?;
+            cvt(libc::connect(inner.as_raw_fd(), (&raw const addr) as *const _, len))?;
             Ok(UnixStream(inner))
         }
     }

@@ -118,7 +118,7 @@ pub fn connect_addr(socket_addr: &SocketAddr) -> io::Result<UnixStream> {
             let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_STREAM)?;
             cvt(libc::connect(
                 inner.as_raw_fd(),
-                core::ptr::addr_of!(socket_addr.addr) as *const _,
+                (&raw const socket_addr.addr) as *const _,
                 socket_addr.len,
             ))?;
             Ok(UnixStream(inner))
@@ -60,7 +60,7 @@ pub fn peer_cred(socket: &UnixStream) -> io::Result<UCred> {
             socket.as_raw_fd(),
             SOL_SOCKET,
             SO_PEERCRED,
-            core::ptr::addr_of_mut!(ucred) as *mut c_void,
+            (&raw mut ucred) as *mut c_void,
             &mut ucred_size,
         );

@@ -121,7 +121,7 @@ pub fn peer_cred(socket: &UnixStream) -> io::Result<UCred> {
             socket.as_raw_fd(),
             SOL_LOCAL,
             LOCAL_PEERPID,
-            core::ptr::addr_of_mut!(pid) as *mut c_void,
+            (&raw mut pid) as *mut c_void,
             &mut pid_size,
         );
@@ -506,7 +506,7 @@ union Data<F, R> {
     // method of calling a catch panic whilst juggling ownership.
     let mut data = Data { f: ManuallyDrop::new(f) };

-    let data_ptr = core::ptr::addr_of_mut!(data) as *mut u8;
+    let data_ptr = (&raw mut data) as *mut u8;
     // SAFETY:
     //
     // Access to the union's fields: this is `std` and we know that the `r#try`
@@ -3144,7 +3144,7 @@ unsafe impl CloneToUninit for Path {
     #[cfg_attr(debug_assertions, track_caller)]
     unsafe fn clone_to_uninit(&self, dst: *mut Self) {
         // SAFETY: Path is just a wrapper around OsStr
-        unsafe { self.inner.clone_to_uninit(core::ptr::addr_of_mut!((*dst).inner)) }
+        unsafe { self.inner.clone_to_uninit(&raw mut (*dst).inner) }
     }
 }
@@ -185,11 +185,7 @@ pub(crate) fn send(
             // Prepare for blocking until a receiver wakes us up.
             let oper = Operation::hook(token);
             let mut packet = Packet::<T>::message_on_stack(msg);
-            inner.senders.register_with_packet(
-                oper,
-                core::ptr::addr_of_mut!(packet) as *mut (),
-                cx,
-            );
+            inner.senders.register_with_packet(oper, (&raw mut packet) as *mut (), cx);
             inner.receivers.notify();
             drop(inner);

@@ -256,11 +252,7 @@ pub(crate) fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutErr
             // Prepare for blocking until a sender wakes us up.
             let oper = Operation::hook(token);
             let mut packet = Packet::<T>::empty_on_stack();
-            inner.receivers.register_with_packet(
-                oper,
-                core::ptr::addr_of_mut!(packet) as *mut (),
-                cx,
-            );
+            inner.receivers.register_with_packet(oper, (&raw mut packet) as *mut (), cx);
             inner.senders.notify();
             drop(inner);
@@ -2,7 +2,6 @@
 //! systems: just a `Vec<u8>`/`[u8]`.

 use core::clone::CloneToUninit;
-use core::ptr::addr_of_mut;

 use crate::borrow::Cow;
 use crate::collections::TryReserveError;

@@ -355,6 +354,6 @@ unsafe impl CloneToUninit for Slice {
     #[cfg_attr(debug_assertions, track_caller)]
     unsafe fn clone_to_uninit(&self, dst: *mut Self) {
         // SAFETY: we're just a wrapper around [u8]
-        unsafe { self.inner.clone_to_uninit(addr_of_mut!((*dst).inner)) }
+        unsafe { self.inner.clone_to_uninit(&raw mut (*dst).inner) }
     }
 }
@@ -1,7 +1,6 @@
 //! The underlying OsString/OsStr implementation on Windows is a
 //! wrapper around the "WTF-8" encoding; see the `wtf8` module for more.
 use core::clone::CloneToUninit;
-use core::ptr::addr_of_mut;

 use crate::borrow::Cow;
 use crate::collections::TryReserveError;

@@ -278,6 +277,6 @@ unsafe impl CloneToUninit for Slice {
     #[cfg_attr(debug_assertions, track_caller)]
     unsafe fn clone_to_uninit(&self, dst: *mut Self) {
         // SAFETY: we're just a wrapper around Wtf8
-        unsafe { self.inner.clone_to_uninit(addr_of_mut!((*dst).inner)) }
+        unsafe { self.inner.clone_to_uninit(&raw mut (*dst).inner) }
     }
 }
@@ -192,7 +192,7 @@ fn recv_from_with_flags(&self, buf: &mut [u8], flags: i32) -> io::Result<(usize,
                 buf.as_mut_ptr(),
                 buf.len(),
                 flags,
-                core::ptr::addr_of_mut!(storage) as *mut _,
+                (&raw mut storage) as *mut _,
                 &mut addrlen,
             )
         })?;

@@ -298,7 +298,7 @@ pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
             netc::ioctl(
                 self.as_raw_fd(),
                 netc::FIONBIO,
-                core::ptr::addr_of_mut!(nonblocking) as *mut core::ffi::c_void,
+                (&raw mut nonblocking) as *mut core::ffi::c_void,
             )
         })
         .map(drop)
@@ -107,8 +107,7 @@ fn hash<H: Hasher>(&self, state: &mut H) {
 impl Instant {
     pub fn now() -> Instant {
         let mut time: Timespec = Timespec::zero();
-        let _ =
-            unsafe { hermit_abi::clock_gettime(CLOCK_MONOTONIC, core::ptr::addr_of_mut!(time.t)) };
+        let _ = unsafe { hermit_abi::clock_gettime(CLOCK_MONOTONIC, &raw mut time.t) };

         Instant(time)
     }

@@ -209,8 +208,7 @@ pub fn new(tv_sec: i64, tv_nsec: i32) -> SystemTime {

     pub fn now() -> SystemTime {
         let mut time: Timespec = Timespec::zero();
-        let _ =
-            unsafe { hermit_abi::clock_gettime(CLOCK_REALTIME, core::ptr::addr_of_mut!(time.t)) };
+        let _ = unsafe { hermit_abi::clock_gettime(CLOCK_REALTIME, &raw mut time.t) };

         SystemTime(time)
     }
@@ -740,7 +740,7 @@ fn next(&mut self) -> Option<io::Result<DirEntry>> {
             //
             // Like for uninitialized contents, converting entry_ptr to `&dirent64`
             // would not be legal. However, unique to dirent64 is that we don't even
-            // get to use `addr_of!((*entry_ptr).d_name)` because that operation
+            // get to use `&raw const (*entry_ptr).d_name` because that operation
             // requires the full extent of *entry_ptr to be in bounds of the same
             // allocation, which is not necessarily the case here.
             //

@@ -754,7 +754,7 @@ macro_rules! offset_ptr {
         } else {
             #[allow(deref_nullptr)]
             {
-                ptr::addr_of!((*ptr::null::<dirent64>()).$field)
+                &raw const (*ptr::null::<dirent64>()).$field
             }
         }
     }};

@@ -1385,7 +1385,7 @@ pub fn set_times(&self, times: FileTimes) -> io::Result<()> {
         }
         cvt(unsafe { libc::fsetattrlist(
             self.as_raw_fd(),
-            core::ptr::addr_of!(attrlist).cast::<libc::c_void>().cast_mut(),
+            (&raw const attrlist).cast::<libc::c_void>().cast_mut(),
             buf.as_ptr().cast::<libc::c_void>().cast_mut(),
             num_times * mem::size_of::<libc::timespec>(),
             0

@@ -1944,7 +1944,7 @@ fn drop(&mut self) {
             libc::copyfile_state_get(
                 state.0,
                 libc::COPYFILE_STATE_COPIED as u32,
-                core::ptr::addr_of_mut!(bytes_copied) as *mut libc::c_void,
+                (&raw mut bytes_copied) as *mut libc::c_void,
             )
         })?;
         Ok(bytes_copied as u64)
@@ -329,7 +329,7 @@ fn recv_from_with_flags(
                 buf.as_mut_ptr() as *mut c_void,
                 buf.len(),
                 flags,
-                core::ptr::addr_of_mut!(storage) as *mut _,
+                (&raw mut storage) as *mut _,
                 &mut addrlen,
             )
         })?;
@@ -612,7 +612,7 @@ pub unsafe fn environ() -> *mut *const *const c_char {
     extern "C" {
         static mut environ: *const *const c_char;
     }
-    ptr::addr_of_mut!(environ)
+    &raw mut environ
 }

 static ENV_LOCK: RwLock<()> = RwLock::new(());
@@ -178,7 +178,7 @@ pub fn wait(&mut self) -> io::Result<ExitStatus> {
             zx_cvt(zx_object_get_info(
                 self.handle.raw(),
                 ZX_INFO_PROCESS,
-                core::ptr::addr_of_mut!(proc_info) as *mut libc::c_void,
+                (&raw mut proc_info) as *mut libc::c_void,
                 mem::size_of::<zx_info_process_t>(),
                 &mut actual,
                 &mut avail,

@@ -215,7 +215,7 @@ pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
             zx_cvt(zx_object_get_info(
                 self.handle.raw(),
                 ZX_INFO_PROCESS,
-                core::ptr::addr_of_mut!(proc_info) as *mut libc::c_void,
+                (&raw mut proc_info) as *mut libc::c_void,
                 mem::size_of::<zx_info_process_t>(),
                 &mut actual,
                 &mut avail,
@@ -788,15 +788,15 @@ union Cmsg {
         let mut iov = [IoSlice::new(b"")];
         let mut msg: libc::msghdr = mem::zeroed();

-        msg.msg_iov = core::ptr::addr_of_mut!(iov) as *mut _;
+        msg.msg_iov = (&raw mut iov) as *mut _;
         msg.msg_iovlen = 1;

         // only attach cmsg if we successfully acquired the pidfd
         if pidfd >= 0 {
             msg.msg_controllen = mem::size_of_val(&cmsg.buf) as _;
-            msg.msg_control = core::ptr::addr_of_mut!(cmsg.buf) as *mut _;
+            msg.msg_control = (&raw mut cmsg.buf) as *mut _;

-            let hdr = CMSG_FIRSTHDR(core::ptr::addr_of_mut!(msg) as *mut _);
+            let hdr = CMSG_FIRSTHDR((&raw mut msg) as *mut _);
             (*hdr).cmsg_level = SOL_SOCKET;
             (*hdr).cmsg_type = SCM_RIGHTS;
             (*hdr).cmsg_len = CMSG_LEN(SCM_MSG_LEN as _) as _;

@@ -838,17 +838,17 @@ union Cmsg {

         let mut msg: libc::msghdr = mem::zeroed();

-        msg.msg_iov = core::ptr::addr_of_mut!(iov) as *mut _;
+        msg.msg_iov = (&raw mut iov) as *mut _;
         msg.msg_iovlen = 1;
         msg.msg_controllen = mem::size_of::<Cmsg>() as _;
-        msg.msg_control = core::ptr::addr_of_mut!(cmsg) as *mut _;
+        msg.msg_control = (&raw mut cmsg) as *mut _;

         match cvt_r(|| libc::recvmsg(sock.as_raw(), &mut msg, libc::MSG_CMSG_CLOEXEC)) {
             Err(_) => return -1,
             Ok(_) => {}
         }

-        let hdr = CMSG_FIRSTHDR(core::ptr::addr_of_mut!(msg) as *mut _);
+        let hdr = CMSG_FIRSTHDR((&raw mut msg) as *mut _);
         if hdr.is_null()
             || (*hdr).cmsg_level != SOL_SOCKET
             || (*hdr).cmsg_type != SCM_RIGHTS
@@ -426,8 +426,8 @@ unsafe fn install_main_guard_freebsd(page_size: usize) -> Option<Range<usize>> {
     match sysctlbyname.get() {
         Some(fcn) if unsafe {
             fcn(oid.as_ptr(),
-                ptr::addr_of_mut!(guard).cast(),
-                ptr::addr_of_mut!(size),
+                (&raw mut guard).cast(),
+                &raw mut size,
                 ptr::null_mut(),
                 0) == 0
         } => guard,
@@ -258,7 +258,7 @@ pub fn sleep(dur: Duration) {
                 tv_nsec: nsecs,
             };
             secs -= ts.tv_sec as u64;
-            let ts_ptr = core::ptr::addr_of_mut!(ts);
+            let ts_ptr = &raw mut ts;
             if libc::nanosleep(ts_ptr, ts_ptr) == -1 {
                 assert_eq!(os::errno(), libc::EINTR);
                 secs += ts.tv_sec as u64;

@@ -447,8 +447,8 @@ pub fn available_parallelism() -> io::Result<NonZero<usize>> {
                 libc::sysctl(
                     mib.as_mut_ptr(),
                     2,
-                    core::ptr::addr_of_mut!(cpus) as *mut _,
-                    core::ptr::addr_of_mut!(cpus_size) as *mut _,
+                    (&raw mut cpus) as *mut _,
+                    (&raw mut cpus_size) as *mut _,
                     ptr::null_mut(),
                     0,
                 )
@@ -30,7 +30,6 @@
 //! should go in sys/pal/windows/mod.rs rather than here. See `IoResult` as an example.

 use core::ffi::c_void;
-use core::ptr::addr_of;

 use super::c;

@@ -186,7 +185,7 @@ unsafe trait SizedSetFileInformation: Sized {
 unsafe impl<T: SizedSetFileInformation> SetFileInformation for T {
     const CLASS: i32 = T::CLASS;
     fn as_ptr(&self) -> *const c_void {
-        addr_of!(*self).cast::<c_void>()
+        (&raw const *self).cast::<c_void>()
     }
     fn size(&self) -> u32 {
         win32_size_of::<Self>()
@@ -1,5 +1,3 @@
-use core::ptr::addr_of;
-
 use super::api::{self, WinError};
 use super::{IoResult, to_u16s};
 use crate::borrow::Cow;

@@ -325,7 +323,7 @@ pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
         let result = c::SetFileInformationByHandle(
             handle.as_raw_handle(),
             c::FileEndOfFileInfo,
-            ptr::addr_of!(eof).cast::<c_void>(),
+            (&raw const eof).cast::<c_void>(),
             mem::size_of::<c::FILE_END_OF_FILE_INFO>() as u32,
         );
         if result == 0 {

@@ -364,7 +362,7 @@ pub fn file_attr(&self) -> io::Result<FileAttr> {
             cvt(c::GetFileInformationByHandleEx(
                 self.handle.as_raw_handle(),
                 c::FileAttributeTagInfo,
-                ptr::addr_of_mut!(attr_tag).cast(),
+                (&raw mut attr_tag).cast(),
                 mem::size_of::<c::FILE_ATTRIBUTE_TAG_INFO>().try_into().unwrap(),
             ))?;
             if attr_tag.FileAttributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0 {

@@ -396,7 +394,7 @@ pub fn file_attr(&self) -> io::Result<FileAttr> {
             cvt(c::GetFileInformationByHandleEx(
                 self.handle.as_raw_handle(),
                 c::FileBasicInfo,
-                core::ptr::addr_of_mut!(info) as *mut c_void,
+                (&raw mut info) as *mut c_void,
                 size as u32,
             ))?;
             let mut attr = FileAttr {

@@ -428,7 +426,7 @@ pub fn file_attr(&self) -> io::Result<FileAttr> {
             cvt(c::GetFileInformationByHandleEx(
                 self.handle.as_raw_handle(),
                 c::FileStandardInfo,
-                core::ptr::addr_of_mut!(info) as *mut c_void,
+                (&raw mut info) as *mut c_void,
                 size as u32,
             ))?;
             attr.file_size = info.AllocationSize as u64;

@@ -438,7 +436,7 @@ pub fn file_attr(&self) -> io::Result<FileAttr> {
             cvt(c::GetFileInformationByHandleEx(
                 self.handle.as_raw_handle(),
                 c::FileAttributeTagInfo,
-                ptr::addr_of_mut!(attr_tag).cast(),
+                (&raw mut attr_tag).cast(),
                 mem::size_of::<c::FILE_ATTRIBUTE_TAG_INFO>().try_into().unwrap(),
             ))?;
             if attr_tag.FileAttributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0 {

@@ -545,22 +543,20 @@ fn readlink(&self) -> io::Result<PathBuf> {
         unsafe {
             let (path_buffer, subst_off, subst_len, relative) = match (*buf).ReparseTag {
                 c::IO_REPARSE_TAG_SYMLINK => {
-                    let info: *mut c::SYMBOLIC_LINK_REPARSE_BUFFER =
-                        ptr::addr_of_mut!((*buf).rest).cast();
+                    let info: *mut c::SYMBOLIC_LINK_REPARSE_BUFFER = (&raw mut (*buf).rest).cast();
                     assert!(info.is_aligned());
                     (
-                        ptr::addr_of_mut!((*info).PathBuffer).cast::<u16>(),
+                        (&raw mut (*info).PathBuffer).cast::<u16>(),
                         (*info).SubstituteNameOffset / 2,
                         (*info).SubstituteNameLength / 2,
                         (*info).Flags & c::SYMLINK_FLAG_RELATIVE != 0,
                     )
                 }
                 c::IO_REPARSE_TAG_MOUNT_POINT => {
-                    let info: *mut c::MOUNT_POINT_REPARSE_BUFFER =
-                        ptr::addr_of_mut!((*buf).rest).cast();
+                    let info: *mut c::MOUNT_POINT_REPARSE_BUFFER = (&raw mut (*buf).rest).cast();
                     assert!(info.is_aligned());
                     (
-                        ptr::addr_of_mut!((*info).PathBuffer).cast::<u16>(),
+                        (&raw mut (*info).PathBuffer).cast::<u16>(),
                         (*info).SubstituteNameOffset / 2,
                         (*info).SubstituteNameLength / 2,
                         false,

@@ -643,7 +639,7 @@ fn basic_info(&self) -> io::Result<c::FILE_BASIC_INFO> {
         cvt(c::GetFileInformationByHandleEx(
             self.handle.as_raw_handle(),
             c::FileBasicInfo,
-            core::ptr::addr_of_mut!(info) as *mut c_void,
+            (&raw mut info) as *mut c_void,
             size as u32,
         ))?;
         Ok(info)

@@ -790,11 +786,11 @@ fn next(&mut self) -> Option<Self::Item> {
             // it does not seem that reality is so kind, and assuming this
             // caused crashes in some cases (https://github.com/rust-lang/rust/issues/104530)
             // presumably, this can be blamed on buggy filesystem drivers, but who knows.
-            let next_entry = ptr::addr_of!((*info).NextEntryOffset).read_unaligned() as usize;
-            let length = ptr::addr_of!((*info).FileNameLength).read_unaligned() as usize;
-            let attrs = ptr::addr_of!((*info).FileAttributes).read_unaligned();
+            let next_entry = (&raw const (*info).NextEntryOffset).read_unaligned() as usize;
+            let length = (&raw const (*info).FileNameLength).read_unaligned() as usize;
+            let attrs = (&raw const (*info).FileAttributes).read_unaligned();
             let name = from_maybe_unaligned(
-                ptr::addr_of!((*info).FileName).cast::<u16>(),
+                (&raw const (*info).FileName).cast::<u16>(),
                 length / size_of::<u16>(),
             );
             let is_directory = (attrs & c::FILE_ATTRIBUTE_DIRECTORY) != 0;

@@ -1326,7 +1322,7 @@ pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
             pfrom.as_ptr(),
             pto.as_ptr(),
             Some(callback),
-            core::ptr::addr_of_mut!(size) as *mut _,
+            (&raw mut size) as *mut _,
             ptr::null_mut(),
             0,
         )

@@ -1405,7 +1401,7 @@ pub struct MountPointBuffer {
         cvt(c::DeviceIoControl(
             d.as_raw_handle(),
             c::FSCTL_SET_REPARSE_POINT,
-            addr_of!(header).cast::<c_void>(),
+            (&raw const header).cast::<c_void>(),
             data_len as u32 + 8,
             ptr::null_mut(),
             0,
@@ -57,7 +57,7 @@ pub fn wait_on_address<W: Waitable>(
     unsafe {
         let addr = ptr::from_ref(address).cast::<c_void>();
         let size = mem::size_of::<W>();
-        let compare_addr = ptr::addr_of!(compare).cast::<c_void>();
+        let compare_addr = (&raw const compare).cast::<c_void>();
         let timeout = timeout.map(dur2timeout).unwrap_or(c::INFINITE);
         c::WaitOnAddress(addr, compare_addr, size, timeout) == c::TRUE
     }
@@ -122,7 +122,7 @@ struct FILE_NAME_INFO {
         c::GetFileInformationByHandleEx(
             handle.as_raw_handle(),
             c::FileNameInfo,
-            core::ptr::addr_of_mut!(name_info) as *mut c_void,
+            (&raw mut name_info) as *mut c_void,
             size_of::<FILE_NAME_INFO>() as u32,
         )
     };
@@ -390,7 +390,7 @@ fn recv_from_with_flags(
                 buf.as_mut_ptr() as *mut _,
                 length,
                 flags,
-                core::ptr::addr_of_mut!(storage) as *mut _,
+                (&raw mut storage) as *mut _,
                 &mut addrlen,
             )
         };
@@ -375,7 +375,7 @@ struct AsyncResult {
         let mut overlapped: c::OVERLAPPED = unsafe { crate::mem::zeroed() };
         // `hEvent` is unused by `ReadFileEx` and `WriteFileEx`.
         // Therefore the documentation suggests using it to smuggle a pointer to the callback.
-        overlapped.hEvent = core::ptr::addr_of_mut!(async_result) as *mut _;
+        overlapped.hEvent = (&raw mut async_result) as *mut _;

         // Asynchronous read of the pipe.
         // If successful, `callback` will be called once it completes.
@@ -368,10 +368,10 @@ pub fn spawn(
                 StartupInfo: si,
                 lpAttributeList: proc_thread_attribute_list.0.as_mut_ptr() as _,
             };
-            si_ptr = core::ptr::addr_of_mut!(si_ex) as _;
+            si_ptr = (&raw mut si_ex) as _;
         } else {
             si.cb = mem::size_of::<c::STARTUPINFOW>() as u32;
-            si_ptr = core::ptr::addr_of_mut!(si) as _;
+            si_ptr = (&raw mut si) as _;
         }

         unsafe {

@@ -953,7 +953,7 @@ fn make_proc_thread_attribute_list(
     // It's theoretically possible for the attribute count to exceed a u32 value.
     // Therefore, we ensure that we don't add more attributes than the buffer was initialized for.
     for (&attribute, value) in attributes.iter().take(attribute_count as usize) {
-        let value_ptr = core::ptr::addr_of!(*value.data) as _;
+        let value_ptr = (&raw const *value.data) as _;
         cvt(unsafe {
             c::UpdateProcThreadAttribute(
                 proc_thread_attribute_list.0.as_mut_ptr() as _,
@@ -3,7 +3,6 @@
 use crate::cell::UnsafeCell;
 use crate::marker::PhantomPinned;
 use crate::pin::Pin;
-use crate::ptr::addr_of_mut;
 use crate::sync::atomic::AtomicUsize;
 use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
 #[cfg(not(target_os = "nto"))]

@@ -101,8 +100,8 @@ pub unsafe fn new_in_place(parker: *mut Parker) {
         // This could lead to undefined behaviour when deadlocking. This is avoided
        // by not deadlocking. Note in particular the unlocking operation before any
        // panic, as code after the panic could try to park again.
-        addr_of_mut!((*parker).state).write(AtomicUsize::new(EMPTY));
-        addr_of_mut!((*parker).lock).write(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER));
+        (&raw mut (*parker).state).write(AtomicUsize::new(EMPTY));
+        (&raw mut (*parker).lock).write(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER));

         cfg_if::cfg_if! {
             if #[cfg(any(

@@ -112,9 +111,9 @@ pub unsafe fn new_in_place(parker: *mut Parker) {
                 target_os = "vita",
                 target_vendor = "apple",
             ))] {
-                addr_of_mut!((*parker).cvar).write(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER));
+                (&raw mut (*parker).cvar).write(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER));
             } else if #[cfg(any(target_os = "espidf", target_os = "horizon"))] {
-                let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), crate::ptr::null());
+                let r = libc::pthread_cond_init((&raw mut (*parker).cvar).cast(), crate::ptr::null());
                 assert_eq!(r, 0);
             } else {
                 use crate::mem::MaybeUninit;

@@ -123,7 +122,7 @@ pub unsafe fn new_in_place(parker: *mut Parker) {
                 assert_eq!(r, 0);
                 let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
                 assert_eq!(r, 0);
-                let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), attr.as_ptr());
+                let r = libc::pthread_cond_init((&raw mut (*parker).cvar).cast(), attr.as_ptr());
                 assert_eq!(r, 0);
                 let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
                 assert_eq!(r, 0);
@@ -178,7 +178,7 @@ pub fn unpark(self: Pin<&Self>) {
     }

     fn ptr(&self) -> *const c_void {
-        core::ptr::addr_of!(self.state).cast::<c_void>()
+        (&raw const self.state).cast::<c_void>()
    }
 }
@@ -47,7 +47,7 @@ pub unsafe fn register(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
                     dtor,
                 ),
                 t.cast(),
-                core::ptr::addr_of!(__dso_handle) as *mut _,
+                (&raw const __dso_handle) as *mut _,
             );
         }
     } else {
@@ -74,7 +74,7 @@ pub fn setsockopt<T>(
         sock.as_raw(),
         level,
         option_name,
-        core::ptr::addr_of!(option_value) as *const _,
+        (&raw const option_value) as *const _,
         mem::size_of::<T>() as c::socklen_t,
     ))?;
     Ok(())

@@ -89,7 +89,7 @@ pub fn getsockopt<T: Copy>(sock: &Socket, level: c_int, option_name: c_int) -> i
         sock.as_raw(),
         level,
         option_name,
-        core::ptr::addr_of_mut!(option_value) as *mut _,
+        (&raw mut option_value) as *mut _,
         &mut option_len,
     ))?;
     Ok(option_value)

@@ -103,7 +103,7 @@ fn sockname<F>(f: F) -> io::Result<SocketAddr>
     unsafe {
         let mut storage: c::sockaddr_storage = mem::zeroed();
         let mut len = mem::size_of_val(&storage) as c::socklen_t;
-        cvt(f(core::ptr::addr_of_mut!(storage) as *mut _, &mut len))?;
+        cvt(f((&raw mut storage) as *mut _, &mut len))?;
         sockaddr_to_addr(&storage, len as usize)
     }
 }

@@ -452,7 +452,7 @@ pub fn socket_addr(&self) -> io::Result<SocketAddr> {
     pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
         let mut storage: c::sockaddr_storage = unsafe { mem::zeroed() };
         let mut len = mem::size_of_val(&storage) as c::socklen_t;
-        let sock = self.inner.accept(core::ptr::addr_of_mut!(storage) as *mut _, &mut len)?;
+        let sock = self.inner.accept((&raw mut storage) as *mut _, &mut len)?;
         let addr = sockaddr_to_addr(&storage, len as usize)?;
         Ok((TcpStream { inner: sock }, addr))
     }
@@ -26,7 +26,6 @@
 use crate::collections::TryReserveError;
 use crate::hash::{Hash, Hasher};
 use crate::iter::FusedIterator;
-use crate::ptr::addr_of_mut;
 use crate::rc::Rc;
 use crate::sync::Arc;
 use crate::sys_common::AsInner;

@@ -1055,6 +1054,6 @@ unsafe impl CloneToUninit for Wtf8 {
     #[cfg_attr(debug_assertions, track_caller)]
     unsafe fn clone_to_uninit(&self, dst: *mut Self) {
         // SAFETY: we're just a wrapper around [u8]
-        unsafe { self.bytes.clone_to_uninit(addr_of_mut!((*dst).bytes)) }
+        unsafe { self.bytes.clone_to_uninit(&raw mut (*dst).bytes) }
     }
 }
@@ -165,7 +165,6 @@
 use crate::mem::{self, ManuallyDrop, forget};
 use crate::num::NonZero;
 use crate::pin::Pin;
-use crate::ptr::addr_of_mut;
 use crate::sync::Arc;
 use crate::sync::atomic::{AtomicUsize, Ordering};
 use crate::sys::sync::Parker;

@@ -1386,9 +1385,9 @@ fn new_inner(name: ThreadName) -> Thread {
     let inner = unsafe {
         let mut arc = Arc::<Inner>::new_uninit();
         let ptr = Arc::get_mut_unchecked(&mut arc).as_mut_ptr();
-        addr_of_mut!((*ptr).name).write(name);
-        addr_of_mut!((*ptr).id).write(ThreadId::new());
-        Parker::new_in_place(addr_of_mut!((*ptr).parker));
+        (&raw mut (*ptr).name).write(name);
+        (&raw mut (*ptr).id).write(ThreadId::new());
+        Parker::new_in_place(&raw mut (*ptr).parker);
         Pin::new_unchecked(arc.assume_init())
     };
@@ -222,14 +222,14 @@ fn _Unwind_VRS_Set(ctx: *mut _Unwind_Context,
 pub unsafe fn _Unwind_GetGR(ctx: *mut _Unwind_Context, reg_index: c_int) -> _Unwind_Word {
     let mut val: _Unwind_Word = core::ptr::null();
     _Unwind_VRS_Get(ctx, _UVRSC_CORE, reg_index as _Unwind_Word, _UVRSD_UINT32,
-                    core::ptr::addr_of_mut!(val) as *mut c_void);
+                    (&raw mut val) as *mut c_void);
     val
 }

 pub unsafe fn _Unwind_SetGR(ctx: *mut _Unwind_Context, reg_index: c_int, value: _Unwind_Word) {
     let mut value = value;
     _Unwind_VRS_Set(ctx, _UVRSC_CORE, reg_index as _Unwind_Word, _UVRSD_UINT32,
-                    core::ptr::addr_of_mut!(value) as *mut c_void);
+                    (&raw mut value) as *mut c_void);
 }

 pub unsafe fn _Unwind_GetIP(ctx: *mut _Unwind_Context)