diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
index 7c892f03bfb..31365562ddb 100644
--- a/library/alloc/src/slice.rs
+++ b/library/alloc/src/slice.rs
@@ -1043,9 +1043,9 @@ struct MergeHole<T> {
 
 impl<T> Drop for MergeHole<T> {
     fn drop(&mut self) {
-        // `T` is not a zero-sized type, so it's okay to divide by its size.
-        let len = (self.end.addr() - self.start.addr()) / mem::size_of::<T>();
+        // `T` is not a zero-sized type, and these are pointers into a slice's elements.
         unsafe {
+            let len = self.end.offset_from(self.start) as usize;
             ptr::copy_nonoverlapping(self.start, self.dest, len);
         }
     }
diff --git a/library/core/src/slice/ascii.rs b/library/core/src/slice/ascii.rs
index 63d761d3c02..6c9107401fd 100644
--- a/library/core/src/slice/ascii.rs
+++ b/library/core/src/slice/ascii.rs
@@ -294,7 +294,7 @@ fn is_ascii(s: &[u8]) -> bool {
     // Paranoia check about alignment, since we're about to do a bunch of
     // unaligned loads. In practice this should be impossible barring a bug in
     // `align_offset` though.
-    debug_assert_eq!((word_ptr.addr()) % mem::align_of::<usize>(), 0);
+    debug_assert_eq!(word_ptr.addr() % mem::align_of::<usize>(), 0);
 
     // Read subsequent words until the last aligned word, excluding the last
     // aligned word by itself to be done in tail check later, to ensure that
@@ -302,9 +302,9 @@ fn is_ascii(s: &[u8]) -> bool {
     while byte_pos < len - USIZE_SIZE {
         debug_assert!(
             // Sanity check that the read is in bounds
-            (word_ptr.addr() + USIZE_SIZE) <= (start.wrapping_add(len).addr()) &&
+            (word_ptr.addr() + USIZE_SIZE) <= start.addr().wrapping_add(len) &&
                 // And that our assumptions about `byte_pos` hold.
-                (word_ptr.addr()) - (start.addr()) == byte_pos
+                (word_ptr.addr() - start.addr()) == byte_pos
         );
 
         // SAFETY: We know `word_ptr` is properly aligned (because of
diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs
index 96ead49dd6a..b74ab28fc09 100644
--- a/library/core/src/slice/iter/macros.rs
+++ b/library/core/src/slice/iter/macros.rs
@@ -20,7 +20,7 @@ macro_rules! len {
         if size == 0 {
             // This _cannot_ use `unchecked_sub` because we depend on wrapping
             // to represent the length of long ZST slice iterators.
-            ($self.end.addr()).wrapping_sub(start.as_ptr().addr())
+            $self.end.addr().wrapping_sub(start.as_ptr().addr())
         } else {
             // We know that `start <= end`, so can do better than `offset_from`,
             // which needs to deal in signed. By setting appropriate flags here
diff --git a/library/std/src/io/error/repr_bitpacked.rs b/library/std/src/io/error/repr_bitpacked.rs
index 7cc1c701064..e80068b46ab 100644
--- a/library/std/src/io/error/repr_bitpacked.rs
+++ b/library/std/src/io/error/repr_bitpacked.rs
@@ -136,7 +136,7 @@ pub(super) fn new_custom(b: Box<Custom>) -> Self {
         let p = Box::into_raw(b).cast::<u8>();
         // Should only be possible if an allocator handed out a pointer with
         // wrong alignment.
-        debug_assert_eq!((p.addr() & TAG_MASK), 0);
+        debug_assert_eq!(p.addr() & TAG_MASK, 0);
         // Note: We know `TAG_CUSTOM <= size_of::<Custom>()` (static_assert at
         // end of file), and both the start and end of the expression must be
        // valid without address space wraparound due to `Box`'s semantics.