diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
index ae730be0d25..8853577371a 100644
--- a/library/alloc/src/slice.rs
+++ b/library/alloc/src/slice.rs
@@ -892,7 +892,7 @@ fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
             // performance than with the 2nd method.
             //
             // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
-            let mut tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));
+            let tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));
 
             // Intermediate state of the insertion process is always tracked by `hole`, which
             // serves two purposes:
@@ -904,7 +904,7 @@ fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
             // If `is_less` panics at any point during the process, `hole` will get dropped and
             // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
             // initially held exactly once.
-            let mut hole = InsertionHole { src: &mut *tmp, dest: &mut v[1] };
+            let mut hole = InsertionHole { src: &*tmp, dest: &mut v[1] };
             ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
 
             for i in 2..v.len() {
@@ -920,7 +920,7 @@ fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
 
     // When dropped, copies from `src` into `dest`.
     struct InsertionHole<T> {
-        src: *mut T,
+        src: *const T,
         dest: *mut T,
     }
 
diff --git a/library/core/src/slice/sort.rs b/library/core/src/slice/sort.rs
index b5e6083c663..8f58e8897b3 100644
--- a/library/core/src/slice/sort.rs
+++ b/library/core/src/slice/sort.rs
@@ -12,7 +12,7 @@
 
 /// When dropped, copies from `src` into `dest`.
 struct CopyOnDrop<T> {
-    src: *mut T,
+    src: *const T,
     dest: *mut T,
 }
 
@@ -54,9 +54,9 @@ fn shift_head<T, F>(v: &mut [T], is_less: &mut F)
             // Read the first element into a stack-allocated variable. If a following comparison
             // operation panics, `hole` will get dropped and automatically write the element back
             // into the slice.
-            let mut tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(0)));
+            let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(0)));
             let v = v.as_mut_ptr();
-            let mut hole = CopyOnDrop { src: &mut *tmp, dest: v.add(1) };
+            let mut hole = CopyOnDrop { src: &*tmp, dest: v.add(1) };
             ptr::copy_nonoverlapping(v.add(1), v.add(0), 1);
 
             for i in 2..len {
@@ -100,9 +100,9 @@ fn shift_tail<T, F>(v: &mut [T], is_less: &mut F)
             // Read the last element into a stack-allocated variable. If a following comparison
             // operation panics, `hole` will get dropped and automatically write the element back
             // into the slice.
-            let mut tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(len - 1)));
+            let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(len - 1)));
             let v = v.as_mut_ptr();
-            let mut hole = CopyOnDrop { src: &mut *tmp, dest: v.add(len - 2) };
+            let mut hole = CopyOnDrop { src: &*tmp, dest: v.add(len - 2) };
             ptr::copy_nonoverlapping(v.add(len - 2), v.add(len - 1), 1);
 
             for i in (0..len - 2).rev() {
@@ -498,8 +498,8 @@ fn partition<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> (usize, bool)
         // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
        // operation panics, the pivot will be automatically written back into the slice.
         // SAFETY: `pivot` is a reference to the first element of `v`, so `ptr::read` is safe.
-        let mut tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
-        let _pivot_guard = CopyOnDrop { src: &mut *tmp, dest: pivot };
+        let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
+        let _pivot_guard = CopyOnDrop { src: &*tmp, dest: pivot };
         let pivot = &*tmp;
 
         // Find the first pair of out-of-order elements.
@@ -551,8 +551,8 @@ fn partition_equal<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> usize
     // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
     // operation panics, the pivot will be automatically written back into the slice.
     // SAFETY: The pointer here is valid because it is obtained from a reference to a slice.
-    let mut tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
-    let _pivot_guard = CopyOnDrop { src: &mut *tmp, dest: pivot };
+    let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
+    let _pivot_guard = CopyOnDrop { src: &*tmp, dest: pivot };
     let pivot = &*tmp;
 
     // Now partition the slice.
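Note: the sketch below is not the library code itself; it is a minimal, self-contained illustration of the copy-on-drop guard pattern this diff touches, using hypothetical names (`shift_first_right` and the small `main` demo). It shows why the guard's `src` field only needs to be `*const T`: the guard only ever reads through `src` when it fires in `drop`, so a shared borrow of the stack temporary (`&*tmp`) is enough and no mutable reference aliasing the temporary has to be created.

use std::mem::ManuallyDrop;
use std::ptr;

/// When dropped, copies one `T` from `src` into `dest`.
struct CopyOnDrop<T> {
    src: *const T, // only ever read from, so a const pointer suffices
    dest: *mut T,  // the current "hole" in the slice that still needs a value
}

impl<T> Drop for CopyOnDrop<T> {
    fn drop(&mut self) {
        // SAFETY: the code constructing the guard keeps `src` and `dest` valid,
        // non-overlapping, and `dest` writable for exactly one `T`.
        unsafe { ptr::copy_nonoverlapping(self.src, self.dest, 1) };
    }
}

/// Shifts `v[0]` to the right until it meets an element that is not less than it,
/// staying panic-safe while the comparison closure runs (same idea as `shift_head`).
fn shift_first_right<T, F: FnMut(&T, &T) -> bool>(v: &mut [T], is_less: &mut F) {
    if v.len() >= 2 && is_less(&v[1], &v[0]) {
        unsafe {
            // Move `v[0]` into a stack temporary. `ManuallyDrop` prevents a double
            // drop; the guard below writes the value back into the current hole
            // even if `is_less` panics later.
            let tmp = ManuallyDrop::new(ptr::read(&v[0]));
            // `&*tmp` is a shared reference and coerces to `*const T`; no `&mut`
            // to the temporary is needed, which is the point of the diff above.
            let mut hole = CopyOnDrop { src: &*tmp, dest: &mut v[0] };

            // `v[0]` is now logically a hole; fill it with `v[1]`, moving the hole right.
            ptr::copy_nonoverlapping(&v[1], hole.dest, 1);
            hole.dest = &mut v[1];

            for i in 2..v.len() {
                if !is_less(&v[i], &*tmp) {
                    break;
                }
                // Shift the next element into the hole and advance the hole.
                ptr::copy_nonoverlapping(&v[i], hole.dest, 1);
                hole.dest = &mut v[i];
            }
            // `hole` is dropped here (or during unwinding) and copies `tmp` into the
            // final hole, so the slice ends up holding every element exactly once.
        }
    }
}

fn main() {
    let mut v = [5, 1, 2, 3, 4];
    shift_first_right(&mut v, &mut |a, b| a < b);
    assert_eq!(v, [1, 2, 3, 4, 5]);
}

In this sketch, as in the patched library code, all writes go through `dest`, so keeping `src` as `*const T` lets the temporaries lose their `mut` bindings without changing behavior.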