diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs
index 12e32fd9d35..7ac67870eb7 100644
--- a/src/liballoc/raw_vec.rs
+++ b/src/liballoc/raw_vec.rs
@@ -570,16 +570,19 @@ impl<T, A: AllocRef> RawVec<T, A> {
     ///
     /// # Safety
     ///
-    /// `shrink_to_fit(len)` must be called immediately prior to calling this function. This
-    /// implies, that `len` must be smaller than or equal to `self.capacity()`.
+    /// * `len` must be greater than or equal to the most recently requested capacity, and
+    /// * `len` must be less than or equal to `self.capacity()`.
+    ///
+    /// Note, that the requested capacity and `self.capacity()` could differ, as
+    /// an allocator could overallocate and return a greater memory block than requested.
     pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>]> {
+        // Sanity-check one half of the safety requirement (we cannot check the other half).
        debug_assert!(
            len <= self.capacity(),
            "`len` must be smaller than or equal to `self.capacity()`"
        );
 
        let me = ManuallyDrop::new(self);
-        // NOTE: not calling `capacity()` here; actually using the real `cap` field!
        let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
        Box::from_raw(slice)
    }
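For context, `RawVec::into_box` is what backs `Vec::into_boxed_slice`, which shrinks the buffer before handing it over. A minimal sketch of the observable contract, using only stable `Vec` APIs (the capacity value `10` is an arbitrary choice for illustration; note the `>=` on capacity, since an allocator may overallocate, exactly as the new doc comment warns):

```rust
fn main() {
    let mut v = Vec::with_capacity(10);
    v.extend([1, 2, 3].iter().copied());
    assert!(v.capacity() >= 10);
    assert_eq!(v.len(), 3);

    // `into_boxed_slice` first shrinks away the excess capacity (so the most
    // recently requested capacity equals `len`), then performs the `into_box`
    // conversion whose safety contract is documented above.
    let b: Box<[i32]> = v.into_boxed_slice();
    assert_eq!(b.len(), 3);
}
```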
diff --git a/src/liballoc/tests/vec.rs b/src/liballoc/tests/vec.rs
index 9c4ac52acac..6321e7154e7 100644
--- a/src/liballoc/tests/vec.rs
+++ b/src/liballoc/tests/vec.rs
@@ -1351,17 +1351,26 @@ fn test_try_reserve_exact() {
 }
 
 #[test]
-fn test_stable_push_pop() {
+fn test_stable_pointers() {
+    /// Pull an element from the iterator, then drop it.
+    /// Useful to cover both the `next` and `drop` paths of an iterator.
+    fn next_then_drop<I: Iterator>(mut i: I) {
+        i.next().unwrap();
+        drop(i);
+    }
+
     // Test that, if we reserved enough space, adding and removing elements does not
     // invalidate references into the vector (such as `v0`). This test also
     // runs in Miri, which would detect such problems.
-    let mut v = Vec::with_capacity(10);
+    let mut v = Vec::with_capacity(128);
     v.push(13);
-    // laundering the lifetime -- we take care that `v` does not reallocate, so that's okay.
-    let v0 = unsafe { &*(&v[0] as *const _) };
-
+    // Laundering the lifetime -- we take care that `v` does not reallocate, so that's okay.
+    let v0 = &mut v[0];
+    let v0 = unsafe { &mut *(v0 as *mut _) };
     // Now do a bunch of things and occasionally use `v0` again to assert it is still valid.
+
+    // Pushing/inserting and popping/removing
     v.push(1);
     v.push(2);
     v.insert(1, 1);
@@ -1369,6 +1378,58 @@
     v.remove(1);
     v.pop().unwrap();
     assert_eq!(*v0, 13);
+    v.push(1);
+    v.swap_remove(1);
+    assert_eq!(v.len(), 2);
+    v.swap_remove(1); // swap_remove the last element
+    assert_eq!(*v0, 13);
+
+    // Appending
+    v.append(&mut vec![27, 19]);
+    assert_eq!(*v0, 13);
+
+    // Extending
+    v.extend_from_slice(&[1, 2]);
+    v.extend(&[1, 2]); // `slice::Iter` (with `T: Copy`) specialization
+    v.extend(vec![2, 3]); // `vec::IntoIter` specialization
+    v.extend(std::iter::once(3)); // `TrustedLen` specialization
+    v.extend(std::iter::empty::<i32>()); // `TrustedLen` specialization with empty iterator
+    v.extend(std::iter::once(3).filter(|_| true)); // base case
+    v.extend(std::iter::once(&3)); // `cloned` specialization
+    assert_eq!(*v0, 13);
+
+    // Truncation
+    v.truncate(2);
+    assert_eq!(*v0, 13);
+
+    // Resizing
+    v.resize_with(v.len() + 10, || 42);
+    assert_eq!(*v0, 13);
+    v.resize_with(2, || panic!());
+    assert_eq!(*v0, 13);
+
+    // No-op reservation
+    v.reserve(32);
+    v.reserve_exact(32);
+    assert_eq!(*v0, 13);
+
+    // Partial draining
+    v.resize_with(10, || 42);
+    next_then_drop(v.drain(5..));
+    assert_eq!(*v0, 13);
+
+    // Splicing
+    v.resize_with(10, || 42);
+    next_then_drop(v.splice(5.., vec![1, 2, 3, 4, 5])); // empty tail after range
+    assert_eq!(*v0, 13);
+    next_then_drop(v.splice(5..8, vec![1])); // replacement is smaller than original range
+    assert_eq!(*v0, 13);
+    next_then_drop(v.splice(5..6, vec![1; 10].into_iter().filter(|_| true))); // lower bound not exact
+    assert_eq!(*v0, 13);
+
+    // Smoke test that would fire even outside Miri if an actual relocation happened.
+    *v0 -= 13;
+    assert_eq!(v[0], 0);
 }
 
 // https://github.com/rust-lang/rust/pull/49496 introduced specialization based on:
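The pattern this test relies on can be reproduced standalone. A minimal sketch (capacity `16` and value `13` are arbitrary): as long as the vector never outgrows its reserved capacity, the buffer does not move and the laundered reference stays usable when the program runs natively. What the rest of this patch adds is the aliasing-model side of that guarantee, so that the library internals no longer invalidate such a reference and Miri accepts this code too:

```rust
fn main() {
    let mut v: Vec<i32> = Vec::with_capacity(16);
    v.push(13);
    // Launder the lifetime exactly as the test does; this is only sound
    // because `v` is guaranteed not to reallocate below.
    let v0 = unsafe { &mut *(&mut v[0] as *mut i32) };
    v.push(1);
    v.pop();
    v.truncate(1);
    assert_eq!(*v0, 13);
}
```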
diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs
index 80574efe062..a48e48d7da3 100644
--- a/src/liballoc/vec.rs
+++ b/src/liballoc/vec.rs
@@ -740,7 +740,8 @@ pub fn truncate(&mut self, len: usize) {
             if len > self.len {
                 return;
             }
-            let s = self.get_unchecked_mut(len..) as *mut _;
+            let remaining_len = self.len - len;
+            let s = slice::from_raw_parts_mut(self.as_mut_ptr().add(len), remaining_len);
             self.len = len;
             ptr::drop_in_place(s);
         }
@@ -963,13 +964,15 @@ pub unsafe fn set_len(&mut self, new_len: usize) {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn swap_remove(&mut self, index: usize) -> T {
+        let len = self.len();
+        assert!(index < len);
         unsafe {
             // We replace self[index] with the last element. Note that if the
-            // bounds check on hole succeeds there must be a last element (which
+            // bounds check above succeeds there must be a last element (which
             // can be self[index] itself).
-            let hole: *mut T = &mut self[index];
-            let last = ptr::read(self.get_unchecked(self.len - 1));
-            self.len -= 1;
+            let last = ptr::read(self.as_ptr().add(len - 1));
+            let hole: *mut T = self.as_mut_ptr().add(index);
+            self.set_len(len - 1);
             ptr::replace(hole, last)
         }
     }
@@ -1200,7 +1203,7 @@ pub fn pop(&mut self) -> Option<T> {
         } else {
             unsafe {
                 self.len -= 1;
-                Some(ptr::read(self.get_unchecked(self.len())))
+                Some(ptr::read(self.as_ptr().add(self.len())))
             }
         }
     }
@@ -2020,7 +2023,7 @@ impl<T, I> SpecExtend<T, I> for Vec<T>
         let (lower, _) = iterator.size_hint();
         let mut vector = Vec::with_capacity(lower.saturating_add(1));
         unsafe {
-            ptr::write(vector.get_unchecked_mut(0), element);
+            ptr::write(vector.as_mut_ptr(), element);
             vector.set_len(1);
         }
         vector
@@ -2122,8 +2125,9 @@ fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
         self.reserve(slice.len());
         unsafe {
             let len = self.len();
+            let dst_slice = slice::from_raw_parts_mut(self.as_mut_ptr().add(len), slice.len());
+            dst_slice.copy_from_slice(slice);
             self.set_len(len + slice.len());
-            self.get_unchecked_mut(len..).copy_from_slice(slice);
         }
     }
 }
@@ -2144,7 +2148,7 @@ fn extend_desugared<I: Iterator<Item = T>>(&mut self, mut iterator: I) {
             self.reserve(lower.saturating_add(1));
         }
         unsafe {
-            ptr::write(self.get_unchecked_mut(len), element);
+            ptr::write(self.as_mut_ptr().add(len), element);
             // NB can't overflow since we would have had to alloc the address space
             self.set_len(len + 1);
         }
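The `swap_remove` rewrite reorders the raw operations (read the last element before computing `hole`, and go through `as_mut_ptr`/`set_len` rather than creating an intermediate `&mut self[index]` reference), but its user-visible semantics are unchanged. A quick refresher on those semantics, runnable as-is:

```rust
fn main() {
    let mut v = vec!["a", "b", "c", "d"];
    // `swap_remove` fills the hole with the *last* element instead of
    // shifting the tail down, so it is O(1) but does not preserve order.
    assert_eq!(v.swap_remove(1), "b");
    assert_eq!(v, ["a", "d", "c"]);
    // When removing the last element, the hole and the last element coincide.
    assert_eq!(v.swap_remove(2), "c");
    assert_eq!(v, ["a", "d"]);
}
```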
diff --git a/src/libcore/result.rs b/src/libcore/result.rs
index 0087b92f1f2..c7b5777a16e 100644
--- a/src/libcore/result.rs
+++ b/src/libcore/result.rs
@@ -521,14 +521,16 @@ pub fn map<U, F: FnOnce(T) -> U>(self, op: F) -> Result<U, E> {
         }
     }
 
-    /// Applies a function to the contained value (if any),
-    /// or returns the provided default (if not).
+    /// Applies a function to the contained value (if [`Ok`]),
+    /// or returns the provided default (if [`Err`]).
     ///
     /// Arguments passed to `map_or` are eagerly evaluated; if you are passing
     /// the result of a function call, it is recommended to use [`map_or_else`],
     /// which is lazily evaluated.
     ///
     /// [`map_or_else`]: #method.map_or_else
+    /// [`Ok`]: enum.Result.html#variant.Ok
+    /// [`Err`]: enum.Result.html#variant.Err
     ///
     /// # Examples
     ///
diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs
index 10d3101ebb8..0b182d42287 100644
--- a/src/librustc_mir/interpret/eval_context.rs
+++ b/src/librustc_mir/interpret/eval_context.rs
@@ -14,11 +14,11 @@
     sign_extend, truncate, AllocId, FrameInfo, GlobalId, InterpResult, Pointer, Scalar,
 };
 use rustc_middle::ty::layout::{self, TyAndLayout};
-use rustc_middle::ty::query::TyCtxtAt;
-use rustc_middle::ty::subst::SubstsRef;
-use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable};
+use rustc_middle::ty::{
+    self, fold::BottomUpFolder, query::TyCtxtAt, subst::SubstsRef, Ty, TyCtxt, TypeFoldable,
+};
 use rustc_span::source_map::DUMMY_SP;
-use rustc_target::abi::{Abi, Align, HasDataLayout, LayoutOf, Size, TargetDataLayout};
+use rustc_target::abi::{Align, HasDataLayout, LayoutOf, Size, TargetDataLayout};
 
 use super::{
     Immediate, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, OpTy, Operand, Place, PlaceTy,
@@ -213,6 +213,7 @@ fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
 /// Test if it is valid for a MIR assignment to assign `src`-typed place to `dest`-typed value.
 /// This test should be symmetric, as it is primarily about layout compatibility.
 pub(super) fn mir_assign_valid_types<'tcx>(
+    tcx: TyCtxt<'tcx>,
     src: TyAndLayout<'tcx>,
     dest: TyAndLayout<'tcx>,
 ) -> bool {
@@ -220,23 +221,42 @@ pub(super) fn mir_assign_valid_types<'tcx>(
         // Equal types, all is good.
         return true;
     }
-    // Type-changing assignments can happen for (at least) two reasons:
-    // - `&mut T` -> `&T` gets optimized from a reborrow to a mere assignment.
-    // - Subtyping is used. While all normal lifetimes are erased, higher-ranked lifetime
-    //   bounds are still around and can lead to type differences.
-    // There is no good way to check the latter, so we compare layouts instead -- but only
-    // for values with `Scalar`/`ScalarPair` abi.
-    // FIXME: Do something more accurate, type-based.
-    match &src.abi {
-        Abi::Scalar(..) | Abi::ScalarPair(..) => src.layout == dest.layout,
-        _ => false,
+    if src.layout != dest.layout {
+        // Layout differs, definitely not equal.
+        // We do this here because Miri would *do the wrong thing* if we allowed layout-changing
+        // assignments.
+        return false;
     }
+
+    // Type-changing assignments can happen for (at least) two reasons:
+    // 1. `&mut T` -> `&T` gets optimized from a reborrow to a mere assignment.
+    // 2. Subtyping is used. While all normal lifetimes are erased, higher-ranked types
+    //    with their late-bound lifetimes are still around and can lead to type differences.
+    // Normalize both of them away.
+    let normalize = |ty: Ty<'tcx>| {
+        ty.fold_with(&mut BottomUpFolder {
+            tcx,
+            // Normalize all references to immutable.
+            ty_op: |ty| match ty.kind {
+                ty::Ref(_, pointee, _) => tcx.mk_imm_ref(tcx.lifetimes.re_erased, pointee),
+                _ => ty,
+            },
+            // We just erase all late-bound lifetimes, but this is not fully correct (FIXME):
+            // lifetimes in invariant positions could matter (e.g. through associated types).
+            // We rely on the fact that layout was confirmed to be equal above.
+            lt_op: |_| tcx.lifetimes.re_erased,
+            // Leave consts unchanged.
+            ct_op: |ct| ct,
+        })
+    };
+    normalize(src.ty) == normalize(dest.ty)
 }
 
 /// Use the already known layout if given (but sanity check in debug mode),
 /// or compute the layout.
 #[cfg_attr(not(debug_assertions), inline(always))]
 pub(super) fn from_known_layout<'tcx>(
+    tcx: TyCtxt<'tcx>,
     known_layout: Option<TyAndLayout<'tcx>>,
     compute: impl FnOnce() -> InterpResult<'tcx, TyAndLayout<'tcx>>,
 ) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
@@ -246,7 +266,7 @@ pub(super) fn from_known_layout<'tcx>(
     if cfg!(debug_assertions) {
         let check_layout = compute()?;
         assert!(
-            mir_assign_valid_types(check_layout, known_layout),
+            mir_assign_valid_types(tcx, check_layout, known_layout),
             "expected type differs from actual type.\nexpected: {:?}\nactual: {:?}",
             known_layout.ty,
             check_layout.ty,
@@ -424,7 +444,7 @@ pub fn layout_of_local(
         // have to support that case (mostly by skipping all caching).
         match frame.locals.get(local).and_then(|state| state.layout.get()) {
             None => {
-                let layout = from_known_layout(layout, || {
+                let layout = from_known_layout(self.tcx.tcx, layout, || {
                     let local_ty = frame.body.local_decls[local].ty;
                     let local_ty =
                         self.subst_from_frame_and_normalize_erasing_regions(frame, local_ty);
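To make the "normalize, then compare" idea concrete without pulling in rustc internals, here is a toy model; the `Ty` enum and `normalize` function below are hypothetical stand-ins, not the real `BottomUpFolder` API, and lifetimes are omitted entirely:

```rust
// Toy stand-in for rustc's `Ty`: just enough structure to show the idea.
#[derive(Clone, PartialEq, Debug)]
enum Ty {
    Unit,
    Ref { mutable: bool, pointee: Box<Ty> },
}

// Bottom-up fold that rewrites every reference to an immutable one,
// mirroring the `ty_op` above.
fn normalize(ty: &Ty) -> Ty {
    match ty {
        Ty::Unit => Ty::Unit,
        Ty::Ref { pointee, .. } => {
            Ty::Ref { mutable: false, pointee: Box::new(normalize(pointee)) }
        }
    }
}

fn main() {
    let src = Ty::Ref { mutable: true, pointee: Box::new(Ty::Unit) }; // &mut ()
    let dest = Ty::Ref { mutable: false, pointee: Box::new(Ty::Unit) }; // &()
    assert_ne!(src, dest); // the types differ as written...
    assert_eq!(normalize(&src), normalize(&dest)); // ...but normalize equal
}
```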
diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs
index 12595e4e4d9..03614b2803f 100644
--- a/src/librustc_mir/interpret/operand.rs
+++ b/src/librustc_mir/interpret/operand.rs
@@ -529,7 +529,7 @@ pub(super) fn eval_operands(
             ty::ConstKind::Value(val_val) => val_val,
         };
         // Other cases need layout.
-        let layout = from_known_layout(layout, || self.layout_of(val.ty))?;
+        let layout = from_known_layout(self.tcx.tcx, layout, || self.layout_of(val.ty))?;
         let op = match val_val {
             ConstValue::ByRef { alloc, offset } => {
                 let id = self.tcx.alloc_map.lock().create_memory_alloc(alloc);
diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs
index ec299cdd213..716c7c7d933 100644
--- a/src/librustc_mir/interpret/place.rs
+++ b/src/librustc_mir/interpret/place.rs
@@ -868,7 +868,7 @@ fn copy_op_no_validate(
         // We do NOT compare the types for equality, because well-typed code can
         // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
         assert!(
-            mir_assign_valid_types(src.layout, dest.layout),
+            mir_assign_valid_types(self.tcx.tcx, src.layout, dest.layout),
             "type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
             src.layout.ty,
             dest.layout.ty,
@@ -922,7 +922,7 @@ pub fn copy_op_transmute(
         src: OpTy<'tcx, M::PointerTag>,
         dest: PlaceTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx> {
-        if mir_assign_valid_types(src.layout, dest.layout) {
+        if mir_assign_valid_types(self.tcx.tcx, src.layout, dest.layout) {
             // Fast path: Just use normal `copy_op`
             return self.copy_op(src, dest);
         }
diff --git a/src/librustc_mir/interpret/terminator.rs b/src/librustc_mir/interpret/terminator.rs
index 8ad743d2b8b..6ebe5b80370 100644
--- a/src/librustc_mir/interpret/terminator.rs
+++ b/src/librustc_mir/interpret/terminator.rs
@@ -75,7 +75,6 @@ pub(super) fn eval_terminator(
             }
 
             Drop { location, target, unwind } => {
-                // FIXME(CTFE): forbid drop in const eval
                 let place = self.eval_place(location)?;
                 let ty = place.layout.ty;
                 trace!("TerminatorKind::drop: {:?}, type {}", location, ty);
diff --git a/src/test/ui/consts/const-eval/issue-70804-fn-subtyping.rs b/src/test/ui/consts/const-eval/issue-70804-fn-subtyping.rs
new file mode 100644
index 00000000000..59d46ea66c9
--- /dev/null
+++ b/src/test/ui/consts/const-eval/issue-70804-fn-subtyping.rs
@@ -0,0 +1,10 @@
+// check-pass
+#![feature(const_fn)]
+
+const fn nested(x: (for<'a> fn(&'a ()), String)) -> (fn(&'static ()), String) {
+    x
+}
+
+pub const TEST: (fn(&'static ()), String) = nested((|_x| (), String::new()));
+
+fn main() {}
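Both kinds of type-changing assignment that `mir_assign_valid_types` now accepts can be written in ordinary, well-typed Rust. A small runnable sketch of each:

```rust
fn main() {
    // 1. `&mut T` -> `&T`: a reborrow that optimized MIR may turn into a
    //    plain assignment between differently-typed places.
    let mut x = 0_u32;
    let m: &mut u32 = &mut x;
    let s: &u32 = m;
    assert_eq!(*s, 0);

    // 2. Higher-ranked subtyping, the shape of the `check-pass` test above:
    //    a `for<'a> fn(&'a ())` value flows into a `fn(&'static ())` place.
    let general: for<'a> fn(&'a ()) = |_| ();
    let specific: fn(&'static ()) = general;
    specific(&());
}
```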