diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs
index a60964f0f75..12e492da6e9 100644
--- a/compiler/rustc_codegen_cranelift/src/constant.rs
+++ b/compiler/rustc_codegen_cranelift/src/constant.rs
@@ -116,7 +116,7 @@ pub(crate) fn codegen_const_value<'tcx>(
     }

     match const_val {
-        ConstValue::ZeroSized => unreachable!(), // we already handles ZST above
+        ConstValue::ZeroSized => unreachable!(), // we already handled ZST above
         ConstValue::Scalar(x) => match x {
             Scalar::Int(int) => {
                 if fx.clif_type(layout.ty).is_some() {
@@ -200,7 +200,7 @@ pub(crate) fn codegen_const_value<'tcx>(
                 CValue::by_val(val, layout)
             }
         },
-        ConstValue::ByRef { alloc_id, offset } => {
+        ConstValue::Indirect { alloc_id, offset } => {
             let alloc = fx.tcx.global_alloc(alloc_id).unwrap_memory();
             // FIXME: avoid creating multiple allocations for the same AllocId?
             CValue::by_ref(
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
index e17d587076f..c64a4008996 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -172,7 +172,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 .expect("simd_shuffle idx not const");

             let idx_bytes = match idx_const {
-                ConstValue::ByRef { alloc_id, offset } => {
+                ConstValue::Indirect { alloc_id, offset } => {
                     let alloc = fx.tcx.global_alloc(alloc_id).unwrap_memory();
                     let size = Size::from_bytes(
                         4 * ret_lane_count, /* size_of([u32; ret_lane_count]) */
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 18c3fd0f1d1..d3101df7a6a 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -116,7 +116,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                 let b_llval = bx.const_usize((end - start) as u64);
                 OperandValue::Pair(a_llval, b_llval)
             }
-            ConstValue::ByRef { alloc_id, offset } => {
+            ConstValue::Indirect { alloc_id, offset } => {
                 let alloc = bx.tcx().global_alloc(alloc_id).unwrap_memory();
                 // FIXME: should we attempt to avoid building the same AllocId multiple times?
                 return Self::from_const_alloc(bx, layout, alloc, offset);
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 7f336daa28a..d4b7d6f4e29 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -112,13 +112,13 @@ pub(super) fn op_to_const<'tcx>(
     ecx: &CompileTimeEvalContext<'_, 'tcx>,
     op: &OpTy<'tcx>,
 ) -> ConstValue<'tcx> {
+    // Handle ZST consistently and early.
+    if op.layout.is_zst() {
+        return ConstValue::ZeroSized;
+    }
+
     // We do not have value optimizations for everything.
     // Only scalars and slices, since they are very common.
-    // Note that further down we turn scalars of uninitialized bits back to `ByRef`. These can result
-    // from scalar unions that are initialized with one of their zero sized variants. We could
-    // instead allow `ConstValue::Scalar` to store `ScalarMaybeUninit`, but that would affect all
-    // the usual cases of extracting e.g. a `usize`, without there being a real use case for the
-    // `Undef` situation.
     let try_as_immediate = match op.layout.abi {
         Abi::Scalar(abi::Scalar::Initialized { .. }) => true,
         Abi::ScalarPair(..) => match op.layout.ty.kind() {
@@ -134,7 +134,7 @@ pub(super) fn op_to_const<'tcx>(
     let immediate = if try_as_immediate {
         Right(ecx.read_immediate(op).expect("normalization works on validated constants"))
     } else {
-        // It is guaranteed that any non-slice scalar pair is actually ByRef here.
+        // It is guaranteed that any non-slice scalar pair is actually `Indirect` here.
         // When we come back from raw const eval, we are always by-ref. The only way our op here is
         // by-val is if we are in destructure_mir_constant, i.e., if this is (a field of) something that we
         // "tried to make immediate" before. We wouldn't do that for non-slice scalar pairs or
@@ -144,28 +144,15 @@ pub(super) fn op_to_const<'tcx>(

     debug!(?immediate);

-    // We know `offset` is relative to the allocation, so we can use `into_parts`.
-    let to_const_value = |mplace: &MPlaceTy<'_>| {
-        debug!("to_const_value(mplace: {:?})", mplace);
-        match mplace.ptr().into_parts() {
-            (Some(alloc_id), offset) => ConstValue::ByRef { alloc_id, offset },
-            (None, offset) => {
-                assert!(mplace.layout.is_zst());
-                assert_eq!(
-                    offset.bytes() % mplace.layout.align.abi.bytes(),
-                    0,
-                    "this MPlaceTy must come from a validated constant, thus we can assume the \
-                     alignment is correct",
-                );
-                ConstValue::ZeroSized
-            }
-        }
-    };
     match immediate {
-        Left(ref mplace) => to_const_value(mplace),
+        Left(ref mplace) => {
+            // We know `offset` is relative to the allocation, so we can use `into_parts`.
+            let (alloc_id, offset) = mplace.ptr().into_parts();
+            let alloc_id = alloc_id.expect("cannot have `fake` place for non-ZST type");
+            ConstValue::Indirect { alloc_id, offset }
+        }
         // see comment on `let try_as_immediate` above
         Right(imm) => match *imm {
-            _ if imm.layout.is_zst() => ConstValue::ZeroSized,
             Immediate::Scalar(x) => ConstValue::Scalar(x),
             Immediate::ScalarPair(a, b) => {
                 debug!("ScalarPair(a: {:?}, b: {:?})", a, b);
@@ -186,7 +173,7 @@ pub(super) fn op_to_const<'tcx>(
                 let len: usize = len.try_into().unwrap();
                 ConstValue::Slice { data, start, end: start + len }
             }
-            Immediate::Uninit => to_const_value(&op.assert_mem_place()),
+            Immediate::Uninit => bug!("`Uninit` is not a valid value for {}", op.layout.ty),
         },
     }
 }
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index f27c5bee8b5..d61a360ef0f 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -756,7 +756,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         };
         let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
         let op = match val_val {
-            ConstValue::ByRef { alloc_id, offset } => {
+            ConstValue::Indirect { alloc_id, offset } => {
                 // We rely on mutability being set correctly in that allocation to prevent writes
                 // where none should happen.
                 let ptr = self.global_base_pointer(Pointer::new(alloc_id, offset))?;
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs
index 634a22d5dd6..a77d16e42e9 100644
--- a/compiler/rustc_middle/src/mir/interpret/value.rs
+++ b/compiler/rustc_middle/src/mir/interpret/value.rs
@@ -30,19 +30,21 @@ pub struct ConstAlloc<'tcx> {
 #[derive(Copy, Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Hash)]
 #[derive(HashStable, Lift)]
 pub enum ConstValue<'tcx> {
-    /// Used only for types with `layout::abi::Scalar` ABI.
+    /// Used for types with `layout::abi::Scalar` ABI.
     ///
     /// Not using the enum `Value` to encode that this must not be `Uninit`.
     Scalar(Scalar),

-    /// Only used for ZSTs.
+    /// Only for ZSTs.
     ZeroSized,

-    /// Used only for `&[u8]` and `&str`
+    /// Used for `&[u8]` and `&str`.
+    ///
+    /// This is worth the optimization since Rust has literals of that type.
     Slice { data: ConstAllocation<'tcx>, start: usize, end: usize },

-    /// A value not represented/representable by `Scalar` or `Slice`
-    ByRef {
+    /// A value not representable by the other variants; needs to be stored in-memory.
+    Indirect {
         /// The backing memory of the value. May contain more memory than needed for just the value
         /// if this points into some other larger ConstValue.
         ///
@@ -62,7 +64,7 @@ impl<'tcx> ConstValue<'tcx> {
     #[inline]
     pub fn try_to_scalar(&self) -> Option<Scalar<AllocId>> {
         match *self {
-            ConstValue::ByRef { .. } | ConstValue::Slice { .. } | ConstValue::ZeroSized => None,
+            ConstValue::Indirect { .. } | ConstValue::Slice { .. } | ConstValue::ZeroSized => None,
             ConstValue::Scalar(val) => Some(val),
         }
     }
diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs
index 54eb6cc7b49..6e3409f17ca 100644
--- a/compiler/rustc_middle/src/mir/mod.rs
+++ b/compiler/rustc_middle/src/mir/mod.rs
@@ -2914,7 +2914,7 @@ fn pretty_print_const_value<'tcx>(
                 _ => {}
             }
         }
-        (ConstValue::ByRef { alloc_id, offset }, ty::Array(t, n)) if *t == u8_type => {
+        (ConstValue::Indirect { alloc_id, offset }, ty::Array(t, n)) if *t == u8_type => {
             let n = n.try_to_target_usize(tcx).unwrap();
             let alloc = tcx.global_alloc(alloc_id).unwrap_memory();
             // cast is ok because we already checked for pointer size (32 or 64 bit) above
diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs
index 4b4efe64db2..c4fae27a7a5 100644
--- a/compiler/rustc_middle/src/mir/pretty.rs
+++ b/compiler/rustc_middle/src/mir/pretty.rs
@@ -460,7 +460,7 @@ impl<'tcx> Visitor<'tcx> for ExtraComments<'tcx> {
             ConstValue::ZeroSized => "<ZST>".to_string(),
             ConstValue::Scalar(s) => format!("Scalar({s:?})"),
             ConstValue::Slice { .. } => "Slice(..)".to_string(),
-            ConstValue::ByRef { .. } => "ByRef(..)".to_string(),
+            ConstValue::Indirect { .. } => "Indirect(..)".to_string(),
         };

         let fmt_valtree = |valtree: &ty::ValTree<'tcx>| match valtree {
@@ -709,7 +709,11 @@ pub fn write_allocations<'tcx>(
                 // `u8`/`str` slices, shouldn't contain pointers that we want to print.
                 Either::Right(std::iter::empty())
             }
-            ConstValue::ByRef { alloc_id, .. } => Either::Left(std::iter::once(alloc_id)),
+            ConstValue::Indirect { alloc_id, .. } => {
+                // FIXME: we don't actually want to print all of these, since some are printed nicely directly as values inline in MIR.
+                // Really we'd want `pretty_print_const_value` to decide which allocations to print, instead of having a separate visitor.
+                Either::Left(std::iter::once(alloc_id))
+            }
         }
     }
     struct CollectAllocIds(BTreeSet<AllocId>);
diff --git a/compiler/rustc_mir_transform/src/const_prop.rs b/compiler/rustc_mir_transform/src/const_prop.rs
index 0ffd64904ec..00e3e3a8f9f 100644
--- a/compiler/rustc_mir_transform/src/const_prop.rs
+++ b/compiler/rustc_mir_transform/src/const_prop.rs
@@ -143,7 +143,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
     #[inline(always)]
     fn enforce_alignment(_ecx: &InterpCx<'mir, 'tcx, Self>) -> CheckAlignment {
         // We do not check for alignment to avoid having to carry an `Align`
-        // in `ConstValue::ByRef`.
+        // in `ConstValue::Indirect`.
         CheckAlignment::No
     }

@@ -552,7 +552,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
             .ok()?;

         Some(ConstantKind::Val(
-            ConstValue::ByRef { alloc_id, offset: Size::ZERO },
+            ConstValue::Indirect { alloc_id, offset: Size::ZERO },
             value.layout.ty,
         ))
     }
diff --git a/compiler/rustc_mir_transform/src/large_enums.rs b/compiler/rustc_mir_transform/src/large_enums.rs
index 69489565212..f6526b5de1e 100644
--- a/compiler/rustc_mir_transform/src/large_enums.rs
+++ b/compiler/rustc_mir_transform/src/large_enums.rs
@@ -153,7 +153,7 @@ impl EnumSizeOpt {
                     span,
                     user_ty: None,
                     literal: ConstantKind::Val(
-                        interpret::ConstValue::ByRef { alloc_id, offset: Size::ZERO },
+                        interpret::ConstValue::Indirect { alloc_id, offset: Size::ZERO },
                         tmp_ty,
                     ),
                 };
diff --git a/compiler/rustc_monomorphize/src/collector.rs b/compiler/rustc_monomorphize/src/collector.rs
index 7a36e787c82..bf2581bc61e 100644
--- a/compiler/rustc_monomorphize/src/collector.rs
+++ b/compiler/rustc_monomorphize/src/collector.rs
@@ -1470,7 +1470,7 @@ fn collect_const_value<'tcx>(
 ) {
     match value {
         ConstValue::Scalar(Scalar::Ptr(ptr, _size)) => collect_alloc(tcx, ptr.provenance, output),
-        ConstValue::ByRef { alloc_id, .. } => collect_alloc(tcx, alloc_id, output),
+        ConstValue::Indirect { alloc_id, .. } => collect_alloc(tcx, alloc_id, output),
         ConstValue::Slice { data, start: _, end: _ } => {
             for &id in data.inner().provenance().ptrs().values() {
                 collect_alloc(tcx, id, output);
diff --git a/compiler/rustc_smir/src/rustc_smir/alloc.rs b/compiler/rustc_smir/src/rustc_smir/alloc.rs
index 3c50d5d2ddd..f7966564b7f 100644
--- a/compiler/rustc_smir/src/rustc_smir/alloc.rs
+++ b/compiler/rustc_smir/src/rustc_smir/alloc.rs
@@ -72,7 +72,7 @@ pub fn new_allocation<'tcx>(
                 .unwrap();
             allocation.stable(tables)
         }
-        ConstValue::ByRef { alloc_id, offset } => {
+        ConstValue::Indirect { alloc_id, offset } => {
            let alloc = tables.tcx.global_alloc(alloc_id).unwrap_memory();
            let ty_size = tables
                .tcx
diff --git a/src/tools/clippy/clippy_lints/src/utils/internal_lints/unnecessary_def_path.rs b/src/tools/clippy/clippy_lints/src/utils/internal_lints/unnecessary_def_path.rs
index f66f33fee16..4a5b6fa5c18 100644
--- a/src/tools/clippy/clippy_lints/src/utils/internal_lints/unnecessary_def_path.rs
+++ b/src/tools/clippy/clippy_lints/src/utils/internal_lints/unnecessary_def_path.rs
@@ -232,7 +232,7 @@ fn path_to_matched_type(cx: &LateContext<'_>, expr: &hir::Expr<'_>) -> Option<Vec<String>> {
            Res::Def(DefKind::Const | DefKind::Static(_), def_id) => match cx.tcx.const_eval_poly(def_id).ok()? {
-                ConstValue::ByRef { alloc_id, offset } if offset.bytes() == 0 => {
+                ConstValue::Indirect { alloc_id, offset } if offset.bytes() == 0 => {
                    read_mir_alloc_def_path(cx, cx.tcx.global_alloc(alloc_id).unwrap_memory().inner(), cx.tcx.type_of(def_id).instantiate_identity())
                },
                _ => None,
diff --git a/src/tools/clippy/clippy_utils/src/consts.rs b/src/tools/clippy/clippy_utils/src/consts.rs
index 03341feffb8..61b944667a4 100644
--- a/src/tools/clippy/clippy_utils/src/consts.rs
+++ b/src/tools/clippy/clippy_utils/src/consts.rs
@@ -684,7 +684,7 @@ pub fn miri_to_const<'tcx>(lcx: &LateContext<'tcx>, result: mir::ConstantKind<'tcx>) -> Option<Constant<'tcx>> {
            },
            _ => None,
        },
-        mir::ConstantKind::Val(ConstValue::ByRef { alloc_id, offset: _ }, _) => {
+        mir::ConstantKind::Val(ConstValue::Indirect { alloc_id, offset: _ }, _) => {
            let alloc = lcx.tcx.global_alloc(alloc_id).unwrap_memory();
            match result.ty().kind() {
                ty::Adt(adt_def, _) if adt_def.is_struct() => Some(Constant::Adt(result)),
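
Editorial note, not part of the patch: the rename is safe to apply mechanically because every consumer matches on `ConstValue` exhaustively, so any arm still spelled `ByRef` becomes a compile error. Below is a minimal standalone sketch of that pattern; `AllocId`, `Size`, and `Scalar` here are simplified stand-ins for the real compiler types, not rustc definitions.

```rust
// Stand-in types: simplified placeholders, not the real rustc definitions.
#[derive(Clone, Copy, Debug)]
struct AllocId(u64);
#[derive(Clone, Copy, Debug)]
struct Size(u64);
#[derive(Clone, Copy, Debug)]
struct Scalar(u128);

#[derive(Debug)]
enum ConstValue {
    /// Immediate scalar value.
    Scalar(Scalar),
    /// Zero-sized type; carries no data.
    ZeroSized,
    /// Byte range within a string/byte-slice allocation.
    Slice { start: usize, end: usize },
    /// Renamed from `ByRef`: an allocation id plus an offset into it.
    Indirect { alloc_id: AllocId, offset: Size },
}

// Because this match is exhaustive, renaming the variant forces every use
// site to be updated, mirroring how the patch touches each crate above.
fn describe(v: &ConstValue) -> String {
    match v {
        ConstValue::Scalar(s) => format!("Scalar({s:?})"),
        ConstValue::ZeroSized => "<ZST>".to_string(),
        ConstValue::Slice { start, end } => format!("Slice({start}..{end})"),
        ConstValue::Indirect { alloc_id, offset } => {
            format!("Indirect({alloc_id:?} + {offset:?})")
        }
    }
}

fn main() {
    let v = ConstValue::Indirect { alloc_id: AllocId(7), offset: Size(0) };
    println!("{}", describe(&v));
}
```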