Auto merge of #127995 - workingjubilee:say-turings-prayer, r=BoxyUwU

compiler: Never debug_assert in codegen

In the name of Turing and his Hoarey heralds, assert our truths before creating a monster!

The `rustc_codegen_llvm` and `rustc_codegen_ssa` crates are fairly critical for rustc's correctness. Small mistakes here can easily result in undefined behavior, since a "small mistake" can mean something like "link and execute the wrong code". We should probably run any and all asserts in these modules unconditionally, regardless of whether this is a "debug build", and damn the performance cost.

...Especially because the performance cost appears to be *nothing*. It is not clear how much correctness we gain here, but I'll take free correctness improvements.
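For context on the mechanism: `debug_assert!` only evaluates its condition when the crate is compiled with debug assertions enabled, which is not the case for the optimized rustc builds that ship to users, whereas `assert!` is checked in every build. A minimal sketch of the difference, using a hypothetical `checked_div` function that is not from this PR:

```rust
fn checked_div(a: u32, b: u32) -> u32 {
    // Evaluated only when built with debug assertions
    // (the dev profile, or `-C debug-assertions=on`); a no-op otherwise.
    debug_assert!(b != 0, "divisor must be nonzero");

    // Evaluated in every build profile, including release.
    assert!(b != 0, "divisor must be nonzero");

    a / b
}

fn main() {
    // In a release build without debug assertions, only the `assert!`
    // above catches a zero divisor before the division runs.
    println!("{}", checked_div(10, 2));
}
```

The diff below makes exactly that trade in the codegen crates: the checks now also run in the compiler builds users actually get.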
bors 2024-07-25 07:52:31 +00:00
commit 28e684b470
16 changed files with 57 additions and 65 deletions

View File

@@ -330,7 +330,7 @@ pub(crate) fn get_static_inner(&self, def_id: DefId, llty: &'ll Type) -> &'ll Va
// If this assertion triggers, there's something wrong with commandline
// argument validation.
-debug_assert!(
+assert!(
!(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
&& self.tcx.sess.target.is_like_windows
&& self.tcx.sess.opts.cg.prefer_dynamic)

View File

@@ -170,7 +170,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
) -> DINodeCreationResult<'ll> {
// The debuginfo generated by this function is only valid if `ptr_type` is really just
// a (fat) pointer. Make sure it is not called for e.g. `Box<T, NonZSTAllocator>`.
-debug_assert_eq!(
+assert_eq!(
cx.size_and_align_of(ptr_type),
cx.size_and_align_of(Ty::new_mut_ptr(cx.tcx, pointee_type))
);
@@ -185,7 +185,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
match fat_pointer_kind(cx, pointee_type) {
None => {
// This is a thin pointer. Create a regular pointer type and give it the correct name.
-debug_assert_eq!(
+assert_eq!(
(data_layout.pointer_size, data_layout.pointer_align.abi),
cx.size_and_align_of(ptr_type),
"ptr_type={ptr_type}, pointee_type={pointee_type}",
@@ -240,8 +240,8 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
FatPtrKind::Slice => ("data_ptr", "length"),
};
-debug_assert_eq!(abi::FAT_PTR_ADDR, 0);
-debug_assert_eq!(abi::FAT_PTR_EXTRA, 1);
+assert_eq!(abi::FAT_PTR_ADDR, 0);
+assert_eq!(abi::FAT_PTR_EXTRA, 1);
// The data pointer type is a regular, thin pointer, regardless of whether this
// is a slice or a trait object.
@@ -498,7 +498,7 @@ pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll D
}
};
-debug_assert_eq!(di_node_for_uid as *const _, di_node as *const _);
+assert_eq!(di_node_for_uid as *const _, di_node as *const _);
} else {
debug_context(cx).type_map.insert(unique_type_id, di_node);
}
@@ -1060,7 +1060,7 @@ fn build_struct_type_di_node<'ll, 'tcx>(
let ty::Adt(adt_def, _) = struct_type.kind() else {
bug!("build_struct_type_di_node() called with non-struct-type: {:?}", struct_type);
};
-debug_assert!(adt_def.is_struct());
+assert!(adt_def.is_struct());
let containing_scope = get_namespace_for_item(cx, adt_def.did());
let struct_type_and_layout = cx.layout_of(struct_type);
let variant_def = adt_def.non_enum_variant();
@@ -1130,7 +1130,7 @@ fn build_upvar_field_di_nodes<'ll, 'tcx>(
}
};
-debug_assert!(
+assert!(
up_var_tys.iter().all(|t| t == cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t))
);

View File

@@ -204,7 +204,7 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
let enum_type_and_layout = cx.layout_of(enum_type);
let enum_type_name = compute_debuginfo_type_name(cx.tcx, enum_type, false);
-debug_assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout));
+assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout));
type_map::build_type_with_children(
cx,
@@ -279,7 +279,7 @@ pub(super) fn build_coroutine_di_node<'ll, 'tcx>(
let coroutine_type_and_layout = cx.layout_of(coroutine_type);
let coroutine_type_name = compute_debuginfo_type_name(cx.tcx, coroutine_type, false);
-debug_assert!(!wants_c_like_enum_debuginfo(coroutine_type_and_layout));
+assert!(!wants_c_like_enum_debuginfo(coroutine_type_and_layout));
type_map::build_type_with_children(
cx,
@@ -517,7 +517,7 @@ enum DiscrKind {
if is_128_bits {
DiscrKind::Exact128(discr_val)
} else {
-debug_assert_eq!(discr_val, discr_val as u64 as u128);
+assert_eq!(discr_val, discr_val as u64 as u128);
DiscrKind::Exact(discr_val as u64)
}
}
@@ -526,8 +526,8 @@ enum DiscrKind {
if is_128_bits {
DiscrKind::Range128(min, max)
} else {
-debug_assert_eq!(min, min as u64 as u128);
-debug_assert_eq!(max, max as u64 as u128);
+assert_eq!(min, min as u64 as u128);
+assert_eq!(max, max as u64 as u128);
DiscrKind::Range(min as u64, max as u64)
}
}
@@ -815,7 +815,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>(
}
}));
-debug_assert_eq!(
+assert_eq!(
cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty),
cx.size_and_align_of(super::tag_base_type(cx, enum_type_and_layout))
);

View File

@@ -106,7 +106,7 @@ fn tag_base_type<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
enum_type_and_layout: TyAndLayout<'tcx>,
) -> Ty<'tcx> {
-debug_assert!(match enum_type_and_layout.ty.kind() {
+assert!(match enum_type_and_layout.ty.kind() {
ty::Coroutine(..) => true,
ty::Adt(adt_def, _) => adt_def.is_enum(),
_ => false,
@@ -251,7 +251,7 @@ fn build_enum_variant_struct_type_di_node<'ll, 'tcx>(
variant_layout: TyAndLayout<'tcx>,
di_flags: DIFlags,
) -> &'ll DIType {
-debug_assert_eq!(variant_layout.ty, enum_type_and_layout.ty);
+assert_eq!(variant_layout.ty, enum_type_and_layout.ty);
type_map::build_type_with_children(
cx,

View File

@@ -65,7 +65,7 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
let visibility_flags = visibility_di_flags(cx, enum_adt_def.did(), enum_adt_def.did());
-debug_assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout));
+assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout));
type_map::build_type_with_children(
cx,
@@ -142,7 +142,7 @@ pub(super) fn build_coroutine_di_node<'ll, 'tcx>(
let containing_scope = get_namespace_for_item(cx, coroutine_def_id);
let coroutine_type_and_layout = cx.layout_of(coroutine_type);
-debug_assert!(!wants_c_like_enum_debuginfo(coroutine_type_and_layout));
+assert!(!wants_c_like_enum_debuginfo(coroutine_type_and_layout));
let coroutine_type_name = compute_debuginfo_type_name(cx.tcx, coroutine_type, false);

View File

@@ -36,7 +36,7 @@ mod private {
/// A unique identifier for anything that we create a debuginfo node for.
/// The types it contains are expected to already be normalized (which
-/// is debug_asserted in the constructors).
+/// is asserted in the constructors).
///
/// Note that there are some things that only show up in debuginfo, like
/// the separate type descriptions for each enum variant. These get an ID
@@ -58,12 +58,12 @@ pub(super) enum UniqueTypeId<'tcx> {
impl<'tcx> UniqueTypeId<'tcx> {
pub fn for_ty(tcx: TyCtxt<'tcx>, t: Ty<'tcx>) -> Self {
-debug_assert_eq!(t, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t));
+assert_eq!(t, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t));
UniqueTypeId::Ty(t, private::HiddenZst)
}
pub fn for_enum_variant_part(tcx: TyCtxt<'tcx>, enum_ty: Ty<'tcx>) -> Self {
-debug_assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
+assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
UniqueTypeId::VariantPart(enum_ty, private::HiddenZst)
}
@@ -72,7 +72,7 @@ pub fn for_enum_variant_struct_type(
enum_ty: Ty<'tcx>,
variant_idx: VariantIdx,
) -> Self {
-debug_assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
+assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
UniqueTypeId::VariantStructType(enum_ty, variant_idx, private::HiddenZst)
}
@@ -81,7 +81,7 @@ pub fn for_enum_variant_struct_type_wrapper(
enum_ty: Ty<'tcx>,
variant_idx: VariantIdx,
) -> Self {
-debug_assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
+assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
UniqueTypeId::VariantStructTypeCppLikeWrapper(enum_ty, variant_idx, private::HiddenZst)
}
@@ -90,11 +90,8 @@ pub fn for_vtable_ty(
self_type: Ty<'tcx>,
implemented_trait: Option<PolyExistentialTraitRef<'tcx>>,
) -> Self {
-debug_assert_eq!(
-self_type,
-tcx.normalize_erasing_regions(ParamEnv::reveal_all(), self_type)
-);
-debug_assert_eq!(
+assert_eq!(self_type, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), self_type));
+assert_eq!(
implemented_trait,
tcx.normalize_erasing_regions(ParamEnv::reveal_all(), implemented_trait)
);
@@ -252,10 +249,7 @@ pub(super) fn build_type_with_children<'ll, 'tcx>(
members: impl FnOnce(&CodegenCx<'ll, 'tcx>, &'ll DIType) -> SmallVec<&'ll DIType>,
generics: impl FnOnce(&CodegenCx<'ll, 'tcx>) -> SmallVec<&'ll DIType>,
) -> DINodeCreationResult<'ll> {
-debug_assert_eq!(
-debug_context(cx).type_map.di_node_for_unique_id(stub_info.unique_type_id),
-None
-);
+assert_eq!(debug_context(cx).type_map.di_node_for_unique_id(stub_info.unique_type_id), None);
debug_context(cx).type_map.insert(stub_info.unique_type_id, stub_info.metadata);

View File

@@ -81,7 +81,7 @@ pub(crate) fn fat_pointer_kind<'ll, 'tcx>(
ty::Dynamic(..) => Some(FatPtrKind::Dyn),
ty::Foreign(_) => {
// Assert that pointers to foreign types really are thin:
-debug_assert_eq!(
+assert_eq!(
cx.size_of(Ty::new_imm_ptr(cx.tcx, pointee_tail_ty)),
cx.size_of(Ty::new_imm_ptr(cx.tcx, cx.tcx.types.u8))
);

View File

@@ -352,7 +352,7 @@ fn exported_symbols_provider_local(
}
MonoItem::Fn(Instance { def: InstanceKind::DropGlue(def_id, Some(ty)), args }) => {
// A little sanity-check
-debug_assert_eq!(
+assert_eq!(
args.non_erasable_generics(tcx, def_id).next(),
Some(GenericArgKind::Type(ty))
);
@@ -370,7 +370,7 @@ fn exported_symbols_provider_local(
args,
}) => {
// A little sanity-check
-debug_assert_eq!(
+assert_eq!(
args.non_erasable_generics(tcx, def_id).next(),
Some(GenericArgKind::Type(ty))
);
@@ -462,7 +462,7 @@ fn upstream_monomorphizations_for_provider(
tcx: TyCtxt<'_>,
def_id: DefId,
) -> Option<&UnordMap<GenericArgsRef<'_>, CrateNum>> {
-debug_assert!(!def_id.is_local());
+assert!(!def_id.is_local());
tcx.upstream_monomorphizations(()).get(&def_id)
}

View File

@@ -1512,7 +1512,7 @@ enum CodegenState {
// We reduce the `running` counter by one. The
// `tokens.truncate()` below will take care of
// giving the Token back.
-debug_assert!(running_with_own_token > 0);
+assert!(running_with_own_token > 0);
running_with_own_token -= 1;
main_thread_state = MainThreadState::Lending;
}

View File

@@ -459,7 +459,7 @@ fn msvc_enum_fallback<'tcx>(
output: &mut String,
visited: &mut FxHashSet<Ty<'tcx>>,
) {
-debug_assert!(!wants_c_like_enum_debuginfo(ty_and_layout));
+assert!(!wants_c_like_enum_debuginfo(ty_and_layout));
output.push_str("enum2$<");
push_inner(output, visited);
push_close_angle_bracket(true, output);
@@ -660,7 +660,7 @@ fn push_generic_params_internal<'tcx>(
output: &mut String,
visited: &mut FxHashSet<Ty<'tcx>>,
) -> bool {
-debug_assert_eq!(args, tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args));
+assert_eq!(args, tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args));
let mut args = args.non_erasable_generics(tcx, def_id).peekable();
if args.peek().is_none() {
return false;

View File

@@ -84,7 +84,7 @@ fn llbb_with_cleanup<Bx: BuilderMethods<'a, 'tcx>>(
}
if is_cleanupret {
// Cross-funclet jump - need a trampoline
-debug_assert!(base::wants_new_eh_instructions(fx.cx.tcx().sess));
+assert!(base::wants_new_eh_instructions(fx.cx.tcx().sess));
debug!("llbb_with_cleanup: creating cleanup trampoline for {:?}", target);
let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
let trampoline_llbb = Bx::append_block(fx.cx, fx.llfn, name);

View File

@@ -194,7 +194,7 @@ fn calculate_debuginfo_offset<
}
_ => {
// Sanity check for `can_use_in_debuginfo`.
-debug_assert!(!elem.can_use_in_debuginfo());
+assert!(!elem.can_use_in_debuginfo());
bug!("unsupported var debuginfo projection `{:?}`", projection)
}
}
@@ -502,7 +502,7 @@ pub fn compute_per_local_var_debug_info(
let DebugInfoOffset { direct_offset, indirect_offsets, result: fragment_layout } =
calculate_debuginfo_offset(bx, &fragment.projection, var_layout);
-debug_assert!(indirect_offsets.is_empty());
+assert!(indirect_offsets.is_empty());
if fragment_layout.size == Size::ZERO {
// Fragment is a ZST, so does not represent anything. Avoid generating anything

View File

@@ -565,7 +565,7 @@ fn maybe_codegen_consume_direct(
for elem in place_ref.projection.iter() {
match elem {
mir::ProjectionElem::Field(ref f, _) => {
-debug_assert!(
+assert!(
!o.layout.ty.is_any_ptr(),
"Bad PlaceRef: destructing pointers should use cast/PtrMetadata, \
but tried to access field {f:?} of pointer {o:?}",

View File

@@ -55,7 +55,7 @@ pub fn alloca<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx, Value = V>>(
/// Creates a `PlaceRef` to this location with the given type.
pub fn with_type<'tcx>(self, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
-debug_assert!(
+assert!(
layout.is_unsized() || layout.abi.is_uninhabited() || self.llextra.is_none(),
"Had pointer metadata {:?} for sized type {layout:?}",
self.llextra,
@@ -488,7 +488,7 @@ pub fn codegen_place(
cg_base = match *elem {
mir::ProjectionElem::Deref => bx.load_operand(cg_base).deref(bx.cx()),
mir::ProjectionElem::Field(ref field, _) => {
-debug_assert!(
+assert!(
!cg_base.layout.ty.is_any_ptr(),
"Bad PlaceRef: destructing pointers should use cast/PtrMetadata, \
but tried to access field {field:?} of pointer {cg_base:?}",

View File

@@ -168,8 +168,8 @@ fn codegen_transmute(
dst: PlaceRef<'tcx, Bx::Value>,
) {
// The MIR validator enforces no unsized transmutes.
-debug_assert!(src.layout.is_sized());
-debug_assert!(dst.layout.is_sized());
+assert!(src.layout.is_sized());
+assert!(dst.layout.is_sized());
if let Some(val) = self.codegen_transmute_operand(bx, src, dst.layout) {
val.store(bx, dst);
@@ -223,8 +223,8 @@ fn codegen_transmute_operand(
match operand.val {
OperandValue::Ref(source_place_val) => {
-debug_assert_eq!(source_place_val.llextra, None);
-debug_assert!(matches!(operand_kind, OperandValueKind::Ref));
+assert_eq!(source_place_val.llextra, None);
+assert!(matches!(operand_kind, OperandValueKind::Ref));
Some(bx.load_operand(source_place_val.with_type(cast)).val)
}
OperandValue::ZeroSized => {
@@ -295,7 +295,7 @@ fn transmute_immediate(
to_scalar: abi::Scalar,
to_backend_ty: Bx::Type,
) -> Bx::Value {
-debug_assert_eq!(from_scalar.size(self.cx), to_scalar.size(self.cx));
+assert_eq!(from_scalar.size(self.cx), to_scalar.size(self.cx));
use abi::Primitive::*;
imm = bx.from_immediate(imm);
@@ -639,9 +639,7 @@ pub fn codegen_rvalue_operand(
(OperandValue::Immediate(llval), operand.layout)
}
mir::UnOp::PtrMetadata => {
-debug_assert!(
-operand.layout.ty.is_unsafe_ptr() || operand.layout.ty.is_ref(),
-);
+assert!(operand.layout.ty.is_unsafe_ptr() || operand.layout.ty.is_ref(),);
let (_, meta) = operand.val.pointer_parts();
assert_eq!(operand.layout.fields.count() > 1, meta.is_some());
if let Some(meta) = meta {
@@ -651,7 +649,7 @@ pub fn codegen_rvalue_operand(
}
}
};
-debug_assert!(
+assert!(
val.is_expected_variant_for_type(self.cx, layout),
"Made wrong variant {val:?} for type {layout:?}",
);
@@ -742,7 +740,7 @@ pub fn codegen_rvalue_operand(
bug!("Field {field_idx:?} is {p:?} making {layout:?}");
});
let scalars = self.value_kind(op.layout).scalars().unwrap();
-debug_assert_eq!(values.len(), scalars.len());
+assert_eq!(values.len(), scalars.len());
inputs.extend(values);
input_scalars.extend(scalars);
}
@@ -760,7 +758,7 @@ pub fn codegen_rvalue_operand(
);
let val = OperandValue::from_immediates(inputs);
-debug_assert!(
+assert!(
val.is_expected_variant_for_type(self.cx, layout),
"Made wrong variant {val:?} for type {layout:?}",
);
@@ -805,7 +803,7 @@ fn codegen_place_to_pointer(
let val = cg_place.val.address();
let ty = cg_place.layout.ty;
-debug_assert!(
+assert!(
if bx.cx().type_has_metadata(ty) {
matches!(val, OperandValue::Pair(..))
} else {
@@ -927,7 +925,7 @@ pub fn codegen_scalar_binop(
}
mir::BinOp::Cmp => {
use std::cmp::Ordering;
-debug_assert!(!is_float);
+assert!(!is_float);
let pred = |op| base::bin_op_to_icmp_predicate(op, is_signed);
if bx.cx().tcx().sess.opts.optimize == OptLevel::No {
// FIXME: This actually generates tighter assembly, and is a classic trick
@@ -1111,7 +1109,7 @@ fn value_kind(&self, layout: TyAndLayout<'tcx>) -> OperandValueKind {
if layout.is_zst() {
OperandValueKind::ZeroSized
} else if self.cx.is_backend_immediate(layout) {
-debug_assert!(!self.cx.is_backend_scalar_pair(layout));
+assert!(!self.cx.is_backend_scalar_pair(layout));
OperandValueKind::Immediate(match layout.abi {
abi::Abi::Scalar(s) => s,
abi::Abi::Vector { element, .. } => element,

View File

@@ -165,7 +165,7 @@ fn atomic_load(
size: Size,
) -> Self::Value;
fn load_from_place(&mut self, ty: Self::Type, place: PlaceValue<Self::Value>) -> Self::Value {
-debug_assert_eq!(place.llextra, None);
+assert_eq!(place.llextra, None);
self.load(ty, place.llval, place.align)
}
fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
@@ -184,7 +184,7 @@ fn write_operand_repeatedly(
fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
fn store_to_place(&mut self, val: Self::Value, place: PlaceValue<Self::Value>) -> Self::Value {
-debug_assert_eq!(place.llextra, None);
+assert_eq!(place.llextra, None);
self.store(val, place.llval, place.align)
}
fn store_with_flags(
@@ -200,7 +200,7 @@ fn store_to_place_with_flags(
place: PlaceValue<Self::Value>,
flags: MemFlags,
) -> Self::Value {
-debug_assert_eq!(place.llextra, None);
+assert_eq!(place.llextra, None);
self.store_with_flags(val, place.llval, place.align, flags)
}
fn atomic_store(
@@ -320,9 +320,9 @@ fn typed_place_copy_with_flags(
layout: TyAndLayout<'tcx>,
flags: MemFlags,
) {
-debug_assert!(layout.is_sized(), "cannot typed-copy an unsigned type");
-debug_assert!(src.llextra.is_none(), "cannot directly copy from unsized values");
-debug_assert!(dst.llextra.is_none(), "cannot directly copy into unsized values");
+assert!(layout.is_sized(), "cannot typed-copy an unsigned type");
+assert!(src.llextra.is_none(), "cannot directly copy from unsized values");
+assert!(dst.llextra.is_none(), "cannot directly copy into unsized values");
if flags.contains(MemFlags::NONTEMPORAL) {
// HACK(nox): This is inefficient but there is no nontemporal memcpy.
let ty = self.backend_type(layout);