From 0eb782ba13d34c75de2c3d72343a1c946e57d7d3 Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Sat, 8 Jun 2024 16:13:45 +0200
Subject: [PATCH] ScalarInt: size mismatches are a bug, do not delay the panic

---
 src/constant.rs            |  6 +++---
 src/intrinsics/llvm_x86.rs | 10 +++++-----
 src/intrinsics/simd.rs     | 11 +++--------
 src/value_and_place.rs     |  4 ++--
 4 files changed, 13 insertions(+), 18 deletions(-)

diff --git a/src/constant.rs b/src/constant.rs
index ba98f2e772c..a53598018f4 100644
--- a/src/constant.rs
+++ b/src/constant.rs
@@ -110,7 +110,7 @@ pub(crate) fn codegen_const_value<'tcx>(
                 if fx.clif_type(layout.ty).is_some() {
                     return CValue::const_val(fx, layout, int);
                 } else {
-                    let raw_val = int.size().truncate(int.assert_bits(int.size()));
+                    let raw_val = int.size().truncate(int.to_bits(int.size()));
                     let val = match int.size().bytes() {
                         1 => fx.bcx.ins().iconst(types::I8, raw_val as i64),
                         2 => fx.bcx.ins().iconst(types::I16, raw_val as i64),
@@ -501,12 +501,12 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
                             Ordering::Equal => scalar_int,
                             Ordering::Less => match ty.kind() {
                                 ty::Uint(_) => ScalarInt::try_from_uint(
-                                    scalar_int.assert_uint(scalar_int.size()),
+                                    scalar_int.to_uint(scalar_int.size()),
                                     fx.layout_of(*ty).size,
                                 )
                                 .unwrap(),
                                 ty::Int(_) => ScalarInt::try_from_int(
-                                    scalar_int.assert_int(scalar_int.size()),
+                                    scalar_int.to_int(scalar_int.size()),
                                     fx.layout_of(*ty).size,
                                 )
                                 .unwrap(),
diff --git a/src/intrinsics/llvm_x86.rs b/src/intrinsics/llvm_x86.rs
index 27b55ecc72e..d454f3c1de7 100644
--- a/src/intrinsics/llvm_x86.rs
+++ b/src/intrinsics/llvm_x86.rs
@@ -902,7 +902,7 @@ fn select4(
                     .span_fatal(span, "Index argument for `_mm_cmpestri` is not a constant");
             };
 
-            let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+            let imm8 = imm8.to_u8();
 
             codegen_inline_asm_inner(
                 fx,
@@ -955,7 +955,7 @@ fn select4(
                     .span_fatal(span, "Index argument for `_mm_cmpestrm` is not a constant");
             };
 
-            let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+            let imm8 = imm8.to_u8();
 
             codegen_inline_asm_inner(
                 fx,
@@ -1003,7 +1003,7 @@ fn select4(
                 );
             };
 
-            let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+            let imm8 = imm8.to_u8();
 
            codegen_inline_asm_inner(
                 fx,
@@ -1040,7 +1040,7 @@ fn select4(
                 );
             };
 
-            let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+            let imm8 = imm8.to_u8();
 
             codegen_inline_asm_inner(
                 fx,
@@ -1195,7 +1195,7 @@ fn select4(
                     .span_fatal(span, "Func argument for `_mm_sha1rnds4_epu32` is not a constant");
             };
 
-            let func = func.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", func));
+            let func = func.to_u8();
 
             codegen_inline_asm_inner(
                 fx,
diff --git a/src/intrinsics/simd.rs b/src/intrinsics/simd.rs
index 65eeaf156d8..ca910dccb0d 100644
--- a/src/intrinsics/simd.rs
+++ b/src/intrinsics/simd.rs
@@ -147,8 +147,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
 
             let total_len = lane_count * 2;
 
-            let indexes =
-                idx.iter().map(|idx| idx.unwrap_leaf().try_to_u32().unwrap()).collect::<Vec<u32>>();
+            let indexes = idx.iter().map(|idx| idx.unwrap_leaf().to_u32()).collect::<Vec<u32>>();
 
             for &idx in &indexes {
                 assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
@@ -282,9 +281,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 fx.tcx.dcx().span_fatal(span, "Index argument for `simd_insert` is not a constant");
             };
 
-            let idx: u32 = idx_const
-                .try_to_u32()
-                .unwrap_or_else(|_| panic!("kind not scalar: {:?}", idx_const));
+            let idx: u32 = idx_const.to_u32();
             let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
             if u64::from(idx) >= lane_count {
                 fx.tcx.dcx().span_fatal(
@@ -330,9 +327,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 return;
             };
 
-            let idx = idx_const
-                .try_to_u32()
-                .unwrap_or_else(|_| panic!("kind not scalar: {:?}", idx_const));
+            let idx = idx_const.to_u32();
             let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
             if u64::from(idx) >= lane_count {
                 fx.tcx.dcx().span_fatal(
diff --git a/src/value_and_place.rs b/src/value_and_place.rs
index 512a96450a4..1aa28daeafc 100644
--- a/src/value_and_place.rs
+++ b/src/value_and_place.rs
@@ -327,7 +327,7 @@ pub(crate) fn const_val(
 
         let val = match layout.ty.kind() {
             ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
-                let const_val = const_val.assert_bits(layout.size);
+                let const_val = const_val.to_bits(layout.size);
                 let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
                 let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
                 fx.bcx.ins().iconcat(lsb, msb)
@@ -339,7 +339,7 @@ pub(crate) fn const_val(
             | ty::Ref(..)
             | ty::RawPtr(..)
             | ty::FnPtr(..) => {
-                let raw_val = const_val.size().truncate(const_val.assert_bits(layout.size));
+                let raw_val = const_val.size().truncate(const_val.to_bits(layout.size));
                 fx.bcx.ins().iconst(clif_ty, raw_val as i64)
             }
             ty::Float(FloatTy::F32) => {