ScalarInt: size mismatches are a bug, do not delay the panic
This commit is contained in:
parent 6210c26a5a
commit 0eb782ba13
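Every hunk below makes the same kind of change: the older ScalarInt accessors (assert_bits, assert_uint, assert_int, try_to_u8, try_to_u32) are replaced by their to_* counterparts, which treat a size mismatch as a bug and panic on the spot, so call sites that previously spelled the panic out via unwrap_or_else(|_| panic!("kind not scalar: ...")) now just call to_u8() or to_u32(). A minimal sketch of that call-site shift, using a hypothetical MiniScalarInt stand-in rather than the real rustc_middle type:

// Hypothetical stand-in for ScalarInt, only to illustrate the API shift;
// the real type stores the value together with its size.
#[derive(Debug, Clone, Copy)]
struct MiniScalarInt {
    data: u128,
    size_bytes: u8,
}

impl MiniScalarInt {
    // Old-style accessor: every caller has to decide what a mismatch means.
    fn try_to_u8(self) -> Result<u8, u8> {
        if self.size_bytes == 1 { Ok(self.data as u8) } else { Err(self.size_bytes) }
    }

    // New-style accessor: a mismatch is a compiler bug, so panic right here.
    fn to_u8(self) -> u8 {
        assert_eq!(self.size_bytes, 1, "expected a 1-byte scalar, got {:?}", self);
        self.data as u8
    }
}

fn main() {
    let imm8 = MiniScalarInt { data: 7, size_bytes: 1 };
    // Old call-site boilerplate, repeated at every use:
    let _old = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
    // New call site: the panic lives inside the accessor.
    let _new = imm8.to_u8();
}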
@@ -110,7 +110,7 @@ pub(crate) fn codegen_const_value<'tcx>(
             if fx.clif_type(layout.ty).is_some() {
                 return CValue::const_val(fx, layout, int);
             } else {
-                let raw_val = int.size().truncate(int.assert_bits(int.size()));
+                let raw_val = int.size().truncate(int.to_bits(int.size()));
                 let val = match int.size().bytes() {
                     1 => fx.bcx.ins().iconst(types::I8, raw_val as i64),
                     2 => fx.bcx.ins().iconst(types::I16, raw_val as i64),
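In this small-integer path, to_bits hands back the raw bit pattern as a u128 (and now panics if the requested size disagrees with the scalar's own size), and Size::truncate masks that pattern down to the value's width before the cast to i64 for iconst. Roughly what the masking does, as a sketch with a hypothetical truncate_to_size helper taking the size in bytes:

// Hypothetical helper approximating Size::truncate: keep only the low
// size_bytes * 8 bits of value, which is what feeds the iconst above.
fn truncate_to_size(size_bytes: u32, value: u128) -> u128 {
    let bits = size_bytes * 8;
    if bits >= 128 { value } else { value & ((1u128 << bits) - 1) }
}

fn main() {
    // Masking a wider bit pattern down to 1 byte keeps only the low 8 bits.
    assert_eq!(truncate_to_size(1, 0xFFFF_FFFF), 0xFF);
    assert_eq!(truncate_to_size(2, 0x1_2345), 0x2345);
}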
@@ -501,12 +501,12 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
                 Ordering::Equal => scalar_int,
                 Ordering::Less => match ty.kind() {
                     ty::Uint(_) => ScalarInt::try_from_uint(
-                        scalar_int.assert_uint(scalar_int.size()),
+                        scalar_int.to_uint(scalar_int.size()),
                         fx.layout_of(*ty).size,
                     )
                     .unwrap(),
                     ty::Int(_) => ScalarInt::try_from_int(
-                        scalar_int.assert_int(scalar_int.size()),
+                        scalar_int.to_int(scalar_int.size()),
                         fx.layout_of(*ty).size,
                     )
                     .unwrap(),
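The Ordering::Less arm re-encodes the constant at the target layout's size: to_uint/to_int read the value at the scalar's own size (panicking on a mismatch), and try_from_uint/try_from_int rebuild it at fx.layout_of(*ty).size, failing only if the value does not fit. The signed/unsigned split matters because changing an integer's width means zero-extension for unsigned values and sign-extension for signed ones. A small self-contained sketch with hypothetical widen_* helpers, using plain integers instead of ScalarInt and Size:

// Unsigned values widen by zero-extension: the stored low bits already are
// the value.
fn widen_unsigned(raw_low_bits: u128) -> u128 {
    raw_low_bits
}

// Signed values widen by sign-extension: shift the sign bit up to bit 127
// and back down as i128 so it is replicated across the new high bits.
fn widen_signed(raw_low_bits: u128, from_bytes: u32) -> i128 {
    let shift = 128 - from_bytes * 8;
    ((raw_low_bits << shift) as i128) >> shift
}

fn main() {
    // 0xFF as a 1-byte signed value is -1; widened it must stay -1.
    assert_eq!(widen_signed(0xFF, 1), -1);
    // The same bit pattern as an unsigned value is simply 255.
    assert_eq!(widen_unsigned(0xFF), 255);
}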
@@ -902,7 +902,7 @@ fn select4(
                     .span_fatal(span, "Index argument for `_mm_cmpestri` is not a constant");
             };

-            let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+            let imm8 = imm8.to_u8();

            codegen_inline_asm_inner(
                fx,
@@ -955,7 +955,7 @@ fn select4(
                     .span_fatal(span, "Index argument for `_mm_cmpestrm` is not a constant");
             };

-            let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+            let imm8 = imm8.to_u8();

            codegen_inline_asm_inner(
                fx,
@@ -1003,7 +1003,7 @@ fn select4(
                );
            };

-            let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+            let imm8 = imm8.to_u8();

            codegen_inline_asm_inner(
                fx,
@@ -1040,7 +1040,7 @@ fn select4(
                );
            };

-            let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+            let imm8 = imm8.to_u8();

            codegen_inline_asm_inner(
                fx,
@@ -1195,7 +1195,7 @@ fn select4(
                    .span_fatal(span, "Func argument for `_mm_sha1rnds4_epu32` is not a constant");
            };

-            let func = func.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", func));
+            let func = func.to_u8();

            codegen_inline_asm_inner(
                fx,
@@ -147,8 +147,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(

            let total_len = lane_count * 2;

-            let indexes =
-                idx.iter().map(|idx| idx.unwrap_leaf().try_to_u32().unwrap()).collect::<Vec<u32>>();
+            let indexes = idx.iter().map(|idx| idx.unwrap_leaf().to_u32()).collect::<Vec<u32>>();

            for &idx in &indexes {
                assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
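With the new to_u32() reads, each shuffle index is still validated the same way: it must fall inside 0..total_len, where total_len covers the lanes of both input vectors, because a shuffle picks lanes from the concatenation of its two operands. A standalone sketch of that check, with a hypothetical check_shuffle_indexes helper:

// Hypothetical standalone version of the bounds check above.
fn check_shuffle_indexes(indexes: &[u32], lane_count: u64) {
    let total_len = lane_count * 2;
    for &idx in indexes {
        assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
    }
}

fn main() {
    // Two 4-lane vectors: valid indexes are 0..8; an index of 8 would panic.
    check_shuffle_indexes(&[0, 3, 4, 7], 4);
}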
@@ -282,9 +281,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                fx.tcx.dcx().span_fatal(span, "Index argument for `simd_insert` is not a constant");
            };

-            let idx: u32 = idx_const
-                .try_to_u32()
-                .unwrap_or_else(|_| panic!("kind not scalar: {:?}", idx_const));
+            let idx: u32 = idx_const.to_u32();
            let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
            if u64::from(idx) >= lane_count {
                fx.tcx.dcx().span_fatal(
@@ -330,9 +327,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                return;
            };

-            let idx = idx_const
-                .try_to_u32()
-                .unwrap_or_else(|_| panic!("kind not scalar: {:?}", idx_const));
+            let idx = idx_const.to_u32();
            let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
            if u64::from(idx) >= lane_count {
                fx.tcx.dcx().span_fatal(
@@ -327,7 +327,7 @@ pub(crate) fn const_val(

        let val = match layout.ty.kind() {
            ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
-                let const_val = const_val.assert_bits(layout.size);
+                let const_val = const_val.to_bits(layout.size);
                let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
                let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
                fx.bcx.ins().iconcat(lsb, msb)
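Cranelift's iconst only takes a 64-bit immediate, so the 128-bit constant returned by to_bits is split into its low and high halves and reassembled with iconcat. A self-contained sketch of that split, with a hypothetical split_u128 helper:

// Hypothetical helper mirroring the lsb/msb computation above.
fn split_u128(const_val: u128) -> (i64, i64) {
    let lsb = const_val as u64 as i64; // low 64 bits
    let msb = (const_val >> 64) as u64 as i64; // high 64 bits
    (lsb, msb)
}

fn main() {
    let (lsb, msb) = split_u128(0x0123_4567_89AB_CDEF_0011_2233_4455_6677);
    assert_eq!(lsb as u64, 0x0011_2233_4455_6677);
    assert_eq!(msb as u64, 0x0123_4567_89AB_CDEF);
}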
@@ -339,7 +339,7 @@ pub(crate) fn const_val(
            | ty::Ref(..)
            | ty::RawPtr(..)
            | ty::FnPtr(..) => {
-                let raw_val = const_val.size().truncate(const_val.assert_bits(layout.size));
+                let raw_val = const_val.size().truncate(const_val.to_bits(layout.size));
                fx.bcx.ins().iconst(clif_ty, raw_val as i64)
            }
            ty::Float(FloatTy::F32) => {