Sync from rust b5b13568fb5da4ac988bde370008d6134d3dfe6c

This commit is contained in:
bjorn3 2024-06-11 11:54:36 +00:00
commit c0477a1231
6 changed files with 41 additions and 21 deletions

View File

@ -832,9 +832,10 @@ fn codegen_stmt<'tcx>(
let val = match null_op { let val = match null_op {
NullOp::SizeOf => layout.size.bytes(), NullOp::SizeOf => layout.size.bytes(),
NullOp::AlignOf => layout.align.abi.bytes(), NullOp::AlignOf => layout.align.abi.bytes(),
NullOp::OffsetOf(fields) => { NullOp::OffsetOf(fields) => fx
layout.offset_of_subfield(fx, fields.iter()).bytes() .tcx
} .offset_of_subfield(ParamEnv::reveal_all(), layout, fields.iter())
.bytes(),
NullOp::UbChecks => { NullOp::UbChecks => {
let val = fx.tcx.sess.ub_checks(); let val = fx.tcx.sess.ub_checks();
let val = CValue::by_val( let val = CValue::by_val(

View File

@ -110,7 +110,7 @@ pub(crate) fn codegen_const_value<'tcx>(
if fx.clif_type(layout.ty).is_some() { if fx.clif_type(layout.ty).is_some() {
return CValue::const_val(fx, layout, int); return CValue::const_val(fx, layout, int);
} else { } else {
let raw_val = int.size().truncate(int.assert_bits(int.size())); let raw_val = int.size().truncate(int.to_bits(int.size()));
let val = match int.size().bytes() { let val = match int.size().bytes() {
1 => fx.bcx.ins().iconst(types::I8, raw_val as i64), 1 => fx.bcx.ins().iconst(types::I8, raw_val as i64),
2 => fx.bcx.ins().iconst(types::I16, raw_val as i64), 2 => fx.bcx.ins().iconst(types::I16, raw_val as i64),
@ -501,12 +501,12 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
Ordering::Equal => scalar_int, Ordering::Equal => scalar_int,
Ordering::Less => match ty.kind() { Ordering::Less => match ty.kind() {
ty::Uint(_) => ScalarInt::try_from_uint( ty::Uint(_) => ScalarInt::try_from_uint(
scalar_int.assert_uint(scalar_int.size()), scalar_int.to_uint(scalar_int.size()),
fx.layout_of(*ty).size, fx.layout_of(*ty).size,
) )
.unwrap(), .unwrap(),
ty::Int(_) => ScalarInt::try_from_int( ty::Int(_) => ScalarInt::try_from_int(
scalar_int.assert_int(scalar_int.size()), scalar_int.to_int(scalar_int.size()),
fx.layout_of(*ty).size, fx.layout_of(*ty).size,
) )
.unwrap(), .unwrap(),

View File

@ -288,6 +288,29 @@ fn produce_final_output_artifacts(
} }
} }
if sess.opts.json_artifact_notifications {
if codegen_results.modules.len() == 1 {
codegen_results.modules[0].for_each_output(|_path, ty| {
if sess.opts.output_types.contains_key(&ty) {
let descr = ty.shorthand();
// For a single CGU, the output file is renamed to drop the CGU-specific
// suffix, so we regenerate the path the same way here.
let path = crate_output.path(ty);
sess.dcx().emit_artifact_notification(path.as_path(), descr);
}
});
} else {
for module in &codegen_results.modules {
module.for_each_output(|path, ty| {
if sess.opts.output_types.contains_key(&ty) {
let descr = ty.shorthand();
sess.dcx().emit_artifact_notification(&path, descr);
}
});
}
}
}
// We leave the following files around by default: // We leave the following files around by default:
// - #crate#.o // - #crate#.o
// - #crate#.crate.metadata.o // - #crate#.crate.metadata.o

View File

@ -911,7 +911,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
.span_fatal(span, "Index argument for `_mm_cmpestri` is not a constant"); .span_fatal(span, "Index argument for `_mm_cmpestri` is not a constant");
}; };
let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8)); let imm8 = imm8.to_u8();
codegen_inline_asm_inner( codegen_inline_asm_inner(
fx, fx,
@ -964,7 +964,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
.span_fatal(span, "Index argument for `_mm_cmpestrm` is not a constant"); .span_fatal(span, "Index argument for `_mm_cmpestrm` is not a constant");
}; };
let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8)); let imm8 = imm8.to_u8();
codegen_inline_asm_inner( codegen_inline_asm_inner(
fx, fx,
@ -1012,7 +1012,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
); );
}; };
let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8)); let imm8 = imm8.to_u8();
codegen_inline_asm_inner( codegen_inline_asm_inner(
fx, fx,
@ -1049,7 +1049,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
); );
}; };
let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8)); let imm8 = imm8.to_u8();
codegen_inline_asm_inner( codegen_inline_asm_inner(
fx, fx,
@ -1204,7 +1204,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
.span_fatal(span, "Func argument for `_mm_sha1rnds4_epu32` is not a constant"); .span_fatal(span, "Func argument for `_mm_sha1rnds4_epu32` is not a constant");
}; };
let func = func.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", func)); let func = func.to_u8();
codegen_inline_asm_inner( codegen_inline_asm_inner(
fx, fx,

View File

@ -133,6 +133,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
.expect_const() .expect_const()
.eval(fx.tcx, ty::ParamEnv::reveal_all(), span) .eval(fx.tcx, ty::ParamEnv::reveal_all(), span)
.unwrap() .unwrap()
.1
.unwrap_branch(); .unwrap_branch();
assert_eq!(x.layout(), y.layout()); assert_eq!(x.layout(), y.layout());
@ -146,8 +147,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let total_len = lane_count * 2; let total_len = lane_count * 2;
let indexes = let indexes = idx.iter().map(|idx| idx.unwrap_leaf().to_u32()).collect::<Vec<u32>>();
idx.iter().map(|idx| idx.unwrap_leaf().try_to_u32().unwrap()).collect::<Vec<u32>>();
for &idx in &indexes { for &idx in &indexes {
assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len); assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
@ -281,9 +281,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
fx.tcx.dcx().span_fatal(span, "Index argument for `simd_insert` is not a constant"); fx.tcx.dcx().span_fatal(span, "Index argument for `simd_insert` is not a constant");
}; };
let idx: u32 = idx_const let idx: u32 = idx_const.to_u32();
.try_to_u32()
.unwrap_or_else(|_| panic!("kind not scalar: {:?}", idx_const));
let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx); let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
if u64::from(idx) >= lane_count { if u64::from(idx) >= lane_count {
fx.tcx.dcx().span_fatal( fx.tcx.dcx().span_fatal(
@ -329,9 +327,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
return; return;
}; };
let idx = idx_const let idx = idx_const.to_u32();
.try_to_u32()
.unwrap_or_else(|_| panic!("kind not scalar: {:?}", idx_const));
let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx); let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
if u64::from(idx) >= lane_count { if u64::from(idx) >= lane_count {
fx.tcx.dcx().span_fatal( fx.tcx.dcx().span_fatal(

View File

@ -327,7 +327,7 @@ impl<'tcx> CValue<'tcx> {
let val = match layout.ty.kind() { let val = match layout.ty.kind() {
ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => { ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
let const_val = const_val.assert_bits(layout.size); let const_val = const_val.to_bits(layout.size);
let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64); let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64); let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
fx.bcx.ins().iconcat(lsb, msb) fx.bcx.ins().iconcat(lsb, msb)
@ -339,7 +339,7 @@ impl<'tcx> CValue<'tcx> {
| ty::Ref(..) | ty::Ref(..)
| ty::RawPtr(..) | ty::RawPtr(..)
| ty::FnPtr(..) => { | ty::FnPtr(..) => {
let raw_val = const_val.size().truncate(const_val.assert_bits(layout.size)); let raw_val = const_val.size().truncate(const_val.to_bits(layout.size));
fx.bcx.ins().iconst(clif_ty, raw_val as i64) fx.bcx.ins().iconst(clif_ty, raw_val as i64)
} }
ty::Float(FloatTy::F32) => { ty::Float(FloatTy::F32) => {