Sync from rust 04ae50179a
commit 6a0ce7f6bb
@@ -469,8 +469,11 @@ pub(crate) fn codegen_terminator_call<'tcx>(
     // FIXME find a cleaner way to support varargs
     if fn_sig.c_variadic {
-        if fn_sig.abi != Abi::C {
-            fx.tcx.sess.span_fatal(span, &format!("Variadic call for non-C abi {:?}", fn_sig.abi));
+        if !matches!(fn_sig.abi, Abi::C { .. }) {
+            fx.tcx.sess.span_fatal(
+                span,
+                &format!("Variadic call for non-C abi {:?}", fn_sig.abi),
+            );
         }
         let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
         let abi_params = call_args
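The variadic ABI check switches from `!=` to `matches!`, which is what is needed once `Abi::C` is a struct-like variant carrying fields (presumably an unwind flag, as suggested by the `{ .. }` pattern). A minimal standalone sketch of the pattern, using a toy enum rather than rustc's `Abi`:

    // Toy sketch, not rustc code: once a variant has fields, comparing against the
    // bare variant name no longer works, but `matches!` with `{ .. }` ignores the fields.
    enum Abi {
        C { unwind: bool },
        Rust,
    }

    fn reject_non_c(abi: &Abi) {
        if !matches!(abi, Abi::C { .. }) {
            panic!("Variadic call for non-C abi");
        }
    }

    fn main() {
        reject_non_c(&Abi::C { unwind: false }); // fine
        // reject_non_c(&Abi::Rust);             // would panic
    }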
21  src/base.rs
@@ -835,6 +835,27 @@ fn is_fat_ptr<'tcx>(fx: &FunctionCx<'_, '_, 'tcx>, ty: Ty<'tcx>) -> bool {
             }
         }
         StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
+        StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
+            src,
+            dst,
+            count,
+        }) => {
+            let dst = codegen_operand(fx, dst);
+            let pointee = dst
+                .layout()
+                .pointee_info_at(fx, rustc_target::abi::Size::ZERO)
+                .expect("Expected pointer");
+            let dst = dst.load_scalar(fx);
+            let src = codegen_operand(fx, src).load_scalar(fx);
+            let count = codegen_operand(fx, count).load_scalar(fx);
+            let elem_size: u64 = pointee.size.bytes();
+            let bytes = if elem_size != 1 {
+                fx.bcx.ins().imul_imm(count, elem_size as i64)
+            } else {
+                count
+            };
+            fx.bcx.call_memcpy(fx.cx.module.target_config(), dst, src, bytes);
+        }
     }
 }
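The new arm lowers MIR's `CopyNonOverlapping` statement to Cranelift's `call_memcpy`. The MIR operand is an element count, so it is scaled by the pointee size to get a byte count, skipping the multiply for byte-sized elements. A minimal sketch of just that size computation, detached from Cranelift:

    // Illustrative only: element count -> byte count, as done above with imul_imm.
    fn copy_bytes(elem_size: u64, count: u64) -> u64 {
        if elem_size != 1 { count * elem_size } else { count }
    }

    fn main() {
        assert_eq!(copy_bytes(4, 10), 40); // e.g. ten u32 elements -> 40 bytes
        assert_eq!(copy_bytes(1, 7), 7);   // byte copies need no multiply
    }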
@@ -8,7 +8,7 @@
 use rustc_middle::mir::interpret::{
     read_target_uint, AllocId, Allocation, ConstValue, ErrorHandled, GlobalAlloc, Pointer, Scalar,
 };
-use rustc_middle::ty::{Const, ConstKind};
+use rustc_middle::ty::ConstKind;
 
 use cranelift_codegen::ir::GlobalValueData;
 use cranelift_module::*;
@@ -39,7 +39,10 @@ pub(crate) fn finalize(mut self, tcx: TyCtxt<'_>, module: &mut dyn Module) {
 pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, '_>) -> bool {
     let mut all_constants_ok = true;
     for constant in &fx.mir.required_consts {
-        let const_ = fx.monomorphize(constant.literal);
+        let const_ = match fx.monomorphize(constant.literal) {
+            ConstantKind::Ty(ct) => ct,
+            ConstantKind::Val(..) => continue,
+        };
         match const_.val {
             ConstKind::Value(_) => {}
             ConstKind::Unevaluated(def, ref substs, promoted) => {
@@ -115,19 +118,17 @@ pub(crate) fn codegen_constant<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     constant: &Constant<'tcx>,
 ) -> CValue<'tcx> {
-    let const_ = fx.monomorphize(constant.literal);
+    let const_ = match fx.monomorphize(constant.literal) {
+        ConstantKind::Ty(ct) => ct,
+        ConstantKind::Val(val, ty) => return codegen_const_value(fx, val, ty),
+    };
     let const_val = match const_.val {
         ConstKind::Value(const_val) => const_val,
         ConstKind::Unevaluated(def, ref substs, promoted) if fx.tcx.is_static(def.did) => {
             assert!(substs.is_empty());
             assert!(promoted.is_none());
 
-            return codegen_static_ref(
-                fx,
-                def.did,
-                fx.layout_of(fx.monomorphize(&constant.literal.ty)),
-            )
-            .to_cvalue(fx);
+            return codegen_static_ref(fx, def.did, fx.layout_of(const_.ty)).to_cvalue(fx);
         }
         ConstKind::Unevaluated(def, ref substs, promoted) => {
             match fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None) {
@@ -427,11 +428,14 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut Constant
 pub(crate) fn mir_operand_get_const_val<'tcx>(
     fx: &FunctionCx<'_, '_, 'tcx>,
     operand: &Operand<'tcx>,
-) -> Option<&'tcx Const<'tcx>> {
+) -> Option<ConstValue<'tcx>> {
     match operand {
         Operand::Copy(_) | Operand::Move(_) => None,
-        Operand::Constant(const_) => {
-            Some(fx.monomorphize(const_.literal).eval(fx.tcx, ParamEnv::reveal_all()))
-        }
+        Operand::Constant(const_) => match const_.literal {
+            ConstantKind::Ty(const_) => {
+                fx.monomorphize(const_).eval(fx.tcx, ParamEnv::reveal_all()).val.try_to_value()
+            }
+            ConstantKind::Val(val, _) => Some(val),
+        },
     }
 }
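With `mir_operand_get_const_val` now returning `Option<ConstValue<'tcx>>` instead of `Option<&'tcx Const<'tcx>>`, the intrinsic callers below drop the intermediate `.val` step and call `try_to_bits` on the value directly. A rough standalone model of the two-variant split driving these changes (toy types, not rustc's):

    // Toy model: a MIR constant is either a not-yet-evaluated type-level const
    // or an already-evaluated value; extracting a value must handle both cases.
    enum ConstantKind {
        Ty(TyConst),
        Val(u64),
    }

    struct TyConst(Option<u64>); // stand-in for ty::Const; may or may not hold a value

    fn const_val(c: &ConstantKind) -> Option<u64> {
        match c {
            ConstantKind::Ty(t) => t.0,     // stand-in for eval(..).val.try_to_value()
            ConstantKind::Val(v) => Some(*v), // already evaluated, return it directly
        }
    }

    fn main() {
        assert_eq!(const_val(&ConstantKind::Val(3)), Some(3));
        assert_eq!(const_val(&ConstantKind::Ty(TyConst(None))), None);
    }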
@@ -53,7 +53,7 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
         };
         llvm.x86.sse2.cmp.ps | llvm.x86.sse2.cmp.pd, (c x, c y, o kind) {
             let kind_const = crate::constant::mir_operand_get_const_val(fx, kind).expect("llvm.x86.sse2.cmp.* kind not const");
-            let flt_cc = match kind_const.val.try_to_bits(Size::from_bytes(1)).unwrap_or_else(|| panic!("kind not scalar: {:?}", kind_const)) {
+            let flt_cc = match kind_const.try_to_bits(Size::from_bytes(1)).unwrap_or_else(|| panic!("kind not scalar: {:?}", kind_const)) {
                 0 => FloatCC::Equal,
                 1 => FloatCC::LessThan,
                 2 => FloatCC::LessThanOrEqual,
@@ -84,7 +84,7 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
         llvm.x86.sse2.psrli.d, (c a, o imm8) {
             let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
             simd_for_each_lane(fx, a, ret, |fx, _lane_layout, res_lane_layout, lane| {
-                let res_lane = match imm8.val.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
+                let res_lane = match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
                     imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
                     _ => fx.bcx.ins().iconst(types::I32, 0),
                 };
@@ -94,7 +94,7 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
         llvm.x86.sse2.pslli.d, (c a, o imm8) {
             let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
             simd_for_each_lane(fx, a, ret, |fx, _lane_layout, res_lane_layout, lane| {
-                let res_lane = match imm8.val.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
+                let res_lane = match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
                     imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
                     _ => fx.bcx.ins().iconst(types::I32, 0),
                 };
@@ -85,8 +85,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
             use rustc_middle::mir::interpret::*;
             let idx_const = crate::constant::mir_operand_get_const_val(fx, idx).expect("simd_shuffle* idx not const");
 
-            let idx_bytes = match idx_const.val {
-                ty::ConstKind::Value(ConstValue::ByRef { alloc, offset }) => {
+            let idx_bytes = match idx_const {
+                ConstValue::ByRef { alloc, offset } => {
                     let ptr = Pointer::new(AllocId(0 /* dummy */), offset);
                     let size = Size::from_bytes(4 * ret_lane_count /* size_of([u32; ret_lane_count]) */);
                     alloc.get_bytes(fx, ptr, size).unwrap()
@@ -130,7 +130,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 );
             };
 
-            let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+            let idx = idx_const.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
             let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
             if idx >= lane_count.into() {
                 fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count));
@@ -159,7 +159,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 return;
             };
 
-            let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+            let idx = idx_const.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
             let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
             if idx >= lane_count.into() {
                 fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));