Merge pull request #1247 from bjorn3/melt_some_ice

Melt some ICE
bjorn3 2022-07-25 15:46:04 +02:00 committed by GitHub
commit 722733c0c6
11 changed files with 227 additions and 107 deletions

@@ -330,6 +330,17 @@ struct ExternTypeWrapper {
static REF1: &u8 = &42;
static REF2: &u8 = REF1;
assert_eq!(*REF1, *REF2);
extern "C" {
type A;
}
fn main() {
let x: &A = unsafe { &*(1usize as *const A) };
assert_eq!(unsafe { intrinsics::size_of_val(x) }, 0);
assert_eq!(unsafe { intrinsics::min_align_of_val(x) }, 1);
}
}
#[cfg(all(not(jit), target_arch = "x86_64", target_os = "linux"))]

@@ -128,6 +128,25 @@ enum Nums {
0 => loop {},
v => panic(v),
};
if black_box(false) {
// Based on https://github.com/rust-lang/rust/blob/2f320a224e827b400be25966755a621779f797cc/src/test/ui/debuginfo/debuginfo_with_uninhabitable_field_and_unsized.rs
let _ = Foo::<dyn Send>::new();
#[allow(dead_code)]
struct Foo<T: ?Sized> {
base: Never,
value: T,
}
impl<T: ?Sized> Foo<T> {
pub fn new() -> Box<Foo<T>> {
todo!()
}
}
enum Never {}
}
}
fn panic(_: u128) {

@@ -99,9 +99,6 @@ rm -r src/test/run-make/remap-path-prefix-dwarf # requires llvm-dwarfdump
# ============
rm src/test/ui/allocator/no_std-alloc-error-handler-default.rs # missing rust_oom definition
rm -r src/test/ui/polymorphization/ # polymorphization not yet supported
rm src/test/codegen-units/polymorphization/unused_type_parameters.rs # same
rm src/test/incremental/spike-neg1.rs # errors out for some reason
rm src/test/incremental/spike-neg2.rs # same
rm src/test/ui/issues/issue-74564-if-expr-stack-overflow.rs # gives a stackoverflow before the backend runs

@@ -4,6 +4,7 @@
mod pass_mode;
mod returning;
use cranelift_module::ModuleError;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::ty::layout::FnAbiOf;
use rustc_target::abi::call::{Conv, FnAbi};
@@ -69,7 +70,17 @@ pub(crate) fn import_function<'tcx>(
) -> FuncId {
let name = tcx.symbol_name(inst).name;
let sig = get_function_sig(tcx, module.isa().triple(), inst);
module.declare_function(name, Linkage::Import, &sig).unwrap()
match module.declare_function(name, Linkage::Import, &sig) {
Ok(func_id) => func_id,
Err(ModuleError::IncompatibleDeclaration(_)) => tcx.sess.fatal(&format!(
"attempt to declare `{name}` as function, but it was already declared as static"
)),
Err(ModuleError::IncompatibleSignature(_, prev_sig, new_sig)) => tcx.sess.fatal(&format!(
"attempt to declare `{name}` with signature {new_sig:?}, \
but it was already declared with signature {prev_sig:?}"
)),
Err(err) => Err::<_, _>(err).unwrap(),
}
}
impl<'tcx> FunctionCx<'_, '_, 'tcx> {
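The match above converts cranelift-module's `ModuleError::IncompatibleDeclaration` and `IncompatibleSignature` errors into fatal diagnostics instead of unwrapping them (and ICEing). A hedged sketch of the kind of source that could reach the first arm; the symbol name and declaration order are illustrative assumptions, not taken from a real reproducer:

```rust
// Hypothetical reproducer: rustc only warns (clashing_extern_declarations)
// when the same foreign symbol is declared with clashing kinds, so both
// declarations reach codegen. The static gets declared to the module first,
// then import_function tries to declare the same name as a function and
// cranelift-module answers with IncompatibleDeclaration, which now becomes
// the "already declared as static" fatal error instead of a panic.
extern "C" {
    #[allow(non_upper_case_globals)]
    static clashing_symbol: u8;
}

mod as_function {
    extern "C" {
        pub fn clashing_symbol();
    }
}

fn main() {
    unsafe {
        let _value = clashing_symbol; // first use: declared as data
        as_function::clashing_symbol(); // second use: declared as a function
    }
}
```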
@@ -182,6 +193,15 @@ enum ArgKind<'tcx> {
}
let fn_abi = fx.fn_abi.take().unwrap();
// FIXME implement variadics in cranelift
if fn_abi.c_variadic {
fx.tcx.sess.span_fatal(
fx.mir.span,
"Defining variadic functions is not yet supported by Cranelift",
);
}
let mut arg_abis_iter = fn_abi.args.iter();
let func_params = fx
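The new `c_variadic` check above rejects definitions of variadic functions up front; calling C variadics such as `printf` goes through a different path and is not what the message targets. A minimal nightly-only sketch of a definition that would be expected to hit this error under cg_clif (an assumption, not tested):

```rust
#![feature(c_variadic)]

// Summing `n` usize arguments from the va_list. Under cg_clif this definition
// should now produce the "Defining variadic functions is not yet supported by
// Cranelift" error instead of an ICE deeper in codegen.
pub unsafe extern "C" fn sum_args(mut n: usize, mut args: ...) -> usize {
    let mut total = 0usize;
    while n > 0 {
        total += args.arg::<usize>(); // VaListImpl::arg pulls the next argument
        n -= 1;
    }
    total
}

fn main() {}
```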

@@ -216,7 +216,7 @@ pub(super) fn adjust_arg_for_abi<'tcx>(
arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
is_owned: bool,
) -> SmallVec<[Value; 2]> {
assert_assignable(fx, arg.layout().ty, arg_abi.layout.ty);
assert_assignable(fx, arg.layout().ty, arg_abi.layout.ty, 16);
match arg_abi.mode {
PassMode::Ignore => smallvec![],
PassMode::Direct(_) => smallvec![arg.load_scalar(fx)],

@@ -316,14 +316,18 @@ fn data_id_for_static(
let attrs = tcx.codegen_fn_attrs(def_id);
let data_id = module
.declare_data(
&*symbol_name,
linkage,
is_mutable,
attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
)
.unwrap();
let data_id = match module.declare_data(
&*symbol_name,
linkage,
is_mutable,
attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
) {
Ok(data_id) => data_id,
Err(ModuleError::IncompatibleDeclaration(_)) => tcx.sess.fatal(&format!(
"attempt to declare `{symbol_name}` as static, but it was already declared as function"
)),
Err(err) => Err::<_, _>(err).unwrap(),
};
if rlinkage.is_some() {
// Comment copied from https://github.com/rust-lang/rust/blob/45060c2a66dfd667f88bd8b94261b28a58d85bd5/src/librustc_codegen_llvm/consts.rs#L141
@@ -428,7 +432,8 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut Constant
let data_id = match reloc_target_alloc {
GlobalAlloc::Function(instance) => {
assert_eq!(addend, 0);
let func_id = crate::abi::import_function(tcx, module, instance);
let func_id =
crate::abi::import_function(tcx, module, instance.polymorphize(tcx));
let local_func_id = module.declare_func_in_data(func_id, &mut data_ctx);
data_ctx.write_function_addr(offset.bytes() as u32, local_func_id);
continue;

@@ -18,86 +18,96 @@ pub(crate) fn codegen_inline_asm<'tcx>(
) {
// FIXME add .eh_frame unwind info directives
if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
let true_ = fx.bcx.ins().iconst(types::I32, 1);
fx.bcx.ins().trapnz(true_, TrapCode::User(1));
return;
} else if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
&& matches!(
template[1],
InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
)
&& template[2] == InlineAsmTemplatePiece::String("\n".to_string())
&& template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
&& template[4] == InlineAsmTemplatePiece::String("\n".to_string())
&& template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
&& matches!(
template[6],
InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
)
{
assert_eq!(operands.len(), 4);
let (leaf, eax_place) = match operands[1] {
InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
assert_eq!(
reg,
InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax))
);
(
crate::base::codegen_operand(fx, in_value).load_scalar(fx),
crate::base::codegen_place(fx, out_place.unwrap()),
)
}
_ => unreachable!(),
};
let ebx_place = match operands[0] {
InlineAsmOperand::Out { reg, late: true, place } => {
assert_eq!(
reg,
InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
X86InlineAsmRegClass::reg
))
);
crate::base::codegen_place(fx, place.unwrap())
}
_ => unreachable!(),
};
let (sub_leaf, ecx_place) = match operands[2] {
InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
assert_eq!(
reg,
InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx))
);
(
crate::base::codegen_operand(fx, in_value).load_scalar(fx),
crate::base::codegen_place(fx, out_place.unwrap()),
)
}
_ => unreachable!(),
};
let edx_place = match operands[3] {
InlineAsmOperand::Out { reg, late: true, place } => {
assert_eq!(
reg,
InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx))
);
crate::base::codegen_place(fx, place.unwrap())
}
_ => unreachable!(),
};
if !template.is_empty() {
if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
let true_ = fx.bcx.ins().iconst(types::I32, 1);
fx.bcx.ins().trapnz(true_, TrapCode::User(1));
return;
} else if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
&& matches!(
template[1],
InlineAsmTemplatePiece::Placeholder {
operand_idx: 0,
modifier: Some('r'),
span: _
}
)
&& template[2] == InlineAsmTemplatePiece::String("\n".to_string())
&& template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
&& template[4] == InlineAsmTemplatePiece::String("\n".to_string())
&& template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
&& matches!(
template[6],
InlineAsmTemplatePiece::Placeholder {
operand_idx: 0,
modifier: Some('r'),
span: _
}
)
{
assert_eq!(operands.len(), 4);
let (leaf, eax_place) = match operands[1] {
InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
assert_eq!(
reg,
InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax))
);
(
crate::base::codegen_operand(fx, in_value).load_scalar(fx),
crate::base::codegen_place(fx, out_place.unwrap()),
)
}
_ => unreachable!(),
};
let ebx_place = match operands[0] {
InlineAsmOperand::Out { reg, late: true, place } => {
assert_eq!(
reg,
InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
X86InlineAsmRegClass::reg
))
);
crate::base::codegen_place(fx, place.unwrap())
}
_ => unreachable!(),
};
let (sub_leaf, ecx_place) = match operands[2] {
InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
assert_eq!(
reg,
InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx))
);
(
crate::base::codegen_operand(fx, in_value).load_scalar(fx),
crate::base::codegen_place(fx, out_place.unwrap()),
)
}
_ => unreachable!(),
};
let edx_place = match operands[3] {
InlineAsmOperand::Out { reg, late: true, place } => {
assert_eq!(
reg,
InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx))
);
crate::base::codegen_place(fx, place.unwrap())
}
_ => unreachable!(),
};
let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);
let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);
eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
return;
} else if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
// ___chkstk, ___chkstk_ms and __alloca are only used on Windows
crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
} else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
crate::trap::trap_unimplemented(fx, "Alloca is not supported");
eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
return;
} else if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
// ___chkstk, ___chkstk_ms and __alloca are only used on Windows
crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
} else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
crate::trap::trap_unimplemented(fx, "Alloca is not supported");
}
}
let mut inputs = Vec::new();

@@ -404,7 +404,9 @@ fn codegen_regular_intrinsic_call<'tcx>(
};
size_of_val, (c ptr) {
let layout = fx.layout_of(substs.type_at(0));
let size = if layout.is_unsized() {
// Note: Can't use is_unsized here as truly unsized types need to take the fixed size
// branch
let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
let (_ptr, info) = ptr.load_scalar_pair(fx);
let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
size
@@ -418,7 +420,9 @@ fn codegen_regular_intrinsic_call<'tcx>(
};
min_align_of_val, (c ptr) {
let layout = fx.layout_of(substs.type_at(0));
let align = if layout.is_unsized() {
// Note: Can't use is_unsized here as truly unsized types need to take the fixed size
// branch
let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
let (_ptr, info) = ptr.load_scalar_pair(fx);
let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
align
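Replacing `layout.is_unsized()` with an `Abi::ScalarPair` check on the pointer's layout captures the distinction the comment describes: slices and trait objects use fat pointers, so their size and alignment come from the pointer metadata, while extern types are unsized yet have thin pointers and must fall back to the static layout. A standalone illustration (nightly `extern_types`; the concrete values mirror what the new mini_core test earlier in this commit asserts):

```rust
#![feature(extern_types)]

extern "C" {
    type Opaque; // truly unsized: no metadata, references to it are thin pointers
}

fn main() {
    // Fat pointer (ScalarPair ABI): the size comes from the length metadata.
    let slice: &[u8] = &[1, 2, 3];
    assert_eq!(std::mem::size_of_val(slice), 3);

    // Thin pointer to an extern type: the fixed-size branch is taken, so the
    // static layout is reported, namely size 0 and alignment 1.
    let opaque: &Opaque = unsafe { &*(1usize as *const Opaque) };
    assert_eq!(std::mem::size_of_val(opaque), 0);
    assert_eq!(std::mem::align_of_val(opaque), 1);
}
```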
@@ -1135,6 +1139,20 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
// FIXME implement black_box semantics
ret.write_cvalue(fx, a);
};
// FIXME implement variadics in cranelift
va_copy, (o _dest, o _src) {
fx.tcx.sess.span_fatal(
source_info.span,
"Defining variadic functions is not yet supported by Cranelift",
);
};
va_arg | va_end, (o _valist) {
fx.tcx.sess.span_fatal(
source_info.span,
"Defining variadic functions is not yet supported by Cranelift",
);
};
}
let ret_block = fx.get_block(destination.unwrap());

@@ -109,7 +109,8 @@ fn create_entry_fn(
tcx.mk_substs([GenericArg::from(main_ret_ty)].iter()),
)
.unwrap()
.unwrap();
.unwrap()
.polymorphize(tcx);
let report_name = tcx.symbol_name(report).name;
let report_sig = get_function_sig(tcx, m.isa().triple(), report);
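Both this hunk and the `import_function` call in the constants code now run instances through `Instance::polymorphize` before symbol lookup, presumably so references and definitions agree on the symbol name when `-Zpolymorphize` erases unused generic parameters; that is also why the test script no longer deletes the polymorphization test suites. A small sketch of the kind of function the flag targets (illustrative only):

```rust
// `T` is never used, so under -Zpolymorphize the two call sites below can
// share a single polymorphized instance instead of two monomorphizations.
fn unused_param<T>(x: u32) -> u32 {
    x + 1
}

fn main() {
    assert_eq!(unused_param::<String>(1), 2);
    assert_eq!(unused_param::<Vec<u8>>(2), 3);
}
```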

@@ -153,11 +153,7 @@ pub(crate) fn size_and_align_of_dst<'tcx>(
layout: TyAndLayout<'tcx>,
info: Value,
) -> (Value, Value) {
if !layout.is_unsized() {
let size = fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64);
let align = fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64);
return (size, align);
}
assert!(layout.is_unsized() || layout.abi == Abi::Uninhabited);
match layout.ty.kind() {
ty::Dynamic(..) => {
// load size/align from vtable
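The relaxed assertion above accepts `Abi::Uninhabited` layouts in addition to unsized ones, which is exactly the shape exercised by the new debuginfo-derived test earlier in this commit: a struct with an uninhabited field is itself uninhabited, yet an unsized tail still makes it a DST, so `size_and_align_of_dst` can legitimately see it. In plain Rust terms (a restatement of that test, not new behaviour):

```rust
// `Foo<dyn Send>` has Abi::Uninhabited because of `base`, but it is still a
// dynamically sized type because of `value`, so fat references to it exist
// and size/align queries on them go through size_and_align_of_dst.
enum Never {}

#[allow(dead_code)]
struct Foo<T: ?Sized> {
    base: Never,
    value: T,
}

#[allow(dead_code)]
fn inspect(foo: &Foo<dyn Send>) -> usize {
    std::mem::size_of_val(foo)
}

fn main() {}
```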

@@ -324,6 +324,12 @@ pub(crate) fn new_stack_slot(
};
}
if layout.size.bytes() >= u64::from(u32::MAX - 16) {
fx.tcx
.sess
.fatal(&format!("values of type {} are too big to store on the stack", layout.ty));
}
let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
kind: StackSlotKind::ExplicitSlot,
// FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
@@ -420,7 +426,7 @@ pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
}
pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
assert_assignable(fx, from.layout().ty, self.layout().ty);
assert_assignable(fx, from.layout().ty, self.layout().ty, 16);
self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
}
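The guard added in `new_stack_slot` above turns oversized locals into a clean fatal error; Cranelift stack slot sizes are 32-bit, hence the `u32::MAX - 16` bound. A hedged example of code that would be expected to trip it (the exact length is illustrative):

```rust
// The local needs a stack slot of roughly 4 GiB, which cannot be encoded, so
// cg_clif now reports that values of this type are too big to store on the
// stack instead of ICEing. (Even if codegen succeeded, a local this large
// would of course overflow any real thread stack at runtime.)
fn main() {
    let huge = [0u8; u32::MAX as usize];
    std::hint::black_box(&huge);
}
```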
@@ -782,18 +788,25 @@ pub(crate) fn assert_assignable<'tcx>(
fx: &FunctionCx<'_, '_, 'tcx>,
from_ty: Ty<'tcx>,
to_ty: Ty<'tcx>,
limit: usize,
) {
if limit == 0 {
// assert_assignable exists solely to catch bugs in cg_clif. it isn't necessary for
// soundness. don't attempt to check deep types to avoid exponential behavior in certain
// cases.
return;
}
match (from_ty.kind(), to_ty.kind()) {
(ty::Ref(_, a, _), ty::Ref(_, b, _))
| (
ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
) => {
assert_assignable(fx, *a, *b);
assert_assignable(fx, *a, *b, limit - 1);
}
(ty::Ref(_, a, _), ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }))
| (ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), ty::Ref(_, b, _)) => {
assert_assignable(fx, *a, *b);
assert_assignable(fx, *a, *b, limit - 1);
}
(ty::FnPtr(_), ty::FnPtr(_)) => {
let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
@@ -823,6 +836,17 @@ pub(crate) fn assert_assignable<'tcx>(
}
// dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
}
(&ty::Tuple(types_a), &ty::Tuple(types_b)) => {
let mut types_a = types_a.iter();
let mut types_b = types_b.iter();
loop {
match (types_a.next(), types_b.next()) {
(Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
(None, None) => return,
(Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
}
}
}
(&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
if adt_def_a.did() == adt_def_b.did() =>
{
@@ -830,18 +854,37 @@ pub(crate) fn assert_assignable<'tcx>(
let mut types_b = substs_b.types();
loop {
match (types_a.next(), types_b.next()) {
(Some(a), Some(b)) => assert_assignable(fx, a, b),
(Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
(None, None) => return,
(Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
}
}
}
(ty::Array(a, _), ty::Array(b, _)) => assert_assignable(fx, *a, *b),
(ty::Array(a, _), ty::Array(b, _)) => assert_assignable(fx, *a, *b, limit - 1),
(&ty::Closure(def_id_a, substs_a), &ty::Closure(def_id_b, substs_b))
if def_id_a == def_id_b =>
{
let mut types_a = substs_a.types();
let mut types_b = substs_b.types();
loop {
match (types_a.next(), types_b.next()) {
(Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
(None, None) => return,
(Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
}
}
}
(ty::Param(_), _) | (_, ty::Param(_)) if fx.tcx.sess.opts.unstable_opts.polymorphize => {
// No way to check if it is correct or not with polymorphization enabled
}
_ => {
assert_eq!(
from_ty, to_ty,
from_ty,
to_ty,
"Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
from_ty, to_ty, fx,
from_ty.kind(),
to_ty.kind(),
fx,
);
}
}
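A note on the new `limit` parameter, seeded with 16 at the `adjust_arg_for_abi` and `write_cvalue` call sites: `assert_assignable` now recurses into every type argument, including both elements of tuples and the captures of closures, so nested types multiply the work per level. A small illustration of the exponential growth the comment is guarding against (an assumed shape, not taken from an actual incident):

```rust
// Each alias pairs up the previous level, so a structural walk that visits
// both tuple elements touches 2^N leaves for N levels of nesting. With the
// depth capped at 16 the check stays cheap, and since assert_assignable is
// only a bug-catching aid (not needed for soundness), bailing out early is
// harmless.
type L0 = u8;
type L1 = (L0, L0);
type L2 = (L1, L1);
type L3 = (L2, L2);
// ...at 32 levels, a full walk would already mean billions of comparisons.

fn main() {
    let _deep: L3 = Default::default(); // perfectly fine for the compiler itself
}
```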