From d757c4b904869967f1e665dc2bb9a2ca5122bc96 Mon Sep 17 00:00:00 2001
From: Scott McMurray
Date: Thu, 6 Apr 2023 16:24:32 -0700
Subject: [PATCH] Handle not all immediates having `abi::Scalar`s

---
 compiler/rustc_codegen_ssa/src/mir/rvalue.rs | 54 ++++++++++++++----
 tests/codegen/intrinsics/transmute-x64.rs    | 35 ++++++++++++
 tests/codegen/intrinsics/transmute.rs        | 58 +++++++++++++++++++-
 3 files changed, 134 insertions(+), 13 deletions(-)
 create mode 100644 tests/codegen/intrinsics/transmute-x64.rs

diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 6e4c0be12f0..d88226f5db0 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -223,13 +223,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let OperandValueKind::Immediate(in_scalar) = operand_kind else {
                     bug!("Found {operand_kind:?} for operand {operand:?}");
                 };
-                if let OperandValueKind::Immediate(out_scalar) = cast_kind
-                    && in_scalar.size(self.cx) == out_scalar.size(self.cx)
-                {
-                    let cast_bty = bx.backend_type(cast);
-                    Some(OperandValue::Immediate(
-                        self.transmute_immediate(bx, imm, in_scalar, out_scalar, cast_bty),
-                    ))
+                if let OperandValueKind::Immediate(out_scalar) = cast_kind {
+                    match (in_scalar, out_scalar) {
+                        (ScalarOrZst::Zst, ScalarOrZst::Zst) => {
+                            Some(OperandRef::new_zst(bx, cast).val)
+                        }
+                        (ScalarOrZst::Scalar(in_scalar), ScalarOrZst::Scalar(out_scalar))
+                            if in_scalar.size(self.cx) == out_scalar.size(self.cx) =>
+                        {
+                            let cast_bty = bx.backend_type(cast);
+                            Some(OperandValue::Immediate(
+                                self.transmute_immediate(bx, imm, in_scalar, out_scalar, cast_bty),
+                            ))
+                        }
+                        _ => None,
+                    }
                 } else {
                     None
                 }
@@ -892,13 +900,18 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         if self.cx.is_backend_immediate(layout) {
             debug_assert!(!self.cx.is_backend_scalar_pair(layout));
             OperandValueKind::Immediate(match layout.abi {
-                abi::Abi::Scalar(s) => s,
-                abi::Abi::Vector { element, .. } => element,
-                x => bug!("Couldn't translate {x:?} as backend immediate"),
+                abi::Abi::Scalar(s) => ScalarOrZst::Scalar(s),
+                abi::Abi::Vector { element, .. } => ScalarOrZst::Scalar(element),
+                _ if layout.is_zst() => ScalarOrZst::Zst,
+                x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"),
             })
         } else if self.cx.is_backend_scalar_pair(layout) {
             let abi::Abi::ScalarPair(s1, s2) = layout.abi else {
-                bug!("Couldn't translate {:?} as backend scalar pair", layout.abi)
+                span_bug!(
+                    self.mir.span,
+                    "Couldn't translate {:?} as backend scalar pair",
+                    layout.abi,
+                );
             };
             OperandValueKind::Pair(s1, s2)
         } else {
@@ -907,9 +920,26 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     }
 }
 
+/// The variants of this match [`OperandValue`], giving details about the
+/// backend values that will be held in that other type.
 #[derive(Debug, Copy, Clone)]
 enum OperandValueKind {
     Ref,
-    Immediate(abi::Scalar),
+    Immediate(ScalarOrZst),
     Pair(abi::Scalar, abi::Scalar),
 }
+
+#[derive(Debug, Copy, Clone)]
+enum ScalarOrZst {
+    Zst,
+    Scalar(abi::Scalar),
+}
+
+impl ScalarOrZst {
+    pub fn size(self, cx: &impl abi::HasDataLayout) -> abi::Size {
+        match self {
+            ScalarOrZst::Zst => abi::Size::ZERO,
+            ScalarOrZst::Scalar(s) => s.size(cx),
+        }
+    }
+}
diff --git a/tests/codegen/intrinsics/transmute-x64.rs b/tests/codegen/intrinsics/transmute-x64.rs
new file mode 100644
index 00000000000..99d258c6204
--- /dev/null
+++ b/tests/codegen/intrinsics/transmute-x64.rs
@@ -0,0 +1,35 @@
+// compile-flags: -O -C no-prepopulate-passes
+// only-x86_64 (it's using arch-specific types)
+// min-llvm-version: 15.0 # this test assumes `ptr`s
+
+#![crate_type = "lib"]
+
+use std::arch::x86_64::{__m128, __m128i, __m256i};
+use std::mem::transmute;
+
+// CHECK-LABEL: @check_sse_float_to_int(
+#[no_mangle]
+pub unsafe fn check_sse_float_to_int(x: __m128) -> __m128i {
+    // CHECK-NOT: alloca
+    // CHECK: %1 = load <4 x float>, ptr %x, align 16
+    // CHECK: store <4 x float> %1, ptr %0, align 16
+    transmute(x)
+}
+
+// CHECK-LABEL: @check_sse_pair_to_avx(
+#[no_mangle]
+pub unsafe fn check_sse_pair_to_avx(x: (__m128i, __m128i)) -> __m256i {
+    // CHECK-NOT: alloca
+    // CHECK: %1 = load <4 x i64>, ptr %x, align 16
+    // CHECK: store <4 x i64> %1, ptr %0, align 32
+    transmute(x)
+}
+
+// CHECK-LABEL: @check_sse_pair_from_avx(
+#[no_mangle]
+pub unsafe fn check_sse_pair_from_avx(x: __m256i) -> (__m128i, __m128i) {
+    // CHECK-NOT: alloca
+    // CHECK: %1 = load <4 x i64>, ptr %x, align 32
+    // CHECK: store <4 x i64> %1, ptr %0, align 16
+    transmute(x)
+}
diff --git a/tests/codegen/intrinsics/transmute.rs b/tests/codegen/intrinsics/transmute.rs
index c2295ca9a0c..57f901c6719 100644
--- a/tests/codegen/intrinsics/transmute.rs
+++ b/tests/codegen/intrinsics/transmute.rs
@@ -8,7 +8,7 @@
 #![feature(inline_const)]
 #![allow(unreachable_code)]
 
-use std::mem::transmute;
+use std::mem::{transmute, MaybeUninit};
 
 // Some of the cases here are statically rejected by `mem::transmute`, so
 // we need to generate custom MIR for those cases to get to codegen.
@@ -373,3 +373,59 @@ pub unsafe fn check_pair_to_dst_ref<'a>(x: (usize, usize)) -> &'a [u8] {
     // CHECK: ret { ptr, i64 } %2
     transmute(x)
 }
+
+// CHECK-LABEL: @check_issue_109992(
+#[no_mangle]
+#[custom_mir(dialect = "runtime", phase = "optimized")]
+pub unsafe fn check_issue_109992(x: ()) -> [(); 1] {
+    // This uses custom MIR to avoid MIR optimizations having removed ZST ops.
+
+    // CHECK: start
+    // CHECK-NEXT: ret void
+    mir!{
+        {
+            RET = CastTransmute(x);
+            Return()
+        }
+    }
+}
+
+// CHECK-LABEL: @check_maybe_uninit_pair(i16 %x.0, i64 %x.1)
+#[no_mangle]
+pub unsafe fn check_maybe_uninit_pair(
+    x: (MaybeUninit<u16>, MaybeUninit<u64>),
+) -> (MaybeUninit<i64>, MaybeUninit<i16>) {
+    // Thanks to `MaybeUninit` this is actually defined behaviour,
+    // unlike the examples above with pairs of primitives.
+
+    // CHECK: store i16 %x.0
+    // CHECK: store i64 %x.1
+    // CHECK: load i64
+    // CHECK-NOT: noundef
+    // CHECK: load i16
+    // CHECK-NOT: noundef
+    // CHECK: ret { i64, i16 }
+    transmute(x)
+}
+
+#[repr(align(8))]
+pub struct HighAlignScalar(u8);
+
+// CHECK-LABEL: @check_to_overalign(
+#[no_mangle]
+pub unsafe fn check_to_overalign(x: u64) -> HighAlignScalar {
+    // CHECK: %0 = alloca %HighAlignScalar, align 8
+    // CHECK: store i64 %x, ptr %0, align 8
+    // CHECK: %1 = load i64, ptr %0, align 8
+    // CHECK: ret i64 %1
+    transmute(x)
+}
+
+// CHECK-LABEL: @check_from_overalign(
+#[no_mangle]
+pub unsafe fn check_from_overalign(x: HighAlignScalar) -> u64 {
+    // CHECK: %x = alloca %HighAlignScalar, align 8
+    // CHECK: %[[VAL:.+]] = load i64, ptr %x, align 8
+    // CHECK: ret i64 %[[VAL]]
+    transmute(x)
+}
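Not part of the patch above: as a rough stand-alone illustration (the function name is invented for this sketch) of the zero-sized case that the `check_issue_109992` test covers, a transmute between two ZSTs like the one below is size-correct and, with this change, is lowered through the new `ScalarOrZst::Zst` arm via `OperandRef::new_zst` instead of hitting the old `bug!` path:

    use std::mem::transmute;

    // Both `()` and `[(); 1]` are zero-sized, so the transmute's size check passes,
    // yet neither type has an `abi::Scalar` for its immediate -- exactly the case
    // the patch adds handling for.
    pub unsafe fn unit_to_unit_array(x: ()) -> [(); 1] {
        transmute(x)
    }

The in-tree test expresses the same shape with custom MIR so that MIR optimizations cannot remove the zero-sized operations before they reach codegen.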