diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index 97dc401251c..0718bebb31b 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -6,7 +6,7 @@ use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
 
-use rustc_codegen_ssa::mir::operand::OperandValue;
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
 use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::MemFlags;
@@ -253,7 +253,7 @@ fn store(
                 bx.lifetime_end(llscratch, scratch_size);
             }
         } else {
-            OperandValue::Immediate(val).store(bx, dst);
+            OperandRef::from_immediate_or_packed_pair(bx, val, self.layout).val.store(bx, dst);
         }
     }
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 4e5fe290bb1..7ed27b33dce 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -558,10 +558,17 @@ fn scalar_load_metadata<'a, 'll, 'tcx>(
             OperandValue::Immediate(self.to_immediate(llval, place.layout))
         } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
             let b_offset = a.size(self).align_to(b.align(self).abi);
-            let pair_ty = place.layout.llvm_type(self);
 
             let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
-                let llptr = self.struct_gep(pair_ty, place.llval, i as u64);
+                let llptr = if i == 0 {
+                    place.llval
+                } else {
+                    self.inbounds_gep(
+                        self.type_i8(),
+                        place.llval,
+                        &[self.const_usize(b_offset.bytes())],
+                    )
+                };
                 let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                 let load = self.load(llty, llptr, align);
                 scalar_load_metadata(self, load, scalar, layout, offset);
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 6043a8ebded..a0f9d5cf7cd 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -179,7 +179,10 @@ fn codegen_intrinsic_call(
                 unsafe {
                     llvm::LLVMSetAlignment(load, align);
                 }
-                self.to_immediate(load, self.layout_of(tp_ty))
+                if !result.layout.is_zst() {
+                    self.store(load, result.llval, result.align);
+                }
+                return;
             }
             sym::volatile_store => {
                 let dst = args[0].deref(self.cx());
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 57b46382c96..e88f4217c9d 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -26,16 +26,7 @@ fn uncached_llvm_type<'a, 'tcx>(
             let element = layout.scalar_llvm_type_at(cx, element);
             return cx.type_vector(element, count);
         }
-        Abi::ScalarPair(..) => {
-            return cx.type_struct(
-                &[
-                    layout.scalar_pair_element_llvm_type(cx, 0, false),
-                    layout.scalar_pair_element_llvm_type(cx, 1, false),
-                ],
-                false,
-            );
-        }
-        Abi::Uninhabited | Abi::Aggregate { .. } => {}
+        Abi::Uninhabited | Abi::Aggregate { .. } | Abi::ScalarPair(..) => {}
     }
 
     let name = match layout.ty.kind() {
@@ -275,11 +266,25 @@ fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
     }
 
     fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
-        if let Abi::Scalar(scalar) = self.abi {
-            if scalar.is_bool() {
-                return cx.type_i1();
+        match self.abi {
+            Abi::Scalar(scalar) => {
+                if scalar.is_bool() {
+                    return cx.type_i1();
+                }
             }
-        }
+            Abi::ScalarPair(..) => {
+                // An immediate pair always contains just the two elements, without any padding
+                // filler, as it should never be stored to memory.
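The type_of.rs hunks above are the heart of the patch: a ScalarPair layout now has two distinct LLVM representations. In memory it is an ordinary aggregate like any other layout (so `uncached_llvm_type` no longer special-cases it), while as an immediate it is a bare two-element struct built from the *immediate* element types (note the `true` passed to `scalar_pair_element_llvm_type`), so a `bool` element stays `i1`. A minimal runnable sketch of the memory side of that contract, in plain Rust rather than compiler code: each `bool` of a pair occupies a full byte holding 0 or 1, which is what the memory type must model; the `i1` form exists only in SSA values.

```rust
fn main() {
    let pair = (true, false);
    // In memory, `(bool, bool)` is two bytes, each 0 or 1 (the `i8` form);
    // the `{ i1, i1 }` immediate aggregate never touches memory.
    let bytes: [u8; 2] = unsafe { std::mem::transmute(pair) };
    assert!(bytes.iter().all(|&b| b <= 1));
    println!("in-memory bytes: {bytes:?}");
}
```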
+                return cx.type_struct(
+                    &[
+                        self.scalar_pair_element_llvm_type(cx, 0, true),
+                        self.scalar_pair_element_llvm_type(cx, 1, true),
+                    ],
+                    false,
+                );
+            }
+            _ => {}
+        };
 
         self.llvm_type(cx)
     }
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 794cbd315b7..6f6f010422f 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -231,14 +231,12 @@ pub fn immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         bx: &mut Bx,
     ) -> V {
         if let OperandValue::Pair(a, b) = self.val {
-            let llty = bx.cx().backend_type(self.layout);
+            let llty = bx.cx().immediate_backend_type(self.layout);
             debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty);
             // Reconstruct the immediate aggregate.
             let mut llpair = bx.cx().const_poison(llty);
-            let imm_a = bx.from_immediate(a);
-            let imm_b = bx.from_immediate(b);
-            llpair = bx.insert_value(llpair, imm_a, 0);
-            llpair = bx.insert_value(llpair, imm_b, 1);
+            llpair = bx.insert_value(llpair, a, 0);
+            llpair = bx.insert_value(llpair, b, 1);
             llpair
         } else {
            self.immediate()
@@ -251,14 +249,12 @@ pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         llval: V,
         layout: TyAndLayout<'tcx>,
     ) -> Self {
-        let val = if let Abi::ScalarPair(a, b) = layout.abi {
+        let val = if let Abi::ScalarPair(..) = layout.abi {
             debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);
 
             // Deconstruct the immediate aggregate.
             let a_llval = bx.extract_value(llval, 0);
-            let a_llval = bx.to_immediate_scalar(a_llval, a);
             let b_llval = bx.extract_value(llval, 1);
-            let b_llval = bx.to_immediate_scalar(b_llval, b);
             OperandValue::Pair(a_llval, b_llval)
         } else {
             OperandValue::Immediate(llval)
@@ -435,15 +431,14 @@ fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
                 let Abi::ScalarPair(a_scalar, b_scalar) = dest.layout.abi else {
                     bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout);
                 };
-                let ty = bx.backend_type(dest.layout);
                 let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);
 
-                let llptr = bx.struct_gep(ty, dest.llval, 0);
                 let val = bx.from_immediate(a);
                 let align = dest.align;
-                bx.store_with_flags(val, llptr, align, flags);
+                bx.store_with_flags(val, dest.llval, align, flags);
 
-                let llptr = bx.struct_gep(ty, dest.llval, 1);
+                let llptr =
+                    bx.inbounds_gep(bx.type_i8(), dest.llval, &[bx.const_usize(b_offset.bytes())]);
                 let val = bx.from_immediate(b);
                 let align = dest.align.restrict_for_offset(b_offset);
                 bx.store_with_flags(val, llptr, align, flags);
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index c0bb3ac5661..73c08e2ca61 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -108,20 +108,17 @@ pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
                 // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
                 self.llval
             }
-            Abi::ScalarPair(a, b)
-                if offset == a.size(bx.cx()).align_to(b.align(bx.cx()).abi) =>
-            {
-                // Offset matches second field.
-                let ty = bx.backend_type(self.layout);
-                bx.struct_gep(ty, self.llval, 1)
+            Abi::ScalarPair(..) => {
+                // FIXME(nikic): Generate this for all ABIs.
+                bx.inbounds_gep(bx.type_i8(), self.llval, &[bx.const_usize(offset.bytes())])
             }
-            Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
+            Abi::Scalar(_) | Abi::Vector { .. } if field.is_zst() => {
                 // ZST fields (even some that require alignment) are not included in Scalar,
                 // ScalarPair, and Vector layouts, so manually offset the pointer.
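With the memory type no longer guaranteed to be a two-field LLVM struct, the `struct_gep` calls above are replaced by plain byte offsets: the second element lives at `a`'s size rounded up to `b`'s ABI alignment, exactly the `b_offset` both hunks compute. A small standalone sketch of that arithmetic, assuming the usual round-up-to-power-of-two `align_to`:

```rust
/// Round `size` up to a multiple of `align` (a power of two).
fn align_to(size: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two());
    (size + align - 1) & !(align - 1)
}

fn main() {
    // (bool, i32): a.size = 1, b.align = 4 -> second element at byte offset 4,
    // so the store becomes `getelementptr inbounds i8, ptr %dst, i64 4`.
    assert_eq!(align_to(1, 4), 4);
    // (ptr, usize) on a 64-bit target: second element at byte offset 8.
    assert_eq!(align_to(8, 8), 8);
}
```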
                bx.gep(bx.cx().type_i8(), self.llval, &[bx.const_usize(offset.bytes())])
             }
-            Abi::Scalar(_) | Abi::ScalarPair(..) => {
-                // All fields of Scalar and ScalarPair layouts must have been handled by this point.
+            Abi::Scalar(_) => {
+                // All fields of Scalar layouts must have been handled by this point.
                 // Vector layouts have additional fields for each element of the vector, so don't panic in that case.
                 bug!(
                     "offset of non-ZST field `{:?}` does not match layout `{:#?}`",
diff --git a/tests/codegen/align-byval.rs b/tests/codegen/align-byval.rs
index e2446e02ef4..c6e3a8ef5f0 100644
--- a/tests/codegen/align-byval.rs
+++ b/tests/codegen/align-byval.rs
@@ -106,21 +106,21 @@ pub struct ForceAlign16 {
 pub unsafe fn call_na1(x: NaturalAlign1) {
     // CHECK: start:
 
-    // m68k: [[ALLOCA:%[a-z0-9+]]] = alloca { i8, i8 }, align 1
-    // m68k: call void @natural_align_1({{.*}}byval({ i8, i8 }) align 1{{.*}} [[ALLOCA]])
+    // m68k: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 1
+    // m68k: call void @natural_align_1({{.*}}byval(%NaturalAlign1) align 1{{.*}} [[ALLOCA]])
 
-    // wasm: [[ALLOCA:%[a-z0-9+]]] = alloca { i8, i8 }, align 1
-    // wasm: call void @natural_align_1({{.*}}byval({ i8, i8 }) align 1{{.*}} [[ALLOCA]])
+    // wasm: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 1
+    // wasm: call void @natural_align_1({{.*}}byval(%NaturalAlign1) align 1{{.*}} [[ALLOCA]])
 
     // x86_64-linux: call void @natural_align_1(i16
 
     // x86_64-windows: call void @natural_align_1(i16
 
-    // i686-linux: [[ALLOCA:%[a-z0-9+]]] = alloca { i8, i8 }, align 4
-    // i686-linux: call void @natural_align_1({{.*}}byval({ i8, i8 }) align 4{{.*}} [[ALLOCA]])
+    // i686-linux: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 4
+    // i686-linux: call void @natural_align_1({{.*}}byval(%NaturalAlign1) align 4{{.*}} [[ALLOCA]])
 
-    // i686-windows: [[ALLOCA:%[a-z0-9+]]] = alloca { i8, i8 }, align 4
-    // i686-windows: call void @natural_align_1({{.*}}byval({ i8, i8 }) align 4{{.*}} [[ALLOCA]])
+    // i686-windows: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 4
+    // i686-windows: call void @natural_align_1({{.*}}byval(%NaturalAlign1) align 4{{.*}} [[ALLOCA]])
 
     natural_align_1(x);
 }
@@ -199,17 +199,17 @@ pub unsafe fn call_fa16(x: ForceAlign16) {
 }
 
 extern "C" {
-    // m68k: declare void @natural_align_1({{.*}}byval({ i8, i8 }) align 1{{.*}})
+    // m68k: declare void @natural_align_1({{.*}}byval(%NaturalAlign1) align 1{{.*}})
 
-    // wasm: declare void @natural_align_1({{.*}}byval({ i8, i8 }) align 1{{.*}})
+    // wasm: declare void @natural_align_1({{.*}}byval(%NaturalAlign1) align 1{{.*}})
 
     // x86_64-linux: declare void @natural_align_1(i16)
 
     // x86_64-windows: declare void @natural_align_1(i16)
 
-    // i686-linux: declare void @natural_align_1({{.*}}byval({ i8, i8 }) align 4{{.*}})
+    // i686-linux: declare void @natural_align_1({{.*}}byval(%NaturalAlign1) align 4{{.*}})
 
-    // i686-windows: declare void @natural_align_1({{.*}}byval({ i8, i8 }) align 4{{.*}})
+    // i686-windows: declare void @natural_align_1({{.*}}byval(%NaturalAlign1) align 4{{.*}})
     fn natural_align_1(x: NaturalAlign1);
 
     // m68k: declare void @natural_align_2({{.*}}byval(%NaturalAlign2) align 2{{.*}})
diff --git a/tests/codegen/align-struct.rs b/tests/codegen/align-struct.rs
index 40bba6d5254..d483a2ba151 100644
--- a/tests/codegen/align-struct.rs
+++ b/tests/codegen/align-struct.rs
@@ -57,7 +57,7 @@ pub fn nested64(a: Align64, b: i32, c: i32, d: i8) -> Nested64 {
 // CHECK-LABEL: @enum4
 #[no_mangle]
 pub fn enum4(a: i32) -> Enum4 {
-// CHECK: %e4 = alloca { i32, i32 }, align 4
+// CHECK: %e4 = alloca %Enum4, align 4
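Because `uncached_llvm_type` no longer special-cases ScalarPair, pair-ABI structs now appear in the IR under their named type (`%NaturalAlign1`, `%Enum4`) instead of an anonymous `{ i8, i8 }` or `{ i32, i32 }`, and the byval tests above are updated accordingly. The layout itself is unchanged; roughly the shape of the test type (the exact field names in align-byval.rs may differ):

```rust
// Approximately the test's type: two bytes, natural alignment 1.
#[allow(dead_code)]
#[repr(C)]
pub struct NaturalAlign1 {
    a: i8,
    b: i8,
}

fn main() {
    assert_eq!(std::mem::size_of::<NaturalAlign1>(), 2);
    assert_eq!(std::mem::align_of::<NaturalAlign1>(), 1);
}
```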
     let e4 = Enum4::A(a);
     e4
 }
diff --git a/tests/codegen/function-arguments-noopt.rs b/tests/codegen/function-arguments-noopt.rs
index 8fe6c790dda..1bd735cc1af 100644
--- a/tests/codegen/function-arguments-noopt.rs
+++ b/tests/codegen/function-arguments-noopt.rs
@@ -55,7 +55,7 @@ pub fn struct_call(x: S, f: fn(S) -> S) -> S {
     f(x)
 }
 
-// CHECK: { i8, i8 } @enum_(i1 zeroext %x.0, i8 %x.1)
+// CHECK: { i1, i8 } @enum_(i1 zeroext %x.0, i8 %x.1)
 #[no_mangle]
 pub fn enum_(x: Option<u8>) -> Option<u8> {
     x
@@ -64,6 +64,6 @@ pub fn enum_(x: Option<u8>) -> Option<u8> {
 // CHECK-LABEL: @enum_call
 #[no_mangle]
 pub fn enum_call(x: Option<u8>, f: fn(Option<u8>) -> Option<u8>) -> Option<u8> {
-    // CHECK: call { i8, i8 } %f(i1 zeroext %x.0, i8 %x.1)
+    // CHECK: call { i1, i8 } %f(i1 zeroext %x.0, i8 %x.1)
     f(x)
 }
diff --git a/tests/codegen/function-arguments.rs b/tests/codegen/function-arguments.rs
index a218596da1d..64ebd3c05af 100644
--- a/tests/codegen/function-arguments.rs
+++ b/tests/codegen/function-arguments.rs
@@ -275,7 +275,7 @@ pub fn enum_id_1(x: Option<Result<u16, u16>>) -> Option<Result<u16, u16>> {
     x
 }
 
-// CHECK: { i8, i8 } @enum_id_2(i1 noundef zeroext %x.0, i8 %x.1)
+// CHECK: { i1, i8 } @enum_id_2(i1 noundef zeroext %x.0, i8 %x.1)
 #[no_mangle]
 pub fn enum_id_2(x: Option<u8>) -> Option<u8> {
     x
 }
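These signature checks show the user-visible effect of the new immediate type: an enum such as `Option<u8>` (the element type here is reconstructed from the `{ i1, i8 }` signature) is a ScalarPair of a boolean tag and a byte of payload, so its immediate aggregate is now `{ i1, i8 }` rather than `{ i8, i8 }`, while the unpacked argument passing (`i1 zeroext %x.0, i8 %x.1`) is unchanged. A runnable mirror of the test above:

```rust
#[no_mangle]
pub fn enum_(x: Option<u8>) -> Option<u8> {
    x
}

fn main() {
    assert_eq!(enum_(Some(3)), Some(3));
    assert_eq!(enum_(None), None);
}
```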
diff --git a/tests/codegen/intrinsics/transmute.rs b/tests/codegen/intrinsics/transmute.rs
index e64af33ab6c..eff16050875 100644
--- a/tests/codegen/intrinsics/transmute.rs
+++ b/tests/codegen/intrinsics/transmute.rs
@@ -190,7 +190,7 @@ pub unsafe fn check_byte_from_bool(x: bool) -> u8 {
 // CHECK-LABEL: @check_to_pair(
 #[no_mangle]
 pub unsafe fn check_to_pair(x: u64) -> Option<i32> {
-    // CHECK: %_0 = alloca { i32, i32 }, align 4
+    // CHECK: %_0 = alloca %"core::option::Option<i32>", align 4
     // CHECK: store i64 %x, ptr %_0, align 4
     transmute(x)
 }
@@ -203,10 +203,10 @@ pub unsafe fn check_from_pair(x: Option<i32>) -> u64 {
     const { assert!(std::mem::align_of::<Option<i32>>() == 4) };
 
     // CHECK: %_0 = alloca i64, align 8
-    // CHECK: store i32 %x.0, ptr %0, align 8
-    // CHECK: store i32 %x.1, ptr %1, align 4
-    // CHECK: %2 = load i64, ptr %_0, align 8
-    // CHECK: ret i64 %2
+    // CHECK: store i32 %x.0, ptr %_0, align 8
+    // CHECK: store i32 %x.1, ptr %0, align 4
+    // CHECK: %[[R:.+]] = load i64, ptr %_0, align 8
+    // CHECK: ret i64 %[[R]]
     transmute(x)
 }
diff --git a/tests/codegen/personality_lifetimes.rs b/tests/codegen/personality_lifetimes.rs
index b39718a8d08..e2bc0ebcb37 100644
--- a/tests/codegen/personality_lifetimes.rs
+++ b/tests/codegen/personality_lifetimes.rs
@@ -24,7 +24,7 @@ pub fn test() {
     let _s = S;
     // Check that the personality slot alloca gets a lifetime start in each cleanup block, not just
     // in the first one.
-    // CHECK: [[SLOT:%[0-9]+]] = alloca { ptr, i32 }
+    // CHECK: [[SLOT:%[0-9]+]] = alloca { ptr, i32{{.*}} }
     // CHECK-LABEL: cleanup:
     // CHECK: call void @llvm.lifetime.start.{{.*}}({{.*}})
     // CHECK-LABEL: cleanup1:
diff --git a/tests/codegen/refs.rs b/tests/codegen/refs.rs
index 1c7746a3079..9e1205f5d1d 100644
--- a/tests/codegen/refs.rs
+++ b/tests/codegen/refs.rs
@@ -13,9 +13,8 @@ pub fn helper(_: usize) {
 pub fn ref_dst(s: &[u8]) {
     // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy
     // directly to the alloca for "x"
-// CHECK: [[X0:%[0-9]+]] = getelementptr inbounds { ptr, [[USIZE]] }, {{.*}} %x, i32 0, i32 0
-// CHECK: store ptr %s.0, {{.*}} [[X0]]
-// CHECK: [[X1:%[0-9]+]] = getelementptr inbounds { ptr, [[USIZE]] }, {{.*}} %x, i32 0, i32 1
+// CHECK: store ptr %s.0, {{.*}} %x
+// CHECK: [[X1:%[0-9]+]] = getelementptr inbounds i8, {{.*}} %x, {{i32 4|i64 8}}
 // CHECK: store [[USIZE]] %s.1, {{.*}} [[X1]]
     let x = &*s;
diff --git a/tests/codegen/scalar-pair-bool.rs b/tests/codegen/scalar-pair-bool.rs
index 8e8365b6a67..a8d16bc8f29 100644
--- a/tests/codegen/scalar-pair-bool.rs
+++ b/tests/codegen/scalar-pair-bool.rs
@@ -2,25 +2,25 @@
 
 #![crate_type = "lib"]
 
-// CHECK: define{{.*}}{ i8, i8 } @pair_bool_bool(i1 noundef zeroext %pair.0, i1 noundef zeroext %pair.1)
+// CHECK: define{{.*}}{ i1, i1 } @pair_bool_bool(i1 noundef zeroext %pair.0, i1 noundef zeroext %pair.1)
 #[no_mangle]
 pub fn pair_bool_bool(pair: (bool, bool)) -> (bool, bool) {
     pair
 }
 
-// CHECK: define{{.*}}{ i8, i32 } @pair_bool_i32(i1 noundef zeroext %pair.0, i32 noundef %pair.1)
+// CHECK: define{{.*}}{ i1, i32 } @pair_bool_i32(i1 noundef zeroext %pair.0, i32 noundef %pair.1)
 #[no_mangle]
 pub fn pair_bool_i32(pair: (bool, i32)) -> (bool, i32) {
     pair
 }
 
-// CHECK: define{{.*}}{ i32, i8 } @pair_i32_bool(i32 noundef %pair.0, i1 noundef zeroext %pair.1)
+// CHECK: define{{.*}}{ i32, i1 } @pair_i32_bool(i32 noundef %pair.0, i1 noundef zeroext %pair.1)
 #[no_mangle]
 pub fn pair_i32_bool(pair: (i32, bool)) -> (i32, bool) {
     pair
 }
 
-// CHECK: define{{.*}}{ i8, i8 } @pair_and_or(i1 noundef zeroext %_1.0, i1 noundef zeroext %_1.1)
+// CHECK: define{{.*}}{ i1, i1 } @pair_and_or(i1 noundef zeroext %_1.0, i1 noundef zeroext %_1.1)
 #[no_mangle]
 pub fn pair_and_or((a, b): (bool, bool)) -> (bool, bool) {
     // Make sure it can operate directly on the unpacked args
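The scalar-pair-bool checks make the same point for tuples: `i1` now appears directly in the returned immediate pair. The last test's body is not shown in the hunk; it presumably operates on the unpacked `i1` arguments directly, along these lines (assumed body, matching the test's comment):

```rust
// A plausible body for `pair_and_or`: boolean ops straight on the unpacked args.
pub fn pair_and_or((a, b): (bool, bool)) -> (bool, bool) {
    (a && b, a || b)
}

fn main() {
    assert_eq!(pair_and_or((true, false)), (false, true));
}
```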
diff --git a/tests/codegen/slice-iter-nonnull.rs b/tests/codegen/slice-iter-nonnull.rs
index 8749226d401..1e691cc34c9 100644
--- a/tests/codegen/slice-iter-nonnull.rs
+++ b/tests/codegen/slice-iter-nonnull.rs
@@ -15,7 +15,7 @@
 // CHECK-LABEL: @slice_iter_next(
 #[no_mangle]
 pub fn slice_iter_next<'a>(it: &mut std::slice::Iter<'a, u32>) -> Option<&'a u32> {
-    // CHECK: %[[ENDP:.+]] = getelementptr{{.+}}ptr %it,{{.+}} 1
+    // CHECK: %[[ENDP:.+]] = getelementptr inbounds i8, ptr %it, {{i32 4|i64 8}}
     // CHECK: %[[END:.+]] = load ptr, ptr %[[ENDP]]
     // CHECK-SAME: !nonnull
     // CHECK-SAME: !noundef
@@ -32,7 +32,7 @@ pub fn slice_iter_next<'a>(it: &mut std::slice::Iter<'a, u32>) -> Option<&'a u32>
 // CHECK-LABEL: @slice_iter_next_back(
 #[no_mangle]
 pub fn slice_iter_next_back<'a>(it: &mut std::slice::Iter<'a, u32>) -> Option<&'a u32> {
-    // CHECK: %[[ENDP:.+]] = getelementptr{{.+}}ptr %it,{{.+}} 1
+    // CHECK: %[[ENDP:.+]] = getelementptr inbounds i8, ptr %it, {{i32 4|i64 8}}
     // CHECK: %[[END:.+]] = load ptr, ptr %[[ENDP]]
     // CHECK-SAME: !nonnull
     // CHECK-SAME: !noundef
@@ -84,7 +84,7 @@ pub fn slice_iter_mut_new(slice: &mut [u32]) -> std::slice::IterMut<'_, u32> {
 // CHECK-LABEL: @slice_iter_is_empty
 #[no_mangle]
 pub fn slice_iter_is_empty(it: &std::slice::Iter<'_, u32>) -> bool {
-    // CHECK: %[[ENDP:.+]] = getelementptr{{.+}}ptr %it,{{.+}} 1
+    // CHECK: %[[ENDP:.+]] = getelementptr inbounds i8, ptr %it, {{i32 4|i64 8}}
     // CHECK: %[[END:.+]] = load ptr, ptr %[[ENDP]]
     // CHECK-SAME: !nonnull
     // CHECK-SAME: !noundef
@@ -100,7 +100,7 @@ pub fn slice_iter_is_empty(it: &std::slice::Iter<'_, u32>) -> bool {
 // CHECK-LABEL: @slice_iter_len
 #[no_mangle]
 pub fn slice_iter_len(it: &std::slice::Iter<'_, u32>) -> usize {
-    // CHECK: %[[ENDP:.+]] = getelementptr{{.+}}ptr %it,{{.+}} 1
+    // CHECK: %[[ENDP:.+]] = getelementptr inbounds i8, ptr %it, {{i32 4|i64 8}}
     // CHECK: %[[END:.+]] = load ptr, ptr %[[ENDP]]
     // CHECK-SAME: !nonnull
     // CHECK-SAME: !noundef
diff --git a/tests/codegen/zst-offset.rs b/tests/codegen/zst-offset.rs
index cef4b9bdaaf..56dfd96ab2c 100644
--- a/tests/codegen/zst-offset.rs
+++ b/tests/codegen/zst-offset.rs
@@ -22,7 +22,7 @@ pub fn scalar_layout(s: &(u64, ())) {
 // CHECK-LABEL: @scalarpair_layout
 #[no_mangle]
 pub fn scalarpair_layout(s: &(u64, u32, ())) {
-// CHECK: getelementptr i8, {{.+}}, [[USIZE]] 12
+// CHECK: getelementptr inbounds i8, {{.+}}, [[USIZE]] 12
     let x = &s.2;
     witness(&x); // keep variable in an alloca
 }
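The slice-iterator and zst-offset tests now expect the byte-offset form `getelementptr inbounds i8, ptr %it, {{i32 4|i64 8}}`: `Iter`'s second pointer-sized field sits one pointer past the base, i.e. 4 bytes on 32-bit targets and 8 on 64-bit ones. The same base-plus-byte-offset addressing, spelled out in plain Rust on a `#[repr(C)]` stand-in for a scalar pair:

```rust
#[repr(C)]
struct Pair {
    a: u64, // first element at offset 0
    b: u32, // second element at align_to(size_of(a) = 8, align_of(b) = 4) = 8
}

fn main() {
    let p = Pair { a: 7, b: 42 };
    assert_eq!(p.a, 7);
    let base = &p as *const Pair as *const u8;
    // Mirrors `getelementptr inbounds i8, ptr %p, i64 8` followed by a load.
    let b = unsafe { *(base.add(8) as *const u32) };
    assert_eq!(b, 42);
}
```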