diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 7c3b80c9c8f..91609b22615 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -93,15 +93,33 @@ pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         let effective_field_align = self.align.restrict_for_offset(offset);
 
         let mut simple = || {
-            // Unions and newtypes only use an offset of 0.
-            let llval = if offset.bytes() == 0 {
-                self.llval
-            } else if let Abi::ScalarPair(ref a, ref b) = self.layout.abi {
-                // Offsets have to match either first or second field.
-                assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
-                bx.struct_gep(self.llval, 1)
-            } else {
-                bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
+            let llval = match self.layout.abi {
+                _ if offset.bytes() == 0 => {
+                    // Unions and newtypes only use an offset of 0.
+                    // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
+                    self.llval
+                }
+                Abi::ScalarPair(ref a, ref b)
+                    if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) =>
+                {
+                    // Offset matches second field.
+                    bx.struct_gep(self.llval, 1)
+                }
+                Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
+                    // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
+                    let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
+                    bx.gep(byte_ptr, &[bx.const_usize(offset.bytes())])
+                }
+                Abi::Scalar(_) | Abi::ScalarPair(..) => {
+                    // All fields of Scalar and ScalarPair layouts must have been handled by this point.
+                    // Vector layouts have additional fields for each element of the vector, so don't panic in that case.
+                    bug!(
+                        "offset of non-ZST field `{:?}` does not match layout `{:#?}`",
+                        field,
+                        self.layout
+                    );
+                }
+                _ => bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix)),
             };
             PlaceRef {
                 // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index cb79b089d94..0fda1473f64 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -390,78 +390,60 @@ fn univariant_uninterned(
 
         // Unpack newtype ABIs and find scalar pairs.
         if sized && size.bytes() > 0 {
-            // All other fields must be ZSTs, and we need them to all start at 0.
-            let mut zst_offsets = offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
-            if zst_offsets.all(|(_, o)| o.bytes() == 0) {
-                let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
+            // All other fields must be ZSTs.
+            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
 
-                match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
-                    // We have exactly one non-ZST field.
-                    (Some((i, field)), None, None) => {
-                        // Field fills the struct and it has a scalar or scalar pair ABI.
-                        if offsets[i].bytes() == 0
-                            && align.abi == field.align.abi
-                            && size == field.size
-                        {
-                            match field.abi {
-                                // For plain scalars, or vectors of them, we can't unpack
-                                // newtypes for `#[repr(C)]`, as that affects C ABIs.
-                                Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
-                                    abi = field.abi.clone();
-                                }
-                                // But scalar pairs are Rust-specific and get
-                                // treated as aggregates by C ABIs anyway.
-                                Abi::ScalarPair(..) => {
-                                    abi = field.abi.clone();
-                                }
-                                _ => {}
+            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
+                // We have exactly one non-ZST field.
+                (Some((i, field)), None, None) => {
+                    // Field fills the struct and it has a scalar or scalar pair ABI.
+                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
+                    {
+                        match field.abi {
+                            // For plain scalars, or vectors of them, we can't unpack
+                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
+                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
+                                abi = field.abi.clone();
                             }
+                            // But scalar pairs are Rust-specific and get
+                            // treated as aggregates by C ABIs anyway.
+                            Abi::ScalarPair(..) => {
+                                abi = field.abi.clone();
+                            }
+                            _ => {}
                         }
                     }
-
-                    // Two non-ZST fields, and they're both scalars.
-                    (
-                        Some((
-                            i,
-                            &TyAndLayout {
-                                layout: &Layout { abi: Abi::Scalar(ref a), .. }, ..
-                            },
-                        )),
-                        Some((
-                            j,
-                            &TyAndLayout {
-                                layout: &Layout { abi: Abi::Scalar(ref b), .. }, ..
-                            },
-                        )),
-                        None,
-                    ) => {
-                        // Order by the memory placement, not source order.
-                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
-                            ((i, a), (j, b))
-                        } else {
-                            ((j, b), (i, a))
-                        };
-                        let pair = self.scalar_pair(a.clone(), b.clone());
-                        let pair_offsets = match pair.fields {
-                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                                assert_eq!(memory_index, &[0, 1]);
-                                offsets
-                            }
-                            _ => bug!(),
-                        };
-                        if offsets[i] == pair_offsets[0]
-                            && offsets[j] == pair_offsets[1]
-                            && align == pair.align
-                            && size == pair.size
-                        {
-                            // We can use `ScalarPair` only when it matches our
-                            // already computed layout (including `#[repr(C)]`).
-                            abi = pair.abi;
-                        }
-                    }
-
-                    _ => {}
                 }
+
+                // Two non-ZST fields, and they're both scalars.
+                (
+                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
+                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
+                    None,
+                ) => {
+                    // Order by the memory placement, not source order.
+                    let ((i, a), (j, b)) =
+                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
+                    let pair = self.scalar_pair(a.clone(), b.clone());
+                    let pair_offsets = match pair.fields {
+                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+                            assert_eq!(memory_index, &[0, 1]);
+                            offsets
+                        }
+                        _ => bug!(),
+                    };
+                    if offsets[i] == pair_offsets[0]
+                        && offsets[j] == pair_offsets[1]
+                        && align == pair.align
+                        && size == pair.size
+                    {
+                        // We can use `ScalarPair` only when it matches our
+                        // already computed layout (including `#[repr(C)]`).
+                        abi = pair.abi;
+                    }
+                }
+
+                _ => {}
             }
         }
diff --git a/src/test/codegen/tuple-layout-opt.rs b/src/test/codegen/tuple-layout-opt.rs
new file mode 100644
index 00000000000..e86c75f3f48
--- /dev/null
+++ b/src/test/codegen/tuple-layout-opt.rs
@@ -0,0 +1,36 @@
+// ignore-emscripten
+// compile-flags: -C no-prepopulate-passes
+
+// Test that tuples get optimized layout, in particular with a ZST in the last field (#63244)
+
+#![crate_type="lib"]
+
+type ScalarZstLast = (u128, ());
+// CHECK: define i128 @test_ScalarZstLast(i128 %_1)
+#[no_mangle]
+pub fn test_ScalarZstLast(_: ScalarZstLast) -> ScalarZstLast { loop {} }
+
+type ScalarZstFirst = ((), u128);
+// CHECK: define i128 @test_ScalarZstFirst(i128 %_1)
+#[no_mangle]
+pub fn test_ScalarZstFirst(_: ScalarZstFirst) -> ScalarZstFirst { loop {} }
+
+type ScalarPairZstLast = (u8, u128, ());
+// CHECK: define { i128, i8 } @test_ScalarPairZstLast(i128 %_1.0, i8 %_1.1)
+#[no_mangle]
+pub fn test_ScalarPairZstLast(_: ScalarPairZstLast) -> ScalarPairZstLast { loop {} }
+
+type ScalarPairZstFirst = ((), u8, u128);
+// CHECK: define { i8, i128 } @test_ScalarPairZstFirst(i8 %_1.0, i128 %_1.1)
+#[no_mangle]
+pub fn test_ScalarPairZstFirst(_: ScalarPairZstFirst) -> ScalarPairZstFirst { loop {} }
+
+type ScalarPairLotsOfZsts = ((), u8, (), u128, ());
+// CHECK: define { i128, i8 } @test_ScalarPairLotsOfZsts(i128 %_1.0, i8 %_1.1)
+#[no_mangle]
+pub fn test_ScalarPairLotsOfZsts(_: ScalarPairLotsOfZsts) -> ScalarPairLotsOfZsts { loop {} }
+
+type ScalarPairLottaNesting = (((), ((), u8, (), u128, ())), ());
+// CHECK: define { i128, i8 } @test_ScalarPairLottaNesting(i128 %_1.0, i8 %_1.1)
+#[no_mangle]
+pub fn test_ScalarPairLottaNesting(_: ScalarPairLottaNesting) -> ScalarPairLottaNesting { loop {} }
diff --git a/src/test/codegen/zst-offset.rs b/src/test/codegen/zst-offset.rs
new file mode 100644
index 00000000000..0c015fca325
--- /dev/null
+++ b/src/test/codegen/zst-offset.rs
@@ -0,0 +1,43 @@
+// compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(repr_simd)]
+
+// Hack to get the correct size for the length part in slices
+// CHECK: @helper([[USIZE:i[0-9]+]] %_1)
+#[no_mangle]
+pub fn helper(_: usize) {
+}
+
+// Check that we correctly generate a GEP for a ZST that is not included in Scalar layout
+// CHECK-LABEL: @scalar_layout
+#[no_mangle]
+pub fn scalar_layout(s: &(u64, ())) {
+// CHECK: [[X0:%[0-9]+]] = bitcast i64* %s to i8*
+// CHECK-NEXT: [[X1:%[0-9]+]] = getelementptr i8, i8* [[X0]], [[USIZE]] 8
+    let x = &s.1;
+    &x; // keep variable in an alloca
+}
+
+// Check that we correctly generate a GEP for a ZST that is not included in ScalarPair layout
+// CHECK-LABEL: @scalarpair_layout
+#[no_mangle]
+pub fn scalarpair_layout(s: &(u64, u32, ())) {
+// CHECK: [[X0:%[0-9]+]] = bitcast { i64, i32 }* %s to i8*
+// CHECK-NEXT: [[X1:%[0-9]+]] = getelementptr i8, i8* [[X0]], [[USIZE]] 12
+    let x = &s.2;
+    &x; // keep variable in an alloca
+}
+
+#[repr(simd)]
+pub struct U64x4(u64, u64, u64, u64);
+
+// Check that we correctly generate a GEP for a ZST that is not included in Vector layout
+// CHECK-LABEL: @vector_layout
+#[no_mangle]
+pub fn vector_layout(s: &(U64x4, ())) {
+// CHECK: [[X0:%[0-9]+]] = bitcast <4 x i64>* %s to i8*
+// CHECK-NEXT: [[X1:%[0-9]+]] = getelementptr i8, i8* [[X0]], [[USIZE]] 32
+    let x = &s.1;
+    &x; // keep variable in an alloca
+}
diff --git a/src/test/ui/dynamically-sized-types/dst-tuple-no-reorder.rs b/src/test/ui/dynamically-sized-types/dst-tuple-no-reorder.rs
new file mode 100644
index 00000000000..26b923f431f
--- /dev/null
+++ b/src/test/ui/dynamically-sized-types/dst-tuple-no-reorder.rs
@@ -0,0 +1,26 @@
+// run-pass
+
+#![feature(unsized_tuple_coercion)]
+
+// Ensure that unsizable fields that might be accessed don't get reordered
+
+fn nonzero_size() {
+    let sized: (u8, [u32; 2]) = (123, [456, 789]);
+    let unsize: &(u8, [u32]) = &sized;
+    assert_eq!(unsize.0, 123);
+    assert_eq!(unsize.1.len(), 2);
+    assert_eq!(unsize.1[0], 456);
+    assert_eq!(unsize.1[1], 789);
+}
+
+fn zst() {
+    let sized: (u8, [u32; 0]) = (123, []);
+    let unsize: &(u8, [u32]) = &sized;
+    assert_eq!(unsize.0, 123);
+    assert_eq!(unsize.1.len(), 0);
+}
+
+pub fn main() {
+    nonzero_size();
+    zst();
+}
diff --git a/src/test/ui/dynamically-sized-types/dst-tuple-zst-offsets.rs b/src/test/ui/dynamically-sized-types/dst-tuple-zst-offsets.rs
new file mode 100644
index 00000000000..b0cefe77039
--- /dev/null
+++ b/src/test/ui/dynamically-sized-types/dst-tuple-zst-offsets.rs
@@ -0,0 +1,22 @@
+// run-pass
+
+#![feature(unsized_tuple_coercion)]
+
+// Check that we do not change the offsets of ZST fields when unsizing
+
+fn scalar_layout() {
+    let sized: &(u8, [(); 13]) = &(123, [(); 13]);
+    let unsize: &(u8, [()]) = sized;
+    assert_eq!(sized.1.as_ptr(), unsize.1.as_ptr());
+}
+
+fn scalarpair_layout() {
+    let sized: &(u8, u16, [(); 13]) = &(123, 456, [(); 13]);
+    let unsize: &(u8, u16, [()]) = sized;
+    assert_eq!(sized.2.as_ptr(), unsize.2.as_ptr());
+}
+
+pub fn main() {
+    scalar_layout();
+    scalarpair_layout();
+}
diff --git a/src/test/ui/mir/mir_const_prop_tuple_field_reorder.rs b/src/test/ui/mir/mir_const_prop_tuple_field_reorder.rs
new file mode 100644
index 00000000000..629b50dec65
--- /dev/null
+++ b/src/test/ui/mir/mir_const_prop_tuple_field_reorder.rs
@@ -0,0 +1,27 @@
+// compile-flags: -Z mir-opt-level=2
+// build-pass
+#![crate_type="lib"]
+
+// This used to ICE: const-prop did not account for field reordering of scalar pairs,
+// and would generate a tuple like `(0x1337, VariantBar): (FooEnum, isize)`,
+// causing assertion failures in codegen when trying to read 0x1337 at the wrong type.
+
+pub enum FooEnum {
+    VariantBar,
+    VariantBaz,
+    VariantBuz,
+}
+
+pub fn wrong_index() -> isize {
+    let (_, b) = id((FooEnum::VariantBar, 0x1337));
+    b
+}
+
+pub fn wrong_index_two() -> isize {
+    let (_, (_, b)) = id(((), (FooEnum::VariantBar, 0x1338)));
+    b
+}
+
+fn id<T>(x: T) -> T {
+    x
+}
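
Aside, not part of the patch: the ABI change itself (Scalar/ScalarPair instead of an
aggregate) affects only the emitted IR and calling convention, not size or alignment,
which is why the layout checks above are FileCheck codegen tests. The ZST offset that
zst-offset.rs verifies at the IR level can, however, be observed from ordinary Rust.
The sketch below is illustrative only and relies on the field offsets rustc currently
computes for default-repr tuples, which the language does not guarantee:

    fn main() {
        // (u64, ()) gets a Scalar ABI after this change, but the ZST field keeps its
        // offset of 8, and project_field must produce exactly that address for &tuple.1.
        let tuple: (u64, ()) = (0, ());
        let base = &tuple as *const (u64, ()) as usize;
        let zst = &tuple.1 as *const () as usize;
        // Mirrors the `getelementptr i8, i8* ..., 8` checked in zst-offset.rs.
        assert_eq!(zst - base, 8);
    }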