Auto merge of #73453 - erikdesjardins:tuplayout, r=eddyb
Ignore ZST offsets when deciding whether to use Scalar/ScalarPair layout

This matters because Scalar/ScalarPair layout previously would not be used if any ZST had a nonzero offset. For example, before this change, only `((), u128)` would be laid out like `u128`, not `(u128, ())`.

Fixes #63244
commit 5b9e886403
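The change is about ABI classification rather than size: both field orders already occupy 16 bytes, but before this commit only `((), u128)` was classified as Abi::Scalar and passed in a register. A quick sketch of the size facts involved (plain std Rust, not part of this commit):

use std::mem::size_of;

fn main() {
    // ZSTs occupy no storage, so a tuple with a ZST is the same size
    // as the bare scalar in either field order:
    assert_eq!(size_of::<()>(), 0);
    assert_eq!(size_of::<(u128, ())>(), size_of::<u128>());
    assert_eq!(size_of::<((), u128)>(), size_of::<u128>());
}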
@@ -93,15 +93,33 @@ pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         let effective_field_align = self.align.restrict_for_offset(offset);

         let mut simple = || {
-            // Unions and newtypes only use an offset of 0.
-            let llval = if offset.bytes() == 0 {
-                self.llval
-            } else if let Abi::ScalarPair(ref a, ref b) = self.layout.abi {
-                // Offsets have to match either first or second field.
-                assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
-                bx.struct_gep(self.llval, 1)
-            } else {
-                bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
+            let llval = match self.layout.abi {
+                _ if offset.bytes() == 0 => {
+                    // Unions and newtypes only use an offset of 0.
+                    // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
+                    self.llval
+                }
+                Abi::ScalarPair(ref a, ref b)
+                    if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) =>
+                {
+                    // Offset matches second field.
+                    bx.struct_gep(self.llval, 1)
+                }
+                Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
+                    // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
+                    let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
+                    bx.gep(byte_ptr, &[bx.const_usize(offset.bytes())])
+                }
+                Abi::Scalar(_) | Abi::ScalarPair(..) => {
+                    // All fields of Scalar and ScalarPair layouts must have been handled by this point.
+                    // Vector layouts have additional fields for each element of the vector, so don't panic in that case.
+                    bug!(
+                        "offset of non-ZST field `{:?}` does not match layout `{:#?}`",
+                        field,
+                        self.layout
+                    );
+                }
+                _ => bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix)),
             };
             PlaceRef {
                 // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
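The ScalarPair arm above relies on a simple offset rule: the second scalar starts at the first scalar's size, rounded up to the second scalar's alignment. A minimal standalone sketch of that rule (hypothetical helper, not rustc's actual align_to):

fn align_to(size: u64, align: u64) -> u64 {
    // align must be a power of two
    (size + align - 1) & !(align - 1)
}

fn main() {
    // (u64, u32, ()): the u32 starts at align_to(8, 4) == 8
    assert_eq!(align_to(8, 4), 8);
    // (u8, u128, ()) after field reordering (u128 first): the u8 starts at align_to(16, 1) == 16
    assert_eq!(align_to(16, 1), 16);
}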
@@ -390,18 +390,14 @@ fn univariant_uninterned(

         // Unpack newtype ABIs and find scalar pairs.
         if sized && size.bytes() > 0 {
-            // All other fields must be ZSTs, and we need them to all start at 0.
-            let mut zst_offsets = offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
-            if zst_offsets.all(|(_, o)| o.bytes() == 0) {
+            // All other fields must be ZSTs.
             let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

             match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                 // We have exactly one non-ZST field.
                 (Some((i, field)), None, None) => {
                     // Field fills the struct and it has a scalar or scalar pair ABI.
-                    if offsets[i].bytes() == 0
-                        && align.abi == field.align.abi
-                        && size == field.size
+                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                     {
                         match field.abi {
                             // For plain scalars, or vectors of them, we can't unpack
@@ -421,26 +417,13 @@ fn univariant_uninterned(

                 // Two non-ZST fields, and they're both scalars.
                 (
-                    Some((
-                        i,
-                        &TyAndLayout {
-                            layout: &Layout { abi: Abi::Scalar(ref a), .. }, ..
-                        },
-                    )),
-                    Some((
-                        j,
-                        &TyAndLayout {
-                            layout: &Layout { abi: Abi::Scalar(ref b), .. }, ..
-                        },
-                    )),
+                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
+                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
                     None,
                 ) => {
                     // Order by the memory placement, not source order.
-                    let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
-                        ((i, a), (j, b))
-                    } else {
-                        ((j, b), (i, a))
-                    };
+                    let ((i, a), (j, b)) =
+                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
                     let pair = self.scalar_pair(a.clone(), b.clone());
                     let pair_offsets = match pair.fields {
                         FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
@@ -463,7 +446,6 @@ fn univariant_uninterned(
                     _ => {}
                 }
             }
-            }

         if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
             abi = Abi::Uninhabited;
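Taken together, the two layout hunks implement a simple decision: filter out ZSTs entirely, regardless of their offsets, then classify by what remains. A toy model of that selection (hypothetical types, not rustc's real API):

#[derive(Clone, Copy)]
struct Field {
    size: u64,
    is_scalar: bool,
}

fn classify(fields: &[Field]) -> &'static str {
    // ZST offsets are irrelevant here; only non-ZST fields are inspected.
    let mut non_zst = fields.iter().filter(|f| f.size > 0);
    match (non_zst.next(), non_zst.next(), non_zst.next()) {
        (Some(_), None, None) => "unpack the single field's ABI (Scalar, etc.)",
        (Some(a), Some(b), None) if a.is_scalar && b.is_scalar => "ScalarPair",
        _ => "Aggregate",
    }
}

fn main() {
    let zst = Field { size: 0, is_scalar: false };
    let u8_ = Field { size: 1, is_scalar: true };
    let u128_ = Field { size: 16, is_scalar: true };
    // (u128, ()): now unpacks to a bare scalar even though the ZST sits at offset 16.
    assert_eq!(classify(&[u128_, zst]), "unpack the single field's ABI (Scalar, etc.)");
    // ((), u8, (), u128, ()): ZSTs anywhere don't block ScalarPair.
    assert_eq!(classify(&[zst, u8_, zst, u128_, zst]), "ScalarPair");
}

Note that the real code additionally requires the single non-ZST field to start at offset 0 and to fill the struct; that check is unchanged by this commit.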
src/test/codegen/tuple-layout-opt.rs (new file, 36 lines)
@@ -0,0 +1,36 @@
// ignore-emscripten
// compile-flags: -C no-prepopulate-passes

// Test that tuples get optimized layout, in particular with a ZST in the last field (#63244)

#![crate_type="lib"]

type ScalarZstLast = (u128, ());
// CHECK: define i128 @test_ScalarZstLast(i128 %_1)
#[no_mangle]
pub fn test_ScalarZstLast(_: ScalarZstLast) -> ScalarZstLast { loop {} }

type ScalarZstFirst = ((), u128);
// CHECK: define i128 @test_ScalarZstFirst(i128 %_1)
#[no_mangle]
pub fn test_ScalarZstFirst(_: ScalarZstFirst) -> ScalarZstFirst { loop {} }

type ScalarPairZstLast = (u8, u128, ());
// CHECK: define { i128, i8 } @test_ScalarPairZstLast(i128 %_1.0, i8 %_1.1)
#[no_mangle]
pub fn test_ScalarPairZstLast(_: ScalarPairZstLast) -> ScalarPairZstLast { loop {} }

type ScalarPairZstFirst = ((), u8, u128);
// CHECK: define { i8, i128 } @test_ScalarPairZstFirst(i8 %_1.0, i128 %_1.1)
#[no_mangle]
pub fn test_ScalarPairZstFirst(_: ScalarPairZstFirst) -> ScalarPairZstFirst { loop {} }

type ScalarPairLotsOfZsts = ((), u8, (), u128, ());
// CHECK: define { i128, i8 } @test_ScalarPairLotsOfZsts(i128 %_1.0, i8 %_1.1)
#[no_mangle]
pub fn test_ScalarPairLotsOfZsts(_: ScalarPairLotsOfZsts) -> ScalarPairLotsOfZsts { loop {} }

type ScalarPairLottaNesting = (((), ((), u8, (), u128, ())), ());
// CHECK: define { i128, i8 } @test_ScalarPairLottaNesting(i128 %_1.0, i8 %_1.1)
#[no_mangle]
pub fn test_ScalarPairLottaNesting(_: ScalarPairLottaNesting) -> ScalarPairLottaNesting { loop {} }
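These FileCheck assertions confirm that every variant collapses to the same i128 or { i128, i8 } signature regardless of where the ZSTs sit. Assuming a rustc checkout, a codegen test like this can be run individually with something like:

./x.py test --stage 1 src/test/codegen --test-args tuple-layout-opt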
src/test/codegen/zst-offset.rs (new file, 43 lines)
@@ -0,0 +1,43 @@
// compile-flags: -C no-prepopulate-passes

#![crate_type = "lib"]
#![feature(repr_simd)]

// Hack to get the correct size for the length part in slices
// CHECK: @helper([[USIZE:i[0-9]+]] %_1)
#[no_mangle]
pub fn helper(_: usize) {
}

// Check that we correctly generate a GEP for a ZST that is not included in Scalar layout
// CHECK-LABEL: @scalar_layout
#[no_mangle]
pub fn scalar_layout(s: &(u64, ())) {
    // CHECK: [[X0:%[0-9]+]] = bitcast i64* %s to i8*
    // CHECK-NEXT: [[X1:%[0-9]+]] = getelementptr i8, i8* [[X0]], [[USIZE]] 8
    let x = &s.1;
    &x; // keep variable in an alloca
}

// Check that we correctly generate a GEP for a ZST that is not included in ScalarPair layout
// CHECK-LABEL: @scalarpair_layout
#[no_mangle]
pub fn scalarpair_layout(s: &(u64, u32, ())) {
    // CHECK: [[X0:%[0-9]+]] = bitcast { i64, i32 }* %s to i8*
    // CHECK-NEXT: [[X1:%[0-9]+]] = getelementptr i8, i8* [[X0]], [[USIZE]] 12
    let x = &s.2;
    &x; // keep variable in an alloca
}

#[repr(simd)]
pub struct U64x4(u64, u64, u64, u64);

// Check that we correctly generate a GEP for a ZST that is not included in Vector layout
// CHECK-LABEL: @vector_layout
#[no_mangle]
pub fn vector_layout(s: &(U64x4, ())) {
    // CHECK: [[X0:%[0-9]+]] = bitcast <4 x i64>* %s to i8*
    // CHECK-NEXT: [[X1:%[0-9]+]] = getelementptr i8, i8* [[X0]], [[USIZE]] 32
    let x = &s.1;
    &x; // keep variable in an alloca
}
src/test/ui/dynamically-sized-types/dst-tuple-no-reorder.rs (new file, 26 lines)
@@ -0,0 +1,26 @@
// run-pass

#![feature(unsized_tuple_coercion)]

// Ensure that unsizable fields that might be accessed don't get reordered

fn nonzero_size() {
    let sized: (u8, [u32; 2]) = (123, [456, 789]);
    let unsize: &(u8, [u32]) = &sized;
    assert_eq!(unsize.0, 123);
    assert_eq!(unsize.1.len(), 2);
    assert_eq!(unsize.1[0], 456);
    assert_eq!(unsize.1[1], 789);
}

fn zst() {
    let sized: (u8, [u32; 0]) = (123, []);
    let unsize: &(u8, [u32]) = &sized;
    assert_eq!(unsize.0, 123);
    assert_eq!(unsize.1.len(), 0);
}

pub fn main() {
    nonzero_size();
    zst();
}
src/test/ui/dynamically-sized-types/dst-tuple-zst-offsets.rs (new file, 22 lines)
@@ -0,0 +1,22 @@
// run-pass

#![feature(unsized_tuple_coercion)]

// Check that we do not change the offsets of ZST fields when unsizing

fn scalar_layout() {
    let sized: &(u8, [(); 13]) = &(123, [(); 13]);
    let unsize: &(u8, [()]) = sized;
    assert_eq!(sized.1.as_ptr(), unsize.1.as_ptr());
}

fn scalarpair_layout() {
    let sized: &(u8, u16, [(); 13]) = &(123, 456, [(); 13]);
    let unsize: &(u8, u16, [()]) = sized;
    assert_eq!(sized.2.as_ptr(), unsize.2.as_ptr());
}

pub fn main() {
    scalar_layout();
    scalarpair_layout();
}
src/test/ui/mir/mir_const_prop_tuple_field_reorder.rs (new file, 27 lines)
@@ -0,0 +1,27 @@
// compile-flags: -Z mir-opt-level=2
// build-pass
#![crate_type="lib"]

// This used to ICE: const-prop did not account for field reordering of scalar pairs,
// and would generate a tuple like `(0x1337, VariantBar): (FooEnum, isize)`,
// causing assertion failures in codegen when trying to read 0x1337 at the wrong type.

pub enum FooEnum {
    VariantBar,
    VariantBaz,
    VariantBuz,
}

pub fn wrong_index() -> isize {
    let (_, b) = id((FooEnum::VariantBar, 0x1337));
    b
}

pub fn wrong_index_two() -> isize {
    let (_, (_, b)) = id(((), (FooEnum::VariantBar, 0x1338)));
    b
}

fn id<T>(x: T) -> T {
    x
}