ignore zst offsets instead

Erik Desjardins 2020-08-16 19:25:39 -04:00
parent e9bc3ddb07
commit 68217c9e0f
2 changed files with 78 additions and 89 deletions


@@ -93,15 +93,29 @@ pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         let effective_field_align = self.align.restrict_for_offset(offset);
         let mut simple = || {
-            // Unions and newtypes only use an offset of 0.
-            let llval = if offset.bytes() == 0 {
-                self.llval
-            } else if let Abi::ScalarPair(ref a, ref b) = self.layout.abi {
-                // Offsets have to match either first or second field.
-                assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
-                bx.struct_gep(self.llval, 1)
-            } else {
-                bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
-            };
+            let llval = match self.layout.abi {
+                _ if offset.bytes() == 0 => {
+                    // Unions and newtypes only use an offset of 0.
+                    // Also handles the first field of Scalar and ScalarPair layouts.
+                    self.llval
+                }
+                Abi::ScalarPair(ref a, ref b)
+                    if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) =>
+                {
+                    // Offset matches second field.
+                    bx.struct_gep(self.llval, 1)
+                }
+                Abi::ScalarPair(..) | Abi::Scalar(_) => {
+                    // ZST fields are not included in Scalar and ScalarPair layouts, so manually offset the pointer.
+                    assert!(
+                        field.is_zst(),
+                        "non-ZST field offset does not match layout: {:?}",
+                        field
+                    );
+                    let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
+                    bx.gep(byte_ptr, &[bx.const_usize(offset.bytes())])
+                }
+                _ => bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix)),
+            };
             PlaceRef {
                 // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
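
As a hedged illustration of the case the new ScalarPair/Scalar fallback arm handles (the type `S` and its fields are hypothetical, not taken from this commit): a potentially-unsized last field is not moved to the front by field reordering, so a ZST tail can end up at a nonzero offset even though the layout's ABI has no slot for it, and projecting that field has to offset the base pointer by hand.

// Illustration only: for `S<()>`, the ZST `tail` sits at offset 8, while the ABI of
// the struct can be unpacked to a plain scalar (the u64), which has no GEP index
// for `tail`. Codegen therefore casts the base pointer to i8* and adds the byte
// offset, as the match arm above does.
struct S<T: ?Sized> {
    x: u64,
    tail: T,
}

fn main() {
    // The ZST tail adds neither size nor alignment.
    assert_eq!(std::mem::size_of::<S<()>>(), 8);
    assert_eq!(std::mem::align_of::<S<()>>(), 8);
}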


@@ -289,32 +289,25 @@ fn univariant_uninterned(
         let optimize = !repr.inhibit_struct_field_reordering_opt();
         if optimize {
+            let end =
+                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
+            let optimizing = &mut inverse_memory_index[..end];
             let field_align = |f: &TyAndLayout<'_>| {
                 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
             };
             match kind {
-                StructKind::AlwaysSized => {
-                    inverse_memory_index.sort_by_key(|&x| {
+                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
+                    optimizing.sort_by_key(|&x| {
                         // Place ZSTs first to avoid "interesting offsets",
                         // especially with only one or two non-ZST fields.
                         let f = &fields[x as usize];
                         (!f.is_zst(), cmp::Reverse(field_align(f)))
                     });
                 }
-                StructKind::MaybeUnsized => {
-                    // Sort in descending alignment, except for the last field,
-                    // which may be accessed through an unsized type.
-                    inverse_memory_index[..fields.len() - 1]
-                        .sort_by_key(|&x| cmp::Reverse(field_align(&fields[x as usize])));
-                    // Place ZSTs first to avoid "interesting offsets".
-                    // This will reorder the last field if it is a ZST, which is okay because
-                    // there's nothing in memory that could be accessed through an unsized type.
-                    inverse_memory_index.sort_by_key(|&x| !fields[x as usize].is_zst());
-                }
                 StructKind::Prefixed(..) => {
                     // Sort in ascending alignment so that the layout stay optimal
                     // regardless of the prefix
-                    inverse_memory_index.sort_by_key(|&x| field_align(&fields[x as usize]));
+                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                 }
             }
         }
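
A toy model of the reordering arm above, in ordinary Rust rather than compiler-internal types (the `FieldInfo` struct and the example fields are made up): applying the same sort key to an identity permutation shows ZSTs moving to the front and the remaining fields sorting by descending alignment; for a MaybeUnsized struct only the slice excluding the last field would be sorted.

use std::cmp::Reverse;

// Made-up stand-in for a field's layout; just enough for the sort key.
#[derive(Clone, Copy)]
struct FieldInfo {
    name: &'static str,
    size: u64,
    align: u64,
}

fn main() {
    let fields = [
        FieldInfo { name: "a", size: 1, align: 1 },
        FieldInfo { name: "marker", size: 0, align: 1 }, // ZST
        FieldInfo { name: "b", size: 4, align: 4 },
    ];
    // `inverse_memory_index` starts as the identity permutation and is sorted with
    // the same key as the AlwaysSized | MaybeUnsized arm: ZSTs first, then
    // descending alignment.
    let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
    inverse_memory_index.sort_by_key(|&x| {
        let f = fields[x as usize];
        (f.size != 0, Reverse(f.align))
    });
    let order: Vec<_> = inverse_memory_index.iter().map(|&x| fields[x as usize].name).collect();
    assert_eq!(order, ["marker", "b", "a"]);
}
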
@@ -397,18 +390,14 @@ fn univariant_uninterned(
         // Unpack newtype ABIs and find scalar pairs.
         if sized && size.bytes() > 0 {
-            // All other fields must be ZSTs, and we need them to all start at 0.
-            let mut zst_offsets = offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
-            if zst_offsets.all(|(_, o)| o.bytes() == 0) {
+            // All other fields must be ZSTs.
             let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
             match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                 // We have exactly one non-ZST field.
                 (Some((i, field)), None, None) => {
                     // Field fills the struct and it has a scalar or scalar pair ABI.
-                        if offsets[i].bytes() == 0
-                            && align.abi == field.align.abi
-                            && size == field.size
+                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                     {
                         match field.abi {
                             // For plain scalars, or vectors of them, we can't unpack
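
The single-non-ZST-field case above is the newtype-style unpacking; a hedged sketch (the `Wrapper` type is hypothetical, not from this commit): the one non-ZST field starts at offset 0, has the struct's alignment, and covers its whole size, so the struct takes on the field's scalar ABI, and with this change the ZST marker no longer has to sit at offset 0 for that to apply.

use std::marker::PhantomData;

// Illustration only: `value` alone satisfies the offset/align/size checks above,
// so `Wrapper<T>` can reuse the scalar ABI of `u64`; `_marker` is a ZST.
struct Wrapper<T> {
    value: u64,
    _marker: PhantomData<T>,
}

fn main() {
    assert_eq!(std::mem::size_of::<Wrapper<String>>(), std::mem::size_of::<u64>());
    assert_eq!(std::mem::align_of::<Wrapper<String>>(), std::mem::align_of::<u64>());
}
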
@@ -428,26 +417,13 @@ fn univariant_uninterned(
                 // Two non-ZST fields, and they're both scalars.
                 (
-                        Some((
-                            i,
-                            &TyAndLayout {
-                                layout: &Layout { abi: Abi::Scalar(ref a), .. }, ..
-                            },
-                        )),
-                        Some((
-                            j,
-                            &TyAndLayout {
-                                layout: &Layout { abi: Abi::Scalar(ref b), .. }, ..
-                            },
-                        )),
+                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
+                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
                     None,
                 ) => {
                     // Order by the memory placement, not source order.
-                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
-                            ((i, a), (j, b))
-                        } else {
-                            ((j, b), (i, a))
-                        };
+                    let ((i, a), (j, b)) =
+                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
                     let pair = self.scalar_pair(a.clone(), b.clone());
                     let pair_offsets = match pair.fields {
                         FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
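
For the two-scalar-field case, a small hypothetical example (`Pair` is not from the commit): the scalar pair is built from whichever field ends up at the lower offset, which after reordering is typically the more-aligned field rather than the one declared first.

// Illustration only: on current compilers the default layout tends to place the
// more-aligned `b` at offset 0 and `a` after it, so the pair is formed in memory
// order, not declaration order (field order under the default repr is not a
// stable guarantee).
struct Pair {
    a: u32,
    b: u64,
}

fn main() {
    // 8 bytes for `b`, 4 for `a`, padded up to the 8-byte alignment.
    assert_eq!(std::mem::size_of::<Pair>(), 16);
    assert_eq!(std::mem::align_of::<Pair>(), 8);
}
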
@@ -470,7 +446,6 @@ fn univariant_uninterned(
                 _ => {}
             }
         }
-        }
         if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
             abi = Abi::Uninhabited;