don't promote large fields to higher alignments if that would affect niche placement

Author: The 8472
Date:   2023-03-05 16:15:16 +01:00
parent faf2da3e2f
commit 4907dac54c
2 changed files with 42 additions and 13 deletions
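
When optimizing field order, `univariant()` groups each field by an "effective alignment" of log2(max(align, size)), so a large field such as `[u8; 8]` was treated as if it were align-8 even though its real alignment is 1. When another field carries a niche (a `char`, say), that promotion could push the niche-bearing field away from the front of the struct. This change caps the size-derived alignment at the largest real field alignment whenever any field has a niche, so niche-bearing fields keep sorting toward offset 0.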

@@ -772,19 +772,6 @@ fn univariant(
 if optimize {
     let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
     let optimizing = &mut inverse_memory_index.raw[..end];
-    let effective_field_align = |layout: Layout<'_>| {
-        if let Some(pack) = pack {
-            // return the packed alignment in bytes
-            layout.align().abi.min(pack).bytes()
-        } else {
-            // returns log2(effective-align).
-            // This is ok since `pack` applies to all fields equally.
-            // The calculation assumes that size is an integer multiple of align, except for ZSTs.
-            //
-            // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
-            layout.align().abi.bytes().max(layout.size().bytes()).trailing_zeros() as u64
-        }
-    };
     // If `-Z randomize-layout` was enabled for the type definition we can shuffle
     // the field ordering to try and catch some code making assumptions about layouts
     // we don't guarantee
@@ -801,6 +788,30 @@ fn univariant(
     }
     // Otherwise we just leave things alone and actually optimize the type's fields
 } else {
+    let max_field_align = fields.iter().map(|f| f.align().abi.bytes()).max().unwrap_or(1);
+    let any_niche = fields.iter().any(|f| f.largest_niche().is_some());
+    let effective_field_align = |layout: Layout<'_>| {
+        if let Some(pack) = pack {
+            // return the packed alignment in bytes
+            layout.align().abi.min(pack).bytes()
+        } else {
+            // returns log2(effective-align).
+            // This is ok since `pack` applies to all fields equally.
+            // The calculation assumes that size is an integer multiple of align, except for ZSTs.
+            //
+            // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
+            let align = layout.align().abi.bytes();
+            let size = layout.size().bytes();
+            let size_as_align = align.max(size).trailing_zeros();
+            let size_as_align = if any_niche {
+                max_field_align.trailing_zeros().min(size_as_align)
+            } else {
+                size_as_align
+            };
+            size_as_align as u64
+        }
+    };
+
     match kind {
         StructKind::AlwaysSized | StructKind::MaybeUnsized => {
             optimizing.sort_by_key(|&x| {
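
To make the new grouping rule concrete, here is a minimal standalone sketch of the alignment component of the sort key; the function name and signature are ours for illustration, not rustc's, and the real sort key also considers ZSTs and which field actually holds the niche.

// Log2 of the "effective" alignment used to group fields, mirroring
// the non-packed branch of `effective_field_align` above.
fn size_as_align(size: u64, align: u64, max_field_align: u64, any_niche: bool) -> u32 {
    // A field is grouped as if aligned to max(align, size),
    // e.g. [u8; 4] groups with align-4 fields.
    let grouped = align.max(size).trailing_zeros();
    if any_niche {
        // New in this commit: never promote past the largest *real* field
        // alignment, so large arrays cannot displace niche-bearing fields.
        grouped.min(max_field_align.trailing_zeros())
    } else {
        grouped
    }
}

fn main() {
    // [u8; 6] (size 6, align 1) groups with align-2 fields, as before.
    assert_eq!(size_as_align(6, 1, 8, false), 1);
    // Without a niche, [u8; 8] groups with align-8 fields...
    assert_eq!(size_as_align(8, 1, 4, false), 3);
    // ...but if any field has a niche and the largest real field alignment
    // is 4 (e.g. u32/char), it is capped at the align-4 group.
    assert_eq!(size_as_align(8, 1, 4, true), 2);
}

This cap is exactly what the test below relies on: in ReorderWithNiche the largest real field alignment is 4, so [u8; 8] may only be grouped as align-4 and the char can sort to the front.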

@@ -186,6 +186,18 @@ struct Reorder2 {
     ary: [u8; 6],
 }
+
+// We want the niche in the front, which means we can't treat the array as quasi-aligned more than
+// 4 bytes even though we also want to place it at an 8-aligned offset where possible.
+// So the ideal layout would look like: (char, u32, [u8; 8], u8)
+// The current layout algorithm does (char, [u8; 8], u32, u8)
+#[repr(align(8))]
+struct ReorderWithNiche {
+    a: u32,
+    b: char,
+    c: u8,
+    ary: [u8; 8]
+}

 // standins for std types which we want to be laid out in a reasonable way
 struct RawVecDummy {
     ptr: NonNull<u8>,
@@ -298,4 +310,10 @@ pub fn main() {
     assert!(ptr::from_ref(&b.1).addr() > ptr::from_ref(&b.2).addr());
     assert_eq!(size_of::<Cow<'static, str>>(), size_of::<String>());
+
+    let v = ReorderWithNiche {a: 0, b: ' ', c: 0, ary: [0; 8]};
+    assert!((&v.ary).as_ptr().is_aligned_to(4),
+            "here [u8; 8] should group with _at least_ align-4 fields");
+    assert_eq!(ptr::from_ref(&v), ptr::from_ref(&v.b).cast(),
+               "sort niches to the front where possible");
 }
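
For a quick look at the resulting layout outside the test suite, a small program of our own (field offsets of default-repr structs are not guaranteed, so it prints instead of asserting) can dump each field's offset; per the assertions above, b, the niche-carrying char, is expected at offset 0 and ary at a 4-aligned offset:

use std::mem::offset_of;

#[allow(dead_code)]
#[repr(align(8))]
struct ReorderWithNiche {
    a: u32,
    b: char,
    c: u8,
    ary: [u8; 8],
}

fn main() {
    // Offsets chosen by the layout algorithm; expected per the test:
    // b at 0, ary at a multiple of 4.
    println!("a:   {}", offset_of!(ReorderWithNiche, a));
    println!("b:   {}", offset_of!(ReorderWithNiche, b));
    println!("c:   {}", offset_of!(ReorderWithNiche, c));
    println!("ary: {}", offset_of!(ReorderWithNiche, ary));
}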