Auto merge of #103693 - HKalbasi:master, r=oli-obk
Make rustc_target usable outside of rustc. I'm working on showing type sizes in rust-analyzer (https://github.com/rust-lang/rust-analyzer/pull/13490), and I currently copy rustc code into rust-analyzer, which works but is bad. With this change, I become able to use `rustc_target` and `rustc_index` directly in r-a, reducing the amount of copying needed. This PR adds feature flags that gate nightly-only features so the crates build on the stable compiler, makes the layout-related types generic over the variant index type, and removes interning of nested layouts.
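To give a sense of what this enables, here is a minimal sketch (not part of this commit; the crate re-exports, the `AnalyzerLayoutCx` type, and the use of `Arc` are assumptions for illustration) of a consumer such as rust-analyzer depending on `rustc_abi` with `default-features = false` and implementing the new `LayoutCalculator` trait shown in layout.rs below:

use std::sync::Arc;

// Assumed re-exports; in this commit the trait is defined in rustc_abi's layout module.
use rustc_abi::{LayoutCalculator, TargetDataLayout};

struct AnalyzerLayoutCx {
    // Hypothetical field: the consumer keeps the parsed data layout of the current target.
    target: Arc<TargetDataLayout>,
}

impl LayoutCalculator for AnalyzerLayoutCx {
    // `Arc<TargetDataLayout>` implements `Borrow<TargetDataLayout>`, satisfying the bound.
    type TargetDataLayoutRef = Arc<TargetDataLayout>;

    fn delay_bug(&self, txt: &str) {
        // There is no delayed-bug machinery outside rustc; logging is a reasonable stand-in.
        eprintln!("layout bug: {txt}");
    }

    fn current_data_layout(&self) -> Self::TargetDataLayoutRef {
        self.target.clone()
    }
}

With those two methods supplied, the trait's provided methods (`univariant`, `layout_of_struct_or_enum`, `layout_of_union`, and so on, in layout.rs below) become available for any index type implementing `Idx`.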
This commit is contained in: commit b3bc6bf312

Cargo.lock (18 lines changed)
@@ -3202,6 +3202,20 @@ dependencies = [
 "winapi",
]

[[package]]
name = "rustc_abi"
version = "0.0.0"
dependencies = [
 "bitflags",
 "rand 0.8.5",
 "rand_xoshiro",
 "rustc_data_structures",
 "rustc_index",
 "rustc_macros",
 "rustc_serialize",
 "tracing",
]

[[package]]
name = "rustc_apfloat"
version = "0.0.0"
@@ -4281,6 +4295,7 @@ name = "rustc_target"
version = "0.0.0"
dependencies = [
 "bitflags",
 "rustc_abi",
 "rustc_data_structures",
 "rustc_feature",
 "rustc_index",
@@ -4336,6 +4351,7 @@ dependencies = [
 "rustc_infer",
 "rustc_middle",
 "rustc_span",
 "rustc_target",
 "rustc_trait_selection",
 "smallvec",
 "tracing",
@@ -4360,8 +4376,6 @@ dependencies = [
name = "rustc_ty_utils"
version = "0.0.0"
dependencies = [
 "rand 0.8.5",
 "rand_xoshiro",
 "rustc_data_structures",
 "rustc_errors",
 "rustc_hir",
compiler/rustc_abi/Cargo.toml (new file, 24 lines)
@@ -0,0 +1,24 @@
[package]
name = "rustc_abi"
version = "0.0.0"
edition = "2021"

[dependencies]
bitflags = "1.2.1"
tracing = "0.1"
rand = { version = "0.8.4", default-features = false, optional = true }
rand_xoshiro = { version = "0.6.0", optional = true }
rustc_data_structures = { path = "../rustc_data_structures", optional = true }
rustc_index = { path = "../rustc_index", default-features = false }
rustc_macros = { path = "../rustc_macros", optional = true }
rustc_serialize = { path = "../rustc_serialize", optional = true }

[features]
default = ["nightly", "randomize"]
randomize = ["rand", "rand_xoshiro"]
nightly = [
    "rustc_data_structures",
    "rustc_index/nightly",
    "rustc_macros",
    "rustc_serialize",
]
compiler/rustc_abi/src/layout.rs (new file, 947 lines)
@@ -0,0 +1,947 @@
use super::*;
use std::{
    borrow::Borrow,
    cmp,
    fmt::Debug,
    iter,
    ops::{Bound, Deref},
};

#[cfg(feature = "randomize")]
use rand::{seq::SliceRandom, SeedableRng};
#[cfg(feature = "randomize")]
use rand_xoshiro::Xoshiro128StarStar;

use tracing::debug;

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}
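// Worked example (illustrative, not part of this commit): if
// memory_index = [2, 0, 1] sends source field 0 to memory slot 2,
// then invert_mapping(&[2, 0, 1]) returns [1, 2, 0]: memory slot 0
// holds source field 1, slot 1 holds field 2, and slot 2 holds field 0.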

pub trait LayoutCalculator {
    type TargetDataLayoutRef: Borrow<TargetDataLayout>;

    fn delay_bug(&self, txt: &str);
    fn current_data_layout(&self) -> Self::TargetDataLayoutRef;

    fn scalar_pair<V: Idx>(&self, a: Scalar, b: Scalar) -> LayoutS<V> {
        let dl = self.current_data_layout();
        let dl = dl.borrow();
        let b_align = b.align(dl);
        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        LayoutS {
            variants: Variants::Single { index: V::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant<'a, V: Idx, F: Deref<Target = &'a LayoutS<V>> + Debug>(
        &self,
        dl: &TargetDataLayout,
        fields: &[F],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Option<LayoutS<V>> {
        let pack = repr.pack;
        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let effective_field_align = |f: &F| {
                if let Some(pack) = pack {
                    // return the packed alignment in bytes
                    f.align.abi.min(pack).bytes()
                } else {
                    // returns log2(effective-align).
                    // This is ok since `pack` applies to all fields equally.
                    // The calculation assumes that size is an integer multiple of align, except for ZSTs.
                    //
                    // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
                    f.align.abi.bytes().max(f.size.bytes()).trailing_zeros() as u64
                }
            };
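            // Illustration (not from the original source): a `[u8; 4]` field has
            // align 1 and size 4, so max(1, 4).trailing_zeros() == 2, the same
            // sort key as a plain align-4 field; likewise `[u8; 6]` (align 1,
            // size 6) yields trailing_zeros(6) == 1, grouping it with align-2 fields.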

            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
            // the field ordering to try and catch some code making assumptions about layouts
            // we don't guarantee
            if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
                #[cfg(feature = "randomize")]
                {
                    // `ReprOptions.layout_seed` is a deterministic seed that we can use to
                    // randomize field ordering with
                    let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

                    // Shuffle the ordering of the fields
                    optimizing.shuffle(&mut rng);
                }
                // Otherwise we just leave things alone and actually optimize the type's fields
            } else {
                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            // Then place largest alignments first, largest niches within an alignment group last
                            let f = &fields[x as usize];
                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
                            (!f.is_zst(), cmp::Reverse(effective_field_align(f)), niche_size)
                        });
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix.
                        // And put the largest niche in an alignment group at the end
                        // so it can be used as discriminant in jagged enums
                        optimizing.sort_by_key(|&x| {
                            let f = &fields[x as usize];
                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
                            (effective_field_align(f), niche_size)
                        });
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                // regardless of the status of `-Z randomize-layout`
            }
        }
        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).
        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;
        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }
        for &i in &inverse_memory_index {
            let field = &fields[i as usize];
            if !sized {
                self.delay_bug(&format!(
                    "univariant: field #{} comes after unsized field",
                    offsets.len(),
                ));
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if let Some(mut niche) = field.largest_niche {
                let available = niche.available(dl);
                if available > largest_niche_available {
                    largest_niche_available = available;
                    niche.offset += offset;
                    largest_niche = Some(niche);
                }
            }

            offset = offset.checked_add(field.size, dl)?;
        }
        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }
        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;
        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.
        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };
        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (Some((i, a)), Some((j, b)), None) => {
                    match (a.abi, b.abi) {
                        (Abi::Scalar(a), Abi::Scalar(b)) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair = self.scalar_pair::<V>(a, b);
                            let pair_offsets = match pair.fields {
                                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                    assert_eq!(memory_index, &[0, 1]);
                                    offsets
                                }
                                _ => panic!(),
                            };
                            if offsets[i] == pair_offsets[0]
                                && offsets[j] == pair_offsets[1]
                                && align == pair.align
                                && size == pair.size
                            {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.abi;
                            }
                        }
                        _ => {}
                    }
                }

                _ => {}
            }
        }
        if fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }
        Some(LayoutS {
            variants: Variants::Single { index: V::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_of_never_type<V: Idx>(&self) -> LayoutS<V> {
        let dl = self.current_data_layout();
        let dl = dl.borrow();
        LayoutS {
            variants: Variants::Single { index: V::new(0) },
            fields: FieldsShape::Primitive,
            abi: Abi::Uninhabited,
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
        }
    }

    fn layout_of_struct_or_enum<'a, V: Idx, F: Deref<Target = &'a LayoutS<V>> + Debug>(
        &self,
        repr: &ReprOptions,
        variants: &IndexVec<V, Vec<F>>,
        is_enum: bool,
        is_unsafe_cell: bool,
        scalar_valid_range: (Bound<u128>, Bound<u128>),
        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
        discriminants: impl Iterator<Item = (V, i128)>,
        niche_optimize_enum: bool,
        always_sized: bool,
    ) -> Option<LayoutS<V>> {
        let dl = self.current_data_layout();
        let dl = dl.borrow();

        let scalar_unit = |value: Primitive| {
            let size = value.size(dl);
            assert!(size.bits() <= 128);
            Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
        };

        // A variant is absent if it's uninhabited and only has ZST fields.
        // Present uninhabited variants only require space for their fields,
        // but *not* an encoding of the discriminant (e.g., a tag value).
        // See issue #49298 for more details on the need to leave space
        // for non-ZST uninhabited data (mostly partial initialization).
        let absent = |fields: &[F]| {
            let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
            let is_zst = fields.iter().all(|f| f.is_zst());
            uninhabited && is_zst
        };
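        // Illustrative example (not in the original source): in
        // `enum E { A(core::convert::Infallible), B }`, variant `A` is
        // uninhabited and its only field is a ZST, so `A` is absent and
        // needs neither payload space nor a tag value.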
        let (present_first, present_second) = {
            let mut present_variants = variants
                .iter_enumerated()
                .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
            (present_variants.next(), present_variants.next())
        };
        let present_first = match present_first {
            Some(present_first) => present_first,
            // Uninhabited because it has no variants, or only absent ones.
            None if is_enum => {
                return Some(self.layout_of_never_type());
            }
            // If it's a struct, still compute a layout so that we can still compute the
            // field offsets.
            None => V::new(0),
        };

        let is_struct = !is_enum ||
            // Only one variant is present.
            (present_second.is_none() &&
            // Representation optimizations are allowed.
            !repr.inhibit_enum_layout_opt());
        if is_struct {
            // Struct, or univariant enum equivalent to a struct.
            // (Typechecking will reject discriminant-sizing attrs.)

            let v = present_first;
            let kind = if is_enum || variants[v].is_empty() {
                StructKind::AlwaysSized
            } else {
                if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized }
            };

            let mut st = self.univariant(dl, &variants[v], &repr, kind)?;
            st.variants = Variants::Single { index: v };

            if is_unsafe_cell {
                let hide_niches = |scalar: &mut _| match scalar {
                    Scalar::Initialized { value, valid_range } => {
                        *valid_range = WrappingRange::full(value.size(dl))
                    }
                    // Already doesn't have any niches
                    Scalar::Union { .. } => {}
                };
                match &mut st.abi {
                    Abi::Uninhabited => {}
                    Abi::Scalar(scalar) => hide_niches(scalar),
                    Abi::ScalarPair(a, b) => {
                        hide_niches(a);
                        hide_niches(b);
                    }
                    Abi::Vector { element, count: _ } => hide_niches(element),
                    Abi::Aggregate { sized: _ } => {}
                }
                st.largest_niche = None;
                return Some(st);
            }

            let (start, end) = scalar_valid_range;
            match st.abi {
                Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                    // the asserts ensure that we are not using the
                    // `#[rustc_layout_scalar_valid_range(n)]`
                    // attribute to widen the range of anything as that would probably
                    // result in UB somewhere
                    // FIXME(eddyb) the asserts are probably not needed,
                    // as larger validity ranges would result in missed
                    // optimizations, *not* wrongly assuming the inner
                    // value is valid. e.g. unions enlarge validity ranges,
                    // because the values may be uninitialized.
                    if let Bound::Included(start) = start {
                        // FIXME(eddyb) this might be incorrect - it doesn't
                        // account for wrap-around (end < start) ranges.
                        let valid_range = scalar.valid_range_mut();
                        assert!(valid_range.start <= start);
                        valid_range.start = start;
                    }
                    if let Bound::Included(end) = end {
                        // FIXME(eddyb) this might be incorrect - it doesn't
                        // account for wrap-around (end < start) ranges.
                        let valid_range = scalar.valid_range_mut();
                        assert!(valid_range.end >= end);
                        valid_range.end = end;
                    }

                    // Update `largest_niche` if we have introduced a larger niche.
                    let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
                    if let Some(niche) = niche {
                        match st.largest_niche {
                            Some(largest_niche) => {
                                // Replace the existing niche even if they're equal,
                                // because this one is at a lower offset.
                                if largest_niche.available(dl) <= niche.available(dl) {
                                    st.largest_niche = Some(niche);
                                }
                            }
                            None => st.largest_niche = Some(niche),
                        }
                    }
                }
                _ => assert!(
                    start == Bound::Unbounded && end == Bound::Unbounded,
                    "nonscalar layout for layout_scalar_valid_range type: {:#?}",
                    st,
                ),
            }

            return Some(st);
        }

        // At this point, we have handled all unions and
        // structs. (We have also handled univariant enums
        // that allow representation optimization.)
        assert!(is_enum);

        // Until we've decided whether to use the tagged or
        // niche filling LayoutS, we don't want to intern the
        // variant layouts, so we can't store them in the
        // overall LayoutS. Store the overall LayoutS
        // and the variant LayoutSs here until then.
        struct TmpLayout<V: Idx> {
            layout: LayoutS<V>,
            variants: IndexVec<V, LayoutS<V>>,
        }

        let calculate_niche_filling_layout = || -> Option<TmpLayout<V>> {
            if niche_optimize_enum {
                return None;
            }

            if variants.len() < 2 {
                return None;
            }

            let mut align = dl.aggregate_align;
            let mut variant_layouts = variants
                .iter_enumerated()
                .map(|(j, v)| {
                    let mut st = self.univariant(dl, v, &repr, StructKind::AlwaysSized)?;
                    st.variants = Variants::Single { index: j };

                    align = align.max(st.align);

                    Some(st)
                })
                .collect::<Option<IndexVec<V, _>>>()?;

            let largest_variant_index = variant_layouts
                .iter_enumerated()
                .max_by_key(|(_i, layout)| layout.size.bytes())
                .map(|(i, _layout)| i)?;

            let all_indices = (0..=variants.len() - 1).map(V::new);
            let needs_disc = |index: V| index != largest_variant_index && !absent(&variants[index]);
            let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap().index()
                ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap().index();

            let count = niche_variants.size_hint().1.unwrap() as u128;

            // Find the field with the largest niche
            let (field_index, niche, (niche_start, niche_scalar)) = variants[largest_variant_index]
                .iter()
                .enumerate()
                .filter_map(|(j, field)| Some((j, field.largest_niche?)))
                .max_by_key(|(_, niche)| niche.available(dl))
                .and_then(|(j, niche)| Some((j, niche, niche.reserve(dl, count)?)))?;
            let niche_offset =
                niche.offset + variant_layouts[largest_variant_index].fields.offset(field_index);
            let niche_size = niche.value.size(dl);
            let size = variant_layouts[largest_variant_index].size.align_to(align.abi);

            let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
                if i == largest_variant_index {
                    return true;
                }

                layout.largest_niche = None;

                if layout.size <= niche_offset {
                    // This variant will fit before the niche.
                    return true;
                }

                // Determine if it'll fit after the niche.
                let this_align = layout.align.abi;
                let this_offset = (niche_offset + niche_size).align_to(this_align);

                if this_offset + layout.size > size {
                    return false;
                }

                // It'll fit, but we need to make some adjustments.
                match layout.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for (j, offset) in offsets.iter_mut().enumerate() {
                            if !variants[i][j].is_zst() {
                                *offset += this_offset;
                            }
                        }
                    }
                    _ => {
                        panic!("Layout of fields should be Arbitrary for variants")
                    }
                }

                // It can't be a Scalar or ScalarPair because the offset isn't 0.
                if !layout.abi.is_uninhabited() {
                    layout.abi = Abi::Aggregate { sized: true };
                }
                layout.size += this_offset;

                true
            });

            if !all_variants_fit {
                return None;
            }

            let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);

            let others_zst = variant_layouts
                .iter_enumerated()
                .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
            let same_size = size == variant_layouts[largest_variant_index].size;
            let same_align = align == variant_layouts[largest_variant_index].align;

            let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
                Abi::Uninhabited
            } else if same_size && same_align && others_zst {
                match variant_layouts[largest_variant_index].abi {
                    // When the total alignment and size match, we can use the
                    // same ABI as the scalar variant with the reserved niche.
                    Abi::Scalar(_) => Abi::Scalar(niche_scalar),
                    Abi::ScalarPair(first, second) => {
                        // Only the niche is guaranteed to be initialised,
                        // so use union layouts for the other primitive.
                        if niche_offset == Size::ZERO {
                            Abi::ScalarPair(niche_scalar, second.to_union())
                        } else {
                            Abi::ScalarPair(first.to_union(), niche_scalar)
                        }
                    }
                    _ => Abi::Aggregate { sized: true },
                }
            } else {
                Abi::Aggregate { sized: true }
            };

            let layout = LayoutS {
                variants: Variants::Multiple {
                    tag: niche_scalar,
                    tag_encoding: TagEncoding::Niche {
                        untagged_variant: largest_variant_index,
                        niche_variants: (V::new(*niche_variants.start())
                            ..=V::new(*niche_variants.end())),
                        niche_start,
                    },
                    tag_field: 0,
                    variants: IndexVec::new(),
                },
                fields: FieldsShape::Arbitrary {
                    offsets: vec![niche_offset],
                    memory_index: vec![0],
                },
                abi,
                largest_niche,
                size,
                align,
            };

            Some(TmpLayout { layout, variants: variant_layouts })
        };

        let niche_filling_layout = calculate_niche_filling_layout();

        let (mut min, mut max) = (i128::MAX, i128::MIN);
        let discr_type = repr.discr_type();
        let bits = Integer::from_attr(dl, discr_type).size().bits();
        for (i, mut val) in discriminants {
            if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                continue;
            }
            if discr_type.is_signed() {
                // sign extend the raw representation to be an i128
                val = (val << (128 - bits)) >> (128 - bits);
            }
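            // For instance (illustrative, not part of this commit): with an i8
            // repr, bits == 8, so a raw value of 0xff becomes
            // (0xff << 120) >> 120 == -1 as an i128.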
            if val < min {
                min = val;
            }
            if val > max {
                max = val;
            }
        }
        // We might have no inhabited variants, so pretend there's at least one.
        if (min, max) == (i128::MAX, i128::MIN) {
            min = 0;
            max = 0;
        }
        assert!(min <= max, "discriminant range is {}...{}", min, max);
        let (min_ity, signed) = discr_range_of_repr(min, max); //Integer::repr_discr(tcx, ty, &repr, min, max);

        let mut align = dl.aggregate_align;
        let mut size = Size::ZERO;

        // We're interested in the smallest alignment, so start large.
        let mut start_align = Align::from_bytes(256).unwrap();
        assert_eq!(Integer::for_align(dl, start_align), None);

        // repr(C) on an enum tells us to make a (tag, union) layout,
        // so we need to grow the prefix alignment to be at least
        // the alignment of the union. (This value is used both for
        // determining the alignment of the overall enum, and the
        // determining the alignment of the payload after the tag.)
        let mut prefix_align = min_ity.align(dl).abi;
        if repr.c() {
            for fields in variants {
                for field in fields {
                    prefix_align = prefix_align.max(field.align.abi);
                }
            }
        }

        // Create the set of structs that represent each variant.
        let mut layout_variants = variants
            .iter_enumerated()
            .map(|(i, field_layouts)| {
                let mut st = self.univariant(
                    dl,
                    &field_layouts,
                    &repr,
                    StructKind::Prefixed(min_ity.size(), prefix_align),
                )?;
                st.variants = Variants::Single { index: i };
                // Find the first field we can't move later
                // to make room for a larger discriminant.
                for field in st.fields.index_by_increasing_offset().map(|j| &field_layouts[j]) {
                    if !field.is_zst() || field.align.abi.bytes() != 1 {
                        start_align = start_align.min(field.align.abi);
                        break;
                    }
                }
                size = cmp::max(size, st.size);
                align = align.max(st.align);
                Some(st)
            })
            .collect::<Option<IndexVec<V, _>>>()?;

        // Align the maximum variant size to the largest alignment.
        size = size.align_to(align.abi);

        if size.bytes() >= dl.obj_size_bound() {
            return None;
        }

        let typeck_ity = Integer::from_attr(dl, repr.discr_type());
        if typeck_ity < min_ity {
            // It is a bug if Layout decided on a greater discriminant size than typeck for
            // some reason at this point (based on values discriminant can take on). Mostly
            // because this discriminant will be loaded, and then stored into variable of
            // type calculated by typeck. Consider such case (a bug): typeck decided on
            // byte-sized discriminant, but layout thinks we need a 16-bit to store all
            // discriminant values. That would be a bug, because then, in codegen, in order
            // to store this 16-bit discriminant into 8-bit sized temporary some of the
            // space necessary to represent would have to be discarded (or layout is wrong
            // on thinking it needs 16 bits)
            panic!(
                "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                min_ity, typeck_ity
            );
            // However, it is fine to make discr type however large (as an optimisation)
            // after this point – we’ll just truncate the value we load in codegen.
        }

        // Check to see if we should use a different type for the
        // discriminant. We can safely use a type with the same size
        // as the alignment of the first field of each variant.
        // We increase the size of the discriminant to avoid LLVM copying
        // padding when it doesn't need to. This normally causes unaligned
        // load/stores and excessive memcpy/memset operations. By using a
        // bigger integer size, LLVM can be sure about its contents and
        // won't be so conservative.

        // Use the initial field alignment
        let mut ity = if repr.c() || repr.int.is_some() {
            min_ity
        } else {
            Integer::for_align(dl, start_align).unwrap_or(min_ity)
        };

        // If the alignment is not larger than the chosen discriminant size,
        // don't use the alignment as the final size.
        if ity <= min_ity {
            ity = min_ity;
        } else {
            // Patch up the variants' first few fields.
            let old_ity_size = min_ity.size();
            let new_ity_size = ity.size();
            for variant in &mut layout_variants {
                match variant.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for i in offsets {
                            if *i <= old_ity_size {
                                assert_eq!(*i, old_ity_size);
                                *i = new_ity_size;
                            }
                        }
                        // We might be making the struct larger.
                        if variant.size <= old_ity_size {
                            variant.size = new_ity_size;
                        }
                    }
                    _ => panic!(),
                }
            }
        }

        let tag_mask = ity.size().unsigned_int_max();
        let tag = Scalar::Initialized {
            value: Int(ity, signed),
            valid_range: WrappingRange {
                start: (min as u128 & tag_mask),
                end: (max as u128 & tag_mask),
            },
        };
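        // Illustration (not from the original source): for
        // `enum E { A = -1, B = 0, C = 1 }` with an i8 tag, tag_mask == 0xff,
        // so the valid range wraps: start == 0xff (-1 truncated) and end == 1.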
        let mut abi = Abi::Aggregate { sized: true };

        if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        } else if tag.size(dl) == size {
            // Make sure we only use scalar layout when the enum is entirely its
            // own tag (i.e. it has no padding nor any non-ZST variant fields).
            abi = Abi::Scalar(tag);
        } else {
            // Try to use a ScalarPair for all tagged enums.
            let mut common_prim = None;
            let mut common_prim_initialized_in_all_variants = true;
            for (field_layouts, layout_variant) in iter::zip(&*variants, &layout_variants) {
                let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
                    panic!();
                };
                let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
                let (field, offset) = match (fields.next(), fields.next()) {
                    (None, None) => {
                        common_prim_initialized_in_all_variants = false;
                        continue;
                    }
                    (Some(pair), None) => pair,
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                let prim = match field.abi {
                    Abi::Scalar(scalar) => {
                        common_prim_initialized_in_all_variants &=
                            matches!(scalar, Scalar::Initialized { .. });
                        scalar.primitive()
                    }
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                if let Some(pair) = common_prim {
                    // This is pretty conservative. We could go fancier
                    // by conflating things like i32 and u32, or even
                    // realising that (u8, u8) could just cohabit with
                    // u16 or even u32.
                    if pair != (prim, offset) {
                        common_prim = None;
                        break;
                    }
                } else {
                    common_prim = Some((prim, offset));
                }
            }
            if let Some((prim, offset)) = common_prim {
                let prim_scalar = if common_prim_initialized_in_all_variants {
                    scalar_unit(prim)
                } else {
                    // Common prim might be uninit.
                    Scalar::Union { value: prim }
                };
                let pair = self.scalar_pair::<V>(tag, prim_scalar);
                let pair_offsets = match pair.fields {
                    FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                        assert_eq!(memory_index, &[0, 1]);
                        offsets
                    }
                    _ => panic!(),
                };
                if pair_offsets[0] == Size::ZERO
                    && pair_offsets[1] == *offset
                    && align == pair.align
                    && size == pair.size
                {
                    // We can use `ScalarPair` only when it matches our
                    // already computed layout (including `#[repr(C)]`).
                    abi = pair.abi;
                }
            }
        }

        // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
        // variants to ensure they are consistent. This is because a downcast is
        // semantically a NOP, and thus should not affect layout.
        if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
            for variant in &mut layout_variants {
                // We only do this for variants with fields; the others are not accessed anyway.
                // Also do not overwrite any already existing "clever" ABIs.
                if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
                    variant.abi = abi;
                    // Also need to bump up the size and alignment, so that the entire value fits in here.
                    variant.size = cmp::max(variant.size, size);
                    variant.align.abi = cmp::max(variant.align.abi, align.abi);
                }
            }
        }

        let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);

        let tagged_layout = LayoutS {
            variants: Variants::Multiple {
                tag,
                tag_encoding: TagEncoding::Direct,
                tag_field: 0,
                variants: IndexVec::new(),
            },
            fields: FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] },
            largest_niche,
            abi,
            align,
            size,
        };

        let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };

        let mut best_layout = match (tagged_layout, niche_filling_layout) {
            (tl, Some(nl)) => {
                // Pick the smaller layout; otherwise,
                // pick the layout with the larger niche; otherwise,
                // pick tagged as it has simpler codegen.
                use cmp::Ordering::*;
                let niche_size = |tmp_l: &TmpLayout<V>| {
                    tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
                };
                match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
                    (Greater, _) => nl,
                    (Equal, Less) => nl,
                    _ => tl,
                }
            }
            (tl, None) => tl,
        };

        // Now we can intern the variant layouts and store them in the enum layout.
        best_layout.layout.variants = match best_layout.layout.variants {
            Variants::Multiple { tag, tag_encoding, tag_field, .. } => {
                Variants::Multiple { tag, tag_encoding, tag_field, variants: best_layout.variants }
            }
            _ => panic!(),
        };
        Some(best_layout.layout)
    }

    fn layout_of_union<'a, V: Idx, F: Deref<Target = &'a LayoutS<V>> + Debug>(
        &self,
        repr: &ReprOptions,
        variants: &IndexVec<V, Vec<F>>,
    ) -> Option<LayoutS<V>> {
        let dl = self.current_data_layout();
        let dl = dl.borrow();
        let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        let optimize = !repr.inhibit_union_abi_opt();
        let mut size = Size::ZERO;
        let mut abi = Abi::Aggregate { sized: true };
        let index = V::new(0);
        for field in &variants[index] {
            assert!(field.is_sized());
            align = align.max(field.align);

            // If all non-ZST fields have the same ABI, forward this ABI
            if optimize && !field.is_zst() {
                // Discard valid range information and allow undef
                let field_abi = match field.abi {
                    Abi::Scalar(x) => Abi::Scalar(x.to_union()),
                    Abi::ScalarPair(x, y) => Abi::ScalarPair(x.to_union(), y.to_union()),
                    Abi::Vector { element: x, count } => {
                        Abi::Vector { element: x.to_union(), count }
                    }
                    Abi::Uninhabited | Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
                };

                if size == Size::ZERO {
                    // first non ZST: initialize 'abi'
                    abi = field_abi;
                } else if abi != field_abi {
                    // different fields have different ABI: reset to Aggregate
                    abi = Abi::Aggregate { sized: true };
                }
            }

            size = cmp::max(size, field.size);
        }

        if let Some(pack) = repr.pack {
            align = align.min(AbiAndPrefAlign::new(pack));
        }

        Some(LayoutS {
            variants: Variants::Single { index },
            fields: FieldsShape::Union(NonZeroUsize::new(variants[index].len())?),
            abi,
            largest_niche: None,
            align,
            size: size.align_to(align.abi),
        })
    }
}
compiler/rustc_abi/src/lib.rs (new file, 1399 lines): file diff suppressed because it is too large.
@@ -33,7 +33,6 @@
 use rustc_middle::mir::mono::Linkage;
 use rustc_middle::ty::query::Providers;
 use rustc_middle::ty::util::{Discr, IntTypeExt};
-use rustc_middle::ty::ReprOptions;
 use rustc_middle::ty::{self, AdtKind, Const, DefIdTree, IsSuggestable, Ty, TyCtxt};
 use rustc_session::lint;
 use rustc_session::parse::feature_err;
@@ -860,7 +859,7 @@ fn adt_def<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> ty::AdtDef<'tcx> {
         bug!();
     };
 
-    let repr = ReprOptions::new(tcx, def_id.to_def_id());
+    let repr = tcx.repr_options_of_def(def_id.to_def_id());
     let (kind, variants) = match item.kind {
         ItemKind::Enum(ref def, _) => {
             let mut distance_from_explicit = 0;
@@ -7,6 +7,10 @@ edition = "2021"
 
 [dependencies]
 arrayvec = { version = "0.7", default-features = false }
-rustc_serialize = { path = "../rustc_serialize" }
-rustc_macros = { path = "../rustc_macros" }
+rustc_serialize = { path = "../rustc_serialize", optional = true }
+rustc_macros = { path = "../rustc_macros", optional = true }
 smallvec = "1.8.1"
+
+[features]
+default = ["nightly"]
+nightly = ["rustc_serialize", "rustc_macros"]
@@ -1,17 +1,25 @@
 #![deny(rustc::untranslatable_diagnostic)]
 #![deny(rustc::diagnostic_outside_of_impl)]
-#![feature(allow_internal_unstable)]
-#![feature(extend_one)]
-#![feature(min_specialization)]
-#![feature(new_uninit)]
-#![feature(step_trait)]
-#![feature(stmt_expr_attributes)]
-#![feature(test)]
+#![cfg_attr(
+    feature = "nightly",
+    feature(
+        allow_internal_unstable,
+        extend_one,
+        min_specialization,
+        new_uninit,
+        step_trait,
+        stmt_expr_attributes,
+        test
+    )
+)]
 
+#[cfg(feature = "nightly")]
 pub mod bit_set;
+#[cfg(feature = "nightly")]
 pub mod interval;
 pub mod vec;
 
+#[cfg(feature = "rustc_macros")]
 pub use rustc_macros::newtype_index;
 
 /// Type size assertion. The first argument is a type and the second argument is its expected size.
@@ -1,3 +1,4 @@
+#[cfg(feature = "rustc_serialize")]
 use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
 
 use std::fmt;
@@ -61,12 +62,14 @@ pub struct IndexVec<I: Idx, T> {
 // not the phantom data.
 unsafe impl<I: Idx, T> Send for IndexVec<I, T> where T: Send {}
 
+#[cfg(feature = "rustc_serialize")]
 impl<S: Encoder, I: Idx, T: Encodable<S>> Encodable<S> for IndexVec<I, T> {
     fn encode(&self, s: &mut S) {
         Encodable::encode(&self.raw, s);
     }
 }
 
+#[cfg(feature = "rustc_serialize")]
 impl<D: Decoder, I: Idx, T: Decodable<D>> Decodable<D> for IndexVec<I, T> {
     fn decode(d: &mut D) -> Self {
         IndexVec { raw: Decodable::decode(d), _marker: PhantomData }
@@ -359,11 +362,13 @@ fn extend<J: IntoIterator<Item = T>>(&mut self, iter: J) {
     }
 
     #[inline]
+    #[cfg(feature = "nightly")]
     fn extend_one(&mut self, item: T) {
         self.raw.push(item);
     }
 
     #[inline]
+    #[cfg(feature = "nightly")]
     fn extend_reserve(&mut self, additional: usize) {
         self.raw.reserve(additional);
     }
@@ -12,7 +12,7 @@
 use rustc_span::source_map;
 use rustc_span::symbol::sym;
 use rustc_span::{Span, Symbol};
-use rustc_target::abi::{Abi, WrappingRange};
+use rustc_target::abi::{Abi, Size, WrappingRange};
 use rustc_target::abi::{Integer, TagEncoding, Variants};
 use rustc_target::spec::abi::Abi as SpecAbi;
 
@@ -225,11 +225,11 @@ fn report_bin_hex_error(
     cx: &LateContext<'_>,
     expr: &hir::Expr<'_>,
     ty: attr::IntType,
+    size: Size,
     repr_str: String,
     val: u128,
     negative: bool,
 ) {
-    let size = Integer::from_attr(&cx.tcx, ty).size();
     cx.struct_span_lint(
         OVERFLOWING_LITERALS,
         expr.span,
@@ -352,6 +352,7 @@ fn lint_int_literal<'tcx>(
                 cx,
                 e,
                 attr::IntType::SignedInt(ty::ast_int_ty(t)),
+                Integer::from_int_ty(cx, t).size(),
                 repr_str,
                 v,
                 negative,
@@ -437,6 +438,7 @@ fn lint_uint_literal<'tcx>(
                 cx,
                 e,
                 attr::IntType::UnsignedInt(ty::ast_uint_ty(t)),
+                Integer::from_uint_ty(cx, t).size(),
                 repr_str,
                 lit_val,
                 false,
@@ -1376,7 +1378,7 @@ fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
         let (largest, slargest, largest_index) = iter::zip(enum_definition.variants, variants)
             .map(|(variant, variant_layout)| {
                 // Subtract the size of the enum tag.
-                let bytes = variant_layout.size().bytes().saturating_sub(tag_size);
+                let bytes = variant_layout.size.bytes().saturating_sub(tag_size);
 
                 debug!("- variant `{}` is {} bytes large", variant.ident, bytes);
                 bytes
@@ -6,7 +6,7 @@
 macro_rules! arena_types {
     ($macro:path) => (
         $macro!([
-            [] layout: rustc_target::abi::LayoutS<'tcx>,
+            [] layout: rustc_target::abi::LayoutS<rustc_target::abi::VariantIdx>,
             [] fn_abi: rustc_target::abi::call::FnAbi<'tcx, rustc_middle::ty::Ty<'tcx>>,
             // AdtDef are interned and compared by address
             [decode] adt_def: rustc_middle::ty::AdtDefData,
@@ -14,7 +14,7 @@
 use rustc_query_system::ich::StableHashingContext;
 use rustc_session::DataTypeKind;
 use rustc_span::symbol::sym;
-use rustc_target::abi::VariantIdx;
+use rustc_target::abi::{ReprOptions, VariantIdx};
 
 use std::cell::RefCell;
 use std::cmp::Ordering;
@@ -22,9 +22,7 @@
 use std::ops::Range;
 use std::str;
 
-use super::{
-    Destructor, FieldDef, GenericPredicates, ReprOptions, Ty, TyCtxt, VariantDef, VariantDiscr,
-};
+use super::{Destructor, FieldDef, GenericPredicates, Ty, TyCtxt, VariantDef, VariantDiscr};
 
 bitflags! {
     #[derive(HashStable, TyEncodable, TyDecodable)]
@@ -148,7 +148,7 @@ pub struct CtxtInterners<'tcx> {
     const_: InternedSet<'tcx, ConstS<'tcx>>,
     const_allocation: InternedSet<'tcx, Allocation>,
     bound_variable_kinds: InternedSet<'tcx, List<ty::BoundVariableKind>>,
-    layout: InternedSet<'tcx, LayoutS<'tcx>>,
+    layout: InternedSet<'tcx, LayoutS<VariantIdx>>,
     adt_def: InternedSet<'tcx, AdtDefData>,
 }
 
@@ -1233,7 +1233,7 @@ pub fn create_global_ctxt(
         global_ctxt: untracked_resolutions,
         ast_lowering: untracked_resolver_for_lowering,
     } = resolver_outputs;
-    let data_layout = TargetDataLayout::parse(&s.target).unwrap_or_else(|err| {
+    let data_layout = s.target.parse_data_layout().unwrap_or_else(|err| {
        s.emit_fatal(err);
    });
    let interners = CtxtInterners::new(arena);
@@ -2244,7 +2244,7 @@ pub fn $method(self, v: $ty) -> $ret_ty {
    region: mk_region(RegionKind<'tcx>): Region -> Region<'tcx>,
    const_: mk_const_internal(ConstS<'tcx>): Const -> Const<'tcx>,
    const_allocation: intern_const_alloc(Allocation): ConstAllocation -> ConstAllocation<'tcx>,
-    layout: intern_layout(LayoutS<'tcx>): Layout -> Layout<'tcx>,
+    layout: intern_layout(LayoutS<VariantIdx>): Layout -> Layout<'tcx>,
    adt_def: intern_adt_def(AdtDefData): AdtDef -> AdtDef<'tcx>,
 }
@@ -1,8 +1,6 @@
 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use crate::ty::normalize_erasing_regions::NormalizationError;
 use crate::ty::{self, ReprOptions, Ty, TyCtxt, TypeVisitable};
-use rustc_ast as ast;
-use rustc_attr as attr;
 use rustc_errors::{DiagnosticBuilder, Handler, IntoDiagnostic};
 use rustc_hir as hir;
 use rustc_hir::def_id::DefId;
@@ -20,7 +18,6 @@
 
 pub trait IntegerExt {
     fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
-    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
     fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
     fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
     fn repr_discr<'tcx>(
@@ -49,22 +46,6 @@ fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
         }
     }
 
-    /// Gets the Integer type from an attr::IntType.
-    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
-        let dl = cx.data_layout();
-
-        match ity {
-            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
-            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
-            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
-            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
-            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
-            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
-                dl.ptr_sized_integer()
-            }
-        }
-    }
-
     fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
         match ity {
             ty::IntTy::I8 => I8,
@@ -237,6 +218,18 @@ pub struct LayoutCx<'tcx, C> {
     pub param_env: ty::ParamEnv<'tcx>,
 }
 
+impl<'tcx> LayoutCalculator for LayoutCx<'tcx, TyCtxt<'tcx>> {
+    type TargetDataLayoutRef = &'tcx TargetDataLayout;
+
+    fn delay_bug(&self, txt: &str) {
+        self.tcx.sess.delay_span_bug(DUMMY_SP, txt);
+    }
+
+    fn current_data_layout(&self) -> Self::TargetDataLayoutRef {
+        &self.tcx.data_layout
+    }
+}
+
 /// Type size "skeleton", i.e., the only information determining a type's size.
 /// While this is conservative, (aside from constant sizes, only pointers,
 /// newtypes thereof and null pointer optimized enums are allowed), it is
@@ -610,7 +603,7 @@ fn ty_and_layout_for_variant(
                 })
             }
 
-            Variants::Multiple { ref variants, .. } => variants[variant_index],
+            Variants::Multiple { ref variants, .. } => cx.tcx().intern_layout(variants[variant_index].clone()),
         };
 
         assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
@@ -48,7 +48,8 @@
 use rustc_span::hygiene::MacroKind;
 use rustc_span::symbol::{kw, sym, Ident, Symbol};
 use rustc_span::{ExpnId, Span};
-use rustc_target::abi::{Align, VariantIdx};
+use rustc_target::abi::{Align, Integer, IntegerType, VariantIdx};
+pub use rustc_target::abi::{ReprFlags, ReprOptions};
 pub use subst::*;
 pub use vtable::*;
@@ -1994,163 +1995,6 @@ fn hash<H: Hasher>(&self, s: &mut H) {
     }
 }
 
-bitflags! {
-    #[derive(TyEncodable, TyDecodable, Default, HashStable)]
-    pub struct ReprFlags: u8 {
-        const IS_C = 1 << 0;
-        const IS_SIMD = 1 << 1;
-        const IS_TRANSPARENT = 1 << 2;
-        // Internal only for now. If true, don't reorder fields.
-        const IS_LINEAR = 1 << 3;
-        // If true, the type's layout can be randomized using
-        // the seed stored in `ReprOptions.layout_seed`
-        const RANDOMIZE_LAYOUT = 1 << 4;
-        // Any of these flags being set prevent field reordering optimisation.
-        const IS_UNOPTIMISABLE = ReprFlags::IS_C.bits
-            | ReprFlags::IS_SIMD.bits
-            | ReprFlags::IS_LINEAR.bits;
-    }
-}
-
-/// Represents the repr options provided by the user,
-#[derive(Copy, Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Default, HashStable)]
-pub struct ReprOptions {
-    pub int: Option<attr::IntType>,
-    pub align: Option<Align>,
-    pub pack: Option<Align>,
-    pub flags: ReprFlags,
-    /// The seed to be used for randomizing a type's layout
-    ///
-    /// Note: This could technically be a `[u8; 16]` (a `u128`) which would
-    /// be the "most accurate" hash as it'd encompass the item and crate
-    /// hash without loss, but it does pay the price of being larger.
-    /// Everything's a tradeoff, a `u64` seed should be sufficient for our
-    /// purposes (primarily `-Z randomize-layout`)
-    pub field_shuffle_seed: u64,
-}
-
-impl ReprOptions {
-    pub fn new(tcx: TyCtxt<'_>, did: DefId) -> ReprOptions {
-        let mut flags = ReprFlags::empty();
-        let mut size = None;
-        let mut max_align: Option<Align> = None;
-        let mut min_pack: Option<Align> = None;
-
-        // Generate a deterministically-derived seed from the item's path hash
-        // to allow for cross-crate compilation to actually work
-        let mut field_shuffle_seed = tcx.def_path_hash(did).0.to_smaller_hash();
-
-        // If the user defined a custom seed for layout randomization, xor the item's
-        // path hash with the user defined seed, this will allowing determinism while
-        // still allowing users to further randomize layout generation for e.g. fuzzing
-        if let Some(user_seed) = tcx.sess.opts.unstable_opts.layout_seed {
-            field_shuffle_seed ^= user_seed;
-        }
-
-        for attr in tcx.get_attrs(did, sym::repr) {
-            for r in attr::parse_repr_attr(&tcx.sess, attr) {
-                flags.insert(match r {
-                    attr::ReprC => ReprFlags::IS_C,
-                    attr::ReprPacked(pack) => {
-                        let pack = Align::from_bytes(pack as u64).unwrap();
-                        min_pack = Some(if let Some(min_pack) = min_pack {
-                            min_pack.min(pack)
-                        } else {
-                            pack
-                        });
-                        ReprFlags::empty()
-                    }
-                    attr::ReprTransparent => ReprFlags::IS_TRANSPARENT,
-                    attr::ReprSimd => ReprFlags::IS_SIMD,
-                    attr::ReprInt(i) => {
-                        size = Some(i);
-                        ReprFlags::empty()
-                    }
-                    attr::ReprAlign(align) => {
-                        max_align = max_align.max(Some(Align::from_bytes(align as u64).unwrap()));
-                        ReprFlags::empty()
-                    }
-                });
-            }
-        }
-
-        // If `-Z randomize-layout` was enabled for the type definition then we can
-        // consider performing layout randomization
-        if tcx.sess.opts.unstable_opts.randomize_layout {
-            flags.insert(ReprFlags::RANDOMIZE_LAYOUT);
-        }
-
-        // This is here instead of layout because the choice must make it into metadata.
-        if !tcx.consider_optimizing(|| format!("Reorder fields of {:?}", tcx.def_path_str(did))) {
-            flags.insert(ReprFlags::IS_LINEAR);
-        }
-
-        Self { int: size, align: max_align, pack: min_pack, flags, field_shuffle_seed }
-    }
-
-    #[inline]
-    pub fn simd(&self) -> bool {
-        self.flags.contains(ReprFlags::IS_SIMD)
-    }
-
-    #[inline]
-    pub fn c(&self) -> bool {
-        self.flags.contains(ReprFlags::IS_C)
-    }
-
-    #[inline]
-    pub fn packed(&self) -> bool {
-        self.pack.is_some()
-    }
-
-    #[inline]
-    pub fn transparent(&self) -> bool {
-        self.flags.contains(ReprFlags::IS_TRANSPARENT)
-    }
-
-    #[inline]
-    pub fn linear(&self) -> bool {
-        self.flags.contains(ReprFlags::IS_LINEAR)
-    }
-
-    /// Returns the discriminant type, given these `repr` options.
-    /// This must only be called on enums!
-    pub fn discr_type(&self) -> attr::IntType {
-        self.int.unwrap_or(attr::SignedInt(ast::IntTy::Isize))
-    }
-
-    /// Returns `true` if this `#[repr()]` should inhabit "smart enum
-    /// layout" optimizations, such as representing `Foo<&T>` as a
-    /// single pointer.
-    pub fn inhibit_enum_layout_opt(&self) -> bool {
-        self.c() || self.int.is_some()
-    }
-
-    /// Returns `true` if this `#[repr()]` should inhibit struct field reordering
-    /// optimizations, such as with `repr(C)`, `repr(packed(1))`, or `repr(<int>)`.
-    pub fn inhibit_struct_field_reordering_opt(&self) -> bool {
-        if let Some(pack) = self.pack {
-            if pack.bytes() == 1 {
-                return true;
-            }
-        }
-
-        self.flags.intersects(ReprFlags::IS_UNOPTIMISABLE) || self.int.is_some()
-    }
-
-    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
-    /// was enabled for its declaration crate
-    pub fn can_randomize_type_layout(&self) -> bool {
-        !self.inhibit_struct_field_reordering_opt()
-            && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
-    }
-
-    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
-    pub fn inhibit_union_abi_opt(&self) -> bool {
-        self.c()
-    }
-}
 
 impl<'tcx> FieldDef {
     /// Returns the type of this field. The resulting type is not normalized. The `subst` is
     /// typically obtained via the second field of [`TyKind::Adt`].
@ -2218,6 +2062,81 @@ pub fn provided_trait_methods(self, id: DefId) -> impl 'tcx + Iterator<Item = &'
|
||||
.filter(move |item| item.kind == AssocKind::Fn && item.defaultness(self).has_value())
|
||||
}
|
||||
|
||||
pub fn repr_options_of_def(self, did: DefId) -> ReprOptions {
|
||||
let mut flags = ReprFlags::empty();
|
||||
let mut size = None;
|
||||
let mut max_align: Option<Align> = None;
|
||||
let mut min_pack: Option<Align> = None;
|
||||
|
||||
// Generate a deterministically-derived seed from the item's path hash
|
||||
// to allow for cross-crate compilation to actually work
|
||||
let mut field_shuffle_seed = self.def_path_hash(did).0.to_smaller_hash();
|
||||
|
||||
// If the user defined a custom seed for layout randomization, xor the item's
|
||||
// path hash with the user defined seed, this will allowing determinism while
|
||||
// still allowing users to further randomize layout generation for e.g. fuzzing
|
||||
if let Some(user_seed) = self.sess.opts.unstable_opts.layout_seed {
|
||||
field_shuffle_seed ^= user_seed;
|
||||
}
|
||||
|
||||
for attr in self.get_attrs(did, sym::repr) {
|
||||
for r in attr::parse_repr_attr(&self.sess, attr) {
|
||||
flags.insert(match r {
|
||||
attr::ReprC => ReprFlags::IS_C,
|
||||
attr::ReprPacked(pack) => {
|
||||
let pack = Align::from_bytes(pack as u64).unwrap();
|
||||
min_pack = Some(if let Some(min_pack) = min_pack {
|
||||
min_pack.min(pack)
|
||||
} else {
|
||||
pack
|
||||
});
|
||||
ReprFlags::empty()
|
||||
}
|
||||
attr::ReprTransparent => ReprFlags::IS_TRANSPARENT,
|
||||
attr::ReprSimd => ReprFlags::IS_SIMD,
|
||||
attr::ReprInt(i) => {
|
||||
size = Some(match i {
|
||||
attr::IntType::SignedInt(x) => match x {
|
||||
ast::IntTy::Isize => IntegerType::Pointer(true),
|
||||
ast::IntTy::I8 => IntegerType::Fixed(Integer::I8, true),
|
||||
ast::IntTy::I16 => IntegerType::Fixed(Integer::I16, true),
|
||||
ast::IntTy::I32 => IntegerType::Fixed(Integer::I32, true),
|
||||
ast::IntTy::I64 => IntegerType::Fixed(Integer::I64, true),
|
||||
ast::IntTy::I128 => IntegerType::Fixed(Integer::I128, true),
|
||||
},
|
||||
attr::IntType::UnsignedInt(x) => match x {
|
||||
ast::UintTy::Usize => IntegerType::Pointer(false),
|
||||
ast::UintTy::U8 => IntegerType::Fixed(Integer::I8, false),
|
||||
ast::UintTy::U16 => IntegerType::Fixed(Integer::I16, false),
|
||||
ast::UintTy::U32 => IntegerType::Fixed(Integer::I32, false),
|
||||
ast::UintTy::U64 => IntegerType::Fixed(Integer::I64, false),
|
||||
ast::UintTy::U128 => IntegerType::Fixed(Integer::I128, false),
|
||||
},
|
||||
});
|
||||
ReprFlags::empty()
|
||||
}
|
||||
attr::ReprAlign(align) => {
|
||||
max_align = max_align.max(Some(Align::from_bytes(align as u64).unwrap()));
|
||||
ReprFlags::empty()
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// If `-Z randomize-layout` was enabled for the type definition then we can
|
||||
// consider performing layout randomization
|
||||
if self.sess.opts.unstable_opts.randomize_layout {
|
||||
flags.insert(ReprFlags::RANDOMIZE_LAYOUT);
|
||||
}
|
||||
|
||||
// This is here instead of layout because the choice must make it into metadata.
|
||||
if !self.consider_optimizing(|| format!("Reorder fields of {:?}", self.def_path_str(did))) {
|
||||
flags.insert(ReprFlags::IS_LINEAR);
|
||||
}
|
||||
|
||||
ReprOptions { int: size, align: max_align, pack: min_pack, flags, field_shuffle_seed }
|
||||
}
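
The seed derivation above is worth a standalone illustration: XOR-ing the per-item path hash with an optional user seed stays deterministic for each item, while a new user seed re-shuffles every item at once. A minimal sketch, with plain `u64`s standing in for the hash types:

// Derive a per-item field-shuffle seed the way the function above does:
// start from a hash of the item's path, then fold in an optional user seed.
fn field_shuffle_seed(path_hash: u64, user_seed: Option<u64>) -> u64 {
    let mut seed = path_hash;
    if let Some(user_seed) = user_seed {
        // XOR keeps the result deterministic for a fixed (hash, seed) pair,
        // so separately-compiled crates agree on the same layout.
        seed ^= user_seed;
    }
    seed
}

fn main() {
    let h = 0xDEAD_BEEF_u64;
    assert_eq!(field_shuffle_seed(h, None), field_shuffle_seed(h, None));
    assert_ne!(field_shuffle_seed(h, Some(1)), field_shuffle_seed(h, Some(2)));
}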

/// Look up the name of a definition across crates. This does not look at HIR.
pub fn opt_item_name(self, def_id: DefId) -> Option<Symbol> {
if let Some(cnum) = def_id.as_crate_root() {

@ -8,8 +8,6 @@
};
use crate::ty::{GenericArgKind, SubstsRef};
use rustc_apfloat::Float as _;
use rustc_ast as ast;
use rustc_attr::{self as attr, SignedInt, UnsignedInt};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_errors::ErrorGuaranteed;
@ -19,7 +17,7 @@
use rustc_index::bit_set::GrowableBitSet;
use rustc_macros::HashStable;
use rustc_span::{sym, DUMMY_SP};
use rustc_target::abi::{Integer, Size, TargetDataLayout};
use rustc_target::abi::{Integer, IntegerType, Size, TargetDataLayout};
use rustc_target::spec::abi::Abi;
use smallvec::SmallVec;
use std::{fmt, iter};
@ -104,21 +102,12 @@ pub trait IntTypeExt {
fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx>;
}

impl IntTypeExt for attr::IntType {
impl IntTypeExt for IntegerType {
fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
match *self {
SignedInt(ast::IntTy::I8) => tcx.types.i8,
SignedInt(ast::IntTy::I16) => tcx.types.i16,
SignedInt(ast::IntTy::I32) => tcx.types.i32,
SignedInt(ast::IntTy::I64) => tcx.types.i64,
SignedInt(ast::IntTy::I128) => tcx.types.i128,
SignedInt(ast::IntTy::Isize) => tcx.types.isize,
UnsignedInt(ast::UintTy::U8) => tcx.types.u8,
UnsignedInt(ast::UintTy::U16) => tcx.types.u16,
UnsignedInt(ast::UintTy::U32) => tcx.types.u32,
UnsignedInt(ast::UintTy::U64) => tcx.types.u64,
UnsignedInt(ast::UintTy::U128) => tcx.types.u128,
UnsignedInt(ast::UintTy::Usize) => tcx.types.usize,
match self {
IntegerType::Pointer(true) => tcx.types.isize,
IntegerType::Pointer(false) => tcx.types.usize,
IntegerType::Fixed(i, s) => i.to_ty(tcx, *s),
}
}
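
The new two-variant encoding is compact enough to restate standalone. A self-contained sketch (local stand-ins, not the actual `rustc_target::abi` definitions) of how the old twelve-arm match collapses: the `bool` is signedness, and pointer-sized ints get their own variant:

#[derive(Clone, Copy)]
enum Integer { I8, I16, I32, I64, I128 }

#[derive(Clone, Copy)]
enum IntegerType {
    Pointer(bool),        // true = isize, false = usize
    Fixed(Integer, bool), // e.g. Fixed(I32, true) = i32, Fixed(I32, false) = u32
}

// The old attr::IntType needed one arm per primitive type; here the
// signedness bool and the Integer width factor that table apart.
fn type_name(ty: IntegerType) -> String {
    let (width, signed) = match ty {
        IntegerType::Pointer(signed) => ("size".to_string(), signed),
        IntegerType::Fixed(i, signed) => {
            let bits = match i {
                Integer::I8 => 8, Integer::I16 => 16, Integer::I32 => 32,
                Integer::I64 => 64, Integer::I128 => 128,
            };
            (bits.to_string(), signed)
        }
    };
    format!("{}{}", if signed { 'i' } else { 'u' }, width)
}

fn main() {
    assert_eq!(type_name(IntegerType::Pointer(true)), "isize");
    assert_eq!(type_name(IntegerType::Fixed(Integer::I32, false)), "u32");
}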

@ -65,7 +65,7 @@ fn variant_discriminants<'tcx>(
Variants::Multiple { variants, .. } => variants
.iter_enumerated()
.filter_map(|(idx, layout)| {
(layout.abi() != Abi::Uninhabited)
(layout.abi != Abi::Uninhabited)
.then(|| ty.discriminant_for_variant(tcx, idx).unwrap().val)
})
.collect(),

@ -11,7 +11,7 @@
use rustc_data_structures::fx::{FxHashMap, FxHashSet};

use rustc_data_structures::stable_hasher::ToStableHashKey;
use rustc_target::abi::{Align, TargetDataLayout};
use rustc_target::abi::Align;
use rustc_target::spec::{PanicStrategy, SanitizerSet, SplitDebuginfo};
use rustc_target::spec::{Target, TargetTriple, TargetWarnings, TARGETS};

@ -900,7 +900,7 @@ fn default_configuration(sess: &Session) -> CrateConfig {
let min_atomic_width = sess.target.min_atomic_width();
let max_atomic_width = sess.target.max_atomic_width();
let atomic_cas = sess.target.atomic_cas;
let layout = TargetDataLayout::parse(&sess.target).unwrap_or_else(|err| {
let layout = sess.target.parse_data_layout().unwrap_or_else(|err| {
sess.emit_fatal(err);
});

@ -7,6 +7,7 @@ edition = "2021"
bitflags = "1.2.1"
tracing = "0.1"
serde_json = "1.0.59"
rustc_abi = { path = "../rustc_abi" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_feature = { path = "../rustc_feature" }
rustc_index = { path = "../rustc_index" }

@ -262,7 +262,7 @@ pub fn size<C: HasDataLayout>(&self, _cx: &C) -> Size {
let mut size = self.rest.total;
for i in 0..self.prefix.iter().count() {
match self.prefix[i] {
Some(v) => size += Size { raw: v.size.bytes() },
Some(v) => size += v.size,
None => {}
}
}
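
The cleanup above replaces a raw-field construction with `Size`'s own arithmetic. A standalone sketch of the accumulation, with plain byte counts in place of `Size` (hypothetical stand-ins):

// Sum the occupied prefix registers' sizes on top of the tail size,
// mirroring the loop above; None marks an unused prefix slot.
fn total_size(rest_total: u64, prefix: &[Option<u64>]) -> u64 {
    let mut size = rest_total;
    for slot in prefix.iter().flatten() {
        size += slot;
    }
    size
}

fn main() {
    // Two occupied 8-byte slots plus a 16-byte tail.
    assert_eq!(total_size(16, &[Some(8), None, Some(8)]), 32);
}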

@ -87,8 +87,8 @@ fn arg_scalar_pair<C>(
_ => {}
}

if (offset.raw % 4) != 0 && scalar2.primitive().is_float() {
offset.raw += 4 - (offset.raw % 4);
if (offset.bytes() % 4) != 0 && scalar2.primitive().is_float() {
offset += Size::from_bytes(4 - (offset.bytes() % 4));
}
data = arg_scalar(cx, &scalar2, offset, data);
return data;
@ -169,14 +169,14 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, in_registers_max: S
has_float: false,
arg_attribute: ArgAttribute::default(),
},
Size { raw: 0 },
Size::ZERO,
);

if data.has_float {
// Structure { float, int, int } doesn't like to be handled like
// { float, long int }. Other way around it doesn't mind.
if data.last_offset < arg.layout.size
&& (data.last_offset.raw % 8) != 0
&& (data.last_offset.bytes() % 8) != 0
&& data.prefix_index < data.prefix.len()
{
data.prefix[data.prefix_index] = Some(Reg::i32());
@ -185,7 +185,7 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, in_registers_max: S
}

let mut rest_size = arg.layout.size - data.last_offset;
if (rest_size.raw % 8) != 0 && data.prefix_index < data.prefix.len() {
if (rest_size.bytes() % 8) != 0 && data.prefix_index < data.prefix.len() {
data.prefix[data.prefix_index] = Some(Reg::i32());
rest_size = rest_size - Reg::i32().size;
}
@ -214,13 +214,13 @@ pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
C: HasDataLayout,
{
if !fn_abi.ret.is_ignore() {
classify_arg(cx, &mut fn_abi.ret, Size { raw: 32 });
classify_arg(cx, &mut fn_abi.ret, Size::from_bytes(32));
}

for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
classify_arg(cx, arg, Size { raw: 16 });
classify_arg(cx, arg, Size::from_bytes(16));
}
}
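
All of these hunks swap direct pokes at `Size`'s `raw` field (no longer accessible once the type moves into `rustc_abi`) for its public constructors and accessors. The 4-byte re-alignment in `arg_scalar_pair`, restated over plain byte counts:

// Round an offset up to the next 4-byte boundary, as the float-handling
// branch above does via offset.bytes() and Size::from_bytes().
fn round_up_to_4(offset_bytes: u64) -> u64 {
    if offset_bytes % 4 != 0 {
        offset_bytes + (4 - offset_bytes % 4)
    } else {
        offset_bytes
    }
}

fn main() {
    assert_eq!(round_up_to_4(6), 8); // mid-word offsets get bumped
    assert_eq!(round_up_to_4(8), 8); // aligned offsets are untouched
}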

File diff suppressed because it is too large
@ -35,10 +35,7 @@
#[cfg(test)]
mod tests;

/// Requirements for a `StableHashingContext` to be used in this crate.
/// This is a hack to allow using the `HashStable_Generic` derive macro
/// instead of implementing everything in `rustc_middle`.
pub trait HashStableContext {}
pub use rustc_abi::HashStableContext;

/// The name of rustc's own place to organize libraries.
///

@ -35,7 +35,10 @@
//! to the list specified by the target, rather than replace.

use crate::abi::call::Conv;
use crate::abi::Endian;
use crate::abi::{
AbiAndPrefAlign, AddressSpace, Align, Endian, Integer, Size, TargetDataLayout,
TargetDataLayoutErrors,
};
use crate::json::{Json, ToJson};
use crate::spec::abi::{lookup as lookup_abi, Abi};
use crate::spec::crt_objects::{CrtObjects, LinkSelfContainedDefault};
@ -1317,6 +1320,120 @@ pub struct Target {
pub options: TargetOptions,
}

impl Target {
pub fn parse_data_layout<'a>(&'a self) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
// Parse an address space index from a string.
let parse_address_space = |s: &'a str, cause: &'a str| {
s.parse::<u32>().map(AddressSpace).map_err(|err| {
TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
})
};

// Parse a bit count from a string.
let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
kind,
bit: s,
cause,
err,
})
};

// Parse a size string.
let size = |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);

// Parse an alignment string.
let align = |s: &[&'a str], cause: &'a str| {
if s.is_empty() {
return Err(TargetDataLayoutErrors::MissingAlignment { cause });
}
let align_from_bits = |bits| {
Align::from_bits(bits)
.map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
};
let abi = parse_bits(s[0], "alignment", cause)?;
let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
};

let mut dl = TargetDataLayout::default();
let mut i128_align_src = 64;
for spec in self.data_layout.split('-') {
let spec_parts = spec.split(':').collect::<Vec<_>>();

match &*spec_parts {
["e"] => dl.endian = Endian::Little,
["E"] => dl.endian = Endian::Big,
[p] if p.starts_with('P') => {
dl.instruction_address_space = parse_address_space(&p[1..], "P")?
}
["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
[p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
dl.pointer_size = size(s, p)?;
dl.pointer_align = align(a, p)?;
}
[s, ref a @ ..] if s.starts_with('i') => {
let Ok(bits) = s[1..].parse::<u64>() else {
size(&s[1..], "i")?; // For the user error.
continue;
};
let a = align(a, s)?;
match bits {
1 => dl.i1_align = a,
8 => dl.i8_align = a,
16 => dl.i16_align = a,
32 => dl.i32_align = a,
64 => dl.i64_align = a,
_ => {}
}
if bits >= i128_align_src && bits <= 128 {
// Default alignment for i128 is decided by taking the alignment of
// largest-sized i{64..=128}.
i128_align_src = bits;
dl.i128_align = a;
}
}
[s, ref a @ ..] if s.starts_with('v') => {
let v_size = size(&s[1..], "v")?;
let a = align(a, s)?;
if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
v.1 = a;
continue;
}
// No existing entry, add a new one.
dl.vector_align.push((v_size, a));
}
_ => {} // Ignore everything else.
}
}

// Perform consistency checks against the Target information.
if dl.endian != self.endian {
return Err(TargetDataLayoutErrors::InconsistentTargetArchitecture {
dl: dl.endian.as_str(),
target: self.endian.as_str(),
});
}

let target_pointer_width: u64 = self.pointer_width.into();
if dl.pointer_size.bits() != target_pointer_width {
return Err(TargetDataLayoutErrors::InconsistentTargetPointerWidth {
pointer_size: dl.pointer_size.bits(),
target: self.pointer_width,
});
}

dl.c_enum_min_size = match Integer::from_size(Size::from_bits(self.c_enum_min_bits)) {
Ok(bits) => bits,
Err(err) => return Err(TargetDataLayoutErrors::InvalidBitsSize { err }),
};

Ok(dl)
}
}
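
A runnable illustration of the input format `parse_data_layout` walks: an LLVM data-layout string is a `-`-separated list of specs, each of which splits on `:` into the parts matched above (the example string here is made up for demonstration):

fn main() {
    // Hypothetical x86_64-style data-layout string.
    let data_layout = "e-P0-p:64:64-i64:64-f80:128-v128:128-S128";
    for spec in data_layout.split('-') {
        let spec_parts: Vec<&str> = spec.split(':').collect();
        // "e"           -> little-endian
        // "p:64:64"     -> 64-bit pointers, 64-bit ABI alignment
        // "i64:64"      -> i64 aligned to 64 bits
        // anything else -> ignored by the parser above
        println!("{spec_parts:?}");
    }
}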

pub trait HasTargetSpec {
fn target_spec(&self) -> &Target;
}

@ -12,6 +12,7 @@ rustc_hir = { path = "../rustc_hir" }
rustc_index = { path = "../rustc_index" }
rustc_ast = { path = "../rustc_ast" }
rustc_span = { path = "../rustc_span" }
rustc_target = { path = "../rustc_target" }
chalk-ir = "0.87.0"
chalk-engine = "0.87.0"
chalk-solve = "0.87.0"

@ -9,9 +9,9 @@
use rustc_middle::traits::ChalkRustInterner as RustInterner;
use rustc_middle::ty::{self, AssocKind, EarlyBinder, Ty, TyCtxt, TypeFoldable, TypeSuperFoldable};
use rustc_middle::ty::{InternalSubsts, SubstsRef};
use rustc_target::abi::{Integer, IntegerType};

use rustc_ast::ast;
use rustc_attr as attr;

use rustc_hir::def_id::DefId;

@ -218,21 +218,21 @@ fn adt_repr(
c: adt_def.repr().c(),
packed: adt_def.repr().packed(),
int: adt_def.repr().int.map(|i| match i {
attr::IntType::SignedInt(ty) => match ty {
ast::IntTy::Isize => int(chalk_ir::IntTy::Isize),
ast::IntTy::I8 => int(chalk_ir::IntTy::I8),
ast::IntTy::I16 => int(chalk_ir::IntTy::I16),
ast::IntTy::I32 => int(chalk_ir::IntTy::I32),
ast::IntTy::I64 => int(chalk_ir::IntTy::I64),
ast::IntTy::I128 => int(chalk_ir::IntTy::I128),
IntegerType::Pointer(true) => int(chalk_ir::IntTy::Isize),
IntegerType::Pointer(false) => uint(chalk_ir::UintTy::Usize),
IntegerType::Fixed(i, true) => match i {
Integer::I8 => int(chalk_ir::IntTy::I8),
Integer::I16 => int(chalk_ir::IntTy::I16),
Integer::I32 => int(chalk_ir::IntTy::I32),
Integer::I64 => int(chalk_ir::IntTy::I64),
Integer::I128 => int(chalk_ir::IntTy::I128),
},
attr::IntType::UnsignedInt(ty) => match ty {
ast::UintTy::Usize => uint(chalk_ir::UintTy::Usize),
ast::UintTy::U8 => uint(chalk_ir::UintTy::U8),
ast::UintTy::U16 => uint(chalk_ir::UintTy::U16),
ast::UintTy::U32 => uint(chalk_ir::UintTy::U32),
ast::UintTy::U64 => uint(chalk_ir::UintTy::U64),
ast::UintTy::U128 => uint(chalk_ir::UintTy::U128),
IntegerType::Fixed(i, false) => match i {
Integer::I8 => uint(chalk_ir::UintTy::U8),
Integer::I16 => uint(chalk_ir::UintTy::U16),
Integer::I32 => uint(chalk_ir::UintTy::U32),
Integer::I64 => uint(chalk_ir::UintTy::U64),
Integer::I128 => uint(chalk_ir::UintTy::U128),
},
}),
})
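
One subtlety the rewrite surfaces: signedness now lives in the `bool`, so a single `Fixed(i, true)`/`Fixed(i, false)` split replaces the old per-type arms, and `Pointer(false)` must route to the `uint` constructor. A toy sketch of that routing, with local stand-ins for the chalk constructors:

#[derive(Debug, PartialEq)]
enum ChalkInt { Int(u16), Uint(u16) } // bit width; hypothetical stand-in

fn to_chalk(bits: u16, signed: bool) -> ChalkInt {
    // The signedness bool picks the constructor family, as above.
    if signed { ChalkInt::Int(bits) } else { ChalkInt::Uint(bits) }
}

fn main() {
    assert_eq!(to_chalk(8, true), ChalkInt::Int(8));     // like Fixed(I8, true)
    assert_eq!(to_chalk(32, false), ChalkInt::Uint(32)); // like Fixed(I32, false)
}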

@ -4,8 +4,6 @@ version = "0.0.0"
edition = "2021"

[dependencies]
rand = "0.8.4"
rand_xoshiro = "0.6.0"
tracing = "0.1"
rustc_middle = { path = "../rustc_middle" }
rustc_data_structures = { path = "../rustc_data_structures" }

File diff suppressed because it is too large
@ -249,27 +249,27 @@ fn check_layout_abi<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: &TyAndLayou
if let Variants::Multiple { variants, .. } = &layout.variants {
for variant in variants.iter() {
// No nested "multiple".
assert!(matches!(variant.variants(), Variants::Single { .. }));
assert!(matches!(variant.variants, Variants::Single { .. }));
// Variants should have the same or a smaller size as the full thing,
// and same for alignment.
if variant.size() > layout.size {
if variant.size > layout.size {
bug!(
"Type with size {} bytes has variant with size {} bytes: {layout:#?}",
layout.size.bytes(),
variant.size().bytes(),
variant.size.bytes(),
)
}
if variant.align().abi > layout.align.abi {
if variant.align.abi > layout.align.abi {
bug!(
"Type with alignment {} bytes has variant with alignment {} bytes: {layout:#?}",
layout.align.abi.bytes(),
variant.align().abi.bytes(),
variant.align.abi.bytes(),
)
}
// Skip empty variants.
if variant.size() == Size::ZERO
|| variant.fields().count() == 0
|| variant.abi().is_uninhabited()
if variant.size == Size::ZERO
|| variant.fields.count() == 0
|| variant.abi.is_uninhabited()
{
// These are never actually accessed anyway, so we can skip the coherence check
// for them. They also fail that check, since they have
@ -282,7 +282,7 @@ fn check_layout_abi<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: &TyAndLayou
let scalar_coherent = |s1: Scalar, s2: Scalar| {
s1.size(cx) == s2.size(cx) && s1.align(cx) == s2.align(cx)
};
let abi_coherent = match (layout.abi, variant.abi()) {
let abi_coherent = match (layout.abi, variant.abi) {
(Abi::Scalar(s1), Abi::Scalar(s2)) => scalar_coherent(s1, s2),
(Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => {
scalar_coherent(a1, a2) && scalar_coherent(b1, b2)

@ -10,7 +10,7 @@
use rustc_middle::ty::{self, Adt, TyCtxt};
use rustc_span::hygiene::MacroKind;
use rustc_span::symbol::{kw, sym, Symbol};
use rustc_target::abi::{Layout, Primitive, TagEncoding, Variants};
use rustc_target::abi::{LayoutS, Primitive, TagEncoding, VariantIdx, Variants};
use std::cmp::Ordering;
use std::fmt;
use std::rc::Rc;
@ -1892,11 +1892,11 @@ struct update syntax will not work.",
}

fn document_type_layout(w: &mut Buffer, cx: &Context<'_>, ty_def_id: DefId) {
fn write_size_of_layout(w: &mut Buffer, layout: Layout<'_>, tag_size: u64) {
if layout.abi().is_unsized() {
fn write_size_of_layout(w: &mut Buffer, layout: &LayoutS<VariantIdx>, tag_size: u64) {
if layout.abi.is_unsized() {
write!(w, "(unsized)");
} else {
let bytes = layout.size().bytes() - tag_size;
let bytes = layout.size.bytes() - tag_size;
write!(w, "{size} byte{pl}", size = bytes, pl = if bytes == 1 { "" } else { "s" },);
}
}
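
The rustdoc change is the interning story in miniature: `Layout<'_>` is a `Copy` wrapper around an interned `LayoutS`, exposing accessor methods, while `&LayoutS<VariantIdx>` is a plain reference with public fields. A toy model of that wrapper pattern (hypothetical types), which also explains the `.0` access in the next hunk:

// The underlying data with public fields, like LayoutS.
struct LayoutS { size: u64 }

// A cheap Copy handle wrapping a reference, like rustc's interned Layout<'_>;
// callers go through methods (or .0) to reach the wrapped struct.
#[derive(Clone, Copy)]
struct Layout<'a>(&'a LayoutS);

impl<'a> Layout<'a> {
    fn size(&self) -> u64 {
        self.0.size
    }
}

fn main() {
    let s = LayoutS { size: 16 };
    let l = Layout(&s);
    assert_eq!(l.size(), s.size); // method on the wrapper vs field on the struct
}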

@ -1927,7 +1927,7 @@ fn write_size_of_layout(w: &mut Buffer, layout: Layout<'_>, tag_size: u64) {
chapter for details on type layout guarantees.</p></div>"
);
w.write_str("<p><strong>Size:</strong> ");
write_size_of_layout(w, ty_layout.layout, 0);
write_size_of_layout(w, &ty_layout.layout.0, 0);
writeln!(w, "</p>");
if let Variants::Multiple { variants, tag, tag_encoding, .. } =
&ty_layout.layout.variants()
@ -1953,7 +1953,7 @@ fn write_size_of_layout(w: &mut Buffer, layout: Layout<'_>, tag_size: u64) {
for (index, layout) in variants.iter_enumerated() {
let name = adt.variant(index).name;
write!(w, "<li><code>{name}</code>: ", name = name);
write_size_of_layout(w, *layout, tag_size);
write_size_of_layout(w, layout, tag_size);
writeln!(w, "</li>");
}
w.write_str("</ul>");

@ -2,12 +2,11 @@
use clippy_utils::diagnostics::span_lint;
use clippy_utils::expr_or_init;
use clippy_utils::ty::{get_discriminant_value, is_isize_or_usize};
use rustc_ast::ast;
use rustc_attr::IntType;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::{BinOpKind, Expr, ExprKind};
use rustc_lint::LateContext;
use rustc_middle::ty::{self, FloatTy, Ty};
use rustc_target::abi::IntegerType;

use super::{utils, CAST_ENUM_TRUNCATION, CAST_POSSIBLE_TRUNCATION};

@ -122,7 +121,7 @@ pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_expr: &Expr<'_>,
let cast_from_ptr_size = def.repr().int.map_or(true, |ty| {
matches!(
ty,
IntType::SignedInt(ast::IntTy::Isize) | IntType::UnsignedInt(ast::UintTy::Usize)
IntegerType::Pointer(_),
)
});
let suffix = match (cast_from_ptr_size, is_isize_or_usize(cast_to)) {
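
The clippy fix is a nice payoff of the new encoding: one `IntegerType::Pointer(_)` pattern now covers what previously took an explicit `isize`-or-`usize` alternation. A self-contained sketch with a local stand-in enum:

// Local stand-in mirroring rustc_target::abi::IntegerType's shape.
enum IntegerType {
    Pointer(bool),    // isize / usize
    Fixed(u16, bool), // width in bits, signedness
}

fn is_pointer_sized_repr(ty: &IntegerType) -> bool {
    // One wildcard arm replaces matching SignedInt(Isize) | UnsignedInt(Usize).
    matches!(ty, IntegerType::Pointer(_))
}

fn main() {
    assert!(is_pointer_sized_repr(&IntegerType::Pointer(true)));
    assert!(!is_pointer_sized_repr(&IntegerType::Fixed(64, false)));
}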

@ -26,7 +26,6 @@
extern crate rustc_arena;
extern crate rustc_ast;
extern crate rustc_ast_pretty;
extern crate rustc_attr;
extern crate rustc_data_structures;
extern crate rustc_driver;
extern crate rustc_errors;