// rust/src/unsize.rs

use crate::prelude::*;
// Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/base.rs#L159-L307
/// Retrieve the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
/// The `old_info` argument is a bit funny. It is intended for use
/// in an upcast, where the new vtable for an object will be derived
/// from the old one.
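///
/// A sketch of the source-level coercions each arm below handles
/// (illustrative only, not a doc test):
///
/// ```ignore
/// let arr: &[u8] = &[0u8; 4];                 // Array -> Slice: info is the length 4
/// let obj: &dyn std::fmt::Debug = &0u8;       // concrete -> Dynamic: info is the vtable
/// let marker: &(dyn std::fmt::Debug + Send) = &0u8;
/// let upcast: &dyn std::fmt::Debug = marker;  // Dynamic -> Dynamic: `old_info` is reused
/// ```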
pub fn unsized_info<'tcx>(
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
source: Ty<'tcx>,
target: Ty<'tcx>,
old_info: Option<Value>,
) -> Value {
let (source, target) =
fx.tcx
.struct_lockstep_tails_erasing_lifetimes(source, target, ParamEnv::reveal_all());
match (&source.kind, &target.kind) {
(&ty::Array(_, len), &ty::Slice(_)) => fx.bcx.ins().iconst(
fx.pointer_type,
len.eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64,
),
(&ty::Dynamic(..), &ty::Dynamic(..)) => {
// For now, upcasts are limited to changes in marker
// traits, and hence never require an actual change to
// the vtable.
old_info.expect("unsized_info: missing old info for trait upcast")
}
(_, &ty::Dynamic(ref data, ..)) => crate::vtable::get_vtable(fx, source, data.principal()),
_ => bug!(
"unsized_info: invalid unsizing {:?} -> {:?}",
source,
target
),
}
}
/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
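///
/// Returns the pair `(base pointer, unsized info)` that together form the
/// fat pointer. A sketch of the coercion this lowers (illustrative only,
/// not a doc test):
///
/// ```ignore
/// let thin: &[u32; 8] = &[0u32; 8];
/// let fat: &[u32] = thin; // same base pointer, plus the length 8 as the info Value
/// ```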
pub fn unsize_thin_ptr<'tcx>(
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
src: Value,
src_ty: Ty<'tcx>,
dst_ty: Ty<'tcx>,
) -> (Value, Value) {
match (&src_ty.kind, &dst_ty.kind) {
(&ty::Ref(_, a, _), &ty::Ref(_, b, _))
| (&ty::Ref(_, a, _), &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
| (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
assert!(!fx.layout_of(a).is_unsized());
(src, unsized_info(fx, a, b, None))
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty());
assert!(!fx.layout_of(a).is_unsized());
(src, unsized_info(fx, a, b, None))
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
let src_layout = fx.layout_of(src_ty);
let dst_layout = fx.layout_of(dst_ty);
let mut result = None;
for i in 0..src_layout.fields.count() {
let src_f = src_layout.field(fx, i);
assert_eq!(src_layout.fields.offset(i).bytes(), 0);
assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
if src_f.is_zst() {
continue;
}
assert_eq!(src_layout.size, src_f.size);
let dst_f = dst_layout.field(fx, i);
assert_ne!(src_f.ty, dst_f.ty);
assert_eq!(result, None);
result = Some(unsize_thin_ptr(fx, src, src_f.ty, dst_f.ty));
}
result.unwrap()
}
_ => bug!("unsize_thin_ptr: called on bad types"),
}
}
/// Coerce `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty`, and store the result in `dst`.
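///
/// A sketch of the coercions this handles (illustrative only, not a doc test):
///
/// ```ignore
/// let boxed: Box<[u8]> = Box::new([1u8, 2]); // Adt -> Adt: each Box field is coerced in turn
/// let slice: &[u8] = &[1u8, 2, 3];           // Ref -> Ref: handled by `coerce_ptr`
/// ```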
pub fn coerce_unsized_into<'tcx>(
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
src: CValue<'tcx>,
dst: CPlace<'tcx>,
) {
let src_ty = src.layout().ty;
let dst_ty = dst.layout().ty;
let mut coerce_ptr = || {
let (base, info) = if fx
.layout_of(src.layout().ty.builtin_deref(true).unwrap().ty)
.is_unsized()
{
// fat-ptr to fat-ptr unsize preserves the vtable
// i.e., &'a fmt::Debug+Send => &'a fmt::Debug
src.load_scalar_pair(fx)
} else {
let base = src.load_scalar(fx);
unsize_thin_ptr(fx, base, src_ty, dst_ty)
};
dst.write_cvalue(fx, CValue::by_val_pair(base, info, dst.layout()));
};
match (&src_ty.kind, &dst_ty.kind) {
(&ty::Ref(..), &ty::Ref(..))
| (&ty::Ref(..), &ty::RawPtr(..))
| (&ty::RawPtr(..), &ty::RawPtr(..)) => coerce_ptr(),
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() {
let src_f = src.value_field(fx, mir::Field::new(i));
let dst_f = dst.place_field(fx, mir::Field::new(i));
if dst_f.layout().is_zst() {
continue;
}
if src_f.layout().ty == dst_f.layout().ty {
dst_f.write_cvalue(fx, src_f);
} else {
coerce_unsized_into(fx, src_f, dst_f);
}
}
}
_ => bug!(
"coerce_unsized_into: invalid coercion {:?} -> {:?}",
src_ty,
dst_ty
),
}
}
// Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/glue.rs
pub fn size_and_align_of_dst<'tcx>(
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
ty: Ty<'tcx>,
info: Value,
) -> (Value, Value) {
let layout = fx.layout_of(ty);
if !layout.is_unsized() {
let size = fx
.bcx
.ins()
.iconst(fx.pointer_type, layout.size.bytes() as i64);
let align = fx
.bcx
.ins()
.iconst(fx.pointer_type, layout.align.abi.bytes() as i64);
return (size, align);
}
match ty.kind {
ty::Dynamic(..) => {
// load size/align from vtable
(
crate::vtable::size_of_obj(fx, info),
crate::vtable::min_align_of_obj(fx, info),
)
}
ty::Slice(_) | ty::Str => {
let unit = layout.field(fx, 0);
// The info in this case is the length of the str, so the size is that
// times the unit size.
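// E.g. (illustrative) for a `[u16]` with `info == 3`:
// size = 3 * 2 = 6 bytes, align = 2 bytes.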
(
fx.bcx.ins().imul_imm(info, unit.size.bytes() as i64),
fx.bcx
.ins()
.iconst(fx.pointer_type, unit.align.abi.bytes() as i64),
)
}
_ => {
// First get the size of all statically known fields.
// Don't use size_of because it also rounds up to alignment, which we
// want to avoid, as the unsized field's alignment could be smaller.
assert!(!ty.is_simd());
let i = layout.fields.count() - 1;
let sized_size = layout.fields.offset(i).bytes();
let sized_align = layout.align.abi.bytes();
let sized_align = fx.bcx.ins().iconst(fx.pointer_type, sized_align as i64);
// Recurse to get the size of the dynamically sized field (must be
// the last field).
let field_ty = layout.field(fx, i).ty;
let (unsized_size, mut unsized_align) = size_and_align_of_dst(fx, field_ty, info);
// FIXME (#26403, #27023): We should be adding padding
// to `sized_size` (to accommodate the `unsized_align`
// required of the unsized field that follows) before
// summing it with `sized_size`. (Note that since #26403
// is unfixed, we do not yet add the necessary padding
// here. But this is where the add would go.)
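// (Illustrative: for a hypothetical `struct S(u8, dyn Trait)` whose erased
// tail turns out to need 8-byte alignment at runtime, `sized_size` is 1 and
// 7 bytes of padding would belong between the fields; see the FIXME above.)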
// Return the sum of sizes and max of aligns.
let size = fx.bcx.ins().iadd_imm(unsized_size, sized_size as i64);
// Packed types ignore the alignment of their fields.
if let ty::Adt(def, _) = ty.kind {
if def.repr.packed() {
unsized_align = sized_align;
}
}
// Choose max of two known alignments (combined value must
// be aligned according to the more restrictive of the two).
let cmp = fx
.bcx
.ins()
.icmp(IntCC::UnsignedGreaterThan, sized_align, unsized_align);
let align = fx.bcx.ins().select(cmp, sized_align, unsized_align);
// Issue #27023: must add any necessary padding to `size`
// (to make it a multiple of `align`) before returning it.
//
// Namely, the returned size should be `size` rounded up to the
// nearest multiple of `align`, in C notation:
//
// `size + ((align - (size & (align-1))) & (align-1))`
//
// emulated via the semi-standard fast bit trick:
//
// `(size + (align-1)) & -align`
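// E.g. (illustrative) size = 6, align = 4: (6 + 3) & -4 == 8.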
let addend = fx.bcx.ins().iadd_imm(align, -1);
let add = fx.bcx.ins().iadd(size, addend);
let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
let neg = fx.bcx.ins().isub(zero, align);
let size = fx.bcx.ins().band(add, neg);
(size, align)
}
}
}