rustc_trans: move const & lvalue access helpers from adt.

This commit is contained in:
Eduard-Mihai Burtescu 2017-04-25 14:39:00 +03:00
parent aabfed5e0c
commit fab2532ef9
6 changed files with 251 additions and 329 deletions

View File

@ -41,52 +41,15 @@
//! used unboxed and any field can have pointers (including mutable)
//! taken to it, implementing them for Rust seems difficult.
use std;
use llvm::{ValueRef, True, IntEQ, IntNE};
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, LayoutTyper};
use common::*;
use builder::Builder;
use base;
use context::CrateContext;
use machine;
use monomorphize;
use type_::Type;
use type_of;
use mir::lvalue::Alignment;
/// Given an enum, struct, closure, or tuple, extracts fields.
/// Treats closures as a struct with one variant.
/// `empty_if_no_variants` is a switch to deal with empty enums.
/// If true, `variant_index` is disregarded and an empty Vec returned in this case.
///
/// Returns the monomorphized field types of variant `variant_index` of `t`,
/// in declaration order (not memory/layout order).
pub fn compute_fields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>,
variant_index: usize,
empty_if_no_variants: bool) -> Vec<Ty<'tcx>> {
match t.sty {
// Zero-variant (uninhabited) enum: no variant to index into.
ty::TyAdt(ref def, _) if def.variants.len() == 0 && empty_if_no_variants => {
Vec::default()
},
// Structs/enums: substitute the ADT's generic args into each field type.
ty::TyAdt(ref def, ref substs) => {
def.variants[variant_index].fields.iter().map(|f| {
monomorphize::field_ty(cx.tcx(), substs, f)
}).collect::<Vec<_>>()
},
ty::TyTuple(fields, _) => fields.to_vec(),
// A closure is treated as a single-variant struct of its captured upvars.
ty::TyClosure(def_id, substs) => {
if variant_index > 0 { bug!("{} is a closure, which only has one variant", t);}
substs.upvar_tys(def_id, cx.tcx()).collect()
},
// Generator interior fields may still contain associated types that
// need normalizing before they can be used for layout.
ty::TyGenerator(def_id, substs, _) => {
if variant_index > 0 { bug!("{} is a generator, which only has one variant", t);}
substs.field_tys(def_id, cx.tcx()).map(|t| {
cx.tcx().fully_normalize_associated_types_in(&t)
}).collect()
},
_ => bug!("{} is not a type that can have fields.", t)
}
}
/// LLVM-level types are a little complicated.
///
/// C-like enums need to be actual ints, not wrapped in a struct,
@ -119,8 +82,8 @@ pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
(nndiscr, nonnull, nonnull.packed),
_ => unreachable!()
};
let fields = compute_fields(cx, t, nonnull_variant_index as usize, true);
llty.set_struct_body(&struct_llfields(cx, &fields, nonnull_variant),
llty.set_struct_body(&struct_llfields(cx, t, nonnull_variant_index as usize,
nonnull_variant, None),
packed)
},
_ => bug!("This function cannot handle {} with layout {:#?}", t, l)
@ -148,10 +111,9 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
}
}
layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
let fields = compute_fields(cx, t, nndiscr as usize, false);
match name {
None => {
Type::struct_(cx, &struct_llfields(cx, &fields, nonnull),
Type::struct_(cx, &struct_llfields(cx, t, nndiscr as usize, nonnull, None),
nonnull.packed)
}
Some(name) => {
@ -160,17 +122,12 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
}
}
layout::Univariant { ref variant, .. } => {
// Note that this case also handles empty enums.
// Thus the true as the final parameter here.
let fields = compute_fields(cx, t, 0, true);
match name {
None => {
let fields = struct_llfields(cx, &fields, &variant);
Type::struct_(cx, &fields, variant.packed)
Type::struct_(cx, &struct_llfields(cx, t, 0, &variant, None),
variant.packed)
}
Some(name) => {
// Hypothesis: named_struct's can never need a
// drop flag. (... needs validation.)
Type::named_struct(cx, name)
}
}
@ -205,7 +162,7 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
let size = size.bytes();
let align = align.abi();
let primitive_align = primitive_align.abi();
assert!(align <= std::u32::MAX as u64);
assert!(align <= ::std::u32::MAX as u64);
let discr_ty = Type::from_integer(cx, discr);
let discr_size = discr.size().bytes();
let padded_discr_size = roundup(discr_size, align as u32);
@ -246,35 +203,63 @@ fn union_fill(cx: &CrateContext, size: u64, align: u64) -> Type {
}
}
// Translate a layout `FieldPath` into LLVM GEP indices. Each index is
// doubled because the LLVM struct we emit interleaves a padding slot before
// every real field (`FieldPath` entries already use `Struct::memory_index`).
fn struct_llfields_path(discrfield: &layout::FieldPath) -> Vec<usize> {
    let mut path = Vec::with_capacity(discrfield.len());
    for &i in discrfield.iter() {
        path.push((i as usize) * 2);
    }
    path
}
// Map a logical field index to its index in the emitted LLVM struct: look
// up the field's memory position via `Struct::memory_index`, then double it
// because each real field is preceded by an alignment-padding slot.
pub fn struct_llfields_index(variant: &layout::Struct, index: usize) -> usize {
    let memory_index = variant.memory_index[index] as usize;
    memory_index * 2
}
pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, field_tys: &Vec<Ty<'tcx>>,
variant: &layout::Struct) -> Vec<Type> {
pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
variant_index: usize,
variant: &layout::Struct,
discr: Option<Ty<'tcx>>) -> Vec<Type> {
let field_count = match t.sty {
ty::TyAdt(ref def, _) if def.variants.len() == 0 => return vec![],
ty::TyAdt(ref def, _) => {
discr.is_some() as usize + def.variants[variant_index].fields.len()
},
ty::TyTuple(fields, _) => fields.len(),
ty::TyClosure(def_id, substs) => {
if variant_index > 0 { bug!("{} is a closure, which only has one variant", t);}
substs.upvar_tys(def_id, cx.tcx()).count()
},
ty::TyGenerator(def_id, substs, _) => {
if variant_index > 0 { bug!("{} is a generator, which only has one variant", t);}
substs.field_tys(def_id, cx.tcx()).count()
},
_ => bug!("{} is not a type that can have fields.", t)
};
debug!("struct_llfields: variant: {:?}", variant);
let mut first_field = true;
let mut min_offset = 0;
let mut result: Vec<Type> = Vec::with_capacity(field_tys.len() * 2);
let mut result: Vec<Type> = Vec::with_capacity(field_count * 2);
let field_iter = variant.field_index_by_increasing_offset().map(|i| {
(i, field_tys[i as usize], variant.offsets[i as usize].bytes()) });
(i, match t.sty {
ty::TyAdt(..) if i == 0 && discr.is_some() => discr.unwrap(),
ty::TyAdt(ref def, ref substs) => {
monomorphize::field_ty(cx.tcx(), substs,
&def.variants[variant_index].fields[i as usize - discr.is_some() as usize])
},
ty::TyTuple(fields, _) => fields[i as usize],
ty::TyClosure(def_id, substs) => {
substs.upvar_tys(def_id, cx.tcx()).nth(i).unwrap()
},
ty::TyGenerator(def_id, substs, _) => {
let ty = substs.field_tys(def_id, cx.tcx()).nth(i).unwrap();
cx.tcx().normalize_associated_type(&ty)
},
_ => bug!()
}, variant.offsets[i as usize].bytes())
});
for (index, ty, target_offset) in field_iter {
assert!(target_offset >= min_offset);
let padding_bytes = target_offset - min_offset;
if first_field {
debug!("struct_llfields: {} ty: {} min_offset: {} target_offset: {}",
index, ty, min_offset, target_offset);
assert_eq!(padding_bytes, 0);
first_field = false;
} else {
assert!(target_offset >= min_offset);
let padding_bytes = if variant.packed { 0 } else { target_offset - min_offset };
result.push(Type::array(&Type::i8(cx), padding_bytes));
debug!("struct_llfields: {} ty: {} pad_bytes: {} min_offset: {} target_offset: {}",
index, ty, padding_bytes, min_offset, target_offset);
@ -282,10 +267,18 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, field_tys: &Vec<Ty
let llty = type_of::in_memory_type_of(cx, ty);
result.push(llty);
let layout = cx.layout_of(ty);
if variant.packed {
assert_eq!(padding_bytes, 0);
} else {
let field_align = layout.align(cx);
assert!(field_align.abi() <= variant.align.abi(),
"non-packed type has field with larger align ({}): {:#?}",
field_align.abi(), variant);
}
let target_size = layout.size(&cx.tcx().data_layout).bytes();
min_offset = target_offset + target_size;
}
if variant.sized && !field_tys.is_empty() {
if variant.sized && field_count > 0 {
if variant.stride().bytes() < min_offset {
bug!("variant: {:?} stride: {} min_offset: {}", variant, variant.stride().bytes(),
min_offset);
@ -294,7 +287,7 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, field_tys: &Vec<Ty
debug!("struct_llfields: pad_bytes: {} min_offset: {} min_size: {} stride: {}\n",
padding_bytes, min_offset, variant.min_size.bytes(), variant.stride().bytes());
result.push(Type::array(&Type::i8(cx), padding_bytes));
assert!(result.len() == (field_tys.len() * 2));
assert!(result.len() == (field_count * 2));
} else {
debug!("struct_llfields: min_offset: {} min_size: {} stride: {}\n",
min_offset, variant.min_size.bytes(), variant.stride().bytes());
@ -310,138 +303,6 @@ pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool {
}
}
/// Obtain the actual discriminant of a value.
///
/// Loads the discriminant of the enum value pointed to by `scrutinee`,
/// handling every enum layout. If `cast_to` is given, the result is
/// integer-cast to that LLVM type (signedness per `is_discr_signed`).
/// `range_assert` controls whether the load may carry LLVM range metadata.
pub fn trans_get_discr<'a, 'tcx>(
bcx: &Builder<'a, 'tcx>,
t: Ty<'tcx>,
scrutinee: ValueRef,
alignment: Alignment,
cast_to: Option<Type>,
range_assert: bool
) -> ValueRef {
debug!("trans_get_discr t: {:?}", t);
let l = bcx.ccx.layout_of(t);
let val = match *l {
// C-like enum: the value itself is the discriminant.
layout::CEnum { discr, min, max, .. } => {
load_discr(bcx, discr, scrutinee, alignment, min, max, range_assert)
}
// Tagged union: the discriminant is stored as field 0.
layout::General { discr, ref variants, .. } => {
let ptr = bcx.struct_gep(scrutinee, 0);
load_discr(bcx, discr, ptr, alignment,
0, variants.len() as u64 - 1,
range_assert)
}
// Single-variant layouts: there is no stored discriminant.
layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0),
// Nullable-pointer optimization: null-ness of the value encodes the variant.
layout::RawNullablePointer { nndiscr, .. } => {
let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
let discr = bcx.load(scrutinee, alignment.to_align());
bcx.icmp(cmp, discr, C_null(val_ty(discr)))
}
// Same optimization, but the pointer-like field is nested inside a struct.
layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee, alignment)
},
_ => bug!("{} is not an enum", t)
};
match cast_to {
None => val,
Some(llty) => bcx.intcast(val, llty, is_discr_signed(&l))
}
}
// Read the discriminant of a `StructWrappedNullablePointer` layout: the
// variant is encoded in whether a pointer-like field nested inside the
// struct is null.
fn struct_wrapped_nullable_bitdiscr(
    bcx: &Builder,
    nndiscr: u64,
    discrfield: &layout::FieldPath,
    scrutinee: ValueRef,
    alignment: Alignment,
) -> ValueRef {
    // GEP down to the discriminating field and load it.
    let gep_path = struct_llfields_path(discrfield);
    let llptrptr = bcx.gepi(scrutinee, &gep_path);
    let llptr = bcx.load(llptrptr, alignment.to_align());
    // Variant `nndiscr` is the non-null one, so comparing against null
    // yields the discriminant (0 or 1).
    let null_means = if nndiscr == 0 { IntEQ } else { IntNE };
    bcx.icmp(null_means, llptr, C_null(val_ty(llptr)))
}
/// Helper for cases where the discriminant is simply loaded.
///
/// Loads an integer discriminant of LLVM integer type `ity` from `ptr`.
/// When `range_assert` is set and `[min, max]` does not cover the whole
/// integer type, the load is emitted with LLVM range metadata so the
/// optimizer can exploit the known bound.
fn load_discr(bcx: &Builder, ity: layout::Integer, ptr: ValueRef,
alignment: Alignment, min: u64, max: u64,
range_assert: bool)
-> ValueRef {
let llty = Type::from_integer(bcx.ccx, ity);
assert_eq!(val_ty(ptr), llty.ptr_to());
let bits = ity.size().bits();
assert!(bits <= 64);
let bits = bits as usize;
// Mask selecting the low `bits` bits: min/max are compared modulo the
// discriminant's actual width.
let mask = !0u64 >> (64 - bits);
// For a (max) discr of -1, max will be `-1 as usize`, which overflows.
// However, that is fine here (it would still represent the full range),
if max.wrapping_add(1) & mask == min & mask || !range_assert {
// i.e., if the range is everything. The lo==hi case would be
// rejected by the LLVM verifier (it would mean either an
// empty set, which is impossible, or the entire range of the
// type, which is pointless).
bcx.load(ptr, alignment.to_align())
} else {
// llvm::ConstantRange can deal with ranges that wrap around,
// so an overflow on (max + 1) is fine.
bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ True,
alignment.to_align())
}
}
/// Set the discriminant for a new value of the given case of the given
/// representation.
///
/// `to` is the numeric discriminant value; how (and whether) it is written
/// depends entirely on the enum's layout.
pub fn trans_set_discr<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: u64) {
let l = bcx.ccx.layout_of(t);
match *l {
// C-like enum: the value is just the discriminant; store it directly.
layout::CEnum{ discr, min, max, .. } => {
assert_discr_in_range(min, max, to);
bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64),
val, None);
}
// Tagged union: the discriminant lives in field 0.
layout::General{ discr, .. } => {
bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64),
bcx.struct_gep(val, 0), None);
}
// Single-variant layouts: there is no stored discriminant to write.
layout::Univariant { .. }
| layout::UntaggedUnion { .. }
| layout::Vector { .. } => {
assert_eq!(to, 0);
}
// Nullable-pointer optimization: selecting the null variant means
// storing null; the non-null variant is implied by the payload.
layout::RawNullablePointer { nndiscr, .. } => {
if to != nndiscr {
let llptrty = val_ty(val).element_type();
bcx.store(C_null(llptrty), val, None);
}
}
layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => {
if to != nndiscr {
if target_sets_discr_via_memset(bcx) {
// Issue #34427: As workaround for LLVM bug on
// ARM, use memset of 0 on whole struct rather
// than storing null to single target field.
let llptr = bcx.pointercast(val, Type::i8(bcx.ccx).ptr_to());
let fill_byte = C_u8(bcx.ccx, 0);
let size = C_usize(bcx.ccx, nonnull.stride().bytes());
let align = C_i32(bcx.ccx, nonnull.align.abi() as i32);
base::call_memset(bcx, llptr, fill_byte, size, align, false);
} else {
// Null out the nested discriminating field.
let path = struct_llfields_path(discrfield);
let llptrptr = bcx.gepi(val, &path);
let llptrty = val_ty(llptrptr).element_type();
bcx.store(C_null(llptrty), llptrptr, None);
}
}
}
_ => bug!("Cannot handle {} represented as {:#?}", t, l)
}
}
// Issue #34427 workaround: on ARM/AArch64 the nullable-pointer discriminant
// is cleared by memset-ing the whole struct instead of storing null into a
// single field (see the StructWrappedNullablePointer arm above).
fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool {
    let arch = &bcx.sess().target.target.arch;
    arch == "arm" || arch == "aarch64"
}
pub fn assert_discr_in_range<D: PartialOrd>(min: D, max: D, discr: D) {
if min <= max {
assert!(min <= discr && discr <= max)
@ -453,45 +314,3 @@ pub fn assert_discr_in_range<D: PartialOrd>(min: D, max: D, discr: D) {
// FIXME this utility routine should be somewhere more general
/// Round `x` up to the nearest multiple of `a` (`a` must be non-zero).
#[inline]
fn roundup(x: u64, a: u32) -> u64 {
    let a = u64::from(a);
    match x % a {
        0 => x,
        rem => x + (a - rem),
    }
}
/// Extract a field of a constant value, as appropriate for its
/// representation.
///
/// (Not to be confused with `common::const_get_elt`, which operates on
/// raw LLVM-level structs and arrays.)
pub fn const_get_field<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>,
                                 val: ValueRef,
                                 ix: usize) -> ValueRef {
    let lay = ccx.layout_of(t);
    match *lay {
        // Vector elements map straight through; unions always use slot 0.
        layout::Vector { .. } => const_struct_field(val, ix),
        layout::UntaggedUnion { .. } => const_struct_field(val, 0),
        // Struct-like layouts may reorder fields: translate the source-order
        // index into memory order before skipping padding.
        layout::Univariant { ref variant, .. } => {
            const_struct_field(val, variant.memory_index[ix] as usize)
        }
        layout::CEnum { .. } => bug!("element access in C-like enum const"),
        _ => bug!("{} does not have fields.", t)
    }
}
/// Extract field of struct-like const, skipping our alignment padding.
fn const_struct_field(val: ValueRef, ix: usize) -> ValueRef {
    // Walk the LLVM struct's elements in order, counting only the non-undef
    // ones (undef elements are the padding we inserted), and return the one
    // at logical position `ix`.
    let mut remaining = ix;
    let mut pos = 0;
    loop {
        let field = const_get_elt(val, &[pos]);
        pos += 1;
        if is_undef(field) {
            // Padding slot — does not count toward the logical index.
            continue;
        }
        if remaining == 0 {
            return field;
        }
        remaining -= 1;
    }
}

View File

@ -15,7 +15,6 @@ use libc;
use llvm;
use llvm::{ValueRef};
use abi::{Abi, FnType};
use adt;
use mir::lvalue::{LvalueRef, Alignment};
use base::*;
use common::*;
@ -379,10 +378,10 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
"discriminant_value" => {
let val_ty = substs.type_at(0);
let adt_val = LvalueRef::new_sized_ty(llargs[0], val_ty, Alignment::AbiAligned);
match val_ty.sty {
ty::TyAdt(adt, ..) if adt.is_enum() => {
adt::trans_get_discr(bcx, val_ty, llargs[0], Alignment::AbiAligned,
Some(llret_ty), true)
adt_val.trans_get_discr(bcx, ret_ty)
}
_ => C_null(llret_ty)
}

View File

@ -462,8 +462,32 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
}
}
mir::ProjectionElem::Field(ref field, _) => {
let llprojected = adt::const_get_field(self.ccx, tr_base.ty, base.llval,
field.index());
// Extract field of struct-like const, skipping our alignment padding.
let mut ix = field.index();
let layout = self.ccx.layout_of(tr_base.ty);
if let layout::Univariant { ref variant, .. } = *layout {
ix = variant.memory_index[ix] as usize;
}
// Get the ix-th non-undef element of the struct.
let mut real_ix = 0; // actual position in the struct
let mut ix = ix; // logical index relative to real_ix
let mut llprojected;
loop {
loop {
llprojected = const_get_elt(base.llval, &[real_ix]);
if !is_undef(llprojected) {
break;
}
real_ix = real_ix + 1;
}
if ix == 0 {
break;
}
ix = ix - 1;
real_ix = real_ix + 1;
}
let llextra = if !has_metadata {
ptr::null_mut()
} else {

View File

@ -8,15 +8,16 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::ValueRef;
use llvm::{self, ValueRef};
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{self, LayoutTyper};
use rustc::mir;
use rustc::mir::tcx::LvalueTy;
use rustc_data_structures::indexed_vec::Idx;
use adt;
use base;
use builder::Builder;
use common::{self, CrateContext, C_usize};
use common::{self, CrateContext, C_usize, C_u8, C_i32, C_int, C_null, val_ty};
use consts;
use machine;
use type_of;
@ -70,6 +71,10 @@ impl Alignment {
}
}
// Issue #34427 workaround: on ARM/AArch64 the nullable-pointer discriminant
// is cleared by memset-ing the whole struct rather than storing null into a
// single field.
fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool {
    let arch = &bcx.sess().target.target.arch;
    arch == "arm" || arch == "aarch64"
}
#[derive(Copy, Clone, Debug)]
pub struct LvalueRef<'tcx> {
/// Pointer to the contents of the lvalue
@ -121,23 +126,56 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
!self.llextra.is_null()
}
fn struct_field_ptr(
self,
bcx: &Builder<'a, 'tcx>,
st: &layout::Struct,
fields: &Vec<Ty<'tcx>>,
ix: usize,
needs_cast: bool
) -> (ValueRef, Alignment) {
let fty = fields[ix];
/// Access a field, at a point when the value's case is known.
pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> (ValueRef, Alignment) {
let ccx = bcx.ccx;
let mut l = ccx.layout_of(self.ty.to_ty(bcx.tcx()));
match self.ty {
LvalueTy::Ty { .. } => {}
LvalueTy::Downcast { variant_index, .. } => {
l = l.for_variant(variant_index)
}
}
let fty = l.field(ccx, ix).ty;
let mut ix = ix;
let st = match *l {
layout::Vector { .. } => {
return (bcx.struct_gep(self.llval, ix), self.alignment);
}
layout::UntaggedUnion { ref variants } => {
let ty = type_of::in_memory_type_of(ccx, fty);
return (bcx.pointercast(self.llval, ty.ptr_to()),
self.alignment | Alignment::from_packed(variants.packed));
}
layout::RawNullablePointer { nndiscr, .. } |
layout::StructWrappedNullablePointer { nndiscr, .. }
if l.variant_index.unwrap() as u64 != nndiscr => {
// The unit-like case might have a nonzero number of unit-like fields.
// (e.g., Result of Either with (), as one side.)
let ty = type_of::type_of(ccx, fty);
assert_eq!(machine::llsize_of_alloc(ccx, ty), 0);
return (bcx.pointercast(self.llval, ty.ptr_to()), Alignment::Packed);
}
layout::RawNullablePointer { .. } => {
let ty = type_of::type_of(ccx, fty);
return (bcx.pointercast(self.llval, ty.ptr_to()), self.alignment);
}
layout::Univariant { ref variant, .. } => variant,
layout::StructWrappedNullablePointer { ref nonnull, .. } => nonnull,
layout::General { ref variants, .. } => {
ix += 1;
&variants[l.variant_index.unwrap()]
}
_ => bug!("element access in type without elements: {} represented as {:#?}", l.ty, l)
};
let alignment = self.alignment | Alignment::from_packed(st.packed);
let llfields = adt::struct_llfields(ccx, fields, st);
let ptr_val = if needs_cast {
let real_ty = Type::struct_(ccx, &llfields[..], st.packed);
bcx.pointercast(self.llval, real_ty.ptr_to())
let ptr_val = if let layout::General { discr, .. } = *l {
let variant_ty = Type::struct_(ccx,
&adt::struct_llfields(ccx, l.ty, l.variant_index.unwrap(), st,
Some(discr.to_ty(&bcx.tcx(), false))), st.packed);
bcx.pointercast(self.llval, variant_ty.ptr_to())
} else {
self.llval
};
@ -147,7 +185,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
// * Packed struct - There is no alignment padding
// * Field is sized - pointer is properly aligned already
if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed ||
bcx.ccx.shared().type_is_sized(fty)
ccx.shared().type_is_sized(fty)
{
return (bcx.struct_gep(
ptr_val, adt::struct_llfields_index(st, ix)), alignment);
@ -189,7 +227,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
let offset = st.offsets[ix].bytes();
let unaligned_offset = C_usize(bcx.ccx, offset);
let unaligned_offset = C_usize(ccx, offset);
// Get the alignment of the field
let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta);
@ -200,77 +238,130 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
// (unaligned offset + (align - 1)) & -align
// Calculate offset
let align_sub_1 = bcx.sub(align, C_usize(bcx.ccx, 1));
let align_sub_1 = bcx.sub(align, C_usize(ccx, 1u64));
let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1),
bcx.neg(align));
debug!("struct_field_ptr: DST field offset: {:?}", Value(offset));
// Cast and adjust pointer
let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx));
let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(ccx));
let byte_ptr = bcx.gep(byte_ptr, &[offset]);
// Finally, cast back to the type expected
let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty);
let ll_fty = type_of::in_memory_type_of(ccx, fty);
debug!("struct_field_ptr: Field type is {:?}", ll_fty);
(bcx.pointercast(byte_ptr, ll_fty.ptr_to()), alignment)
}
/// Access a field, at a point when the value's case is known.
pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> (ValueRef, Alignment) {
let discr = match self.ty {
LvalueTy::Ty { .. } => 0,
LvalueTy::Downcast { variant_index, .. } => variant_index,
// Double index to account for padding (FieldPath already uses `Struct::memory_index`):
// every real field in the emitted LLVM struct is preceded by a padding slot,
// so each path component is multiplied by two before the GEP.
fn gepi_struct_llfields_path(self, bcx: &Builder, discrfield: &layout::FieldPath) -> ValueRef {
    let mut path = Vec::with_capacity(discrfield.len());
    for &i in discrfield.iter() {
        path.push((i as usize) * 2);
    }
    bcx.gepi(self.llval, &path)
}
/// Helper for cases where the discriminant is simply loaded.
///
/// Loads an integer discriminant of LLVM integer type `ity` from `ptr`,
/// using this lvalue's alignment. When `[min, max]` does not cover the
/// whole integer type, the load carries LLVM range metadata.
fn load_discr(self, bcx: &Builder, ity: layout::Integer, ptr: ValueRef,
min: u64, max: u64) -> ValueRef {
let llty = Type::from_integer(bcx.ccx, ity);
assert_eq!(val_ty(ptr), llty.ptr_to());
let bits = ity.size().bits();
assert!(bits <= 64);
let bits = bits as usize;
// Mask selecting the low `bits` bits: min/max are compared modulo the
// discriminant's actual width.
let mask = !0u64 >> (64 - bits);
// For a (max) discr of -1, max will be `-1 as usize`, which overflows.
// However, that is fine here (it would still represent the full range),
if max.wrapping_add(1) & mask == min & mask {
// i.e., if the range is everything. The lo==hi case would be
// rejected by the LLVM verifier (it would mean either an
// empty set, which is impossible, or the entire range of the
// type, which is pointless).
bcx.load(ptr, self.alignment.to_align())
} else {
// llvm::ConstantRange can deal with ranges that wrap around,
// so an overflow on (max + 1) is fine.
bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ llvm::True,
self.alignment.to_align())
}
}
/// Obtain the actual discriminant of a value.
pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef {
let l = bcx.ccx.layout_of(self.ty.to_ty(bcx.tcx()));
let val = match *l {
layout::CEnum { discr, min, max, .. } => {
self.load_discr(bcx, discr, self.llval, min, max)
}
layout::General { discr, ref variants, .. } => {
let ptr = bcx.struct_gep(self.llval, 0);
self.load_discr(bcx, discr, ptr, 0, variants.len() as u64 - 1)
}
layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0),
layout::RawNullablePointer { nndiscr, .. } => {
let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE };
let discr = bcx.load(self.llval, self.alignment.to_align());
bcx.icmp(cmp, discr, C_null(val_ty(discr)))
}
layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
let llptrptr = self.gepi_struct_llfields_path(bcx, discrfield);
let llptr = bcx.load(llptrptr, self.alignment.to_align());
let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE };
bcx.icmp(cmp, llptr, C_null(val_ty(llptr)))
},
_ => bug!("{} is not an enum", l.ty)
};
let t = self.ty.to_ty(bcx.tcx());
let l = bcx.ccx.layout_of(t);
// Note: if this ever needs to generate conditionals (e.g., if we
// decide to do some kind of cdr-coding-like non-unique repr
// someday), it will need to return a possibly-new bcx as well.
let cast_to = type_of::immediate_type_of(bcx.ccx, cast_to);
bcx.intcast(val, cast_to, adt::is_discr_signed(&l))
}
/// Set the discriminant for a new value of the given case of the given
/// representation.
pub fn trans_set_discr(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) {
let l = bcx.ccx.layout_of(self.ty.to_ty(bcx.tcx()));
let to = l.ty.ty_adt_def().unwrap()
.discriminant_for_variant(bcx.tcx(), variant_index)
.to_u128_unchecked() as u64;
match *l {
layout::Univariant { ref variant, .. } => {
assert_eq!(discr, 0);
self.struct_field_ptr(bcx, &variant,
&adt::compute_fields(bcx.ccx, t, 0, false), ix, false)
layout::CEnum { discr, min, max, .. } => {
adt::assert_discr_in_range(min, max, to);
bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64),
self.llval, self.alignment.to_align());
}
layout::Vector { count, .. } => {
assert_eq!(discr, 0);
assert!((ix as u64) < count);
(bcx.struct_gep(self.llval, ix), self.alignment)
layout::General { discr, .. } => {
bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64),
bcx.struct_gep(self.llval, 0), self.alignment.to_align());
}
layout::General { discr: d, ref variants, .. } => {
let mut fields = adt::compute_fields(bcx.ccx, t, discr, false);
fields.insert(0, d.to_ty(&bcx.tcx(), false));
self.struct_field_ptr(bcx, &variants[discr], &fields, ix + 1, true)
}
layout::UntaggedUnion { ref variants } => {
let fields = adt::compute_fields(bcx.ccx, t, 0, false);
let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]);
(bcx.pointercast(self.llval, ty.ptr_to()),
self.alignment | Alignment::from_packed(variants.packed))
}
layout::RawNullablePointer { nndiscr, .. } |
layout::StructWrappedNullablePointer { nndiscr, .. } if discr as u64 != nndiscr => {
let nullfields = adt::compute_fields(bcx.ccx, t, (1-nndiscr) as usize, false);
// The unit-like case might have a nonzero number of unit-like fields.
// (e.g., Result of Either with (), as one side.)
let ty = type_of::type_of(bcx.ccx, nullfields[ix]);
assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0);
(bcx.pointercast(self.llval, ty.ptr_to()), Alignment::Packed)
layout::Univariant { .. }
| layout::UntaggedUnion { .. }
| layout::Vector { .. } => {
assert_eq!(to, 0);
}
layout::RawNullablePointer { nndiscr, .. } => {
let nnty = adt::compute_fields(bcx.ccx, t, nndiscr as usize, false)[0];
assert_eq!(ix, 0);
assert_eq!(discr as u64, nndiscr);
let ty = type_of::type_of(bcx.ccx, nnty);
(bcx.pointercast(self.llval, ty.ptr_to()), self.alignment)
if to != nndiscr {
let llptrty = val_ty(self.llval).element_type();
bcx.store(C_null(llptrty), self.llval, self.alignment.to_align());
}
}
layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
assert_eq!(discr as u64, nndiscr);
self.struct_field_ptr(bcx, &nonnull,
&adt::compute_fields(bcx.ccx, t, discr, false), ix, false)
layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => {
if to != nndiscr {
if target_sets_discr_via_memset(bcx) {
// Issue #34427: As workaround for LLVM bug on
// ARM, use memset of 0 on whole struct rather
// than storing null to single target field.
let llptr = bcx.pointercast(self.llval, Type::i8(bcx.ccx).ptr_to());
let fill_byte = C_u8(bcx.ccx, 0);
let size = C_usize(bcx.ccx, nonnull.stride().bytes());
let align = C_i32(bcx.ccx, nonnull.align.abi() as i32);
base::call_memset(bcx, llptr, fill_byte, size, align, false);
} else {
let llptrptr = self.gepi_struct_llfields_path(bcx, discrfield);
let llptrty = val_ty(llptrptr).element_type();
bcx.store(C_null(llptrty), llptrptr, self.alignment.to_align());
}
}
}
_ => bug!("element access in type without elements: {} represented as {:#?}", t, l)
_ => bug!("Cannot handle {} represented as {:#?}", l.ty, l)
}
}

View File

@ -139,10 +139,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
mir::Rvalue::Aggregate(ref kind, ref operands) => {
match **kind {
mir::AggregateKind::Adt(adt_def, variant_index, substs, active_field_index) => {
let discr = adt_def.discriminant_for_variant(bcx.tcx(), variant_index)
.to_u128_unchecked() as u64;
let dest_ty = dest.ty.to_ty(bcx.tcx());
adt::trans_set_discr(&bcx, dest_ty, dest.llval, discr);
dest.trans_set_discr(&bcx, variant_index);
for (i, operand) in operands.iter().enumerate() {
let op = self.trans_operand(&bcx, operand);
// Do not generate stores and GEPis for zero-sized fields.
@ -451,12 +448,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
mir::Rvalue::Discriminant(ref lvalue) => {
let discr_lvalue = self.trans_lvalue(&bcx, lvalue);
let enum_ty = discr_lvalue.ty.to_ty(bcx.tcx());
let discr_ty = rvalue.ty(&*self.mir, bcx.tcx());
let discr_type = type_of::immediate_type_of(bcx.ccx, discr_ty);
let discr = adt::trans_get_discr(&bcx, enum_ty, discr_lvalue.llval,
discr_lvalue.alignment, Some(discr_type), true);
let discr = self.trans_lvalue(&bcx, lvalue)
.trans_get_discr(&bcx, discr_ty);
(bcx, OperandRef {
val: OperandValue::Immediate(discr),
ty: discr_ty

View File

@ -17,7 +17,6 @@ use builder::Builder;
use super::MirContext;
use super::LocalRef;
use super::super::adt;
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_statement(&mut self,
@ -59,12 +58,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
}
mir::StatementKind::SetDiscriminant{ref lvalue, variant_index} => {
let ty = self.monomorphized_lvalue_ty(lvalue);
let lvalue_transed = self.trans_lvalue(&bcx, lvalue);
adt::trans_set_discr(&bcx,
ty,
lvalue_transed.llval,
variant_index as u64);
self.trans_lvalue(&bcx, lvalue)
.trans_set_discr(&bcx, variant_index);
bcx
}
mir::StatementKind::StorageLive(local) => {