translate drop glue using MIR

Drop of arrays is now translated in trans::block in an ugly way that I
should clean up in a later PR, and it does not handle panics in the middle
of an array drop, but this commit & PR are growing too big.
Ariel Ben-Yehuda 2017-03-14 01:08:21 +02:00
parent 26df816f52
commit f2c7917402
53 changed files with 719 additions and 1139 deletions

View File

@ -46,8 +46,13 @@
issue = "0")]
#![allow(missing_docs)]
extern "rust-intrinsic" {
#[cfg(not(stage0))]
#[stable(feature = "drop_in_place", since = "1.8.0")]
#[rustc_deprecated(reason = "no longer an intrinsic - use `ptr::drop_in_place` directly",
since = "1.18.0")]
pub use ptr::drop_in_place;
extern "rust-intrinsic" {
// NB: These intrinsics take raw pointers because they mutate aliased
// memory, which is not valid for either `&` or `&mut`.
@ -622,6 +627,7 @@ extern "rust-intrinsic" {
pub fn size_of_val<T: ?Sized>(_: &T) -> usize;
pub fn min_align_of_val<T: ?Sized>(_: &T) -> usize;
#[cfg(stage0)]
/// Executes the destructor (if any) of the pointed-to value.
///
/// This has two use cases:

View File

@ -37,9 +37,38 @@ pub use intrinsics::copy;
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::write_bytes;
#[cfg(stage0)]
#[stable(feature = "drop_in_place", since = "1.8.0")]
pub use intrinsics::drop_in_place;
#[cfg(not(stage0))]
/// Executes the destructor (if any) of the pointed-to value.
///
/// This has two use cases:
///
/// * It is *required* to use `drop_in_place` to drop unsized types like
/// trait objects, because they can't be read out onto the stack and
/// dropped normally.
///
/// * It is friendlier to the optimizer to do this over `ptr::read` when
/// dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
/// as the compiler doesn't need to prove that it's sound to elide the
/// copy.
///
/// # Undefined Behavior
///
/// This has all the same safety problems as `ptr::read` with respect to
/// invalid pointers, types, and double drops.
#[stable(feature = "drop_in_place", since = "1.8.0")]
#[lang="drop_in_place"]
#[inline]
#[allow(unconditional_recursion)]
pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
// Code here does not matter - this is replaced by the
// real drop glue by the compiler.
drop_in_place(to_drop);
}
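
Editor's note: a minimal usage sketch for the re-exported `ptr::drop_in_place` (illustrative only, not part of this diff; the variable names are invented). The destructor runs in place through the raw pointer, and the caller must not touch the value afterwards.

use std::mem::ManuallyDrop;
use std::ptr;

fn main() {
    // ManuallyDrop keeps the Vec from being dropped automatically at scope end.
    let mut slot = ManuallyDrop::new(vec![1, 2, 3]);
    unsafe {
        // Runs Vec's destructor in place: the heap buffer is freed, while the
        // stack slot itself is untouched and must not be used (or dropped) again.
        ptr::drop_in_place(&mut *slot as *mut Vec<i32>);
    }
}
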
/// Creates a null raw pointer.
///
/// # Examples

View File

@ -335,7 +335,7 @@ language_item_table! {
ExchangeMallocFnLangItem, "exchange_malloc", exchange_malloc_fn;
BoxFreeFnLangItem, "box_free", box_free_fn;
StrDupUniqFnLangItem, "strdup_uniq", strdup_uniq_fn;
DropInPlaceFnLangItem, "drop_in_place", drop_in_place_fn;
StartFnLangItem, "start", start_fn;
@ -355,8 +355,6 @@ language_item_table! {
ContravariantLifetimeItem, "contravariant_lifetime", contravariant_lifetime;
InvariantLifetimeItem, "invariant_lifetime", invariant_lifetime;
NoCopyItem, "no_copy_bound", no_copy_bound;
NonZeroItem, "non_zero", non_zero;
DebugTraitLangItem, "debug_trait", debug_trait;

View File

@ -17,7 +17,7 @@ use rustc_data_structures::control_flow_graph::{GraphPredecessors, GraphSuccesso
use rustc_data_structures::control_flow_graph::ControlFlowGraph;
use hir::def::CtorKind;
use hir::def_id::DefId;
use ty::subst::Substs;
use ty::subst::{Subst, Substs};
use ty::{self, AdtDef, ClosureSubsts, Region, Ty};
use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
use util::ppaux;
@ -982,6 +982,22 @@ impl<'tcx> Debug for Operand<'tcx> {
}
}
impl<'tcx> Operand<'tcx> {
pub fn item<'a>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
substs: &'tcx Substs<'tcx>,
span: Span)
-> Self
{
Operand::Constant(Constant {
span: span,
ty: tcx.item_type(def_id).subst(tcx, substs),
literal: Literal::Item { def_id, substs }
})
}
}
///////////////////////////////////////////////////////////////////////////
/// Rvalues

View File

@ -35,6 +35,8 @@ pub enum InstanceDef<'tcx> {
Virtual(DefId, usize),
// <[mut closure] as FnOnce>::call_once
ClosureOnceShim { call_once: DefId },
// drop_in_place::<T>; None for empty drop glue.
DropGlue(DefId, Option<Ty<'tcx>>),
}
impl<'tcx> InstanceDef<'tcx> {
@ -46,7 +48,8 @@ impl<'tcx> InstanceDef<'tcx> {
InstanceDef::Virtual(def_id, _) |
InstanceDef::Intrinsic(def_id) |
InstanceDef::ClosureOnceShim { call_once: def_id }
=> def_id
=> def_id,
InstanceDef::DropGlue(def_id, _) => def_id
}
}
@ -65,6 +68,7 @@ impl<'tcx> InstanceDef<'tcx> {
// real on-demand.
let ty = match self {
&InstanceDef::FnPtrShim(_, ty) => Some(ty),
&InstanceDef::DropGlue(_, ty) => ty,
_ => None
}.into_iter();
@ -97,6 +101,9 @@ impl<'tcx> fmt::Display for Instance<'tcx> {
InstanceDef::ClosureOnceShim { .. } => {
write!(f, " - shim")
}
InstanceDef::DropGlue(_, ty) => {
write!(f, " - shim({:?})", ty)
}
}
}
}

View File

@ -437,7 +437,7 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> {
}
Rvalue::Ref(..) |
Rvalue::Discriminant(..) |
Rvalue::Len(..) => {}
Rvalue::Len(..) |
Rvalue::Box(..) => {
// This returns an rvalue with uninitialized contents. We can't
// move out of it here because it is an rvalue - assignments always

View File

@ -15,7 +15,7 @@ use rustc::middle::region::ROOT_CODE_EXTENT;
use rustc::mir::*;
use rustc::mir::transform::MirSource;
use rustc::ty::{self, Ty};
use rustc::ty::subst::Subst;
use rustc::ty::subst::{Kind, Subst};
use rustc::ty::maps::Providers;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
@ -25,10 +25,13 @@ use syntax::ast;
use syntax_pos::Span;
use std::cell::RefCell;
use std::fmt;
use std::iter;
use std::mem;
use transform::{add_call_guards, no_landing_pads, simplify};
use util::elaborate_drops::{self, DropElaborator, DropStyle, DropFlagMode};
use util::patch::MirPatch;
pub fn provide(providers: &mut Providers) {
providers.mir_shims = make_shim;
@ -101,6 +104,9 @@ fn make_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>,
None
)
}
ty::InstanceDef::DropGlue(def_id, ty) => {
build_drop_shim(tcx, &param_env, def_id, ty)
}
ty::InstanceDef::Intrinsic(_) => {
bug!("creating shims from intrinsics ({:?}) is unsupported", instance)
}
@ -143,6 +149,129 @@ fn local_decls_for_sig<'tcx>(sig: &ty::FnSig<'tcx>)
.collect()
}
fn build_drop_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>,
param_env: &ty::ParameterEnvironment<'tcx>,
def_id: DefId,
ty: Option<Ty<'tcx>>)
-> Mir<'tcx>
{
debug!("build_drop_shim(def_id={:?}, ty={:?})", def_id, ty);
let substs = if let Some(ty) = ty {
tcx.mk_substs(iter::once(Kind::from(ty)))
} else {
param_env.free_substs
};
let fn_ty = tcx.item_type(def_id).subst(tcx, substs);
let sig = tcx.erase_late_bound_regions(&fn_ty.fn_sig());
let span = tcx.def_span(def_id);
let source_info = SourceInfo { span, scope: ARGUMENT_VISIBILITY_SCOPE };
let return_block = BasicBlock::new(1);
let mut blocks = IndexVec::new();
let block = |blocks: &mut IndexVec<_, _>, kind| {
blocks.push(BasicBlockData {
statements: vec![],
terminator: Some(Terminator { source_info, kind }),
is_cleanup: false
})
};
block(&mut blocks, TerminatorKind::Goto { target: return_block });
block(&mut blocks, TerminatorKind::Return);
let mut mir = Mir::new(
blocks,
IndexVec::from_elem_n(
VisibilityScopeData { span: span, parent_scope: None }, 1
),
IndexVec::new(),
sig.output(),
local_decls_for_sig(&sig),
sig.inputs().len(),
vec![],
span
);
if let Some(..) = ty {
let patch = {
let mut elaborator = DropShimElaborator {
mir: &mir,
patch: MirPatch::new(&mir),
tcx, param_env
};
let dropee = Lvalue::Projection(
box Projection {
base: Lvalue::Local(Local::new(1+0)),
elem: ProjectionElem::Deref
}
);
let resume_block = elaborator.patch.resume_block();
elaborate_drops::elaborate_drop(
&mut elaborator,
source_info,
false,
&dropee,
(),
return_block,
Some(resume_block),
START_BLOCK
);
elaborator.patch
};
patch.apply(&mut mir);
}
mir
}
pub struct DropShimElaborator<'a, 'tcx: 'a> {
mir: &'a Mir<'tcx>,
patch: MirPatch<'tcx>,
tcx: ty::TyCtxt<'a, 'tcx, 'tcx>,
param_env: &'a ty::ParameterEnvironment<'tcx>,
}
impl<'a, 'tcx> fmt::Debug for DropShimElaborator<'a, 'tcx> {
fn fmt(&self, _f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
Ok(())
}
}
impl<'a, 'tcx> DropElaborator<'a, 'tcx> for DropShimElaborator<'a, 'tcx> {
type Path = ();
fn patch(&mut self) -> &mut MirPatch<'tcx> { &mut self.patch }
fn mir(&self) -> &'a Mir<'tcx> { self.mir }
fn tcx(&self) -> ty::TyCtxt<'a, 'tcx, 'tcx> { self.tcx }
fn param_env(&self) -> &'a ty::ParameterEnvironment<'tcx> { self.param_env }
fn drop_style(&self, _path: Self::Path, mode: DropFlagMode) -> DropStyle {
if let DropFlagMode::Shallow = mode {
DropStyle::Static
} else {
DropStyle::Open
}
}
fn get_drop_flag(&mut self, _path: Self::Path) -> Option<Operand<'tcx>> {
None
}
fn clear_drop_flag(&mut self, _location: Location, _path: Self::Path, _mode: DropFlagMode) {
}
fn field_subpath(&self, _path: Self::Path, _field: Field) -> Option<Self::Path> {
None
}
fn deref_subpath(&self, _path: Self::Path) -> Option<Self::Path> {
None
}
fn downcast_subpath(&self, _path: Self::Path, _variant: usize) -> Option<Self::Path> {
Some(())
}
}
/// Build a "call" shim for `def_id`. The shim calls the
/// function specified by `call_kind`, first adjusting its first
/// argument according to `rcvr_adjustment`.
@ -162,7 +291,6 @@ fn build_call_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>,
def_id, rcvr_adjustment, call_kind, untuple_args);
let fn_ty = tcx.item_type(def_id).subst(tcx, param_env.free_substs);
// Not normalizing here without a param env.
let sig = tcx.erase_late_bound_regions(&fn_ty.fn_sig());
let span = tcx.def_span(def_id);

View File

@ -9,10 +9,12 @@
// except according to those terms.
use std::fmt;
use rustc::hir;
use rustc::mir::*;
use rustc::middle::const_val::ConstInt;
use rustc::middle::lang_items;
use rustc::ty::{self, Ty};
use rustc::ty::subst::{Kind, Subst, Substs};
use rustc::ty::subst::{Kind, Substs};
use rustc::ty::util::IntTypeExt;
use rustc_data_structures::indexed_vec::Idx;
use util::patch::MirPatch;
@ -92,6 +94,7 @@ pub fn elaborate_drop<'b, 'tcx, D>(
bb: BasicBlock)
where D: DropElaborator<'b, 'tcx>
{
assert_eq!(unwind.is_none(), is_cleanup);
DropCtxt {
elaborator, source_info, is_cleanup, lvalue, path, succ, unwind
}.elaborate_drop(bb)
@ -146,7 +149,10 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
});
}
DropStyle::Conditional => {
let drop_bb = self.complete_drop(Some(DropFlagMode::Deep));
let is_cleanup = self.is_cleanup; // FIXME(#6393)
let succ = self.succ;
let drop_bb = self.complete_drop(
is_cleanup, Some(DropFlagMode::Deep), succ);
self.elaborator.patch().patch_terminator(bb, TerminatorKind::Goto {
target: drop_bb
});
@ -208,7 +214,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
// Using `self.path` here to condition the drop on
// our own drop flag.
path: self.path
}.complete_drop(None)
}.complete_drop(is_cleanup, None, succ)
}
}
@ -220,7 +226,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
/// (the first field is never reached). If it is `None`, all
/// unwind targets are left blank.
fn drop_halfladder<'a>(&mut self,
unwind_ladder: Option<Vec<BasicBlock>>,
unwind_ladder: Option<&[BasicBlock]>,
succ: BasicBlock,
fields: &[(Lvalue<'tcx>, Option<D::Path>)],
is_cleanup: bool)
@ -262,10 +268,10 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
/// .c1:
/// ELAB(drop location.1 [target=.c2])
/// .c2:
/// ELAB(drop location.2 [target=`self.unwind])
/// ELAB(drop location.2 [target=`self.unwind`])
fn drop_ladder<'a>(&mut self,
fields: Vec<(Lvalue<'tcx>, Option<D::Path>)>)
-> BasicBlock
-> (BasicBlock, Option<BasicBlock>)
{
debug!("drop_ladder({:?}, {:?})", self, fields);
@ -286,8 +292,12 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
let succ = self.succ; // FIXME(#6393)
let is_cleanup = self.is_cleanup;
self.drop_halfladder(unwind_ladder, succ, &fields, is_cleanup)
.last().cloned().unwrap_or(succ)
let normal_ladder =
self.drop_halfladder(unwind_ladder.as_ref().map(|x| &**x),
succ, &fields, is_cleanup);
(normal_ladder.last().cloned().unwrap_or(succ),
unwind_ladder.and_then(|l| l.last().cloned()).or(self.unwind))
}
fn open_drop_for_tuple<'a>(&mut self, tys: &[Ty<'tcx>])
@ -300,7 +310,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
self.elaborator.field_subpath(self.path, Field::new(i)))
}).collect();
self.drop_ladder(fields)
self.drop_ladder(fields).0
}
fn open_drop_for_box<'a>(&mut self, ty: Ty<'tcx>) -> BasicBlock
@ -323,7 +333,33 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
fn open_drop_for_adt<'a>(&mut self, adt: &'tcx ty::AdtDef, substs: &'tcx Substs<'tcx>)
-> BasicBlock {
debug!("open_drop_for_adt({:?}, {:?}, {:?})", self, adt, substs);
if adt.variants.len() == 0 {
return self.elaborator.patch().new_block(BasicBlockData {
statements: vec![],
terminator: Some(Terminator {
source_info: self.source_info,
kind: TerminatorKind::Unreachable
}),
is_cleanup: self.is_cleanup
});
}
let contents_drop = if adt.is_union() {
(self.succ, self.unwind)
} else {
self.open_drop_for_adt_contents(adt, substs)
};
if adt.has_dtor(self.tcx()) {
self.destructor_call_block(contents_drop)
} else {
contents_drop.0
}
}
fn open_drop_for_adt_contents<'a>(&mut self, adt: &'tcx ty::AdtDef,
substs: &'tcx Substs<'tcx>)
-> (BasicBlock, Option<BasicBlock>) {
match adt.variants.len() {
1 => {
let fields = self.move_paths_for_fields(
@ -335,9 +371,19 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
self.drop_ladder(fields)
}
_ => {
let is_cleanup = self.is_cleanup;
let succ = self.succ;
let unwind = self.unwind; // FIXME(#6393)
let mut values = Vec::with_capacity(adt.variants.len());
let mut blocks = Vec::with_capacity(adt.variants.len());
let mut normal_blocks = Vec::with_capacity(adt.variants.len());
let mut unwind_blocks = if is_cleanup {
None
} else {
Some(Vec::with_capacity(adt.variants.len()))
};
let mut otherwise = None;
let mut unwind_otherwise = None;
for (variant_index, discr) in adt.discriminants(self.tcx()).enumerate() {
let subpath = self.elaborator.downcast_subpath(
self.path, variant_index);
@ -351,53 +397,146 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
&adt.variants[variant_index],
substs);
values.push(discr);
blocks.push(self.drop_ladder(fields));
if let Some(ref mut unwind_blocks) = unwind_blocks {
// We can't use the half-ladder from the original
// drop ladder, because this breaks the
// "funclet can't have 2 successor funclets"
// requirement from MSVC:
//
// switch unwind-switch
// / \ / \
// v1.0 v2.0 v2.0-unwind v1.0-unwind
// | | / |
// v1.1-unwind v2.1-unwind |
// ^ |
// \-------------------------------/
//
// Create a duplicate half-ladder to avoid that. We
// could technically only do this on MSVC, but I
// want to minimize the divergence between MSVC
// and non-MSVC.
let unwind = unwind.unwrap();
let halfladder = self.drop_halfladder(
None, unwind, &fields, true);
unwind_blocks.push(
halfladder.last().cloned().unwrap_or(unwind)
);
}
let (normal, _) = self.drop_ladder(fields);
normal_blocks.push(normal);
} else {
// variant not found - drop the entire enum
if let None = otherwise {
otherwise =
Some(self.complete_drop(Some(DropFlagMode::Shallow)));
otherwise = Some(self.complete_drop(
is_cleanup,
Some(DropFlagMode::Shallow),
succ));
unwind_otherwise = unwind.map(|unwind| self.complete_drop(
true,
Some(DropFlagMode::Shallow),
unwind
));
}
}
}
if let Some(block) = otherwise {
blocks.push(block);
normal_blocks.push(block);
if let Some(ref mut unwind_blocks) = unwind_blocks {
unwind_blocks.push(unwind_otherwise.unwrap());
}
} else {
values.pop();
}
// If there are multiple variants, then if something
// is present within the enum the discriminant, tracked
// by the rest path, must be initialized.
//
// Additionally, we do not want to switch on the
// discriminant after it is free-ed, because that
// way lies only trouble.
let discr_ty = adt.repr.discr_type().to_ty(self.tcx());
let discr = Lvalue::Local(self.new_temp(discr_ty));
let discr_rv = Rvalue::Discriminant(self.lvalue.clone());
let switch_block = self.elaborator.patch().new_block(BasicBlockData {
statements: vec![
Statement {
source_info: self.source_info,
kind: StatementKind::Assign(discr.clone(), discr_rv),
}
],
terminator: Some(Terminator {
source_info: self.source_info,
kind: TerminatorKind::SwitchInt {
discr: Operand::Consume(discr),
switch_ty: discr_ty,
values: From::from(values),
targets: blocks,
}
}),
is_cleanup: self.is_cleanup,
});
self.drop_flag_test_block(switch_block)
(self.adt_switch_block(is_cleanup, adt, normal_blocks, &values, succ),
unwind_blocks.map(|unwind_blocks| {
self.adt_switch_block(
is_cleanup, adt, unwind_blocks, &values, unwind.unwrap()
)
}))
}
}
}
fn adt_switch_block(&mut self,
is_cleanup: bool,
adt: &'tcx ty::AdtDef,
blocks: Vec<BasicBlock>,
values: &[ConstInt],
succ: BasicBlock)
-> BasicBlock {
// If there are multiple variants, then if something
// is present within the enum the discriminant, tracked
// by the rest path, must be initialized.
//
// Additionally, we do not want to switch on the
// discriminant after it is free-ed, because that
// way lies only trouble.
let discr_ty = adt.repr.discr_type().to_ty(self.tcx());
let discr = Lvalue::Local(self.new_temp(discr_ty));
let discr_rv = Rvalue::Discriminant(self.lvalue.clone());
let switch_block = self.elaborator.patch().new_block(BasicBlockData {
statements: vec![
Statement {
source_info: self.source_info,
kind: StatementKind::Assign(discr.clone(), discr_rv),
}
],
terminator: Some(Terminator {
source_info: self.source_info,
kind: TerminatorKind::SwitchInt {
discr: Operand::Consume(discr),
switch_ty: discr_ty,
values: From::from(values.to_owned()),
targets: blocks,
}
}),
is_cleanup: is_cleanup,
});
self.drop_flag_test_block(is_cleanup, switch_block, succ)
}
fn destructor_call_block<'a>(&mut self, (succ, unwind): (BasicBlock, Option<BasicBlock>))
-> BasicBlock
{
debug!("destructor_call_block({:?}, {:?})", self, succ);
let tcx = self.tcx();
let drop_trait = tcx.lang_items.drop_trait().unwrap();
let drop_fn = tcx.associated_items(drop_trait).next().unwrap();
let ty = self.lvalue_ty(self.lvalue);
let substs = tcx.mk_substs(iter::once(Kind::from(ty)));
let re_erased = tcx.mk_region(ty::ReErased);
let ref_ty = tcx.mk_ref(re_erased, ty::TypeAndMut {
ty: ty,
mutbl: hir::Mutability::MutMutable
});
let ref_lvalue = self.new_temp(ref_ty);
let unit_temp = Lvalue::Local(self.new_temp(tcx.mk_nil()));
self.elaborator.patch().new_block(BasicBlockData {
statements: vec![Statement {
source_info: self.source_info,
kind: StatementKind::Assign(
Lvalue::Local(ref_lvalue),
Rvalue::Ref(re_erased, BorrowKind::Mut, self.lvalue.clone())
)
}],
terminator: Some(Terminator {
kind: TerminatorKind::Call {
func: Operand::item(tcx, drop_fn.def_id, substs,
self.source_info.span),
args: vec![Operand::Consume(Lvalue::Local(ref_lvalue))],
destination: Some((unit_temp, succ)),
cleanup: unwind,
},
source_info: self.source_info
}),
is_cleanup: self.is_cleanup,
})
}
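
Editor's note: for intuition only (not part of this diff), the ordering this destructor-call block encodes — `Drop::drop` runs on a mutable borrow first, then the contents drop ladder handles the fields — is observable from plain Rust. The type names below are made up for illustration.

struct Field;
impl Drop for Field {
    fn drop(&mut self) { println!("field dropped second"); }
}

struct HasDtor { _f: Field }
impl Drop for HasDtor {
    fn drop(&mut self) { println!("Drop::drop called first"); }
}

fn main() {
    // Prints "Drop::drop called first", then "field dropped second":
    // the destructor call block precedes the field drop ladder.
    let _value = HasDtor { _f: Field };
}
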
/// The slow-path - create an "open", elaborated drop for a type
/// which is moved-out-of only partially, and patch `bb` to a jump
/// to it. This must not be called on ADTs with a destructor,
@ -408,6 +547,8 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
/// ADT, both in the success case or if one of the destructors fail.
fn open_drop<'a>(&mut self) -> BasicBlock {
let ty = self.lvalue_ty(self.lvalue);
let is_cleanup = self.is_cleanup; // FIXME(#6393)
let succ = self.succ;
match ty.sty {
ty::TyClosure(def_id, substs) => {
let tys : Vec<_> = substs.upvar_tys(def_id, self.tcx()).collect();
@ -422,6 +563,14 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
ty::TyAdt(def, substs) => {
self.open_drop_for_adt(def, substs)
}
ty::TyDynamic(..) => {
self.complete_drop(is_cleanup, Some(DropFlagMode::Deep), succ)
}
ty::TyArray(..) | ty::TySlice(..) => {
// FIXME(#34708): handle partially-dropped
// array/slice elements.
self.complete_drop(is_cleanup, Some(DropFlagMode::Deep), succ)
}
_ => bug!("open drop from non-ADT `{:?}`", ty)
}
}
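
Editor's note: an illustration (not part of this diff) of the array case that the FIXME(#34708) above defers. If an element's destructor panics partway through dropping an array, the remaining elements still have to be dropped during unwinding; that is the situation this commit does not yet elaborate in MIR.

struct Noisy(u32);
impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping element {}", self.0);
        if self.0 == 1 {
            // A panic in the middle of the array drop: element 2 still
            // needs its destructor run while the panic unwinds.
            panic!("destructor panicked");
        }
    }
}

fn main() {
    // Dropping the array at scope end drops element 0, panics on element 1,
    // and must still drop element 2 during unwinding.
    let _arr = [Noisy(0), Noisy(1), Noisy(2)];
}
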
@ -433,22 +582,27 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
/// if FLAG(self.path)
/// if let Some(mode) = mode: FLAG(self.path)[mode] = false
/// drop(self.lv)
fn complete_drop<'a>(&mut self, drop_mode: Option<DropFlagMode>) -> BasicBlock
fn complete_drop<'a>(&mut self,
is_cleanup: bool,
drop_mode: Option<DropFlagMode>,
succ: BasicBlock) -> BasicBlock
{
debug!("complete_drop({:?},{:?})", self, drop_mode);
let drop_block = self.drop_block();
let drop_block = self.drop_block(is_cleanup, succ);
if let Some(mode) = drop_mode {
let block_start = Location { block: drop_block, statement_index: 0 };
self.elaborator.clear_drop_flag(block_start, self.path, mode);
}
self.drop_flag_test_block(drop_block)
self.drop_flag_test_block(is_cleanup, drop_block, succ)
}
fn elaborated_drop_block<'a>(&mut self) -> BasicBlock {
debug!("elaborated_drop_block({:?})", self);
let blk = self.drop_block();
let is_cleanup = self.is_cleanup; // FIXME(#6393)
let succ = self.succ;
let blk = self.drop_block(is_cleanup, succ);
self.elaborate_drop(blk);
blk
}
@ -460,7 +614,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
is_cleanup: bool
) -> BasicBlock {
let block = self.unelaborated_free_block(ty, target, is_cleanup);
self.drop_flag_test_block_with_succ(is_cleanup, block, target)
self.drop_flag_test_block(is_cleanup, block, target)
}
fn unelaborated_free_block<'a>(
@ -473,53 +627,34 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
let unit_temp = Lvalue::Local(self.new_temp(tcx.mk_nil()));
let free_func = tcx.require_lang_item(lang_items::BoxFreeFnLangItem);
let substs = tcx.mk_substs(iter::once(Kind::from(ty)));
let fty = tcx.item_type(free_func).subst(tcx, substs);
let free_block = self.elaborator.patch().new_block(BasicBlockData {
statements: vec![],
terminator: Some(Terminator {
source_info: self.source_info, kind: TerminatorKind::Call {
func: Operand::Constant(Constant {
span: self.source_info.span,
ty: fty,
literal: Literal::Item {
def_id: free_func,
substs: substs
}
}),
args: vec![Operand::Consume(self.lvalue.clone())],
destination: Some((unit_temp, target)),
cleanup: None
}
}),
is_cleanup: is_cleanup
});
let call = TerminatorKind::Call {
func: Operand::item(tcx, free_func, substs, self.source_info.span),
args: vec![Operand::Consume(self.lvalue.clone())],
destination: Some((unit_temp, target)),
cleanup: None
}; // FIXME(#6393)
let free_block = self.new_block(is_cleanup, call);
let block_start = Location { block: free_block, statement_index: 0 };
self.elaborator.clear_drop_flag(block_start, self.path, DropFlagMode::Shallow);
free_block
}
fn drop_block<'a>(&mut self) -> BasicBlock {
fn drop_block<'a>(&mut self, is_cleanup: bool, succ: BasicBlock) -> BasicBlock {
let block = TerminatorKind::Drop {
location: self.lvalue.clone(),
target: self.succ,
unwind: self.unwind
target: succ,
unwind: if is_cleanup { None } else { self.unwind }
};
let is_cleanup = self.is_cleanup; // FIXME(#6393)
self.new_block(is_cleanup, block)
}
fn drop_flag_test_block<'a>(&mut self, on_set: BasicBlock) -> BasicBlock {
let is_cleanup = self.is_cleanup;
let succ = self.succ; // FIXME(#6393)
self.drop_flag_test_block_with_succ(is_cleanup, on_set, succ)
}
fn drop_flag_test_block_with_succ<'a>(&mut self,
is_cleanup: bool,
on_set: BasicBlock,
on_unset: BasicBlock)
-> BasicBlock
fn drop_flag_test_block(&mut self,
is_cleanup: bool,
on_set: BasicBlock,
on_unset: BasicBlock)
-> BasicBlock
{
let style = self.elaborator.drop_style(self.path, DropFlagMode::Shallow);
debug!("drop_flag_test_block({:?},{:?},{:?}) - {:?}",

View File

@ -11,7 +11,7 @@
use llvm::{self, ValueRef, Integer, Pointer, Float, Double, Struct, Array, Vector, AttributePlace};
use base;
use builder::Builder;
use common::{self, type_is_fat_ptr, C_uint};
use common::{type_is_fat_ptr, C_uint};
use context::CrateContext;
use cabi_x86;
use cabi_x86_64;
@ -59,6 +59,7 @@ enum ArgKind {
pub use self::attr_impl::ArgAttribute;
#[allow(non_upper_case_globals)]
#[allow(unused)]
mod attr_impl {
// The subset of llvm::Attribute needed for arguments, packed into a bitfield.
bitflags! {
@ -223,16 +224,6 @@ impl ArgType {
self.kind == ArgKind::Ignore
}
/// Get the LLVM type for an lvalue of the original Rust type of
/// this argument/return, i.e. the result of `type_of::type_of`.
pub fn memory_ty(&self, ccx: &CrateContext) -> Type {
if self.original_ty == Type::i1(ccx) {
Type::i8(ccx)
} else {
self.original_ty
}
}
/// Store a direct/indirect value described by this ArgType into a
/// lvalue for the original Rust type of this argument/return.
/// Can be used for both storing formal arguments into Rust variables
@ -344,17 +335,6 @@ impl FnType {
fn_ty
}
pub fn from_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
instance: &ty::Instance<'tcx>,
extra_args: &[Ty<'tcx>]) -> FnType
{
let ity = common::instance_ty(ccx.shared(), instance);
let sig = common::ty_fn_sig(ccx, ity);
let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig);
Self::new(ccx, sig, extra_args)
}
fn unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> FnType {

View File

@ -363,28 +363,6 @@ fn load_discr(bcx: &Builder, ity: layout::Integer, ptr: ValueRef,
}
}
/// Yield information about how to dispatch a case of the
/// discriminant-like value returned by `trans_switch`.
///
/// This should ideally be less tightly tied to `_match`.
pub fn trans_case<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef {
let l = bcx.ccx.layout_of(t);
match *l {
layout::CEnum { discr, .. }
| layout::General { discr, .. }=> {
C_integral(Type::from_integer(bcx.ccx, discr), value.0, true)
}
layout::RawNullablePointer { .. } |
layout::StructWrappedNullablePointer { .. } => {
assert!(value == Disr(0) || value == Disr(1));
C_bool(bcx.ccx, value != Disr(0))
}
_ => {
bug!("{} does not have a discriminant. Represented as {:#?}", t, l);
}
}
}
/// Set the discriminant for a new value of the given case of the given
/// representation.
pub fn trans_set_discr<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr) {

View File

@ -17,6 +17,7 @@ pub use syntax::attr::InlineAttr;
use syntax::ast;
use context::CrateContext;
/// Mark LLVM function to use provided inline heuristic.
#[inline]
pub fn inline(val: ValueRef, inline: InlineAttr) {

View File

@ -403,7 +403,9 @@ pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef,
// a char is a Unicode codepoint, and so takes values from 0
// to 0x10FFFF inclusive only.
b.load_range_assert(ptr, 0, 0x10FFFF + 1, llvm::False, alignment.to_align())
} else if (t.is_region_ptr() || t.is_box()) && !common::type_is_fat_ptr(ccx, t) {
} else if (t.is_region_ptr() || t.is_box() || t.is_fn())
&& !common::type_is_fat_ptr(ccx, t)
{
b.load_nonnull(ptr, alignment.to_align())
} else {
b.load(ptr, alignment.to_align())

View File

@ -96,6 +96,9 @@ pub fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
assert_eq!(common::val_ty(llfn), llptrty);
debug!("get_fn: not casting pointer!");
if common::is_inline_instance(tcx, &instance) {
attributes::inline(llfn, attributes::InlineAttr::Hint);
}
let attrs = instance.def.attrs(ccx.tcx());
attributes::from_fn_attrs(ccx, &attrs, llfn);

View File

@ -1,144 +0,0 @@
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! ## The Cleanup module
//!
//! The cleanup module tracks what values need to be cleaned up as scopes
//! are exited, either via panic or just normal control flow.
//!
//! Cleanup items can be scheduled into any of the scopes on the stack.
//! Typically, when a scope is finished, we generate the cleanup code. This
//! corresponds to a normal exit from a block (for example, an expression
//! completing evaluation successfully without panic).
use llvm::BasicBlockRef;
use base;
use mir::lvalue::LvalueRef;
use rustc::mir::tcx::LvalueTy;
use builder::Builder;
use common::Funclet;
use glue;
use type_::Type;
pub struct CleanupScope<'tcx> {
// Cleanup to run upon scope exit.
cleanup: Option<DropValue<'tcx>>,
// Computed on creation if compiling with landing pads (!sess.no_landing_pads)
pub landing_pad: Option<BasicBlockRef>,
}
#[derive(Copy, Clone)]
pub struct DropValue<'tcx> {
val: LvalueRef<'tcx>,
skip_dtor: bool,
}
impl<'tcx> DropValue<'tcx> {
fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &Builder<'a, 'tcx>) {
glue::call_drop_glue(bcx, self.val, self.skip_dtor, funclet)
}
/// Creates a landing pad for the top scope. The landing pad will perform all cleanups necessary
/// for an unwind and then `resume` to continue error propagation:
///
/// landing_pad -> ... cleanups ... -> [resume]
///
/// This should only be called once per function, as it creates an alloca for the landingpad.
fn get_landing_pad<'a>(&self, bcx: &Builder<'a, 'tcx>) -> BasicBlockRef {
debug!("get_landing_pad");
let bcx = bcx.build_sibling_block("cleanup_unwind");
let llpersonality = bcx.ccx.eh_personality();
bcx.set_personality_fn(llpersonality);
if base::wants_msvc_seh(bcx.sess()) {
let pad = bcx.cleanup_pad(None, &[]);
let funclet = Some(Funclet::new(pad));
self.trans(funclet.as_ref(), &bcx);
bcx.cleanup_ret(pad, None);
} else {
// The landing pad return type (the type being propagated). Not sure
// what this represents but it's determined by the personality
// function and this is what the EH proposal example uses.
let llretty = Type::struct_(bcx.ccx, &[Type::i8p(bcx.ccx), Type::i32(bcx.ccx)], false);
// The only landing pad clause will be 'cleanup'
let llretval = bcx.landing_pad(llretty, llpersonality, 1, bcx.llfn());
// The landing pad block is a cleanup
bcx.set_cleanup(llretval);
// Insert cleanup instructions into the cleanup block
self.trans(None, &bcx);
if !bcx.sess().target.target.options.custom_unwind_resume {
bcx.resume(llretval);
} else {
let exc_ptr = bcx.extract_value(llretval, 0);
bcx.call(bcx.ccx.eh_unwind_resume(), &[exc_ptr], None);
bcx.unreachable();
}
}
bcx.llbb()
}
}
impl<'a, 'tcx> CleanupScope<'tcx> {
/// Issue #23611: Schedules a (deep) drop of the contents of
/// `val`, which is a pointer to an instance of struct/enum type
/// `ty`. The scheduled code handles extracting the discriminant
/// and dropping the contents associated with that variant
/// *without* executing any associated drop implementation.
pub fn schedule_drop_adt_contents(
bcx: &Builder<'a, 'tcx>, val: LvalueRef<'tcx>
) -> CleanupScope<'tcx> {
if let LvalueTy::Downcast { .. } = val.ty {
bug!("Cannot drop downcast ty yet");
}
// `if` below could be "!contents_needs_drop"; skipping drop
// is just an optimization, so sound to be conservative.
if !bcx.ccx.shared().type_needs_drop(val.ty.to_ty(bcx.tcx())) {
return CleanupScope::noop();
}
let drop = DropValue {
val: val,
skip_dtor: true,
};
CleanupScope::new(bcx, drop)
}
fn new(bcx: &Builder<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> {
CleanupScope {
cleanup: Some(drop_val),
landing_pad: if !bcx.sess().no_landing_pads() {
Some(drop_val.get_landing_pad(bcx))
} else {
None
},
}
}
pub fn noop() -> CleanupScope<'tcx> {
CleanupScope {
cleanup: None,
landing_pad: None,
}
}
pub fn trans(self, bcx: &'a Builder<'a, 'tcx>) {
if let Some(cleanup) = self.cleanup {
cleanup.trans(None, &bcx);
}
}
}

View File

@ -193,25 +193,21 @@ use rustc::hir::itemlikevisit::ItemLikeVisitor;
use rustc::hir::map as hir_map;
use rustc::hir::def_id::DefId;
use rustc::middle::lang_items::{BoxFreeFnLangItem, ExchangeMallocFnLangItem};
use rustc::middle::lang_items::{ExchangeMallocFnLangItem};
use rustc::traits;
use rustc::ty::subst::{Kind, Substs, Subst};
use rustc::ty::subst::{Substs, Subst};
use rustc::ty::{self, TypeFoldable, TyCtxt};
use rustc::ty::adjustment::CustomCoerceUnsized;
use rustc::mir::{self, Location};
use rustc::mir::visit as mir_visit;
use rustc::mir::visit::Visitor as MirVisitor;
use context::SharedCrateContext;
use common::{def_ty, instance_ty};
use glue::{self, DropGlueKind};
use monomorphize::{self, Instance};
use util::nodemap::{FxHashSet, FxHashMap, DefIdMap};
use trans_item::{TransItem, DefPathBasedNames, InstantiationMode};
use std::iter;
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub enum TransItemCollectionMode {
Eager,
@ -327,10 +323,6 @@ fn collect_items_rec<'a, 'tcx: 'a>(scx: &SharedCrateContext<'a, 'tcx>,
let recursion_depth_reset;
match starting_point {
TransItem::DropGlue(t) => {
find_drop_glue_neighbors(scx, t, &mut neighbors);
recursion_depth_reset = None;
}
TransItem::Static(node_id) => {
let def_id = scx.tcx().hir.local_def_id(node_id);
let instance = Instance::mono(scx.tcx(), def_id);
@ -339,8 +331,7 @@ fn collect_items_rec<'a, 'tcx: 'a>(scx: &SharedCrateContext<'a, 'tcx>,
debug_assert!(should_trans_locally(scx.tcx(), &instance));
let ty = instance_ty(scx, &instance);
let ty = glue::get_drop_glue_type(scx, ty);
neighbors.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
visit_drop_use(scx, ty, true, &mut neighbors);
recursion_depth_reset = None;
@ -396,6 +387,14 @@ fn check_recursion_limit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
let recursion_depth = recursion_depths.get(&def_id).cloned().unwrap_or(0);
debug!(" => recursion depth={}", recursion_depth);
let recursion_depth = if Some(def_id) == tcx.lang_items.drop_in_place_fn() {
// HACK: drop_in_place creates tight monomorphization loops. Give
// it more margin.
recursion_depth / 4
} else {
recursion_depth
};
// Code that needs to instantiate the same function recursively
// more than the recursion limit is assumed to be causing an
// infinite expansion.
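
Editor's note: a hedged illustration of the "tight loop" the hack above compensates for (type names invented). Each level of an ownership chain instantiates `drop_in_place` with a different type argument, yet all of those instances share the `drop_in_place` DefId, so they can all count against the same recursion-depth entry even though no type is actually recursive.

// drop_in_place::<Outer> -> drop_in_place::<Middle> -> drop_in_place::<String>
// are distinct instances that can be collected through one another, all
// charged to the single drop_in_place DefId in `recursion_depths`.
struct Middle(String);
struct Outer(Middle);

fn main() {
    let _chain = Outer(Middle(String::from("nested ownership")));
}
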
@ -521,27 +520,6 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
self.super_rvalue(rvalue, location);
}
fn visit_lvalue(&mut self,
lvalue: &mir::Lvalue<'tcx>,
context: mir_visit::LvalueContext<'tcx>,
location: Location) {
debug!("visiting lvalue {:?}", *lvalue);
if let mir_visit::LvalueContext::Drop = context {
let ty = lvalue.ty(self.mir, self.scx.tcx())
.to_ty(self.scx.tcx());
let ty = monomorphize::apply_param_substs(self.scx,
self.param_substs,
&ty);
assert!(ty.is_normalized_for_trans());
let ty = glue::get_drop_glue_type(self.scx, ty);
self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
}
self.super_lvalue(lvalue, context, location);
}
fn visit_constant(&mut self, constant: &mir::Constant<'tcx>, location: Location) {
debug!("visiting constant {:?} @ {:?}", *constant, location);
@ -568,54 +546,90 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
kind: &mir::TerminatorKind<'tcx>,
location: Location) {
let tcx = self.scx.tcx();
if let mir::TerminatorKind::Call {
ref func,
..
} = *kind {
let callee_ty = func.ty(self.mir, tcx);
let callee_ty = monomorphize::apply_param_substs(
self.scx, self.param_substs, &callee_ty);
visit_fn_use(self.scx, callee_ty, true, &mut self.output);
match *kind {
mir::TerminatorKind::Call { ref func, .. } => {
let callee_ty = func.ty(self.mir, tcx);
let callee_ty = monomorphize::apply_param_substs(
self.scx, self.param_substs, &callee_ty);
visit_fn_use(self.scx, callee_ty, true, &mut self.output);
}
mir::TerminatorKind::Drop { ref location, .. } |
mir::TerminatorKind::DropAndReplace { ref location, .. } => {
let ty = location.ty(self.mir, self.scx.tcx())
.to_ty(self.scx.tcx());
let ty = monomorphize::apply_param_substs(self.scx,
self.param_substs,
&ty);
visit_drop_use(self.scx, ty, true, self.output);
}
mir::TerminatorKind::Goto { .. } |
mir::TerminatorKind::SwitchInt { .. } |
mir::TerminatorKind::Resume |
mir::TerminatorKind::Return |
mir::TerminatorKind::Unreachable |
mir::TerminatorKind::Assert { .. } => {}
}
self.super_terminator_kind(block, kind, location);
}
}
fn visit_drop_use<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
ty: ty::Ty<'tcx>,
is_direct_call: bool,
output: &mut Vec<TransItem<'tcx>>)
{
let instance = monomorphize::resolve_drop_in_place(scx, ty);
visit_instance_use(scx, instance, is_direct_call, output);
}
fn visit_fn_use<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
ty: ty::Ty<'tcx>,
is_direct_call: bool,
output: &mut Vec<TransItem<'tcx>>)
{
debug!("visit_fn_use({:?}, is_direct_call={:?})", ty, is_direct_call);
let (def_id, substs) = match ty.sty {
ty::TyFnDef(def_id, substs, _) => (def_id, substs),
_ => return
};
if let ty::TyFnDef(def_id, substs, _) = ty.sty {
let instance = monomorphize::resolve(scx, def_id, substs);
visit_instance_use(scx, instance, is_direct_call, output);
}
}
let instance = monomorphize::resolve(scx, def_id, substs);
fn visit_instance_use<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
instance: ty::Instance<'tcx>,
is_direct_call: bool,
output: &mut Vec<TransItem<'tcx>>)
{
debug!("visit_item_use({:?}, is_direct_call={:?})", instance, is_direct_call);
if !should_trans_locally(scx.tcx(), &instance) {
return
}
match instance.def {
ty::InstanceDef::Intrinsic(..) => {
ty::InstanceDef::Intrinsic(def_id) => {
if !is_direct_call {
bug!("intrinsic {:?} being reified", ty);
}
if scx.tcx().item_name(def_id) == "drop_in_place" {
// drop_in_place is a call to drop glue, need to instantiate
// that.
let ty = glue::get_drop_glue_type(scx, substs.type_at(0));
output.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
bug!("intrinsic {:?} being reified", def_id);
}
}
ty::InstanceDef::Virtual(..) => {
ty::InstanceDef::Virtual(..) |
ty::InstanceDef::DropGlue(_, None) => {
// don't need to emit shim if we are calling directly.
if !is_direct_call {
output.push(create_fn_trans_item(instance));
}
}
ty::InstanceDef::DropGlue(_, Some(ty)) => {
match ty.sty {
ty::TyArray(ety, _) |
ty::TySlice(ety)
if is_direct_call =>
{
// drop of arrays/slices is translated in-line.
visit_drop_use(scx, ety, false, output);
}
_ => {}
};
output.push(create_fn_trans_item(instance));
}
ty::InstanceDef::ClosureOnceShim { .. } |
ty::InstanceDef::Item(..) |
ty::InstanceDef::FnPtrShim(..) => {
@ -634,6 +648,7 @@ fn should_trans_locally<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: &Instan
ty::InstanceDef::ClosureOnceShim { .. } |
ty::InstanceDef::Virtual(..) |
ty::InstanceDef::FnPtrShim(..) |
ty::InstanceDef::DropGlue(..) |
ty::InstanceDef::Intrinsic(_) => return true
};
match tcx.hir.get_if_local(def_id) {
@ -658,124 +673,6 @@ fn should_trans_locally<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: &Instan
}
}
fn find_drop_glue_neighbors<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
dg: DropGlueKind<'tcx>,
output: &mut Vec<TransItem<'tcx>>) {
let ty = match dg {
DropGlueKind::Ty(ty) => ty,
DropGlueKind::TyContents(_) => {
// We already collected the neighbors of this item via the
// DropGlueKind::Ty variant.
return
}
};
debug!("find_drop_glue_neighbors: {}", type_to_string(scx.tcx(), ty));
// Make sure the BoxFreeFn lang-item gets translated if there is a boxed value.
if ty.is_box() {
let tcx = scx.tcx();
let def_id = tcx.require_lang_item(BoxFreeFnLangItem);
let box_free_instance = Instance::new(
def_id,
tcx.mk_substs(iter::once(Kind::from(ty.boxed_ty())))
);
if should_trans_locally(tcx, &box_free_instance) {
output.push(create_fn_trans_item(box_free_instance));
}
}
// If the type implements Drop, also add a translation item for the
// monomorphized Drop::drop() implementation.
let has_dtor = match ty.sty {
ty::TyAdt(def, _) => def.has_dtor(scx.tcx()),
_ => false
};
if has_dtor && !ty.is_box() {
let drop_trait_def_id = scx.tcx()
.lang_items
.drop_trait()
.unwrap();
let drop_method = scx.tcx().associated_items(drop_trait_def_id)
.find(|it| it.kind == ty::AssociatedKind::Method)
.unwrap().def_id;
let substs = scx.tcx().mk_substs_trait(ty, &[]);
let instance = monomorphize::resolve(scx, drop_method, substs);
if should_trans_locally(scx.tcx(), &instance) {
output.push(create_fn_trans_item(instance));
}
// This type has a Drop implementation, we'll need the contents-only
// version of the glue too.
output.push(TransItem::DropGlue(DropGlueKind::TyContents(ty)));
}
// Finally add the types of nested values
match ty.sty {
ty::TyBool |
ty::TyChar |
ty::TyInt(_) |
ty::TyUint(_) |
ty::TyStr |
ty::TyFloat(_) |
ty::TyRawPtr(_) |
ty::TyRef(..) |
ty::TyFnDef(..) |
ty::TyFnPtr(_) |
ty::TyNever |
ty::TyDynamic(..) => {
/* nothing to do */
}
ty::TyAdt(def, _) if def.is_box() => {
let inner_type = glue::get_drop_glue_type(scx, ty.boxed_ty());
if scx.type_needs_drop(inner_type) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(inner_type)));
}
}
ty::TyAdt(def, substs) => {
for field in def.all_fields() {
let field_type = def_ty(scx, field.did, substs);
let field_type = glue::get_drop_glue_type(scx, field_type);
if scx.type_needs_drop(field_type) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(field_type)));
}
}
}
ty::TyClosure(def_id, substs) => {
for upvar_ty in substs.upvar_tys(def_id, scx.tcx()) {
let upvar_ty = glue::get_drop_glue_type(scx, upvar_ty);
if scx.type_needs_drop(upvar_ty) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(upvar_ty)));
}
}
}
ty::TySlice(inner_type) |
ty::TyArray(inner_type, _) => {
let inner_type = glue::get_drop_glue_type(scx, inner_type);
if scx.type_needs_drop(inner_type) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(inner_type)));
}
}
ty::TyTuple(args, _) => {
for arg in args {
let arg = glue::get_drop_glue_type(scx, arg);
if scx.type_needs_drop(arg) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(arg)));
}
}
}
ty::TyProjection(_) |
ty::TyParam(_) |
ty::TyInfer(_) |
ty::TyAnon(..) |
ty::TyError => {
bug!("encountered unexpected type");
}
}
}
/// For given pair of source and target type that occur in an unsizing coercion,
/// this function finds the pair of types that determines the vtable linking
/// them.
@ -894,8 +791,7 @@ fn create_trans_items_for_vtable_methods<'a, 'tcx>(scx: &SharedCrateContext<'a,
output.extend(methods);
}
// Also add the destructor
let dg_type = glue::get_drop_glue_type(scx, impl_ty);
output.push(TransItem::DropGlue(DropGlueKind::Ty(dg_type)));
visit_drop_use(scx, impl_ty, false, output);
}
}
@ -940,8 +836,7 @@ impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> {
def_id_to_string(self.scx.tcx(), def_id));
let ty = def_ty(self.scx, def_id, Substs::empty());
let ty = glue::get_drop_glue_type(self.scx, ty);
self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
visit_drop_use(self.scx, ty, true, self.output);
}
}
}
@ -1093,12 +988,3 @@ fn def_id_to_string<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
printer.push_def_path(def_id, &mut output);
output
}
fn type_to_string<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
ty: ty::Ty<'tcx>)
-> String {
let mut output = String::new();
let printer = DefPathBasedNames::new(tcx, false, false);
printer.push_type_name(ty, &mut output);
output
}

View File

@ -535,16 +535,27 @@ pub fn ty_fn_sig<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
pub fn requests_inline<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
instance: &ty::Instance<'tcx>
) -> bool {
if is_inline_instance(tcx, instance) {
return true
}
attr::requests_inline(&instance.def.attrs(tcx)[..])
}
pub fn is_inline_instance<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
instance: &ty::Instance<'tcx>
) -> bool {
let def_id = match instance.def {
ty::InstanceDef::Item(def_id) => def_id,
ty::InstanceDef::DropGlue(_, Some(_)) => return false,
_ => return true
};
match tcx.def_key(def_id).disambiguated_data.data {
DefPathData::StructCtor |
DefPathData::EnumVariant(..) |
DefPathData::ClosureExpr => true,
_ => attr::requests_inline(&tcx.get_attrs(def_id)[..]),
_ => false
}
}

View File

@ -21,7 +21,6 @@ use debuginfo;
use callee;
use base;
use declare;
use glue::DropGlueKind;
use monomorphize::Instance;
use partitioning::CodegenUnit;
@ -46,7 +45,7 @@ use std::str;
use syntax::ast;
use syntax::symbol::InternedString;
use syntax_pos::DUMMY_SP;
use abi::{Abi, FnType};
use abi::Abi;
pub struct Stats {
pub n_glues_created: Cell<usize>,
@ -94,8 +93,6 @@ pub struct LocalCrateContext<'tcx> {
previous_work_product: Option<WorkProduct>,
codegen_unit: CodegenUnit<'tcx>,
needs_unwind_cleanup_cache: RefCell<FxHashMap<Ty<'tcx>, bool>>,
fn_pointer_shims: RefCell<FxHashMap<Ty<'tcx>, ValueRef>>,
drop_glues: RefCell<FxHashMap<DropGlueKind<'tcx>, (ValueRef, FnType)>>,
/// Cache instances of monomorphic and polymorphic items
instances: RefCell<FxHashMap<Instance<'tcx>, ValueRef>>,
/// Cache generated vtables
@ -587,8 +584,6 @@ impl<'tcx> LocalCrateContext<'tcx> {
previous_work_product: previous_work_product,
codegen_unit: codegen_unit,
needs_unwind_cleanup_cache: RefCell::new(FxHashMap()),
fn_pointer_shims: RefCell::new(FxHashMap()),
drop_glues: RefCell::new(FxHashMap()),
instances: RefCell::new(FxHashMap()),
vtables: RefCell::new(FxHashMap()),
const_cstr_cache: RefCell::new(FxHashMap()),
@ -723,15 +718,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
&self.local().needs_unwind_cleanup_cache
}
pub fn fn_pointer_shims(&self) -> &RefCell<FxHashMap<Ty<'tcx>, ValueRef>> {
&self.local().fn_pointer_shims
}
pub fn drop_glues<'a>(&'a self)
-> &'a RefCell<FxHashMap<DropGlueKind<'tcx>, (ValueRef, FnType)>> {
&self.local().drop_glues
}
pub fn instances<'a>(&'a self) -> &'a RefCell<FxHashMap<Instance<'tcx>, ValueRef>> {
&self.local().instances
}

View File

@ -13,69 +13,35 @@
// Code relating to drop glue.
use std;
use std::iter;
use llvm;
use llvm::{ValueRef, get_param};
use middle::lang_items::BoxFreeFnLangItem;
use rustc::ty::subst::{Substs};
use llvm::{ValueRef};
use rustc::traits;
use rustc::ty::{self, layout, AdtDef, AdtKind, Ty, TypeFoldable};
use rustc::ty::subst::Kind;
use rustc::mir::tcx::LvalueTy;
use mir::lvalue::LvalueRef;
use abi::FnType;
use adt;
use base::*;
use callee::get_fn;
use cleanup::CleanupScope;
use rustc::ty::{self, Ty, TypeFoldable};
use common::*;
use machine::*;
use meth;
use monomorphize;
use trans_item::TransItem;
use tvec;
use type_of::{type_of, sizing_type_of, align_of};
use type_::Type;
use type_of::{sizing_type_of, align_of};
use value::Value;
use Disr;
use builder::Builder;
use mir::lvalue::Alignment;
pub fn trans_exchange_free_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) {
let content_ty = ptr.ty.to_ty(bcx.tcx());
let def_id = langcall(bcx.tcx(), None, "", BoxFreeFnLangItem);
let substs = bcx.tcx().mk_substs(iter::once(Kind::from(content_ty)));
let instance = monomorphize::resolve(bcx.ccx.shared(), def_id, substs);
let fn_ty = FnType::from_instance(bcx.ccx, &instance, &[]);
let llret = bcx.call(get_fn(bcx.ccx, instance),
&[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize], None);
fn_ty.apply_attrs_callsite(llret);
}
pub fn get_drop_glue_type<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> {
pub fn needs_drop_glue<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'tcx>) -> bool {
assert!(t.is_normalized_for_trans());
let t = scx.tcx().erase_regions(&t);
// Even if there is no dtor for t, there might be one deeper down and we
// might need to pass in the vtable ptr.
if !scx.type_is_sized(t) {
return t;
}
// FIXME (#22815): note that type_needs_drop conservatively
// approximates in some cases and may say a type expression
// requires drop glue when it actually does not.
//
// (In this case it is not clear whether any harm is done, i.e.
// erroneously returning `t` in some cases where we could have
// returned `tcx.types.i8` does not appear unsound. The impact on
// erroneously returning `true` in some cases where we could have
// returned `false` does not appear unsound. The impact on
// code quality is unknown at this time.)
if !scx.type_needs_drop(t) {
return scx.tcx().types.i8;
return false;
}
match t.sty {
ty::TyAdt(def, _) if def.is_box() => {
@ -85,210 +51,19 @@ pub fn get_drop_glue_type<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'t
let layout = t.layout(&infcx).unwrap();
if layout.size(&scx.tcx().data_layout).bytes() == 0 {
// `Box<ZeroSizeType>` does not allocate.
scx.tcx().types.i8
false
} else {
t
true
}
})
} else {
t
true
}
}
_ => t
_ => true
}
}
fn drop_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, args: LvalueRef<'tcx>) {
call_drop_glue(bcx, args, false, None)
}
pub fn call_drop_glue<'a, 'tcx>(
bcx: &Builder<'a, 'tcx>,
mut args: LvalueRef<'tcx>,
skip_dtor: bool,
funclet: Option<&'a Funclet>,
) {
let t = args.ty.to_ty(bcx.tcx());
// NB: v is an *alias* of type t here, not a direct value.
debug!("call_drop_glue(t={:?}, skip_dtor={})", t, skip_dtor);
if bcx.ccx.shared().type_needs_drop(t) {
let ccx = bcx.ccx;
let g = if skip_dtor {
DropGlueKind::TyContents(t)
} else {
DropGlueKind::Ty(t)
};
let glue = get_drop_glue_core(ccx, g);
let glue_type = get_drop_glue_type(ccx.shared(), t);
if glue_type != t {
args.llval = bcx.pointercast(args.llval, type_of(ccx, glue_type).ptr_to());
}
// No drop-hint ==> call standard drop glue
bcx.call(glue, &[args.llval, args.llextra][..1 + args.has_extra() as usize],
funclet.map(|b| b.bundle()));
}
}
pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef {
get_drop_glue_core(ccx, DropGlueKind::Ty(t))
}
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum DropGlueKind<'tcx> {
/// The normal path; runs the dtor, and then recurs on the contents
Ty(Ty<'tcx>),
/// Skips the dtor, if any, for ty; drops the contents directly.
/// Note that the dtor is only skipped at the most *shallow*
/// level, namely, an `impl Drop for Ty` itself. So, for example,
/// if Ty is Newtype(S) then only the Drop impl for Newtype itself
/// will be skipped, while the Drop impl for S, if any, will be
/// invoked.
TyContents(Ty<'tcx>),
}
impl<'tcx> DropGlueKind<'tcx> {
pub fn ty(&self) -> Ty<'tcx> {
match *self { DropGlueKind::Ty(t) | DropGlueKind::TyContents(t) => t }
}
pub fn map_ty<F>(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) -> Ty<'tcx>
{
match *self {
DropGlueKind::Ty(t) => DropGlueKind::Ty(f(t)),
DropGlueKind::TyContents(t) => DropGlueKind::TyContents(f(t)),
}
}
}
fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'tcx>) -> ValueRef {
let g = g.map_ty(|t| get_drop_glue_type(ccx.shared(), t));
match ccx.drop_glues().borrow().get(&g) {
Some(&(glue, _)) => glue,
None => {
bug!("Could not find drop glue for {:?} -- {} -- {}.",
g,
TransItem::DropGlue(g).to_raw_string(),
ccx.codegen_unit().name());
}
}
}
pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'tcx>) {
assert_eq!(g.ty(), get_drop_glue_type(ccx.shared(), g.ty()));
let (llfn, _) = ccx.drop_glues().borrow().get(&g).unwrap().clone();
let mut bcx = Builder::new_block(ccx, llfn, "entry-block");
ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
// All glue functions take values passed *by alias*; this is a
// requirement since in many contexts glue is invoked indirectly and
// the caller has no idea if it's dealing with something that can be
// passed by value.
//
// llfn is expected be declared to take a parameter of the appropriate
// type, so we don't need to explicitly cast the function parameter.
// NB: v0 is an *alias* of type t here, not a direct value.
// Only drop the value when it ... well, we used to check for
// non-null, (and maybe we need to continue doing so), but we now
// must definitely check for special bit-patterns corresponding to
// the special dtor markings.
let t = g.ty();
let value = get_param(llfn, 0);
let ptr = if ccx.shared().type_is_sized(t) {
LvalueRef::new_sized_ty(value, t, Alignment::AbiAligned)
} else {
LvalueRef::new_unsized_ty(value, get_param(llfn, 1), t, Alignment::AbiAligned)
};
let skip_dtor = match g {
DropGlueKind::Ty(_) => false,
DropGlueKind::TyContents(_) => true
};
let bcx = match t.sty {
ty::TyAdt(def, _) if def.is_box() => {
// Support for Box is built-in as yet and its drop glue is special
// despite having a dummy Drop impl in the library.
assert!(!skip_dtor);
let content_ty = t.boxed_ty();
let ptr = if !bcx.ccx.shared().type_is_sized(content_ty) {
let llbox = bcx.load(get_dataptr(&bcx, ptr.llval), None);
let info = bcx.load(get_meta(&bcx, ptr.llval), None);
LvalueRef::new_unsized_ty(llbox, info, content_ty, Alignment::AbiAligned)
} else {
LvalueRef::new_sized_ty(
bcx.load(ptr.llval, None),
content_ty, Alignment::AbiAligned)
};
drop_ty(&bcx, ptr);
trans_exchange_free_ty(&bcx, ptr);
bcx
}
ty::TyDynamic(..) => {
// No support in vtable for distinguishing destroying with
// versus without calling Drop::drop. Assert caller is
// okay with always calling the Drop impl, if any.
assert!(!skip_dtor);
let dtor = bcx.load(ptr.llextra, None);
bcx.call(dtor, &[ptr.llval], None);
bcx
}
ty::TyAdt(def, ..) if def.has_dtor(bcx.tcx()) && !skip_dtor => {
let shallow_drop = def.is_union();
let tcx = bcx.tcx();
// Be sure to put the contents into a scope so we can use an invoke
// instruction to call the user destructor but still call the field
// destructors if the user destructor panics.
//
// FIXME (#14875) panic-in-drop semantics might be unsupported; we
// might well consider changing below to more direct code.
// Issue #23611: schedule cleanup of contents, re-inspecting the
// discriminant (if any) in case of variant swap in drop code.
let contents_scope = if !shallow_drop {
CleanupScope::schedule_drop_adt_contents(&bcx, ptr)
} else {
CleanupScope::noop()
};
let drop_trait_def_id = tcx.lang_items.drop_trait().unwrap();
let drop_method = tcx.associated_items(drop_trait_def_id)
.find(|it| it.kind == ty::AssociatedKind::Method)
.unwrap().def_id;
let self_type_substs = tcx.mk_substs_trait(t, &[]);
let drop_instance = monomorphize::resolve(
bcx.ccx.shared(), drop_method, self_type_substs);
let fn_ty = FnType::from_instance(bcx.ccx, &drop_instance, &[]);
let llfn = get_fn(bcx.ccx, drop_instance);
let llret;
let args = &[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize];
if let Some(landing_pad) = contents_scope.landing_pad {
let normal_bcx = bcx.build_sibling_block("normal-return");
llret = bcx.invoke(llfn, args, normal_bcx.llbb(), landing_pad, None);
bcx = normal_bcx;
} else {
llret = bcx.call(llfn, args, None);
}
fn_ty.apply_attrs_callsite(llret);
contents_scope.trans(&bcx);
bcx
}
ty::TyAdt(def, ..) if def.is_union() => {
bcx
}
_ => {
if bcx.ccx.shared().type_needs_drop(t) {
drop_structural_ty(bcx, ptr)
} else {
bcx
}
}
};
bcx.ret_void();
}
pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, info: ValueRef)
-> (ValueRef, ValueRef) {
debug!("calculate size of DST: {}; with lost info: {:?}",
@ -375,20 +150,8 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
(size, align)
}
ty::TyDynamic(..) => {
// info points to the vtable and the second entry in the vtable is the
// dynamic size of the object.
let info = bcx.pointercast(info, Type::int(bcx.ccx).ptr_to());
let size_ptr = bcx.gepi(info, &[1]);
let align_ptr = bcx.gepi(info, &[2]);
let size = bcx.load(size_ptr, None);
let align = bcx.load(align_ptr, None);
// Vtable loads are invariant
bcx.set_invariant_load(size);
bcx.set_invariant_load(align);
(size, align)
// load size/align from vtable
(meth::SIZE.get_usize(bcx, info), meth::ALIGN.get_usize(bcx, info))
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(bcx.tcx());
@ -403,141 +166,3 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
_ => bug!("Unexpected unsized type, found {}", t)
}
}
// Iterates through the elements of a structural type, dropping them.
fn drop_structural_ty<'a, 'tcx>(
cx: Builder<'a, 'tcx>,
mut ptr: LvalueRef<'tcx>
) -> Builder<'a, 'tcx> {
fn iter_variant_fields<'a, 'tcx>(
cx: &'a Builder<'a, 'tcx>,
av: LvalueRef<'tcx>,
adt_def: &'tcx AdtDef,
variant_index: usize,
substs: &'tcx Substs<'tcx>
) {
let variant = &adt_def.variants[variant_index];
let tcx = cx.tcx();
for (i, field) in variant.fields.iter().enumerate() {
let arg = monomorphize::field_ty(tcx, substs, field);
let (field_ptr, align) = av.trans_field_ptr(&cx, i);
drop_ty(&cx, LvalueRef::new_sized_ty(field_ptr, arg, align));
}
}
let mut cx = cx;
let t = ptr.ty.to_ty(cx.tcx());
match t.sty {
ty::TyClosure(def_id, substs) => {
for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() {
let (llupvar, align) = ptr.trans_field_ptr(&cx, i);
drop_ty(&cx, LvalueRef::new_sized_ty(llupvar, upvar_ty, align));
}
}
ty::TyArray(_, n) => {
let base = get_dataptr(&cx, ptr.llval);
let len = C_uint(cx.ccx, n);
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(&cx, base, unit_ty, len,
|bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty, ptr.alignment)));
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(&cx, ptr.llval, unit_ty, ptr.llextra,
|bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty, ptr.alignment)));
}
ty::TyTuple(ref args, _) => {
for (i, arg) in args.iter().enumerate() {
let (llfld_a, align) = ptr.trans_field_ptr(&cx, i);
drop_ty(&cx, LvalueRef::new_sized_ty(llfld_a, *arg, align));
}
}
ty::TyAdt(adt, substs) => match adt.adt_kind() {
AdtKind::Struct => {
for (i, field) in adt.variants[0].fields.iter().enumerate() {
let field_ty = monomorphize::field_ty(cx.tcx(), substs, field);
let (llval, align) = ptr.trans_field_ptr(&cx, i);
let field_ptr = if cx.ccx.shared().type_is_sized(field_ty) {
LvalueRef::new_sized_ty(llval, field_ty, align)
} else {
LvalueRef::new_unsized_ty(llval, ptr.llextra, field_ty, align)
};
drop_ty(&cx, field_ptr);
}
}
AdtKind::Union => {
bug!("Union in `glue::drop_structural_ty`");
}
AdtKind::Enum => {
let n_variants = adt.variants.len();
// NB: we must hit the discriminant first so that structural
// comparison knows not to proceed when the discriminants differ.
// Obtain a representation of the discriminant sufficient to translate
// destructuring; this may or may not involve the actual discriminant.
let l = cx.ccx.layout_of(t);
match *l {
layout::Univariant { .. } |
layout::UntaggedUnion { .. } => {
if n_variants != 0 {
assert!(n_variants == 1);
ptr.ty = LvalueTy::Downcast {
adt_def: adt,
substs: substs,
variant_index: 0,
};
iter_variant_fields(&cx, ptr, &adt, 0, substs);
}
}
layout::CEnum { .. } |
layout::General { .. } |
layout::RawNullablePointer { .. } |
layout::StructWrappedNullablePointer { .. } => {
let lldiscrim_a = adt::trans_get_discr(
&cx, t, ptr.llval, ptr.alignment, None, false);
// Create a fall-through basic block for the "else" case of
// the switch instruction we're about to generate. Note that
// we do **not** use an Unreachable instruction here, even
// though most of the time this basic block will never be hit.
//
// When an enum is dropped, its contents are currently
// overwritten to DTOR_DONE, which means the discriminant
// could have changed value to something not within the actual
// range of the discriminant. Currently this function is only
// used for drop glue so in this case we just return quickly
// from the outer function, and any other use case will only
// call this for an already-valid enum in which case the `ret
// void` will never be hit.
let ret_void_cx = cx.build_sibling_block("enum-iter-ret-void");
ret_void_cx.ret_void();
let llswitch = cx.switch(lldiscrim_a, ret_void_cx.llbb(), n_variants);
let next_cx = cx.build_sibling_block("enum-iter-next");
for (i, discr) in adt.discriminants(cx.tcx()).enumerate() {
let variant_cx_name = format!("enum-iter-variant-{}", i);
let variant_cx = cx.build_sibling_block(&variant_cx_name);
let case_val = adt::trans_case(&cx, t, Disr::from(discr));
variant_cx.add_case(llswitch, case_val, variant_cx.llbb());
ptr.ty = LvalueTy::Downcast {
adt_def: adt,
substs: substs,
variant_index: i,
};
iter_variant_fields(&variant_cx, ptr, &adt, i, substs);
variant_cx.br(next_cx.llbb());
}
cx = next_cx;
}
_ => bug!("{} is not an enum.", t),
}
}
},
_ => {
cx.sess().unimpl(&format!("type in drop_structural_ty: {}", t))
}
}
return cx;
}

View File

@ -113,7 +113,6 @@ mod cabi_x86;
mod cabi_x86_64;
mod cabi_x86_win64;
mod callee;
mod cleanup;
mod collector;
mod common;
mod consts;

View File

@ -14,28 +14,45 @@ use callee;
use common::*;
use builder::Builder;
use consts;
use glue;
use machine;
use monomorphize;
use type_::Type;
use type_of::*;
use value::Value;
use rustc::ty;
// drop_glue pointer, size, align.
const VTABLE_OFFSET: usize = 3;
#[derive(Copy, Clone, Debug)]
pub struct VirtualIndex(usize);
/// Extracts a method from a trait object's vtable, at the specified index.
pub fn get_virtual_method<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
llvtable: ValueRef,
vtable_index: usize) -> ValueRef {
// Load the data pointer from the object.
debug!("get_virtual_method(vtable_index={}, llvtable={:?})",
vtable_index, Value(llvtable));
pub const DESTRUCTOR: VirtualIndex = VirtualIndex(0);
pub const SIZE: VirtualIndex = VirtualIndex(1);
pub const ALIGN: VirtualIndex = VirtualIndex(2);
let ptr = bcx.load_nonnull(bcx.gepi(llvtable, &[vtable_index + VTABLE_OFFSET]), None);
// Vtable loads are invariant
bcx.set_invariant_load(ptr);
ptr
impl<'a, 'tcx> VirtualIndex {
pub fn from_index(index: usize) -> Self {
VirtualIndex(index + 3)
}
pub fn get_fn(self, bcx: &Builder<'a, 'tcx>, llvtable: ValueRef) -> ValueRef {
// Load the function pointer from the vtable.
debug!("get_fn({:?}, {:?})", Value(llvtable), self);
let ptr = bcx.load_nonnull(bcx.gepi(llvtable, &[self.0]), None);
// Vtable loads are invariant
bcx.set_invariant_load(ptr);
ptr
}
pub fn get_usize(self, bcx: &Builder<'a, 'tcx>, llvtable: ValueRef) -> ValueRef {
// Load the usize value from the vtable.
debug!("get_int({:?}, {:?})", Value(llvtable), self);
let llvtable = bcx.pointercast(llvtable, Type::int(bcx.ccx).ptr_to());
let ptr = bcx.load(bcx.gepi(llvtable, &[self.0]), None);
// Vtable loads are invariant
bcx.set_invariant_load(ptr);
ptr
}
}
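A tiny self-contained mirror of the indexing scheme above (names copied from the diff, values assumed): the vtable header occupies slots 0-2, so the i-th trait method lives at slot 3 + i.

#[derive(Copy, Clone, Debug, PartialEq)]
struct VirtualIndex(usize);

impl VirtualIndex {
    fn from_index(index: usize) -> Self { VirtualIndex(index + 3) }
}

const DESTRUCTOR: VirtualIndex = VirtualIndex(0);
const SIZE: VirtualIndex = VirtualIndex(1);
const ALIGN: VirtualIndex = VirtualIndex(2);

fn main() {
    // The header slots are fixed...
    assert_eq!((DESTRUCTOR.0, SIZE.0, ALIGN.0), (0, 1, 2));
    // ...and trait methods start right after them.
    assert_eq!(VirtualIndex::from_index(0).0, 3);
    assert_eq!(VirtualIndex::from_index(2).0, 5);
}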
/// Creates a dynamic vtable for the given type and vtable origin.
@ -68,8 +85,7 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
let align = align_of(ccx, ty);
let mut components: Vec<_> = [
// Generate a destructor for the vtable.
glue::get_drop_glue(ccx, ty),
callee::get_fn(ccx, monomorphize::resolve_drop_in_place(ccx.shared(), ty)),
C_uint(ccx, size),
C_uint(ccx, align)
].iter().cloned().collect();

View File

@ -19,13 +19,13 @@ use base::{self, Lifetime};
use callee;
use builder::Builder;
use common::{self, Funclet};
use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef};
use common::{C_bool, C_str_slice, C_struct, C_u32, C_uint, C_undef};
use consts;
use machine::llalign_of_min;
use meth;
use monomorphize;
use tvec;
use type_of::{self, align_of};
use glue;
use type_::Type;
use rustc_data_structures::indexed_vec::IndexVec;
@ -209,21 +209,49 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
mir::TerminatorKind::Drop { ref location, target, unwind } => {
let ty = location.ty(&self.mir, bcx.tcx()).to_ty(bcx.tcx());
let ty = self.monomorphize(&ty);
let drop_fn = monomorphize::resolve_drop_in_place(bcx.ccx.shared(), ty);
// Double check for necessity to drop
if !bcx.ccx.shared().type_needs_drop(ty) {
if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
// we don't actually need to drop anything.
funclet_br(self, bcx, target);
return;
return
}
let mut lvalue = self.trans_lvalue(&bcx, location);
let drop_fn = glue::get_drop_glue(bcx.ccx, ty);
let drop_ty = glue::get_drop_glue_type(bcx.ccx.shared(), ty);
if bcx.ccx.shared().type_is_sized(ty) && drop_ty != ty {
lvalue.llval = bcx.pointercast(
lvalue.llval, type_of::type_of(bcx.ccx, drop_ty).ptr_to());
}
let args = &[lvalue.llval, lvalue.llextra][..1 + lvalue.has_extra() as usize];
let lvalue = self.trans_lvalue(&bcx, location);
let (drop_fn, need_extra) = match ty.sty {
ty::TyDynamic(..) => (meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra),
false),
ty::TyArray(ety, _) | ty::TySlice(ety) => {
// FIXME: handle panics
let drop_fn = monomorphize::resolve_drop_in_place(
bcx.ccx.shared(), ety);
let drop_fn = callee::get_fn(bcx.ccx, drop_fn);
let bcx = tvec::slice_for_each(
&bcx,
lvalue.project_index(&bcx, C_uint(bcx.ccx, 0u64)),
ety,
lvalue.len(bcx.ccx),
|bcx, llval, loop_bb| {
self.set_debug_loc(&bcx, terminator.source_info);
if let Some(unwind) = unwind {
bcx.invoke(
drop_fn,
&[llval],
loop_bb,
llblock(self, unwind),
cleanup_bundle
);
} else {
bcx.call(drop_fn, &[llval], cleanup_bundle);
bcx.br(loop_bb);
}
});
funclet_br(self, bcx, target);
return
}
_ => (callee::get_fn(bcx.ccx, drop_fn), lvalue.has_extra())
};
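For the array/slice arm above, a plain-Rust sketch (hypothetical helper, not compiler code) of what the emitted loop does: drop each element in place, front to back. As the commit message notes, a panic in the middle of this loop is not yet handled by the emitted unwind path.

unsafe fn drop_slice_elements<T>(data: *mut T, len: usize) {
    for i in 0..len {
        // Run the element's destructor without moving it out.
        std::ptr::drop_in_place(data.offset(i as isize));
    }
}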
let args = &[lvalue.llval, lvalue.llextra][..1 + need_extra as usize];
if let Some(unwind) = unwind {
bcx.invoke(
drop_fn,
@ -417,23 +445,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
Some(ty::InstanceDef::Virtual(..)) => {
FnType::new_vtable(bcx.ccx, sig, &extra_args)
}
_ => FnType::new(bcx.ccx, sig, &extra_args)
};
if intrinsic == Some("drop_in_place") {
let &(_, target) = destination.as_ref().unwrap();
let ty = instance.unwrap().substs.type_at(0);
// Double check for necessity to drop
if !bcx.ccx.shared().type_needs_drop(ty) {
Some(ty::InstanceDef::DropGlue(_, None)) => {
// empty drop glue - a nop.
let &(_, target) = destination.as_ref().unwrap();
funclet_br(self, bcx, target);
return;
}
let drop_fn = glue::get_drop_glue(bcx.ccx, ty);
let llty = fn_ty.llvm_type(bcx.ccx).ptr_to();
llfn = Some(bcx.pointercast(drop_fn, llty));
}
_ => FnType::new(bcx.ccx, sig, &extra_args)
};
// The arguments we'll be passing. Plus one to account for outptr, if used.
let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize;
@ -588,7 +607,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let (ptr, meta) = (a, b);
if *next_idx == 0 {
if let Some(ty::InstanceDef::Virtual(_, idx)) = *def {
let llmeth = meth::get_virtual_method(bcx, meta, idx);
let llmeth = meth::VirtualIndex::from_index(idx).get_fn(bcx, meta);
let llty = fn_ty.llvm_type(bcx.ccx).ptr_to();
*llfn = Some(bcx.pointercast(llmeth, llty));
}
@ -756,14 +775,18 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
return block;
}
let block = self.blocks[target_bb];
let landing_pad = self.landing_pad_uncached(block);
self.landing_pads[target_bb] = Some(landing_pad);
landing_pad
}
fn landing_pad_uncached(&mut self, target_bb: BasicBlockRef) -> BasicBlockRef {
if base::wants_msvc_seh(self.ccx.sess()) {
return self.blocks[target_bb];
return target_bb;
}
let target = self.get_builder(target_bb);
let bcx = self.new_block("cleanup");
self.landing_pads[target_bb] = Some(bcx.llbb());
let ccx = bcx.ccx;
let llpersonality = self.ccx.eh_personality();
@ -772,7 +795,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
bcx.set_cleanup(llretval);
let slot = self.get_personality_slot(&bcx);
bcx.store(llretval, slot, None);
bcx.br(target.llbb());
bcx.br(target_bb);
bcx.llbb()
}

View File

@ -27,7 +27,6 @@ use std::ptr;
use std::ops;
use super::{MirContext, LocalRef};
use super::operand::OperandValue;
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Alignment {
@ -95,16 +94,6 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
LvalueRef::new_sized(llval, LvalueTy::from_ty(ty), alignment)
}
pub fn new_unsized_ty(llval: ValueRef, llextra: ValueRef, ty: Ty<'tcx>, alignment: Alignment)
-> LvalueRef<'tcx> {
LvalueRef {
llval: llval,
llextra: llextra,
ty: LvalueTy::from_ty(ty),
alignment: alignment,
}
}
pub fn alloca(bcx: &Builder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> LvalueRef<'tcx> {
debug!("alloca({:?}: {:?})", name, ty);
let tmp = bcx.alloca(type_of::type_of(bcx.ccx, ty), name);
@ -279,6 +268,16 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
_ => bug!("element access in type without elements: {} represented as {:#?}", t, l)
}
}
pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef {
if let ty::TySlice(_) = self.ty.to_ty(bcx.tcx()).sty {
// Slices already point to the array element type.
bcx.inbounds_gep(self.llval, &[llindex])
} else {
let zero = common::C_uint(bcx.ccx, 0u64);
bcx.inbounds_gep(self.llval, &[zero, llindex])
}
}
}
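A small standalone illustration (made-up values) of the distinction project_index encodes: an array lvalue addresses the whole aggregate, so reaching element i takes a [0, i] gep (step to the first element, then offset), while a slice's data pointer already addresses the elements and only needs the [i] offset.

fn main() {
    let arr = [10u32, 20, 30, 40];
    // Array case: pointer to the aggregate, then to its first element.
    let whole: *const [u32; 4] = &arr;
    let first = whole as *const u32;
    // Slice case: the data pointer already points at the elements.
    let data: *const u32 = arr.as_ptr();
    unsafe {
        assert_eq!(*first.offset(2), 30);
        assert_eq!(*data.offset(2), 30);
    }
}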
impl<'a, 'tcx> MirContext<'a, 'tcx> {
@ -314,21 +313,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
elem: mir::ProjectionElem::Deref
}) => {
// Load the pointer from its location.
let ptr = self.trans_consume(bcx, base);
let projected_ty = LvalueTy::from_ty(ptr.ty)
.projection_ty(tcx, &mir::ProjectionElem::Deref);
let projected_ty = self.monomorphize(&projected_ty);
let (llptr, llextra) = match ptr.val {
OperandValue::Immediate(llptr) => (llptr, ptr::null_mut()),
OperandValue::Pair(llptr, llextra) => (llptr, llextra),
OperandValue::Ref(..) => bug!("Deref of by-Ref type {:?}", ptr.ty)
};
LvalueRef {
llval: llptr,
llextra: llextra,
ty: projected_ty,
alignment: Alignment::AbiAligned,
}
self.trans_consume(bcx, base).deref()
}
mir::Lvalue::Projection(ref projection) => {
let tr_base = self.trans_lvalue(bcx, &projection.base);
@ -336,17 +321,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let projected_ty = self.monomorphize(&projected_ty);
let align = tr_base.alignment;
let project_index = |llindex| {
let element = if let ty::TySlice(_) = tr_base.ty.to_ty(tcx).sty {
// Slices already point to the array element type.
bcx.inbounds_gep(tr_base.llval, &[llindex])
} else {
let zero = common::C_uint(bcx.ccx, 0u64);
bcx.inbounds_gep(tr_base.llval, &[zero, llindex])
};
(element, align)
};
let ((llprojected, align), llextra) = match projection.elem {
mir::ProjectionElem::Deref => bug!(),
mir::ProjectionElem::Field(ref field, _) => {
@ -359,13 +333,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
mir::ProjectionElem::Index(ref index) => {
let index = self.trans_operand(bcx, index);
(project_index(self.prepare_index(bcx, index.immediate())), ptr::null_mut())
let llindex = self.prepare_index(bcx, index.immediate());
((tr_base.project_index(bcx, llindex), align), ptr::null_mut())
}
mir::ProjectionElem::ConstantIndex { offset,
from_end: false,
min_length: _ } => {
let lloffset = C_uint(bcx.ccx, offset);
(project_index(lloffset), ptr::null_mut())
((tr_base.project_index(bcx, lloffset), align), ptr::null_mut())
}
mir::ProjectionElem::ConstantIndex { offset,
from_end: true,
@ -373,11 +348,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let lloffset = C_uint(bcx.ccx, offset);
let lllen = tr_base.len(bcx.ccx);
let llindex = bcx.sub(lllen, lloffset);
(project_index(llindex), ptr::null_mut())
((tr_base.project_index(bcx, llindex), align), ptr::null_mut())
}
mir::ProjectionElem::Subslice { from, to } => {
let llindex = C_uint(bcx.ccx, from);
let (llbase, align) = project_index(llindex);
let llbase = tr_base.project_index(bcx, C_uint(bcx.ccx, from));
let base_ty = tr_base.ty.to_ty(bcx.tcx());
match base_ty.sty {

View File

@ -9,9 +9,10 @@
// except according to those terms.
use llvm::ValueRef;
use rustc::ty::Ty;
use rustc::ty::{self, Ty};
use rustc::ty::layout::Layout;
use rustc::mir;
use rustc::mir::tcx::LvalueTy;
use rustc_data_structures::indexed_vec::Idx;
use base;
@ -22,9 +23,10 @@ use type_of;
use type_::Type;
use std::fmt;
use std::ptr;
use super::{MirContext, LocalRef};
use super::lvalue::Alignment;
use super::lvalue::{Alignment, LvalueRef};
/// The representation of a Rust value. The enum variant is in fact
/// uniquely determined by the value's type, but is kept as a
@ -86,6 +88,22 @@ impl<'a, 'tcx> OperandRef<'tcx> {
}
}
pub fn deref(self) -> LvalueRef<'tcx> {
let projected_ty = self.ty.builtin_deref(true, ty::NoPreference)
.unwrap().ty;
let (llptr, llextra) = match self.val {
OperandValue::Immediate(llptr) => (llptr, ptr::null_mut()),
OperandValue::Pair(llptr, llextra) => (llptr, llextra),
OperandValue::Ref(..) => bug!("Deref of by-Ref operand {:?}", self)
};
LvalueRef {
llval: llptr,
llextra: llextra,
ty: LvalueTy::from_ty(projected_ty),
alignment: Alignment::AbiAligned,
}
}
/// If this operand is a Pair, we return an
/// Immediate aggregate with the two values.
pub fn pack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
@ -236,7 +254,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
mir::Operand::Constant(ref constant) => {
let val = self.trans_constant(bcx, constant);
let val = self.trans_constant(&bcx, constant);
let operand = val.to_operand(bcx.ccx);
if let OperandValue::Ref(ptr, align) = operand.val {
// If this is a OperandValue::Ref to an immediate constant, load it.

View File

@ -98,8 +98,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let size = count.as_u64(bcx.tcx().sess.target.uint_type);
let size = C_uint(bcx.ccx, size);
let base = base::get_dataptr(&bcx, dest.llval);
tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| {
tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot, loop_bb| {
self.store_operand(bcx, llslot, dest.alignment.to_align(), tr_elem);
bcx.br(loop_bb);
})
}
@ -459,7 +460,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
};
(bcx, operand)
}
mir::Rvalue::Use(ref operand) => {
let operand = self.trans_operand(&bcx, operand);
(bcx, operand)
@ -662,7 +662,7 @@ pub fn rvalue_creates_operand(rvalue: &mir::Rvalue) -> bool {
mir::Rvalue::UnaryOp(..) |
mir::Rvalue::Discriminant(..) |
mir::Rvalue::Box(..) |
mir::Rvalue::Use(..) =>
mir::Rvalue::Use(..) => // (*)
true,
mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) =>

View File

@ -10,9 +10,11 @@
use abi::Abi;
use common::*;
use glue;
use rustc::hir::def_id::DefId;
use rustc::infer::TransNormalize;
use rustc::middle::lang_items::DropInPlaceFnLangItem;
use rustc::traits::{self, SelectionContext, Reveal};
use rustc::ty::adjustment::CustomCoerceUnsized;
use rustc::ty::fold::{TypeFolder, TypeFoldable};
@ -242,8 +244,19 @@ pub fn resolve<'a, 'tcx>(
ty::InstanceDef::Intrinsic(def_id)
}
_ => {
debug!(" => free item");
ty::InstanceDef::Item(def_id)
if Some(def_id) == scx.tcx().lang_items.drop_in_place_fn() {
let ty = substs.type_at(0);
if glue::needs_drop_glue(scx, ty) {
debug!(" => nontrivial drop glue");
ty::InstanceDef::DropGlue(def_id, Some(ty))
} else {
debug!(" => trivial drop glue");
ty::InstanceDef::DropGlue(def_id, None)
}
} else {
debug!(" => free item");
ty::InstanceDef::Item(def_id)
}
}
};
Instance { def, substs }
@ -253,6 +266,16 @@ pub fn resolve<'a, 'tcx>(
result
}
pub fn resolve_drop_in_place<'a, 'tcx>(
scx: &SharedCrateContext<'a, 'tcx>,
ty: Ty<'tcx>)
-> ty::Instance<'tcx>
{
let def_id = scx.tcx().require_lang_item(DropInPlaceFnLangItem);
let substs = scx.tcx().intern_substs(&[Kind::from(ty)]);
resolve(scx, def_id, substs)
}
pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx>,
source_ty: Ty<'tcx>,
target_ty: Ty<'tcx>)

View File

@ -194,7 +194,6 @@ impl<'tcx> CodegenUnit<'tcx> {
TransItem::Static(node_id) => {
exported_symbols.contains(&node_id)
}
TransItem::DropGlue(..) => false,
};
exported.hash(&mut state);
}
@ -245,7 +244,6 @@ impl<'tcx> CodegenUnit<'tcx> {
tcx.hir.as_local_node_id(instance.def_id())
}
TransItem::Static(node_id) => Some(node_id),
TransItem::DropGlue(_) => None,
}
}
}
@ -341,7 +339,6 @@ fn place_root_translation_items<'a, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>,
match trans_item {
TransItem::Fn(..) |
TransItem::Static(..) => llvm::ExternalLinkage,
TransItem::DropGlue(..) => unreachable!(),
}
}
};
@ -461,6 +458,7 @@ fn characteristic_def_id_of_trans_item<'a, 'tcx>(scx: &SharedCrateContext<'a, 't
ty::InstanceDef::FnPtrShim(..) |
ty::InstanceDef::ClosureOnceShim { .. } |
ty::InstanceDef::Intrinsic(..) |
ty::InstanceDef::DropGlue(..) |
ty::InstanceDef::Virtual(..) => return None
};
@ -485,7 +483,6 @@ fn characteristic_def_id_of_trans_item<'a, 'tcx>(scx: &SharedCrateContext<'a, 't
Some(def_id)
}
TransItem::DropGlue(dg) => characteristic_def_id_of_type(dg.ty()),
TransItem::Static(node_id) => Some(tcx.hir.local_def_id(node_id)),
}
}

View File

@ -100,7 +100,6 @@ impl<'tcx> SymbolMap<'tcx> {
tcx.hir.as_local_node_id(def.def_id())
}
TransItem::Static(node_id) => Some(node_id),
TransItem::DropGlue(_) => None,
}.map(|node_id| {
tcx.hir.span(node_id)
})

View File

@ -20,28 +20,23 @@ use consts;
use context::{CrateContext, SharedCrateContext};
use common;
use declare;
use glue::DropGlueKind;
use llvm;
use monomorphize::Instance;
use rustc::dep_graph::DepNode;
use rustc::hir;
use rustc::hir::def_id::DefId;
use rustc::hir::map::definitions::DefPathData;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::ty::subst::Substs;
use rustc_const_eval::fatal_const_eval_err;
use syntax::ast::{self, NodeId};
use syntax::attr;
use type_of;
use glue;
use abi::{Abi, FnType};
use back::symbol_names;
use std::fmt::Write;
use std::iter;
#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)]
pub enum TransItem<'tcx> {
DropGlue(DropGlueKind<'tcx>),
Fn(Instance<'tcx>),
Static(NodeId)
}
@ -100,9 +95,6 @@ impl<'a, 'tcx> TransItem<'tcx> {
base::trans_instance(&ccx, instance);
}
TransItem::DropGlue(dg) => {
glue::implement_drop_glue(&ccx, dg);
}
}
debug!("END IMPLEMENTING '{} ({})' in cgu {}",
@ -131,9 +123,6 @@ impl<'a, 'tcx> TransItem<'tcx> {
TransItem::Fn(instance) => {
TransItem::predefine_fn(ccx, instance, linkage, &symbol_name);
}
TransItem::DropGlue(dg) => {
TransItem::predefine_drop_glue(ccx, dg, linkage, &symbol_name);
}
}
debug!("END PREDEFINING '{} ({})' in cgu {}",
@ -180,52 +169,14 @@ impl<'a, 'tcx> TransItem<'tcx> {
}
debug!("predefine_fn: mono_ty = {:?} instance = {:?}", mono_ty, instance);
match ccx.tcx().def_key(instance.def_id()).disambiguated_data.data {
DefPathData::StructCtor |
DefPathData::EnumVariant(..) |
DefPathData::ClosureExpr => {
attributes::inline(lldecl, attributes::InlineAttr::Hint);
}
_ => {}
if common::is_inline_instance(ccx.tcx(), &instance) {
attributes::inline(lldecl, attributes::InlineAttr::Hint);
}
attributes::from_fn_attrs(ccx, &attrs, lldecl);
ccx.instances().borrow_mut().insert(instance, lldecl);
}
fn predefine_drop_glue(ccx: &CrateContext<'a, 'tcx>,
dg: glue::DropGlueKind<'tcx>,
linkage: llvm::Linkage,
symbol_name: &str) {
let tcx = ccx.tcx();
assert_eq!(dg.ty(), glue::get_drop_glue_type(ccx.shared(), dg.ty()));
let t = dg.ty();
let sig = tcx.mk_fn_sig(
iter::once(tcx.mk_mut_ptr(t)),
tcx.mk_nil(),
false,
hir::Unsafety::Normal,
Abi::Rust
);
debug!("predefine_drop_glue: sig={}", sig);
let fn_ty = FnType::new(ccx, sig, &[]);
let llfnty = fn_ty.llvm_type(ccx);
assert!(declare::get_defined_value(ccx, symbol_name).is_none());
let llfn = declare::declare_cfn(ccx, symbol_name, llfnty);
unsafe { llvm::LLVMRustSetLinkage(llfn, linkage) };
if linkage == llvm::Linkage::LinkOnceODRLinkage ||
linkage == llvm::Linkage::WeakODRLinkage {
llvm::SetUniqueComdat(ccx.llmod(), llfn);
}
attributes::set_frame_pointer_elimination(ccx, llfn);
ccx.drop_glues().borrow_mut().insert(dg, (llfn, fn_ty));
}
pub fn compute_symbol_name(&self,
scx: &SharedCrateContext<'a, 'tcx>) -> String {
match *self {
@ -234,13 +185,6 @@ impl<'a, 'tcx> TransItem<'tcx> {
let def_id = scx.tcx().hir.local_def_id(node_id);
symbol_names::symbol_name(Instance::mono(scx.tcx(), def_id), scx)
}
TransItem::DropGlue(dg) => {
let prefix = match dg {
DropGlueKind::Ty(_) => "drop",
DropGlueKind::TyContents(_) => "drop_contents",
};
symbol_names::exported_name_from_type_and_prefix(scx, dg.ty(), prefix)
}
}
}
@ -257,7 +201,6 @@ impl<'a, 'tcx> TransItem<'tcx> {
InstantiationMode::GloballyShared
}
}
TransItem::DropGlue(..) => InstantiationMode::LocalCopy,
TransItem::Static(..) => InstantiationMode::GloballyShared,
}
}
@ -267,7 +210,6 @@ impl<'a, 'tcx> TransItem<'tcx> {
TransItem::Fn(ref instance) => {
instance.substs.types().next().is_some()
}
TransItem::DropGlue(..) |
TransItem::Static(..) => false,
}
}
@ -276,7 +218,6 @@ impl<'a, 'tcx> TransItem<'tcx> {
let def_id = match *self {
TransItem::Fn(ref instance) => instance.def_id(),
TransItem::Static(node_id) => tcx.hir.local_def_id(node_id),
TransItem::DropGlue(..) => return None,
};
let attributes = tcx.get_attrs(def_id);
@ -300,16 +241,6 @@ impl<'a, 'tcx> TransItem<'tcx> {
let hir_map = &tcx.hir;
return match *self {
TransItem::DropGlue(dg) => {
let mut s = String::with_capacity(32);
match dg {
DropGlueKind::Ty(_) => s.push_str("drop-glue "),
DropGlueKind::TyContents(_) => s.push_str("drop-glue-contents "),
};
let printer = DefPathBasedNames::new(tcx, false, false);
printer.push_type_name(dg.ty(), &mut s);
s
}
TransItem::Fn(instance) => {
to_string_internal(tcx, "fn ", instance)
},
@ -334,13 +265,6 @@ impl<'a, 'tcx> TransItem<'tcx> {
pub fn to_raw_string(&self) -> String {
match *self {
TransItem::DropGlue(dg) => {
let prefix = match dg {
DropGlueKind::Ty(_) => "Ty",
DropGlueKind::TyContents(_) => "TyContents",
};
format!("DropGlue({}: {})", prefix, dg.ty() as *const _ as usize)
}
TransItem::Fn(instance) => {
format!("Fn({:?}, {})",
instance.def,

View File

@ -10,7 +10,7 @@
use llvm;
use builder::Builder;
use llvm::ValueRef;
use llvm::{BasicBlockRef, ValueRef};
use common::*;
use rustc::ty::Ty;
@ -20,7 +20,7 @@ pub fn slice_for_each<'a, 'tcx, F>(
unit_ty: Ty<'tcx>,
len: ValueRef,
f: F
) -> Builder<'a, 'tcx> where F: FnOnce(&Builder<'a, 'tcx>, ValueRef) {
) -> Builder<'a, 'tcx> where F: FnOnce(&Builder<'a, 'tcx>, ValueRef, BasicBlockRef) {
// Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
let zst = type_is_zero_size(bcx.ccx, unit_ty);
let add = |bcx: &Builder, a, b| if zst {
@ -46,9 +46,8 @@ pub fn slice_for_each<'a, 'tcx, F>(
let keep_going = header_bcx.icmp(llvm::IntNE, current, end);
header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb());
f(&body_bcx, if zst { data_ptr } else { current });
let next = add(&body_bcx, current, C_uint(bcx.ccx, 1usize));
f(&body_bcx, if zst { data_ptr } else { current }, header_bcx.llbb());
header_bcx.add_incoming_to_phi(current, next, body_bcx.llbb());
body_bcx.br(header_bcx.llbb());
next_bcx
}

View File

@ -30,5 +30,3 @@ fn main()
// This should not introduce a codegen item
let _ = cgu_generic_function::exported_but_not_generic(3);
}
//~ TRANS_ITEM drop-glue i8

View File

@ -11,8 +11,7 @@
// ignore-tidy-linelength
// compile-flags:-Zprint-trans-items=eager
//~ TRANS_ITEM drop-glue drop_in_place_intrinsic::StructWithDtor[0]
//~ TRANS_ITEM drop-glue-contents drop_in_place_intrinsic::StructWithDtor[0]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<drop_in_place_intrinsic::StructWithDtor[0]> @@ drop_in_place_intrinsic.cgu-0[Internal]
struct StructWithDtor(u32);
impl Drop for StructWithDtor {
@ -23,7 +22,7 @@ impl Drop for StructWithDtor {
//~ TRANS_ITEM fn drop_in_place_intrinsic::main[0]
fn main() {
//~ TRANS_ITEM drop-glue [drop_in_place_intrinsic::StructWithDtor[0]; 2]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<[drop_in_place_intrinsic::StructWithDtor[0]; 2]> @@ drop_in_place_intrinsic.cgu-0[Internal]
let x = [StructWithDtor(0), StructWithDtor(1)];
drop_slice_in_place(&x);
@ -35,7 +34,7 @@ fn drop_slice_in_place(x: &[StructWithDtor]) {
// This is the interesting thing in this test case: Normally we would
// not have drop-glue for the unsized [StructWithDtor]. This has to be
// generated, though, when the drop_in_place() intrinsic is used.
//~ TRANS_ITEM drop-glue [drop_in_place_intrinsic::StructWithDtor[0]]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<[drop_in_place_intrinsic::StructWithDtor[0]]> @@ drop_in_place_intrinsic.cgu-0[Internal]
::std::ptr::drop_in_place(x as *const _ as *mut [StructWithDtor]);
}
}
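For context, a standalone sketch of the behaviour this test depends on (type name invented here): ptr::drop_in_place applied to an unsized [T] runs every element's destructor, which is exactly the glue the new core::ptr::drop_in_place trans item provides.

struct Noisy(u32);
impl Drop for Noisy {
    fn drop(&mut self) { println!("dropping {}", self.0); }
}

fn main() {
    // Move the elements behind a raw pointer so they are not dropped twice.
    let raw: *mut [Noisy] = Box::into_raw(vec![Noisy(0), Noisy(1)].into_boxed_slice());
    unsafe {
        std::ptr::drop_in_place(raw); // runs Drop for each element
        // The allocation is intentionally leaked here to keep the sketch
        // short; normally the Box would simply be dropped instead.
    }
}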

View File

@ -44,5 +44,3 @@ fn main() {
//~ TRANS_ITEM fn function_as_argument::function[0]<f32, i64>
take_fn_pointer(function, 0f32, 0i64);
}
//~ TRANS_ITEM drop-glue i8

View File

@ -45,8 +45,7 @@ enum EnumNoDrop<T1, T2> {
struct NonGenericNoDrop(i32);
struct NonGenericWithDrop(i32);
//~ TRANS_ITEM drop-glue generic_drop_glue::NonGenericWithDrop[0]
//~ TRANS_ITEM drop-glue-contents generic_drop_glue::NonGenericWithDrop[0]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<generic_drop_glue::NonGenericWithDrop[0]> @@ generic_drop_glue.cgu-0[Internal]
impl Drop for NonGenericWithDrop {
//~ TRANS_ITEM fn generic_drop_glue::{{impl}}[2]::drop[0]
@ -55,13 +54,11 @@ impl Drop for NonGenericWithDrop {
//~ TRANS_ITEM fn generic_drop_glue::main[0]
fn main() {
//~ TRANS_ITEM drop-glue generic_drop_glue::StructWithDrop[0]<i8, char>
//~ TRANS_ITEM drop-glue-contents generic_drop_glue::StructWithDrop[0]<i8, char>
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<generic_drop_glue::StructWithDrop[0]<i8, char>> @@ generic_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn generic_drop_glue::{{impl}}[0]::drop[0]<i8, char>
let _ = StructWithDrop { x: 0i8, y: 'a' }.x;
//~ TRANS_ITEM drop-glue generic_drop_glue::StructWithDrop[0]<&str, generic_drop_glue::NonGenericNoDrop[0]>
//~ TRANS_ITEM drop-glue-contents generic_drop_glue::StructWithDrop[0]<&str, generic_drop_glue::NonGenericNoDrop[0]>
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<generic_drop_glue::StructWithDrop[0]<&str, generic_drop_glue::NonGenericNoDrop[0]>> @@ generic_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn generic_drop_glue::{{impl}}[0]::drop[0]<&str, generic_drop_glue::NonGenericNoDrop[0]>
let _ = StructWithDrop { x: "&str", y: NonGenericNoDrop(0) }.y;
@ -70,19 +67,17 @@ fn main() {
// This is supposed to generate drop-glue because it contains a field that
// needs to be dropped.
//~ TRANS_ITEM drop-glue generic_drop_glue::StructNoDrop[0]<generic_drop_glue::NonGenericWithDrop[0], f64>
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<generic_drop_glue::StructNoDrop[0]<generic_drop_glue::NonGenericWithDrop[0], f64>> @@ generic_drop_glue.cgu-0[Internal]
let _ = StructNoDrop { x: NonGenericWithDrop(0), y: 0f64 }.y;
//~ TRANS_ITEM drop-glue generic_drop_glue::EnumWithDrop[0]<i32, i64>
//~ TRANS_ITEM drop-glue-contents generic_drop_glue::EnumWithDrop[0]<i32, i64>
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<generic_drop_glue::EnumWithDrop[0]<i32, i64>> @@ generic_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn generic_drop_glue::{{impl}}[1]::drop[0]<i32, i64>
let _ = match EnumWithDrop::A::<i32, i64>(0) {
EnumWithDrop::A(x) => x,
EnumWithDrop::B(x) => x as i32
};
//~ TRANS_ITEM drop-glue generic_drop_glue::EnumWithDrop[0]<f64, f32>
//~ TRANS_ITEM drop-glue-contents generic_drop_glue::EnumWithDrop[0]<f64, f32>
//~TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<generic_drop_glue::EnumWithDrop[0]<f64, f32>> @@ generic_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn generic_drop_glue::{{impl}}[1]::drop[0]<f64, f32>
let _ = match EnumWithDrop::B::<f64, f32>(1.0) {
EnumWithDrop::A(x) => x,
@ -99,5 +94,3 @@ fn main() {
EnumNoDrop::B(x) => x as f64
};
}
//~ TRANS_ITEM drop-glue i8

View File

@ -31,12 +31,13 @@ impl<T> Trait for Struct<T> {
fn main() {
let s1 = Struct { _a: 0u32 };
//~ TRANS_ITEM drop-glue i8
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<instantiation_through_vtable::Struct[0]<u32>> @@ instantiation_through_vtable.cgu-0[Internal]
//~ TRANS_ITEM fn instantiation_through_vtable::{{impl}}[0]::foo[0]<u32>
//~ TRANS_ITEM fn instantiation_through_vtable::{{impl}}[0]::bar[0]<u32>
let _ = &s1 as &Trait;
let s1 = Struct { _a: 0u64 };
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<instantiation_through_vtable::Struct[0]<u64>> @@ instantiation_through_vtable.cgu-0[Internal]
//~ TRANS_ITEM fn instantiation_through_vtable::{{impl}}[0]::foo[0]<u64>
//~ TRANS_ITEM fn instantiation_through_vtable::{{impl}}[0]::bar[0]<u64>
let _ = &s1 as &Trait;

View File

@ -40,5 +40,3 @@ fn main() {
//~ TRANS_ITEM fn items_within_generic_items::generic_fn[0]<i8>
let _ = generic_fn(0i8);
}
//~ TRANS_ITEM drop-glue i8

View File

@ -13,8 +13,7 @@
#![deny(dead_code)]
//~ TRANS_ITEM drop-glue non_generic_drop_glue::StructWithDrop[0]
//~ TRANS_ITEM drop-glue-contents non_generic_drop_glue::StructWithDrop[0]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<non_generic_drop_glue::StructWithDrop[0]> @@ non_generic_drop_glue.cgu-0[Internal]
struct StructWithDrop {
x: i32
}
@ -28,8 +27,7 @@ struct StructNoDrop {
x: i32
}
//~ TRANS_ITEM drop-glue non_generic_drop_glue::EnumWithDrop[0]
//~ TRANS_ITEM drop-glue-contents non_generic_drop_glue::EnumWithDrop[0]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<non_generic_drop_glue::EnumWithDrop[0]> @@ non_generic_drop_glue.cgu-0[Internal]
enum EnumWithDrop {
A(i32)
}
@ -54,5 +52,3 @@ fn main() {
EnumNoDrop::A(x) => x
};
}
//~ TRANS_ITEM drop-glue i8

View File

@ -77,5 +77,3 @@ fn main() {
let x = Struct { _x: 0 };
x.bar();
}
//~ TRANS_ITEM drop-glue i8

View File

@ -68,5 +68,3 @@ impl Deref for Equatable {
&self.0
}
}
//~ TRANS_ITEM drop-glue i8

View File

@ -20,4 +20,3 @@ pub fn foo<T>() { }
fn main() { }
//~ TRANS_ITEM fn static_init::main[0]
//~ TRANS_ITEM drop-glue i8

View File

@ -60,5 +60,3 @@ fn main() {
//~ TRANS_ITEM static statics_and_consts::foo[0]::STATIC2[2]
//~ TRANS_ITEM fn statics_and_consts::main[0]
//~ TRANS_ITEM drop-glue i8

View File

@ -78,5 +78,3 @@ fn main() {
//~ TRANS_ITEM fn trait_implementations::{{impl}}[3]::bar[0]<&str, &str>
0f32.bar("&str", "&str");
}
//~ TRANS_ITEM drop-glue i8

View File

@ -64,5 +64,3 @@ fn main() {
//~ TRANS_ITEM fn core::ops[0]::FnMut[0]::call_mut[0]<fn(u32) -> u32, (u32)>
take_foo_mut(Trait::foo, 'c');
}
//~ TRANS_ITEM drop-glue i8

View File

@ -66,5 +66,3 @@ fn main() {
//~ TRANS_ITEM fn trait_method_default_impl::SomeGenericTrait[0]::bar[0]<u32, i16, ()>
0u32.bar(0i16, ());
}
//~ TRANS_ITEM drop-glue i8

View File

@ -13,12 +13,11 @@
#![deny(dead_code)]
//~ TRANS_ITEM drop-glue transitive_drop_glue::Root[0]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<transitive_drop_glue::Root[0]> @@ transitive_drop_glue.cgu-0[Internal]
struct Root(Intermediate);
//~ TRANS_ITEM drop-glue transitive_drop_glue::Intermediate[0]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<transitive_drop_glue::Intermediate[0]> @@ transitive_drop_glue.cgu-0[Internal]
struct Intermediate(Leaf);
//~ TRANS_ITEM drop-glue transitive_drop_glue::Leaf[0]
//~ TRANS_ITEM drop-glue-contents transitive_drop_glue::Leaf[0]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<transitive_drop_glue::Leaf[0]> @@ transitive_drop_glue.cgu-0[Internal]
struct Leaf;
impl Drop for Leaf {
@ -39,17 +38,15 @@ fn main() {
let _ = Root(Intermediate(Leaf));
//~ TRANS_ITEM drop-glue transitive_drop_glue::RootGen[0]<u32>
//~ TRANS_ITEM drop-glue transitive_drop_glue::IntermediateGen[0]<u32>
//~ TRANS_ITEM drop-glue transitive_drop_glue::LeafGen[0]<u32>
//~ TRANS_ITEM drop-glue-contents transitive_drop_glue::LeafGen[0]<u32>
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<transitive_drop_glue::RootGen[0]<u32>> @@ transitive_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<transitive_drop_glue::IntermediateGen[0]<u32>> @@ transitive_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<transitive_drop_glue::LeafGen[0]<u32>> @@ transitive_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn transitive_drop_glue::{{impl}}[1]::drop[0]<u32>
let _ = RootGen(IntermediateGen(LeafGen(0u32)));
//~ TRANS_ITEM drop-glue transitive_drop_glue::RootGen[0]<i16>
//~ TRANS_ITEM drop-glue transitive_drop_glue::IntermediateGen[0]<i16>
//~ TRANS_ITEM drop-glue transitive_drop_glue::LeafGen[0]<i16>
//~ TRANS_ITEM drop-glue-contents transitive_drop_glue::LeafGen[0]<i16>
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<transitive_drop_glue::RootGen[0]<i16>> @@ transitive_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<transitive_drop_glue::IntermediateGen[0]<i16>> @@ transitive_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<transitive_drop_glue::LeafGen[0]<i16>> @@ transitive_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn transitive_drop_glue::{{impl}}[1]::drop[0]<i16>
let _ = RootGen(IntermediateGen(LeafGen(0i16)));
}

View File

@ -13,8 +13,7 @@
#![deny(dead_code)]
//~ TRANS_ITEM drop-glue tuple_drop_glue::Dropped[0]
//~ TRANS_ITEM drop-glue-contents tuple_drop_glue::Dropped[0]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<tuple_drop_glue::Dropped[0]> @@ tuple_drop_glue.cgu-0[Internal]
struct Dropped;
impl Drop for Dropped {
@ -24,10 +23,10 @@ impl Drop for Dropped {
//~ TRANS_ITEM fn tuple_drop_glue::main[0]
fn main() {
//~ TRANS_ITEM drop-glue (u32, tuple_drop_glue::Dropped[0])
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<(u32, tuple_drop_glue::Dropped[0])> @@ tuple_drop_glue.cgu-0[Internal]
let x = (0u32, Dropped);
//~ TRANS_ITEM drop-glue (i16, (tuple_drop_glue::Dropped[0], bool))
//~ TRANS_ITEM drop-glue (tuple_drop_glue::Dropped[0], bool)
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<(i16, (tuple_drop_glue::Dropped[0], bool))> @@ tuple_drop_glue.cgu-0[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<(tuple_drop_glue::Dropped[0], bool)> @@ tuple_drop_glue.cgu-0[Internal]
let x = (0i16, (Dropped, true));
}

View File

@ -57,11 +57,13 @@ fn main()
{
// simple case
let bool_sized = &true;
//~ TRANS_ITEM drop-glue i8
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<bool> @@ unsizing.cgu-0[Internal]
//~ TRANS_ITEM fn unsizing::{{impl}}[0]::foo[0]
let _bool_unsized = bool_sized as &Trait;
let char_sized = &true;
let char_sized = &'a';
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<char> @@ unsizing.cgu-0[Internal]
//~ TRANS_ITEM fn unsizing::{{impl}}[1]::foo[0]
let _char_unsized = char_sized as &Trait;
@ -71,11 +73,13 @@ fn main()
_b: 2,
_c: 3.0f64
};
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<f64> @@ unsizing.cgu-0[Internal]
//~ TRANS_ITEM fn unsizing::{{impl}}[2]::foo[0]
let _struct_unsized = struct_sized as &Struct<Trait>;
// custom coercion
let wrapper_sized = Wrapper(&0u32);
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<u32> @@ unsizing.cgu-0[Internal]
//~ TRANS_ITEM fn unsizing::{{impl}}[3]::foo[0]
let _wrapper_sized = wrapper_sized as Wrapper<Trait>;
}

View File

@ -86,4 +86,3 @@ impl NonGeneric {
// Only the non-generic methods should be instantiated:
//~ TRANS_ITEM fn unused_traits_and_generics::{{impl}}[3]::foo[0]
//~ TRANS_ITEM drop-glue i8

View File

@ -20,15 +20,14 @@
// aux-build:cgu_extern_drop_glue.rs
extern crate cgu_extern_drop_glue;
//~ TRANS_ITEM drop-glue cgu_extern_drop_glue::Struct[0] @@ extern_drop_glue[Internal] extern_drop_glue-mod1[Internal]
//~ TRANS_ITEM drop-glue-contents cgu_extern_drop_glue::Struct[0] @@ extern_drop_glue[Internal] extern_drop_glue-mod1[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<cgu_extern_drop_glue::Struct[0]> @@ extern_drop_glue[Internal] extern_drop_glue-mod1[Internal]
struct LocalStruct(cgu_extern_drop_glue::Struct);
//~ TRANS_ITEM fn extern_drop_glue::user[0] @@ extern_drop_glue[External]
fn user()
{
//~ TRANS_ITEM drop-glue extern_drop_glue::LocalStruct[0] @@ extern_drop_glue[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<extern_drop_glue::LocalStruct[0]> @@ extern_drop_glue[Internal]
let _ = LocalStruct(cgu_extern_drop_glue::Struct(0));
}
@ -40,7 +39,7 @@ mod mod1 {
//~ TRANS_ITEM fn extern_drop_glue::mod1[0]::user[0] @@ extern_drop_glue-mod1[External]
fn user()
{
//~ TRANS_ITEM drop-glue extern_drop_glue::mod1[0]::LocalStruct[0] @@ extern_drop_glue-mod1[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<extern_drop_glue::mod1[0]::LocalStruct[0]> @@ extern_drop_glue-mod1[Internal]
let _ = LocalStruct(cgu_extern_drop_glue::Struct(0));
}
}

View File

@ -60,5 +60,3 @@ mod mod3 {
// once for the current crate
//~ TRANS_ITEM fn cgu_generic_function::foo[0]<&str> @@ cgu_generic_function.volatile[External]
//~ TRANS_ITEM fn cgu_generic_function::bar[0]<&str> @@ cgu_generic_function.volatile[External]
//~ TRANS_ITEM drop-glue i8

View File

@ -16,8 +16,7 @@
#![allow(dead_code)]
#![crate_type="lib"]
//~ TRANS_ITEM drop-glue local_drop_glue::Struct[0] @@ local_drop_glue[Internal] local_drop_glue-mod1[Internal]
//~ TRANS_ITEM drop-glue-contents local_drop_glue::Struct[0] @@ local_drop_glue[Internal] local_drop_glue-mod1[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<local_drop_glue::Struct[0]> @@ local_drop_glue[Internal] local_drop_glue-mod1[Internal]
struct Struct {
_a: u32
}
@ -27,7 +26,7 @@ impl Drop for Struct {
fn drop(&mut self) {}
}
//~ TRANS_ITEM drop-glue local_drop_glue::Outer[0] @@ local_drop_glue[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<local_drop_glue::Outer[0]> @@ local_drop_glue[Internal]
struct Outer {
_a: Struct
}
@ -46,10 +45,10 @@ mod mod1
{
use super::Struct;
//~ TRANS_ITEM drop-glue local_drop_glue::mod1[0]::Struct2[0] @@ local_drop_glue-mod1[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<local_drop_glue::mod1[0]::Struct2[0]> @@ local_drop_glue-mod1[Internal]
struct Struct2 {
_a: Struct,
//~ TRANS_ITEM drop-glue (u32, local_drop_glue::Struct[0]) @@ local_drop_glue-mod1[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<(u32, local_drop_glue::Struct[0])> @@ local_drop_glue-mod1[Internal]
_b: (u32, Struct),
}

View File

@ -80,5 +80,3 @@ mod mod2 {
static BAZ: u64 = 0;
}
}
//~ TRANS_ITEM drop-glue i8

View File

@ -46,5 +46,3 @@ mod mod1 {
static BAR: u32 = 0;
}
}
//~ TRANS_ITEM drop-glue i8

View File

@ -69,7 +69,7 @@ mod mod1 {
//~ TRANS_ITEM fn vtable_through_const::main[0] @@ vtable_through_const[External]
fn main() {
//~ TRANS_ITEM drop-glue i8 @@ vtable_through_const[Internal]
//~ TRANS_ITEM fn core::ptr[0]::drop_in_place[0]<u32> @@ vtable_through_const[Internal]
// Since Trait1::do_something() is instantiated via its default implementation,
// it is considered a generic and is instantiated here only because it is