diff --git a/src/librustc/infer/mod.rs b/src/librustc/infer/mod.rs index 4dbee6ffa79..620e6504f41 100644 --- a/src/librustc/infer/mod.rs +++ b/src/librustc/infer/mod.rs @@ -624,6 +624,24 @@ pub fn normalize_associated_type(self, value: &T) -> T value.trans_normalize(&infcx) }) } + + pub fn normalize_associated_type_in_env( + self, value: &T, env: &'a ty::ParameterEnvironment<'tcx> + ) -> T + where T: TransNormalize<'tcx> + { + debug!("normalize_associated_type_in_env(t={:?})", value); + + let value = self.erase_regions(value); + + if !value.has_projection_types() { + return value; + } + + self.infer_ctxt(None, Some(env.clone()), ProjectionMode::Any).enter(|infcx| { + value.trans_normalize(&infcx) + }) + } } impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { diff --git a/src/librustc/middle/cstore.rs b/src/librustc/middle/cstore.rs index 3ede60beb74..d85ea961462 100644 --- a/src/librustc/middle/cstore.rs +++ b/src/librustc/middle/cstore.rs @@ -163,6 +163,7 @@ fn item_type<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> ty::TypeScheme<'tcx>; fn visible_parent_map<'a>(&'a self) -> ::std::cell::RefMut<'a, DefIdMap>; fn item_name(&self, def: DefId) -> ast::Name; + fn opt_item_name(&self, def: DefId) -> Option; fn item_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> ty::GenericPredicates<'tcx>; fn item_super_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) @@ -345,6 +346,7 @@ fn visible_parent_map<'a>(&'a self) -> ::std::cell::RefMut<'a, DefIdMap> bug!("visible_parent_map") } fn item_name(&self, def: DefId) -> ast::Name { bug!("item_name") } + fn opt_item_name(&self, def: DefId) -> Option { bug!("opt_item_name") } fn item_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> ty::GenericPredicates<'tcx> { bug!("item_predicates") } fn item_super_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) diff --git a/src/librustc/mir/repr.rs b/src/librustc/mir/repr.rs index f9a671435ff..1cd837e4853 100644 --- 
a/src/librustc/mir/repr.rs +++ b/src/librustc/mir/repr.rs @@ -330,11 +330,19 @@ pub enum TerminatorKind<'tcx> { /// Drop the Lvalue Drop { - value: Lvalue<'tcx>, + location: Lvalue<'tcx>, target: BasicBlock, unwind: Option }, + /// Drop the Lvalue and assign the new value over it + DropAndReplace { + location: Lvalue<'tcx>, + value: Operand<'tcx>, + target: BasicBlock, + unwind: Option, + }, + /// Block ends with a call of a converging function Call { /// The function that’s being called @@ -373,8 +381,14 @@ pub fn successors(&self) -> Cow<[BasicBlock]> { slice::ref_slice(t).into_cow(), Call { destination: None, cleanup: Some(ref c), .. } => slice::ref_slice(c).into_cow(), Call { destination: None, cleanup: None, .. } => (&[]).into_cow(), - Drop { target, unwind: Some(unwind), .. } => vec![target, unwind].into_cow(), - Drop { ref target, .. } => slice::ref_slice(target).into_cow(), + DropAndReplace { target, unwind: Some(unwind), .. } | + Drop { target, unwind: Some(unwind), .. } => { + vec![target, unwind].into_cow() + } + DropAndReplace { ref target, unwind: None, .. } | + Drop { ref target, unwind: None, .. } => { + slice::ref_slice(target).into_cow() + } } } @@ -393,8 +407,12 @@ pub fn successors_mut(&mut self) -> Vec<&mut BasicBlock> { Call { destination: Some((_, ref mut t)), cleanup: None, .. } => vec![t], Call { destination: None, cleanup: Some(ref mut c), .. } => vec![c], Call { destination: None, cleanup: None, .. } => vec![], + DropAndReplace { ref mut target, unwind: Some(ref mut unwind), .. } | Drop { ref mut target, unwind: Some(ref mut unwind), .. } => vec![target, unwind], - Drop { ref mut target, .. } => vec![target] + DropAndReplace { ref mut target, unwind: None, .. } | + Drop { ref mut target, unwind: None, .. } => { + vec![target] + } } } } @@ -461,7 +479,9 @@ pub fn fmt_head(&self, fmt: &mut W) -> fmt::Result { SwitchInt { discr: ref lv, .. 
} => write!(fmt, "switchInt({:?})", lv), Return => write!(fmt, "return"), Resume => write!(fmt, "resume"), - Drop { ref value, .. } => write!(fmt, "drop({:?})", value), + Drop { ref location, .. } => write!(fmt, "drop({:?})", location), + DropAndReplace { ref location, ref value, .. } => + write!(fmt, "replace({:?} <- {:?})", location, value), Call { ref func, ref args, ref destination, .. } => { if let Some((ref destination, _)) = *destination { write!(fmt, "{:?} = ", destination)?; @@ -506,8 +526,12 @@ pub fn fmt_successor_labels(&self) -> Vec> { Call { destination: Some(_), cleanup: None, .. } => vec!["return".into_cow()], Call { destination: None, cleanup: Some(_), .. } => vec!["unwind".into_cow()], Call { destination: None, cleanup: None, .. } => vec![], + DropAndReplace { unwind: None, .. } | Drop { unwind: None, .. } => vec!["return".into_cow()], - Drop { .. } => vec!["return".into_cow(), "unwind".into_cow()], + DropAndReplace { unwind: Some(_), .. } | + Drop { unwind: Some(_), .. 
} => { + vec!["return".into_cow(), "unwind".into_cow()] + } } } } @@ -918,7 +942,7 @@ fn fmt_tuple(fmt: &mut Formatter, lvs: &[Operand]) -> fmt::Result { ppaux::parameterized(fmt, substs, variant_def.did, ppaux::Ns::Value, &[], |tcx| { - tcx.lookup_item_type(variant_def.did).generics + Some(tcx.lookup_item_type(variant_def.did).generics) })?; match variant_def.kind() { @@ -1010,8 +1034,9 @@ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { use self::Literal::*; match *self { Item { def_id, substs } => { - ppaux::parameterized(fmt, substs, def_id, ppaux::Ns::Value, &[], - |tcx| tcx.lookup_item_type(def_id).generics) + ppaux::parameterized( + fmt, substs, def_id, ppaux::Ns::Value, &[], + |tcx| Some(tcx.lookup_item_type(def_id).generics)) } Value { ref value } => { write!(fmt, "const ")?; diff --git a/src/librustc/mir/visit.rs b/src/librustc/mir/visit.rs index 88460651352..17a8d040ab4 100644 --- a/src/librustc/mir/visit.rs +++ b/src/librustc/mir/visit.rs @@ -394,10 +394,20 @@ fn super_terminator_kind(&mut self, TerminatorKind::Return => { } - TerminatorKind::Drop { ref $($mutability)* value, + TerminatorKind::Drop { ref $($mutability)* location, target, unwind } => { - self.visit_lvalue(value, LvalueContext::Drop); + self.visit_lvalue(location, LvalueContext::Drop); + self.visit_branch(block, target); + unwind.map(|t| self.visit_branch(block, t)); + } + + TerminatorKind::DropAndReplace { ref $($mutability)* location, + ref $($mutability)* value, + target, + unwind } => { + self.visit_lvalue(location, LvalueContext::Drop); + self.visit_operand(value); self.visit_branch(block, target); unwind.map(|t| self.visit_branch(block, t)); } diff --git a/src/librustc/ty/item_path.rs b/src/librustc/ty/item_path.rs index ee9983038b1..74c05feb6d1 100644 --- a/src/librustc/ty/item_path.rs +++ b/src/librustc/ty/item_path.rs @@ -13,6 +13,7 @@ use hir::def_id::{DefId, CRATE_DEF_INDEX}; use ty::{self, Ty, TyCtxt}; use syntax::ast; +use syntax::parse::token; use std::cell::Cell; @@ 
-138,7 +139,8 @@ pub fn try_push_visible_item_path(self, buffer: &mut T, external_def_id: DefI } } - cur_path.push(self.sess.cstore.item_name(cur_def)); + cur_path.push(self.sess.cstore.opt_item_name(cur_def).unwrap_or_else(|| + token::intern(""))); match visible_parent_map.get(&cur_def) { Some(&def) => cur_def = def, None => return false, diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index 4069aa6b955..3ab7d90bf8c 100644 --- a/src/librustc/ty/mod.rs +++ b/src/librustc/ty/mod.rs @@ -2503,6 +2503,18 @@ pub fn lookup_item_type(self, did: DefId) -> TypeScheme<'gcx> { || self.sess.cstore.item_type(self.global_tcx(), did)) } + pub fn opt_lookup_item_type(self, did: DefId) -> Option> { + if let Some(scheme) = self.tcache.borrow_mut().get(&did) { + return Some(scheme.clone()); + } + + if did.krate == LOCAL_CRATE { + None + } else { + Some(self.sess.cstore.item_type(self.global_tcx(), did)) + } + } + /// Given the did of a trait, returns its canonical trait ref. pub fn lookup_trait_def(self, did: DefId) -> &'gcx TraitDef<'gcx> { lookup_locally_or_in_crate_store( diff --git a/src/librustc/util/ppaux.rs b/src/librustc/util/ppaux.rs index a851e8354a9..e600e7b72da 100644 --- a/src/librustc/util/ppaux.rs +++ b/src/librustc/util/ppaux.rs @@ -69,15 +69,12 @@ pub enum Ns { Value } -fn number_of_supplied_defaults<'a, 'gcx, 'tcx, GG>(tcx: TyCtxt<'a, 'gcx, 'tcx>, - substs: &subst::Substs, - space: subst::ParamSpace, - get_generics: GG) - -> usize - where GG: FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> ty::Generics<'tcx> +fn number_of_supplied_defaults<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + substs: &subst::Substs, + space: subst::ParamSpace, + generics: ty::Generics<'tcx>) + -> usize { - let generics = get_generics(tcx); - let has_self = substs.self_ty().is_some(); let ty_params = generics.types.get_slice(space); let tps = substs.types.get_slice(space); @@ -115,7 +112,8 @@ pub fn parameterized(f: &mut fmt::Formatter, projections: &[ty::ProjectionPredicate], get_generics: 
GG) -> fmt::Result - where GG: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> ty::Generics<'tcx> + where GG: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) + -> Option> { if let (Ns::Value, Some(self_ty)) = (ns, substs.self_ty()) { write!(f, "<{} as ", self_ty)?; @@ -176,13 +174,12 @@ pub fn parameterized(f: &mut fmt::Formatter, let num_supplied_defaults = if verbose { 0 } else { - // It is important to execute this conditionally, only if -Z - // verbose is false. Otherwise, debug logs can sometimes cause - // ICEs trying to fetch the generics early in the pipeline. This - // is kind of a hacky workaround in that -Z verbose is required to - // avoid those ICEs. ty::tls::with(|tcx| { - number_of_supplied_defaults(tcx, substs, subst::TypeSpace, get_generics) + if let Some(generics) = get_generics(tcx) { + number_of_supplied_defaults(tcx, substs, subst::TypeSpace, generics) + } else { + 0 + } }) }; @@ -312,7 +309,7 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { trait_ref.def_id, Ns::Type, projection_bounds, - |tcx| tcx.lookup_trait_def(trait_ref.def_id).generics.clone()) + |tcx| Some(tcx.lookup_trait_def(trait_ref.def_id).generics.clone())) } } @@ -814,7 +811,7 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { impl<'tcx> fmt::Display for ty::TraitRef<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { parameterized(f, self.substs, self.def_id, Ns::Type, &[], - |tcx| tcx.lookup_trait_def(self.def_id).generics.clone()) + |tcx| Some(tcx.lookup_trait_def(self.def_id).generics.clone())) } } @@ -866,8 +863,9 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { } write!(f, "{} {{", bare_fn.sig.0)?; - parameterized(f, substs, def_id, Ns::Value, &[], - |tcx| tcx.lookup_item_type(def_id).generics)?; + parameterized( + f, substs, def_id, Ns::Value, &[], + |tcx| tcx.opt_lookup_item_type(def_id).map(|t| t.generics))?; write!(f, "}}") } TyFnPtr(ref bare_fn) => { @@ -890,8 +888,12 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 
!tcx.tcache.borrow().contains_key(&def.did) { write!(f, "{}<..>", tcx.item_path_str(def.did)) } else { - parameterized(f, substs, def.did, Ns::Type, &[], - |tcx| tcx.lookup_item_type(def.did).generics) + parameterized( + f, substs, def.did, Ns::Type, &[], + |tcx| { + tcx.opt_lookup_item_type(def.did). + map(|t| t.generics) + }) } }) } diff --git a/src/librustc_borrowck/borrowck/mir/dataflow/mod.rs b/src/librustc_borrowck/borrowck/mir/dataflow/mod.rs index b46b6c368a0..293d2863733 100644 --- a/src/librustc_borrowck/borrowck/mir/dataflow/mod.rs +++ b/src/librustc_borrowck/borrowck/mir/dataflow/mod.rs @@ -200,6 +200,12 @@ pub fn mir(&self) -> &'a Mir<'tcx> { self.mir } pub struct DataflowResults(DataflowState) where O: BitDenotation; +impl DataflowResults { + pub fn sets(&self) -> &AllSets { + &self.0.sets + } +} + // FIXME: This type shouldn't be public, but the graphviz::MirWithFlowState trait // references it in a method signature. Look into using `pub(crate)` to address this. pub struct DataflowState @@ -444,10 +450,17 @@ fn propagate_bits_into_graph_successors_of( repr::TerminatorKind::Return | repr::TerminatorKind::Resume => {} repr::TerminatorKind::Goto { ref target } | - repr::TerminatorKind::Drop { ref target, value: _, unwind: None } => { + repr::TerminatorKind::Drop { ref target, location: _, unwind: None } | + + repr::TerminatorKind::DropAndReplace { + ref target, value: _, location: _, unwind: None + } => { self.propagate_bits_into_entry_set_for(in_out, changed, target); } - repr::TerminatorKind::Drop { ref target, value: _, unwind: Some(ref unwind) } => { + repr::TerminatorKind::Drop { ref target, location: _, unwind: Some(ref unwind) } | + repr::TerminatorKind::DropAndReplace { + ref target, value: _, location: _, unwind: Some(ref unwind) + } => { self.propagate_bits_into_entry_set_for(in_out, changed, target); self.propagate_bits_into_entry_set_for(in_out, changed, unwind); } diff --git a/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs 
b/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs new file mode 100644 index 00000000000..e783420fa06 --- /dev/null +++ b/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs @@ -0,0 +1,1048 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use indexed_set::IdxSetBuf; +use super::gather_moves::{MoveData, MovePathIndex, MovePathContent, Location}; +use super::dataflow::{MaybeInitializedLvals, MaybeUninitializedLvals}; +use super::dataflow::{DataflowResults}; +use super::{drop_flag_effects_for_location, on_all_children_bits}; +use super::{DropFlagState, MoveDataParamEnv}; +use super::patch::MirPatch; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::subst::{Subst, Substs, VecPerParamSpace}; +use rustc::mir::repr::*; +use rustc::mir::transform::{Pass, MirPass, MirSource}; +use rustc::middle::const_val::ConstVal; +use rustc::middle::lang_items; +use rustc::util::nodemap::FnvHashMap; +use rustc_mir::pretty; +use syntax::codemap::Span; + +use std::fmt; +use std::u32; + +pub struct ElaborateDrops; + +impl<'tcx> MirPass<'tcx> for ElaborateDrops { + fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + src: MirSource, mir: &mut Mir<'tcx>) + { + debug!("elaborate_drops({:?} @ {:?})", src, mir.span); + match src { + MirSource::Fn(..) 
=> {}, + _ => return + } + let id = src.item_id(); + let param_env = ty::ParameterEnvironment::for_item(tcx, id); + let move_data = MoveData::gather_moves(mir, tcx); + let elaborate_patch = { + let mir = &*mir; + let env = MoveDataParamEnv { + move_data: move_data, + param_env: param_env + }; + let flow_inits = + super::do_dataflow(tcx, mir, id, &[], &env, + MaybeInitializedLvals::new(tcx, mir)); + let flow_uninits = + super::do_dataflow(tcx, mir, id, &[], &env, + MaybeUninitializedLvals::new(tcx, mir)); + + ElaborateDropsCtxt { + tcx: tcx, + mir: mir, + env: &env, + flow_inits: flow_inits, + flow_uninits: flow_uninits, + drop_flags: FnvHashMap(), + patch: MirPatch::new(mir), + }.elaborate() + }; + pretty::dump_mir(tcx, "elaborate_drops", &0, src, mir, None); + elaborate_patch.apply(mir); + pretty::dump_mir(tcx, "elaborate_drops", &1, src, mir, None); + } +} + +impl Pass for ElaborateDrops {} + +struct InitializationData { + live: IdxSetBuf, + dead: IdxSetBuf +} + +impl InitializationData { + fn apply_location<'a,'tcx>(&mut self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &Mir<'tcx>, + env: &MoveDataParamEnv<'tcx>, + loc: Location) + { + drop_flag_effects_for_location(tcx, mir, env, loc, |path, df| { + debug!("at location {:?}: setting {:?} to {:?}", + loc, path, df); + match df { + DropFlagState::Present => { + self.live.add(&path); + self.dead.remove(&path); + } + DropFlagState::Absent => { + self.dead.add(&path); + self.live.remove(&path); + } + } + }); + } + + fn state(&self, path: MovePathIndex) -> (bool, bool) { + (self.live.contains(&path), self.dead.contains(&path)) + } +} + +impl fmt::Debug for InitializationData { + fn fmt(&self, _f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + Ok(()) + } +} + +struct ElaborateDropsCtxt<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &'a Mir<'tcx>, + env: &'a MoveDataParamEnv<'tcx>, + flow_inits: DataflowResults>, + flow_uninits: DataflowResults>, + drop_flags: FnvHashMap, + patch: MirPatch<'tcx>, +} + 
+#[derive(Copy, Clone, Debug)] +struct DropCtxt<'a, 'tcx: 'a> { + span: Span, + scope: ScopeId, + is_cleanup: bool, + + init_data: &'a InitializationData, + + lvalue: &'a Lvalue<'tcx>, + path: MovePathIndex, + succ: BasicBlock, + unwind: Option +} + +impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { + fn move_data(&self) -> &'b MoveData<'tcx> { &self.env.move_data } + fn param_env(&self) -> &'b ty::ParameterEnvironment<'tcx> { + &self.env.param_env + } + + fn initialization_data_at(&self, loc: Location) -> InitializationData { + let mut data = InitializationData { + live: self.flow_inits.sets().on_entry_set_for(loc.block.index()) + .to_owned(), + dead: self.flow_uninits.sets().on_entry_set_for(loc.block.index()) + .to_owned(), + }; + for stmt in 0..loc.index { + data.apply_location(self.tcx, self.mir, self.env, + Location { block: loc.block, index: stmt }); + } + data + } + + fn create_drop_flag(&mut self, index: MovePathIndex) { + let tcx = self.tcx; + let patch = &mut self.patch; + self.drop_flags.entry(index).or_insert_with(|| { + patch.new_temp(tcx.types.bool) + }); + } + + fn drop_flag(&mut self, index: MovePathIndex) -> Option> { + self.drop_flags.get(&index).map(|t| Lvalue::Temp(*t)) + } + + /// create a patch that elaborates all drops in the input + /// MIR. + fn elaborate(mut self) -> MirPatch<'tcx> + { + self.collect_drop_flags(); + + self.elaborate_drops(); + + self.drop_flags_on_init(); + self.drop_flags_for_fn_rets(); + self.drop_flags_for_args(); + self.drop_flags_for_locs(); + + self.patch + } + + fn path_needs_drop(&self, path: MovePathIndex) -> bool + { + match self.move_data().move_paths[path].content { + MovePathContent::Lvalue(ref lvalue) => { + let ty = self.mir.lvalue_ty(self.tcx, lvalue).to_ty(self.tcx); + debug!("path_needs_drop({:?}, {:?} : {:?})", path, lvalue, ty); + + self.tcx.type_needs_drop_given_env(ty, self.param_env()) + } + _ => false + } + } + + /// Returns whether this lvalue is tracked by drop elaboration. 
This + /// includes all lvalues, except these behind references or arrays. + /// + /// Lvalues behind references or arrays are not tracked by elaboration + /// and are always assumed to be initialized when accessible. As + /// references and indexes can be reseated, trying to track them + /// can only lead to trouble. + fn lvalue_is_tracked(&self, lv: &Lvalue<'tcx>) -> bool + { + if let &Lvalue::Projection(ref data) = lv { + self.lvalue_contents_are_tracked(&data.base) + } else { + true + } + } + + fn lvalue_contents_are_tracked(&self, lv: &Lvalue<'tcx>) -> bool { + let ty = self.mir.lvalue_ty(self.tcx, lv).to_ty(self.tcx); + match ty.sty { + ty::TyArray(..) | ty::TySlice(..) | ty::TyRef(..) | ty::TyRawPtr(..) => { + false + } + _ => self.lvalue_is_tracked(lv) + } + } + + fn collect_drop_flags(&mut self) + { + for bb in self.mir.all_basic_blocks() { + let data = self.mir.basic_block_data(bb); + let terminator = data.terminator(); + let location = match terminator.kind { + TerminatorKind::Drop { ref location, .. } | + TerminatorKind::DropAndReplace { ref location, .. 
} => location, + _ => continue + }; + + if !self.lvalue_is_tracked(location) { + continue + } + + let init_data = self.initialization_data_at(Location { + block: bb, + index: data.statements.len() + }); + + let path = self.move_data().rev_lookup.find(location); + debug!("collect_drop_flags: {:?}, lv {:?} (index {:?})", + bb, location, path); + + on_all_children_bits(self.tcx, self.mir, self.move_data(), path, |child| { + if self.path_needs_drop(child) { + let (maybe_live, maybe_dead) = init_data.state(child); + debug!("collect_drop_flags: collecting {:?} from {:?}@{:?} - {:?}", + child, location, path, (maybe_live, maybe_dead)); + if maybe_live && maybe_dead { + self.create_drop_flag(child) + } + } + }); + } + } + + fn elaborate_drops(&mut self) + { + for bb in self.mir.all_basic_blocks() { + let data = self.mir.basic_block_data(bb); + let loc = Location { block: bb, index: data.statements.len() }; + let terminator = data.terminator(); + + let resume_block = self.patch.resume_block(); + match terminator.kind { + TerminatorKind::Drop { ref location, target, unwind } => { + let init_data = self.initialization_data_at(loc); + let path = self.move_data().rev_lookup.find(location); + self.elaborate_drop(&DropCtxt { + span: terminator.span, + scope: terminator.scope, + is_cleanup: data.is_cleanup, + init_data: &init_data, + lvalue: location, + path: path, + succ: target, + unwind: if data.is_cleanup { + None + } else { + Some(Option::unwrap_or(unwind, resume_block)) + } + }, bb); + } + TerminatorKind::DropAndReplace { ref location, ref value, + target, unwind } => + { + assert!(!data.is_cleanup); + + self.elaborate_replace( + loc, + location, value, + target, unwind + ); + } + _ => continue + } + } + } + + /// Elaborate a MIR `replace` terminator. This instruction + /// is not directly handled by translation, and therefore + /// must be desugared. 
+ /// + /// The desugaring drops the location if needed, and then writes + /// the value (including setting the drop flag) over it in *both* arms. + /// + /// The `replace` terminator can also be called on lvalues that + /// are not tracked by elaboration (for example, + /// `replace x[i] <- tmp0`). The borrow checker requires that + /// these locations are initialized before the assignment, + /// so we just generate an unconditional drop. + fn elaborate_replace( + &mut self, + loc: Location, + location: &Lvalue<'tcx>, + value: &Operand<'tcx>, + target: BasicBlock, + unwind: Option) + { + let bb = loc.block; + let data = self.mir.basic_block_data(bb); + let terminator = data.terminator(); + + let assign = Statement { + kind: StatementKind::Assign(location.clone(), Rvalue::Use(value.clone())), + span: terminator.span, + scope: terminator.scope + }; + + let unwind = unwind.unwrap_or(self.patch.resume_block()); + let unwind = self.patch.new_block(BasicBlockData { + statements: vec![assign.clone()], + terminator: Some(Terminator { + kind: TerminatorKind::Goto { target: unwind }, + ..*terminator + }), + is_cleanup: true + }); + + let target = self.patch.new_block(BasicBlockData { + statements: vec![assign], + terminator: Some(Terminator { + kind: TerminatorKind::Goto { target: target }, + ..*terminator + }), + is_cleanup: data.is_cleanup, + }); + + if !self.lvalue_is_tracked(location) { + // drop and replace behind a pointer/array/whatever. The location + // must be initialized. 
+ debug!("elaborate_drop_and_replace({:?}) - untracked", terminator); + self.patch.patch_terminator(bb, TerminatorKind::Drop { + location: location.clone(), + target: target, + unwind: Some(unwind) + }); + } else { + debug!("elaborate_drop_and_replace({:?}) - tracked", terminator); + let init_data = self.initialization_data_at(loc); + let path = self.move_data().rev_lookup.find(location); + + self.elaborate_drop(&DropCtxt { + span: terminator.span, + scope: terminator.scope, + is_cleanup: data.is_cleanup, + init_data: &init_data, + lvalue: location, + path: path, + succ: target, + unwind: Some(unwind) + }, bb); + on_all_children_bits(self.tcx, self.mir, self.move_data(), path, |child| { + self.set_drop_flag(Location { block: target, index: 0 }, + child, DropFlagState::Present); + self.set_drop_flag(Location { block: unwind, index: 0 }, + child, DropFlagState::Present); + }); + } + } + + /// This elaborates a single drop instruction, located at `bb`, and + /// patches over it. + /// + /// The elaborated drop checks the drop flags to only drop what + /// is initialized. + /// + /// In addition, the relevant drop flags also need to be cleared + /// to avoid double-drops. However, in the middle of a complex + /// drop, one must avoid clearing some of the flags before they + /// are read, as that would cause a memory leak. + /// + /// In particular, when dropping an ADT, multiple fields may be + /// joined together under the `rest` subpath. They are all controlled + /// by the primary drop flag, but only the last rest-field dropped + /// should clear it (and it must also not clear anything else). + /// + /// FIXME: I think we should just control the flags externally + /// and then we do not need this machinery. 
+ fn elaborate_drop<'a>(&mut self, c: &DropCtxt<'a, 'tcx>, bb: BasicBlock) { + debug!("elaborate_drop({:?})", c); + + let mut some_live = false; + let mut some_dead = false; + let mut children_count = 0; + on_all_children_bits( + self.tcx, self.mir, self.move_data(), + c.path, |child| { + if self.path_needs_drop(child) { + let (live, dead) = c.init_data.state(child); + debug!("elaborate_drop: state({:?}) = {:?}", + child, (live, dead)); + some_live |= live; + some_dead |= dead; + children_count += 1; + } + }); + + debug!("elaborate_drop({:?}): live - {:?}", c, + (some_live, some_dead)); + match (some_live, some_dead) { + (false, false) | (false, true) => { + // dead drop - patch it out + self.patch.patch_terminator(bb, TerminatorKind::Goto { + target: c.succ + }); + } + (true, false) => { + // static drop - just set the flag + self.patch.patch_terminator(bb, TerminatorKind::Drop { + location: c.lvalue.clone(), + target: c.succ, + unwind: c.unwind + }); + self.drop_flags_for_drop(c, bb); + } + (true, true) => { + // dynamic drop + let drop_bb = if children_count == 1 || self.must_complete_drop(c) { + self.conditional_drop(c) + } else { + self.open_drop(c) + }; + self.patch.patch_terminator(bb, TerminatorKind::Goto { + target: drop_bb + }); + } + } + } + + /// Return the lvalue and move path for each field of `variant`, + /// (the move path is `None` if the field is a rest field). + fn move_paths_for_fields(&self, + base_lv: &Lvalue<'tcx>, + variant_path: MovePathIndex, + variant: ty::VariantDef<'tcx>, + substs: &'tcx Substs<'tcx>) + -> Vec<(Lvalue<'tcx>, Option)> + { + let move_paths = &self.move_data().move_paths; + variant.fields.iter().enumerate().map(|(i, f)| { + let subpath = + super::move_path_children_matching(move_paths, variant_path, |p| { + match p { + &Projection { + elem: ProjectionElem::Field(idx, _), .. 
+ } => idx.index() == i, + _ => false + } + }); + + let field_ty = + self.tcx.normalize_associated_type_in_env( + &f.ty(self.tcx, substs), + self.param_env() + ); + (base_lv.clone().field(Field::new(i), field_ty), subpath) + }).collect() + } + + /// Create one-half of the drop ladder for a list of fields, and return + /// the list of steps in it in reverse order. + /// + /// `unwind_ladder` is such a list of steps in reverse order, + /// which is called instead of the next step if the drop unwinds + /// (the first field is never reached). If it is `None`, all + /// unwind targets are left blank. + fn drop_halfladder<'a>(&mut self, + c: &DropCtxt<'a, 'tcx>, + unwind_ladder: Option>, + succ: BasicBlock, + fields: &[(Lvalue<'tcx>, Option)], + is_cleanup: bool) + -> Vec + { + let mut succ = succ; + let mut unwind_succ = if is_cleanup { + None + } else { + c.unwind + }; + let mut update_drop_flag = true; + + fields.iter().rev().enumerate().map(|(i, &(ref lv, path))| { + let drop_block = match path { + Some(path) => { + debug!("drop_ladder: for std field {} ({:?})", i, lv); + + self.elaborated_drop_block(&DropCtxt { + span: c.span, + scope: c.scope, + is_cleanup: is_cleanup, + init_data: c.init_data, + lvalue: lv, + path: path, + succ: succ, + unwind: unwind_succ, + }) + } + None => { + debug!("drop_ladder: for rest field {} ({:?})", i, lv); + + let blk = self.complete_drop(&DropCtxt { + span: c.span, + scope: c.scope, + is_cleanup: is_cleanup, + init_data: c.init_data, + lvalue: lv, + path: c.path, + succ: succ, + unwind: unwind_succ, + }, update_drop_flag); + + // the drop flag has been updated - updating + // it again would clobber it. 
+ update_drop_flag = false; + + blk + } + }; + + succ = drop_block; + unwind_succ = unwind_ladder.as_ref().map(|p| p[i]); + + drop_block + }).collect() + } + + /// Create a full drop ladder, consisting of 2 connected half-drop-ladders + /// + /// For example, with 3 fields, the drop ladder is + /// + /// .d0: + /// ELAB(drop location.0 [target=.d1, unwind=.c1]) + /// .d1: + /// ELAB(drop location.1 [target=.d2, unwind=.c2]) + /// .d2: + /// ELAB(drop location.2 [target=`c.succ`, unwind=`c.unwind`]) + /// .c1: + /// ELAB(drop location.1 [target=.c2]) + /// .c2: + /// ELAB(drop location.2 [target=`c.unwind]) + fn drop_ladder<'a>(&mut self, + c: &DropCtxt<'a, 'tcx>, + fields: &[(Lvalue<'tcx>, Option)]) + -> BasicBlock + { + debug!("drop_ladder({:?}, {:?})", c, fields); + let unwind_ladder = if c.is_cleanup { + None + } else { + Some(self.drop_halfladder(c, None, c.unwind.unwrap(), &fields, true)) + }; + + self.drop_halfladder(c, unwind_ladder, c.succ, fields, c.is_cleanup) + .last().cloned().unwrap_or(c.succ) + } + + fn open_drop_for_tuple<'a>(&mut self, c: &DropCtxt<'a, 'tcx>, tys: &[Ty<'tcx>]) + -> BasicBlock + { + debug!("open_drop_for_tuple({:?}, {:?})", c, tys); + + let fields: Vec<_> = tys.iter().enumerate().map(|(i, &ty)| { + (c.lvalue.clone().field(Field::new(i), ty), + super::move_path_children_matching( + &self.move_data().move_paths, c.path, |proj| match proj { + &Projection { + elem: ProjectionElem::Field(f, _), .. + } => f.index() == i, + _ => false + } + )) + }).collect(); + + self.drop_ladder(c, &fields) + } + + fn open_drop_for_box<'a>(&mut self, c: &DropCtxt<'a, 'tcx>, ty: Ty<'tcx>) + -> BasicBlock + { + debug!("open_drop_for_box({:?}, {:?})", c, ty); + + let interior_path = super::move_path_children_matching( + &self.move_data().move_paths, c.path, |proj| match proj { + &Projection { elem: ProjectionElem::Deref, .. 
} => true, + _ => false + }).unwrap(); + + let interior = c.lvalue.clone().deref(); + let inner_c = DropCtxt { + lvalue: &interior, + unwind: c.unwind.map(|u| { + self.box_free_block(c, ty, u, true) + }), + succ: self.box_free_block(c, ty, c.succ, c.is_cleanup), + path: interior_path, + ..*c + }; + + self.elaborated_drop_block(&inner_c) + } + + fn open_drop_for_variant<'a>(&mut self, + c: &DropCtxt<'a, 'tcx>, + drop_block: &mut Option, + adt: ty::AdtDef<'tcx>, + substs: &'tcx Substs<'tcx>, + variant_index: usize) + -> BasicBlock + { + let move_paths = &self.move_data().move_paths; + + let subpath = super::move_path_children_matching( + move_paths, c.path, |proj| match proj { + &Projection { + elem: ProjectionElem::Downcast(_, idx), .. + } => idx == variant_index, + _ => false + }); + + if let Some(variant_path) = subpath { + let base_lv = c.lvalue.clone().elem( + ProjectionElem::Downcast(adt, variant_index) + ); + let fields = self.move_paths_for_fields( + &base_lv, + variant_path, + &adt.variants[variant_index], + substs); + self.drop_ladder(c, &fields) + } else { + // variant not found - drop the entire enum + if let None = *drop_block { + *drop_block = Some(self.complete_drop(c, true)); + } + return drop_block.unwrap(); + } + } + + fn open_drop_for_adt<'a>(&mut self, c: &DropCtxt<'a, 'tcx>, + adt: ty::AdtDef<'tcx>, substs: &'tcx Substs<'tcx>) + -> BasicBlock { + debug!("open_drop_for_adt({:?}, {:?}, {:?})", c, adt, substs); + + let mut drop_block = None; + + match adt.variants.len() { + 1 => { + let fields = self.move_paths_for_fields( + c.lvalue, + c.path, + &adt.variants[0], + substs + ); + self.drop_ladder(c, &fields) + } + _ => { + let variant_drops : Vec = + (0..adt.variants.len()).map(|i| { + self.open_drop_for_variant(c, &mut drop_block, + adt, substs, i) + }).collect(); + + // If there are multiple variants, then if something + // is present within the enum the discriminant, tracked + // by the rest path, must be initialized. 
+ // + // Additionally, we do not want to switch on the + // discriminant after it is free-ed, because that + // way lies only trouble. + + let switch_block = self.new_block( + c, c.is_cleanup, TerminatorKind::Switch { + discr: c.lvalue.clone(), + adt_def: adt, + targets: variant_drops + }); + + self.drop_flag_test_block(c, switch_block) + } + } + } + + /// The slow-path - create an "open", elaborated drop for a type + /// which is moved-out-of only partially, and patch `bb` to a jump + /// to it. This must not be called on ADTs with a destructor, + /// as these can't be moved-out-of, except for `Box`, which is + /// special-cased. + /// + /// This creates a "drop ladder" that drops the needed fields of the + /// ADT, both in the success case or if one of the destructors fail. + fn open_drop<'a>(&mut self, c: &DropCtxt<'a, 'tcx>) -> BasicBlock { + let ty = self.mir.lvalue_ty(self.tcx, c.lvalue).to_ty(self.tcx); + match ty.sty { + ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => { + self.open_drop_for_adt(c, def, substs) + } + ty::TyTuple(tys) | ty::TyClosure(_, ty::ClosureSubsts { + upvar_tys: tys, .. + }) => { + self.open_drop_for_tuple(c, tys) + } + ty::TyBox(ty) => { + self.open_drop_for_box(c, ty) + } + _ => bug!("open drop from non-ADT `{:?}`", ty) + } + } + + /// Return a basic block that drop an lvalue using the context + /// and path in `c`. If `update_drop_flag` is true, also + /// clear `c`. + /// + /// if FLAG(c.path) + /// if(update_drop_flag) FLAG(c.path) = false + /// drop(c.lv) + fn complete_drop<'a>( + &mut self, + c: &DropCtxt<'a, 'tcx>, + update_drop_flag: bool) + -> BasicBlock + { + debug!("complete_drop({:?},{:?})", c, update_drop_flag); + + let drop_block = self.drop_block(c); + if update_drop_flag { + self.set_drop_flag( + Location { block: drop_block, index: 0 }, + c.path, + DropFlagState::Absent + ); + } + + self.drop_flag_test_block(c, drop_block) + } + + /// Create a simple conditional drop. 
+ /// + /// if FLAG(c.lv) + /// FLAGS(c.lv) = false + /// drop(c.lv) + fn conditional_drop<'a>(&mut self, c: &DropCtxt<'a, 'tcx>) + -> BasicBlock + { + debug!("conditional_drop({:?})", c); + let drop_bb = self.drop_block(c); + self.drop_flags_for_drop(c, drop_bb); + + self.drop_flag_test_block(c, drop_bb) + } + + fn new_block<'a>(&mut self, + c: &DropCtxt<'a, 'tcx>, + is_cleanup: bool, + k: TerminatorKind<'tcx>) + -> BasicBlock + { + self.patch.new_block(BasicBlockData { + statements: vec![], + terminator: Some(Terminator { + scope: c.scope, span: c.span, kind: k + }), + is_cleanup: is_cleanup + }) + } + + fn elaborated_drop_block<'a>(&mut self, c: &DropCtxt<'a, 'tcx>) -> BasicBlock { + debug!("elaborated_drop_block({:?})", c); + let blk = self.drop_block(c); + self.elaborate_drop(c, blk); + blk + } + + fn drop_flag_test_block<'a>(&mut self, + c: &DropCtxt<'a, 'tcx>, + on_set: BasicBlock) + -> BasicBlock { + self.drop_flag_test_block_with_succ(c, c.is_cleanup, on_set, c.succ) + } + + fn drop_flag_test_block_with_succ<'a>(&mut self, + c: &DropCtxt<'a, 'tcx>, + is_cleanup: bool, + on_set: BasicBlock, + on_unset: BasicBlock) + -> BasicBlock + { + let (maybe_live, maybe_dead) = c.init_data.state(c.path); + debug!("drop_flag_test_block({:?},{:?},{:?}) - {:?}", + c, is_cleanup, on_set, (maybe_live, maybe_dead)); + + match (maybe_live, maybe_dead) { + (false, _) => on_unset, + (true, false) => on_set, + (true, true) => { + let flag = self.drop_flag(c.path).unwrap(); + self.new_block(c, is_cleanup, TerminatorKind::If { + cond: Operand::Consume(flag), + targets: (on_set, on_unset) + }) + } + } + } + + fn drop_block<'a>(&mut self, c: &DropCtxt<'a, 'tcx>) -> BasicBlock { + self.new_block(c, c.is_cleanup, TerminatorKind::Drop { + location: c.lvalue.clone(), + target: c.succ, + unwind: c.unwind + }) + } + + fn box_free_block<'a>( + &mut self, + c: &DropCtxt<'a, 'tcx>, + ty: Ty<'tcx>, + target: BasicBlock, + is_cleanup: bool + ) -> BasicBlock { + let block = 
self.unelaborated_free_block(c, ty, target, is_cleanup); + self.drop_flag_test_block_with_succ(c, is_cleanup, block, target) + } + + fn unelaborated_free_block<'a>( + &mut self, + c: &DropCtxt<'a, 'tcx>, + ty: Ty<'tcx>, + target: BasicBlock, + is_cleanup: bool + ) -> BasicBlock { + let mut statements = vec![]; + if let Some(&flag) = self.drop_flags.get(&c.path) { + statements.push(Statement { + span: c.span, + scope: c.scope, + kind: StatementKind::Assign( + Lvalue::Temp(flag), + self.constant_bool(c.span, false) + ) + }); + } + + let tcx = self.tcx; + let unit_temp = Lvalue::Temp(self.patch.new_temp(tcx.mk_nil())); + let free_func = tcx.lang_items.require(lang_items::BoxFreeFnLangItem) + .unwrap_or_else(|e| tcx.sess.fatal(&e)); + let substs = tcx.mk_substs(Substs::new( + VecPerParamSpace::new(vec![], vec![], vec![ty]), + VecPerParamSpace::new(vec![], vec![], vec![]) + )); + let fty = tcx.lookup_item_type(free_func).ty.subst(tcx, substs); + + self.patch.new_block(BasicBlockData { + statements: statements, + terminator: Some(Terminator { + scope: c.scope, span: c.span, kind: TerminatorKind::Call { + func: Operand::Constant(Constant { + span: c.span, + ty: fty, + literal: Literal::Item { + def_id: free_func, + substs: substs + } + }), + args: vec![Operand::Consume(c.lvalue.clone())], + destination: Some((unit_temp, target)), + cleanup: None + } + }), + is_cleanup: is_cleanup + }) + } + + fn must_complete_drop<'a>(&self, c: &DropCtxt<'a, 'tcx>) -> bool { + // if we have a destuctor, we must *not* split the drop. + + // dataflow can create unneeded children in some cases + // - be sure to ignore them. + + let ty = self.mir.lvalue_ty(self.tcx, c.lvalue).to_ty(self.tcx); + + match ty.sty { + ty::TyStruct(def, _) | ty::TyEnum(def, _) => { + if def.has_dtor() { + self.tcx.sess.span_warn( + c.span, + &format!("dataflow bug??? 
moving out of type with dtor {:?}", + c)); + true + } else { + false + } + } + _ => false + } + } + + fn constant_bool(&self, span: Span, val: bool) -> Rvalue<'tcx> { + Rvalue::Use(Operand::Constant(Constant { + span: span, + ty: self.tcx.types.bool, + literal: Literal::Value { value: ConstVal::Bool(val) } + })) + } + + fn set_drop_flag(&mut self, loc: Location, path: MovePathIndex, val: DropFlagState) { + if let Some(&flag) = self.drop_flags.get(&path) { + let span = self.patch.context_for_location(self.mir, loc).0; + let val = self.constant_bool(span, val.value()); + self.patch.add_assign(loc, Lvalue::Temp(flag), val); + } + } + + fn drop_flags_on_init(&mut self) { + let loc = Location { block: START_BLOCK, index: 0 }; + let span = self.patch.context_for_location(self.mir, loc).0; + let false_ = self.constant_bool(span, false); + for flag in self.drop_flags.values() { + self.patch.add_assign(loc, Lvalue::Temp(*flag), false_.clone()); + } + } + + fn drop_flags_for_fn_rets(&mut self) { + for bb in self.mir.all_basic_blocks() { + let data = self.mir.basic_block_data(bb); + if let TerminatorKind::Call { + destination: Some((ref lv, tgt)), cleanup: Some(_), .. + } = data.terminator().kind { + assert!(!self.patch.is_patched(bb)); + + let loc = Location { block: tgt, index: 0 }; + let path = self.move_data().rev_lookup.find(lv); + on_all_children_bits( + self.tcx, self.mir, self.move_data(), path, + |child| self.set_drop_flag(loc, child, DropFlagState::Present) + ); + } + } + } + + fn drop_flags_for_args(&mut self) { + let loc = Location { block: START_BLOCK, index: 0 }; + super::drop_flag_effects_for_function_entry( + self.tcx, self.mir, self.env, |path, ds| { + self.set_drop_flag(loc, path, ds); + } + ) + } + + fn drop_flags_for_locs(&mut self) { + // We intentionally iterate only over the *old* basic blocks. + // + // Basic blocks created by drop elaboration update their + // drop flags by themselves, to avoid the drop flags being + // clobbered before they are read. 
+ + for bb in self.mir.all_basic_blocks() { + let data = self.mir.basic_block_data(bb); + debug!("drop_flags_for_locs({:?})", data); + for i in 0..(data.statements.len()+1) { + debug!("drop_flag_for_locs: stmt {}", i); + let mut allow_initializations = true; + if i == data.statements.len() { + match data.terminator().kind { + TerminatorKind::Drop { .. } => { + // drop elaboration should handle that by itself + continue + } + TerminatorKind::DropAndReplace { .. } => { + // this contains the move of the source and + // the initialization of the destination. We + // only want the former - the latter is handled + // by the elaboration code and must be done + // *after* the destination is dropped. + assert!(self.patch.is_patched(bb)); + allow_initializations = false; + } + _ => { + assert!(!self.patch.is_patched(bb)); + } + } + } + let loc = Location { block: bb, index: i }; + super::drop_flag_effects_for_location( + self.tcx, self.mir, self.env, loc, |path, ds| { + if ds == DropFlagState::Absent || allow_initializations { + self.set_drop_flag(loc, path, ds) + } + } + ) + } + + // There may be a critical edge after this call, + // so mark the return as initialized *before* the + // call. + if let TerminatorKind::Call { + destination: Some((ref lv, _)), cleanup: None, .. 
+ } = data.terminator().kind { + assert!(!self.patch.is_patched(bb)); + + let loc = Location { block: bb, index: data.statements.len() }; + let path = self.move_data().rev_lookup.find(lv); + on_all_children_bits( + self.tcx, self.mir, self.move_data(), path, + |child| self.set_drop_flag(loc, child, DropFlagState::Present) + ); + } + } + } + + fn drop_flags_for_drop<'a>(&mut self, + c: &DropCtxt<'a, 'tcx>, + bb: BasicBlock) + { + let loc = self.patch.terminator_loc(self.mir, bb); + on_all_children_bits( + self.tcx, self.mir, self.move_data(), c.path, + |child| self.set_drop_flag(loc, child, DropFlagState::Absent) + ); + } +} diff --git a/src/librustc_borrowck/borrowck/mir/gather_moves.rs b/src/librustc_borrowck/borrowck/mir/gather_moves.rs index 48511cd5ebc..fcaa655f749 100644 --- a/src/librustc_borrowck/borrowck/mir/gather_moves.rs +++ b/src/librustc_borrowck/borrowck/mir/gather_moves.rs @@ -671,10 +671,18 @@ fn gather_moves<'a, 'tcx>(mir: &Mir<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> MoveD let _ = discr; } - TerminatorKind::Drop { value: ref lval, target: _, unwind: _ } => { + TerminatorKind::Drop { ref location, target: _, unwind: _ } => { let source = Location { block: bb, index: bb_data.statements.len() }; - bb_ctxt.on_move_out_lval(SK::Drop, lval, source); + bb_ctxt.on_move_out_lval(SK::Drop, location, source); + } + TerminatorKind::DropAndReplace { ref location, ref value, .. 
} => { + let assigned_path = bb_ctxt.builder.move_path_for(location); + bb_ctxt.path_map.fill_to(assigned_path.idx()); + + let source = Location { block: bb, + index: bb_data.statements.len() }; + bb_ctxt.on_operand(SK::Use, value, source); } TerminatorKind::Call { ref func, ref args, ref destination, cleanup: _ } => { let source = Location { block: bb, diff --git a/src/librustc_borrowck/borrowck/mir/mod.rs b/src/librustc_borrowck/borrowck/mir/mod.rs index 1b9d08bade7..007cde156f4 100644 --- a/src/librustc_borrowck/borrowck/mir/mod.rs +++ b/src/librustc_borrowck/borrowck/mir/mod.rs @@ -24,8 +24,10 @@ use rustc::ty::{self, TyCtxt}; mod abs_domain; +pub mod elaborate_drops; mod dataflow; mod gather_moves; +mod patch; // mod graphviz; use self::dataflow::{BitDenotation}; @@ -34,7 +36,7 @@ use self::dataflow::{MaybeInitializedLvals, MaybeUninitializedLvals}; use self::dataflow::{DefinitelyInitializedLvals}; use self::gather_moves::{MoveData, MovePathIndex, Location}; -use self::gather_moves::{MovePathContent}; +use self::gather_moves::{MovePathContent, MovePathData}; fn has_rustc_mir_with(attrs: &[ast::Attribute], name: &str) -> Option> { for attr in attrs { @@ -202,6 +204,37 @@ enum DropFlagState { Absent, // i.e. 
deinitialized or "moved" } +impl DropFlagState { + fn value(self) -> bool { + match self { + DropFlagState::Present => true, + DropFlagState::Absent => false + } + } +} + +fn move_path_children_matching<'tcx, F>(move_paths: &MovePathData<'tcx>, + path: MovePathIndex, + mut cond: F) + -> Option + where F: FnMut(&repr::LvalueProjection<'tcx>) -> bool +{ + let mut next_child = move_paths[path].first_child; + while let Some(child_index) = next_child { + match move_paths[child_index].content { + MovePathContent::Lvalue(repr::Lvalue::Projection(ref proj)) => { + if cond(proj) { + return Some(child_index) + } + } + _ => {} + } + next_child = move_paths[child_index].next_sibling; + } + + None +} + fn on_all_children_bits<'a, 'tcx, F>( tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &Mir<'tcx>, @@ -309,15 +342,23 @@ fn drop_flag_effects_for_location<'a, 'tcx, F>( Some(stmt) => match stmt.kind { repr::StatementKind::Assign(ref lvalue, _) => { debug!("drop_flag_effects: assignment {:?}", stmt); - on_all_children_bits(tcx, mir, move_data, + on_all_children_bits(tcx, mir, move_data, move_data.rev_lookup.find(lvalue), |moi| callback(moi, DropFlagState::Present)) } }, None => { - // terminator - no move-ins except for function return edge - let term = bb.terminator(); - debug!("drop_flag_effects: terminator {:?}", term); + debug!("drop_flag_effects: replace {:?}", bb.terminator()); + match bb.terminator().kind { + repr::TerminatorKind::DropAndReplace { ref location, .. } => { + on_all_children_bits(tcx, mir, move_data, + move_data.rev_lookup.find(location), + |moi| callback(moi, DropFlagState::Present)) + } + _ => { + // other terminators do not contain move-ins + } + } } } } diff --git a/src/librustc_borrowck/borrowck/mir/patch.rs b/src/librustc_borrowck/borrowck/mir/patch.rs new file mode 100644 index 00000000000..b390c19af1a --- /dev/null +++ b/src/librustc_borrowck/borrowck/mir/patch.rs @@ -0,0 +1,184 @@ +// Copyright 2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::gather_moves::Location; +use rustc::ty::Ty; +use rustc::mir::repr::*; +use syntax::codemap::Span; + +use std::iter; +use std::u32; + +/// This struct represents a patch to MIR, which can add +/// new statements and basic blocks and patch over block +/// terminators. +pub struct MirPatch<'tcx> { + patch_map: Vec>>, + new_blocks: Vec>, + new_statements: Vec<(Location, StatementKind<'tcx>)>, + new_temps: Vec>, + resume_block: BasicBlock, + next_temp: u32, +} + +impl<'tcx> MirPatch<'tcx> { + pub fn new(mir: &Mir<'tcx>) -> Self { + let mut result = MirPatch { + patch_map: iter::repeat(None) + .take(mir.basic_blocks.len()).collect(), + new_blocks: vec![], + new_temps: vec![], + new_statements: vec![], + next_temp: mir.temp_decls.len() as u32, + resume_block: START_BLOCK + }; + + // make sure the MIR we create has a resume block. It is + // completely legal to convert jumps to the resume block + // to jumps to None, but we occasionally have to add + // instructions just before that. 
+ + let mut resume_block = None; + let mut resume_stmt_block = None; + for block in mir.all_basic_blocks() { + let data = mir.basic_block_data(block); + if let TerminatorKind::Resume = data.terminator().kind { + if data.statements.len() > 0 { + resume_stmt_block = Some(block); + } else { + resume_block = Some(block); + } + break + } + } + let resume_block = resume_block.unwrap_or_else(|| { + result.new_block(BasicBlockData { + statements: vec![], + terminator: Some(Terminator { + span: mir.span, + scope: ScopeId::new(0), + kind: TerminatorKind::Resume + }), + is_cleanup: true + })}); + result.resume_block = resume_block; + if let Some(resume_stmt_block) = resume_stmt_block { + result.patch_terminator(resume_stmt_block, TerminatorKind::Goto { + target: resume_block + }); + } + result + } + + pub fn resume_block(&self) -> BasicBlock { + self.resume_block + } + + pub fn is_patched(&self, bb: BasicBlock) -> bool { + self.patch_map[bb.index()].is_some() + } + + pub fn terminator_loc(&self, mir: &Mir<'tcx>, bb: BasicBlock) -> Location { + let offset = match bb.index().checked_sub(mir.basic_blocks.len()) { + Some(index) => self.new_blocks[index].statements.len(), + None => mir.basic_block_data(bb).statements.len() + }; + Location { + block: bb, + index: offset + } + } + + pub fn new_temp(&mut self, ty: Ty<'tcx>) -> u32 { + let index = self.next_temp; + assert!(self.next_temp < u32::MAX); + self.next_temp += 1; + self.new_temps.push(TempDecl { ty: ty }); + index + } + + pub fn new_block(&mut self, data: BasicBlockData<'tcx>) -> BasicBlock { + let block = BasicBlock::new(self.patch_map.len()); + debug!("MirPatch: new_block: {:?}: {:?}", block, data); + self.new_blocks.push(data); + self.patch_map.push(None); + block + } + + pub fn patch_terminator(&mut self, block: BasicBlock, new: TerminatorKind<'tcx>) { + assert!(self.patch_map[block.index()].is_none()); + debug!("MirPatch: patch_terminator({:?}, {:?})", block, new); + self.patch_map[block.index()] = Some(new); + } + + 
pub fn add_statement(&mut self, loc: Location, stmt: StatementKind<'tcx>) { + debug!("MirPatch: add_statement({:?}, {:?})", loc, stmt); + self.new_statements.push((loc, stmt)); + } + + pub fn add_assign(&mut self, loc: Location, lv: Lvalue<'tcx>, rv: Rvalue<'tcx>) { + self.add_statement(loc, StatementKind::Assign(lv, rv)); + } + + pub fn apply(self, mir: &mut Mir<'tcx>) { + debug!("MirPatch: {:?} new temps, starting from index {}: {:?}", + self.new_temps.len(), mir.temp_decls.len(), self.new_temps); + debug!("MirPatch: {} new blocks, starting from index {}", + self.new_blocks.len(), mir.basic_blocks.len()); + mir.basic_blocks.extend(self.new_blocks); + mir.temp_decls.extend(self.new_temps); + for (src, patch) in self.patch_map.into_iter().enumerate() { + if let Some(patch) = patch { + debug!("MirPatch: patching block {:?}", src); + mir.basic_blocks[src].terminator_mut().kind = patch; + } + } + + let mut new_statements = self.new_statements; + new_statements.sort_by(|u,v| u.0.cmp(&v.0)); + + let mut delta = 0; + let mut last_bb = START_BLOCK; + for (mut loc, stmt) in new_statements { + if loc.block != last_bb { + delta = 0; + last_bb = loc.block; + } + debug!("MirPatch: adding statement {:?} at loc {:?}+{}", + stmt, loc, delta); + loc.index += delta; + let (span, scope) = Self::context_for_index( + mir.basic_block_data(loc.block), loc + ); + mir.basic_block_data_mut(loc.block).statements.insert( + loc.index, Statement { + span: span, + scope: scope, + kind: stmt + }); + delta += 1; + } + } + + pub fn context_for_index(data: &BasicBlockData, loc: Location) -> (Span, ScopeId) { + match data.statements.get(loc.index) { + Some(stmt) => (stmt.span, stmt.scope), + None => (data.terminator().span, data.terminator().scope) + } + } + + pub fn context_for_location(&self, mir: &Mir, loc: Location) -> (Span, ScopeId) { + let data = match loc.block.index().checked_sub(mir.basic_blocks.len()) { + Some(new) => &self.new_blocks[new], + None => mir.basic_block_data(loc.block) + }; + 
Self::context_for_index(data, loc) + } +} diff --git a/src/librustc_borrowck/borrowck/mod.rs b/src/librustc_borrowck/borrowck/mod.rs index 819717628d6..5acbb18a2ff 100644 --- a/src/librustc_borrowck/borrowck/mod.rs +++ b/src/librustc_borrowck/borrowck/mod.rs @@ -18,6 +18,8 @@ pub use self::AliasableViolationKind::*; pub use self::MovedValueUseKind::*; +pub use self::mir::elaborate_drops::ElaborateDrops; + use self::InteriorKind::*; use rustc::dep_graph::DepNode; diff --git a/src/librustc_borrowck/lib.rs b/src/librustc_borrowck/lib.rs index 9d7e05ed9fa..cc694c59245 100644 --- a/src/librustc_borrowck/lib.rs +++ b/src/librustc_borrowck/lib.rs @@ -39,7 +39,7 @@ pub use borrowck::check_crate; pub use borrowck::build_borrowck_dataflow_data_for_fn; -pub use borrowck::{AnalysisData, BorrowckCtxt}; +pub use borrowck::{AnalysisData, BorrowckCtxt, ElaborateDrops}; // NB: This module needs to be declared first so diagnostics are // registered before they are used. diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index 96a1bdf62de..b28d203ed8d 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -1037,7 +1037,12 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, passes.push_pass(box mir::transform::no_landing_pads::NoLandingPads); passes.push_pass(box mir::transform::remove_dead_blocks::RemoveDeadBlocks); passes.push_pass(box mir::transform::erase_regions::EraseRegions); - passes.push_pass(box mir::transform::break_cleanup_edges::BreakCleanupEdges); + passes.push_pass(box mir::transform::add_call_guards::AddCallGuards); + passes.push_pass(box borrowck::ElaborateDrops); + passes.push_pass(box mir::transform::no_landing_pads::NoLandingPads); + passes.push_pass(box mir::transform::simplify_cfg::SimplifyCfg); + passes.push_pass(box mir::transform::add_call_guards::AddCallGuards); + passes.push_pass(box mir::transform::dump_mir::DumpMir("pre_trans")); passes.run_passes(tcx, &mut mir_map); }); diff --git 
a/src/librustc_metadata/csearch.rs b/src/librustc_metadata/csearch.rs index 5d42f8c1d6f..3134a3844bc 100644 --- a/src/librustc_metadata/csearch.rs +++ b/src/librustc_metadata/csearch.rs @@ -142,6 +142,11 @@ fn item_name(&self, def: DefId) -> ast::Name { decoder::get_item_name(&self.intr, &cdata, def.index) } + fn opt_item_name(&self, def: DefId) -> Option { + self.dep_graph.read(DepNode::MetaData(def)); + let cdata = self.get_crate_data(def.krate); + decoder::maybe_get_item_name(&self.intr, &cdata, def.index) + } fn inherent_implementations_for_type(&self, def_id: DefId) -> Vec { diff --git a/src/librustc_metadata/decoder.rs b/src/librustc_metadata/decoder.rs index 1131e409aa0..68387941b65 100644 --- a/src/librustc_metadata/decoder.rs +++ b/src/librustc_metadata/decoder.rs @@ -285,12 +285,17 @@ fn item_trait_ref<'a, 'tcx>(doc: rbml::Doc, tcx: TyCtxt<'a, 'tcx, 'tcx>, cdata: } fn item_name(intr: &IdentInterner, item: rbml::Doc) -> ast::Name { - let name = reader::get_doc(item, tag_paths_data_name); - let string = name.as_str_slice(); - match intr.find(string) { - None => token::intern(string), - Some(val) => val, - } + maybe_item_name(intr, item).expect("no item in item_name") +} + +fn maybe_item_name(intr: &IdentInterner, item: rbml::Doc) -> Option { + reader::maybe_get_doc(item, tag_paths_data_name).map(|name| { + let string = name.as_str_slice(); + match intr.find(string) { + None => token::intern(string), + Some(val) => val, + } + }) } fn family_to_variant_kind<'tcx>(family: Family) -> Option { @@ -792,6 +797,11 @@ pub fn get_item_name(intr: &IdentInterner, cdata: Cmd, id: DefIndex) -> ast::Nam item_name(intr, cdata.lookup_item(id)) } +pub fn maybe_get_item_name(intr: &IdentInterner, cdata: Cmd, id: DefIndex) + -> Option { + maybe_item_name(intr, cdata.lookup_item(id)) +} + pub fn maybe_get_item_ast<'a, 'tcx>(cdata: Cmd, tcx: TyCtxt<'a, 'tcx, 'tcx>, id: DefIndex) -> FoundAst<'tcx> { debug!("Looking up item: {:?}", id); diff --git 
a/src/librustc_mir/build/expr/stmt.rs b/src/librustc_mir/build/expr/stmt.rs index 9629396f48b..3324467e70d 100644 --- a/src/librustc_mir/build/expr/stmt.rs +++ b/src/librustc_mir/build/expr/stmt.rs @@ -34,29 +34,25 @@ pub fn stmt_expr(&mut self, mut block: BasicBlock, expr: Expr<'tcx>) -> BlockAnd let scope_id = this.innermost_scope_id(); let lhs_span = lhs.span; - let lhs_ty = lhs.ty; - let rhs_ty = rhs.ty; - - let lhs_needs_drop = this.hir.needs_drop(lhs_ty); - let rhs_needs_drop = this.hir.needs_drop(rhs_ty); - // Note: we evaluate assignments right-to-left. This // is better for borrowck interaction with overloaded // operators like x[j] = x[i]. // Generate better code for things that don't need to be // dropped. - let rhs = if lhs_needs_drop || rhs_needs_drop { - let op = unpack!(block = this.as_operand(block, rhs)); - Rvalue::Use(op) + if this.hir.needs_drop(lhs.ty) { + let rhs = unpack!(block = this.as_operand(block, rhs)); + let lhs = unpack!(block = this.as_lvalue(block, lhs)); + unpack!(block = this.build_drop_and_replace( + block, lhs_span, lhs, rhs + )); + block.unit() } else { - unpack!(block = this.as_rvalue(block, rhs)) - }; - - let lhs = unpack!(block = this.as_lvalue(block, lhs)); - unpack!(block = this.build_drop(block, lhs_span, lhs.clone(), lhs_ty)); - this.cfg.push_assign(block, scope_id, expr_span, &lhs, rhs); - block.unit() + let rhs = unpack!(block = this.as_rvalue(block, rhs)); + let lhs = unpack!(block = this.as_lvalue(block, lhs)); + this.cfg.push_assign(block, scope_id, expr_span, &lhs, rhs); + block.unit() + } } ExprKind::AssignOp { op, lhs, rhs } => { // FIXME(#28160) there is an interesting semantics diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index 071c8d618c8..cd81fc764f4 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -139,7 +139,7 @@ struct DropData<'tcx> { span: Span, /// lvalue to drop - value: Lvalue<'tcx>, + location: Lvalue<'tcx>, /// The cached block 
for the cleanups-on-diverge path. This block /// contains code to run the current drop and all the preceding @@ -402,7 +402,7 @@ pub fn schedule_drop(&mut self, // the drop that comes before it in the vector. scope.drops.push(DropData { span: span, - value: lvalue.clone(), + location: lvalue.clone(), cached_block: None }); return; @@ -497,7 +497,7 @@ pub fn diverge_cleanup(&mut self) -> Option { pub fn build_drop(&mut self, block: BasicBlock, span: Span, - value: Lvalue<'tcx>, + location: Lvalue<'tcx>, ty: Ty<'tcx>) -> BlockAnd<()> { if !self.hir.needs_drop(ty) { return block.unit(); @@ -509,7 +509,7 @@ pub fn build_drop(&mut self, scope_id, span, TerminatorKind::Drop { - value: value, + location: location, target: next_target, unwind: diverge_target, }); @@ -517,6 +517,27 @@ pub fn build_drop(&mut self, } + + pub fn build_drop_and_replace(&mut self, + block: BasicBlock, + span: Span, + location: Lvalue<'tcx>, + value: Operand<'tcx>) -> BlockAnd<()> { + let scope_id = self.innermost_scope_id(); + let next_target = self.cfg.start_new_block(); + let diverge_target = self.diverge_cleanup(); + self.cfg.terminate(block, + scope_id, + span, + TerminatorKind::DropAndReplace { + location: location, + value: value, + target: next_target, + unwind: diverge_target, + }); + next_target.unit() + } + // Panicking // ========= // FIXME: should be moved into their own module @@ -653,7 +674,7 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>, }); let next = cfg.start_new_block(); cfg.terminate(block, scope.id, drop_data.span, TerminatorKind::Drop { - value: drop_data.value.clone(), + location: drop_data.location.clone(), target: next, unwind: on_diverge }); @@ -709,7 +730,7 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, scope.id, drop_data.span, TerminatorKind::Drop { - value: drop_data.value.clone(), + location: drop_data.location.clone(), target: target, unwind: None }); diff --git a/src/librustc_mir/transform/break_cleanup_edges.rs 
b/src/librustc_mir/transform/add_call_guards.rs similarity index 57% rename from src/librustc_mir/transform/break_cleanup_edges.rs rename to src/librustc_mir/transform/add_call_guards.rs index 0eb6223a71e..bcdd62c1899 100644 --- a/src/librustc_mir/transform/break_cleanup_edges.rs +++ b/src/librustc_mir/transform/add_call_guards.rs @@ -12,13 +12,11 @@ use rustc::mir::repr::*; use rustc::mir::transform::{MirPass, MirSource, Pass}; -use rustc_data_structures::bitvec::BitVector; - use pretty; use traversal; -pub struct BreakCleanupEdges; +pub struct AddCallGuards; /** * Breaks outgoing critical edges for call terminators in the MIR. @@ -40,7 +38,7 @@ * */ -impl<'tcx> MirPass<'tcx> for BreakCleanupEdges { +impl<'tcx> MirPass<'tcx> for AddCallGuards { fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource, mir: &mut Mir<'tcx>) { let mut pred_count = vec![0u32; mir.basic_blocks.len()]; @@ -53,9 +51,6 @@ fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource, mir: &mu } } - let cleanup_map : BitVector = mir.basic_blocks - .iter().map(|bb| bb.is_cleanup).collect(); - // We need a place to store the new blocks generated let mut new_blocks = Vec::new(); @@ -65,30 +60,31 @@ fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource, mir: &mu for &bb in &bbs { let data = mir.basic_block_data_mut(bb); - if let Some(ref mut term) = data.terminator { - if term_is_invoke(term) { - let term_span = term.span; - let term_scope = term.scope; - let succs = term.successors_mut(); - for tgt in succs { - let num_preds = pred_count[tgt.index()]; - if num_preds > 1 { - // It's a critical edge, break it - let goto = Terminator { - span: term_span, - scope: term_scope, - kind: TerminatorKind::Goto { target: *tgt } - }; - let mut data = BasicBlockData::new(Some(goto)); - data.is_cleanup = cleanup_map.contains(tgt.index()); + match data.terminator { + Some(Terminator { + kind: TerminatorKind::Call { + destination: Some((_, ref mut destination)), + 
cleanup: Some(_), + .. + }, span, scope + }) if pred_count[destination.index()] > 1 => { + // It's a critical edge, break it + let call_guard = BasicBlockData { + statements: vec![], + is_cleanup: data.is_cleanup, + terminator: Some(Terminator { + span: span, + scope: scope, + kind: TerminatorKind::Goto { target: *destination } + }) + }; - // Get the index it will be when inserted into the MIR - let idx = cur_len + new_blocks.len(); - new_blocks.push(data); - *tgt = BasicBlock::new(idx); - } - } + // Get the index it will be when inserted into the MIR + let idx = cur_len + new_blocks.len(); + new_blocks.push(call_guard); + *destination = BasicBlock::new(idx); } + _ => {} } } @@ -99,13 +95,4 @@ fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource, mir: &mu } } -impl Pass for BreakCleanupEdges {} - -// Returns true if the terminator is a call that would use an invoke in LLVM. -fn term_is_invoke(term: &Terminator) -> bool { - match term.kind { - TerminatorKind::Call { cleanup: Some(_), .. } | - TerminatorKind::Drop { unwind: Some(_), .. } => true, - _ => false - } -} +impl Pass for AddCallGuards {} diff --git a/src/librustc_mir/transform/dump_mir.rs b/src/librustc_mir/transform/dump_mir.rs new file mode 100644 index 00000000000..fb49f951ecd --- /dev/null +++ b/src/librustc_mir/transform/dump_mir.rs @@ -0,0 +1,27 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This pass just dumps MIR at a specified point. 
+ +use rustc::ty::TyCtxt; +use rustc::mir::repr::*; +use rustc::mir::transform::{Pass, MirPass, MirSource}; +use pretty; + +pub struct DumpMir<'a>(pub &'a str); + +impl<'b, 'tcx> MirPass<'tcx> for DumpMir<'b> { + fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + src: MirSource, mir: &mut Mir<'tcx>) { + pretty::dump_mir(tcx, self.0, &0, src, mir, None); + } +} + +impl<'b> Pass for DumpMir<'b> {} diff --git a/src/librustc_mir/transform/mod.rs b/src/librustc_mir/transform/mod.rs index 0dcb7ef84d0..339dcdec060 100644 --- a/src/librustc_mir/transform/mod.rs +++ b/src/librustc_mir/transform/mod.rs @@ -13,6 +13,7 @@ pub mod erase_regions; pub mod no_landing_pads; pub mod type_check; -pub mod break_cleanup_edges; +pub mod add_call_guards; pub mod promote_consts; pub mod qualify_consts; +pub mod dump_mir; diff --git a/src/librustc_mir/transform/no_landing_pads.rs b/src/librustc_mir/transform/no_landing_pads.rs index de05032fa55..67710c43285 100644 --- a/src/librustc_mir/transform/no_landing_pads.rs +++ b/src/librustc_mir/transform/no_landing_pads.rs @@ -29,12 +29,11 @@ fn visit_terminator(&mut self, bb: BasicBlock, terminator: &mut Terminator<'tcx> TerminatorKind::SwitchInt { .. } => { /* nothing to do */ }, + TerminatorKind::Call { cleanup: ref mut unwind, .. } | + TerminatorKind::DropAndReplace { ref mut unwind, .. } | TerminatorKind::Drop { ref mut unwind, .. } => { unwind.take(); }, - TerminatorKind::Call { ref mut cleanup, .. } => { - cleanup.take(); - }, } self.super_terminator(bb, terminator); } diff --git a/src/librustc_mir/transform/promote_consts.rs b/src/librustc_mir/transform/promote_consts.rs index 431568b004d..d81c4e2dfb6 100644 --- a/src/librustc_mir/transform/promote_consts.rs +++ b/src/librustc_mir/transform/promote_consts.rs @@ -399,7 +399,7 @@ pub fn promote_candidates<'a, 'tcx>(mir: &mut Mir<'tcx>, }); let terminator = block.terminator_mut(); match terminator.kind { - TerminatorKind::Drop { value: Lvalue::Temp(index), target, .. 
} => { + TerminatorKind::Drop { location: Lvalue::Temp(index), target, .. } => { if promoted(index) { terminator.kind = TerminatorKind::Goto { target: target diff --git a/src/librustc_mir/transform/qualify_consts.rs b/src/librustc_mir/transform/qualify_consts.rs index 2e4400c834f..18a1f1595f3 100644 --- a/src/librustc_mir/transform/qualify_consts.rs +++ b/src/librustc_mir/transform/qualify_consts.rs @@ -422,6 +422,7 @@ fn qualify_const(&mut self) -> Qualif { TerminatorKind::Switch {..} | TerminatorKind::SwitchInt {..} | + TerminatorKind::DropAndReplace { .. } | TerminatorKind::Resume => None, TerminatorKind::Return => { diff --git a/src/librustc_mir/transform/type_check.rs b/src/librustc_mir/transform/type_check.rs index 80c56a5dc08..efac8ea8461 100644 --- a/src/librustc_mir/transform/type_check.rs +++ b/src/librustc_mir/transform/type_check.rs @@ -363,6 +363,20 @@ fn check_terminator(&mut self, // no checks needed for these } + + TerminatorKind::DropAndReplace { + ref location, + ref value, + .. + } => { + let lv_ty = mir.lvalue_ty(tcx, location).to_ty(tcx); + let rv_ty = mir.operand_ty(tcx, value); + if let Err(terr) = self.sub_types(self.last_span, rv_ty, lv_ty) { + span_mirbug!(self, term, "bad DropAndReplace ({:?} = {:?}): {:?}", + lv_ty, rv_ty, terr); + } + } + TerminatorKind::If { ref cond, .. } => { let cond_ty = mir.operand_ty(tcx, cond); match cond_ty.sty { @@ -519,6 +533,69 @@ fn check_box_free_inputs(&self, } } + fn check_iscleanup(&mut self, mir: &Mir<'tcx>, block: &BasicBlockData<'tcx>) + { + let is_cleanup = block.is_cleanup; + self.last_span = block.terminator().span; + match block.terminator().kind { + TerminatorKind::Goto { target } => + self.assert_iscleanup(mir, block, target, is_cleanup), + TerminatorKind::If { targets: (on_true, on_false), .. } => { + self.assert_iscleanup(mir, block, on_true, is_cleanup); + self.assert_iscleanup(mir, block, on_false, is_cleanup); + } + TerminatorKind::Switch { ref targets, .. 
} | + TerminatorKind::SwitchInt { ref targets, .. } => { + for target in targets { + self.assert_iscleanup(mir, block, *target, is_cleanup); + } + } + TerminatorKind::Resume => { + if !is_cleanup { + span_mirbug!(self, block, "resume on non-cleanup block!") + } + } + TerminatorKind::Return => { + if is_cleanup { + span_mirbug!(self, block, "return on cleanup block") + } + } + TerminatorKind::Drop { target, unwind, .. } | + TerminatorKind::DropAndReplace { target, unwind, .. } => { + self.assert_iscleanup(mir, block, target, is_cleanup); + if let Some(unwind) = unwind { + if is_cleanup { + span_mirbug!(self, block, "unwind on cleanup block") + } + self.assert_iscleanup(mir, block, unwind, true); + } + } + TerminatorKind::Call { ref destination, cleanup, .. } => { + if let &Some((_, target)) = destination { + self.assert_iscleanup(mir, block, target, is_cleanup); + } + if let Some(cleanup) = cleanup { + if is_cleanup { + span_mirbug!(self, block, "cleanup on cleanup block") + } + self.assert_iscleanup(mir, block, cleanup, true); + } + } + } + } + + fn assert_iscleanup(&mut self, + mir: &Mir<'tcx>, + ctxt: &fmt::Debug, + bb: BasicBlock, + iscleanuppad: bool) + { + if mir.basic_block_data(bb).is_cleanup != iscleanuppad { + span_mirbug!(self, ctxt, "cleanuppad mismatch: {:?} should be {:?}", + bb, iscleanuppad); + } + } + fn typeck_mir(&mut self, mir: &Mir<'tcx>) { self.last_span = mir.span; debug!("run_on_mir: {:?}", mir.span); @@ -530,9 +607,8 @@ fn typeck_mir(&mut self, mir: &Mir<'tcx>) { self.check_stmt(mir, stmt); } - if let Some(ref terminator) = block.terminator { - self.check_terminator(mir, terminator); - } + self.check_terminator(mir, block.terminator()); + self.check_iscleanup(mir, block); } } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 1b13a662d36..f35d87d0741 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -577,6 +577,15 @@ pub fn lpad(&self) -> Option<&'blk LandingPad> { self.lpad.get() } + 
pub fn set_lpad_ref(&self, lpad: Option<&'blk LandingPad>) { + // FIXME: use an IVar? + self.lpad.set(lpad); + } + + pub fn set_lpad(&self, lpad: Option) { + self.set_lpad_ref(lpad.map(|p| &*self.fcx().lpad_arena.alloc(p))) + } + pub fn mir(&self) -> CachedMir<'blk, 'tcx> { self.fcx.mir() } @@ -716,7 +725,16 @@ pub fn monomorphize(&self, value: &T) -> T } pub fn set_lpad(&self, lpad: Option) { - self.bcx.lpad.set(lpad.map(|p| &*self.fcx().lpad_arena.alloc(p))) + self.bcx.set_lpad(lpad) + } + + pub fn set_lpad_ref(&self, lpad: Option<&'blk LandingPad>) { + // FIXME: use an IVar? + self.bcx.set_lpad_ref(lpad); + } + + pub fn lpad(&self) -> Option<&'blk LandingPad> { + self.bcx.lpad() } } @@ -761,6 +779,10 @@ pub fn msvc(cleanuppad: ValueRef) -> LandingPad { pub fn bundle(&self) -> Option<&OperandBundleDef> { self.operand.as_ref() } + + pub fn cleanuppad(&self) -> Option { + self.cleanuppad + } } impl Clone for LandingPad { diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index 0b88ba554da..03df1c451f0 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -13,7 +13,9 @@ use rustc_data_structures::bitvec::BitVector; use rustc::mir::repr as mir; +use rustc::mir::repr::TerminatorKind; use rustc::mir::visit::{Visitor, LvalueContext}; +use rustc_mir::traversal; use common::{self, Block, BlockAndBuilder}; use super::rvalue; @@ -134,3 +136,104 @@ fn visit_lvalue(&mut self, self.super_lvalue(lvalue, context); } } + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum CleanupKind { + NotCleanup, + Funclet, + Internal { funclet: mir::BasicBlock } +} + +pub fn cleanup_kinds<'bcx,'tcx>(_bcx: Block<'bcx,'tcx>, + mir: &mir::Mir<'tcx>) + -> Vec +{ + fn discover_masters<'tcx>(result: &mut [CleanupKind], mir: &mir::Mir<'tcx>) { + for bb in mir.all_basic_blocks() { + let data = mir.basic_block_data(bb); + match data.terminator().kind { + TerminatorKind::Goto { .. 
} | + TerminatorKind::Resume | + TerminatorKind::Return | + TerminatorKind::If { .. } | + TerminatorKind::Switch { .. } | + TerminatorKind::SwitchInt { .. } => { + /* nothing to do */ + } + TerminatorKind::Call { cleanup: unwind, .. } | + TerminatorKind::DropAndReplace { unwind, .. } | + TerminatorKind::Drop { unwind, .. } => { + if let Some(unwind) = unwind { + debug!("cleanup_kinds: {:?}/{:?} registering {:?} as funclet", + bb, data, unwind); + result[unwind.index()] = CleanupKind::Funclet; + } + } + } + } + } + + fn propagate<'tcx>(result: &mut [CleanupKind], mir: &mir::Mir<'tcx>) { + let mut funclet_succs : Vec<_> = + mir.all_basic_blocks().iter().map(|_| None).collect(); + + let mut set_successor = |funclet: mir::BasicBlock, succ| { + match funclet_succs[funclet.index()] { + ref mut s @ None => { + debug!("set_successor: updating successor of {:?} to {:?}", + funclet, succ); + *s = Some(succ); + }, + Some(s) => if s != succ { + span_bug!(mir.span, "funclet {:?} has 2 parents - {:?} and {:?}", + funclet, s, succ); + } + } + }; + + for (bb, data) in traversal::reverse_postorder(mir) { + let funclet = match result[bb.index()] { + CleanupKind::NotCleanup => continue, + CleanupKind::Funclet => bb, + CleanupKind::Internal { funclet } => funclet, + }; + + debug!("cleanup_kinds: {:?}/{:?}/{:?} propagating funclet {:?}", + bb, data, result[bb.index()], funclet); + + for &succ in data.terminator().successors().iter() { + let kind = result[succ.index()]; + debug!("cleanup_kinds: propagating {:?} to {:?}/{:?}", + funclet, succ, kind); + match kind { + CleanupKind::NotCleanup => { + result[succ.index()] = CleanupKind::Internal { funclet: funclet }; + } + CleanupKind::Funclet => { + set_successor(funclet, succ); + } + CleanupKind::Internal { funclet: succ_funclet } => { + if funclet != succ_funclet { + // `succ` has 2 different funclet going into it, so it must + // be a funclet by itself. 
+ + debug!("promoting {:?} to a funclet and updating {:?}", succ, + succ_funclet); + result[succ.index()] = CleanupKind::Funclet; + set_successor(succ_funclet, succ); + set_successor(funclet, succ); + } + } + } + } + } + } + + let mut result : Vec<_> = + mir.all_basic_blocks().iter().map(|_| CleanupKind::NotCleanup).collect(); + + discover_masters(&mut result, mir); + propagate(&mut result, mir); + debug!("cleanup_kinds: result={:?}", result); + result +} diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 4e3386bc736..eb962b66154 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm::{self, BasicBlockRef, ValueRef, OperandBundleDef}; +use llvm::{self, ValueRef}; use rustc::ty; use rustc::mir::repr as mir; use abi::{Abi, FnType, ArgType}; @@ -16,7 +16,7 @@ use base; use build; use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual}; -use common::{self, type_is_fat_ptr, Block, BlockAndBuilder, C_undef}; +use common::{self, type_is_fat_ptr, Block, BlockAndBuilder, LandingPad, C_undef}; use debuginfo::DebugLoc; use Disr; use machine::{llalign_of_min, llbitsize_of_real}; @@ -26,7 +26,8 @@ use type_::Type; use rustc_data_structures::fnv::FnvHashMap; -use super::{MirContext, TempRef, drop}; +use super::{MirContext, TempRef}; +use super::analyze::CleanupKind; use super::constant::Const; use super::lvalue::{LvalueRef, load_fat_ptr}; use super::operand::OperandRef; @@ -34,22 +35,62 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { pub fn trans_block(&mut self, bb: mir::BasicBlock) { - debug!("trans_block({:?})", bb); - let mut bcx = self.bcx(bb); let mir = self.mir.clone(); let data = mir.basic_block_data(bb); - // MSVC SEH bits - let (cleanup_pad, cleanup_bundle) = if let Some((cp, cb)) = self.make_cleanup_pad(bb) { - (Some(cp), Some(cb)) - } else { - (None, None) + 
debug!("trans_block({:?}={:?})", bb, data); + + // Create the cleanup bundle, if needed. + let cleanup_pad = bcx.lpad().and_then(|lp| lp.cleanuppad()); + let cleanup_bundle = bcx.lpad().and_then(|l| l.bundle()); + + let funclet_br = |this: &Self, bcx: BlockAndBuilder, bb: mir::BasicBlock| { + let lltarget = this.blocks[bb.index()].llbb; + if let Some(cp) = cleanup_pad { + match this.cleanup_kind(bb) { + CleanupKind::Funclet => { + // micro-optimization: generate a `ret` rather than a jump + // to a return block + bcx.cleanup_ret(cp, Some(lltarget)); + } + CleanupKind::Internal { .. } => bcx.br(lltarget), + CleanupKind::NotCleanup => bug!("jump from cleanup bb to bb {:?}", bb) + } + } else { + bcx.br(lltarget); + } }; - let funclet_br = |bcx: BlockAndBuilder, llbb: BasicBlockRef| if let Some(cp) = cleanup_pad { - bcx.cleanup_ret(cp, Some(llbb)); - } else { - bcx.br(llbb); + + let llblock = |this: &mut Self, target: mir::BasicBlock| { + let lltarget = this.blocks[target.index()].llbb; + + if let Some(cp) = cleanup_pad { + match this.cleanup_kind(target) { + CleanupKind::Funclet => { + // MSVC cross-funclet jump - need a trampoline + + debug!("llblock: creating cleanup trampoline for {:?}", target); + let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); + let trampoline = this.fcx.new_block(name, None).build(); + trampoline.set_personality_fn(this.fcx.eh_personality()); + trampoline.cleanup_ret(cp, Some(lltarget)); + trampoline.llbb() + } + CleanupKind::Internal { .. 
} => lltarget, + CleanupKind::NotCleanup => + bug!("jump from cleanup bb {:?} to bb {:?}", bb, target) + } + } else { + if let (CleanupKind::NotCleanup, CleanupKind::Funclet) = + (this.cleanup_kind(bb), this.cleanup_kind(target)) + { + // jump *into* cleanup - need a landing pad if GNU + this.landing_pad_to(target).llbb + } else { + lltarget + } + } }; for statement in &data.statements { @@ -78,13 +119,14 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { } mir::TerminatorKind::Goto { target } => { - funclet_br(bcx, self.llblock(target)); + funclet_br(self, bcx, target); } mir::TerminatorKind::If { ref cond, targets: (true_bb, false_bb) } => { let cond = self.trans_operand(&bcx, cond); - let lltrue = self.llblock(true_bb); - let llfalse = self.llblock(false_bb); + + let lltrue = llblock(self, true_bb); + let llfalse = llblock(self, false_bb); bcx.cond_br(cond.immediate(), lltrue, llfalse); } @@ -106,18 +148,18 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { // code. This is especially helpful in cases like an if-let on a huge enum. // Note: This optimization is only valid for exhaustive matches. Some((&&bb, &c)) if c > targets.len() / 2 => { - (Some(bb), self.blocks[bb.index()]) + (Some(bb), llblock(self, bb)) } // We're generating an exhaustive switch, so the else branch // can't be hit. 
Branching to an unreachable instruction // lets LLVM know this - _ => (None, self.unreachable_block()) + _ => (None, self.unreachable_block().llbb) }; - let switch = bcx.switch(discr, default_blk.llbb, targets.len()); + let switch = bcx.switch(discr, default_blk, targets.len()); assert_eq!(adt_def.variants.len(), targets.len()); for (adt_variant, &target) in adt_def.variants.iter().zip(targets) { if default_bb != Some(target) { - let llbb = self.llblock(target); + let llbb = llblock(self, target); let llval = bcx.with_block(|bcx| adt::trans_case( bcx, &repr, Disr::from(adt_variant.disr_val))); build::AddCase(switch, llval, llbb) @@ -129,10 +171,10 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { let (otherwise, targets) = targets.split_last().unwrap(); let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval); let discr = bcx.with_block(|bcx| base::to_immediate(bcx, discr, switch_ty)); - let switch = bcx.switch(discr, self.llblock(*otherwise), values.len()); + let switch = bcx.switch(discr, llblock(self, *otherwise), values.len()); for (value, target) in values.iter().zip(targets) { let val = Const::from_constval(bcx.ccx(), value.clone(), switch_ty); - let llbb = self.llblock(*target); + let llbb = llblock(self, *target); build::AddCase(switch, val.llval, llbb) } } @@ -143,12 +185,12 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { }) } - mir::TerminatorKind::Drop { ref value, target, unwind } => { - let lvalue = self.trans_lvalue(&bcx, value); + mir::TerminatorKind::Drop { ref location, target, unwind } => { + let lvalue = self.trans_lvalue(&bcx, location); let ty = lvalue.ty.to_ty(bcx.tcx()); // Double check for necessity to drop if !glue::type_needs_drop(bcx.tcx(), ty) { - funclet_br(bcx, self.llblock(target)); + funclet_br(self, bcx, target); return; } let drop_fn = glue::get_drop_glue(bcx.ccx(), ty); @@ -159,24 +201,21 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { lvalue.llval }; if let Some(unwind) = unwind { - let uwbcx = 
self.bcx(unwind); - let unwind = self.make_landing_pad(uwbcx); bcx.invoke(drop_fn, &[llvalue], - self.llblock(target), - unwind.llbb(), - cleanup_bundle.as_ref()); - self.bcx(target).at_start(|bcx| { - debug_loc.apply_to_bcx(bcx); - drop::drop_fill(bcx, lvalue.llval, ty) - }); + self.blocks[target.index()].llbb, + llblock(self, unwind), + cleanup_bundle); } else { - bcx.call(drop_fn, &[llvalue], cleanup_bundle.as_ref()); - drop::drop_fill(&bcx, lvalue.llval, ty); - funclet_br(bcx, self.llblock(target)); + bcx.call(drop_fn, &[llvalue], cleanup_bundle); + funclet_br(self, bcx, target); } } + mir::TerminatorKind::DropAndReplace { .. } => { + bug!("undesugared DropAndReplace in trans: {:?}", data); + } + mir::TerminatorKind::Call { ref func, ref args, ref destination, ref cleanup } => { // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar. let callee = self.trans_operand(&bcx, func); @@ -211,8 +250,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { let llptr = self.trans_operand(&bcx, &args[0]).immediate(); let val = self.trans_operand(&bcx, &args[1]); self.store_operand(&bcx, llptr, val); - self.set_operand_dropped(&bcx, &args[1]); - funclet_br(bcx, self.llblock(target)); + funclet_br(self, bcx, target); return; } @@ -222,8 +260,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { this.trans_transmute(&bcx, &args[0], dest); }); - self.set_operand_dropped(&bcx, &args[0]); - funclet_br(bcx, self.llblock(target)); + funclet_br(self, bcx, target); return; } @@ -328,10 +365,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { } if let Some((_, target)) = *destination { - for op in args { - self.set_operand_dropped(&bcx, op); - } - funclet_br(bcx, self.llblock(target)); + funclet_br(self, bcx, target); } else { // trans_intrinsic_call already used Unreachable. 
// bcx.unreachable(); @@ -344,28 +378,19 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { }; // Many different ways to call a function handled here - if let Some(cleanup) = cleanup.map(|bb| self.bcx(bb)) { + if let &Some(cleanup) = cleanup { let ret_bcx = if let Some((_, target)) = *destination { self.blocks[target.index()] } else { self.unreachable_block() }; - let landingpad = self.make_landing_pad(cleanup); - let invokeret = bcx.invoke(fn_ptr, &llargs, ret_bcx.llbb, - landingpad.llbb(), - cleanup_bundle.as_ref()); + llblock(self, cleanup), + cleanup_bundle); fn_ty.apply_attrs_callsite(invokeret); - landingpad.at_start(|bcx| { - debug_loc.apply_to_bcx(bcx); - for op in args { - self.set_operand_dropped(bcx, op); - } - }); - if destination.is_some() { let ret_bcx = ret_bcx.build(); ret_bcx.at_start(|ret_bcx| { @@ -375,13 +400,10 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { ty: sig.output.unwrap() }; self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op); - for op in args { - self.set_operand_dropped(&ret_bcx, op); - } }); } } else { - let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle.as_ref()); + let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle); fn_ty.apply_attrs_callsite(llret); if let Some((_, target)) = *destination { let op = OperandRef { @@ -389,12 +411,8 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { ty: sig.output.unwrap() }; self.store_return(&bcx, ret_dest, fn_ty.ret, op); - for op in args { - self.set_operand_dropped(&bcx, op); - } - funclet_br(bcx, self.llblock(target)); + funclet_br(self, bcx, target); } else { - // no need to drop args, because the call never returns bcx.unreachable(); } } @@ -534,17 +552,29 @@ fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRe } } - /// Create a landingpad wrapper around the given Block. 
+ fn cleanup_kind(&self, bb: mir::BasicBlock) -> CleanupKind { + self.cleanup_kinds[bb.index()] + } + + /// Return the landingpad wrapper around the given basic block /// /// No-op in MSVC SEH scheme. - fn make_landing_pad(&mut self, - cleanup: BlockAndBuilder<'bcx, 'tcx>) - -> BlockAndBuilder<'bcx, 'tcx> + fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> Block<'bcx, 'tcx> { - if base::wants_msvc_seh(cleanup.sess()) { - return cleanup; + if let Some(block) = self.landing_pads[target_bb.index()] { + return block; } - let bcx = self.fcx.new_block("cleanup", None).build(); + + if base::wants_msvc_seh(self.fcx.ccx.sess()) { + return self.blocks[target_bb.index()]; + } + + let target = self.bcx(target_bb); + + let block = self.fcx.new_block("cleanup", None); + self.landing_pads[target_bb.index()] = Some(block); + + let bcx = block.build(); let ccx = bcx.ccx(); let llpersonality = self.fcx.eh_personality(); let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); @@ -552,36 +582,34 @@ fn make_landing_pad(&mut self, bcx.set_cleanup(llretval); let slot = self.get_personality_slot(&bcx); bcx.store(llretval, slot); - bcx.br(cleanup.llbb()); - bcx + bcx.br(target.llbb()); + block } - /// Create prologue cleanuppad instruction under MSVC SEH handling scheme. - /// - /// Also handles setting some state for the original trans and creating an operand bundle for - /// function calls. 
- fn make_cleanup_pad(&mut self, bb: mir::BasicBlock) -> Option<(ValueRef, OperandBundleDef)> { + pub fn init_cpad(&mut self, bb: mir::BasicBlock) { let bcx = self.bcx(bb); let data = self.mir.basic_block_data(bb); - let use_funclets = base::wants_msvc_seh(bcx.sess()) && data.is_cleanup; - let cleanup_pad = if use_funclets { - bcx.set_personality_fn(self.fcx.eh_personality()); - bcx.at_start(|bcx| { - DebugLoc::None.apply_to_bcx(bcx); - Some(bcx.cleanup_pad(None, &[])) - }) - } else { - None + debug!("init_cpad({:?})", data); + + match self.cleanup_kinds[bb.index()] { + CleanupKind::NotCleanup => { + bcx.set_lpad(None) + } + _ if !base::wants_msvc_seh(bcx.sess()) => { + bcx.set_lpad(Some(LandingPad::gnu())) + } + CleanupKind::Internal { funclet } => { + // FIXME: is this needed? + bcx.set_personality_fn(self.fcx.eh_personality()); + bcx.set_lpad_ref(self.bcx(funclet).lpad()); + } + CleanupKind::Funclet => { + bcx.set_personality_fn(self.fcx.eh_personality()); + DebugLoc::None.apply_to_bcx(&bcx); + let cleanup_pad = bcx.cleanup_pad(None, &[]); + bcx.set_lpad(Some(LandingPad::msvc(cleanup_pad))); + } }; - // Set the landingpad global-state for old translator, so it knows about the SEH used. 
- bcx.set_lpad(if let Some(cleanup_pad) = cleanup_pad { - Some(common::LandingPad::msvc(cleanup_pad)) - } else if data.is_cleanup { - Some(common::LandingPad::gnu()) - } else { - None - }); - cleanup_pad.map(|f| (f, OperandBundleDef::new("funclet", &[f]))) } fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> { @@ -597,10 +625,6 @@ fn bcx(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'bcx, 'tcx> { self.blocks[bb.index()].build() } - pub fn llblock(&self, bb: mir::BasicBlock) -> BasicBlockRef { - self.blocks[bb.index()].llbb - } - fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>, dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType, llargs: &mut Vec, is_intrinsic: bool) -> ReturnDest { diff --git a/src/librustc_trans/mir/drop.rs b/src/librustc_trans/mir/drop.rs deleted file mode 100644 index 623cd5a6f8c..00000000000 --- a/src/librustc_trans/mir/drop.rs +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use llvm::ValueRef; -use rustc::ty::Ty; -use adt; -use base; -use common::{self, BlockAndBuilder}; -use machine; -use type_of; -use type_::Type; - -pub fn drop_fill<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, value: ValueRef, ty: Ty<'tcx>) { - let llty = type_of::type_of(bcx.ccx(), ty); - let llptr = bcx.pointercast(value, Type::i8(bcx.ccx()).ptr_to()); - let filling = common::C_u8(bcx.ccx(), adt::DTOR_DONE); - let size = machine::llsize_of(bcx.ccx(), llty); - let align = common::C_u32(bcx.ccx(), machine::llalign_of_min(bcx.ccx(), llty)); - base::call_memset(&bcx, llptr, filling, size, align, false); -} diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index b39a6ac1ce3..bc79482666c 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -20,7 +20,6 @@ use consts; use machine; use type_of::type_of; -use mir::drop; use Disr; use std::ptr; @@ -51,9 +50,6 @@ pub fn alloca<'bcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, { assert!(!ty.has_erasable_regions()); let lltemp = bcx.with_block(|bcx| base::alloc_ty(bcx, ty, name)); - if bcx.fcx().type_needs_drop(ty) { - drop::drop_fill(bcx, lltemp, ty); - } LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty)) } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index ffc14b4468b..d1206550b13 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -73,6 +73,13 @@ pub struct MirContext<'bcx, 'tcx:'bcx> { /// A `Block` for each MIR `BasicBlock` blocks: Vec>, + /// The funclet status of each basic block + cleanup_kinds: Vec, + + /// This stores the landing-pad block for a given BB, computed lazily on GNU + /// and eagerly on MSVC. 
+ landing_pads: Vec>>, + /// Cached unreachable block unreachable_block: Option>, @@ -139,8 +146,9 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { // Analyze the temps to determine which must be lvalues // FIXME - let lvalue_temps = bcx.with_block(|bcx| { - analyze::lvalue_temps(bcx, &mir) + let (lvalue_temps, cleanup_kinds) = bcx.with_block(|bcx| { + (analyze::lvalue_temps(bcx, &mir), + analyze::cleanup_kinds(bcx, &mir)) }); // Compute debuginfo scopes from MIR scopes. @@ -206,6 +214,8 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { llpersonalityslot: None, blocks: block_bcxs, unreachable_block: None, + cleanup_kinds: cleanup_kinds, + landing_pads: mir_blocks.iter().map(|_| None).collect(), vars: vars, temps: temps, args: args, @@ -214,7 +224,14 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { let mut visited = BitVector::new(mir_blocks.len()); - let rpo = traversal::reverse_postorder(&mir); + let mut rpo = traversal::reverse_postorder(&mir); + + // Prepare each block for translation. 
+ for (bb, _) in rpo.by_ref() { + mircx.init_cpad(bb); + } + rpo.reset(); + // Translate the body of each block using reverse postorder for (bb, _) in rpo { visited.insert(bb.index()); @@ -228,8 +245,7 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { let block = BasicBlock(block.llbb); // Unreachable block if !visited.contains(bb.index()) { - block.delete(); - } else if block.pred_iter().count() == 0 { + debug!("trans_mir: block {:?} was not visited", bb); block.delete(); } } @@ -431,7 +447,6 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, mod analyze; mod block; mod constant; -mod drop; mod lvalue; mod operand; mod rvalue; diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index fc726a3474f..107ec1159f0 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -15,12 +15,11 @@ use common::{self, Block, BlockAndBuilder}; use datum; use value::Value; -use glue; use std::fmt; use super::lvalue::load_fat_ptr; -use super::{MirContext, TempRef, drop}; +use super::{MirContext, TempRef}; /// The representation of a Rust value. The enum variant is in fact /// uniquely determined by the value's type, but is kept as a @@ -179,29 +178,4 @@ pub fn store_operand_direct(&mut self, } } } - - pub fn set_operand_dropped(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, - operand: &mir::Operand<'tcx>) { - match *operand { - mir::Operand::Constant(_) => return, - mir::Operand::Consume(ref lvalue) => { - if let mir::Lvalue::Temp(idx) = *lvalue { - if let TempRef::Operand(..) = self.temps[idx as usize] { - // All lvalues which have an associated drop are promoted to an alloca - // beforehand. If this is an operand, it is safe to say this is never - // dropped and there’s no reason for us to zero this out at all. 
- return - } - } - let lvalue = self.trans_lvalue(bcx, lvalue); - let ty = lvalue.ty.to_ty(bcx.tcx()); - if !glue::type_needs_drop(bcx.tcx(), ty) { - return - } else { - drop::drop_fill(bcx, lvalue.llval, ty); - } - } - } - } } diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 5945e8813a4..6d141862ac3 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -25,7 +25,6 @@ use tvec; use value::Value; use Disr; -use glue; use super::MirContext; use super::operand::{OperandRef, OperandValue}; @@ -48,7 +47,6 @@ pub fn trans_rvalue(&mut self, // FIXME: consider not copying constants through stack. (fixable by translating // constants into OperandValue::Ref, why don’t we do that yet if we don’t?) self.store_operand(&bcx, dest.llval, tr_operand); - self.set_operand_dropped(&bcx, operand); bcx } @@ -92,7 +90,6 @@ pub fn trans_rvalue(&mut self, } } }); - self.set_operand_dropped(&bcx, source); bcx } @@ -107,7 +104,6 @@ pub fn trans_rvalue(&mut self, block }) }); - self.set_operand_dropped(&bcx, elem); bcx } @@ -128,7 +124,6 @@ pub fn trans_rvalue(&mut self, val, disr, i); self.store_operand(&bcx, lldest_i, op); } - self.set_operand_dropped(&bcx, operand); } }, _ => { @@ -167,7 +162,6 @@ pub fn trans_rvalue(&mut self, let dest = bcx.gepi(dest.llval, &[0, i]); self.store_operand(&bcx, dest, op); } - self.set_operand_dropped(&bcx, operand); } } } @@ -209,9 +203,6 @@ pub fn trans_rvalue(&mut self, asm::trans_inline_asm(bcx, asm, outputs, input_vals); }); - for input in inputs { - self.set_operand_dropped(&bcx, input); - } bcx } @@ -269,7 +260,6 @@ pub fn trans_rvalue_operand(&mut self, // &'a fmt::Debug+Send => &'a fmt::Debug, // So we need to pointercast the base to ensure // the types match up. 
- self.set_operand_dropped(&bcx, source); let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), cast_ty); let lldata = bcx.pointercast(lldata, llcast_ty); OperandValue::FatPtr(lldata, llextra) @@ -280,7 +270,6 @@ pub fn trans_rvalue_operand(&mut self, base::unsize_thin_ptr(bcx, lldata, operand.ty, cast_ty) }); - self.set_operand_dropped(&bcx, source); OperandValue::FatPtr(lldata, llextra) } OperandValue::Ref(_) => { @@ -569,8 +558,8 @@ pub fn trans_scalar_binop(&mut self, } } -pub fn rvalue_creates_operand<'bcx, 'tcx>(mir: &mir::Mir<'tcx>, - bcx: &BlockAndBuilder<'bcx, 'tcx>, +pub fn rvalue_creates_operand<'bcx, 'tcx>(_mir: &mir::Mir<'tcx>, + _bcx: &BlockAndBuilder<'bcx, 'tcx>, rvalue: &mir::Rvalue<'tcx>) -> bool { match *rvalue { mir::Rvalue::Ref(..) | @@ -578,21 +567,14 @@ pub fn rvalue_creates_operand<'bcx, 'tcx>(mir: &mir::Mir<'tcx>, mir::Rvalue::Cast(..) | // (*) mir::Rvalue::BinaryOp(..) | mir::Rvalue::UnaryOp(..) | - mir::Rvalue::Box(..) => + mir::Rvalue::Box(..) | + mir::Rvalue::Use(..) => true, mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) | mir::Rvalue::Slice { .. } | mir::Rvalue::InlineAsm { .. 
} => false, - mir::Rvalue::Use(ref operand) => { - let ty = mir.operand_ty(bcx.tcx(), operand); - let ty = bcx.monomorphize(&ty); - // Types that don't need dropping can just be an operand, - // this allows temporary lvalues, used as rvalues, to - // avoid a stack slot when it's unnecessary - !glue::type_needs_drop(bcx.tcx(), ty) - } } // (*) this is only true if the type is suitable diff --git a/src/librustc_trans/monomorphize.rs b/src/librustc_trans/monomorphize.rs index c02dd7995f1..cf84dd57d02 100644 --- a/src/librustc_trans/monomorphize.rs +++ b/src/librustc_trans/monomorphize.rs @@ -173,7 +173,7 @@ pub struct Instance<'tcx> { impl<'tcx> fmt::Display for Instance<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ppaux::parameterized(f, &self.substs, self.def, ppaux::Ns::Value, &[], - |tcx| tcx.lookup_item_type(self.def).generics) + |tcx| Some(tcx.lookup_item_type(self.def).generics)) } } diff --git a/src/llvm b/src/llvm index a73c41e7f1c..80ad955b60b 160000 --- a/src/llvm +++ b/src/llvm @@ -1 +1 @@ -Subproject commit a73c41e7f1c85cd814e9792fc6a6a8f8e31b8dd4 +Subproject commit 80ad955b60b3ac02d0462a4a65fcea597d0ebfb1 diff --git a/src/test/codegen-units/item-collection/cross-crate-trait-method.rs b/src/test/codegen-units/item-collection/cross-crate-trait-method.rs index 9f29a90bffb..195125793be 100644 --- a/src/test/codegen-units/item-collection/cross-crate-trait-method.rs +++ b/src/test/codegen-units/item-collection/cross-crate-trait-method.rs @@ -56,5 +56,3 @@ fn main() //~ TRANS_ITEM fn cgu_export_trait_method::{{impl}}[0]::without_default_impl_generic[0] let _: (char, bool) = Trait::without_default_impl_generic(false); } - -//~ TRANS_ITEM drop-glue i8 diff --git a/src/test/codegen-units/item-collection/generic-functions.rs b/src/test/codegen-units/item-collection/generic-functions.rs index 5ec1f7fbc3c..afe6ffc8bfe 100644 --- a/src/test/codegen-units/item-collection/generic-functions.rs +++ 
b/src/test/codegen-units/item-collection/generic-functions.rs @@ -60,5 +60,3 @@ fn main() { //~ TRANS_ITEM fn generic_functions::foo3[0] let _ = foo3('v', (), ()); } - -//~ TRANS_ITEM drop-glue i8 diff --git a/src/test/codegen-units/partitioning/local-generic.rs b/src/test/codegen-units/partitioning/local-generic.rs index e38e676b95c..f5641f1f2ed 100644 --- a/src/test/codegen-units/partitioning/local-generic.rs +++ b/src/test/codegen-units/partitioning/local-generic.rs @@ -56,5 +56,3 @@ fn user() { let _ = generic("abc"); } } - -//~ TRANS_ITEM drop-glue i8 diff --git a/src/test/run-fail/issue-30380.rs b/src/test/run-fail/issue-30380.rs new file mode 100644 index 00000000000..eb668517bdf --- /dev/null +++ b/src/test/run-fail/issue-30380.rs @@ -0,0 +1,47 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(rustc_attrs)] + +// check that panics in destructors during assignment do not leave +// destroyed values lying around for other destructors to observe. + +// error-pattern:panicking destructors ftw! + +struct Observer<'a>(&'a mut FilledOnDrop); + +struct FilledOnDrop(u32); +impl Drop for FilledOnDrop { + fn drop(&mut self) { + if self.0 == 0 { + // this is only set during the destructor - safe + // code should not be able to observe this. 
+ self.0 = 0x1c1c1c1c; + panic!("panicking destructors ftw!"); + } + } +} + +impl<'a> Drop for Observer<'a> { + fn drop(&mut self) { + assert_eq!(self.0 .0, 1); + } +} + +#[rustc_mir] +fn foo(b: &mut Observer) { + *b.0 = FilledOnDrop(1); +} + +fn main() { + let mut bomb = FilledOnDrop(0); + let mut observer = Observer(&mut bomb); + foo(&mut observer); +} diff --git a/src/test/run-make/atomic-lock-free/atomic_lock_free.rs b/src/test/run-make/atomic-lock-free/atomic_lock_free.rs index 8731cd960f3..023f2218b87 100644 --- a/src/test/run-make/atomic-lock-free/atomic_lock_free.rs +++ b/src/test/run-make/atomic-lock-free/atomic_lock_free.rs @@ -18,6 +18,8 @@ #[lang = "sized"] trait Sized {} +#[lang = "copy"] +trait Copy {} #[cfg(target_has_atomic = "8")] pub unsafe fn atomic_u8(x: *mut u8) { diff --git a/src/test/run-pass/dynamic-drop.rs b/src/test/run-pass/dynamic-drop.rs new file mode 100644 index 00000000000..f917531e868 --- /dev/null +++ b/src/test/run-pass/dynamic-drop.rs @@ -0,0 +1,156 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![feature(rustc_attrs)] + +use std::cell::{Cell, RefCell}; +use std::panic; +use std::usize; + +struct InjectedFailure; + +struct Allocator { + data: RefCell<Vec<bool>>, + failing_op: usize, + cur_ops: Cell<usize>, +} + +impl panic::UnwindSafe for Allocator {} +impl panic::RefUnwindSafe for Allocator {} + +impl Drop for Allocator { + fn drop(&mut self) { + let data = self.data.borrow(); + if data.iter().any(|d| *d) { + panic!("missing free: {:?}", data); + } + } +} + +impl Allocator { + fn new(failing_op: usize) -> Self { + Allocator { + failing_op: failing_op, + cur_ops: Cell::new(0), + data: RefCell::new(vec![]) + } + } + fn alloc(&self) -> Ptr { + self.cur_ops.set(self.cur_ops.get() + 1); + + if self.cur_ops.get() == self.failing_op { + panic!(InjectedFailure); + } + + let mut data = self.data.borrow_mut(); + let addr = data.len(); + data.push(true); + Ptr(addr, self) + } +} + +struct Ptr<'a>(usize, &'a Allocator); +impl<'a> Drop for Ptr<'a> { + fn drop(&mut self) { + match self.1.data.borrow_mut()[self.0] { + false => { + panic!("double free at index {:?}", self.0) + } + ref mut d => *d = false + } + + self.1.cur_ops.set(self.1.cur_ops.get()+1); + + if self.1.cur_ops.get() == self.1.failing_op { + panic!(InjectedFailure); + } + } +} + +#[rustc_mir] +fn dynamic_init(a: &Allocator, c: bool) { + let _x; + if c { + _x = Some(a.alloc()); + } +} + +#[rustc_mir] +fn dynamic_drop(a: &Allocator, c: bool) { + let x = a.alloc(); + if c { + Some(x) + } else { + None + }; +} + +#[rustc_mir] +fn assignment2(a: &Allocator, c0: bool, c1: bool) { + let mut _v = a.alloc(); + let mut _w = a.alloc(); + if c0 { + drop(_v); + } + _v = _w; + if c1 { + _w = a.alloc(); + } +} + +#[rustc_mir] +fn assignment1(a: &Allocator, c0: bool) { + let mut _v = a.alloc(); + let mut _w = a.alloc(); + if c0 { + drop(_v); + } + _v = _w; +} + +fn run_test<F>(mut f: F) + where F: FnMut(&Allocator) +{ + let first_alloc = Allocator::new(usize::MAX); + f(&first_alloc); + + for failing_op in 1..first_alloc.cur_ops.get()+1
{ + let alloc = Allocator::new(failing_op); + let alloc = &alloc; + let f = panic::AssertUnwindSafe(&mut f); + let result = panic::catch_unwind(move || { + f.0(alloc); + }); + match result { + Ok(..) => panic!("test executed {} ops but now {}", + first_alloc.cur_ops.get(), alloc.cur_ops.get()), + Err(e) => { + if e.downcast_ref::<InjectedFailure>().is_none() { + panic::resume_unwind(e); + } + } + } + } +} + +fn main() { + run_test(|a| dynamic_init(a, false)); + run_test(|a| dynamic_init(a, true)); + run_test(|a| dynamic_drop(a, false)); + run_test(|a| dynamic_drop(a, true)); + + run_test(|a| assignment2(a, false, false)); + run_test(|a| assignment2(a, false, true)); + run_test(|a| assignment2(a, true, false)); + run_test(|a| assignment2(a, true, true)); + + run_test(|a| assignment1(a, false)); + run_test(|a| assignment1(a, true)); +}