From 4912df0266bb570008d1898546bc8cb028167edc Mon Sep 17 00:00:00 2001 From: Tyler Mandry Date: Wed, 22 May 2019 12:31:43 -0700 Subject: [PATCH] Generate StorageDead along unwind paths for generators --- src/librustc/ty/layout.rs | 2 +- src/librustc_mir/build/matches/mod.rs | 2 +- src/librustc_mir/build/scope.rs | 150 +++++++++++++++----------- 3 files changed, 88 insertions(+), 66 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 6415122dd39..8e2c3dd3d8a 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -666,7 +666,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { size, align, }); - debug!("generator layout: {:#?}", layout); + debug!("generator layout ({:?}): {:#?}", ty, layout); layout } diff --git a/src/librustc_mir/build/matches/mod.rs b/src/librustc_mir/build/matches/mod.rs index 8c2ef082c33..b5efe656c99 100644 --- a/src/librustc_mir/build/matches/mod.rs +++ b/src/librustc_mir/build/matches/mod.rs @@ -544,7 +544,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let place = Place::Base(PlaceBase::Local(local_id)); let var_ty = self.local_decls[local_id].ty; let region_scope = self.hir.region_scope_tree.var_scope(var.local_id); - self.schedule_drop(span, region_scope, &place, var_ty, DropKind::Storage); + self.schedule_drop(span, region_scope, &place, var_ty, DropKind::Storage { cached_block: CachedBlock::default() }); place } diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index 58339173c9e..780f2313304 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -88,7 +88,7 @@ use rustc::middle::region; use rustc::ty::Ty; use rustc::hir; use rustc::mir::*; -use syntax_pos::{Span, DUMMY_SP}; +use syntax_pos::{DUMMY_SP, Span}; use rustc_data_structures::fx::FxHashMap; use std::collections::hash_map::Entry; use std::mem; @@ -164,10 +164,8 @@ pub(crate) struct CachedBlock { #[derive(Debug)] pub(crate) enum DropKind { - Value { - 
cached_block: CachedBlock, - }, - Storage + Value { cached_block: CachedBlock }, + Storage { cached_block: CachedBlock }, } #[derive(Clone, Debug)] @@ -211,7 +209,7 @@ impl DropKind { fn may_panic(&self) -> bool { match *self { DropKind::Value { .. } => true, - DropKind::Storage => false + DropKind::Storage { .. } => false } } } @@ -225,25 +223,28 @@ impl<'tcx> Scope<'tcx> { /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`. /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current /// top-of-scope (as opposed to dependent scopes). - fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) { + fn invalidate_cache(&mut self, storage_only: bool, is_generator: bool, this_scope_only: bool) { // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions // with lots of `try!`? // cached exits drop storage and refer to the top-of-scope self.cached_exits.clear(); - if !storage_only { - // the current generator drop and unwind ignore - // storage but refer to top-of-scope - self.cached_generator_drop = None; + // the current generator drop and unwind refer to top-of-scope + self.cached_generator_drop = None; + + let ignore_unwinds = storage_only && !is_generator; + if !ignore_unwinds { self.cached_unwind.invalidate(); } - if !storage_only && !this_scope_only { + if !ignore_unwinds && !this_scope_only { for drop_data in &mut self.drops { - if let DropKind::Value { ref mut cached_block } = drop_data.kind { - cached_block.invalidate(); - } + let cached_block = match drop_data.kind { + DropKind::Storage { ref mut cached_block } => cached_block, + DropKind::Value { ref mut cached_block } => cached_block, + }; + cached_block.invalidate(); } } } @@ -388,6 +389,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { unpack!(block = build_scope_drops( &mut self.cfg, + self.is_generator, &scope, block, unwind_to, @@ -454,6 +456,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { 
unpack!(block = build_scope_drops( &mut self.cfg, + self.is_generator, scope, block, unwind_to, @@ -484,10 +487,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let result = block; while let Some(scope) = scopes.next() { - if !scope.needs_cleanup && !self.is_generator { - continue; - } - block = if let Some(b) = scope.cached_generator_drop { self.cfg.terminate(block, src_info, TerminatorKind::Goto { target: b }); @@ -508,6 +507,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { unpack!(block = build_scope_drops( &mut self.cfg, + self.is_generator, scope, block, unwind_to, @@ -644,7 +644,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ) { self.schedule_drop( span, region_scope, place, place_ty, - DropKind::Storage, + DropKind::Storage { + cached_block: CachedBlock::default(), + }, ); self.schedule_drop( span, region_scope, place, place_ty, @@ -672,7 +674,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let needs_drop = self.hir.needs_drop(place_ty); match drop_kind { DropKind::Value { .. } => if !needs_drop { return }, - DropKind::Storage => { + DropKind::Storage { .. } => { match *place { Place::Base(PlaceBase::Local(index)) => if index.index() <= self.arg_count { span_bug!( @@ -735,8 +737,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // Note that this code iterates scopes from the inner-most to the outer-most, // invalidating caches of each scope visited. This way bare minimum of the // caches gets invalidated. i.e., if a new drop is added into the middle scope, the - // cache of outer scope stays intact. - scope.invalidate_cache(!needs_drop, this_scope); + // cache of outer scope stays intact. + scope.invalidate_cache(!needs_drop, self.is_generator, this_scope); if this_scope { if let DropKind::Value { .. } = drop_kind { scope.needs_cleanup = true; @@ -797,6 +799,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // to left reading the cached results but never created anything. &#13;
// Find the last cached block + debug!("diverge_cleanup_gen(self.scopes = {:?})", self.scopes); let (mut target, first_uncached) = if let Some(cached_index) = self.scopes.iter() .rposition(|scope| scope.cached_unwind.get(generator_drop).is_some()) { (self.scopes[cached_index].cached_unwind.get(generator_drop).unwrap(), cached_index + 1) @@ -890,7 +893,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { assert_eq!(top_scope.region_scope, region_scope); top_scope.drops.clear(); - top_scope.invalidate_cache(false, true); + top_scope.invalidate_cache(false, self.is_generator, true); } /// Drops the single variable provided @@ -941,7 +944,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } } - top_scope.invalidate_cache(true, true); + top_scope.invalidate_cache(true, self.is_generator, true); } } @@ -949,13 +952,14 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Builds drops for pop_scope and exit_scope. fn build_scope_drops<'tcx>( cfg: &mut CFG<'tcx>, + is_generator: bool, scope: &Scope<'tcx>, mut block: BasicBlock, last_unwind_to: BasicBlock, arg_count: usize, generator_drop: bool, ) -> BlockAnd<()> { - debug!("build_scope_drops({:?} -> {:?}", block, scope); + debug!("build_scope_drops({:?} -> {:?})", block, scope); // Build up the drops in evaluation order. The end result will // look like: @@ -969,28 +973,20 @@ fn build_scope_drops<'tcx>( // The horizontal arrows represent the execution path when the drops return // successfully. The downwards arrows represent the execution path when the // drops panic (panicking while unwinding will abort, so there's no need for - // another set of arrows). The drops for the unwind path should have already - // been generated by `diverge_cleanup_gen`. + // another set of arrows). + // + // For generators, we unwind from a drop on a local to its StorageDead + // statement. For other functions we don't worry about StorageDead. The + // drops for the unwind path should have already been generated by + // `diverge_cleanup_gen`. 
- let mut unwind_blocks = scope.drops.iter().rev().filter_map(|drop_data| { - if let DropKind::Value { cached_block } = drop_data.kind { - Some(cached_block.get(generator_drop).unwrap_or_else(|| { - span_bug!(drop_data.span, "cached block not present?") - })) - } else { - None - } - }); - - // When we unwind from a drop, we start cleaning up from the next one, so - // we don't need this block. - unwind_blocks.next(); - - for drop_data in scope.drops.iter().rev() { + for drop_idx in (0..scope.drops.len()).rev() { + let drop_data = &scope.drops[drop_idx]; let source_info = scope.source_info(drop_data.span); match drop_data.kind { DropKind::Value { .. } => { - let unwind_to = unwind_blocks.next().unwrap_or(last_unwind_to); + let unwind_to = get_unwind_to(scope, is_generator, drop_idx, generator_drop) + .unwrap_or(last_unwind_to); let next = cfg.start_new_block(); cfg.terminate(block, source_info, TerminatorKind::Drop { @@ -1000,7 +996,7 @@ fn build_scope_drops<'tcx>( }); block = next; } - DropKind::Storage => { + DropKind::Storage { .. } => { // Drop the storage for both value and storage drops. // Only temps and vars need their storage dead. 
match drop_data.location { @@ -1018,6 +1014,31 @@ block.unit() } +fn get_unwind_to<'tcx>( + scope: &Scope<'tcx>, + is_generator: bool, + unwind_from: usize, + generator_drop: bool, +) -> Option<BasicBlock> { + for drop_idx in (0..unwind_from).rev() { + let drop_data = &scope.drops[drop_idx]; + match drop_data.kind { + DropKind::Storage { cached_block } if is_generator => { + return Some(cached_block.get(generator_drop).unwrap_or_else(|| { + span_bug!(drop_data.span, "cached block not present for {:?}", drop_data) + })); + } + DropKind::Value { cached_block } if !is_generator => { + return Some(cached_block.get(generator_drop).unwrap_or_else(|| { + span_bug!(drop_data.span, "cached block not present for {:?}", drop_data) + })); + } + _ => (), + } + } + None +} + fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>, span: Span, scope: &mut Scope<'tcx>, @@ -1051,6 +1072,7 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>, // Build up the drops. Here we iterate the vector in // *forward* order, so that we generate drops[0] first (right to // left in diagram above). + debug!("build_diverge_scope({:?})", scope.drops); for (j, drop_data) in scope.drops.iter_mut().enumerate() { debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data); // Only full value drops are emitted in the diverging path, @@ -1062,7 +1084,7 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>, // match the behavior of clang, but on inspection eddyb says // this is not what clang does. match drop_data.kind { - DropKind::Storage if is_generator => { + DropKind::Storage { ref mut cached_block } if is_generator => { // Only temps and vars need their storage dead. &#13;
match drop_data.location { Place::Base(PlaceBase::Local(index)) => { @@ -1070,11 +1092,22 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>, source_info: source_info(drop_data.span), kind: StatementKind::StorageDead(index) }); + if !target_built_by_us { + // We cannot add statements to an existing block, so we create a new + // block for our StorageDead statements. + let block = cfg.start_new_cleanup_block(); + let source_info = SourceInfo { span: DUMMY_SP, scope: source_scope }; + cfg.terminate(block, source_info, + TerminatorKind::Goto { target: target }); + target = block; + target_built_by_us = true; + } } _ => unreachable!(), }; + *cached_block.ref_mut(generator_drop) = Some(target); } - DropKind::Storage => {} + DropKind::Storage { .. } => {} DropKind::Value { ref mut cached_block } => { let cached_block = cached_block.ref_mut(generator_drop); target = if let Some(cached_block) = *cached_block { @@ -1082,8 +1115,7 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>, target_built_by_us = false; cached_block } else { - push_storage_deads( - cfg, &mut target, &mut storage_deads, target_built_by_us, source_scope); + push_storage_deads(cfg, target, &mut storage_deads); let block = cfg.start_new_cleanup_block(); cfg.terminate(block, source_info(drop_data.span), TerminatorKind::Drop { @@ -1098,7 +1130,7 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>, } }; } - push_storage_deads(cfg, &mut target, &mut storage_deads, target_built_by_us, source_scope); + push_storage_deads(cfg, target, &mut storage_deads); *scope.cached_unwind.ref_mut(generator_drop) = Some(target); assert!(storage_deads.is_empty()); @@ -1108,23 +1140,13 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>, } fn push_storage_deads(cfg: &mut CFG<'tcx>, - target: &mut BasicBlock, - storage_deads: &mut Vec<Statement<'tcx>>, - target_built_by_us: bool, - source_scope: SourceScope) { + target: BasicBlock, + storage_deads: &mut Vec<Statement<'tcx>>) { if storage_deads.is_empty() { return; } - if !target_built_by_us { - // We cannot &#13;
add statements to an existing block, so we create a new - // block for our StorageDead statements. - let block = cfg.start_new_cleanup_block(); - let source_info = SourceInfo { span: DUMMY_SP, scope: source_scope }; - cfg.terminate(block, source_info, TerminatorKind::Goto { target: *target }); - *target = block; - } - let statements = &mut cfg.block_data_mut(*target).statements; + let statements = &mut cfg.block_data_mut(target).statements; storage_deads.reverse(); debug!("push_storage_deads({:?}), storage_deads={:?}, statements={:?}", - *target, storage_deads, statements); + target, storage_deads, statements); storage_deads.append(statements); mem::swap(statements, storage_deads); assert!(storage_deads.is_empty());