diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs
index 1a8f8fbbb68d..2a11f24095b0 100644
--- a/src/librustc_mir/build/scope.rs
+++ b/src/librustc_mir/build/scope.rs
@@ -96,6 +96,7 @@ use rustc::hir::def_id::LOCAL_CRATE;
 use rustc::mir::*;
 use syntax_pos::{Span};
 use rustc_data_structures::fx::FxHashMap;
+use std::collections::hash_map::Entry;
 
 #[derive(Debug)]
 pub struct Scope<'tcx> {
@@ -224,7 +225,7 @@ impl<'tcx> Scope<'tcx> {
     /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
     /// larger extent of code.
     ///
-    /// `storage_only` controls whether to invalidate only drop paths run `StorageDead`.
+    /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
     /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
     /// top-of-scope (as opposed to dependent scopes).
     fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
@@ -242,8 +243,8 @@ impl<'tcx> Scope<'tcx> {
         }
 
         if !storage_only && !this_scope_only {
-            for dropdata in &mut self.drops {
-                if let DropKind::Value { ref mut cached_block } = dropdata.kind {
+            for drop_data in &mut self.drops {
+                if let DropKind::Value { ref mut cached_block } = drop_data.kind {
                     cached_block.invalidate();
                 }
             }
@@ -323,7 +324,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
             let parent_hir_id = tcx.hir.definitions().node_to_hir_id(
                 self.source_scope_local_data[source_scope].lint_root
-                );
+            );
             let current_hir_id = tcx.hir.definitions().node_to_hir_id(node_id);
             sets.lint_level_set(parent_hir_id) ==
@@ -333,7 +334,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         if !same_lint_scopes {
             self.source_scope = self.new_source_scope(region_scope.1.span, lint_level,
-                                                          None);
+                                                      None);
         }
     }
     self.push_scope(region_scope);
@@ -381,14 +382,18 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         let scope = self.scopes.pop().unwrap();
         assert_eq!(scope.region_scope, region_scope.0);
 
-        let resume_block = self.resume_block();
-        unpack!(block = build_scope_drops(&mut self.cfg,
-                                          resume_block,
-                                          &scope,
-                                          &self.scopes,
-                                          block,
-                                          self.arg_count,
-                                          false));
+        let unwind_to = self.scopes.last().and_then(|next_scope| {
+            next_scope.cached_unwind.get(false)
+        }).unwrap_or_else(|| self.resume_block());
+
+        unpack!(block = build_scope_drops(
+            &mut self.cfg,
+            &scope,
+            block,
+            unwind_to,
+            self.arg_count,
+            false,
+        ));
 
         block.unit()
     }
@@ -396,8 +401,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
 
     /// Branch out of `block` to `target`, exiting all scopes up to
     /// and including `region_scope`. This will insert whatever drops are
-    /// needed, as well as tracking this exit for the SEME region. See
-    /// module comment for details.
+    /// needed. See module comment for details.
     pub fn exit_scope(&mut self,
                       span: Span,
                       region_scope: (region::Scope, SourceInfo),
@@ -415,38 +419,51 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
 
         // If we are emitting a `drop` statement, we need to have the cached
         // diverge cleanup pads ready in case that drop panics.
-        let may_panic = self.scopes[(len - scope_count)..].iter()
-            .any(|s| s.drops.iter().any(|s| s.kind.may_panic()));
+        let may_panic = self.scopes[(len - scope_count)..].iter().any(|s| s.needs_cleanup);
         if may_panic {
             self.diverge_cleanup();
         }
 
-        {
-        let resume_block = self.resume_block();
-        let mut rest = &mut self.scopes[(len - scope_count)..];
-        while let Some((scope, rest_)) = {rest}.split_last_mut() {
-            rest = rest_;
-            block = if let Some(&e) = scope.cached_exits.get(&(target, region_scope.0)) {
-                self.cfg.terminate(block, scope.source_info(span),
-                                   TerminatorKind::Goto { target: e });
-                return;
-            } else {
-                let b = self.cfg.start_new_block();
-                self.cfg.terminate(block, scope.source_info(span),
-                                   TerminatorKind::Goto { target: b });
-                scope.cached_exits.insert((target, region_scope.0), b);
-                b
+        let mut scopes = self.scopes[(len - scope_count - 1)..].iter_mut().rev();
+        let mut scope = scopes.next().unwrap();
+        for next_scope in scopes {
+            if scope.drops.is_empty() {
+                scope = next_scope;
+                continue;
+            }
+            let source_info = scope.source_info(span);
+            block = match scope.cached_exits.entry((target, region_scope.0)) {
+                Entry::Occupied(e) => {
+                    self.cfg.terminate(block, source_info,
+                                       TerminatorKind::Goto { target: *e.get() });
+                    return;
+                }
+                Entry::Vacant(v) => {
+                    let b = self.cfg.start_new_block();
+                    self.cfg.terminate(block, source_info,
+                                       TerminatorKind::Goto { target: b });
+                    v.insert(b);
+                    b
+                }
             };
-            unpack!(block = build_scope_drops(&mut self.cfg,
-                                              resume_block,
-                                              scope,
-                                              rest,
-                                              block,
-                                              self.arg_count,
-                                              false));
-        }
+
+            let unwind_to = next_scope.cached_unwind.get(false).unwrap_or_else(|| {
+                debug_assert!(!may_panic, "cached block not present?");
+                START_BLOCK
+            });
+
+            unpack!(block = build_scope_drops(
+                &mut self.cfg,
+                scope,
+                block,
+                unwind_to,
+                self.arg_count,
+                false,
+            ));
+
+            scope = next_scope;
         }
+        let scope = &self.scopes[len - scope_count];
         self.cfg.terminate(block, scope.source_info(span),
                            TerminatorKind::Goto { target });
@@ -461,20 +478,20 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
             return None;
         }
 
-        // Fill in the cache
+        // Fill in the cache for unwinds
        self.diverge_cleanup_gen(true);
 
         let src_info = self.scopes[0].source_info(self.fn_span);
+        let resume_block = self.resume_block();
+        let mut scopes = self.scopes.iter_mut().rev().peekable();
         let mut block = self.cfg.start_new_block();
         let result = block;
-        let resume_block = self.resume_block();
-        let mut rest = &mut self.scopes[..];
-        while let Some((scope, rest_)) = {rest}.split_last_mut() {
-            rest = rest_;
+
+        while let Some(scope) = scopes.next() {
             if !scope.needs_cleanup {
                 continue;
             }
+
             block = if let Some(b) = scope.cached_generator_drop {
                 self.cfg.terminate(block, src_info,
                                    TerminatorKind::Goto { target: b });
@@ -487,13 +504,20 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                 b
             };
 
-            unpack!(block = build_scope_drops(&mut self.cfg,
-                                              resume_block,
-                                              scope,
-                                              rest,
-                                              block,
-                                              self.arg_count,
-                                              true));
+            let unwind_to = scopes.peek().as_ref().map(|scope| {
+                scope.cached_unwind.get(true).unwrap_or_else(|| {
+                    span_bug!(src_info.span, "cached block not present?")
+                })
+            }).unwrap_or(resume_block);
+
+            unpack!(block = build_scope_drops(
+                &mut self.cfg,
+                scope,
+                block,
+                unwind_to,
+                self.arg_count,
+                true,
+            ));
         }
 
         self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop);
@@ -503,9 +527,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
     }
 
     /// Creates a new source scope, nested in the current one.
     pub fn new_source_scope(&mut self,
-                                span: Span,
-                                lint_level: LintLevel,
-                                safety: Option<Safety>) -> SourceScope {
+                            span: Span,
+                            lint_level: LintLevel,
+                            safety: Option<Safety>) -> SourceScope {
         let parent = self.source_scope;
         debug!("new_source_scope({:?}, {:?}, {:?}) - parent({:?})={:?}",
                span, lint_level, safety,
@@ -742,8 +766,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
     /// Creates a path that performs all required cleanup for unwinding.
     ///
     /// This path terminates in Resume. Returns the start of the path.
-    /// See module comment for more details. None indicates there’s no
-    /// cleanup to do at this point.
+    /// See module comment for more details.
     pub fn diverge_cleanup(&mut self) -> BasicBlock {
         self.diverge_cleanup_gen(false)
     }
@@ -765,11 +788,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
     }
 
     fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> BasicBlock {
-        // To start, create the resume terminator.
-        let mut target = self.resume_block();
-
-        let Builder { ref mut cfg, ref mut scopes, .. } = *self;
-
         // Build up the drops in **reverse** order. The end result will
         // look like:
         //
@@ -781,11 +799,17 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         // store caches. If everything is cached, we'll just walk right
         // to left reading the cached results but never created anything.
 
-        if scopes.iter().any(|scope| scope.needs_cleanup) {
-            for scope in scopes.iter_mut() {
-                target = build_diverge_scope(cfg, scope.region_scope_span,
-                                             scope, target, generator_drop);
-            }
+        // Find the last cached block
+        let (mut target, first_uncached) = if let Some(cached_index) = self.scopes.iter()
+            .rposition(|scope| scope.cached_unwind.get(generator_drop).is_some()) {
+            (self.scopes[cached_index].cached_unwind.get(generator_drop).unwrap(), cached_index + 1)
+        } else {
+            (self.resume_block(), 0)
+        };
+
+        for scope in self.scopes[first_uncached..].iter_mut() {
+            target = build_diverge_scope(&mut self.cfg, scope.region_scope_span,
+                                         scope, target, generator_drop);
         }
 
         target
@@ -859,64 +883,62 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
 }
 
 /// Builds drops for pop_scope and exit_scope.
-fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
-                           resume_block: BasicBlock,
-                           scope: &Scope<'tcx>,
-                           earlier_scopes: &[Scope<'tcx>],
-                           mut block: BasicBlock,
-                           arg_count: usize,
-                           generator_drop: bool)
-                           -> BlockAnd<()> {
-    debug!("build_scope_drops({:?} -> {:?})", block, scope);
-    let mut iter = scope.drops.iter().rev();
-    while let Some(drop_data) = iter.next() {
+fn build_scope_drops<'tcx>(
+    cfg: &mut CFG<'tcx>,
+    scope: &Scope<'tcx>,
+    mut block: BasicBlock,
+    last_unwind_to: BasicBlock,
+    arg_count: usize,
+    generator_drop: bool,
+) -> BlockAnd<()> {
+    debug!("build_scope_drops({:?} -> {:?})", block, scope);
+
+    // Build up the drops in evaluation order. The end result will
+    // look like:
+    //
+    // [SDs, drops[n]] --..> [SDs, drop[1]] -> [SDs, drop[0]] -> [[SDs]]
+    //        |                    |                 |
+    //        :                    |                 |
+    //                             V                 V
+    // [drop[n]] -...-> [drop[1]] ------> [drop[0]] ------> [last_unwind_to]
+    //
+    // The horizontal arrows represent the execution path when the drops return
+    // successfully. The downwards arrows represent the execution path when the
+    // drops panic (panicking while unwinding will abort, so there's no need for
+    // another set of arrows). The drops for the unwind path should have already
+    // been generated by `diverge_cleanup_gen`.
+    //
+    // The code in this function reads from right to left.
+    // Storage dead drops have to be done left to right (since we can only push
+    // to the end of a Vec). So, we find the next drop and then call
+    // push_storage_deads which will iterate backwards through them so that
+    // they are added in the correct order.
+
+    let mut unwind_blocks = scope.drops.iter().rev().filter_map(|drop_data| {
+        if let DropKind::Value { cached_block } = drop_data.kind {
+            Some(cached_block.get(generator_drop).unwrap_or_else(|| {
+                span_bug!(drop_data.span, "cached block not present?")
+            }))
+        } else {
+            None
+        }
+    });
+
+    // When we unwind from a drop, we start cleaning up from the next one, so
+    // we don't need this block.
+    unwind_blocks.next();
+
+    for drop_data in scope.drops.iter().rev() {
         let source_info = scope.source_info(drop_data.span);
         match drop_data.kind {
             DropKind::Value { .. } => {
-                // Try to find the next block with its cached block for us to
-                // diverge into, either a previous block in this current scope or
-                // the top of the previous scope.
-                //
-                // If it wasn't for EndRegion, we could just chain all the DropData
-                // together and pick the first DropKind::Value. Please do that
-                // when we replace EndRegion with NLL.
-                let on_diverge = iter.clone().filter_map(|dd| {
-                    match dd.kind {
-                        DropKind::Value { cached_block } => Some(cached_block),
-                        DropKind::Storage => None
-                    }
-                }).next().or_else(|| {
-                    if earlier_scopes.iter().any(|scope| scope.needs_cleanup) {
-                        // If *any* scope requires cleanup code to be run,
-                        // we must use the cached unwind from the *topmost*
-                        // scope, to ensure all EndRegions from surrounding
-                        // scopes are executed before the drop code runs.
-                        Some(earlier_scopes.last().unwrap().cached_unwind)
-                    } else {
-                        // We don't need any further cleanup, so return None
-                        // to avoid creating a landing pad. We can skip
-                        // EndRegions because all local regions end anyway
-                        // when the function unwinds.
-                        //
-                        // This is an important optimization because LLVM is
-                        // terrible at optimizing landing pads. FIXME: I think
-                        // it would be cleaner and better to do this optimization
-                        // in SimplifyCfg instead of here.
-                        None
-                    }
-                });
-
-                let on_diverge = on_diverge.map(|cached_block| {
-                    cached_block.get(generator_drop).unwrap_or_else(|| {
-                        span_bug!(drop_data.span, "cached block not present?")
-                    })
-                });
+                let unwind_to = unwind_blocks.next().unwrap_or(last_unwind_to);
 
                 let next = cfg.start_new_block();
                 cfg.terminate(block, source_info, TerminatorKind::Drop {
                     location: drop_data.location.clone(),
                     target: next,
-                    unwind: Some(on_diverge.unwrap_or(resume_block))
+                    unwind: Some(unwind_to)
                 });
                 block = next;
             }
@@ -943,20 +965,17 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
     block.unit()
 }
 
-fn build_diverge_scope<'a, 'gcx, 'tcx>(cfg: &mut CFG<'tcx>,
-                                       span: Span,
-                                       scope: &mut Scope<'tcx>,
-                                       mut target: BasicBlock,
-                                       generator_drop: bool)
-                                       -> BasicBlock
+fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
+                             span: Span,
+                             scope: &mut Scope<'tcx>,
+                             mut target: BasicBlock,
+                             generator_drop: bool)
+                             -> BasicBlock
 {
     // Build up the drops in **reverse** order. The end result will
     // look like:
     //
-    //    [EndRegion Block] -> [drops[n]] -...-> [drops[0]] -> [Free] -> [target]
-    //    |                                                              |
-    //    +------------------------------------------------------------+
-    //     code for scope
+    //    [drops[n]] -...-> [drops[0]] -> [target]
     //
     // The code in this function reads from right to left. At each
     // point, we check for cached blocks representing the
@@ -1001,20 +1020,7 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(cfg: &mut CFG<'tcx>,
         };
     }
 
-    // Finally, push the EndRegion block, used by mir-borrowck, and set
-    // `cached_unwind` to point to it (Block becomes trivial goto after
-    // pass that removes all EndRegions).
-    target = {
-        let cached_block = scope.cached_unwind.ref_mut(generator_drop);
-        if let Some(cached_block) = *cached_block {
-            cached_block
-        } else {
-            let block = cfg.start_new_cleanup_block();
-            cfg.terminate(block, source_info(span), TerminatorKind::Goto { target });
-            *cached_block = Some(block);
-            block
-        }
-    };
+    *scope.cached_unwind.ref_mut(generator_drop) = Some(target);
 
     debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target);
diff --git a/src/test/mir-opt/issue-49232.rs b/src/test/mir-opt/issue-49232.rs
index f9024b670633..d8365c8c9cc2 100644
--- a/src/test/mir-opt/issue-49232.rs
+++ b/src/test/mir-opt/issue-49232.rs
@@ -44,7 +44,7 @@ fn main() {
 //         falseUnwind -> [real: bb3, cleanup: bb4];
 //     }
 //     bb2: {
-//         goto -> bb29;
+//         goto -> bb20;
 //     }
 //     bb3: {
 //         StorageLive(_2);
@@ -90,58 +90,31 @@ fn main() {
 //         StorageDead(_3);
 //         StorageLive(_6);
 //         _6 = &_2;
-//         _5 = const std::mem::drop(move _6) -> [return: bb28, unwind: bb4];
+//         _5 = const std::mem::drop(move _6) -> [return: bb19, unwind: bb4];
 //     }
 //     bb15: {
+//         StorageDead(_3);
 //         goto -> bb16;
 //     }
 //     bb16: {
-//         goto -> bb17;
-//     }
-//     bb17: {
-//         goto -> bb18;
-//     }
-//     bb18: {
-//         goto -> bb19;
-//     }
-//     bb19: {
-//         goto -> bb20;
-//     }
-//     bb20: {
-//         StorageDead(_3);
-//         goto -> bb21;
-//     }
-//     bb21: {
-//         goto -> bb22;
-//     }
-//     bb22: {
 //         StorageDead(_2);
-//         goto -> bb23;
-//     }
-//     bb23: {
-//         goto -> bb24;
-//     }
-//     bb24: {
-//         goto -> bb25;
-//     }
-//     bb25: {
 //         goto -> bb2;
 //     }
-//     bb26: {
+//     bb17: {
 //         _4 = ();
 //         unreachable;
 //     }
-//     bb27: {
+//     bb18: {
 //         StorageDead(_4);
 //         goto -> bb14;
 //     }
-//     bb28: {
+//     bb19: {
 //         StorageDead(_6);
 //         _1 = ();
 //         StorageDead(_2);
 //         goto -> bb1;
 //     }
-//     bb29: {
+//     bb20: {
 //         return;
 //     }
 // }
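
Reviewer note: the `exit_scope` rewrite above replaces the separate `get`/`insert` pair on `cached_exits` with a single `Entry` lookup. Here is a minimal, self-contained sketch of that caching pattern; the `Cfg`, `BasicBlock`, and `cached_exit` names below are illustrative stand-ins, not rustc's actual types:

```rust
use std::collections::hash_map::{Entry, HashMap};

// Illustrative stand-ins for rustc's CFG and BasicBlock types.
#[derive(Copy, Clone, Debug, PartialEq)]
struct BasicBlock(usize);

struct Cfg {
    num_blocks: usize,
}

impl Cfg {
    fn start_new_block(&mut self) -> BasicBlock {
        let b = BasicBlock(self.num_blocks);
        self.num_blocks += 1;
        b
    }
}

// One lookup: reuse the cached exit block for this key, or create and
// cache a new block on first use.
fn cached_exit(
    cfg: &mut Cfg,
    cache: &mut HashMap<(usize, usize), BasicBlock>,
    key: (usize, usize),
) -> BasicBlock {
    match cache.entry(key) {
        Entry::Occupied(e) => *e.get(),
        Entry::Vacant(v) => *v.insert(cfg.start_new_block()),
    }
}

fn main() {
    let mut cfg = Cfg { num_blocks: 0 };
    let mut cache = HashMap::new();
    let first = cached_exit(&mut cfg, &mut cache, (0, 0));
    let again = cached_exit(&mut cfg, &mut cache, (0, 0));
    assert_eq!(first, again); // second call hits the cache
}
```

Besides being shorter, the `Entry` form hashes the key once and makes the occupied/vacant cases explicit, which is why the patch can `return` directly from the occupied arm.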
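Reviewer note: the restructured `diverge_cleanup_gen` reuses the innermost already-cached unwind block and rebuilds only the scopes past it. A rough sketch of that suffix-memoization idea, under the assumption that each scope caches the entry block of its unwind path; `Scope`, `diverge_cleanup`, and `new_cleanup_block` here are hypothetical stand-ins:

```rust
// Illustrative stand-in: rustc's Scope<'tcx> caches real BasicBlocks;
// here a scope just memoizes the entry block of its unwind path.
#[derive(Default)]
struct Scope {
    cached_unwind: Option<usize>,
}

/// Returns the entry block of the unwind path for the innermost scope,
/// reusing cached blocks and building only the uncached suffix.
fn diverge_cleanup(
    scopes: &mut [Scope], // outermost scope first, as in the builder
    resume_block: usize,
    mut new_cleanup_block: impl FnMut() -> usize,
) -> usize {
    // Find the innermost scope whose unwind block is already cached;
    // that block and everything it chains to can be reused as-is.
    let (mut target, first_uncached) =
        match scopes.iter().rposition(|s| s.cached_unwind.is_some()) {
            Some(i) => (scopes[i].cached_unwind.unwrap(), i + 1),
            None => (resume_block, 0),
        };

    // Build blocks only for the scopes past the cached prefix, chaining
    // each new block to the previous `target` and caching it for next time.
    for scope in &mut scopes[first_uncached..] {
        let block = new_cleanup_block(); // would terminate with Goto { target }
        scope.cached_unwind = Some(block);
        target = block;
    }
    target
}

fn main() {
    let mut scopes: Vec<Scope> = (0..3).map(|_| Scope::default()).collect();
    let mut next = 0;
    let first = diverge_cleanup(&mut scopes, 0, || { next += 1; next });
    // A second request finds every block cached and builds nothing new.
    let second = diverge_cleanup(&mut scopes, 0, || { next += 1; next });
    assert_eq!(first, second);
}
```

This is also why `invalidate_cache` matters: whenever a drop is pushed into an enclosing scope, the cached suffix is stale and must be cleared so the next call rebuilds it.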