diff --git a/src/librustc_mir/dataflow/move_paths/builder.rs b/src/librustc_mir/dataflow/move_paths/builder.rs index 57aa5de7f7a31..b65999a746cbd 100644 --- a/src/librustc_mir/dataflow/move_paths/builder.rs +++ b/src/librustc_mir/dataflow/move_paths/builder.rs @@ -361,6 +361,7 @@ impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> { fn gather_terminator(&mut self, term: &Terminator<'tcx>) { match term.kind { TerminatorKind::Goto { target: _ } + | TerminatorKind::Return | TerminatorKind::Resume | TerminatorKind::Abort | TerminatorKind::GeneratorDrop @@ -368,10 +369,6 @@ impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> { | TerminatorKind::FalseUnwind { .. } | TerminatorKind::Unreachable => {} - TerminatorKind::Return => { - self.gather_move(&Place::return_place()); - } - TerminatorKind::Assert { ref cond, .. } => { self.gather_operand(cond); } diff --git a/src/librustc_mir/util/elaborate_drops.rs b/src/librustc_mir/util/elaborate_drops.rs index fab64e37cbc5a..e0bb04a9c7023 100644 --- a/src/librustc_mir/util/elaborate_drops.rs +++ b/src/librustc_mir/util/elaborate_drops.rs @@ -163,8 +163,6 @@ where .patch_terminator(bb, TerminatorKind::Goto { target: self.succ }); } DropStyle::Static => { - let loc = self.terminator_loc(bb); - self.elaborator.clear_drop_flag(loc, self.path, DropFlagMode::Deep); self.elaborator.patch().patch_terminator( bb, TerminatorKind::Drop { @@ -175,9 +173,7 @@ where ); } DropStyle::Conditional => { - let unwind = self.unwind; // FIXME(#43234) - let succ = self.succ; - let drop_bb = self.complete_drop(Some(DropFlagMode::Deep), succ, unwind); + let drop_bb = self.complete_drop(self.succ, self.unwind); self.elaborator .patch() .patch_terminator(bb, TerminatorKind::Goto { target: drop_bb }); @@ -249,7 +245,7 @@ where // our own drop flag. path: self.path, } - .complete_drop(None, succ, unwind) + .complete_drop(succ, unwind) } } @@ -280,13 +276,7 @@ where // Clear the "master" drop flag at the end. This is needed // because the "master" drop protects the ADT's discriminant, // which is invalidated after the ADT is dropped. - let (succ, unwind) = (self.succ, self.unwind); // FIXME(#43234) - ( - self.drop_flag_reset_block(DropFlagMode::Shallow, succ, unwind), - unwind.map(|unwind| { - self.drop_flag_reset_block(DropFlagMode::Shallow, unwind, Unwind::InCleanup) - }), - ) + (self.drop_flag_reset_block(DropFlagMode::Shallow, self.succ, self.unwind), self.unwind) } /// Creates a full drop ladder, consisting of 2 connected half-drop-ladders @@ -813,11 +803,7 @@ where self.open_drop_for_adt(def, substs) } } - ty::Dynamic(..) => { - let unwind = self.unwind; // FIXME(#43234) - let succ = self.succ; - self.complete_drop(Some(DropFlagMode::Deep), succ, unwind) - } + ty::Dynamic(..) => self.complete_drop(self.succ, self.unwind), ty::Array(ety, size) => { let size = size.try_eval_usize(self.tcx(), self.elaborator.param_env()); self.open_drop_for_array(ety, size) @@ -829,26 +815,14 @@ where } /// Returns a basic block that drop a place using the context - /// and path in `c`. If `mode` is something, also clear `c` - /// according to it. + /// and path in `c`. 
/// /// if FLAG(self.path) - /// if let Some(mode) = mode: FLAG(self.path)[mode] = false /// drop(self.place) - fn complete_drop( - &mut self, - drop_mode: Option, - succ: BasicBlock, - unwind: Unwind, - ) -> BasicBlock { - debug!("complete_drop({:?},{:?})", self, drop_mode); + fn complete_drop(&mut self, succ: BasicBlock, unwind: Unwind) -> BasicBlock { + debug!("complete_drop(succ={:?}, unwind={:?})", succ, unwind); let drop_block = self.drop_block(succ, unwind); - let drop_block = if let Some(mode) = drop_mode { - self.drop_flag_reset_block(mode, drop_block, unwind) - } else { - drop_block - }; self.drop_flag_test_block(drop_block, succ, unwind) } @@ -861,6 +835,11 @@ where ) -> BasicBlock { debug!("drop_flag_reset_block({:?},{:?})", self, mode); + if unwind.is_cleanup() { + // The drop flag isn't read again on the unwind path, so don't + // bother setting it. + return succ; + } let block = self.new_block(unwind, TerminatorKind::Goto { target: succ }); let block_start = Location { block: block, statement_index: 0 }; self.elaborator.clear_drop_flag(block_start, self.path, mode); @@ -964,11 +943,6 @@ where self.elaborator.patch().new_temp(ty, self.source_info.span) } - fn terminator_loc(&mut self, bb: BasicBlock) -> Location { - let body = self.elaborator.body(); - self.elaborator.patch().terminator_loc(body, bb) - } - fn constant_usize(&self, val: u16) -> Operand<'tcx> { Operand::Constant(box Constant { span: self.source_info.span, diff --git a/src/librustc_mir/util/graphviz.rs b/src/librustc_mir/util/graphviz.rs index 8291bc958808d..3727f4a1acd48 100644 --- a/src/librustc_mir/util/graphviz.rs +++ b/src/librustc_mir/util/graphviz.rs @@ -97,12 +97,17 @@ where write!(w, r#""#)?; // Basic block number at the top. + let (blk, color) = if data.is_cleanup { + (format!("{} (cleanup)", block.index()), "lightblue") + } else { + (format!("{}", block.index()), "gray") + }; write!( w, - r#""#, - attrs = r#"bgcolor="gray" align="center""#, + r#""#, colspan = num_cols, - blk = block.index() + blk = blk, + color = color )?; init(w)?; diff --git a/src/librustc_mir_build/build/block.rs b/src/librustc_mir_build/build/block.rs index df5526ad76281..2f16693c61efb 100644 --- a/src/librustc_mir_build/build/block.rs +++ b/src/librustc_mir_build/build/block.rs @@ -2,6 +2,7 @@ use crate::build::matches::ArmHasGuard; use crate::build::ForGuard::OutsideGuard; use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder}; use crate::hair::*; +use rustc::middle::region; use rustc::mir::*; use rustc_hir as hir; use rustc_span::Span; @@ -10,6 +11,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { crate fn ast_block( &mut self, destination: &Place<'tcx>, + scope: Option, block: BasicBlock, ast_block: &'tcx hir::Block<'tcx>, source_info: SourceInfo, @@ -26,16 +28,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { self.in_opt_scope(opt_destruction_scope.map(|de| (de, source_info)), move |this| { this.in_scope((region_scope, source_info), LintLevel::Inherited, move |this| { if targeted_by_break { - // This is a `break`-able block - let exit_block = this.cfg.start_new_block(); - let block_exit = - this.in_breakable_scope(None, exit_block, destination.clone(), |this| { - this.ast_block_stmts(destination, block, span, stmts, expr, safety_mode) - }); - this.cfg.goto(unpack!(block_exit), source_info, exit_block); - exit_block.unit() + this.in_breakable_scope(None, destination.clone(), scope, span, |this| { + Some(this.ast_block_stmts( + destination, + scope, + block, + span, + stmts, + expr, + safety_mode, + )) + }) } else { - 
this.ast_block_stmts(destination, block, span, stmts, expr, safety_mode) + this.ast_block_stmts(destination, scope, block, span, stmts, expr, safety_mode) } }) }) @@ -44,6 +49,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { fn ast_block_stmts( &mut self, destination: &Place<'tcx>, + scope: Option, mut block: BasicBlock, span: Span, stmts: Vec>, @@ -175,7 +181,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { destination_ty.is_unit() || this.block_context.currently_ignores_tail_results(); this.block_context.push(BlockFrame::TailExpr { tail_result_is_ignored }); - unpack!(block = this.into(destination, block, expr)); + unpack!(block = this.into(destination, scope, block, expr)); let popped = this.block_context.pop(); assert!(popped.map_or(false, |bf| bf.is_tail_expr())); diff --git a/src/librustc_mir_build/build/expr/as_rvalue.rs b/src/librustc_mir_build/build/expr/as_rvalue.rs index dc97f321a36ad..fb1b22ad14cfc 100644 --- a/src/librustc_mir_build/build/expr/as_rvalue.rs +++ b/src/librustc_mir_build/build/expr/as_rvalue.rs @@ -111,12 +111,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let box_ = Rvalue::NullaryOp(NullOp::Box, value.ty); this.cfg.push_assign(block, source_info, &Place::from(result), box_); - // initialize the box contents: + // Initialize the box contents. No scope is needed since the + // `Box` is already scheduled to be dropped. unpack!( block = this.into( &this.hir.tcx().mk_place_deref(Place::from(result)), + None, block, - value + value, ) ); block.and(Rvalue::Use(Operand::Move(Place::from(result)))) diff --git a/src/librustc_mir_build/build/expr/as_temp.rs b/src/librustc_mir_build/build/expr/as_temp.rs index 34dd10cbc0fc8..c17fd6ce96179 100644 --- a/src/librustc_mir_build/build/expr/as_temp.rs +++ b/src/librustc_mir_build/build/expr/as_temp.rs @@ -100,11 +100,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - unpack!(block = this.into(temp_place, block, expr)); - - if let Some(temp_lifetime) = temp_lifetime { - this.schedule_drop(expr_span, temp_lifetime, temp, DropKind::Value); - } + unpack!(block = this.into(temp_place, temp_lifetime, block, expr)); block.and(temp) } diff --git a/src/librustc_mir_build/build/expr/into.rs b/src/librustc_mir_build/build/expr/into.rs index 4583e244f493d..7b556881816f9 100644 --- a/src/librustc_mir_build/build/expr/into.rs +++ b/src/librustc_mir_build/build/expr/into.rs @@ -1,8 +1,10 @@ //! See docs in build/expr/mod.rs use crate::build::expr::category::{Category, RvalueFunc}; +use crate::build::scope::DropKind; use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder}; use crate::hair::*; +use rustc::middle::region; use rustc::mir::*; use rustc::ty::{self, CanonicalUserTypeAnnotation}; use rustc_data_structures::fx::FxHashMap; @@ -14,13 +16,19 @@ use rustc_target::spec::abi::Abi; impl<'a, 'tcx> Builder<'a, 'tcx> { /// Compile `expr`, storing the result into `destination`, which /// is assumed to be uninitialized. + /// If a `drop_scope` is provided, `destination` is scheduled to be dropped + /// in `scope` once it has been initialized. 
crate fn into_expr( &mut self, destination: &Place<'tcx>, + scope: Option, mut block: BasicBlock, expr: Expr<'tcx>, ) -> BlockAnd<()> { - debug!("into_expr(destination={:?}, block={:?}, expr={:?})", destination, block, expr); + debug!( + "into_expr(destination={:?}, scope={:?}, block={:?}, expr={:?})", + destination, scope, block, expr + ); // since we frequently have to reference `self` from within a // closure, where `self` would be shadowed, it's easier to @@ -35,6 +43,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { _ => false, }; + let schedule_drop = move |this: &mut Self| { + if let Some(drop_scope) = scope { + let local = + destination.as_local().expect("cannot schedule drop of non-Local place"); + this.schedule_drop(expr_span, drop_scope, local, DropKind::Value); + } + }; + if !expr_is_block_or_scope { this.block_context.push(BlockFrame::SubExpr); } @@ -42,13 +58,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let block_and = match expr.kind { ExprKind::Scope { region_scope, lint_level, value } => { let region_scope = (region_scope, source_info); - this.in_scope(region_scope, lint_level, |this| this.into(destination, block, value)) + this.in_scope(region_scope, lint_level, |this| { + this.into(destination, scope, block, value) + }) } ExprKind::Block { body: ast_block } => { - this.ast_block(destination, block, ast_block, source_info) + this.ast_block(destination, scope, block, ast_block, source_info) } ExprKind::Match { scrutinee, arms } => { - this.match_expr(destination, expr_span, block, scrutinee, arms) + this.match_expr(destination, scope, expr_span, block, scrutinee, arms) } ExprKind::NeverToAny { source } => { let source = this.hir.mirror(source); @@ -63,6 +81,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // This is an optimization. If the expression was a call then we already have an // unreachable block. Don't bother to terminate it and create a new one. + schedule_drop(this); if is_call { block.unit() } else { @@ -134,37 +153,40 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // body, even when the exact code in the body cannot unwind let loop_block = this.cfg.start_new_block(); - let exit_block = this.cfg.start_new_block(); // Start the loop. this.cfg.goto(block, source_info, loop_block); this.in_breakable_scope( Some(loop_block), - exit_block, destination.clone(), + scope, + expr_span, move |this| { // conduct the test, if necessary let body_block = this.cfg.start_new_block(); - let diverge_cleanup = this.diverge_cleanup(); + this.diverge_from(loop_block); this.cfg.terminate( loop_block, source_info, - TerminatorKind::FalseUnwind { - real_target: body_block, - unwind: Some(diverge_cleanup), - }, + TerminatorKind::FalseUnwind { real_target: body_block, unwind: None }, ); // The “return” value of the loop body must always be an unit. We therefore // introduce a unit temporary as the destination for the loop body. let tmp = this.get_unit_temp(); // Execute the body, branching back to the test. - let body_block_end = unpack!(this.into(&tmp, body_block, body)); - this.cfg.goto(body_block_end, source_info, loop_block); + // No scope is provided, since we've scheduled the drop above. 
+ let body_block_end = unpack!(this.into(&tmp, None, body_block, body)); + this.cfg.terminate( + body_block_end, + source_info, + TerminatorKind::Goto { target: loop_block }, + ); + schedule_drop(this); + None }, - ); - exit_block.unit() + ) } ExprKind::Call { ty, fun, args, from_hir_call } => { let intrinsic = match ty.kind { @@ -202,8 +224,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { is_block_tail: None, }); let ptr_temp = Place::from(ptr_temp); - let block = unpack!(this.into(&ptr_temp, block, ptr)); - this.into(&this.hir.tcx().mk_place_deref(ptr_temp), block, val) + // No need for a scope, ptr_temp doesn't need drop + let block = unpack!(this.into(&ptr_temp, None, block, ptr)); + // Maybe we should provide a scope here so that + // `move_val_init` wouldn't leak on panic even with an + // arbitrary `val` expression, but `schedule_drop`, + // borrowck and drop elaboration all prevent us from + // dropping `ptr_temp.deref()`. + this.into(&this.hir.tcx().mk_place_deref(ptr_temp), None, block, val) } else { let args: Vec<_> = args .into_iter() @@ -211,17 +239,17 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { .collect(); let success = this.cfg.start_new_block(); - let cleanup = this.diverge_cleanup(); this.record_operands_moved(&args); + this.diverge_from(block); this.cfg.terminate( block, source_info, TerminatorKind::Call { func: fun, args, - cleanup: Some(cleanup), + cleanup: None, // FIXME(varkor): replace this with an uninhabitedness-based check. // This requires getting access to the current module to call // `tcx.is_ty_uninhabited_from`, which is currently tricky to do. @@ -233,10 +261,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { from_hir_call, }, ); + schedule_drop(this); success.unit() } } - ExprKind::Use { source } => this.into(destination, block, source), + ExprKind::Use { source } => this.into(destination, scope, block, source), ExprKind::Borrow { arg, borrow_kind } => { // We don't do this in `as_rvalue` because we use `as_place` // for borrow expressions, so we cannot create an `RValue` that @@ -320,6 +349,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { destination, Rvalue::Aggregate(adt, fields), ); + schedule_drop(this); block.unit() } @@ -345,6 +375,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let place = unpack!(block = this.as_place(block, expr)); let rvalue = Rvalue::Use(this.consume_by_copy_or_move(place)); this.cfg.push_assign(block, source_info, destination, rvalue); + schedule_drop(this); block.unit() } ExprKind::Index { .. } | ExprKind::Deref { .. } | ExprKind::Field { .. 
} => { @@ -362,6 +393,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let place = unpack!(block = this.as_place(block, expr)); let rvalue = Rvalue::Use(this.consume_by_copy_or_move(place)); this.cfg.push_assign(block, source_info, destination, rvalue); + schedule_drop(this); block.unit() } @@ -369,17 +401,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let scope = this.local_scope(); let value = unpack!(block = this.as_operand(block, scope, value)); let resume = this.cfg.start_new_block(); - let cleanup = this.generator_drop_cleanup(); + this.generator_drop_cleanup(block); this.cfg.terminate( block, source_info, - TerminatorKind::Yield { - value, - resume, - resume_arg: *destination, - drop: cleanup, - }, + TerminatorKind::Yield { value, resume, resume_arg: *destination, drop: None }, ); + schedule_drop(this); resume.unit() } @@ -409,6 +437,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let rvalue = unpack!(block = this.as_local_rvalue(block, expr)); this.cfg.push_assign(block, source_info, destination, rvalue); + schedule_drop(this); block.unit() } }; diff --git a/src/librustc_mir_build/build/into.rs b/src/librustc_mir_build/build/into.rs index 1a2a9d2bc05fc..117e445fac367 100644 --- a/src/librustc_mir_build/build/into.rs +++ b/src/librustc_mir_build/build/into.rs @@ -6,6 +6,7 @@ use crate::build::{BlockAnd, Builder}; use crate::hair::*; +use rustc::middle::region; use rustc::mir::*; pub(in crate::build) trait EvalInto<'tcx> { @@ -13,6 +14,7 @@ pub(in crate::build) trait EvalInto<'tcx> { self, builder: &mut Builder<'_, 'tcx>, destination: &Place<'tcx>, + scope: Option, block: BasicBlock, ) -> BlockAnd<()>; } @@ -21,13 +23,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { crate fn into( &mut self, destination: &Place<'tcx>, + scope: Option, block: BasicBlock, expr: E, ) -> BlockAnd<()> where E: EvalInto<'tcx>, { - expr.eval_into(self, destination, block) + expr.eval_into(self, destination, scope, block) } } @@ -36,10 +39,11 @@ impl<'tcx> EvalInto<'tcx> for ExprRef<'tcx> { self, builder: &mut Builder<'_, 'tcx>, destination: &Place<'tcx>, + scope: Option, block: BasicBlock, ) -> BlockAnd<()> { let expr = builder.hir.mirror(self); - builder.into_expr(destination, block, expr) + builder.into_expr(destination, scope, block, expr) } } @@ -48,8 +52,9 @@ impl<'tcx> EvalInto<'tcx> for Expr<'tcx> { self, builder: &mut Builder<'_, 'tcx>, destination: &Place<'tcx>, + scope: Option, block: BasicBlock, ) -> BlockAnd<()> { - builder.into_expr(destination, block, self) + builder.into_expr(destination, scope, block, self) } } diff --git a/src/librustc_mir_build/build/matches/mod.rs b/src/librustc_mir_build/build/matches/mod.rs index 7995125524314..0679821c5908c 100644 --- a/src/librustc_mir_build/build/matches/mod.rs +++ b/src/librustc_mir_build/build/matches/mod.rs @@ -84,6 +84,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { crate fn match_expr( &mut self, destination: &Place<'tcx>, + destination_scope: Option, span: Span, mut block: BasicBlock, scrutinee: ExprRef<'tcx>, @@ -104,6 +105,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { self.lower_match_arms( destination, + destination_scope, scrutinee_place, scrutinee_span, arm_candidates, @@ -210,81 +212,17 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - /// Lower the bindings, guards and arm bodies of a `match` expression. - /// - /// The decision tree should have already been created - /// (by [Builder::lower_match_tree]). - /// - /// `outer_source_info` is the SourceInfo for the whole match. 
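In surface terms, the arm-lowering code in this file has to preserve the following single-drop behaviour (illustrative `Token` type): a by-value binding moves out of the scrutinee, so only the binding is dropped at the end of the arm.

```rust
struct Token(&'static str);

impl Drop for Token {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn main() {
    match Some(Token("bound value")) {
        Some(tok) => {
            // `tok` was moved out of the scrutinee by the by-value binding,
            // so only this binding is dropped at the end of the arm; the
            // scrutinee is not dropped a second time.
            println!("in arm with {}", tok.0);
        }
        None => println!("no token"),
    }
    println!("after the match");
}
```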
- fn lower_match_arms( - &mut self, - destination: &Place<'tcx>, - scrutinee_place: Place<'tcx>, - scrutinee_span: Span, - arm_candidates: Vec<(&'_ Arm<'tcx>, Candidate<'_, 'tcx>)>, - outer_source_info: SourceInfo, - fake_borrow_temps: Vec<(Place<'tcx>, Local)>, - ) -> BlockAnd<()> { - let match_scope = self.scopes.topmost(); - - let arm_end_blocks: Vec<_> = arm_candidates - .into_iter() - .map(|(arm, candidate)| { - debug!("lowering arm {:?}\ncanidate = {:?}", arm, candidate); - - let arm_source_info = self.source_info(arm.span); - let arm_scope = (arm.scope, arm_source_info); - self.in_scope(arm_scope, arm.lint_level, |this| { - let body = this.hir.mirror(arm.body.clone()); - let scope = this.declare_bindings( - None, - arm.span, - &arm.pattern, - ArmHasGuard(arm.guard.is_some()), - Some((Some(&scrutinee_place), scrutinee_span)), - ); - - let arm_block = this.bind_pattern( - outer_source_info, - candidate, - arm.guard.as_ref().map(|g| (g, match_scope)), - &fake_borrow_temps, - scrutinee_span, - Some(arm.scope), - ); - - if let Some(source_scope) = scope { - this.source_scope = source_scope; - } - - this.into(destination, arm_block, body) - }) - }) - .collect(); - - // all the arm blocks will rejoin here - let end_block = self.cfg.start_new_block(); - - for arm_block in arm_end_blocks { - self.cfg.goto(unpack!(arm_block), outer_source_info, end_block); - } - - self.source_scope = outer_source_info.scope; - - end_block.unit() - } - /// Binds the variables and ascribes types for a given `match` arm or /// `let` binding. /// /// Also check if the guard matches, if it's provided. /// `arm_scope` should be `Some` if and only if this is called for a /// `match` arm. - fn bind_pattern( + crate fn bind_pattern( &mut self, outer_source_info: SourceInfo, candidate: Candidate<'_, 'tcx>, - guard: Option<(&Guard<'tcx>, region::Scope)>, + guard: Option<&Guard<'tcx>>, fake_borrow_temps: &Vec<(Place<'tcx>, Local)>, scrutinee_span: Span, arm_scope: Option, @@ -364,13 +302,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { PatKind::Binding { mode: BindingMode::ByValue, var, subpattern: None, .. } => { let place = self.storage_live_binding(block, var, irrefutable_pat.span, OutsideGuard, true); - unpack!(block = self.into(&place, block, initializer)); + let region_scope = self.hir.region_scope_tree.var_scope(var.local_id); + + unpack!(block = self.into(&place, Some(region_scope), block, initializer)); // Inject a fake read, see comments on `FakeReadCause::ForLet`. let source_info = self.source_info(irrefutable_pat.span); self.cfg.push_fake_read(block, source_info, FakeReadCause::ForLet, place); - self.schedule_drop_for_binding(var, irrefutable_pat.span, OutsideGuard); block.unit() } @@ -397,9 +336,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ascription: hair::pattern::Ascription { user_ty: pat_ascription_ty, variance: _, user_ty_span }, } => { + let region_scope = self.hir.region_scope_tree.var_scope(var.local_id); let place = self.storage_live_binding(block, var, irrefutable_pat.span, OutsideGuard, true); - unpack!(block = self.into(&place, block, initializer)); + unpack!(block = self.into(&place, Some(region_scope), block, initializer)); // Inject a fake read, see comments on `FakeReadCause::ForLet`. 
let pattern_source_info = self.source_info(irrefutable_pat.span); @@ -437,7 +377,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { }, ); - self.schedule_drop_for_binding(var, irrefutable_pat.span, OutsideGuard); block.unit() } @@ -656,7 +595,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } #[derive(Debug)] -struct Candidate<'pat, 'tcx> { +pub(super) struct Candidate<'pat, 'tcx> { /// `Span` of the original pattern that gave rise to this candidate span: Span, @@ -1561,7 +1500,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { &mut self, candidate: Candidate<'pat, 'tcx>, parent_bindings: &[(Vec>, Vec>)], - guard: Option<(&Guard<'tcx>, region::Scope)>, + guard: Option<&Guard<'tcx>>, fake_borrows: &Vec<(Place<'tcx>, Local)>, scrutinee_span: Span, schedule_drops: bool, @@ -1673,7 +1612,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // the reference that we create for the arm. // * So we eagerly create the reference for the arm and then take a // reference to that. - if let Some((guard, region_scope)) = guard { + if let Some(guard) = guard { let tcx = self.hir.tcx(); let bindings = parent_bindings .iter() @@ -1717,12 +1656,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { unreachable }); let outside_scope = self.cfg.start_new_block(); - self.exit_scope( - source_info.span, - region_scope, - otherwise_post_guard_block, - outside_scope, - ); + self.exit_top_scope(otherwise_post_guard_block, outside_scope, source_info); self.false_edges( outside_scope, otherwise_block, diff --git a/src/librustc_mir_build/build/matches/test.rs b/src/librustc_mir_build/build/matches/test.rs index 1acfa7dddbe1f..c8a63cb25f862 100644 --- a/src/librustc_mir_build/build/matches/test.rs +++ b/src/librustc_mir_build/build/matches/test.rs @@ -426,7 +426,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let bool_ty = self.hir.bool_ty(); let eq_result = self.temp(bool_ty, source_info.span); let eq_block = self.cfg.start_new_block(); - let cleanup = self.diverge_cleanup(); + self.diverge_from(block); self.cfg.terminate( block, source_info, @@ -444,7 +444,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { }), args: vec![val, expect], destination: Some((eq_result, eq_block)), - cleanup: Some(cleanup), + cleanup: None, from_hir_call: false, }, ); diff --git a/src/librustc_mir_build/build/mod.rs b/src/librustc_mir_build/build/mod.rs index 8b7d0637c0343..659393415a113 100644 --- a/src/librustc_mir_build/build/mod.rs +++ b/src/librustc_mir_build/build/mod.rs @@ -317,11 +317,6 @@ struct Builder<'a, 'tcx> { var_debug_info: Vec>, - /// Cached block with the `RESUME` terminator; this is created - /// when first set of cleanups are built. - cached_resume_block: Option, - /// Cached block with the `RETURN` terminator. - cached_return_block: Option, /// Cached block with the `UNREACHABLE` terminator. cached_unreachable_block: Option, } @@ -583,50 +578,40 @@ where region::Scope { id: body.value.hir_id.local_id, data: region::ScopeData::CallSite }; let arg_scope = region::Scope { id: body.value.hir_id.local_id, data: region::ScopeData::Arguments }; - let mut block = START_BLOCK; let source_info = builder.source_info(span); let call_site_s = (call_site_scope, source_info); - unpack!( - block = builder.in_scope(call_site_s, LintLevel::Inherited, |builder| { - if should_abort_on_panic(tcx, fn_def_id, abi) { - builder.schedule_abort(); - } - - let arg_scope_s = (arg_scope, source_info); - // `return_block` is called when we evaluate a `return` expression, so - // we just use `START_BLOCK` here. 
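For reference, the surface behaviour that the `return` handling in this hunk has to reproduce, sketched with an illustrative `Guard` type: a `return` exits every open scope and runs the scheduled drops innermost-first before the function actually returns.

```rust
struct Guard(&'static str);

impl Drop for Guard {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn early_exit(flag: bool) -> &'static str {
    let _outer = Guard("outer");
    if flag {
        let _inner = Guard("inner");
        // `return` leaves every open scope: "inner" and then "outer" are
        // dropped before the caller sees the return value.
        return "early";
    }
    "late"
}

fn main() {
    println!("got {}", early_exit(true));
}
```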
- unpack!( - block = builder.in_breakable_scope( - None, - START_BLOCK, - Place::return_place(), - |builder| { - builder.in_scope(arg_scope_s, LintLevel::Inherited, |builder| { - builder.args_and_body( - block, - fn_def_id, - &arguments, - arg_scope, - &body.value, - ) - }) - }, - ) - ); - // Attribute epilogue to function's closing brace - let fn_end = span.shrink_to_hi(); - let source_info = builder.source_info(fn_end); - let return_block = builder.return_block(); - builder.cfg.goto(block, source_info, return_block); - builder.cfg.terminate(return_block, source_info, TerminatorKind::Return); - // Attribute any unreachable codepaths to the function's closing brace - if let Some(unreachable_block) = builder.cached_unreachable_block { - builder.cfg.terminate(unreachable_block, source_info, TerminatorKind::Unreachable); - } - return_block.unit() - }) - ); - assert_eq!(block, builder.return_block()); + unpack!(builder.in_scope(call_site_s, LintLevel::Inherited, |builder| { + let arg_scope_s = (arg_scope, source_info); + // Attribute epilogue to function's closing brace + let fn_end = span.shrink_to_hi(); + let return_block = unpack!(builder.in_breakable_scope( + None, + Place::return_place(), + Some(call_site_scope), + fn_end, + |builder| { + Some(builder.in_scope(arg_scope_s, LintLevel::Inherited, |builder| { + builder.args_and_body( + START_BLOCK, + fn_def_id, + &arguments, + arg_scope, + &body.value, + ) + })) + }, + )); + let source_info = builder.source_info(fn_end); + builder.cfg.terminate(return_block, source_info, TerminatorKind::Return); + let should_abort = should_abort_on_panic(tcx, fn_def_id, abi); + builder.build_drop_trees(should_abort); + // Attribute any unreachable codepaths to the function's closing brace + if let Some(unreachable_block) = builder.cached_unreachable_block { + builder.cfg.terminate(unreachable_block, source_info, TerminatorKind::Unreachable); + } + builder.unschedule_return_place_drop(); + return_block.unit() + })); let mut spread_arg = None; if abi == Abi::RustCall { @@ -654,14 +639,13 @@ fn construct_const<'a, 'tcx>( let mut block = START_BLOCK; let ast_expr = &tcx.hir().body(body_id).value; let expr = builder.hir.mirror(ast_expr); - unpack!(block = builder.into_expr(&Place::return_place(), block, expr)); + // We don't provide a scope because we can't unwind in constants, so won't + // need to drop the return place. + unpack!(block = builder.into_expr(&Place::return_place(), None, block, expr)); let source_info = builder.source_info(span); builder.cfg.terminate(block, source_info, TerminatorKind::Return); - // Constants can't `return` so a return block should not be created. - assert_eq!(builder.cached_return_block, None); - // Constants may be match expressions in which case an unreachable block may // be created, so terminate it properly. 
if let Some(unreachable_block) = builder.cached_unreachable_block { @@ -735,7 +719,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { fn_span: span, arg_count, generator_kind, - scopes: Default::default(), + scopes: scope::Scopes::new(), block_context: BlockContext::new(), source_scopes: IndexVec::new(), source_scope: OUTERMOST_SOURCE_SCOPE, @@ -751,8 +735,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { var_indices: Default::default(), unit_temp: None, var_debug_info: vec![], - cached_resume_block: None, - cached_return_block: None, cached_unreachable_block: None, }; @@ -960,7 +942,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } let body = self.hir.mirror(ast_body); - self.into(&Place::return_place(), block, body) + let call_site = + region::Scope { id: ast_body.hir_id.local_id, data: region::ScopeData::CallSite }; + self.into(&Place::return_place(), Some(call_site), block, body) } fn set_correct_source_scope_for_arg( @@ -997,17 +981,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } } - - fn return_block(&mut self) -> BasicBlock { - match self.cached_return_block { - Some(rb) => rb, - None => { - let rb = self.cfg.start_new_block(); - self.cached_return_block = Some(rb); - rb - } - } - } } /////////////////////////////////////////////////////////////////////////// diff --git a/src/librustc_mir_build/build/scope.rs b/src/librustc_mir_build/build/scope.rs index a63ac06ec3fe9..94668d1e5f37b 100644 --- a/src/librustc_mir_build/build/scope.rs +++ b/src/librustc_mir_build/build/scope.rs @@ -6,30 +6,31 @@ contents, and then pop it off. Every scope is named by a ### SEME Regions -When pushing a new scope, we record the current point in the graph (a +When pushing a new [Scope], we record the current point in the graph (a basic block); this marks the entry to the scope. We then generate more stuff in the control-flow graph. Whenever the scope is exited, either via a `break` or `return` or just by fallthrough, that marks an exit from the scope. Each lexical scope thus corresponds to a single-entry, multiple-exit (SEME) region in the control-flow graph. -For now, we keep a mapping from each `region::Scope` to its -corresponding SEME region for later reference (see caveat in next -paragraph). This is because region scopes are tied to -them. Eventually, when we shift to non-lexical lifetimes, there should -be no need to remember this mapping. +For now, we record the `region::Scope` to each SEME region for later reference +(see caveat in next paragraph). This is because destruction scopes are tied to +them. This may change in the future so that MIR lowering determines its own +destruction scopes. ### Not so SEME Regions In the course of building matches, it sometimes happens that certain code (namely guards) gets executed multiple times. This means that the scope lexical scope may in fact correspond to multiple, disjoint SEME regions. So in fact our -mapping is from one scope to a vector of SEME regions. +mapping is from one scope to a vector of SEME regions. Since the SEME regions +are disjoint, the mapping is still one-to-one for the set of SEME regions that +we're currently in. -Also in matches, the scopes assigned to arms are not even SEME regions! Each -arm has a single region with one entry for each pattern. We manually +Also in matches, the scopes assigned to arms are not always even SEME regions! +Each arm has a single region with one entry for each pattern. We manually manipulate the scheduled drops in this scope to avoid dropping things multiple -times, although drop elaboration would clean this up for value drops. +times. 
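The early-exit rules spelled out in the sections below reduce to this surface behaviour (illustrative `Guard` type): breaking out of nested scopes runs the scheduled drops of every scope being exited, innermost first, before control reaches the break target.

```rust
struct Guard(&'static str);

impl Drop for Guard {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn main() {
    'outer: for i in 0.. {
        let _per_iteration = Guard("loop body");
        {
            let _inner = Guard("inner block");
            if i == 0 {
                // Breaking out of `'outer` exits the inner block and the loop
                // body, so "inner block" and then "loop body" are dropped
                // before control reaches the code after the loop.
                break 'outer;
            }
        }
    }
    println!("after the loop");
}
```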
### Drops @@ -60,38 +61,49 @@ that for now); any later drops would also drop `y`. There are numerous "normal" ways to early exit a scope: `break`, `continue`, `return` (panics are handled separately). Whenever an -early exit occurs, the method `exit_scope` is called. It is given the +early exit occurs, the method `break_scope` is called. It is given the current point in execution where the early exit occurs, as well as the scope you want to branch to (note that all early exits from to some -other enclosing scope). `exit_scope` will record this exit point and -also add all drops. +other enclosing scope). `break_scope` will record the set of drops currently +scheduled in a [DropTree]. Later, before `in_breakable_scope` exits, the drops +will be added to the CFG. -Panics are handled in a similar fashion, except that a panic always -returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call -`panic(p)` with the current point `p`. Or else you can call -`diverge_cleanup`, which will produce a block that you can branch to -which does the appropriate cleanup and then diverges. `panic(p)` -simply calls `diverge_cleanup()` and adds an edge from `p` to the -result. +Panics are handled in a similar fashion, except that the drops are added to the +mir once the rest of the function has finished being lowered. If a terminator +can panic, call `diverge_from(block)` with the block containing the terminator +`block`. -### Loop scopes +### Breakable scopes In addition to the normal scope stack, we track a loop scope stack -that contains only loops. It tracks where a `break` and `continue` -should go to. +that contains only loops and breakable blocks. It tracks where a `break`, +`continue` or `return` should go to. */ +use crate::build::matches::{ArmHasGuard, Candidate}; use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder, CFG}; -use crate::hair::{Expr, ExprRef, LintLevel}; +use crate::hair::{Arm, Expr, ExprRef, LintLevel}; use rustc::middle::region; use rustc::mir::*; use rustc_data_structures::fx::FxHashMap; use rustc_hir as hir; -use rustc_hir::GeneratorKind; +use rustc_index::vec::IndexVec; use rustc_span::{Span, DUMMY_SP}; -use std::collections::hash_map::Entry; -use std::mem; + +#[derive(Debug)] +pub struct Scopes<'tcx> { + scopes: Vec, + /// The current set of breakable scopes. See module comment for more details. + breakable_scopes: Vec>, + + /// Drops that need to be done on unwind paths. See the comment on + /// [DropTree] for more details. + unwind_drops: DropTree, + + /// Drops that need to be done on paths to the `GeneratorDrop` terminator. + generator_drops: DropTree, +} #[derive(Debug)] struct Scope { @@ -112,73 +124,47 @@ struct Scope { moved_locals: Vec, - /// The cache for drop chain on “normal” exit into a particular BasicBlock. - cached_exits: FxHashMap<(BasicBlock, region::Scope), BasicBlock>, - - /// The cache for drop chain on "generator drop" exit. - cached_generator_drop: Option, + /// The drop index that will drop everything in and below this scope on an + /// unwind path. + cached_unwind_block: Option, - /// The cache for drop chain on "unwind" exit. - cached_unwind: CachedBlock, + /// The drop index that will drop everything in and below this scope on a + /// generator drop path. + cached_generator_drop_block: Option, } -#[derive(Debug, Default)] -crate struct Scopes<'tcx> { - scopes: Vec, - /// The current set of breakable scopes. See module comment for more details. 
-    breakable_scopes: Vec<BreakableScope<'tcx>>,
-}
-
-#[derive(Debug)]
+#[derive(Clone, Copy, Debug)]
 struct DropData {
-    /// span where drop obligation was incurred (typically where place was declared)
-    span: Span,
+    /// The `Span` where drop obligation was incurred (typically where place was
+    /// declared)
+    source_info: SourceInfo,

     /// local to drop
     local: Local,

     /// Whether this is a value Drop or a StorageDead.
     kind: DropKind,
-
-    /// The cached blocks for unwinds.
-    cached_block: CachedBlock,
-}
-
-#[derive(Debug, Default, Clone, Copy)]
-struct CachedBlock {
-    /// The cached block for the cleanups-on-diverge path. This block
-    /// contains code to run the current drop and all the preceding
-    /// drops (i.e., those having lower index in Drop’s Scope drop
-    /// array)
-    unwind: Option<BasicBlock>,
-
-    /// The cached block for unwinds during cleanups-on-generator-drop path
-    ///
-    /// This is split from the standard unwind path here to prevent drop
-    /// elaboration from creating drop flags that would have to be captured
-    /// by the generator. I'm not sure how important this optimization is,
-    /// but it is here.
-    generator_drop: Option<BasicBlock>,
 }

-#[derive(Debug, PartialEq, Eq)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
 pub(crate) enum DropKind {
     Value,
     Storage,
 }

-#[derive(Clone, Debug)]
+#[derive(Debug)]
 struct BreakableScope<'tcx> {
     /// Region scope of the loop
     region_scope: region::Scope,
-    /// Where the body of the loop begins. `None` if block
-    continue_block: Option<BasicBlock>,
-    /// Block to branch into when the loop or block terminates (either by being
-    /// `break`-en out from, or by having its condition to become false)
-    break_block: BasicBlock,
     /// The destination of the loop/block expression itself (i.e., where to put
-    /// the result of a `break` expression)
+    /// the result of a `break` or `return` expression)
     break_destination: Place<'tcx>,
+    /// The scope that the destination should have its drop scheduled in.
+    destination_scope: Option<region::Scope>,
+    /// Drops that happen on the `break`/`return` path.
+    break_drops: DropTree,
+    /// Drops that happen on the `continue` path.
+    continue_drops: Option<DropTree>,
 }

 /// The target of an expression that breaks out of a scope
@@ -189,61 +175,33 @@ crate enum BreakableTarget {
     Return,
 }

-impl CachedBlock {
-    fn invalidate(&mut self) {
-        *self = CachedBlock::default();
-    }
+rustc_index::newtype_index! {
+    struct DropIdx { .. }
+}

-    fn get(&self, generator_drop: bool) -> Option<BasicBlock> {
-        if generator_drop { self.generator_drop } else { self.unwind }
-    }
+const ROOT_NODE: DropIdx = DropIdx::from_u32_const(0);

-    fn ref_mut(&mut self, generator_drop: bool) -> &mut Option<BasicBlock> {
-        if generator_drop { &mut self.generator_drop } else { &mut self.unwind }
-    }
+/// A tree of drops that we have deferred lowering. It's used for:
+///
+/// * Drops on unwind paths
+/// * Drops on generator drop paths (when a suspended generator is dropped)
+/// * Drops on return and loop exit paths
+///
+/// Once no more nodes could be added to the tree, we lower it to MIR in one go
+/// in `build_drop_tree`.
+#[derive(Debug)]
+struct DropTree {
+    /// Drops in the tree.
+    drops: IndexVec<DropIdx, (DropData, DropIdx)>,
+    /// Map for finding the inverse of the `next_drop` relation:
+    ///
+    /// `previous_drops[(drops[i].1, drops[i].0.local, drops[i].0.kind)] == i`
+    previous_drops: FxHashMap<(DropIdx, Local, DropKind), DropIdx>,
+    /// Edges into the `DropTree` that need to be added once it's lowered.
+    entry_points: Vec<(DropIdx, BasicBlock)>,
 }

 impl Scope {
-    /// Invalidates all the cached blocks in the scope.
- /// - /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a - /// larger extent of code. - /// - /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`. - /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current - /// top-of-scope (as opposed to dependent scopes). - fn invalidate_cache( - &mut self, - storage_only: bool, - generator_kind: Option, - this_scope_only: bool, - ) { - // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions - // with lots of `try!`? - - // cached exits drop storage and refer to the top-of-scope - self.cached_exits.clear(); - - // the current generator drop and unwind refer to top-of-scope - self.cached_generator_drop = None; - - let ignore_unwinds = storage_only && generator_kind.is_none(); - if !ignore_unwinds { - self.cached_unwind.invalidate(); - } - - if !ignore_unwinds && !this_scope_only { - for drop_data in &mut self.drops { - drop_data.cached_block.invalidate(); - } - } - } - - /// Given a span and this scope's source scope, make a SourceInfo. - fn source_info(&self, span: Span) -> SourceInfo { - SourceInfo { span, scope: self.source_scope } - } - /// Whether there's anything to do for the cleanup path, that is, /// when unwinding through this scope. This includes destructors, /// but not StorageDead statements, which don't get emitted at all @@ -261,109 +219,216 @@ impl Scope { DropKind::Storage => false, }) } + + fn invalidate_cache(&mut self) { + self.cached_unwind_block = None; + self.cached_generator_drop_block = None; + } } -impl<'tcx> Scopes<'tcx> { - fn len(&self) -> usize { - self.scopes.len() +/// A trait that determined how [DropTree::lower_to_mir] creates its blocks and +/// links to any entry nodes. +trait DropTreeBuilder<'tcx> { + fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock; + fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock); +} + +impl DropTree { + fn new() -> Self { + // The root node of the tree doesn't represent a drop, but instead + // represents the block in the tree that should be jumped to once all + // of the required drops have been performed. 
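A deliberately simplified, standalone model of the `DropTree` introduced above (plain `std` types and illustrative names; the real tree stores `DropData` in an `IndexVec` and also keys on the drop kind): each node records the drop that follows it, and `(next, local)` pairs are memoized so exit paths that share a suffix of drops share nodes instead of duplicating them, which is what replaces the old per-scope `cached_exits` caching.

```rust
use std::collections::HashMap;

// Illustrative stand-ins for the compiler's `Local` and `DropIdx` indices.
type Local = u32;
type DropIdx = usize;

#[derive(Debug)]
struct DropNode {
    local: Local,
    /// The drop that runs *after* this one (its parent in the tree).
    next: DropIdx,
}

#[derive(Debug)]
struct DropTree {
    /// Node 0 is a synthetic root meaning "all requested drops are done".
    drops: Vec<DropNode>,
    /// Memoizes `(next, local)` so exits that share a suffix of drops
    /// reuse the same chain of nodes.
    previous_drops: HashMap<(DropIdx, Local), DropIdx>,
    /// `(node, label of the block that jumps into the tree)` pairs.
    entry_points: Vec<(DropIdx, &'static str)>,
}

impl DropTree {
    fn new() -> Self {
        DropTree {
            drops: vec![DropNode { local: Local::MAX, next: 0 }],
            previous_drops: HashMap::new(),
            entry_points: Vec::new(),
        }
    }

    fn add_drop(&mut self, local: Local, next: DropIdx) -> DropIdx {
        let drops = &mut self.drops;
        *self.previous_drops.entry((next, local)).or_insert_with(|| {
            drops.push(DropNode { local, next });
            drops.len() - 1
        })
    }

    fn add_entry(&mut self, from: &'static str, to: DropIdx) {
        self.entry_points.push((to, from));
    }
}

fn main() {
    // Two exits (say, two `break`s) leave the same pair of nested scopes, so
    // locals 1 (outer) and 2 (inner) must be dropped on both paths. The two
    // paths end up sharing the same two tree nodes.
    let mut tree = DropTree::new();

    let mut path = 0;
    for local in [1u32, 2] {
        path = tree.add_drop(local, path);
    }
    tree.add_entry("bb3", path);

    let mut path = 0;
    for local in [1u32, 2] {
        path = tree.add_drop(local, path);
    }
    tree.add_entry("bb7", path);

    assert_eq!(tree.drops.len(), 3); // root + one node per distinct drop
    println!("{:#?}", tree);
}
```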
+ let fake_source_info = SourceInfo { span: DUMMY_SP, scope: OUTERMOST_SOURCE_SCOPE }; + let fake_data = + DropData { source_info: fake_source_info, local: Local::MAX, kind: DropKind::Storage }; + let drop_idx = DropIdx::MAX; + let drops = IndexVec::from_elem_n((fake_data, drop_idx), 1); + Self { drops, entry_points: Vec::new(), previous_drops: FxHashMap::default() } } - fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo), vis_scope: SourceScope) { - debug!("push_scope({:?})", region_scope); - self.scopes.push(Scope { - source_scope: vis_scope, - region_scope: region_scope.0, - region_scope_span: region_scope.1.span, - drops: vec![], - moved_locals: vec![], - cached_generator_drop: None, - cached_exits: Default::default(), - cached_unwind: CachedBlock::default(), - }); + fn add_drop(&mut self, drop: DropData, next: DropIdx) -> DropIdx { + let drops = &mut self.drops; + *self + .previous_drops + .entry((next, drop.local, drop.kind)) + .or_insert_with(|| drops.push((drop, next))) } - fn pop_scope( - &mut self, - region_scope: (region::Scope, SourceInfo), - ) -> (Scope, Option) { - let scope = self.scopes.pop().unwrap(); - assert_eq!(scope.region_scope, region_scope.0); - let unwind_to = - self.scopes.last().and_then(|next_scope| next_scope.cached_unwind.get(false)); - (scope, unwind_to) + fn add_entry(&mut self, from: BasicBlock, to: DropIdx) { + debug_assert!(to < self.drops.next_index()); + self.entry_points.push((to, from)); } - fn may_panic(&self, scope_count: usize) -> bool { - let len = self.len(); - self.scopes[(len - scope_count)..].iter().any(|s| s.needs_cleanup()) + /// Builds the MIR for a given drop tree. + /// + /// `blocks` should have the same length as `self.drops`, and may have its + /// first value set to some already existing block. + fn build_mir<'tcx, T: DropTreeBuilder<'tcx>>( + &mut self, + cfg: &mut CFG<'tcx>, + blocks: &mut IndexVec>, + ) { + debug!("DropTree::lower_to_mir(drops = {:#?})", self); + debug_assert_eq!(blocks.len(), self.drops.len()); + + self.assign_blocks::(cfg, blocks); + self.link_blocks(cfg, blocks) } - /// Finds the breakable scope for a given label. This is used for - /// resolving `return`, `break` and `continue`. - fn find_breakable_scope( - &self, - span: Span, - target: BreakableTarget, - ) -> (BasicBlock, region::Scope, Option>) { - let get_scope = |scope: region::Scope| { - // find the loop-scope by its `region::Scope`. - self.breakable_scopes - .iter() - .rfind(|breakable_scope| breakable_scope.region_scope == scope) - .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found")) - }; - match target { - BreakableTarget::Return => { - let scope = &self.breakable_scopes[0]; - if scope.break_destination != Place::return_place() { - span_bug!(span, "`return` in item with no return scope"); + /// Assign blocks for all of the drops in the drop tree that need them. + fn assign_blocks<'tcx, T: DropTreeBuilder<'tcx>>( + &mut self, + cfg: &mut CFG<'tcx>, + blocks: &mut IndexVec>, + ) { + // StorageDead statements can share blocks with each other and also with + // a Drop terminator. We iterate through the blocks to find which blocks + // need + #[derive(Clone, Copy)] + enum Block { + // This drop is unreachable + None, + // This drop is only reachable through the `StorageDead` with the + // specified index. + Shares(DropIdx), + // This drop has more than one way of being reached, or it is + // branched to from outside the tree, or it's predecessor is a + // `Value` drop. 
+ Own, + } + + let mut needs_block = IndexVec::from_elem(Block::None, &self.drops); + if blocks[ROOT_NODE].is_some() { + // In some cases (such as drops for `continue`) the root node + // already has a block. In this case, make sure that we don't + // override it. + needs_block[ROOT_NODE] = Block::Own; + } + + // Sort so that we only need to check the last + let entry_points = &mut self.entry_points; + entry_points.sort(); + + for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() { + if entry_points.last().map_or(false, |entry_point| entry_point.0 == drop_idx) { + let block = *blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg)); + needs_block[drop_idx] = Block::Own; + while entry_points.last().map_or(false, |entry_point| entry_point.0 == drop_idx) { + let entry_block = entry_points.pop().unwrap().1; + T::add_entry(cfg, entry_block, block); } - (scope.break_block, scope.region_scope, Some(scope.break_destination)) } - BreakableTarget::Break(scope) => { - let scope = get_scope(scope); - (scope.break_block, scope.region_scope, Some(scope.break_destination)) + match needs_block[drop_idx] { + Block::None => continue, + Block::Own => { + blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg)); + } + Block::Shares(pred) => { + blocks[drop_idx] = blocks[pred]; + } } - BreakableTarget::Continue(scope) => { - let scope = get_scope(scope); - let continue_block = scope - .continue_block - .unwrap_or_else(|| span_bug!(span, "missing `continue` block")); - (continue_block, scope.region_scope, None) + if let DropKind::Value = drop_data.0.kind { + needs_block[drop_data.1] = Block::Own; + } else { + if drop_idx != ROOT_NODE { + match &mut needs_block[drop_data.1] { + pred @ Block::None => *pred = Block::Shares(drop_idx), + pred @ Block::Shares(_) => *pred = Block::Own, + Block::Own => (), + } + } } } + + debug!("assign_blocks: blocks = {:#?}", blocks); + assert!(entry_points.is_empty()); } - fn num_scopes_above(&self, region_scope: region::Scope, span: Span) -> usize { - let scope_count = self - .scopes - .iter() - .rev() - .position(|scope| scope.region_scope == region_scope) - .unwrap_or_else(|| span_bug!(span, "region_scope {:?} does not enclose", region_scope)); - let len = self.len(); - assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes"); - scope_count + fn link_blocks<'tcx>( + &self, + cfg: &mut CFG<'tcx>, + blocks: &IndexVec>, + ) { + for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() { + let block = if let Some(block) = blocks[drop_idx] { + block + } else { + continue; + }; + match drop_data.0.kind { + DropKind::Value => { + let terminator = TerminatorKind::Drop { + target: blocks[drop_data.1].unwrap(), + // The caller will handle this if needed. + unwind: None, + location: drop_data.0.local.into(), + }; + cfg.terminate(block, drop_data.0.source_info, terminator); + } + // Root nodes don't correspond to a drop. + DropKind::Storage if drop_idx == ROOT_NODE => {} + DropKind::Storage => { + let stmt = Statement { + source_info: drop_data.0.source_info, + kind: StatementKind::StorageDead(drop_data.0.local), + }; + cfg.push(block, stmt); + let target = blocks[drop_data.1].unwrap(); + if target != block { + // Diagnostics don't use this `Span` but debuginfo + // might, which could cause breakpoints to end up in the + // wrong place. 
+ let source_info = SourceInfo { span: DUMMY_SP, ..drop_data.0.source_info }; + let terminator = TerminatorKind::Goto { target }; + cfg.terminate(block, source_info, terminator); + } + } + } + } + } +} + +impl<'tcx> Scopes<'tcx> { + pub(crate) fn new() -> Self { + Self { + scopes: Vec::new(), + breakable_scopes: Vec::new(), + unwind_drops: DropTree::new(), + generator_drops: DropTree::new(), + } } - fn iter_mut(&mut self) -> impl DoubleEndedIterator + '_ { - self.scopes.iter_mut().rev() + fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo), vis_scope: SourceScope) { + debug!("push_scope({:?})", region_scope); + self.scopes.push(Scope { + source_scope: vis_scope, + region_scope: region_scope.0, + region_scope_span: region_scope.1.span, + drops: vec![], + moved_locals: vec![], + cached_unwind_block: None, + cached_generator_drop_block: None, + }); + } + + fn pop_scope(&mut self, region_scope: (region::Scope, SourceInfo)) -> Scope { + let scope = self.scopes.pop().unwrap(); + assert_eq!(scope.region_scope, region_scope.0); + scope } - fn top_scopes(&mut self, count: usize) -> impl DoubleEndedIterator + '_ { - let len = self.len(); - self.scopes[len - count..].iter_mut() + fn scope_index(&self, region_scope: region::Scope, span: Span) -> usize { + self.scopes + .iter() + .rposition(|scope| scope.region_scope == region_scope) + .unwrap_or_else(|| span_bug!(span, "region_scope {:?} does not enclose", region_scope)) } /// Returns the topmost active scope, which is known to be alive until /// the next scope expression. - pub(super) fn topmost(&self) -> region::Scope { + fn topmost(&self) -> region::Scope { self.scopes.last().expect("topmost_scope: no scopes present").region_scope } - - fn source_info(&self, index: usize, span: Span) -> SourceInfo { - self.scopes[self.len() - index].source_info(span) - } } impl<'a, 'tcx> Builder<'a, 'tcx> { @@ -371,28 +436,53 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // ========================== // Start a breakable scope, which tracks where `continue`, `break` and // `return` should branch to. 
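In surface terms, the `continue_drops` tree collects what this ordinary program already does (illustrative `Noisy` type): the loop body's scheduled drops run on the `continue` path just as they do when the body falls through to the loop header.

```rust
struct Noisy(u32);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping Noisy({})", self.0);
    }
}

fn main() {
    for i in 0..3u32 {
        let per_iteration = Noisy(i);
        if i == 1 {
            // The body's scheduled drops run before control returns to the
            // loop header: Noisy(1) is dropped here, on the `continue` path.
            continue;
        }
        println!("end of body for {}", per_iteration.0);
    }
    // Order: end of body for 0, drop 0, drop 1, end of body for 2, drop 2.
}
```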
- crate fn in_breakable_scope( + crate fn in_breakable_scope( &mut self, loop_block: Option, - break_block: BasicBlock, break_destination: Place<'tcx>, + destination_scope: Option, + span: Span, f: F, - ) -> R + ) -> BlockAnd<()> where - F: FnOnce(&mut Builder<'a, 'tcx>) -> R, + F: FnOnce(&mut Builder<'a, 'tcx>) -> Option>, { let region_scope = self.scopes.topmost(); let scope = BreakableScope { region_scope, - continue_block: loop_block, - break_block, break_destination, + destination_scope, + break_drops: DropTree::new(), + continue_drops: loop_block.map(|_| DropTree::new()), }; + let continue_blocks = loop_block.map(|block| (block, self.diverge_cleanup())); self.scopes.breakable_scopes.push(scope); - let res = f(self); + let normal_exit_block = f(self); let breakable_scope = self.scopes.breakable_scopes.pop().unwrap(); assert!(breakable_scope.region_scope == region_scope); - res + breakable_scope.continue_drops.map(|drops| { + self.build_exit_tree(drops, continue_blocks); + }); + let break_block = self.build_exit_tree(breakable_scope.break_drops, None); + match (normal_exit_block, break_block) { + (Some(block), None) | (None, Some(block)) => block, + (None, None) => self.cfg.start_new_block().unit(), + (Some(normal_block), Some(exit_block)) => { + let target = self.cfg.start_new_block(); + let source_info = self.source_info(span); + self.cfg.terminate( + unpack!(normal_block), + source_info, + TerminatorKind::Goto { target }, + ); + self.cfg.terminate( + unpack!(exit_block), + source_info, + TerminatorKind::Goto { target }, + ); + target.unit() + } + } } crate fn in_opt_scope( @@ -476,30 +566,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { mut block: BasicBlock, ) -> BlockAnd<()> { debug!("pop_scope({:?}, {:?})", region_scope, block); - // If we are emitting a `drop` statement, we need to have the cached - // diverge cleanup pads ready in case that drop panics. - if self.scopes.may_panic(1) { - self.diverge_cleanup(); - } - let (scope, unwind_to) = self.scopes.pop_scope(region_scope); - let unwind_to = unwind_to.unwrap_or_else(|| self.resume_block()); - unpack!( - block = build_scope_drops( - &mut self.cfg, - self.generator_kind, - &scope, - block, - unwind_to, - self.arg_count, - false, // not generator - false, // not unwind path - ) - ); + block = self.leave_top_scope(block); + + self.scopes.pop_scope(region_scope); block.unit() } + /// Sets up the drops for breaking from `block` to `target`. crate fn break_scope( &mut self, mut block: BasicBlock, @@ -507,152 +582,99 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { scope: BreakableTarget, source_info: SourceInfo, ) -> BlockAnd<()> { - let (mut target_block, region_scope, destination) = - self.scopes.find_breakable_scope(source_info.span, scope); - if let BreakableTarget::Return = scope { - // We call this now, rather than when we start lowering the - // function so that the return block doesn't precede the entire - // rest of the CFG. Some passes and LLVM prefer blocks to be in - // approximately CFG order. - target_block = self.return_block(); - } - if let Some(destination) = destination { + let span = source_info.span; + + let get_scope_index = |scope: region::Scope| { + // find the loop-scope by its `region::Scope`. 
+ self.scopes + .breakable_scopes + .iter() + .rposition(|breakable_scope| breakable_scope.region_scope == scope) + .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found")) + }; + let (break_index, destination, dest_scope) = match scope { + BreakableTarget::Return => { + let scope = &self.scopes.breakable_scopes[0]; + if scope.break_destination != Place::return_place() { + span_bug!(span, "`return` in item with no return scope"); + } + (0, Some(scope.break_destination.clone()), scope.destination_scope) + } + BreakableTarget::Break(scope) => { + let break_index = get_scope_index(scope); + let scope = &self.scopes.breakable_scopes[break_index]; + (break_index, Some(scope.break_destination.clone()), scope.destination_scope) + } + BreakableTarget::Continue(scope) => { + let break_index = get_scope_index(scope); + (break_index, None, None) + } + }; + + if let Some(ref destination) = destination { if let Some(value) = value { debug!("stmt_expr Break val block_context.push(SubExpr)"); self.block_context.push(BlockFrame::SubExpr); - unpack!(block = self.into(&destination, block, value)); + unpack!(block = self.into(destination, dest_scope, block, value)); + dest_scope + .map(|scope| self.unschedule_drop(scope, destination.as_local().unwrap())); self.block_context.pop(); } else { - self.cfg.push_assign_unit(block, source_info, &destination) + self.cfg.push_assign_unit(block, source_info, destination) } } else { assert!(value.is_none(), "`return` and `break` should have a destination"); } - self.exit_scope(source_info.span, region_scope, block, target_block); + + let region_scope = self.scopes.breakable_scopes[break_index].region_scope; + let scope_index = self.scopes.scope_index(region_scope, span); + let drops = if destination.is_some() { + &mut self.scopes.breakable_scopes[break_index].break_drops + } else { + self.scopes.breakable_scopes[break_index].continue_drops.as_mut().unwrap() + }; + let mut drop_idx = ROOT_NODE; + for scope in &self.scopes.scopes[scope_index + 1..] { + for drop in &scope.drops { + drop_idx = drops.add_drop(*drop, drop_idx); + } + } + drops.add_entry(block, drop_idx); + // `build_drop_tree` doesn't have access to our source_info, so we + // create a dummy terminator now. `TerminatorKind::Resume` is used + // because MIR type checking will panic if it hasn't been overwritten. + self.cfg.terminate(block, source_info, TerminatorKind::Resume); + self.cfg.start_new_block().unit() } - /// Branch out of `block` to `target`, exiting all scopes up to - /// and including `region_scope`. This will insert whatever drops are - /// needed. See module comment for details. - crate fn exit_scope( + crate fn exit_top_scope( &mut self, - span: Span, - region_scope: region::Scope, mut block: BasicBlock, target: BasicBlock, + source_info: SourceInfo, ) { - debug!( - "exit_scope(region_scope={:?}, block={:?}, target={:?})", - region_scope, block, target - ); - let scope_count = self.scopes.num_scopes_above(region_scope, span); + block = self.leave_top_scope(block); + self.cfg.terminate(block, source_info, TerminatorKind::Goto { target }); + } + fn leave_top_scope(&mut self, block: BasicBlock) -> BasicBlock { // If we are emitting a `drop` statement, we need to have the cached // diverge cleanup pads ready in case that drop panics. 
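The unwind path assembled from `unwind_drops` corresponds to ordinary unwinding behaviour (illustrative `Guard` type): when a terminator panics, the live locals of every enclosing scope are dropped, innermost first, on the way out.

```rust
struct Guard(&'static str);

impl Drop for Guard {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn main() {
    let result = std::panic::catch_unwind(|| {
        let _outer = Guard("outer");
        {
            let _inner = Guard("inner");
            panic!("boom");
        }
    });
    // Unwinding drops "inner" and then "outer" before the panic is caught,
    // mirroring the chain of cleanup blocks that the unwind drop tree emits.
    assert!(result.is_err());
}
```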
- let may_panic = self.scopes.may_panic(scope_count); - if may_panic { - self.diverge_cleanup(); - } - - let mut scopes = self.scopes.top_scopes(scope_count + 1).rev(); - let mut scope = scopes.next().unwrap(); - for next_scope in scopes { - if scope.drops.is_empty() { - scope = next_scope; - continue; - } - let source_info = scope.source_info(span); - block = match scope.cached_exits.entry((target, region_scope)) { - Entry::Occupied(e) => { - self.cfg.goto(block, source_info, *e.get()); - return; - } - Entry::Vacant(v) => { - let b = self.cfg.start_new_block(); - self.cfg.goto(block, source_info, b); - v.insert(b); - b - } - }; - - let unwind_to = next_scope.cached_unwind.get(false).unwrap_or_else(|| { - debug_assert!(!may_panic, "cached block not present?"); - START_BLOCK - }); - - unpack!( - block = build_scope_drops( - &mut self.cfg, - self.generator_kind, - scope, - block, - unwind_to, - self.arg_count, - false, // not generator - false, // not unwind path - ) - ); - - scope = next_scope; - } - - self.cfg.goto(block, self.scopes.source_info(scope_count, span), target); - } - - /// Creates a path that performs all required cleanup for dropping a generator. - /// - /// This path terminates in GeneratorDrop. Returns the start of the path. - /// None indicates there’s no cleanup to do at this point. - crate fn generator_drop_cleanup(&mut self) -> Option { - // Fill in the cache for unwinds - self.diverge_cleanup_gen(true); - - let src_info = self.scopes.source_info(self.scopes.len(), self.fn_span); - let resume_block = self.resume_block(); - let mut scopes = self.scopes.iter_mut().peekable(); - let mut block = self.cfg.start_new_block(); - let result = block; - - while let Some(scope) = scopes.next() { - block = if let Some(b) = scope.cached_generator_drop { - self.cfg.goto(block, src_info, b); - return Some(result); - } else { - let b = self.cfg.start_new_block(); - scope.cached_generator_drop = Some(b); - self.cfg.goto(block, src_info, b); - b - }; - - let unwind_to = scopes - .peek() - .as_ref() - .map(|scope| { - scope - .cached_unwind - .get(true) - .unwrap_or_else(|| span_bug!(src_info.span, "cached block not present?")) - }) - .unwrap_or(resume_block); - - unpack!( - block = build_scope_drops( - &mut self.cfg, - self.generator_kind, - scope, - block, - unwind_to, - self.arg_count, - true, // is generator - true, // is cached path - ) - ); - } - - self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop); - - Some(result) + let needs_cleanup = self.scopes.scopes.last().map_or(false, |scope| scope.needs_cleanup()); + let is_generator = self.generator_kind.is_some(); + let unwind_to = if needs_cleanup { self.diverge_cleanup() } else { DropIdx::MAX }; + + let scope = self.scopes.scopes.last().expect("exit_top_scope called with no scopes"); + unpack!(build_scope_drops( + &mut self.cfg, + &mut self.scopes.unwind_drops, + scope, + block, + unwind_to, + is_generator && needs_cleanup, + self.arg_count, + )) } /// Creates a new source scope, nested in the current one. 
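As background for the `schedule_drop` changes in the next hunk (illustrative `Noisy` type and `len_of` helper): a temporary's drop is scheduled for the end of its enclosing statement, while a named binding's drop is scheduled for the end of its block.

```rust
struct Noisy(&'static str);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn len_of(n: &Noisy) -> usize {
    n.0.len()
}

fn main() {
    let _kept = Noisy("kept");
    // The `Noisy("temporary")` below only lives until the end of this
    // statement, so its drop runs before the following `println!`.
    let n = len_of(&Noisy("temporary"));
    println!("len = {}", n);
    // Output: dropping temporary, len = 9, dropping kept.
}
```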
@@ -728,15 +750,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - // Schedule an abort block - this is used for some ABIs that cannot unwind - crate fn schedule_abort(&mut self) -> BasicBlock { - let source_info = self.scopes.source_info(self.scopes.len(), self.fn_span); - let abortblk = self.cfg.start_new_cleanup_block(); - self.cfg.terminate(abortblk, source_info, TerminatorKind::Abort); - self.cached_resume_block = Some(abortblk); - abortblk - } - // Scheduling drops // ================ crate fn schedule_drop_storage_and_value( @@ -749,11 +762,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { self.schedule_drop(span, region_scope, local, DropKind::Value); } - /// Indicates that `place` should be dropped on exit from - /// `region_scope`. + /// Indicates that `place` should be dropped on exit from `region_scope`. /// - /// When called with `DropKind::Storage`, `place` should be a local - /// with an index higher than the current `self.arg_count`. + /// When called with `DropKind::Storage`, `place` shouldn't be the return + /// place, or a function parameter. crate fn schedule_drop( &mut self, span: Span, @@ -781,73 +793,65 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } }; - for scope in self.scopes.iter_mut() { - let this_scope = scope.region_scope == region_scope; - // When building drops, we try to cache chains of drops in such a way so these drops - // could be reused by the drops which would branch into the cached (already built) - // blocks. This, however, means that whenever we add a drop into a scope which already - // had some blocks built (and thus, cached) for it, we must invalidate all caches which - // might branch into the scope which had a drop just added to it. This is necessary, - // because otherwise some other code might use the cache to branch into already built - // chain of drops, essentially ignoring the newly added drop. - // - // For example consider there’s two scopes with a drop in each. These are built and - // thus the caches are filled: - // - // +--------------------------------------------------------+ - // | +---------------------------------+ | - // | | +--------+ +-------------+ | +---------------+ | - // | | | return | <-+ | drop(outer) | <-+ | drop(middle) | | - // | | +--------+ +-------------+ | +---------------+ | - // | +------------|outer_scope cache|--+ | - // +------------------------------|middle_scope cache|------+ - // - // Now, a new, inner-most scope is added along with a new drop into both inner-most and - // outer-most scopes: - // - // +------------------------------------------------------------+ - // | +----------------------------------+ | - // | | +--------+ +-------------+ | +---------------+ | +-------------+ - // | | | return | <+ | drop(new) | <-+ | drop(middle) | <--+| drop(inner) | - // | | +--------+ | | drop(outer) | | +---------------+ | +-------------+ - // | | +-+ +-------------+ | | - // | +---|invalid outer_scope cache|----+ | - // +----=----------------|invalid middle_scope cache|-----------+ - // - // If, when adding `drop(new)` we do not invalidate the cached blocks for both - // outer_scope and middle_scope, then, when building drops for the inner (right-most) - // scope, the old, cached blocks, without `drop(new)` will get used, producing the - // wrong results. - // - // The cache and its invalidation for unwind branch is somewhat special. The cache is - // per-drop, rather than per scope, which has a several different implications. 
Adding - // a new drop into a scope will not invalidate cached blocks of the prior drops in the - // scope. That is true, because none of the already existing drops will have an edge - // into a block with the newly added drop. - // - // Note that this code iterates scopes from the inner-most to the outer-most, - // invalidating caches of each scope visited. This way bare minimum of the - // caches gets invalidated. i.e., if a new drop is added into the middle scope, the - // cache of outer scope stays intact. - scope.invalidate_cache(!needs_drop, self.generator_kind, this_scope); - if this_scope { + let invalidate_caches = needs_drop || self.generator_kind.is_some(); + for scope in self.scopes.scopes.iter_mut().rev() { + if invalidate_caches { + scope.invalidate_cache(); + } + + if scope.region_scope == region_scope { let region_scope_span = region_scope.span(self.hir.tcx(), &self.hir.region_scope_tree); // Attribute scope exit drops to scope's closing brace. let scope_end = self.hir.tcx().sess.source_map().end_point(region_scope_span); scope.drops.push(DropData { - span: scope_end, + source_info: SourceInfo { span: scope_end, scope: scope.source_scope }, local, kind: drop_kind, - cached_block: CachedBlock::default(), }); + return; } } + span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, local); } + /// Unschedule a drop. Used for `break`, `return` and `match` expressions + /// when `record_operands_moved` is not powerful enough. + /// + /// The given local is expected to have a value drop scheduled in the given + /// scope and for that drop to be the most recent thing scheduled in that + /// scope. + fn unschedule_drop(&mut self, region_scope: region::Scope, local: Local) { + if !self.hir.needs_drop(self.local_decls[local].ty) { + return; + } + for scope in self.scopes.scopes.iter_mut().rev() { + scope.invalidate_cache(); + + if scope.region_scope == region_scope { + let drop = scope.drops.pop(); + + match drop { + Some(DropData { local: removed_local, kind: DropKind::Value, .. }) + if removed_local == local => + { + return; + } + _ => bug!( + "found wrong drop, expected value drop of {:?}, found {:?}", + local, + drop, + ), + } + } + } + + bug!("region scope {:?} not in scope to unschedule drop of {:?}", region_scope, local); + } + /// Indicates that the "local operand" stored in `local` is /// *moved* at some point during execution (see `local_scope` for /// more information about what a "local operand" is -- in short, @@ -892,9 +896,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } Some(local_scope) => self + .scopes .scopes .iter_mut() - .find(|scope| scope.region_scope == local_scope) + .rfind(|scope| scope.region_scope == local_scope) .unwrap_or_else(|| bug!("scope {:?} not found in scope list!", local_scope)), }; @@ -944,13 +949,16 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // Manually drop the condition on both branches. let top_scope = self.scopes.scopes.last_mut().unwrap(); let top_drop_data = top_scope.drops.pop().unwrap(); + if self.generator_kind.is_some() { + top_scope.invalidate_cache(); + } match top_drop_data.kind { DropKind::Value { .. 
} => { bug!("Drop scheduled on top of condition variable") } DropKind::Storage => { - let source_info = top_scope.source_info(top_drop_data.span); + let source_info = top_drop_data.source_info; let local = top_drop_data.local; assert_eq!(local, cond_temp, "Drop scheduled on top of condition"); self.cfg.push( @@ -963,8 +971,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ); } } - - top_scope.invalidate_cache(true, self.generator_kind, true); } else { bug!("Expected as_local_operand to produce a temporary"); } @@ -974,62 +980,61 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { (true_block, false_block) } - /// Creates a path that performs all required cleanup for unwinding. - /// - /// This path terminates in Resume. Returns the start of the path. - /// See module comment for more details. - crate fn diverge_cleanup(&mut self) -> BasicBlock { - self.diverge_cleanup_gen(false) - } - - fn resume_block(&mut self) -> BasicBlock { - if let Some(target) = self.cached_resume_block { - target - } else { - let resumeblk = self.cfg.start_new_cleanup_block(); - self.cfg.terminate( - resumeblk, - SourceInfo { scope: OUTERMOST_SOURCE_SCOPE, span: self.fn_span }, - TerminatorKind::Resume, - ); - self.cached_resume_block = Some(resumeblk); - resumeblk + fn diverge_cleanup(&mut self) -> DropIdx { + let is_generator = self.generator_kind.is_some(); + let (uncached_scope, mut cached_drop) = self + .scopes + .scopes + .iter() + .enumerate() + .rev() + .find_map(|(scope_idx, scope)| { + scope.cached_unwind_block.map(|cached_block| (scope_idx + 1, cached_block)) + }) + .unwrap_or((0, ROOT_NODE)); + for scope in &mut self.scopes.scopes[uncached_scope..] { + for drop in &scope.drops { + if is_generator || drop.kind == DropKind::Value { + cached_drop = self.scopes.unwind_drops.add_drop(*drop, cached_drop); + } + } + scope.cached_unwind_block = Some(cached_drop); } + cached_drop } - fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> BasicBlock { - // Build up the drops in **reverse** order. The end result will - // look like: - // - // scopes[n] -> scopes[n-1] -> ... -> scopes[0] - // - // However, we build this in **reverse order**. That is, we - // process scopes[0], then scopes[1], etc, pointing each one at - // the result generates from the one before. Along the way, we - // store caches. If everything is cached, we'll just walk right - // to left reading the cached results but never created anything. - - // Find the last cached block - debug!("diverge_cleanup_gen(self.scopes = {:?})", self.scopes); - let cached_cleanup = self.scopes.iter_mut().enumerate().find_map(|(idx, ref scope)| { - let cached_block = scope.cached_unwind.get(generator_drop)?; - Some((cached_block, idx)) - }); - let (mut target, first_uncached) = - cached_cleanup.unwrap_or_else(|| (self.resume_block(), self.scopes.len())); + /// Prepares to create a path that performs all required cleanup for + /// unwinding. + /// + /// This path terminates in Resume. The path isn't created until after all + /// of the non-unwind paths in this item have been lowered. + crate fn diverge_from(&mut self, start: BasicBlock) { + let next_drop = self.diverge_cleanup(); + self.scopes.unwind_drops.add_entry(start, next_drop); + } - for scope in self.scopes.top_scopes(first_uncached) { - target = build_diverge_scope( - &mut self.cfg, - scope.region_scope_span, - scope, - target, - generator_drop, - self.generator_kind, - ); + /// Sets up a path that performs all required cleanup for dropping a generator. + /// + /// This path terminates in GeneratorDrop. 
Returns the start of the path. + /// None indicates there’s no cleanup to do at this point. + crate fn generator_drop_cleanup(&mut self, yield_block: BasicBlock) { + let (uncached_scope, mut cached_drop) = self + .scopes + .scopes + .iter() + .enumerate() + .rev() + .find_map(|(scope_idx, scope)| { + scope.cached_generator_drop_block.map(|cached_block| (scope_idx + 1, cached_block)) + }) + .unwrap_or((0, ROOT_NODE)); + for scope in &mut self.scopes.scopes[uncached_scope..] { + for drop in &scope.drops { + cached_drop = self.scopes.generator_drops.add_drop(*drop, cached_drop); + } + scope.cached_generator_drop_block = Some(cached_drop); } - - target + self.scopes.generator_drops.add_entry(yield_block, cached_drop); } /// Utility function for *non*-scope code to build their own drops @@ -1042,16 +1047,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ) -> BlockAnd<()> { let source_info = self.source_info(span); let next_target = self.cfg.start_new_block(); - let diverge_target = self.diverge_cleanup(); + self.diverge_from(block); self.cfg.terminate( block, source_info, - TerminatorKind::DropAndReplace { - location, - value, - target: next_target, - unwind: Some(diverge_target), - }, + TerminatorKind::DropAndReplace { location, value, target: next_target, unwind: None }, ); next_target.unit() } @@ -1070,49 +1070,137 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let source_info = self.source_info(span); let success_block = self.cfg.start_new_block(); - let cleanup = self.diverge_cleanup(); + self.diverge_from(block); self.cfg.terminate( block, source_info, - TerminatorKind::Assert { - cond, - expected, - msg, - target: success_block, - cleanup: Some(cleanup), - }, + TerminatorKind::Assert { cond, expected, msg, target: success_block, cleanup: None }, ); success_block } - // `match` arm scopes - // ================== + /// Lower the arms and guards of a match. + /// + /// The decision tree should have already been created (by + /// [Builder::lower_match_tree]). + /// + /// This is here, and not in `build::matches` because we have to do some + /// careful scope manipulation to have the drop of the destination be + /// scheduled at the end of each arm and then cleared for the next arm. + crate fn lower_match_arms( + &mut self, + destination: &Place<'tcx>, + destination_scope: Option, + scrutinee_place: Place<'tcx>, + scrutinee_span: Span, + arm_candidates: Vec<(&'_ Arm<'tcx>, Candidate<'_, 'tcx>)>, + outer_source_info: SourceInfo, + fake_borrow_temps: Vec<(Place<'tcx>, Local)>, + ) -> BlockAnd<()> { + if arm_candidates.is_empty() { + // If there are no arms to schedule drops, then we have to do it + // manually. 
+ if let Some(scope) = destination_scope { + self.schedule_drop( + outer_source_info.span, + scope, + destination.as_local().unwrap(), + DropKind::Value, + ); + } + return self.cfg.start_new_block().unit(); + } + let mut first_arm = true; + let arm_end_blocks: Vec<_> = arm_candidates + .into_iter() + .map(|(arm, candidate)| { + if first_arm { + first_arm = false; + } else { + destination_scope.map(|scope| { + self.unschedule_drop(scope, destination.as_local().unwrap()); + }); + } + + let arm_source_info = self.source_info(arm.span); + let arm_scope = (arm.scope, arm_source_info); + self.in_scope(arm_scope, arm.lint_level, |this| { + let body = this.hir.mirror(arm.body.clone()); + let scope = this.declare_bindings( + None, + arm.span, + &arm.pattern, + ArmHasGuard(arm.guard.is_some()), + Some((Some(&scrutinee_place), scrutinee_span)), + ); + + let arm_block = this.bind_pattern( + outer_source_info, + candidate, + arm.guard.as_ref(), + &fake_borrow_temps, + scrutinee_span, + Some(arm.scope), + ); + + if let Some(source_scope) = scope { + this.source_scope = source_scope; + } + + this.into(destination, destination_scope, arm_block, body) + }) + }) + .collect(); + + // all the arm blocks will rejoin here + let end_block = self.cfg.start_new_block(); + + for arm_block in arm_end_blocks { + self.cfg.goto(unpack!(arm_block), outer_source_info, end_block); + } + + self.source_scope = outer_source_info.scope; + + end_block.unit() + } + /// Unschedules any drops in the top scope. /// /// This is only needed for `match` arm scopes, because they have one /// entrance per pattern, but only one exit. - pub(crate) fn clear_top_scope(&mut self, region_scope: region::Scope) { + pub(super) fn clear_top_scope(&mut self, region_scope: region::Scope) { let top_scope = self.scopes.scopes.last_mut().unwrap(); assert_eq!(top_scope.region_scope, region_scope); top_scope.drops.clear(); - top_scope.invalidate_cache(false, self.generator_kind, true); + top_scope.invalidate_cache(); + } + + /// Unschedules the drop of the return place. + /// + /// If the return type of a function requires drop, then we schedule it + /// in the outermost scope so that it's dropped if there's a panic while + /// we drop any local variables. But we don't want to drop it if we + /// return normally. + crate fn unschedule_return_place_drop(&mut self) { + assert_eq!(self.scopes.scopes.len(), 1); + assert!(self.scopes.scopes[0].drops.len() <= 1); + self.scopes.scopes[0].drops.clear(); } } -/// Builds drops for pop_scope and exit_scope. +/// Builds drops for pop_scope and leave_top_scope. fn build_scope_drops<'tcx>( cfg: &mut CFG<'tcx>, - generator_kind: Option, + unwind_drops: &mut DropTree, scope: &Scope, mut block: BasicBlock, - last_unwind_to: BasicBlock, + mut unwind_to: DropIdx, + storage_dead_on_unwind: bool, arg_count: usize, - generator_drop: bool, - is_cached_path: bool, ) -> BlockAnd<()> { debug!("build_scope_drops({:?} -> {:?})", block, scope); @@ -1135,37 +1223,39 @@ fn build_scope_drops<'tcx>( // drops for the unwind path should have already been generated by // `diverge_cleanup_gen`. 
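    // Note how `unwind_to` is threaded through the loop that follows: it
    // starts at the unwind-tree node for the innermost scheduled drop and is
    // stepped towards the root in lock-step with the drops emitted below, so
    // each `Drop` terminator can register an unwind entry that skips its own
    // local.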
- for drop_idx in (0..scope.drops.len()).rev() { - let drop_data = &scope.drops[drop_idx]; - let source_info = scope.source_info(drop_data.span); + for drop_data in scope.drops.iter().rev() { + let source_info = drop_data.source_info; let local = drop_data.local; match drop_data.kind { DropKind::Value => { + debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local); + debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind); + unwind_to = unwind_drops.drops[unwind_to].1; // If the operand has been moved, and we are not on an unwind // path, then don't generate the drop. (We only take this into // account for non-unwind paths so as not to disturb the // caching mechanism.) - if !is_cached_path && scope.moved_locals.iter().any(|&o| o == local) { + if scope.moved_locals.iter().any(|&o| o == local) { continue; } - let unwind_to = get_unwind_to(scope, generator_kind, drop_idx, generator_drop) - .unwrap_or(last_unwind_to); + unwind_drops.add_entry(block, unwind_to); let next = cfg.start_new_block(); cfg.terminate( block, source_info, - TerminatorKind::Drop { - location: local.into(), - target: next, - unwind: Some(unwind_to), - }, + TerminatorKind::Drop { location: local.into(), target: next, unwind: None }, ); block = next; } DropKind::Storage => { + if storage_dead_on_unwind { + debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local); + debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind); + unwind_to = unwind_drops.drops[unwind_to].1; + } // Only temps and vars need their storage dead. assert!(local.index() > arg_count); cfg.push(block, Statement { source_info, kind: StatementKind::StorageDead(local) }); @@ -1175,139 +1265,180 @@ fn build_scope_drops<'tcx>( block.unit() } -fn get_unwind_to( - scope: &Scope, - generator_kind: Option, - unwind_from: usize, - generator_drop: bool, -) -> Option { - for drop_idx in (0..unwind_from).rev() { - let drop_data = &scope.drops[drop_idx]; - match (generator_kind, &drop_data.kind) { - (Some(_), DropKind::Storage) => { - return Some(drop_data.cached_block.get(generator_drop).unwrap_or_else(|| { - span_bug!(drop_data.span, "cached block not present for {:?}", drop_data) - })); - } - (None, DropKind::Value) => { - return Some(drop_data.cached_block.get(generator_drop).unwrap_or_else(|| { - span_bug!(drop_data.span, "cached block not present for {:?}", drop_data) - })); +impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { + fn build_exit_tree( + &mut self, + mut drops: DropTree, + continue_block: Option<(BasicBlock, DropIdx)>, + ) -> Option> { + let mut blocks = IndexVec::from_elem(None, &drops.drops); + blocks[ROOT_NODE] = continue_block.map(|(block, _)| block); + + drops.build_mir::(&mut self.cfg, &mut blocks); + if drops.drops.iter().any(|(drop, _)| drop.kind == DropKind::Value) { + let unwind_target = continue_block + .map_or_else(|| self.diverge_cleanup(), |(_, unwind_target)| unwind_target); + let mut unwind_indices = IndexVec::from_elem_n(unwind_target, 1); + for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(1) { + match drop_data.0.kind { + DropKind::Storage => { + if self.generator_kind.is_some() { + let unwind_drop = self + .scopes + .unwind_drops + .add_drop(drop_data.0, unwind_indices[drop_data.1]); + unwind_indices.push(unwind_drop); + } else { + unwind_indices.push(unwind_indices[drop_data.1]); + } + } + DropKind::Value => { + let unwind_drop = self + .scopes + .unwind_drops + .add_drop(drop_data.0, unwind_indices[drop_data.1]); + self.scopes + .unwind_drops + 
.add_entry(blocks[drop_idx].unwrap(), unwind_indices[drop_data.1]); + unwind_indices.push(unwind_drop); + } + } } - _ => (), } + blocks[ROOT_NODE].map(BasicBlock::unit) } - None -} -fn build_diverge_scope<'tcx>( - cfg: &mut CFG<'tcx>, - span: Span, - scope: &mut Scope, - mut target: BasicBlock, - generator_drop: bool, - generator_kind: Option, -) -> BasicBlock { - // Build up the drops in **reverse** order. The end result will - // look like: - // - // [drops[n]] -...-> [drops[0]] -> [target] - // - // The code in this function reads from right to left. At each - // point, we check for cached blocks representing the - // remainder. If everything is cached, we'll just walk right to - // left reading the cached results but never create anything. - - let source_scope = scope.source_scope; - let source_info = |span| SourceInfo { span, scope: source_scope }; - - // We keep track of StorageDead statements to prepend to our current block - // and store them here, in reverse order. - let mut storage_deads = vec![]; - - let mut target_built_by_us = false; - - // Build up the drops. Here we iterate the vector in - // *forward* order, so that we generate drops[0] first (right to - // left in diagram above). - debug!("build_diverge_scope({:?})", scope.drops); - for (j, drop_data) in scope.drops.iter_mut().enumerate() { - debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data); - // Only full value drops are emitted in the diverging path, - // not StorageDead, except in the case of generators. + crate fn build_drop_trees(&mut self, should_abort: bool) { + if self.generator_kind.is_some() { + self.build_generator_drop_trees(should_abort); + } else { + Self::build_unwind_tree( + &mut self.cfg, + &mut self.scopes.unwind_drops, + self.fn_span, + should_abort, + &mut None, + ); + } + } + + fn build_generator_drop_trees(&mut self, should_abort: bool) { + // Build the drop tree for dropping the generator while it's suspended. + let drops = &mut self.scopes.generator_drops; + let cfg = &mut self.cfg; + let fn_span = self.fn_span; + let mut blocks = IndexVec::from_elem(None, &drops.drops); + drops.build_mir::(cfg, &mut blocks); + if let Some(root_block) = blocks[ROOT_NODE] { + cfg.terminate( + root_block, + SourceInfo { scope: OUTERMOST_SOURCE_SCOPE, span: fn_span }, + TerminatorKind::GeneratorDrop, + ); + } + + // Build the drop tree for unwinding in the normal control flow paths. + let resume_block = &mut None; + let unwind_drops = &mut self.scopes.unwind_drops; + Self::build_unwind_tree(cfg, unwind_drops, fn_span, should_abort, resume_block); + + // Build the drop tree for unwinding when dropping a suspended + // generator. // - // Note: This may not actually be what we desire (are we - // "freeing" stack storage as we unwind, or merely observing a - // frozen stack)? In particular, the intent may have been to - // match the behavior of clang, but on inspection eddyb says - // this is not what clang does. - match drop_data.kind { - DropKind::Storage if generator_kind.is_some() => { - storage_deads.push(Statement { - source_info: source_info(drop_data.span), - kind: StatementKind::StorageDead(drop_data.local), - }); - if !target_built_by_us { - // We cannot add statements to an existing block, so we create a new - // block for our StorageDead statements. 
- let block = cfg.start_new_cleanup_block(); - let source_info = SourceInfo { span: DUMMY_SP, scope: source_scope }; - cfg.goto(block, source_info, target); - target = block; - target_built_by_us = true; - } - *drop_data.cached_block.ref_mut(generator_drop) = Some(target); - } - DropKind::Storage => {} - DropKind::Value => { - let cached_block = drop_data.cached_block.ref_mut(generator_drop); - target = if let Some(cached_block) = *cached_block { - storage_deads.clear(); - target_built_by_us = false; - cached_block - } else { - push_storage_deads(cfg, target, &mut storage_deads); - let block = cfg.start_new_cleanup_block(); - cfg.terminate( - block, - source_info(drop_data.span), - TerminatorKind::Drop { - location: drop_data.local.into(), - target, - unwind: None, - }, - ); - *cached_block = Some(block); - target_built_by_us = true; - block - }; + // This is a different tree to the standard unwind paths here to + // prevent drop elaboration from creating drop flags that would have + // to be captured by the generator. I'm not sure how important this + // optimization is, but it is here. + for (drop_idx, drop_data) in drops.drops.iter_enumerated() { + if let DropKind::Value = drop_data.0.kind { + debug_assert!(drop_data.1 < drops.drops.next_index()); + drops.entry_points.push((drop_data.1, blocks[drop_idx].unwrap())); } - }; + } + Self::build_unwind_tree(cfg, drops, fn_span, should_abort, resume_block); } - push_storage_deads(cfg, target, &mut storage_deads); - *scope.cached_unwind.ref_mut(generator_drop) = Some(target); - assert!(storage_deads.is_empty()); - debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target); + fn build_unwind_tree( + cfg: &mut CFG<'tcx>, + drops: &mut DropTree, + fn_span: Span, + should_abort: bool, + resume_block: &mut Option, + ) { + let mut blocks = IndexVec::from_elem(None, &drops.drops); + blocks[ROOT_NODE] = *resume_block; + drops.build_mir::(cfg, &mut blocks); + if let (None, Some(resume)) = (*resume_block, blocks[ROOT_NODE]) { + let terminator = + if should_abort { TerminatorKind::Abort } else { TerminatorKind::Resume }; + cfg.terminate( + resume, + SourceInfo { scope: OUTERMOST_SOURCE_SCOPE, span: fn_span }, + terminator, + ); + *resume_block = blocks[ROOT_NODE]; + } + } +} - target +struct ExitScopes; + +impl<'tcx> DropTreeBuilder<'tcx> for ExitScopes { + fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock { + cfg.start_new_block() + } + fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) { + cfg.block_data_mut(from).terminator_mut().kind = TerminatorKind::Goto { target: to }; + } } -fn push_storage_deads<'tcx>( - cfg: &mut CFG<'tcx>, - target: BasicBlock, - storage_deads: &mut Vec>, -) { - if storage_deads.is_empty() { - return; +struct GeneratorDrop; + +impl<'tcx> DropTreeBuilder<'tcx> for GeneratorDrop { + fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock { + cfg.start_new_block() + } + fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) { + let term = cfg.block_data_mut(from).terminator_mut(); + if let TerminatorKind::Yield { ref mut drop, .. 
} = term.kind { + *drop = Some(to); + } else { + span_bug!( + term.source_info.span, + "cannot enter generator drop tree from {:?}", + term.kind + ) + } + } +} + +struct Unwind; + +impl<'tcx> DropTreeBuilder<'tcx> for Unwind { + fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock { + cfg.start_new_cleanup_block() + } + fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) { + let term = &mut cfg.block_data_mut(from).terminator_mut(); + match &mut term.kind { + TerminatorKind::Drop { unwind, .. } + | TerminatorKind::DropAndReplace { unwind, .. } + | TerminatorKind::FalseUnwind { unwind, .. } + | TerminatorKind::Call { cleanup: unwind, .. } + | TerminatorKind::Assert { cleanup: unwind, .. } => { + *unwind = Some(to); + } + TerminatorKind::Goto { .. } + | TerminatorKind::SwitchInt { .. } + | TerminatorKind::Resume + | TerminatorKind::Abort + | TerminatorKind::Return + | TerminatorKind::Unreachable + | TerminatorKind::Yield { .. } + | TerminatorKind::GeneratorDrop + | TerminatorKind::FalseEdges { .. } => { + span_bug!(term.source_info.span, "cannot unwind from {:?}", term.kind) + } + } } - let statements = &mut cfg.block_data_mut(target).statements; - storage_deads.reverse(); - debug!( - "push_storage_deads({:?}), storage_deads={:?}, statements={:?}", - target, storage_deads, statements - ); - storage_deads.append(statements); - mem::swap(statements, storage_deads); - assert!(storage_deads.is_empty()); } diff --git a/src/test/codegen/drop.rs b/src/test/codegen/drop.rs index 0c7f3bb2020a9..99a791464ab89 100644 --- a/src/test/codegen/drop.rs +++ b/src/test/codegen/drop.rs @@ -23,13 +23,13 @@ pub fn droppy() { // FIXME(eddyb) the `void @` forces a match on the instruction, instead of the // comment, that's `; call core::intrinsics::drop_in_place::` // for the `v0` mangling, should switch to matching on that once `legacy` is gone. +// CHECK-NOT: call void @{{.*}}drop_in_place{{.*}}SomeUniqueName +// CHECK: invoke void @{{.*}}drop_in_place{{.*}}SomeUniqueName +// CHECK: invoke void @{{.*}}drop_in_place{{.*}}SomeUniqueName // CHECK-NOT: invoke void @{{.*}}drop_in_place{{.*}}SomeUniqueName // CHECK: call void @{{.*}}drop_in_place{{.*}}SomeUniqueName // CHECK: call void @{{.*}}drop_in_place{{.*}}SomeUniqueName -// CHECK-NOT: call void @{{.*}}drop_in_place{{.*}}SomeUniqueName -// CHECK: invoke void @{{.*}}drop_in_place{{.*}}SomeUniqueName // CHECK: call void @{{.*}}drop_in_place{{.*}}SomeUniqueName -// CHECK: invoke void @{{.*}}drop_in_place{{.*}}SomeUniqueName // CHECK: call void @{{.*}}drop_in_place{{.*}}SomeUniqueName // CHECK-NOT: {{(call|invoke) void @.*}}drop_in_place{{.*}}SomeUniqueName // The next line checks for the } that ends the function definition diff --git a/src/test/mir-opt/basic_assignment.rs b/src/test/mir-opt/basic_assignment.rs index ca0e9fa811a26..8fcf4d7613f88 100644 --- a/src/test/mir-opt/basic_assignment.rs +++ b/src/test/mir-opt/basic_assignment.rs @@ -39,14 +39,14 @@ fn main() { // StorageLive(_5); // StorageLive(_6); // _6 = move _4; -// replace(_5 <- move _6) -> [return: bb2, unwind: bb5]; +// replace(_5 <- move _6) -> [return: bb1, unwind: bb5]; // } // ... -// bb2: { -// drop(_6) -> [return: bb6, unwind: bb4]; +// bb1: { +// drop(_6) -> [return: bb2, unwind: bb6]; // } // ... 
// bb5 (cleanup): { -// drop(_6) -> bb4; +// drop(_6) -> bb6; // } // END rustc.main.SimplifyCfg-initial.after.mir diff --git a/src/test/mir-opt/box_expr.rs b/src/test/mir-opt/box_expr.rs index fa1a291858bec..2ef4a3d8abbb5 100644 --- a/src/test/mir-opt/box_expr.rs +++ b/src/test/mir-opt/box_expr.rs @@ -33,45 +33,40 @@ impl Drop for S { // StorageLive(_1); // StorageLive(_2); // _2 = Box(S); -// (*_2) = const S::new() -> [return: bb2, unwind: bb3]; +// (*_2) = const S::new() -> [return: bb1, unwind: bb7]; // } -// -// bb1 (cleanup): { -// resume; -// } -// -// bb2: { +// bb1: { // _1 = move _2; -// drop(_2) -> bb4; +// drop(_2) -> [return: bb2, unwind: bb6]; // } -// -// bb3 (cleanup): { -// drop(_2) -> bb1; -// } -// -// bb4: { +// bb2: { // StorageDead(_2); // StorageLive(_3); // StorageLive(_4); // _4 = move _1; -// _3 = const std::mem::drop::>(move _4) -> [return: bb5, unwind: bb7]; +// _3 = const std::mem::drop::>(move _4) -> [return: bb3, unwind: bb5]; // } -// -// bb5: { +// bb3: { // StorageDead(_4); // StorageDead(_3); // _0 = (); -// drop(_1) -> bb8; +// drop(_1) -> bb4; +// } +// bb4: { +// StorageDead(_1); +// return; +// } +// bb5 (cleanup): { +// drop(_4) -> bb6; // } // bb6 (cleanup): { -// drop(_1) -> bb1; +// drop(_1) -> bb8; // } // bb7 (cleanup): { -// drop(_4) -> bb6; +// drop(_2) -> bb8; // } -// bb8: { -// StorageDead(_1); -// return; +// bb8 (cleanup): { +// resume; // } // } // END rustc.main.ElaborateDrops.before.mir diff --git a/src/test/mir-opt/const-promotion-extern-static.rs b/src/test/mir-opt/const-promotion-extern-static.rs index f6f7d0910911c..e303b2274f4a5 100644 --- a/src/test/mir-opt/const-promotion-extern-static.rs +++ b/src/test/mir-opt/const-promotion-extern-static.rs @@ -19,10 +19,9 @@ fn main() {} // _3 = [move _4]; // _2 = &_3; // _1 = move _2 as &[&'static i32] (Pointer(Unsize)); -// _0 = const core::slice::::as_ptr(move _1) -> [return: bb2, unwind: bb1]; +// _0 = const core::slice::::as_ptr(move _1) -> bb1; // } -// ... -// bb2: { +// bb1: { // StorageDead(_5); // StorageDead(_3); // return; @@ -36,10 +35,9 @@ fn main() {} // _3 = [move _4]; // _2 = &_3; // _1 = move _2 as &[&'static i32] (Pointer(Unsize)); -// _0 = const core::slice::::as_ptr(move _1) -> [return: bb2, unwind: bb1]; +// _0 = const core::slice::::as_ptr(move _1) -> bb1; // } -// ... -// bb2: { +// bb1: { // StorageDead(_5); // StorageDead(_3); // return; @@ -51,10 +49,9 @@ fn main() {} // _6 = const BAR::promoted[0]; // _2 = &(*_6); // _1 = move _2 as &[&'static i32] (Pointer(Unsize)); -// _0 = const core::slice::::as_ptr(move _1) -> [return: bb2, unwind: bb1]; +// _0 = const core::slice::::as_ptr(move _1) -> bb1; // } -// ... -// bb2: { +// bb1: { // return; // } // END rustc.BAR.PromoteTemps.after.mir @@ -64,10 +61,9 @@ fn main() {} // _6 = const FOO::promoted[0]; // _2 = &(*_6); // _1 = move _2 as &[&'static i32] (Pointer(Unsize)); -// _0 = const core::slice::::as_ptr(move _1) -> [return: bb2, unwind: bb1]; +// _0 = const core::slice::::as_ptr(move _1) -> bb1; // } -// ... -// bb2: { +// bb1: { // return; // } // END rustc.FOO.PromoteTemps.after.mir diff --git a/src/test/mir-opt/const_prop/boxes.rs b/src/test/mir-opt/const_prop/boxes.rs index cf134dadf2789..fe25d6a0a2d13 100644 --- a/src/test/mir-opt/const_prop/boxes.rs +++ b/src/test/mir-opt/const_prop/boxes.rs @@ -22,16 +22,16 @@ fn main() { // _2 = (*_3); // _1 = Add(move _2, const 0i32); // ... 
-// drop(_3) -> [return: bb2, unwind: bb1]; +// drop(_3) -> [return: bb1, unwind: bb2]; // } -// bb1 (cleanup): { -// resume; -// } -// bb2: { +// bb1: { // ... // _0 = (); // ... // } +// bb2 (cleanup): { +// resume; +// } // END rustc.main.ConstProp.before.mir // START rustc.main.ConstProp.after.mir // bb0: { @@ -43,14 +43,14 @@ fn main() { // _2 = (*_3); // _1 = Add(move _2, const 0i32); // ... -// drop(_3) -> [return: bb2, unwind: bb1]; +// drop(_3) -> [return: bb1, unwind: bb2]; // } -// bb1 (cleanup): { -// resume; -// } -// bb2: { +// bb1: { // ... // _0 = (); // ... // } +// bb2 (cleanup): { +// resume; +// } // END rustc.main.ConstProp.after.mir diff --git a/src/test/mir-opt/generator-storage-dead-unwind.rs b/src/test/mir-opt/generator-storage-dead-unwind.rs index 82b216a99cf55..7aea985be999f 100644 --- a/src/test/mir-opt/generator-storage-dead-unwind.rs +++ b/src/test/mir-opt/generator-storage-dead-unwind.rs @@ -49,66 +49,66 @@ fn main() { // StorageLive(_4); // _4 = Bar(const 6i32,); // ... -// _5 = yield(move _6) -> [resume: bb2, drop: bb4]; +// _5 = yield(move _6) -> [resume: bb1, drop: bb5]; // } -// bb1 (cleanup): { -// resume; -// } -// bb2: { +// bb1: { // ... -// StorageLive(_7); // StorageLive(_8); // _8 = move _3; -// _7 = const take::(move _8) -> [return: bb7, unwind: bb9]; +// _7 = const take::(move _8) -> [return: bb2, unwind: bb9]; // } -// bb3 (cleanup): { -// StorageDead(_3); -// drop(_1) -> bb1; +// bb2: { +// StorageDead(_8); +// StorageDead(_7); +// StorageLive(_9); +// StorageLive(_10); +// _10 = move _4; +// _9 = const take::(move _10) -> [return: bb3, unwind: bb8]; // } -// bb4: { +// bb3: { +// StorageDead(_10); +// StorageDead(_9); // ... // StorageDead(_4); -// drop(_3) -> [return: bb5, unwind: bb3]; +// StorageDead(_3); +// drop(_1) -> [return: bb4, unwind: bb11]; +// } +// bb4: { +// return; // } // bb5: { -// StorageDead(_3); -// drop(_1) -> [return: bb6, unwind: bb1]; +// ... +// StorageDead(_4); +// drop(_3) -> [return: bb6, unwind: bb12]; // } // bb6: { -// generator_drop; +// StorageDead(_3); +// drop(_1) -> [return: bb7, unwind: bb11]; // } // bb7: { -// StorageDead(_8); -// StorageDead(_7); -// StorageLive(_9); -// StorageLive(_10); -// _10 = move _4; -// _9 = const take::(move _10) -> [return: bb10, unwind: bb11]; +// generator_drop; // } // bb8 (cleanup): { -// StorageDead(_4); -// StorageDead(_3); -// drop(_1) -> bb1; +// StorageDead(_10); +// StorageDead(_9); +// goto -> bb10; // } // bb9 (cleanup): { // StorageDead(_8); // StorageDead(_7); -// goto -> bb8; +// goto -> bb10; // } -// bb10: { -// StorageDead(_10); -// StorageDead(_9); -// ... +// bb10 (cleanup): { // StorageDead(_4); // StorageDead(_3); -// drop(_1) -> [return: bb12, unwind: bb1]; +// drop(_1) -> bb11; // } // bb11 (cleanup): { -// StorageDead(_10); -// StorageDead(_9); -// goto -> bb8; +// resume; // } -// bb12: { -// return; +// bb12 (cleanup): { +// StorageDead(_3); +// drop(_1) -> bb11; // } + // END rustc.main-{{closure}}.StateTransform.before.mir diff --git a/src/test/mir-opt/graphviz.rs b/src/test/mir-opt/graphviz.rs index fcbb189c1117b..f253388261a78 100644 --- a/src/test/mir-opt/graphviz.rs +++ b/src/test/mir-opt/graphviz.rs @@ -8,13 +8,10 @@ fn main() {} // END RUST SOURCE // START rustc.main.mir_map.0.dot // digraph Mir_0_3 { // The name here MUST be an ASCII identifier. -// graph [fontname="monospace"]; -// node [fontname="monospace"]; -// edge [fontname="monospace"]; -// label=>; -// bb0__0_3 [shape="none", label=<
0 | _0 = () | goto
>]; -// bb1__0_3 [shape="none", label=<
1 | resume
>]; -// bb2__0_3 [shape="none", label=<
2 | return
>]; -// bb0__0_3 -> bb2__0_3 [label=""]; +// graph [fontname="monospace"]; +// node [fontname="monospace"]; +// edge [fontname="monospace"]; +// label=>; +// bb0__0_3 [shape="none", label=<
0 | _0 = () | return
>]; // } // END rustc.main.mir_map.0.dot diff --git a/src/test/mir-opt/inline/inline-into-box-place.rs b/src/test/mir-opt/inline/inline-into-box-place.rs index f368bdef6f8e2..462f1138ed734 100644 --- a/src/test/mir-opt/inline/inline-into-box-place.rs +++ b/src/test/mir-opt/inline/inline-into-box-place.rs @@ -20,23 +20,23 @@ fn main() { // StorageLive(_1); // StorageLive(_2); // _2 = Box(std::vec::Vec); -// (*_2) = const std::vec::Vec::::new() -> [return: bb2, unwind: bb4]; +// (*_2) = const std::vec::Vec::::new() -> [return: bb1, unwind: bb4]; // } -// bb1 (cleanup): { -// resume; -// } -// bb2: { +// bb1: { // _1 = move _2; // StorageDead(_2); // _0 = (); -// drop(_1) -> [return: bb3, unwind: bb1]; +// drop(_1) -> [return: bb2, unwind: bb3]; // } -// bb3: { +// bb2: { // StorageDead(_1); // return; // } +// bb3 (cleanup): { +// resume; +// } // bb4 (cleanup): { -// _3 = const alloc::alloc::box_free::>(move (_2.0: std::ptr::Unique>)) -> bb1; +// _3 = const alloc::alloc::box_free::>(move (_2.0: std::ptr::Unique>)) -> bb3; // } // END rustc.main.Inline.before.mir // START rustc.main.Inline.after.mir @@ -60,13 +60,13 @@ fn main() { // _1 = move _2; // StorageDead(_2); // _0 = (); -// drop(_1) -> [return: bb2, unwind: bb1]; +// drop(_1) -> [return: bb1, unwind: bb2]; // } -// bb1 (cleanup): { -// resume; -// } -// bb2: { +// bb1: { // StorageDead(_1); // return; // } +// bb2 (cleanup): { +// resume; +// } // END rustc.main.Inline.after.mir diff --git a/src/test/mir-opt/issue-38669.rs b/src/test/mir-opt/issue-38669.rs index d980cc891dc40..6621707c74b78 100644 --- a/src/test/mir-opt/issue-38669.rs +++ b/src/test/mir-opt/issue-38669.rs @@ -16,35 +16,35 @@ fn main() { // StorageLive(_1); // _1 = const false; // FakeRead(ForLet, _1); -// goto -> bb2; +// goto -> bb1; // } -// bb1 (cleanup): { -// resume; +// bb1: { +// falseUnwind -> [real: bb2, cleanup: bb6]; // } // bb2: { -// falseUnwind -> [real: bb3, cleanup: bb1]; -// } -// bb3: { // StorageLive(_3); // StorageLive(_4); // _4 = _1; // FakeRead(ForMatchedPlace, _4); -// switchInt(_4) -> [false: bb5, otherwise: bb4]; +// switchInt(_4) -> [false: bb4, otherwise: bb3]; // } // ... 
-// bb5: { +// bb4: { // _3 = (); // StorageDead(_4); // StorageDead(_3); // _1 = const true; // _2 = (); -// goto -> bb2; +// goto -> bb1; // } -// bb6: { +// bb5: { // _0 = (); // StorageDead(_4); // StorageDead(_3); // StorageDead(_1); // return; // } +// bb6 (cleanup): { +// resume; +// } // END rustc.main.SimplifyCfg-initial.after.mir diff --git a/src/test/mir-opt/issue-49232.rs b/src/test/mir-opt/issue-49232.rs index 54c89b85f42d7..50b9971cf9aea 100644 --- a/src/test/mir-opt/issue-49232.rs +++ b/src/test/mir-opt/issue-49232.rs @@ -30,64 +30,55 @@ fn main() { // goto -> bb1; // } // bb1: { -// falseUnwind -> [real: bb3, cleanup: bb4]; +// falseUnwind -> [real: bb2, cleanup: bb11]; // } // bb2: { -// goto -> bb14; -// } -// bb3: { // StorageLive(_2); // StorageLive(_3); // _3 = const true; // FakeRead(ForMatchedPlace, _3); -// switchInt(_3) -> [false: bb5, otherwise: bb6]; -// } -// bb4 (cleanup): { -// resume; +// switchInt(_3) -> [false: bb3, otherwise: bb4]; // } -// bb5: { -// falseEdges -> [real: bb7, imaginary: bb6]; +// bb3: { +// falseEdges -> [real: bb5, imaginary: bb4]; // } -// bb6: { +// bb4: { // _0 = (); -// goto -> bb8; +// goto -> bb10; // } -// bb7: { +// bb5: { // _2 = const 4i32; -// goto -> bb12; -// } -// bb8: { -// StorageDead(_3); -// goto -> bb9; +// goto -> bb8; // } -// bb9: { -// StorageDead(_2); -// goto -> bb2; -// } -// bb10: { +// bb6: { // _4 = (); // unreachable; -// } -// bb11: { -// goto -> bb12; -// } -// bb12: { +// } +// bb7: { +// goto -> bb8; +// } +// bb8: { // FakeRead(ForLet, _2); // StorageDead(_3); // StorageLive(_5); -// StorageLive(_6); +// StorageLive(_6); // _6 = &_2; -// _5 = const std::mem::drop::<&i32>(move _6) -> [return: bb13, unwind: bb4]; +// _5 = const std::mem::drop::<&i32>(move _6) -> [return: bb9, unwind: bb11]; // } -// bb13: { +// bb9: { // StorageDead(_6); // StorageDead(_5); // _1 = (); // StorageDead(_2); // goto -> bb1; // } -// bb14: { +// bb10: { +// StorageDead(_3); +// StorageDead(_2); // return; // } +// bb11 (cleanup): { +// resume; +// } // } // END rustc.main.mir_map.0.mir diff --git a/src/test/mir-opt/issue-62289.rs b/src/test/mir-opt/issue-62289.rs index 8e619ffdf8b96..494834e6ff8a4 100644 --- a/src/test/mir-opt/issue-62289.rs +++ b/src/test/mir-opt/issue-62289.rs @@ -24,68 +24,71 @@ fn main() { // StorageLive(_3); // StorageLive(_4); // _4 = std::option::Option::::None; -// _3 = const as std::ops::Try>::into_result(move _4) -> [return: bb2, unwind: bb3]; +// _3 = const as std::ops::Try>::into_result(move _4) -> [return: bb1, unwind: bb13]; // } -// bb1 (cleanup): { -// resume; -// } -// bb2: { +// bb1: { // StorageDead(_4); // _5 = discriminant(_3); -// switchInt(move _5) -> [0isize: bb4, 1isize: bb6, otherwise: bb5]; +// switchInt(move _5) -> [0isize: bb2, 1isize: bb4, otherwise: bb3]; // } -// bb3 (cleanup): { -// drop(_2) -> bb1; -// } -// bb4: { +// bb2: { // StorageLive(_10); // _10 = ((_3 as Ok).0: u32); // (*_2) = _10; // StorageDead(_10); // _1 = move _2; -// drop(_2) -> [return: bb12, unwind: bb11]; +// drop(_2) -> [return: bb7, unwind: bb12]; // } -// bb5: { +// bb3: { // unreachable; // } -// bb6: { +// bb4: { // StorageLive(_6); // _6 = ((_3 as Err).0: std::option::NoneError); // StorageLive(_8); // StorageLive(_9); // _9 = _6; -// _8 = const >::from(move _9) -> [return: bb8, unwind: bb3]; +// _8 = const >::from(move _9) -> [return: bb5, unwind: bb13]; // } -// bb7: { -// return; -// } -// bb8: { +// bb5: { // StorageDead(_9); -// _0 = const > as std::ops::Try>::from_error(move _8) -> [return: bb9, 
unwind: bb3]; +// _0 = const > as std::ops::Try>::from_error(move _8) -> [return: bb6, unwind: bb13]; // } -// bb9: { +// bb6: { // StorageDead(_8); // StorageDead(_6); -// drop(_2) -> bb10; +// drop(_2) -> [return: bb9, unwind: bb11]; // } -// bb10: { +// bb7: { // StorageDead(_2); +// _0 = std::option::Option::>::Some(move _1,); +// drop(_1) -> [return: bb8, unwind: bb11]; +// } +// bb8: { // StorageDead(_1); // StorageDead(_3); -// goto -> bb7; -// } -// bb11 (cleanup): { -// drop(_1) -> bb1; +// goto -> bb10; // } -// bb12: { +// bb9: { // StorageDead(_2); -// _0 = std::option::Option::>::Some(move _1,); -// drop(_1) -> bb13; -// } -// bb13: { // StorageDead(_1); // StorageDead(_3); -// goto -> bb7; +// goto -> bb10; +// } +// bb10: { +// return; +// } +// bb11 (cleanup): { +// drop(_0) -> bb14; +// } +// bb12 (cleanup): { +// drop(_1) -> bb14; +// } +// bb13 (cleanup): { +// drop(_2) -> bb14; +// } +// bb14 (cleanup): { +// resume; // } // } // END rustc.test.ElaborateDrops.before.mir diff --git a/src/test/mir-opt/loop_test.rs b/src/test/mir-opt/loop_test.rs index 418febbdc01eb..f0bd001cf8374 100644 --- a/src/test/mir-opt/loop_test.rs +++ b/src/test/mir-opt/loop_test.rs @@ -18,27 +18,26 @@ fn main() { // END RUST SOURCE // START rustc.main.SimplifyCfg-qualify-consts.after.mir // ... -// bb1 (cleanup): { -// resume; -// } -// ... -// bb3: { // Entry into the loop +// bb2: { // Entry into the loop // _1 = (); // StorageDead(_2); // StorageDead(_1); // StorageLive(_4); -// goto -> bb5; +// goto -> bb4; // } // ... -// bb5: { // The loop_block -// falseUnwind -> [real: bb6, cleanup: bb1]; +// bb4: { // The loop_block +// falseUnwind -> [real: bb5, cleanup: bb6]; // } -// bb6: { // The loop body (body_block) +// bb5: { // The loop body (body_block) // StorageLive(_6); // _6 = const 1i32; // FakeRead(ForLet, _6); // StorageDead(_6); -// goto -> bb5; +// goto -> bb4; +// } +// bb6 (cleanup): { +// resume; // } // ... 
// END rustc.main.SimplifyCfg-qualify-consts.after.mir diff --git a/src/test/mir-opt/match-arm-scopes.rs b/src/test/mir-opt/match-arm-scopes.rs index 7afc3bbd6fae8..12b82b455f84e 100644 --- a/src/test/mir-opt/match-arm-scopes.rs +++ b/src/test/mir-opt/match-arm-scopes.rs @@ -61,38 +61,35 @@ fn main() { // } // bb0: { // FakeRead(ForMatchedPlace, _2); -// switchInt((_2.0: bool)) -> [false: bb2, otherwise: bb3]; +// switchInt((_2.0: bool)) -> [false: bb1, otherwise: bb2]; // } -// bb1 (cleanup): { -// resume; +// bb1: { +// falseEdges -> [real: bb8, imaginary: bb3]; // } -// bb2: { // pre-binding for arm 1 first pattern -// falseEdges -> [real: bb9, imaginary: bb4]; +// bb2: { +// switchInt((_2.1: bool)) -> [false: bb3, otherwise: bb4]; // } // bb3: { -// switchInt((_2.1: bool)) -> [false: bb4, otherwise: bb5]; +// falseEdges -> [real: bb14, imaginary: bb5]; // } -// bb4: { // pre-binding for arm 1 second pattern -// falseEdges -> [real: bb18, imaginary: bb6]; +// bb4: { +// switchInt((_2.0: bool)) -> [false: bb6, otherwise: bb5]; // } // bb5: { -// switchInt((_2.0: bool)) -> [false: bb7, otherwise: bb6]; -// } -// bb6: { // pre-binding for arm 2 first pattern -// falseEdges -> [real: bb26, imaginary: bb7]; +// falseEdges -> [real: bb22, imaginary: bb6]; // } -// bb7: { // bindings for arm 2 - second pattern +// bb6: { // StorageLive(_15); // _15 = (_2.1: bool); // StorageLive(_16); // _16 = move (_2.2: std::string::String); -// goto -> bb25; +// goto -> bb21; // } -// bb8: { // arm 1 +// bb7: { // _0 = const 1i32; -// drop(_7) -> [return: bb24, unwind: bb14]; +// drop(_7) -> [return: bb20, unwind: bb27]; // } -// bb9: { // guard - first time +// bb8: { // StorageLive(_6); // _6 = &(_2.1: bool); // StorageLive(_8); @@ -103,34 +100,23 @@ fn main() { // StorageLive(_10); // _10 = _1; // FakeRead(ForMatchedPlace, _10); -// switchInt(_10) -> [false: bb11, otherwise: bb10]; +// switchInt(_10) -> [false: bb10, otherwise: bb9]; // } -// bb10: { -// falseEdges -> [real: bb12, imaginary: bb11]; +// bb9: { +// falseEdges -> [real: bb11, imaginary: bb10]; // } -// bb11: { // `else` block - first time +// bb10: { // _9 = (*_6); // StorageDead(_10); -// switchInt(move _9) -> [false: bb17, otherwise: bb16]; +// switchInt(move _9) -> [false: bb13, otherwise: bb12]; // } -// bb12: { // `return 3` - first time +// bb11: { // _0 = const 3i32; // StorageDead(_10); // StorageDead(_9); -// StorageDead(_8); -// StorageDead(_6); -// goto -> bb15; -// } -// bb13: { -// return; -// } -// bb14 (cleanup): { -// drop(_2) -> bb1; -// } -// bb15: { -// drop(_2) -> [return: bb13, unwind: bb1]; +// goto -> bb25; // } -// bb16: { +// bb12: { // StorageDead(_9); // FakeRead(ForMatchGuard, _3); // FakeRead(ForMatchGuard, _4); @@ -140,15 +126,15 @@ fn main() { // _5 = (_2.1: bool); // StorageLive(_7); // _7 = move (_2.2: std::string::String); -// goto -> bb8; +// goto -> bb7; // } -// bb17: { // guard otherwise case - first time +// bb13: { // StorageDead(_9); // StorageDead(_8); // StorageDead(_6); -// falseEdges -> [real: bb3, imaginary: bb4]; +// falseEdges -> [real: bb2, imaginary: bb3]; // } -// bb18: { // guard - second time +// bb14: { // StorageLive(_6); // _6 = &(_2.0: bool); // StorageLive(_8); @@ -159,25 +145,23 @@ fn main() { // StorageLive(_13); // _13 = _1; // FakeRead(ForMatchedPlace, _13); -// switchInt(_13) -> [false: bb20, otherwise: bb19]; +// switchInt(_13) -> [false: bb16, otherwise: bb15]; // } -// bb19: { -// falseEdges -> [real: bb21, imaginary: bb20]; +// bb15: { +// falseEdges -> [real: bb17, 
imaginary: bb16]; // } -// bb20: { // `else` block - second time +// bb16: { // _12 = (*_6); // StorageDead(_13); -// switchInt(move _12) -> [false: bb23, otherwise: bb22]; +// switchInt(move _12) -> [false: bb19, otherwise: bb18]; // } -// bb21: { +// bb17: { // _0 = const 3i32; // StorageDead(_13); // StorageDead(_12); -// StorageDead(_8); -// StorageDead(_6); -// goto -> bb15; +// goto -> bb25; // } -// bb22: { // bindings for arm 1 +// bb18: { // StorageDead(_12); // FakeRead(ForMatchGuard, _3); // FakeRead(ForMatchGuard, _4); @@ -187,40 +171,53 @@ fn main() { // _5 = (_2.0: bool); // StorageLive(_7); // _7 = move (_2.2: std::string::String); -// goto -> bb8; +// goto -> bb7; // } -// bb23: { // Guard otherwise case - second time +// bb19: { // StorageDead(_12); // StorageDead(_8); // StorageDead(_6); -// falseEdges -> [real: bb5, imaginary: bb6]; +// falseEdges -> [real: bb4, imaginary: bb5]; // } -// bb24: { // rest of arm 1 +// bb20: { // StorageDead(_7); // StorageDead(_5); // StorageDead(_8); // StorageDead(_6); -// goto -> bb28; +// goto -> bb24; // } -// bb25: { // arm 2 +// bb21: { // _0 = const 2i32; -// drop(_16) -> [return: bb27, unwind: bb14]; +// drop(_16) -> [return: bb23, unwind: bb27]; // } -// bb26: { // bindings for arm 2 - first pattern +// bb22: { // StorageLive(_15); // _15 = (_2.1: bool); // StorageLive(_16); // _16 = move (_2.2: std::string::String); -// goto -> bb25; +// goto -> bb21; // } - -// bb27: { // rest of arm 2 +// bb23: { // StorageDead(_16); // StorageDead(_15); -// goto -> bb28; +// goto -> bb24; // } -// bb28: { -// drop(_2) -> [return: bb13, unwind: bb1]; +// bb24: { +// drop(_2) -> [return: bb26, unwind: bb28]; +// } +// bb25: { +// StorageDead(_8); +// StorageDead(_6); +// drop(_2) -> [return: bb26, unwind: bb28]; +// } +// bb26: { +// return; +// } +// bb27 (cleanup): { +// drop(_2) -> bb28; +// } +// bb28 (cleanup): { +// resume; // } // END rustc.complicated_match.SimplifyCfg-initial.after.mir // START rustc.complicated_match.ElaborateDrops.after.mir diff --git a/src/test/mir-opt/match_false_edges.rs b/src/test/mir-opt/match_false_edges.rs index 237828d9020db..367a467272916 100644 --- a/src/test/mir-opt/match_false_edges.rs +++ b/src/test/mir-opt/match_false_edges.rs @@ -45,36 +45,33 @@ fn main() { // _2 = std::option::Option::::Some(const 42i32,); // FakeRead(ForMatchedPlace, _2); // _3 = discriminant(_2); -// switchInt(move _3) -> [0isize: bb2, 1isize: bb3, otherwise: bb5]; +// switchInt(move _3) -> [0isize: bb1, 1isize: bb2, otherwise: bb4]; // } -// bb1 (cleanup): { -// resume; -// } -// bb2: { // pre_binding3 and arm3 +// bb1: { // pre_binding3 and arm3 // _1 = (const 3i32, const 3i32); -// goto -> bb11; +// goto -> bb10; +// } +// bb2: { +// falseEdges -> [real: bb5, imaginary: bb3]; //pre_binding1 // } // bb3: { -// falseEdges -> [real: bb6, imaginary: bb4]; //pre_binding1 +// falseEdges -> [real: bb9, imaginary: bb1]; //pre_binding2 // } // bb4: { -// falseEdges -> [real: bb10, imaginary: bb2]; //pre_binding2 -// } -// bb5: { // unreachable; // } -// bb6: { // binding1 and guard +// bb5: { // binding1 and guard // StorageLive(_6); // _11 = const full_tested_match::promoted[0]; // _6 = &(((*_11) as Some).0: i32); // _4 = &shallow _2; // StorageLive(_7); -// _7 = const guard() -> [return: bb7, unwind: bb1]; +// _7 = const guard() -> [return: bb6, unwind: bb11]; // } -// bb7: { // end of guard -// switchInt(move _7) -> [false: bb9, otherwise: bb8]; +// bb6: { // end of guard +// switchInt(move _7) -> [false: bb8, otherwise: bb7]; // } -// 
bb8: { // arm1 +// bb7: { // arm1 // StorageDead(_7); // FakeRead(ForMatchGuard, _4); // FakeRead(ForGuardBinding, _6); @@ -86,14 +83,14 @@ fn main() { // StorageDead(_8); // StorageDead(_5); // StorageDead(_6); -// goto -> bb11; +// goto -> bb10; // } -// bb9: { // to pre_binding2 +// bb8: { // to pre_binding2 // StorageDead(_7); // StorageDead(_6); -// goto -> bb4; +// goto -> bb3; // } -// bb10: { // arm2 +// bb9: { // arm2 // StorageLive(_9); // _9 = ((_2 as Some).0: i32); // StorageLive(_10); @@ -101,14 +98,17 @@ fn main() { // _1 = (const 2i32, move _10); // StorageDead(_10); // StorageDead(_9); -// goto -> bb11; +// goto -> bb10; // } -// bb11: { +// bb10: { // match exit // StorageDead(_2); // StorageDead(_1); // _0 = (); // return; // } +// bb11 (cleanup): { +// resume; +// } // END rustc.full_tested_match.PromoteTemps.after.mir // // START rustc.full_tested_match2.PromoteTemps.before.mir @@ -117,18 +117,15 @@ fn main() { // _2 = std::option::Option::::Some(const 42i32,); // FakeRead(ForMatchedPlace, _2); // _3 = discriminant(_2); -// switchInt(move _3) -> [0isize: bb2, 1isize: bb3, otherwise: bb5]; +// switchInt(move _3) -> [0isize: bb1, 1isize: bb2, otherwise: bb4]; // } -// bb1 (cleanup): { -// resume; -// } -// bb2: { // pre_binding2 -// falseEdges -> [real: bb10, imaginary: bb4]; +// bb1: { +// falseEdges -> [real: bb9, imaginary: bb3]; // } -// bb3: { // pre_binding1 -// falseEdges -> [real: bb6, imaginary: bb2]; +// bb2: { +// falseEdges -> [real: bb5, imaginary: bb1]; // } -// bb4: { // binding3 and arm3 +// bb3: { // binding3 and arm3 // StorageLive(_9); // _9 = ((_2 as Some).0: i32); // StorageLive(_10); @@ -136,22 +133,22 @@ fn main() { // _1 = (const 2i32, move _10); // StorageDead(_10); // StorageDead(_9); -// goto -> bb11; +// goto -> bb10; // } -// bb5: { +// bb4: { // unreachable; // } -// bb6: { +// bb5: { // binding1 and guard // StorageLive(_6); // _6 = &((_2 as Some).0: i32); // _4 = &shallow _2; // StorageLive(_7); -// _7 = const guard() -> [return: bb7, unwind: bb1]; +// _7 = const guard() -> [return: bb6, unwind: bb11]; // } -// bb7: { // end of guard -// switchInt(move _7) -> [false: bb9, otherwise: bb8]; +// bb6: { // end of guard +// switchInt(move _7) -> [false: bb8, otherwise: bb7]; // } -// bb8: { +// bb7: { // StorageDead(_7); // FakeRead(ForMatchGuard, _4); // FakeRead(ForGuardBinding, _6); @@ -163,23 +160,26 @@ fn main() { // StorageDead(_8); // StorageDead(_5); // StorageDead(_6); -// goto -> bb11; +// goto -> bb10; // } -// bb9: { // to pre_binding3 (can skip 2 since this is `Some`) +// bb8: { // to pre_binding3 (can skip 2 since this is `Some`) // StorageDead(_7); // StorageDead(_6); -// falseEdges -> [real: bb4, imaginary: bb2]; +// falseEdges -> [real: bb3, imaginary: bb1]; // } -// bb10: { // arm2 +// bb9: { // arm2 // _1 = (const 3i32, const 3i32); -// goto -> bb11; +// goto -> bb10; // } -// bb11: { +// bb10: { // StorageDead(_2); // StorageDead(_1); // _0 = (); // return; // } +// bb11 (cleanup): { +// resume; +// } // END rustc.full_tested_match2.PromoteTemps.before.mir // // START rustc.main.PromoteTemps.before.mir @@ -188,38 +188,35 @@ fn main() { // _2 = std::option::Option::::Some(const 1i32,); // FakeRead(ForMatchedPlace, _2); // _4 = discriminant(_2); -// switchInt(move _4) -> [1isize: bb3, otherwise: bb2]; +// switchInt(move _4) -> [1isize: bb2, otherwise: bb1]; // } -// bb1 (cleanup): { -// resume; +// bb1: { +// falseEdges -> [real: bb9, imaginary: bb4]; // } // bb2: { -// falseEdges -> [real: bb10, imaginary: bb5]; +// falseEdges 
-> [real: bb5, imaginary: bb1]; // } // bb3: { -// falseEdges -> [real: bb6, imaginary: bb2]; -// } -// bb4: { // StorageLive(_14); // _14 = _2; // _1 = const 4i32; // StorageDead(_14); -// goto -> bb15; +// goto -> bb14; // } -// bb5: { -// falseEdges -> [real: bb11, imaginary: bb4]; +// bb4: { +// falseEdges -> [real: bb10, imaginary: bb3]; // } -// bb6: { //end of guard1 +// bb5: { // StorageLive(_7); // _7 = &((_2 as Some).0: i32); // _5 = &shallow _2; // StorageLive(_8); -// _8 = const guard() -> [return: bb7, unwind: bb1]; +// _8 = const guard() -> [return: bb6, unwind: bb15]; // } -// bb7: { -// switchInt(move _8) -> [false: bb9, otherwise: bb8]; +// bb6: { //end of guard1 +// switchInt(move _8) -> [false: bb8, otherwise: bb7]; // } -// bb8: { +// bb7: { // StorageDead(_8); // FakeRead(ForMatchGuard, _5); // FakeRead(ForGuardBinding, _7); @@ -228,34 +225,34 @@ fn main() { // _1 = const 1i32; // StorageDead(_6); // StorageDead(_7); -// goto -> bb15; +// goto -> bb14; // } -// bb9: { +// bb8: { // StorageDead(_8); // StorageDead(_7); -// falseEdges -> [real: bb2, imaginary: bb2]; +// falseEdges -> [real: bb1, imaginary: bb1]; // } -// bb10: { // binding2 & arm2 +// bb9: { // binding2 & arm2 // StorageLive(_9); // _9 = _2; // _1 = const 2i32; // StorageDead(_9); -// goto -> bb15; +// goto -> bb14; // } -// bb11: { // binding3: Some(y) if guard2(y) +// bb10: { // binding3: Some(y) if guard2(y) // StorageLive(_11); // _11 = &((_2 as Some).0: i32); // _5 = &shallow _2; // StorageLive(_12); // StorageLive(_13); // _13 = (*_11); -// _12 = const guard2(move _13) -> [return: bb12, unwind: bb1]; +// _12 = const guard2(move _13) -> [return: bb11, unwind: bb15]; // } -// bb12: { // end of guard2 +// bb11: { // end of guard2 // StorageDead(_13); -// switchInt(move _12) -> [false: bb14, otherwise: bb13]; +// switchInt(move _12) -> [false: bb13, otherwise: bb12]; // } -// bb13: { // binding4 & arm4 +// bb12: { // binding4 & arm4 // StorageDead(_12); // FakeRead(ForMatchGuard, _5); // FakeRead(ForGuardBinding, _11); @@ -264,17 +261,20 @@ fn main() { // _1 = const 3i32; // StorageDead(_10); // StorageDead(_11); -// goto -> bb15; +// goto -> bb14; // } -// bb14: { +// bb13: { // StorageDead(_12); // StorageDead(_11); -// falseEdges -> [real: bb4, imaginary: bb4]; +// falseEdges -> [real: bb3, imaginary: bb3]; // } -// bb15: { +// bb14: { // StorageDead(_2); // StorageDead(_1); // _0 = (); // return; // } +// bb15 (cleanup): { +// resume; +// } // END rustc.main.PromoteTemps.before.mir diff --git a/src/test/mir-opt/nll/region-subtyping-basic.rs b/src/test/mir-opt/nll/region-subtyping-basic.rs index 16e357fc16255..2dca483a37963 100644 --- a/src/test/mir-opt/nll/region-subtyping-basic.rs +++ b/src/test/mir-opt/nll/region-subtyping-basic.rs @@ -22,9 +22,9 @@ fn main() { // END RUST SOURCE // START rustc.main.nll.0.mir -// | '_#2r | U0 | {bb2[0..=8], bb3[0], bb5[0..=2]} -// | '_#3r | U0 | {bb2[1..=8], bb3[0], bb5[0..=2]} -// | '_#4r | U0 | {bb2[4..=8], bb3[0], bb5[0..=2]} +// | '_#2r | U0 | {bb1[0..=8], bb2[0], bb4[0..=2]} +// | '_#3r | U0 | {bb1[1..=8], bb2[0], bb4[0..=2]} +// | '_#4r | U0 | {bb1[4..=8], bb2[0], bb4[0..=2]} // END rustc.main.nll.0.mir // START rustc.main.nll.0.mir // let _2: &'_#3r usize; diff --git a/src/test/mir-opt/no-spurious-drop-after-call.rs b/src/test/mir-opt/no-spurious-drop-after-call.rs index 782bc31186ca5..e4e906edd195a 100644 --- a/src/test/mir-opt/no-spurious-drop-after-call.rs +++ b/src/test/mir-opt/no-spurious-drop-after-call.rs @@ -10,11 +10,11 @@ fn main() { // END RUST 
SOURCE // START rustc.main.ElaborateDrops.before.mir -// bb2: { +// bb1: { // StorageDead(_3); -// _1 = const std::mem::drop::(move _2) -> [return: bb3, unwind: bb4]; +// _1 = const std::mem::drop::(move _2) -> [return: bb2, unwind: bb3]; // } -// bb3: { +// bb2: { // StorageDead(_2); // StorageDead(_4); // StorageDead(_1); diff --git a/src/test/mir-opt/packed-struct-drop-aligned.rs b/src/test/mir-opt/packed-struct-drop-aligned.rs index 113f81c441f7c..fa6479cdc2316 100644 --- a/src/test/mir-opt/packed-struct-drop-aligned.rs +++ b/src/test/mir-opt/packed-struct-drop-aligned.rs @@ -37,23 +37,23 @@ impl Drop for Droppy { // _6 = move (_1.0: Aligned); // drop(_6) -> [return: bb4, unwind: bb3]; // } -// bb1 (cleanup): { -// resume; -// } -// bb2: { +// bb1: { // StorageDead(_1); // return; // } +// bb2 (cleanup): { +// resume; +// } // bb3 (cleanup): { // (_1.0: Aligned) = move _4; -// drop(_1) -> bb1; +// drop(_1) -> bb2; // } // bb4: { // StorageDead(_6); // (_1.0: Aligned) = move _4; // StorageDead(_4); // _0 = (); -// drop(_1) -> [return: bb2, unwind: bb1]; +// drop(_1) -> [return: bb1, unwind: bb2]; // } // } // END rustc.main.EraseRegions.before.mir diff --git a/src/test/mir-opt/retag.rs b/src/test/mir-opt/retag.rs index 1c88a9e4d5a32..bb7fc3b9ad0ed 100644 --- a/src/test/mir-opt/retag.rs +++ b/src/test/mir-opt/retag.rs @@ -8,8 +8,12 @@ struct Test(i32); impl Test { // Make sure we run the pass on a method, not just on bare functions. - fn foo<'x>(&self, x: &'x mut i32) -> &'x mut i32 { x } - fn foo_shr<'x>(&self, x: &'x i32) -> &'x i32 { x } + fn foo<'x>(&self, x: &'x mut i32) -> &'x mut i32 { + x + } + fn foo_shr<'x>(&self, x: &'x i32) -> &'x i32 { + x + } } impl Drop for Test { @@ -27,7 +31,10 @@ fn main() { } // Also test closures - let c: fn(&i32) -> &i32 = |x: &i32| -> &i32 { let _y = x; x }; + let c: fn(&i32) -> &i32 = |x: &i32| -> &i32 { + let _y = x; + x + }; let _w = c(&x); // need to call `foo_shr` or it doesn't even get generated @@ -65,12 +72,12 @@ fn main() { // ... // bb0: { // ... -// _3 = const Test::foo(move _4, move _6) -> [return: bb2, unwind: bb3]; +// _3 = const Test::foo(move _4, move _6) -> [return: bb1, unwind: bb7]; // } // // ... // -// bb2: { +// bb1: { // Retag(_3); // ... // _9 = move _3; @@ -85,13 +92,13 @@ fn main() { // _12 = &raw mut (*_10); // Retag([raw] _12); // ... -// _15 = move _16(move _17) -> bb5; +// _15 = move _16(move _17) -> bb3; // } // -// bb5: { +// bb3: { // Retag(_15); // ... -// _19 = const Test::foo_shr(move _20, move _22) -> [return: bb6, unwind: bb7]; +// _19 = const Test::foo_shr(move _20, move _22) -> [return: bb4, unwind: bb6]; // } // // ... 
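
The next hunk renumbers the expectation blocks in simple-match.rs. For context, these mir-opt tests use the older comment-based format: the Rust source comes first, followed by `// END RUST SOURCE` and one or more `// START`/`// END` blocks that are matched line by line against the MIR dumped by the named pass, with `// ...` eliding unchecked lines. A minimal sketch of that layout follows; the expectation comments are copied from the post-patch hunk below, while the Rust source shown is a plausible reconstruction rather than text quoted from the patch.

    fn match_bool(x: bool) -> usize {
        // The expectation block below must track the exact basic-block
        // numbering this match lowers to, which is why a renumbering change
        // touches so many of these files.
        match x {
            true => 10,
            _ => 20,
        }
    }

    fn main() {}
    // END RUST SOURCE
    // START rustc.match_bool.mir_map.0.mir
    // bb0: {
    //     FakeRead(ForMatchedPlace, _1);
    //     switchInt(_1) -> [false: bb2, otherwise: bb1];
    // }
    // ...
    // bb4: {
    //     return;
    // }
    // END rustc.match_bool.mir_map.0.mir
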
diff --git a/src/test/mir-opt/simple-match.rs b/src/test/mir-opt/simple-match.rs index fc1a3bb1bf453..9e5709e9d5b06 100644 --- a/src/test/mir-opt/simple-match.rs +++ b/src/test/mir-opt/simple-match.rs @@ -14,26 +14,20 @@ fn main() {} // START rustc.match_bool.mir_map.0.mir // bb0: { // FakeRead(ForMatchedPlace, _1); -// switchInt(_1) -> [false: bb3, otherwise: bb2]; +// switchInt(_1) -> [false: bb2, otherwise: bb1]; // } -// bb1 (cleanup): { -// resume; +// bb1: { +// falseEdges -> [real: bb3, imaginary: bb2]; // } // bb2: { -// falseEdges -> [real: bb4, imaginary: bb3]; -// } -// bb3: { // _0 = const 20usize; -// goto -> bb5; +// goto -> bb4; // } -// bb4: { +// bb3: { // _0 = const 10usize; -// goto -> bb5; -// } -// bb5: { -// goto -> bb6; +// goto -> bb4; // } -// bb6: { +// bb4: { // return; // } // END rustc.match_bool.mir_map.0.mir diff --git a/src/test/mir-opt/simplify_cfg.rs b/src/test/mir-opt/simplify_cfg.rs index ef843f7158130..adecf80411736 100644 --- a/src/test/mir-opt/simplify_cfg.rs +++ b/src/test/mir-opt/simplify_cfg.rs @@ -19,20 +19,20 @@ fn bar() -> bool { // goto -> bb1; // } // bb1: { -// falseUnwind -> [real: bb3, cleanup: bb4]; +// falseUnwind -> [real: bb2, cleanup: bb11]; // } // ... -// bb11: { +// bb9: { // ... // goto -> bb1; // } // END rustc.main.SimplifyCfg-initial.before.mir // START rustc.main.SimplifyCfg-initial.after.mir // bb0: { -// falseUnwind -> [real: bb1, cleanup: bb2]; +// falseUnwind -> [real: bb1, cleanup: bb6]; // } // ... -// bb5: { +// bb4: { // ... // goto -> bb0; // } @@ -43,7 +43,7 @@ fn bar() -> bool { // } // bb1: { // StorageLive(_2); -// _2 = const bar() -> bb3; +// _2 = const bar() -> bb2; // } // END rustc.main.SimplifyCfg-early-opt.before.mir // START rustc.main.SimplifyCfg-early-opt.after.mir diff --git a/src/test/mir-opt/unusual-item-types.rs b/src/test/mir-opt/unusual-item-types.rs index 88cfb62a0d094..1f8343c16662a 100644 --- a/src/test/mir-opt/unusual-item-types.rs +++ b/src/test/mir-opt/unusual-item-types.rs @@ -30,9 +30,6 @@ fn main() { // _0 = const 2i32; // return; // } -// bb1 (cleanup): { -// resume; -// } // END rustc.{{impl}}-ASSOCIATED_CONSTANT.mir_map.0.mir // START rustc.E-V-{{constant}}.mir_map.0.mir @@ -40,14 +37,11 @@ fn main() { // _0 = const 5isize; // return; // } -// bb1 (cleanup): { -// resume; -// } // END rustc.E-V-{{constant}}.mir_map.0.mir // START rustc.ptr-drop_in_place.std__vec__Vec_i32_.AddMovesForPackedDrops.before.mir -// bb0: { -// goto -> bb7; +// bb0: { +// goto -> bb6; // } // bb1: { // return; @@ -59,17 +53,14 @@ fn main() { // goto -> bb1; // } // bb4 (cleanup): { -// goto -> bb2; +// drop(((*_1).0: alloc::raw_vec::RawVec)) -> bb2; // } -// bb5 (cleanup): { -// drop(((*_1).0: alloc::raw_vec::RawVec)) -> bb4; +// bb5: { +// drop(((*_1).0: alloc::raw_vec::RawVec)) -> [return: bb3, unwind: bb2]; // } // bb6: { -// drop(((*_1).0: alloc::raw_vec::RawVec)) -> [return: bb3, unwind: bb4]; -// } -// bb7: { // _2 = &mut (*_1); -// _3 = const as std::ops::Drop>::drop(move _2) -> [return: bb6, unwind: bb5]; +// _3 = const as std::ops::Drop>::drop(move _2) -> [return: bb5, unwind: bb4]; // } // END rustc.ptr-drop_in_place.std__vec__Vec_i32_.AddMovesForPackedDrops.before.mir diff --git a/src/test/ui/drop/dynamic-drop-async.rs b/src/test/ui/drop/dynamic-drop-async.rs index 88d36ab6aa664..acd4bff237ffa 100644 --- a/src/test/ui/drop/dynamic-drop-async.rs +++ b/src/test/ui/drop/dynamic-drop-async.rs @@ -46,6 +46,7 @@ impl Future for Defer { /// The `failing_op`-th operation will panic. 
struct Allocator { data: RefCell>, + name: &'static str, failing_op: usize, cur_ops: Cell, } @@ -57,23 +58,28 @@ impl Drop for Allocator { fn drop(&mut self) { let data = self.data.borrow(); if data.iter().any(|d| *d) { - panic!("missing free: {:?}", data); + panic!("missing free in {:?}: {:?}", self.name, data); } } } impl Allocator { - fn new(failing_op: usize) -> Self { - Allocator { failing_op, cur_ops: Cell::new(0), data: RefCell::new(vec![]) } + fn new(failing_op: usize, name: &'static str) -> Self { + Allocator { + failing_op, + name, + cur_ops: Cell::new(0), + data: RefCell::new(vec![]), + } } - fn alloc(&self) -> impl Future> + '_ { + fn alloc(self: &Rc) -> impl Future + 'static { self.fallible_operation(); let mut data = self.data.borrow_mut(); let addr = data.len(); data.push(true); - Defer { ready: false, value: Some(Ptr(addr, self)) } + Defer { ready: false, value: Some(Ptr(addr, self.clone())) } } fn fallible_operation(&self) { self.cur_ops.set(self.cur_ops.get() + 1); @@ -86,11 +92,11 @@ impl Allocator { // Type that tracks whether it was dropped and can panic when it's created or // destroyed. -struct Ptr<'a>(usize, &'a Allocator); -impl<'a> Drop for Ptr<'a> { +struct Ptr(usize, Rc); +impl Drop for Ptr { fn drop(&mut self) { match self.1.data.borrow_mut()[self.0] { - false => panic!("double free at index {:?}", self.0), + false => panic!("double free in {:?} at index {:?}", self.1.name, self.0), ref mut d => *d = false, } @@ -114,7 +120,7 @@ async fn dynamic_drop(a: Rc, c: bool) { }; } -struct TwoPtrs<'a>(Ptr<'a>, Ptr<'a>); +struct TwoPtrs(Ptr, Ptr); async fn struct_dynamic_drop(a: Rc, c0: bool, c1: bool, c: bool) { for i in 0..2 { let x; @@ -235,21 +241,62 @@ async fn move_ref_pattern(a: Rc) { a.alloc().await; } -fn run_test(cx: &mut Context<'_>, ref f: F) +async fn panic_after_return(a: Rc, c: bool) -> (Ptr,) { + a.alloc().await; + let p = a.alloc().await; + if c { + a.alloc().await; + let q = a.alloc().await; + // We use a return type that isn't used anywhere else to make sure that + // the return place doesn't incorrectly end up in the generator state. + return (a.alloc().await,); + } + (a.alloc().await,) +} + + +async fn panic_after_init_by_loop(a: Rc) { + a.alloc().await; + let p = a.alloc().await; + let q = loop { + a.alloc().await; + let r = a.alloc().await; + break a.alloc().await; + }; +} + +async fn panic_after_init_by_match_with_bindings_and_guard(a: Rc, b: bool) { + a.alloc().await; + let p = a.alloc().await; + let q = match a.alloc().await { + ref _x if b => { + a.alloc().await; + let r = a.alloc().await; + a.alloc().await + } + _x => { + a.alloc().await; + let r = a.alloc().await; + a.alloc().await + }, + }; +} + +fn run_test(cx: &mut Context<'_>, ref f: F, name: &'static str) where F: Fn(Rc) -> G, - G: Future, + G: Future, { for polls in 0.. { // Run without any panics to find which operations happen after the // penultimate `poll`. - let first_alloc = Rc::new(Allocator::new(usize::MAX)); + let first_alloc = Rc::new(Allocator::new(usize::MAX, name)); let mut fut = Box::pin(f(first_alloc.clone())); let mut ops_before_last_poll = 0; let mut completed = false; for _ in 0..polls { ops_before_last_poll = first_alloc.cur_ops.get(); - if let Poll::Ready(()) = fut.as_mut().poll(cx) { + if let Poll::Ready(_) = fut.as_mut().poll(cx) { completed = true; } } @@ -258,7 +305,7 @@ where // Start at `ops_before_last_poll` so that we will always be able to // `poll` the expected number of times. 
for failing_op in ops_before_last_poll..first_alloc.cur_ops.get() { - let alloc = Rc::new(Allocator::new(failing_op + 1)); + let alloc = Rc::new(Allocator::new(failing_op + 1, name)); let f = &f; let cx = &mut *cx; let result = panic::catch_unwind(panic::AssertUnwindSafe(move || { @@ -288,48 +335,58 @@ fn clone_waker(data: *const ()) -> RawWaker { RawWaker::new(data, &RawWakerVTable::new(clone_waker, drop, drop, drop)) } +macro_rules! run_test { + ($ctxt:expr, $e:expr) => { run_test($ctxt, $e, stringify!($e)); }; +} + fn main() { let waker = unsafe { Waker::from_raw(clone_waker(ptr::null())) }; let context = &mut Context::from_waker(&waker); - run_test(context, |a| dynamic_init(a, false)); - run_test(context, |a| dynamic_init(a, true)); - run_test(context, |a| dynamic_drop(a, false)); - run_test(context, |a| dynamic_drop(a, true)); - - run_test(context, |a| assignment(a, false, false)); - run_test(context, |a| assignment(a, false, true)); - run_test(context, |a| assignment(a, true, false)); - run_test(context, |a| assignment(a, true, true)); - - run_test(context, |a| array_simple(a)); - run_test(context, |a| vec_simple(a)); - run_test(context, |a| vec_unreachable(a)); - - run_test(context, |a| struct_dynamic_drop(a, false, false, false)); - run_test(context, |a| struct_dynamic_drop(a, false, false, true)); - run_test(context, |a| struct_dynamic_drop(a, false, true, false)); - run_test(context, |a| struct_dynamic_drop(a, false, true, true)); - run_test(context, |a| struct_dynamic_drop(a, true, false, false)); - run_test(context, |a| struct_dynamic_drop(a, true, false, true)); - run_test(context, |a| struct_dynamic_drop(a, true, true, false)); - run_test(context, |a| struct_dynamic_drop(a, true, true, true)); - - run_test(context, |a| field_assignment(a, false)); - run_test(context, |a| field_assignment(a, true)); - - run_test(context, |a| mixed_drop_and_nondrop(a)); - - run_test(context, |a| slice_pattern_one_of(a, 0)); - run_test(context, |a| slice_pattern_one_of(a, 1)); - run_test(context, |a| slice_pattern_one_of(a, 2)); - run_test(context, |a| slice_pattern_one_of(a, 3)); - - run_test(context, |a| subslice_pattern_from_end_with_drop(a, true, true)); - run_test(context, |a| subslice_pattern_from_end_with_drop(a, true, false)); - run_test(context, |a| subslice_pattern_from_end_with_drop(a, false, true)); - run_test(context, |a| subslice_pattern_from_end_with_drop(a, false, false)); - run_test(context, |a| subslice_pattern_reassign(a)); - - run_test(context, |a| move_ref_pattern(a)); + run_test!(context, |a| dynamic_init(a, false)); + run_test!(context, |a| dynamic_init(a, true)); + run_test!(context, |a| dynamic_drop(a, false)); + run_test!(context, |a| dynamic_drop(a, true)); + + run_test!(context, |a| assignment(a, false, false)); + run_test!(context, |a| assignment(a, false, true)); + run_test!(context, |a| assignment(a, true, false)); + run_test!(context, |a| assignment(a, true, true)); + + run_test!(context, |a| array_simple(a)); + run_test!(context, |a| vec_simple(a)); + run_test!(context, |a| vec_unreachable(a)); + + run_test!(context, |a| struct_dynamic_drop(a, false, false, false)); + run_test!(context, |a| struct_dynamic_drop(a, false, false, true)); + run_test!(context, |a| struct_dynamic_drop(a, false, true, false)); + run_test!(context, |a| struct_dynamic_drop(a, false, true, true)); + run_test!(context, |a| struct_dynamic_drop(a, true, false, false)); + run_test!(context, |a| struct_dynamic_drop(a, true, false, true)); + run_test!(context, |a| struct_dynamic_drop(a, true, 
true, false)); + run_test!(context, |a| struct_dynamic_drop(a, true, true, true)); + + run_test!(context, |a| field_assignment(a, false)); + run_test!(context, |a| field_assignment(a, true)); + + run_test!(context, |a| mixed_drop_and_nondrop(a)); + + run_test!(context, |a| slice_pattern_one_of(a, 0)); + run_test!(context, |a| slice_pattern_one_of(a, 1)); + run_test!(context, |a| slice_pattern_one_of(a, 2)); + run_test!(context, |a| slice_pattern_one_of(a, 3)); + + run_test!(context, |a| subslice_pattern_from_end_with_drop(a, true, true)); + run_test!(context, |a| subslice_pattern_from_end_with_drop(a, true, false)); + run_test!(context, |a| subslice_pattern_from_end_with_drop(a, false, true)); + run_test!(context, |a| subslice_pattern_from_end_with_drop(a, false, false)); + run_test!(context, |a| subslice_pattern_reassign(a)); + + run_test!(context, |a| move_ref_pattern(a)); + + run_test!(context, |a| panic_after_return(a, false)); + run_test!(context, |a| panic_after_return(a, true)); + run_test!(context, |a| panic_after_init_by_loop(a)); + run_test!(context, |a| panic_after_init_by_match_with_bindings_and_guard(a, false)); + run_test!(context, |a| panic_after_init_by_match_with_bindings_and_guard(a, true)); } diff --git a/src/test/ui/drop/dynamic-drop.rs b/src/test/ui/drop/dynamic-drop.rs index 451686d9ae21c..8a8a67d7d49e1 100644 --- a/src/test/ui/drop/dynamic-drop.rs +++ b/src/test/ui/drop/dynamic-drop.rs @@ -3,7 +3,6 @@ #![feature(generators, generator_trait, untagged_unions)] #![feature(move_ref_pattern)] - #![allow(unused_assignments)] #![allow(unused_variables)] @@ -18,6 +17,7 @@ struct InjectedFailure; struct Allocator { data: RefCell>, + name: &'static str, failing_op: usize, cur_ops: Cell, } @@ -29,17 +29,18 @@ impl Drop for Allocator { fn drop(&mut self) { let data = self.data.borrow(); if data.iter().any(|d| *d) { - panic!("missing free: {:?}", data); + panic!("missing free in {:?}: {:?}", self.name, data); } } } impl Allocator { - fn new(failing_op: usize) -> Self { + fn new(failing_op: usize, name: &'static str) -> Self { Allocator { failing_op: failing_op, cur_ops: Cell::new(0), - data: RefCell::new(vec![]) + data: RefCell::new(vec![]), + name, } } fn alloc(&self) -> Ptr<'_> { @@ -54,33 +55,17 @@ impl Allocator { data.push(true); Ptr(addr, self) } - // FIXME(#47949) Any use of this indicates a bug in rustc: we should never - // be leaking values in the cases here. - // - // Creates a `Ptr<'_>` and checks that the allocated value is leaked if the - // `failing_op` is in the list of exception. 
- fn alloc_leaked(&self, exceptions: Vec) -> Ptr<'_> { - let ptr = self.alloc(); - - if exceptions.iter().any(|operation| *operation == self.failing_op) { - let mut data = self.data.borrow_mut(); - data[ptr.0] = false; - } - ptr - } } struct Ptr<'a>(usize, &'a Allocator); impl<'a> Drop for Ptr<'a> { fn drop(&mut self) { match self.1.data.borrow_mut()[self.0] { - false => { - panic!("double free at index {:?}", self.0) - } - ref mut d => *d = false + false => panic!("double free in {:?} at index {:?}", self.1.name, self.0), + ref mut d => *d = false, } - self.1.cur_ops.set(self.1.cur_ops.get()+1); + self.1.cur_ops.set(self.1.cur_ops.get() + 1); if self.1.cur_ops.get() == self.1.failing_op { panic!(InjectedFailure); @@ -178,11 +163,7 @@ fn generator(a: &Allocator, run_count: usize) { assert!(run_count < 4); let mut gen = || { - (a.alloc(), - yield a.alloc(), - a.alloc(), - yield a.alloc() - ); + (a.alloc(), yield a.alloc(), a.alloc(), yield a.alloc()); }; for _ in 0..run_count { Pin::new(&mut gen).resume(()); @@ -206,28 +187,40 @@ fn vec_unreachable(a: &Allocator) { } fn slice_pattern_first(a: &Allocator) { - let[_x, ..] = [a.alloc(), a.alloc(), a.alloc()]; + let [_x, ..] = [a.alloc(), a.alloc(), a.alloc()]; } fn slice_pattern_middle(a: &Allocator) { - let[_, _x, _] = [a.alloc(), a.alloc(), a.alloc()]; + let [_, _x, _] = [a.alloc(), a.alloc(), a.alloc()]; } fn slice_pattern_two(a: &Allocator) { - let[_x, _, _y, _] = [a.alloc(), a.alloc(), a.alloc(), a.alloc()]; + let [_x, _, _y, _] = [a.alloc(), a.alloc(), a.alloc(), a.alloc()]; } fn slice_pattern_last(a: &Allocator) { - let[.., _y] = [a.alloc(), a.alloc(), a.alloc(), a.alloc()]; + let [.., _y] = [a.alloc(), a.alloc(), a.alloc(), a.alloc()]; } fn slice_pattern_one_of(a: &Allocator, i: usize) { let array = [a.alloc(), a.alloc(), a.alloc(), a.alloc()]; let _x = match i { - 0 => { let [a, ..] = array; a } - 1 => { let [_, a, ..] = array; a } - 2 => { let [_, _, a, _] = array; a } - 3 => { let [_, _, _, a] = array; a } + 0 => { + let [a, ..] = array; + a + } + 1 => { + let [_, a, ..] = array; + a + } + 2 => { + let [_, _, a, _] = array; + a + } + 3 => { + let [_, _, _, a] = array; + a + } _ => panic!("unmatched"), }; } @@ -235,9 +228,9 @@ fn slice_pattern_one_of(a: &Allocator, i: usize) { fn subslice_pattern_from_end(a: &Allocator, arg: bool) { let a = [a.alloc(), a.alloc(), a.alloc()]; if arg { - let[.., _x, _] = a; + let [.., _x, _] = a; } else { - let[_, _y @ ..] = a; + let [_, _y @ ..] = a; } } @@ -249,45 +242,61 @@ fn subslice_pattern_from_end_with_drop(a: &Allocator, arg: bool, arg2: bool) { } if arg { - let[.., _x, _] = a; + let [.., _x, _] = a; } else { - let[_, _y @ ..] = a; + let [_, _y @ ..] = a; } } fn slice_pattern_reassign(a: &Allocator) { let mut ar = [a.alloc(), a.alloc()]; - let[_, _x] = ar; + let [_, _x] = ar; ar = [a.alloc(), a.alloc()]; - let[.., _y] = ar; + let [.., _y] = ar; } fn subslice_pattern_reassign(a: &Allocator) { let mut ar = [a.alloc(), a.alloc(), a.alloc()]; - let[_, _, _x] = ar; + let [_, _, _x] = ar; ar = [a.alloc(), a.alloc(), a.alloc()]; - let[_, _y @ ..] = ar; + let [_, _y @ ..] = ar; } fn index_field_mixed_ends(a: &Allocator) { let ar = [(a.alloc(), a.alloc()), (a.alloc(), a.alloc())]; - let[(_x, _), ..] = ar; - let[(_, _y), _] = ar; - let[_, (_, _w)] = ar; - let[.., (_z, _)] = ar; + let [(_x, _), ..] 
= ar; + let [(_, _y), _] = ar; + let [_, (_, _w)] = ar; + let [.., (_z, _)] = ar; } fn subslice_mixed_min_lengths(a: &Allocator, c: i32) { let ar = [(a.alloc(), a.alloc()), (a.alloc(), a.alloc())]; match c { - 0 => { let[_x, ..] = ar; } - 1 => { let[_x, _, ..] = ar; } - 2 => { let[_x, _] = ar; } - 3 => { let[(_x, _), _, ..] = ar; } - 4 => { let[.., (_x, _)] = ar; } - 5 => { let[.., (_x, _), _] = ar; } - 6 => { let [_y @ ..] = ar; } - _ => { let [_y @ .., _] = ar; } + 0 => { + let [_x, ..] = ar; + } + 1 => { + let [_x, _, ..] = ar; + } + 2 => { + let [_x, _] = ar; + } + 3 => { + let [(_x, _), _, ..] = ar; + } + 4 => { + let [.., (_x, _)] = ar; + } + 5 => { + let [.., (_x, _), _] = ar; + } + 6 => { + let [_y @ ..] = ar; + } + _ => { + let [_y @ .., _] = ar; + } } } @@ -297,87 +306,160 @@ fn move_ref_pattern(a: &Allocator) { } fn panic_after_return(a: &Allocator) -> Ptr<'_> { - // Panic in the drop of `p` or `q` can leak - let exceptions = vec![8, 9]; a.alloc(); let p = a.alloc(); { a.alloc(); let p = a.alloc(); - // FIXME (#47949) We leak values when we panic in a destructor after - // evaluating an expression with `rustc_mir::build::Builder::into`. - a.alloc_leaked(exceptions) + a.alloc() } } fn panic_after_return_expr(a: &Allocator) -> Ptr<'_> { - // Panic in the drop of `p` or `q` can leak - let exceptions = vec![8, 9]; a.alloc(); let p = a.alloc(); { a.alloc(); let q = a.alloc(); - // FIXME (#47949) - return a.alloc_leaked(exceptions); + return a.alloc(); } } fn panic_after_init(a: &Allocator) { - // Panic in the drop of `r` can leak - let exceptions = vec![8]; a.alloc(); let p = a.alloc(); let q = { a.alloc(); let r = a.alloc(); - // FIXME (#47949) - a.alloc_leaked(exceptions) + a.alloc() }; } fn panic_after_init_temp(a: &Allocator) { - // Panic in the drop of `r` can leak - let exceptions = vec![8]; a.alloc(); let p = a.alloc(); { a.alloc(); let r = a.alloc(); - // FIXME (#47949) - a.alloc_leaked(exceptions) + a.alloc() }; } fn panic_after_init_by_loop(a: &Allocator) { - // Panic in the drop of `r` can leak - let exceptions = vec![8]; a.alloc(); let p = a.alloc(); let q = loop { a.alloc(); let r = a.alloc(); - // FIXME (#47949) - break a.alloc_leaked(exceptions); + break a.alloc(); }; } -fn run_test(mut f: F) - where F: FnMut(&Allocator) +fn panic_after_init_by_match(a: &Allocator, b: bool) { + a.alloc(); + let p = a.alloc(); + loop { + let q = match b { + true => { + a.alloc(); + let r = a.alloc(); + a.alloc() + } + false => { + a.alloc(); + let r = a.alloc(); + break a.alloc(); + } + }; + return; + }; +} + +fn panic_after_init_by_match_with_guard(a: &Allocator, b: bool) { + a.alloc(); + let p = a.alloc(); + let q = match a.alloc() { + _ if b => { + a.alloc(); + let r = a.alloc(); + a.alloc() + } + _ => { + a.alloc(); + let r = a.alloc(); + a.alloc() + } + }; +} + +fn panic_after_init_by_match_with_bindings_and_guard(a: &Allocator, b: bool) { + a.alloc(); + let p = a.alloc(); + let q = match a.alloc() { + _x if b => { + a.alloc(); + let r = a.alloc(); + a.alloc() + } + _x => { + a.alloc(); + let r = a.alloc(); + a.alloc() + } + }; +} + +fn panic_after_init_by_match_with_ref_bindings_and_guard(a: &Allocator, b: bool) { + a.alloc(); + let p = a.alloc(); + let q = match a.alloc() { + ref _x if b => { + a.alloc(); + let r = a.alloc(); + a.alloc() + } + ref _x => { + a.alloc(); + let r = a.alloc(); + a.alloc() + } + }; +} + +fn panic_after_init_by_break_if(a: &Allocator, b: bool) { + a.alloc(); + let p = a.alloc(); + let q = loop { + let r = a.alloc(); + break if b { + let s = a.alloc(); + 
a.alloc() + } else { + a.alloc() + }; + }; +} + +fn run_test(mut f: F, name: &'static str) +where + F: FnMut(&Allocator), { - let first_alloc = Allocator::new(usize::MAX); + let first_alloc = Allocator::new(usize::MAX, name); f(&first_alloc); - for failing_op in 1..first_alloc.cur_ops.get()+1 { - let alloc = Allocator::new(failing_op); + for failing_op in 1..first_alloc.cur_ops.get() + 1 { + let alloc = Allocator::new(failing_op, name); let alloc = &alloc; let f = panic::AssertUnwindSafe(&mut f); let result = panic::catch_unwind(move || { f.0(alloc); }); match result { - Ok(..) => panic!("test executed {} ops but now {}", - first_alloc.cur_ops.get(), alloc.cur_ops.get()), + Ok(..) => panic!( + "test executed {} ops but now {}", + first_alloc.cur_ops.get(), + alloc.cur_ops.get() + ), Err(e) => { if e.downcast_ref::().is_none() { panic::resume_unwind(e); @@ -387,89 +469,106 @@ fn run_test(mut f: F) } } -fn run_test_nopanic(mut f: F) - where F: FnMut(&Allocator) +fn run_test_nopanic(mut f: F, name: &'static str) +where + F: FnMut(&Allocator), { - let first_alloc = Allocator::new(usize::MAX); + let first_alloc = Allocator::new(usize::MAX, name); f(&first_alloc); } +macro_rules! run_test { + ($e:expr) => { + run_test($e, stringify!($e)); + }; +} + fn main() { - run_test(|a| dynamic_init(a, false)); - run_test(|a| dynamic_init(a, true)); - run_test(|a| dynamic_drop(a, false)); - run_test(|a| dynamic_drop(a, true)); - - run_test(|a| assignment2(a, false, false)); - run_test(|a| assignment2(a, false, true)); - run_test(|a| assignment2(a, true, false)); - run_test(|a| assignment2(a, true, true)); - - run_test(|a| assignment1(a, false)); - run_test(|a| assignment1(a, true)); - - run_test(|a| array_simple(a)); - run_test(|a| vec_simple(a)); - run_test(|a| vec_unreachable(a)); - - run_test(|a| struct_dynamic_drop(a, false, false, false)); - run_test(|a| struct_dynamic_drop(a, false, false, true)); - run_test(|a| struct_dynamic_drop(a, false, true, false)); - run_test(|a| struct_dynamic_drop(a, false, true, true)); - run_test(|a| struct_dynamic_drop(a, true, false, false)); - run_test(|a| struct_dynamic_drop(a, true, false, true)); - run_test(|a| struct_dynamic_drop(a, true, true, false)); - run_test(|a| struct_dynamic_drop(a, true, true, true)); - - run_test(|a| field_assignment(a, false)); - run_test(|a| field_assignment(a, true)); - - run_test(|a| generator(a, 0)); - run_test(|a| generator(a, 1)); - run_test(|a| generator(a, 2)); - run_test(|a| generator(a, 3)); - - run_test(|a| mixed_drop_and_nondrop(a)); - - run_test(|a| slice_pattern_first(a)); - run_test(|a| slice_pattern_middle(a)); - run_test(|a| slice_pattern_two(a)); - run_test(|a| slice_pattern_last(a)); - run_test(|a| slice_pattern_one_of(a, 0)); - run_test(|a| slice_pattern_one_of(a, 1)); - run_test(|a| slice_pattern_one_of(a, 2)); - run_test(|a| slice_pattern_one_of(a, 3)); - - run_test(|a| subslice_pattern_from_end(a, true)); - run_test(|a| subslice_pattern_from_end(a, false)); - run_test(|a| subslice_pattern_from_end_with_drop(a, true, true)); - run_test(|a| subslice_pattern_from_end_with_drop(a, true, false)); - run_test(|a| subslice_pattern_from_end_with_drop(a, false, true)); - run_test(|a| subslice_pattern_from_end_with_drop(a, false, false)); - run_test(|a| slice_pattern_reassign(a)); - run_test(|a| subslice_pattern_reassign(a)); - - run_test(|a| index_field_mixed_ends(a)); - run_test(|a| subslice_mixed_min_lengths(a, 0)); - run_test(|a| subslice_mixed_min_lengths(a, 1)); - run_test(|a| subslice_mixed_min_lengths(a, 2)); - 
run_test(|a| subslice_mixed_min_lengths(a, 3)); - run_test(|a| subslice_mixed_min_lengths(a, 4)); - run_test(|a| subslice_mixed_min_lengths(a, 5)); - run_test(|a| subslice_mixed_min_lengths(a, 6)); - run_test(|a| subslice_mixed_min_lengths(a, 7)); - - run_test(|a| move_ref_pattern(a)); - - run_test(|a| { + run_test!(|a| dynamic_init(a, false)); + run_test!(|a| dynamic_init(a, true)); + run_test!(|a| dynamic_drop(a, false)); + run_test!(|a| dynamic_drop(a, true)); + + run_test!(|a| assignment2(a, false, false)); + run_test!(|a| assignment2(a, false, true)); + run_test!(|a| assignment2(a, true, false)); + run_test!(|a| assignment2(a, true, true)); + + run_test!(|a| assignment1(a, false)); + run_test!(|a| assignment1(a, true)); + + run_test!(|a| array_simple(a)); + run_test!(|a| vec_simple(a)); + run_test!(|a| vec_unreachable(a)); + + run_test!(|a| struct_dynamic_drop(a, false, false, false)); + run_test!(|a| struct_dynamic_drop(a, false, false, true)); + run_test!(|a| struct_dynamic_drop(a, false, true, false)); + run_test!(|a| struct_dynamic_drop(a, false, true, true)); + run_test!(|a| struct_dynamic_drop(a, true, false, false)); + run_test!(|a| struct_dynamic_drop(a, true, false, true)); + run_test!(|a| struct_dynamic_drop(a, true, true, false)); + run_test!(|a| struct_dynamic_drop(a, true, true, true)); + + run_test!(|a| field_assignment(a, false)); + run_test!(|a| field_assignment(a, true)); + + run_test!(|a| generator(a, 0)); + run_test!(|a| generator(a, 1)); + run_test!(|a| generator(a, 2)); + run_test!(|a| generator(a, 3)); + + run_test!(|a| mixed_drop_and_nondrop(a)); + + run_test!(|a| slice_pattern_first(a)); + run_test!(|a| slice_pattern_middle(a)); + run_test!(|a| slice_pattern_two(a)); + run_test!(|a| slice_pattern_last(a)); + run_test!(|a| slice_pattern_one_of(a, 0)); + run_test!(|a| slice_pattern_one_of(a, 1)); + run_test!(|a| slice_pattern_one_of(a, 2)); + run_test!(|a| slice_pattern_one_of(a, 3)); + + run_test!(|a| subslice_pattern_from_end(a, true)); + run_test!(|a| subslice_pattern_from_end(a, false)); + run_test!(|a| subslice_pattern_from_end_with_drop(a, true, true)); + run_test!(|a| subslice_pattern_from_end_with_drop(a, true, false)); + run_test!(|a| subslice_pattern_from_end_with_drop(a, false, true)); + run_test!(|a| subslice_pattern_from_end_with_drop(a, false, false)); + run_test!(|a| slice_pattern_reassign(a)); + run_test!(|a| subslice_pattern_reassign(a)); + + run_test!(|a| index_field_mixed_ends(a)); + run_test!(|a| subslice_mixed_min_lengths(a, 0)); + run_test!(|a| subslice_mixed_min_lengths(a, 1)); + run_test!(|a| subslice_mixed_min_lengths(a, 2)); + run_test!(|a| subslice_mixed_min_lengths(a, 3)); + run_test!(|a| subslice_mixed_min_lengths(a, 4)); + run_test!(|a| subslice_mixed_min_lengths(a, 5)); + run_test!(|a| subslice_mixed_min_lengths(a, 6)); + run_test!(|a| subslice_mixed_min_lengths(a, 7)); + + run_test!(|a| move_ref_pattern(a)); + + run_test!(|a| { panic_after_return(a); }); - run_test(|a| { + run_test!(|a| { panic_after_return_expr(a); }); - run_test(|a| panic_after_init(a)); - run_test(|a| panic_after_init_temp(a)); - run_test(|a| panic_after_init_by_loop(a)); - - run_test_nopanic(|a| union1(a)); + run_test!(|a| panic_after_init(a)); + run_test!(|a| panic_after_init_temp(a)); + run_test!(|a| panic_after_init_by_loop(a)); + run_test!(|a| panic_after_init_by_match(a, false)); + run_test!(|a| panic_after_init_by_match(a, true)); + run_test!(|a| panic_after_init_by_match_with_guard(a, false)); + run_test!(|a| panic_after_init_by_match_with_guard(a, 
true)); + run_test!(|a| panic_after_init_by_match_with_bindings_and_guard(a, false)); + run_test!(|a| panic_after_init_by_match_with_bindings_and_guard(a, true)); + run_test!(|a| panic_after_init_by_match_with_ref_bindings_and_guard(a, false)); + run_test!(|a| panic_after_init_by_match_with_ref_bindings_and_guard(a, true)); + run_test!(|a| panic_after_init_by_break_if(a, false)); + run_test!(|a| panic_after_init_by_break_if(a, true)); + + run_test_nopanic(|a| union1(a), "|a| union1(a)"); }
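
Both dynamic-drop test files above converge on the same harness pattern: `run_test` now takes an explicit `name: &'static str` that the `Allocator` threads into its panic messages, and a `run_test!` macro derives that name from the test expression with `stringify!`, so a failing case reports which closure was running. A minimal standalone sketch of the pattern (toy harness body and closure, not taken from the patch):

    // Toy stand-in for the harness: run the case once and report it by name.
    fn run_test<F>(mut f: F, name: &'static str)
    where
        F: FnMut(usize),
    {
        f(0);
        println!("ran case: {}", name);
    }

    // Forward the expression itself as the human-readable test name.
    macro_rules! run_test {
        ($e:expr) => {
            run_test($e, stringify!($e));
        };
    }

    fn main() {
        run_test!(|a| assert_eq!(a, 0));
    }

The payoff in the tests above is that `panic!("missing free in {:?}: {:?}", self.name, data)` and the double-free message can point at the exact `run_test!(...)` invocation instead of only printing the raw allocation flags.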