diff --git a/src/liballoc/heap.rs b/src/liballoc/heap.rs
index 51e6f2f8bd7a6..7170d8b30e6bf 100644
--- a/src/liballoc/heap.rs
+++ b/src/liballoc/heap.rs
@@ -130,7 +130,7 @@ pub const EMPTY: *mut () = 0x1 as *mut ();
 // This function must not unwind. If it does, MIR trans will fail.
 #[cfg(not(test))]
 #[lang = "exchange_malloc"]
-#[inline]
+#[inline(never)]
 unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
     if size == 0 {
         EMPTY as *mut u8
diff --git a/src/librustc/mir/cache.rs b/src/librustc/mir/cache.rs
index bc9bbebb1796a..f8bff4de555e0 100644
--- a/src/librustc/mir/cache.rs
+++ b/src/librustc/mir/cache.rs
@@ -11,13 +11,14 @@
 use std::cell::{Ref, RefCell};
 use rustc_data_structures::indexed_vec::IndexVec;
-use mir::{Mir, BasicBlock};
+use mir::{Mir, Block};
 use rustc_serialize as serialize;

 #[derive(Clone, Debug)]
 pub struct Cache {
-    predecessors: RefCell<Option<IndexVec<BasicBlock, Vec<BasicBlock>>>>
+    predecessors: RefCell<Option<IndexVec<Block, Vec<Block>>>>,
+    successors: RefCell<Option<IndexVec<Block, Vec<Block>>>>,
 }

@@ -37,30 +38,57 @@ impl serialize::Decodable for Cache {
 impl Cache {
     pub fn new() -> Self {
         Cache {
-            predecessors: RefCell::new(None)
+            predecessors: RefCell::new(None),
+            successors: RefCell::new(None)
         }
     }

     pub fn invalidate(&self) {
         // FIXME: consider being more fine-grained
         *self.predecessors.borrow_mut() = None;
+        *self.successors.borrow_mut() = None;
     }

-    pub fn predecessors(&self, mir: &Mir) -> Ref<IndexVec<BasicBlock, Vec<BasicBlock>>> {
+    pub fn predecessors(&self, mir: &Mir) -> Ref<IndexVec<Block, Vec<Block>>> {
         if self.predecessors.borrow().is_none() {
-            *self.predecessors.borrow_mut() = Some(calculate_predecessors(mir));
+            *self.predecessors.borrow_mut() = Some(self.calculate_predecessors(mir));
         }

         Ref::map(self.predecessors.borrow(), |p| p.as_ref().unwrap())
     }
+
+    fn calculate_predecessors(&self, mir: &Mir) -> IndexVec<Block, Vec<Block>> {
+        let mut result = IndexVec::from_elem(vec![], mir.basic_blocks());
+        for (bb, bbs) in self.successors(mir).iter_enumerated() {
+            for &tgt in bbs {
+                result[tgt].push(bb);
+            }
+        }
+
+        result
+    }
+
+    pub fn successors(&self, mir: &Mir) -> Ref<IndexVec<Block, Vec<Block>>> {
+        if self.successors.borrow().is_none() {
+            *self.successors.borrow_mut() = Some(calculate_successors(mir));
+        }
+
+        Ref::map(self.successors.borrow(), |p| p.as_ref().unwrap())
+    }
 }

-fn calculate_predecessors(mir: &Mir) -> IndexVec<BasicBlock, Vec<BasicBlock>> {
+fn calculate_successors(mir: &Mir) -> IndexVec<Block, Vec<Block>> {
     let mut result = IndexVec::from_elem(vec![], mir.basic_blocks());
     for (bb, data) in mir.basic_blocks().iter_enumerated() {
+        for stmt in &data.statements {
+            if let Some(cleanup) = stmt.cleanup_target() {
+                result[bb].push(cleanup);
+            }
+        }
+
         if let Some(ref term) = data.terminator {
             for &tgt in term.successors().iter() {
-                result[tgt].push(bb);
+                result[bb].push(tgt);
             }
         }
     }
diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs
index 01dc7f51e29d9..5814cd309cdc3 100644
--- a/src/librustc/mir/mod.rs
+++ b/src/librustc/mir/mod.rs
@@ -66,9 +66,9 @@ macro_rules! newtype_index {
 /// Lowered representation of a single function.
 #[derive(Clone, RustcEncodable, RustcDecodable, Debug)]
 pub struct Mir<'tcx> {
-    /// List of basic blocks. References to basic block use a newtyped index type `BasicBlock`
+    /// List of basic blocks. References to basic block use a newtyped index type `Block`
     /// that indexes into this vector.
-    basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+    basic_blocks: IndexVec<Block, BlockData<'tcx>>,

     /// List of visibility (lexical) scopes; these are referenced by statements
     /// and used (eventually) for debuginfo. Indexed by a `VisibilityScope`.
@@ -115,10 +115,10 @@ pub struct Mir<'tcx> { } /// where execution begins -pub const START_BLOCK: BasicBlock = BasicBlock(0); +pub const START_BLOCK: Block = Block(0); impl<'tcx> Mir<'tcx> { - pub fn new(basic_blocks: IndexVec>, + pub fn new(basic_blocks: IndexVec>, visibility_scopes: IndexVec, promoted: IndexVec>, return_ty: Ty<'tcx>, @@ -147,28 +147,38 @@ impl<'tcx> Mir<'tcx> { } #[inline] - pub fn basic_blocks(&self) -> &IndexVec> { + pub fn basic_blocks(&self) -> &IndexVec> { &self.basic_blocks } #[inline] - pub fn basic_blocks_mut(&mut self) -> &mut IndexVec> { + pub fn basic_blocks_mut(&mut self) -> &mut IndexVec> { self.cache.invalidate(); &mut self.basic_blocks } #[inline] - pub fn predecessors(&self) -> Ref>> { + pub fn predecessors(&self) -> Ref>> { self.cache.predecessors(self) } #[inline] - pub fn predecessors_for(&self, bb: BasicBlock) -> Ref> { + pub fn successors(&self) -> Ref>> { + self.cache.successors(self) + } + + #[inline] + pub fn predecessors_for(&self, bb: Block) -> Ref> { Ref::map(self.predecessors(), |p| &p[bb]) } #[inline] - pub fn dominators(&self) -> Dominators { + pub fn successors_for(&self, bb: Block) -> Ref> { + Ref::map(self.successors(), |p| &p[bb]) + } + + #[inline] + pub fn dominators(&self) -> Dominators { dominators(self) } @@ -243,18 +253,18 @@ impl<'tcx> Mir<'tcx> { } } -impl<'tcx> Index for Mir<'tcx> { - type Output = BasicBlockData<'tcx>; +impl<'tcx> Index for Mir<'tcx> { + type Output = BlockData<'tcx>; #[inline] - fn index(&self, index: BasicBlock) -> &BasicBlockData<'tcx> { + fn index(&self, index: Block) -> &BlockData<'tcx> { &self.basic_blocks()[index] } } -impl<'tcx> IndexMut for Mir<'tcx> { +impl<'tcx> IndexMut for Mir<'tcx> { #[inline] - fn index_mut(&mut self, index: BasicBlock) -> &mut BasicBlockData<'tcx> { + fn index_mut(&mut self, index: Block) -> &mut BlockData<'tcx> { &mut self.basic_blocks_mut()[index] } } @@ -411,15 +421,15 @@ pub struct UpvarDecl { } /////////////////////////////////////////////////////////////////////////// -// BasicBlock +// Block -newtype_index!(BasicBlock, "bb"); +newtype_index!(Block, "bb"); /////////////////////////////////////////////////////////////////////////// -// BasicBlockData and Terminator +// BlockData and Terminator #[derive(Clone, Debug, RustcEncodable, RustcDecodable)] -pub struct BasicBlockData<'tcx> { +pub struct BlockData<'tcx> { /// List of statements in this block. pub statements: Vec>, @@ -450,7 +460,7 @@ pub struct Terminator<'tcx> { pub enum TerminatorKind<'tcx> { /// block should have one successor in the graph; we jump there Goto { - target: BasicBlock, + target: Block, }, /// operand evaluates to an integer; jump depending on its value @@ -472,12 +482,12 @@ pub enum TerminatorKind<'tcx> { // This invariant is quite non-obvious and also could be improved. // One way to make this invariant is to have something like this instead: // - // branches: Vec<(ConstInt, BasicBlock)>, - // otherwise: Option // exhaustive if None + // branches: Vec<(ConstInt, Block)>, + // otherwise: Option // exhaustive if None // // However we’ve decided to keep this as-is until we figure a case // where some other approach seems to be strictly better than other. 
- targets: Vec, + targets: Vec, }, /// Indicates that the landing pad is finished and unwinding should @@ -494,54 +504,32 @@ pub enum TerminatorKind<'tcx> { /// Drop the Lvalue Drop { location: Lvalue<'tcx>, - target: BasicBlock, - unwind: Option + target: Block, + unwind: Option }, /// Drop the Lvalue and assign the new value over it DropAndReplace { location: Lvalue<'tcx>, value: Operand<'tcx>, - target: BasicBlock, - unwind: Option, - }, - - /// Block ends with a call of a converging function - Call { - /// The function that’s being called - func: Operand<'tcx>, - /// Arguments the function is called with - args: Vec>, - /// Destination for the return value. If some, the call is converging. - destination: Option<(Lvalue<'tcx>, BasicBlock)>, - /// Cleanups to be done if the call unwinds. - cleanup: Option + target: Block, + unwind: Option, }, - - /// Jump to the target if the condition has the expected value, - /// otherwise panic with a message and a cleanup target. - Assert { - cond: Operand<'tcx>, - expected: bool, - msg: AssertMessage<'tcx>, - target: BasicBlock, - cleanup: Option - } } impl<'tcx> Terminator<'tcx> { - pub fn successors(&self) -> Cow<[BasicBlock]> { + pub fn successors(&self) -> Cow<[Block]> { self.kind.successors() } - pub fn successors_mut(&mut self) -> Vec<&mut BasicBlock> { + pub fn successors_mut(&mut self) -> Vec<&mut Block> { self.kind.successors_mut() } } impl<'tcx> TerminatorKind<'tcx> { pub fn if_<'a, 'gcx>(tcx: ty::TyCtxt<'a, 'gcx, 'tcx>, cond: Operand<'tcx>, - t: BasicBlock, f: BasicBlock) -> TerminatorKind<'tcx> { + t: Block, f: Block) -> TerminatorKind<'tcx> { static BOOL_SWITCH_FALSE: &'static [ConstInt] = &[ConstInt::U8(0)]; TerminatorKind::SwitchInt { discr: cond, @@ -551,7 +539,7 @@ impl<'tcx> TerminatorKind<'tcx> { } } - pub fn successors(&self) -> Cow<[BasicBlock]> { + pub fn successors(&self) -> Cow<[Block]> { use self::TerminatorKind::*; match *self { Goto { target: ref b } => slice::ref_slice(b).into_cow(), @@ -559,11 +547,6 @@ impl<'tcx> TerminatorKind<'tcx> { Resume => (&[]).into_cow(), Return => (&[]).into_cow(), Unreachable => (&[]).into_cow(), - Call { destination: Some((_, t)), cleanup: Some(c), .. } => vec![t, c].into_cow(), - Call { destination: Some((_, ref t)), cleanup: None, .. } => - slice::ref_slice(t).into_cow(), - Call { destination: None, cleanup: Some(ref c), .. } => slice::ref_slice(c).into_cow(), - Call { destination: None, cleanup: None, .. } => (&[]).into_cow(), DropAndReplace { target, unwind: Some(unwind), .. } | Drop { target, unwind: Some(unwind), .. } => { vec![target, unwind].into_cow() @@ -572,14 +555,12 @@ impl<'tcx> TerminatorKind<'tcx> { Drop { ref target, unwind: None, .. } => { slice::ref_slice(target).into_cow() } - Assert { target, cleanup: Some(unwind), .. } => vec![target, unwind].into_cow(), - Assert { ref target, .. } => slice::ref_slice(target).into_cow(), } } - // FIXME: no mootable cow. I’m honestly not sure what a “cow” between `&mut [BasicBlock]` and - // `Vec<&mut BasicBlock>` would look like in the first place. - pub fn successors_mut(&mut self) -> Vec<&mut BasicBlock> { + // FIXME: no mootable cow. I’m honestly not sure what a “cow” between `&mut [Block]` and + // `Vec<&mut Block>` would look like in the first place. 
+ pub fn successors_mut(&mut self) -> Vec<&mut Block> { use self::TerminatorKind::*; match *self { Goto { target: ref mut b } => vec![b], @@ -587,25 +568,19 @@ impl<'tcx> TerminatorKind<'tcx> { Resume => Vec::new(), Return => Vec::new(), Unreachable => Vec::new(), - Call { destination: Some((_, ref mut t)), cleanup: Some(ref mut c), .. } => vec![t, c], - Call { destination: Some((_, ref mut t)), cleanup: None, .. } => vec![t], - Call { destination: None, cleanup: Some(ref mut c), .. } => vec![c], - Call { destination: None, cleanup: None, .. } => vec![], DropAndReplace { ref mut target, unwind: Some(ref mut unwind), .. } | Drop { ref mut target, unwind: Some(ref mut unwind), .. } => vec![target, unwind], DropAndReplace { ref mut target, unwind: None, .. } | Drop { ref mut target, unwind: None, .. } => { vec![target] } - Assert { ref mut target, cleanup: Some(ref mut unwind), .. } => vec![target, unwind], - Assert { ref mut target, .. } => vec![target] } } } -impl<'tcx> BasicBlockData<'tcx> { - pub fn new(terminator: Option>) -> BasicBlockData<'tcx> { - BasicBlockData { +impl<'tcx> BlockData<'tcx> { + pub fn new(terminator: Option>) -> BlockData<'tcx> { + BlockData { statements: vec![], terminator: terminator, is_cleanup: false, @@ -667,39 +642,6 @@ impl<'tcx> TerminatorKind<'tcx> { Drop { ref location, .. } => write!(fmt, "drop({:?})", location), DropAndReplace { ref location, ref value, .. } => write!(fmt, "replace({:?} <- {:?})", location, value), - Call { ref func, ref args, ref destination, .. } => { - if let Some((ref destination, _)) = *destination { - write!(fmt, "{:?} = ", destination)?; - } - write!(fmt, "{:?}(", func)?; - for (index, arg) in args.iter().enumerate() { - if index > 0 { - write!(fmt, ", ")?; - } - write!(fmt, "{:?}", arg)?; - } - write!(fmt, ")") - } - Assert { ref cond, expected, ref msg, .. } => { - write!(fmt, "assert(")?; - if !expected { - write!(fmt, "!")?; - } - write!(fmt, "{:?}, ", cond)?; - - match *msg { - AssertMessage::BoundsCheck { ref len, ref index } => { - write!(fmt, "{:?}, {:?}, {:?}", - "index out of bounds: the len is {} but the index is {}", - len, index)?; - } - AssertMessage::Math(ref err) => { - write!(fmt, "{:?}", err.description())?; - } - } - - write!(fmt, ")") - } } } @@ -719,20 +661,12 @@ impl<'tcx> TerminatorKind<'tcx> { .chain(iter::once(String::from("otherwise").into())) .collect() } - Call { destination: Some(_), cleanup: Some(_), .. } => - vec!["return".into_cow(), "unwind".into_cow()], - Call { destination: Some(_), cleanup: None, .. } => vec!["return".into_cow()], - Call { destination: None, cleanup: Some(_), .. } => vec!["unwind".into_cow()], - Call { destination: None, cleanup: None, .. } => vec![], DropAndReplace { unwind: None, .. } | Drop { unwind: None, .. } => vec!["return".into_cow()], DropAndReplace { unwind: Some(_), .. } | Drop { unwind: Some(_), .. } => { vec!["return".into_cow(), "unwind".into_cow()] } - Assert { cleanup: None, .. } => vec!["".into()], - Assert { .. } => - vec!["success".into_cow(), "unwind".into_cow()] } } } @@ -761,6 +695,44 @@ impl<'tcx> Statement<'tcx> { pub fn make_nop(&mut self) { self.kind = StatementKind::Nop } + + pub fn cleanup_target(&self) -> Option { + match self.kind { + StatementKind::Assign(..) | + StatementKind::SetDiscriminant { .. } | + StatementKind::StorageLive(..) | + StatementKind::StorageDead(..) | + StatementKind::InlineAsm { .. } | + StatementKind::Nop => None, + StatementKind::Assert { cleanup: unwind, .. } | + StatementKind::Call { cleanup: unwind, .. 
} => { + if let Some(unwind) = unwind { + Some(unwind) + } else { + None + } + } + } + } + + pub fn cleanup_target_mut(&mut self) -> Option<&mut Block> { + match self.kind { + StatementKind::Assign(..) | + StatementKind::SetDiscriminant { .. } | + StatementKind::StorageLive(..) | + StatementKind::StorageDead(..) | + StatementKind::InlineAsm { .. } | + StatementKind::Nop => None, + StatementKind::Assert { cleanup: ref mut unwind, .. } | + StatementKind::Call { cleanup: ref mut unwind, .. } => { + if let Some(ref mut unwind) = *unwind { + Some(unwind) + } else { + None + } + } + } + } } #[derive(Clone, Debug, RustcEncodable, RustcDecodable)] @@ -783,6 +755,27 @@ pub enum StatementKind<'tcx> { inputs: Vec> }, + /// Jump to the target if the condition has the expected value, + /// otherwise panic with a message and a cleanup target. + Assert { + cond: Operand<'tcx>, + expected: bool, + msg: AssertMessage<'tcx>, + cleanup: Option + }, + + /// Block ends with a call of a converging function + Call { + /// The function that’s being called + func: Operand<'tcx>, + /// Arguments the function is called with + args: Vec>, + /// Destination for the return value. + destination: Lvalue<'tcx>, + /// Cleanups to be done if the call unwinds. + cleanup: Option + }, + /// No-op. Useful for deleting instructions without affecting statement indices. Nop, } @@ -800,6 +793,37 @@ impl<'tcx> Debug for Statement<'tcx> { InlineAsm { ref asm, ref outputs, ref inputs } => { write!(fmt, "asm!({:?} : {:?} : {:?})", asm, outputs, inputs) }, + Assert { ref cond, expected, ref msg, .. } => { + write!(fmt, "assert(")?; + if !expected { + write!(fmt, "!")?; + } + write!(fmt, "{:?}, ", cond)?; + + match *msg { + AssertMessage::BoundsCheck { ref len, ref index } => { + write!(fmt, "{:?}, {:?}, {:?}", + "index out of bounds: the len is {} but the index is {}", + len, index)?; + } + AssertMessage::Math(ref err) => { + write!(fmt, "{:?}", err.description())?; + } + } + + write!(fmt, ")") + }, + Call { ref func, ref args, ref destination, .. } => { + write!(fmt, "{:?} = ", destination)?; + write!(fmt, "{:?}(", func)?; + for (index, arg) in args.iter().enumerate() { + if index > 0 { + write!(fmt, ", ")?; + } + write!(fmt, "{:?}", arg)?; + } + write!(fmt, ")") + }, Nop => write!(fmt, "nop"), } } @@ -1296,7 +1320,7 @@ fn item_path_str(def_id: DefId) -> String { impl<'tcx> ControlFlowGraph for Mir<'tcx> { - type Node = BasicBlock; + type Node = Block; fn num_nodes(&self) -> usize { self.basic_blocks.len() } @@ -1310,24 +1334,24 @@ impl<'tcx> ControlFlowGraph for Mir<'tcx> { fn successors<'graph>(&'graph self, node: Self::Node) -> >::Iter { - self.basic_blocks[node].terminator().successors().into_owned().into_iter() + self.successors_for(node).clone().into_iter() } } impl<'a, 'b> GraphPredecessors<'b> for Mir<'a> { - type Item = BasicBlock; - type Iter = IntoIter; + type Item = Block; + type Iter = IntoIter; } impl<'a, 'b> GraphSuccessors<'b> for Mir<'a> { - type Item = BasicBlock; - type Iter = IntoIter; + type Item = Block; + type Iter = IntoIter; } #[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] pub struct Location { /// the location is within this block - pub block: BasicBlock, + pub block: Block, /// the location is the start of the this statement; or, if `statement_index` /// == num-statements, then the start of the terminator. 
@@ -1341,7 +1365,7 @@ impl fmt::Debug for Location { } impl Location { - pub fn dominates(&self, other: &Location, dominators: &Dominators) -> bool { + pub fn dominates(&self, other: &Location, dominators: &Dominators) -> bool { if self.block == other.block { self.statement_index <= other.statement_index } else { @@ -1392,9 +1416,9 @@ impl<'tcx> TypeFoldable<'tcx> for LocalDecl<'tcx> { } } -impl<'tcx> TypeFoldable<'tcx> for BasicBlockData<'tcx> { +impl<'tcx> TypeFoldable<'tcx> for BlockData<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - BasicBlockData { + BlockData { statements: self.statements.fold_with(folder), terminator: self.terminator.fold_with(folder), is_cleanup: self.is_cleanup @@ -1423,6 +1447,32 @@ impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> { outputs: outputs.fold_with(folder), inputs: inputs.fold_with(folder) }, + Assert { ref cond, expected, ref msg, cleanup } => { + let msg = if let AssertMessage::BoundsCheck { ref len, ref index } = *msg { + AssertMessage::BoundsCheck { + len: len.fold_with(folder), + index: index.fold_with(folder), + } + } else { + msg.clone() + }; + Assert { + cond: cond.fold_with(folder), + expected: expected, + msg: msg, + cleanup: cleanup + } + }, + Call { ref func, ref args, ref destination, cleanup } => { + let dest = destination.fold_with(folder); + + Call { + func: func.fold_with(folder), + args: args.fold_with(folder), + destination: dest, + cleanup: cleanup + } + }, Nop => Nop, }; Statement { @@ -1441,6 +1491,21 @@ impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> { StorageDead(ref lvalue) => lvalue.visit_with(visitor), InlineAsm { ref outputs, ref inputs, .. } => outputs.visit_with(visitor) || inputs.visit_with(visitor), + Assert { ref cond, ref msg, .. } => { + if cond.visit_with(visitor) { + if let AssertMessage::BoundsCheck { ref len, ref index } = *msg { + len.visit_with(visitor) || index.visit_with(visitor) + } else { + false + } + } else { + false + } + }, + Call { ref func, ref args, ref destination, .. } => { + destination.visit_with(visitor) || func.visit_with(visitor) || + args.visit_with(visitor) + }, Nop => false, } } @@ -1469,35 +1534,6 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { target: target, unwind: unwind }, - Call { ref func, ref args, ref destination, cleanup } => { - let dest = destination.as_ref().map(|&(ref loc, dest)| { - (loc.fold_with(folder), dest) - }); - - Call { - func: func.fold_with(folder), - args: args.fold_with(folder), - destination: dest, - cleanup: cleanup - } - }, - Assert { ref cond, expected, ref msg, target, cleanup } => { - let msg = if let AssertMessage::BoundsCheck { ref len, ref index } = *msg { - AssertMessage::BoundsCheck { - len: len.fold_with(folder), - index: index.fold_with(folder), - } - } else { - msg.clone() - }; - Assert { - cond: cond.fold_with(folder), - expected: expected, - msg: msg, - target: target, - cleanup: cleanup - } - }, Resume => Resume, Return => Return, Unreachable => Unreachable, @@ -1517,23 +1553,6 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { Drop { ref location, ..} => location.visit_with(visitor), DropAndReplace { ref location, ref value, ..} => location.visit_with(visitor) || value.visit_with(visitor), - Call { ref func, ref args, ref destination, .. } => { - let dest = if let Some((ref loc, _)) = *destination { - loc.visit_with(visitor) - } else { false }; - dest || func.visit_with(visitor) || args.visit_with(visitor) - }, - Assert { ref cond, ref msg, .. 
} => { - if cond.visit_with(visitor) { - if let AssertMessage::BoundsCheck { ref len, ref index } = *msg { - len.visit_with(visitor) || index.visit_with(visitor) - } else { - false - } - } else { - false - } - }, Goto { .. } | Resume | Return | diff --git a/src/librustc/mir/traversal.rs b/src/librustc/mir/traversal.rs index 6057e7ec7e0f5..8a43a510f6cc3 100644 --- a/src/librustc/mir/traversal.rs +++ b/src/librustc/mir/traversal.rs @@ -36,11 +36,11 @@ use super::*; pub struct Preorder<'a, 'tcx: 'a> { mir: &'a Mir<'tcx>, visited: BitVector, - worklist: Vec, + worklist: Vec, } impl<'a, 'tcx> Preorder<'a, 'tcx> { - pub fn new(mir: &'a Mir<'tcx>, root: BasicBlock) -> Preorder<'a, 'tcx> { + pub fn new(mir: &'a Mir<'tcx>, root: Block) -> Preorder<'a, 'tcx> { let worklist = vec![root]; Preorder { @@ -56,23 +56,19 @@ pub fn preorder<'a, 'tcx>(mir: &'a Mir<'tcx>) -> Preorder<'a, 'tcx> { } impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> { - type Item = (BasicBlock, &'a BasicBlockData<'tcx>); + type Item = (Block, &'a BlockData<'tcx>); - fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> { + fn next(&mut self) -> Option<(Block, &'a BlockData<'tcx>)> { while let Some(idx) = self.worklist.pop() { if !self.visited.insert(idx.index()) { continue; } - let data = &self.mir[idx]; - - if let Some(ref term) = data.terminator { - for &succ in term.successors().iter() { - self.worklist.push(succ); - } + for &succ in self.mir.successors_for(idx).iter() { + self.worklist.push(succ); } - return Some((idx, data)); + return Some((idx, &self.mir[idx])); } None @@ -100,28 +96,22 @@ impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> { pub struct Postorder<'a, 'tcx: 'a> { mir: &'a Mir<'tcx>, visited: BitVector, - visit_stack: Vec<(BasicBlock, vec::IntoIter)> + visit_stack: Vec<(Block, vec::IntoIter)> } impl<'a, 'tcx> Postorder<'a, 'tcx> { - pub fn new(mir: &'a Mir<'tcx>, root: BasicBlock) -> Postorder<'a, 'tcx> { + pub fn new(mir: &'a Mir<'tcx>, root: Block) -> Postorder<'a, 'tcx> { let mut po = Postorder { mir: mir, visited: BitVector::new(mir.basic_blocks().len()), visit_stack: Vec::new() }; + po.visited.insert(root.index()); - let data = &po.mir[root]; - - if let Some(ref term) = data.terminator { - po.visited.insert(root.index()); - - let succs = term.successors().into_owned().into_iter(); - - po.visit_stack.push((root, succs)); - po.traverse_successor(); - } + let succs = ControlFlowGraph::successors(&po.mir, root); + po.visit_stack.push((root, succs)); + po.traverse_successor(); po } @@ -186,10 +176,7 @@ impl<'a, 'tcx> Postorder<'a, 'tcx> { }; if self.visited.insert(bb.index()) { - if let Some(ref term) = self.mir[bb].terminator { - let succs = term.successors().into_owned().into_iter(); - self.visit_stack.push((bb, succs)); - } + self.visit_stack.push((bb, ControlFlowGraph::successors(&self.mir, bb))); } } } @@ -200,9 +187,9 @@ pub fn postorder<'a, 'tcx>(mir: &'a Mir<'tcx>) -> Postorder<'a, 'tcx> { } impl<'a, 'tcx> Iterator for Postorder<'a, 'tcx> { - type Item = (BasicBlock, &'a BasicBlockData<'tcx>); + type Item = (Block, &'a BlockData<'tcx>); - fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> { + fn next(&mut self) -> Option<(Block, &'a BlockData<'tcx>)> { let next = self.visit_stack.pop(); if next.is_some() { self.traverse_successor(); @@ -240,12 +227,12 @@ impl<'a, 'tcx> Iterator for Postorder<'a, 'tcx> { #[derive(Clone)] pub struct ReversePostorder<'a, 'tcx: 'a> { mir: &'a Mir<'tcx>, - blocks: Vec, + blocks: Vec, idx: usize } impl<'a, 'tcx> ReversePostorder<'a, 'tcx> { - pub 
fn new(mir: &'a Mir<'tcx>, root: BasicBlock) -> ReversePostorder<'a, 'tcx> { + pub fn new(mir: &'a Mir<'tcx>, root: Block) -> ReversePostorder<'a, 'tcx> { let blocks : Vec<_> = Postorder::new(mir, root).map(|(bb, _)| bb).collect(); let len = blocks.len(); @@ -268,9 +255,9 @@ pub fn reverse_postorder<'a, 'tcx>(mir: &'a Mir<'tcx>) -> ReversePostorder<'a, ' } impl<'a, 'tcx> Iterator for ReversePostorder<'a, 'tcx> { - type Item = (BasicBlock, &'a BasicBlockData<'tcx>); + type Item = (Block, &'a BlockData<'tcx>); - fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> { + fn next(&mut self) -> Option<(Block, &'a BlockData<'tcx>)> { if self.idx == 0 { return None; } self.idx -= 1; diff --git a/src/librustc/mir/visit.rs b/src/librustc/mir/visit.rs index 733ad36de90e3..4710042969425 100644 --- a/src/librustc/mir/visit.rs +++ b/src/librustc/mir/visit.rs @@ -50,9 +50,9 @@ use syntax_pos::Span; // // ```rust // fn super_basic_block_data(&mut self, -// block: BasicBlock, -// data: & $($mutability)* BasicBlockData<'tcx>) { -// let BasicBlockData { +// block: Block, +// data: & $($mutability)* BlockData<'tcx>) { +// let BlockData { // ref $($mutability)* statements, // ref $($mutability)* terminator, // is_cleanup: _ @@ -66,9 +66,9 @@ use syntax_pos::Span; // } // ``` // -// Here we used `let BasicBlockData { } = *data` deliberately, +// Here we used `let BlockData { } = *data` deliberately, // rather than writing `data.statements` in the body. This is because if one -// adds a new field to `BasicBlockData`, one will be forced to revise this code, +// adds a new field to `BlockData`, one will be forced to revise this code, // and hence one will (hopefully) invoke the correct visit methods (if any). // // For this to work, ALL MATCHES MUST BE EXHAUSTIVE IN FIELDS AND VARIANTS. @@ -90,8 +90,8 @@ macro_rules! make_mir_visitor { } fn visit_basic_block_data(&mut self, - block: BasicBlock, - data: & $($mutability)* BasicBlockData<'tcx>) { + block: Block, + data: & $($mutability)* BlockData<'tcx>) { self.super_basic_block_data(block, data); } @@ -101,14 +101,14 @@ macro_rules! make_mir_visitor { } fn visit_statement(&mut self, - block: BasicBlock, + block: Block, statement: & $($mutability)* Statement<'tcx>, location: Location) { self.super_statement(block, statement, location); } fn visit_assign(&mut self, - block: BasicBlock, + block: Block, lvalue: & $($mutability)* Lvalue<'tcx>, rvalue: & $($mutability)* Rvalue<'tcx>, location: Location) { @@ -116,14 +116,14 @@ macro_rules! make_mir_visitor { } fn visit_terminator(&mut self, - block: BasicBlock, + block: Block, terminator: & $($mutability)* Terminator<'tcx>, location: Location) { self.super_terminator(block, terminator, location); } fn visit_terminator_kind(&mut self, - block: BasicBlock, + block: Block, kind: & $($mutability)* TerminatorKind<'tcx>, location: Location) { self.super_terminator_kind(block, kind, location); @@ -176,8 +176,8 @@ macro_rules! make_mir_visitor { } fn visit_branch(&mut self, - source: BasicBlock, - target: BasicBlock) { + source: Block, + target: Block) { self.super_branch(source, target); } @@ -258,7 +258,7 @@ macro_rules! make_mir_visitor { fn super_mir(&mut self, mir: & $($mutability)* Mir<'tcx>) { for index in 0..mir.basic_blocks().len() { - let block = BasicBlock::new(index); + let block = Block::new(index); self.visit_basic_block_data(block, &$($mutability)* mir[block]); } @@ -276,9 +276,9 @@ macro_rules! 
make_mir_visitor { } fn super_basic_block_data(&mut self, - block: BasicBlock, - data: & $($mutability)* BasicBlockData<'tcx>) { - let BasicBlockData { + block: Block, + data: & $($mutability)* BlockData<'tcx>) { + let BlockData { ref $($mutability)* statements, ref $($mutability)* terminator, is_cleanup: _ @@ -311,7 +311,7 @@ macro_rules! make_mir_visitor { } fn super_statement(&mut self, - block: BasicBlock, + block: Block, statement: & $($mutability)* Statement<'tcx>, location: Location) { let Statement { @@ -344,12 +344,31 @@ macro_rules! make_mir_visitor { self.visit_operand(input, location); } } + StatementKind::Assert { ref $($mutability)* cond, + expected: _, + ref $($mutability)* msg, + cleanup } => { + self.visit_operand(cond, location); + self.visit_assert_message(msg, location); + cleanup.map(|t| self.visit_branch(block, t)); + } + StatementKind::Call { ref $($mutability)* func, + ref $($mutability)* args, + ref $($mutability)* destination, + cleanup } => { + self.visit_operand(func, location); + for arg in args { + self.visit_operand(arg, location); + } + self.visit_lvalue(destination, LvalueContext::Call, location); + cleanup.map(|t| self.visit_branch(block, t)); + } StatementKind::Nop => {} } } fn super_assign(&mut self, - _block: BasicBlock, + _block: Block, lvalue: &$($mutability)* Lvalue<'tcx>, rvalue: &$($mutability)* Rvalue<'tcx>, location: Location) { @@ -358,7 +377,7 @@ macro_rules! make_mir_visitor { } fn super_terminator(&mut self, - block: BasicBlock, + block: Block, terminator: &$($mutability)* Terminator<'tcx>, location: Location) { let Terminator { @@ -371,7 +390,7 @@ macro_rules! make_mir_visitor { } fn super_terminator_kind(&mut self, - block: BasicBlock, + block: Block, kind: & $($mutability)* TerminatorKind<'tcx>, source_location: Location) { match *kind { @@ -415,32 +434,6 @@ macro_rules! make_mir_visitor { self.visit_branch(block, target); unwind.map(|t| self.visit_branch(block, t)); } - - TerminatorKind::Call { ref $($mutability)* func, - ref $($mutability)* args, - ref $($mutability)* destination, - cleanup } => { - self.visit_operand(func, source_location); - for arg in args { - self.visit_operand(arg, source_location); - } - if let Some((ref $($mutability)* destination, target)) = *destination { - self.visit_lvalue(destination, LvalueContext::Call, source_location); - self.visit_branch(block, target); - } - cleanup.map(|t| self.visit_branch(block, t)); - } - - TerminatorKind::Assert { ref $($mutability)* cond, - expected: _, - ref $($mutability)* msg, - target, - cleanup } => { - self.visit_operand(cond, source_location); - self.visit_assert_message(msg, source_location); - self.visit_branch(block, target); - cleanup.map(|t| self.visit_branch(block, t)); - } } } @@ -643,8 +636,8 @@ macro_rules! make_mir_visitor { } fn super_branch(&mut self, - _source: BasicBlock, - _target: BasicBlock) { + _source: Block, + _target: Block) { } fn super_constant(&mut self, diff --git a/src/librustc_borrowck/borrowck/mir/dataflow/graphviz.rs b/src/librustc_borrowck/borrowck/mir/dataflow/graphviz.rs index 7f95f07f48d4a..7c1deb93b4335 100644 --- a/src/librustc_borrowck/borrowck/mir/dataflow/graphviz.rs +++ b/src/librustc_borrowck/borrowck/mir/dataflow/graphviz.rs @@ -11,7 +11,7 @@ //! Hook into libgraphviz for rendering dataflow graphs for MIR. 
use syntax::ast::NodeId; -use rustc::mir::{BasicBlock, Mir}; +use rustc::mir::{Block, Mir}; use rustc_data_structures::bitslice::bits_to_string; use rustc_data_structures::indexed_set::{IdxSet}; use rustc_data_structures::indexed_vec::Idx; @@ -119,13 +119,13 @@ pub fn print_borrowck_graph_to<'a, 'tcx, BD, P>( File::create(path).and_then(|mut f| f.write_all(&v)) } -pub type Node = BasicBlock; +pub type Node = Block; #[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub struct Edge { source: BasicBlock, index: usize } +pub struct Edge { source: Block, index: usize } -fn outgoing(mir: &Mir, bb: BasicBlock) -> Vec { - let succ_len = mir[bb].terminator().successors().len(); +fn outgoing(mir: &Mir, bb: Block) -> Vec { + let succ_len = mir.successors_for(bb).len(); (0..succ_len).map(|index| Edge { source: bb, index: index}).collect() } diff --git a/src/librustc_borrowck/borrowck/mir/dataflow/impls.rs b/src/librustc_borrowck/borrowck/mir/dataflow/impls.rs index da8aa231ccf15..2b617537f20af 100644 --- a/src/librustc_borrowck/borrowck/mir/dataflow/impls.rs +++ b/src/librustc_borrowck/borrowck/mir/dataflow/impls.rs @@ -12,7 +12,6 @@ use rustc::ty::TyCtxt; use rustc::mir::{self, Mir, Location}; use rustc_data_structures::bitslice::BitSlice; // adds set_bit/get_bit to &[usize] bitvector rep. use rustc_data_structures::bitslice::{BitwiseOperator}; -use rustc_data_structures::indexed_set::{IdxSet}; use rustc_data_structures::indexed_vec::Idx; use rustc_mir::util::elaborate_drops::DropFlagState; @@ -275,7 +274,7 @@ impl<'a, 'tcx> BitDenotation for MaybeInitializedLvals<'a, 'tcx> { fn statement_effect(&self, sets: &mut BlockSets, - bb: mir::BasicBlock, + bb: mir::Block, idx: usize) { drop_flag_effects_for_location( @@ -287,7 +286,7 @@ impl<'a, 'tcx> BitDenotation for MaybeInitializedLvals<'a, 'tcx> { fn terminator_effect(&self, sets: &mut BlockSets, - bb: mir::BasicBlock, + bb: mir::Block, statements_len: usize) { drop_flag_effects_for_location( @@ -296,18 +295,6 @@ impl<'a, 'tcx> BitDenotation for MaybeInitializedLvals<'a, 'tcx> { |path, s| Self::update_bits(sets, path, s) ) } - - fn propagate_call_return(&self, - in_out: &mut IdxSet, - _call_bb: mir::BasicBlock, - _dest_bb: mir::BasicBlock, - dest_lval: &mir::Lvalue) { - // when a call returns successfully, that means we need to set - // the bits for that dest_lval to 1 (initialized). - on_lookup_result_bits(self.tcx, self.mir, self.move_data(), - self.move_data().rev_lookup.find(dest_lval), - |mpi| { in_out.add(&mpi); }); - } } impl<'a, 'tcx> BitDenotation for MaybeUninitializedLvals<'a, 'tcx> { @@ -332,7 +319,7 @@ impl<'a, 'tcx> BitDenotation for MaybeUninitializedLvals<'a, 'tcx> { fn statement_effect(&self, sets: &mut BlockSets, - bb: mir::BasicBlock, + bb: mir::Block, idx: usize) { drop_flag_effects_for_location( @@ -344,7 +331,7 @@ impl<'a, 'tcx> BitDenotation for MaybeUninitializedLvals<'a, 'tcx> { fn terminator_effect(&self, sets: &mut BlockSets, - bb: mir::BasicBlock, + bb: mir::Block, statements_len: usize) { drop_flag_effects_for_location( @@ -353,18 +340,6 @@ impl<'a, 'tcx> BitDenotation for MaybeUninitializedLvals<'a, 'tcx> { |path, s| Self::update_bits(sets, path, s) ) } - - fn propagate_call_return(&self, - in_out: &mut IdxSet, - _call_bb: mir::BasicBlock, - _dest_bb: mir::BasicBlock, - dest_lval: &mir::Lvalue) { - // when a call returns successfully, that means we need to set - // the bits for that dest_lval to 0 (initialized). 
- on_lookup_result_bits(self.tcx, self.mir, self.move_data(), - self.move_data().rev_lookup.find(dest_lval), - |mpi| { in_out.remove(&mpi); }); - } } impl<'a, 'tcx> BitDenotation for DefinitelyInitializedLvals<'a, 'tcx> { @@ -388,7 +363,7 @@ impl<'a, 'tcx> BitDenotation for DefinitelyInitializedLvals<'a, 'tcx> { fn statement_effect(&self, sets: &mut BlockSets, - bb: mir::BasicBlock, + bb: mir::Block, idx: usize) { drop_flag_effects_for_location( @@ -400,7 +375,7 @@ impl<'a, 'tcx> BitDenotation for DefinitelyInitializedLvals<'a, 'tcx> { fn terminator_effect(&self, sets: &mut BlockSets, - bb: mir::BasicBlock, + bb: mir::Block, statements_len: usize) { drop_flag_effects_for_location( @@ -409,18 +384,6 @@ impl<'a, 'tcx> BitDenotation for DefinitelyInitializedLvals<'a, 'tcx> { |path, s| Self::update_bits(sets, path, s) ) } - - fn propagate_call_return(&self, - in_out: &mut IdxSet, - _call_bb: mir::BasicBlock, - _dest_bb: mir::BasicBlock, - dest_lval: &mir::Lvalue) { - // when a call returns successfully, that means we need to set - // the bits for that dest_lval to 1 (initialized). - on_lookup_result_bits(self.tcx, self.mir, self.move_data(), - self.move_data().rev_lookup.find(dest_lval), - |mpi| { in_out.add(&mpi); }); - } } impl<'a, 'tcx> BitDenotation for MovingOutStatements<'a, 'tcx> { @@ -436,7 +399,7 @@ impl<'a, 'tcx> BitDenotation for MovingOutStatements<'a, 'tcx> { } fn statement_effect(&self, sets: &mut BlockSets, - bb: mir::BasicBlock, + bb: mir::Block, idx: usize) { let (tcx, mir, move_data) = (self.tcx, self.mir, self.move_data()); let stmt = &mir[bb].statements[idx]; @@ -458,6 +421,7 @@ impl<'a, 'tcx> BitDenotation for MovingOutStatements<'a, 'tcx> { mir::StatementKind::SetDiscriminant { .. } => { span_bug!(stmt.source_info.span, "SetDiscriminant should not exist in borrowck"); } + mir::StatementKind::Call { destination: ref lvalue, .. } | mir::StatementKind::Assign(ref lvalue, _) => { // assigning into this `lvalue` kills all // MoveOuts from it, and *also* all MoveOuts @@ -474,13 +438,14 @@ impl<'a, 'tcx> BitDenotation for MovingOutStatements<'a, 'tcx> { mir::StatementKind::StorageLive(_) | mir::StatementKind::StorageDead(_) | mir::StatementKind::InlineAsm { .. } | + mir::StatementKind::Assert { .. 
} | mir::StatementKind::Nop => {} } } fn terminator_effect(&self, sets: &mut BlockSets, - bb: mir::BasicBlock, + bb: mir::Block, statements_len: usize) { let (mir, move_data) = (self.mir, self.move_data()); @@ -495,25 +460,6 @@ impl<'a, 'tcx> BitDenotation for MovingOutStatements<'a, 'tcx> { zero_to_one(sets.gen_set.words_mut(), *move_index); } } - - fn propagate_call_return(&self, - in_out: &mut IdxSet, - _call_bb: mir::BasicBlock, - _dest_bb: mir::BasicBlock, - dest_lval: &mir::Lvalue) { - let move_data = self.move_data(); - let bits_per_block = self.bits_per_block(); - - let path_map = &move_data.path_map; - on_lookup_result_bits(self.tcx, - self.mir, - move_data, - move_data.rev_lookup.find(dest_lval), - |mpi| for moi in &path_map[mpi] { - assert!(moi.index() < bits_per_block); - in_out.remove(&moi); - }); - } } fn zero_to_one(bitvec: &mut [usize], move_index: MoveOutIndex) { diff --git a/src/librustc_borrowck/borrowck/mir/dataflow/mod.rs b/src/librustc_borrowck/borrowck/mir/dataflow/mod.rs index 8b246105f6169..9a9997e50bd38 100644 --- a/src/librustc_borrowck/borrowck/mir/dataflow/mod.rs +++ b/src/librustc_borrowck/borrowck/mir/dataflow/mod.rs @@ -82,7 +82,7 @@ impl<'a, 'tcx: 'a, BD> DataflowAnalysis<'a, 'tcx, BD> } for (bb, data) in self.mir.basic_blocks().iter_enumerated() { - let &mir::BasicBlockData { ref statements, ref terminator, is_cleanup: _ } = data; + let &mir::BlockData { ref statements, ref terminator, is_cleanup: _ } = data; let sets = &mut self.flow_state.sets.for_block(bb.index()); for j_stmt in 0..statements.len() { @@ -119,7 +119,7 @@ impl<'b, 'a: 'b, 'tcx: 'a, BD> PropagationContext<'b, 'a, 'tcx, BD> in_out.subtract(sets.kill_set); } builder.propagate_bits_into_graph_successors_of( - in_out, &mut self.changed, (mir::BasicBlock::new(bb_idx), bb_data)); + in_out, &mut self.changed, (mir::Block::new(bb_idx), bb_data)); } } } @@ -328,7 +328,7 @@ pub trait BitDenotation { /// the MIR. fn statement_effect(&self, sets: &mut BlockSets, - bb: mir::BasicBlock, + bb: mir::Block, idx_stmt: usize); /// Mutates the block-sets (the flow sets for the given @@ -343,33 +343,8 @@ pub trait BitDenotation { /// terminator took. fn terminator_effect(&self, sets: &mut BlockSets, - bb: mir::BasicBlock, + bb: mir::Block, idx_term: usize); - - /// Mutates the block-sets according to the (flow-dependent) - /// effect of a successful return from a Call terminator. - /// - /// If basic-block BB_x ends with a call-instruction that, upon - /// successful return, flows to BB_y, then this method will be - /// called on the exit flow-state of BB_x in order to set up the - /// entry flow-state of BB_y. - /// - /// This is used, in particular, as a special case during the - /// "propagate" loop where all of the basic blocks are repeatedly - /// visited. Since the effects of a Call terminator are - /// flow-dependent, the current MIR cannot encode them via just - /// GEN and KILL sets attached to the block, and so instead we add - /// this extra machinery to represent the flow-dependent effect. - /// - /// FIXME: Right now this is a bit of a wart in the API. It might - /// be better to represent this as an additional gen- and - /// kill-sets associated with each edge coming out of the basic - /// block. 
- fn propagate_call_return(&self, - in_out: &mut IdxSet, - call_bb: mir::BasicBlock, - dest_bb: mir::BasicBlock, - dest_lval: &mir::Lvalue); } impl<'a, 'tcx: 'a, D> DataflowAnalysis<'a, 'tcx, D> @@ -432,21 +407,19 @@ impl<'a, 'tcx: 'a, D> DataflowAnalysis<'a, 'tcx, D> &mut self, in_out: &mut IdxSet, changed: &mut bool, - (bb, bb_data): (mir::BasicBlock, &mir::BasicBlockData)) + (_, bb_data): (mir::Block, &mir::BlockData)) { match bb_data.terminator().kind { mir::TerminatorKind::Return | mir::TerminatorKind::Resume | mir::TerminatorKind::Unreachable => {} mir::TerminatorKind::Goto { ref target } | - mir::TerminatorKind::Assert { ref target, cleanup: None, .. } | mir::TerminatorKind::Drop { ref target, location: _, unwind: None } | mir::TerminatorKind::DropAndReplace { ref target, value: _, location: _, unwind: None } => { self.propagate_bits_into_entry_set_for(in_out, changed, target); } - mir::TerminatorKind::Assert { ref target, cleanup: Some(ref unwind), .. } | mir::TerminatorKind::Drop { ref target, location: _, unwind: Some(ref unwind) } | mir::TerminatorKind::DropAndReplace { ref target, value: _, location: _, unwind: Some(ref unwind) @@ -459,17 +432,40 @@ impl<'a, 'tcx: 'a, D> DataflowAnalysis<'a, 'tcx, D> self.propagate_bits_into_entry_set_for(in_out, changed, target); } } - mir::TerminatorKind::Call { ref cleanup, ref destination, func: _, args: _ } => { - if let Some(ref unwind) = *cleanup { - self.propagate_bits_into_entry_set_for(in_out, changed, unwind); - } - if let Some((ref dest_lval, ref dest_bb)) = *destination { - // N.B.: This must be done *last*, after all other - // propagation, as documented in comment above. - self.flow_state.operator.propagate_call_return( - in_out, bb, *dest_bb, dest_lval); - self.propagate_bits_into_entry_set_for(in_out, changed, dest_bb); + } + + for stmt in &bb_data.statements { + match stmt.kind { + mir::StatementKind::Assert { ref cleanup, .. } => { + if let Some(ref unwind) = *cleanup { + self.propagate_bits_into_entry_set_for(in_out, changed, unwind); + } } + _ => {} + } + } + + // FIXME(simulacrum): Handle calls in EBBs + let count = bb_data.statements.iter().filter(|s| match s.kind { + mir::StatementKind::Call { .. } => true, + _ => false, + }).count(); + if let Some(&mir::StatementKind::Call { .. }) = bb_data.statements.last().map(|s| &s.kind) { + assert_eq!(count, 1); + } else { + assert_eq!(count, 0); + } + if let + Some(&mir::StatementKind::Call { destination: _, ref cleanup, func: _, args: _ }) + = bb_data.statements.last().map(|s| &s.kind) { + if let Some(ref unwind) = *cleanup { + self.propagate_bits_into_entry_set_for(in_out, changed, unwind); + } + if let mir::TerminatorKind::Goto { .. } = bb_data.terminator().kind { + // FIXME(simulacrum): This NB is potentially needless. + // N.B.: This must be done *last*, after all other + // propagation, as documented in comment above. 
+ //self.flow_state.operator.propagate_call_return(in_out, bb, destination); } } } @@ -477,7 +473,7 @@ impl<'a, 'tcx: 'a, D> DataflowAnalysis<'a, 'tcx, D> fn propagate_bits_into_entry_set_for(&mut self, in_out: &IdxSet, changed: &mut bool, - bb: &mir::BasicBlock) { + bb: &mir::Block) { let entry_set = self.flow_state.sets.for_block(bb.index()).on_entry; let set_changed = bitwise(entry_set.words_mut(), in_out.words(), diff --git a/src/librustc_borrowck/borrowck/mir/dataflow/sanity_check.rs b/src/librustc_borrowck/borrowck/mir/dataflow/sanity_check.rs index 44e3b38ea3857..cfff84ebec79a 100644 --- a/src/librustc_borrowck/borrowck/mir/dataflow/sanity_check.rs +++ b/src/librustc_borrowck/borrowck/mir/dataflow/sanity_check.rs @@ -10,7 +10,6 @@ use syntax::abi::{Abi}; use syntax::ast; -use syntax_pos::Span; use rustc::ty::{self, TyCtxt}; use rustc::mir::{self, Mir}; @@ -57,16 +56,28 @@ pub fn sanity_check_via_rustc_peek<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>, fn each_block<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &Mir<'tcx>, results: &DataflowResults, - bb: mir::BasicBlock) where + bb: mir::Block) where O: BitDenotation + HasMoveData<'tcx> { let move_data = results.0.operator.move_data(); - let mir::BasicBlockData { ref statements, ref terminator, is_cleanup: _ } = mir[bb]; - - let (args, span) = match is_rustc_peek(tcx, terminator) { - Some(args_and_span) => args_and_span, - None => return, - }; + let mir::BlockData { ref statements, terminator: _, is_cleanup: _ } = mir[bb]; + + let mut args: Option<&[mir::Operand]> = None; + let mut span = None; + // FIXME: Don't loop through statements twice, here, and below + for stmt in statements { + // FIXME: Can there potentially be multiple Call rustc_peek statements? If so, + // do we need to handle all of them? That is possible, of course, but will require slight + // refactoring.. + if let Some(a) = is_rustc_peek(tcx, stmt) { + args = Some(a); + span = Some(stmt.source_info.span); + } else { + return; + } + } + let args = args.unwrap(); + let span = span.unwrap(); assert!(args.len() == 1); let peek_arg_lval = match args[0] { mir::Operand::Consume(ref lval @ mir::Lvalue::Local(_)) => Some(lval), @@ -105,6 +116,8 @@ fn each_block<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir::StatementKind::StorageLive(_) | mir::StatementKind::StorageDead(_) | mir::StatementKind::InlineAsm { .. } | + mir::StatementKind::Assert { .. } | + mir::StatementKind::Call { .. } | mir::StatementKind::Nop => continue, mir::StatementKind::SetDiscriminant{ .. } => span_bug!(stmt.source_info.span, @@ -157,21 +170,16 @@ fn each_block<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } fn is_rustc_peek<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - terminator: &'a Option>) - -> Option<(&'a [mir::Operand<'tcx>], Span)> { - if let Some(mir::Terminator { ref kind, source_info, .. }) = *terminator { - if let mir::TerminatorKind::Call { func: ref oper, ref args, .. } = *kind - { - if let mir::Operand::Constant(ref func) = *oper - { - if let ty::TyFnDef(def_id, _, sig) = func.ty.sty - { - let abi = sig.abi(); - let name = tcx.item_name(def_id); - if abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic { - if name == "rustc_peek" { - return Some((args, source_info.span)); - } + statement: &'a mir::Statement<'tcx>) + -> Option<&'a [mir::Operand<'tcx>]> { + if let mir::StatementKind::Call { func: ref oper, ref args, .. 
} = statement.kind { + if let mir::Operand::Constant(ref func) = *oper { + if let ty::TyFnDef(def_id, _, sig) = func.ty.sty { + let abi = sig.abi(); + let name = tcx.item_name(def_id); + if abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic { + if name == "rustc_peek" { + return Some(args); } } } diff --git a/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs b/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs index 88ec86cc95d61..ce18f397ac6fd 100644 --- a/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs +++ b/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs @@ -399,8 +399,8 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { loc: Location, location: &Lvalue<'tcx>, value: &Operand<'tcx>, - target: BasicBlock, - unwind: Option) + target: Block, + unwind: Option) { let bb = loc.block; let data = &self.mir[bb]; @@ -412,7 +412,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { }; let unwind = unwind.unwrap_or(self.patch.resume_block()); - let unwind = self.patch.new_block(BasicBlockData { + let unwind = self.patch.new_block(BlockData { statements: vec![assign.clone()], terminator: Some(Terminator { kind: TerminatorKind::Goto { target: unwind }, @@ -421,7 +421,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { is_cleanup: true }); - let target = self.patch.new_block(BasicBlockData { + let target = self.patch.new_block(BlockData { statements: vec![assign], terminator: Some(Terminator { kind: TerminatorKind::Goto { target: target }, @@ -494,13 +494,17 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { fn drop_flags_for_fn_rets(&mut self) { for (bb, data) in self.mir.basic_blocks().iter_enumerated() { - if let TerminatorKind::Call { - destination: Some((ref lv, tgt)), cleanup: Some(_), .. - } = data.terminator().kind { + // FIXME(simulacrum): Handle calls in EBBs + if let ( + Some(&Statement { + kind: StatementKind::Call { ref destination, cleanup: Some(_), .. }, .. + }), + &TerminatorKind::Goto { target } + ) = (data.statements.last(), &data.terminator().kind) { assert!(!self.patch.is_patched(bb)); - let loc = Location { block: tgt, statement_index: 0 }; - let path = self.move_data().rev_lookup.find(lv); + let loc = Location { block: target, statement_index: 0 }; + let path = self.move_data().rev_lookup.find(destination); on_lookup_result_bits( self.tcx, self.mir, self.move_data(), path, |child| self.set_drop_flag(loc, child, DropFlagState::Present) @@ -546,10 +550,13 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { allow_initializations = false; } _ => { - assert!(!self.patch.is_patched(bb)); + assert!(!self.patch.is_patched(bb), "kind: {:?}, source: {:?}, {:?}; bbdata: {:?}", + data.terminator().kind, data.terminator().source_info, + bb, data); } } } + let loc = Location { block: bb, statement_index: i }; super::drop_flag_effects_for_location( self.tcx, self.mir, self.env, loc, |path, ds| { @@ -557,15 +564,19 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { self.set_drop_flag(loc, path, ds) } } - ) + ); } // There may be a critical edge after this call, // so mark the return as initialized *before* the // call. - if let TerminatorKind::Call { - destination: Some((ref lv, _)), cleanup: None, .. - } = data.terminator().kind { + // FIXME(simulacrum): Handle calls in EBBs + if let ( + Some(&Statement { + kind: StatementKind::Call { destination: ref lv, cleanup: None, .. }, .. + }), + &TerminatorKind::Goto { .. 
} + ) = (data.statements.last(), &data.terminator().kind) { assert!(!self.patch.is_patched(bb)); let loc = Location { block: bb, statement_index: data.statements.len() }; diff --git a/src/librustc_borrowck/borrowck/mir/gather_moves.rs b/src/librustc_borrowck/borrowck/mir/gather_moves.rs index 81037fe40d9da..1b2704c7b80b7 100644 --- a/src/librustc_borrowck/borrowck/mir/gather_moves.rs +++ b/src/librustc_borrowck/borrowck/mir/gather_moves.rs @@ -126,9 +126,9 @@ pub trait HasMoveData<'tcx> { #[derive(Debug)] pub struct LocationMap { - /// Location-indexed (BasicBlock for outer index, index within BB + /// Location-indexed (Block for outer index, index within BB /// for inner index) map. - map: IndexVec>, + map: IndexVec>, } impl Index for LocationMap { @@ -412,7 +412,15 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { span_bug!(stmt.source_info.span, "SetDiscriminant should not exist during borrowck"); } + StatementKind::Call { ref func, ref args, ref destination, .. } => { + self.gather_operand(loc, func); + for arg in args { + self.gather_operand(loc, arg); + } + self.create_move_path(destination); + } StatementKind::InlineAsm { .. } | + StatementKind::Assert { .. } | StatementKind::Nop => {} } } @@ -464,7 +472,6 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { self.gather_move(loc, &Lvalue::Local(RETURN_POINTER)); } - TerminatorKind::Assert { .. } | TerminatorKind::SwitchInt { .. } => { // branching terminators - these don't move anything } @@ -476,15 +483,6 @@ impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { self.create_move_path(location); self.gather_operand(loc, value); } - TerminatorKind::Call { ref func, ref args, ref destination, cleanup: _ } => { - self.gather_operand(loc, func); - for arg in args { - self.gather_operand(loc, arg); - } - if let Some((ref destination, _bb)) = *destination { - self.create_move_path(destination); - } - } } } diff --git a/src/librustc_borrowck/borrowck/mir/mod.rs b/src/librustc_borrowck/borrowck/mir/mod.rs index 9237bb31f6bd7..709a485e27709 100644 --- a/src/librustc_borrowck/borrowck/mir/mod.rs +++ b/src/librustc_borrowck/borrowck/mir/mod.rs @@ -13,7 +13,7 @@ use borrowck::BorrowckCtxt; use syntax::ast::{self, MetaItem}; use syntax_pos::DUMMY_SP; -use rustc::mir::{self, BasicBlock, BasicBlockData, Mir, Statement, Terminator, Location}; +use rustc::mir::{self, Block, BlockData, Mir, Statement, Terminator, Location}; use rustc::session::Session; use rustc::ty::{self, TyCtxt}; use rustc_mir::util::elaborate_drops::DropFlagState; @@ -164,8 +164,8 @@ pub struct MirBorrowckCtxt<'b, 'a: 'b, 'tcx: 'a> { } impl<'b, 'a: 'b, 'tcx: 'a> MirBorrowckCtxt<'b, 'a, 'tcx> { - fn process_basic_block(&mut self, bb: BasicBlock) { - let BasicBlockData { ref statements, ref terminator, is_cleanup: _ } = + fn process_basic_block(&mut self, bb: Block) { + let BlockData { ref statements, ref terminator, is_cleanup: _ } = self.mir[bb]; for stmt in statements { self.process_statement(bb, stmt); @@ -174,11 +174,11 @@ impl<'b, 'a: 'b, 'tcx: 'a> MirBorrowckCtxt<'b, 'a, 'tcx> { self.process_terminator(bb, terminator); } - fn process_statement(&mut self, bb: BasicBlock, stmt: &Statement<'tcx>) { + fn process_statement(&mut self, bb: Block, stmt: &Statement<'tcx>) { debug!("MirBorrowckCtxt::process_statement({:?}, {:?}", bb, stmt); } - fn process_terminator(&mut self, bb: BasicBlock, term: &Option>) { + fn process_terminator(&mut self, bb: Block, term: &Option>) { debug!("MirBorrowckCtxt::process_terminator({:?}, {:?})", bb, term); } } @@ -355,6 +355,7 @@ fn drop_flag_effects_for_location<'a, 'tcx, 
F>( mir::StatementKind::SetDiscriminant{ .. } => { span_bug!(stmt.source_info.span, "SetDiscrimant should not exist during borrowck"); } + mir::StatementKind::Call { destination: ref lvalue, .. } | mir::StatementKind::Assign(ref lvalue, _) => { debug!("drop_flag_effects: assignment {:?}", stmt); on_lookup_result_bits(tcx, mir, move_data, @@ -364,6 +365,7 @@ fn drop_flag_effects_for_location<'a, 'tcx, F>( mir::StatementKind::StorageLive(_) | mir::StatementKind::StorageDead(_) | mir::StatementKind::InlineAsm { .. } | + mir::StatementKind::Assert { .. } | mir::StatementKind::Nop => {} }, None => { diff --git a/src/librustc_mir/build/block.rs b/src/librustc_mir/build/block.rs index 3305cfc0dfe1a..e3caa89de3c10 100644 --- a/src/librustc_mir/build/block.rs +++ b/src/librustc_mir/build/block.rs @@ -9,17 +9,19 @@ // except according to those terms. use build::{BlockAnd, BlockAndExtension, Builder}; +use hair; use hair::*; use rustc::mir::*; +use rustc::mir::Block; use rustc::hir; impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub fn ast_block(&mut self, destination: &Lvalue<'tcx>, - mut block: BasicBlock, + mut block: Block, ast_block: &'tcx hir::Block) -> BlockAnd<()> { - let Block { extent, span, stmts, expr } = self.hir.mirror(ast_block); + let hair::Block { extent, span, stmts, expr } = self.hir.mirror(ast_block); self.in_scope(extent, block, move |this| { // This convoluted structure is to avoid using recursion as we walk down a list // of statements. Basically, the structure we get back is something like: diff --git a/src/librustc_mir/build/cfg.rs b/src/librustc_mir/build/cfg.rs index 71e97e4bfe0d3..6d6c5c40f9a09 100644 --- a/src/librustc_mir/build/cfg.rs +++ b/src/librustc_mir/build/cfg.rs @@ -17,31 +17,31 @@ use build::CFG; use rustc::mir::*; impl<'tcx> CFG<'tcx> { - pub fn block_data(&self, blk: BasicBlock) -> &BasicBlockData<'tcx> { + pub fn block_data(&self, blk: Block) -> &BlockData<'tcx> { &self.basic_blocks[blk] } - pub fn block_data_mut(&mut self, blk: BasicBlock) -> &mut BasicBlockData<'tcx> { + pub fn block_data_mut(&mut self, blk: Block) -> &mut BlockData<'tcx> { &mut self.basic_blocks[blk] } - pub fn start_new_block(&mut self) -> BasicBlock { - self.basic_blocks.push(BasicBlockData::new(None)) + pub fn start_new_block(&mut self) -> Block { + self.basic_blocks.push(BlockData::new(None)) } - pub fn start_new_cleanup_block(&mut self) -> BasicBlock { + pub fn start_new_cleanup_block(&mut self) -> Block { let bb = self.start_new_block(); self.block_data_mut(bb).is_cleanup = true; bb } - pub fn push(&mut self, block: BasicBlock, statement: Statement<'tcx>) { + pub fn push(&mut self, block: Block, statement: Statement<'tcx>) { debug!("push({:?}, {:?})", block, statement); self.block_data_mut(block).statements.push(statement); } pub fn push_assign(&mut self, - block: BasicBlock, + block: Block, source_info: SourceInfo, lvalue: &Lvalue<'tcx>, rvalue: Rvalue<'tcx>) { @@ -52,7 +52,7 @@ impl<'tcx> CFG<'tcx> { } pub fn push_assign_constant(&mut self, - block: BasicBlock, + block: Block, source_info: SourceInfo, temp: &Lvalue<'tcx>, constant: Constant<'tcx>) { @@ -61,7 +61,7 @@ impl<'tcx> CFG<'tcx> { } pub fn push_assign_unit(&mut self, - block: BasicBlock, + block: Block, source_info: SourceInfo, lvalue: &Lvalue<'tcx>) { self.push_assign(block, source_info, lvalue, Rvalue::Aggregate( @@ -70,7 +70,7 @@ impl<'tcx> CFG<'tcx> { } pub fn terminate(&mut self, - block: BasicBlock, + block: Block, source_info: SourceInfo, kind: TerminatorKind<'tcx>) { debug!("terminating block {:?} <- {:?}", block, 
kind); diff --git a/src/librustc_mir/build/expr/as_lvalue.rs b/src/librustc_mir/build/expr/as_lvalue.rs index 1cd9a1b25bade..4931f9f099d65 100644 --- a/src/librustc_mir/build/expr/as_lvalue.rs +++ b/src/librustc_mir/build/expr/as_lvalue.rs @@ -14,13 +14,14 @@ use build::{BlockAnd, BlockAndExtension, Builder}; use build::expr::category::Category; use hair::*; use rustc::mir::*; +use rustc::mir::Block; use rustc_data_structures::indexed_vec::Idx; impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Compile `expr`, yielding an lvalue that we can move from etc. pub fn as_lvalue(&mut self, - block: BasicBlock, + block: Block, expr: M) -> BlockAnd> where M: Mirror<'tcx, Output=Expr<'tcx>> @@ -30,7 +31,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } fn expr_as_lvalue(&mut self, - mut block: BasicBlock, + mut block: Block, expr: Expr<'tcx>) -> BlockAnd> { debug!("expr_as_lvalue(block={:?}, expr={:?})", block, expr); @@ -74,9 +75,17 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { len: Operand::Consume(len), index: idx.clone() }; - let success = this.assert(block, Operand::Consume(lt), true, - msg, expr_span); - success.and(slice.index(idx)) + let stmt = Statement { + source_info: source_info, + kind: StatementKind::Assert { + cond: Operand::Consume(lt), + expected: true, + msg: msg, + cleanup: this.diverge_cleanup(), + } + }; + this.cfg.push(block, stmt); + block.and(slice.index(idx)) } ExprKind::SelfRef => { block.and(Lvalue::Local(Local::new(1))) diff --git a/src/librustc_mir/build/expr/as_operand.rs b/src/librustc_mir/build/expr/as_operand.rs index 8d79e755685d0..271617bc12d6d 100644 --- a/src/librustc_mir/build/expr/as_operand.rs +++ b/src/librustc_mir/build/expr/as_operand.rs @@ -15,6 +15,7 @@ use build::expr::category::Category; use hair::*; use rustc::middle::region::CodeExtent; use rustc::mir::*; +use rustc::mir::Block; impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Returns an operand suitable for use until the end of the current @@ -23,7 +24,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// The operand returned from this function will *not be valid* after /// an ExprKind::Scope is passed, so please do *not* return it from /// functions to avoid bad miscompiles. - pub fn as_local_operand(&mut self, block: BasicBlock, expr: M) + pub fn as_local_operand(&mut self, block: Block, expr: M) -> BlockAnd> where M: Mirror<'tcx, Output = Expr<'tcx>> { @@ -38,7 +39,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// /// The operand is known to be live until the end of `scope`. 
pub fn as_operand(&mut self, - block: BasicBlock, + block: Block, scope: Option, expr: M) -> BlockAnd> where M: Mirror<'tcx, Output = Expr<'tcx>> @@ -48,7 +49,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } fn expr_as_operand(&mut self, - mut block: BasicBlock, + mut block: Block, scope: Option, expr: Expr<'tcx>) -> BlockAnd> { diff --git a/src/librustc_mir/build/expr/as_rvalue.rs b/src/librustc_mir/build/expr/as_rvalue.rs index 6694107a8d485..fd5e5ede31a70 100644 --- a/src/librustc_mir/build/expr/as_rvalue.rs +++ b/src/librustc_mir/build/expr/as_rvalue.rs @@ -24,12 +24,13 @@ use rustc::middle::const_val::ConstVal; use rustc::middle::region::CodeExtent; use rustc::ty; use rustc::mir::*; +use rustc::mir::Block; use syntax::ast; use syntax_pos::Span; impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// See comment on `as_local_operand` - pub fn as_local_rvalue(&mut self, block: BasicBlock, expr: M) + pub fn as_local_rvalue(&mut self, block: Block, expr: M) -> BlockAnd> where M: Mirror<'tcx, Output = Expr<'tcx>> { @@ -38,7 +39,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } /// Compile `expr`, yielding an rvalue. - pub fn as_rvalue(&mut self, block: BasicBlock, scope: Option, expr: M) + pub fn as_rvalue(&mut self, block: Block, scope: Option, expr: M) -> BlockAnd> where M: Mirror<'tcx, Output = Expr<'tcx>> { @@ -47,7 +48,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } fn expr_as_rvalue(&mut self, - mut block: BasicBlock, + mut block: Block, scope: Option, expr: Expr<'tcx>) -> BlockAnd> { @@ -88,8 +89,16 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { Rvalue::BinaryOp(BinOp::Eq, arg.clone(), minval)); let err = ConstMathErr::Overflow(Op::Neg); - block = this.assert(block, Operand::Consume(is_min), false, - AssertMessage::Math(err), expr_span); + let stmt = Statement { + source_info: source_info, + kind: StatementKind::Assert { + cond: Operand::Consume(is_min), + expected: false, + msg: AssertMessage::Math(err), + cleanup: this.diverge_cleanup(), + }, + }; + this.cfg.push(block, stmt); } block.and(Rvalue::UnaryOp(op, arg)) } @@ -253,7 +262,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } } - pub fn build_binary_op(&mut self, mut block: BasicBlock, + pub fn build_binary_op(&mut self, block: Block, op: BinOp, span: Span, ty: ty::Ty<'tcx>, lhs: Operand<'tcx>, rhs: Operand<'tcx>) -> BlockAnd> { let source_info = self.source_info(span); @@ -283,9 +292,16 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } }); - block = self.assert(block, Operand::Consume(of), false, - AssertMessage::Math(err), span); - + let stmt = Statement { + source_info: source_info, + kind: StatementKind::Assert { + cond: Operand::Consume(of), + expected: false, + msg: AssertMessage::Math(err), + cleanup: self.diverge_cleanup(), + }, + }; + self.cfg.push(block, stmt); block.and(Rvalue::Use(Operand::Consume(val))) } else { if ty.is_integral() && (op == BinOp::Div || op == BinOp::Rem) { @@ -306,8 +322,16 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { self.cfg.push_assign(block, source_info, &is_zero, Rvalue::BinaryOp(BinOp::Eq, rhs.clone(), zero)); - block = self.assert(block, Operand::Consume(is_zero), false, - AssertMessage::Math(zero_err), span); + let stmt = Statement { + source_info: source_info, + kind: StatementKind::Assert { + cond: Operand::Consume(is_zero), + expected: false, + msg: AssertMessage::Math(zero_err), + cleanup: self.diverge_cleanup(), + }, + }; + self.cfg.push(block, stmt); // We only need to check for the overflow in one case: // MIN / -1, and only for signed values. 
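The hunks above replace the old `Builder::assert` helper, which ended the current block with an `Assert` terminator and opened a fresh success block, with an assertion emitted in place. A minimal sketch of the new pattern, with `cond`, `err` and `source_info` standing in for the values computed by the surrounding builder code:

    // Push the check as a statement; `block` stays the current block and no
    // success block is created. Unwinding goes through the cached
    // diverge-cleanup chain, if any.
    let stmt = Statement {
        source_info: source_info,
        kind: StatementKind::Assert {
            cond: Operand::Consume(cond),
            expected: false,
            msg: AssertMessage::Math(err),
            cleanup: this.diverge_cleanup(),
        },
    };
    this.cfg.push(block, stmt);

The same shape is used for the bounds check in `as_lvalue.rs` (with `expected: true` and a bounds-check message) and for the division, remainder and overflow checks in `as_rvalue.rs`.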
@@ -331,8 +355,16 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { self.cfg.push_assign(block, source_info, &of, Rvalue::BinaryOp(BinOp::BitAnd, is_neg_1, is_min)); - block = self.assert(block, Operand::Consume(of), false, - AssertMessage::Math(overflow_err), span); + let stmt = Statement { + source_info: source_info, + kind: StatementKind::Assert { + cond: Operand::Consume(of), + expected: false, + msg: AssertMessage::Math(overflow_err), + cleanup: self.diverge_cleanup(), + }, + }; + self.cfg.push(block, stmt); } } diff --git a/src/librustc_mir/build/expr/as_temp.rs b/src/librustc_mir/build/expr/as_temp.rs index 42d9ab4d2bf27..132c5a3453112 100644 --- a/src/librustc_mir/build/expr/as_temp.rs +++ b/src/librustc_mir/build/expr/as_temp.rs @@ -15,12 +15,13 @@ use build::expr::category::Category; use hair::*; use rustc::middle::region::CodeExtent; use rustc::mir::*; +use rustc::mir::Block; impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Compile `expr` into a fresh temporary. This is used when building /// up rvalues so as to freeze the value that will be consumed. pub fn as_temp(&mut self, - block: BasicBlock, + block: Block, temp_lifetime: Option, expr: M) -> BlockAnd> @@ -31,7 +32,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } fn expr_as_temp(&mut self, - mut block: BasicBlock, + mut block: Block, temp_lifetime: Option, expr: Expr<'tcx>) -> BlockAnd> { diff --git a/src/librustc_mir/build/expr/into.rs b/src/librustc_mir/build/expr/into.rs index e1b0c6a6f042e..4ba9a61129a46 100644 --- a/src/librustc_mir/build/expr/into.rs +++ b/src/librustc_mir/build/expr/into.rs @@ -15,13 +15,14 @@ use build::expr::category::{Category, RvalueFunc}; use hair::*; use rustc::ty; use rustc::mir::*; +use rustc::mir::Block; impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Compile `expr`, storing the result into `destination`, which /// is assumed to be uninitialized. 
pub fn into_expr(&mut self, destination: &Lvalue<'tcx>, - mut block: BasicBlock, + mut block: Block, expr: Expr<'tcx>) -> BlockAnd<()> { @@ -211,14 +212,14 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { exit_block.unit() } ExprKind::Call { ty, fun, args } => { + let fun = unpack!(block = this.as_local_operand(block, fun)); let diverges = match ty.sty { ty::TyFnDef(_, _, ref f) | ty::TyFnPtr(ref f) => { // FIXME(canndrew): This is_never should probably be an is_uninhabited f.output().skip_binder().is_never() } - _ => false + _ => false, }; - let fun = unpack!(block = this.as_local_operand(block, fun)); let args: Vec<_> = args.into_iter() .map(|arg| unpack!(block = this.as_local_operand(block, arg))) @@ -226,16 +227,22 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let success = this.cfg.start_new_block(); let cleanup = this.diverge_cleanup(); - this.cfg.terminate(block, source_info, TerminatorKind::Call { - func: fun, - args: args, - cleanup: cleanup, - destination: if diverges { - None - } else { - Some ((destination.clone(), success)) + this.cfg.push(block, Statement { + source_info: source_info, + kind: StatementKind::Call { + func: fun, + args: args, + cleanup: cleanup, + destination: destination.clone(), } }); + if diverges { + this.cfg.terminate(block, source_info, TerminatorKind::Unreachable); + } else { + this.cfg.terminate(block, source_info, TerminatorKind::Goto { + target: success + }); + } success.unit() } diff --git a/src/librustc_mir/build/expr/stmt.rs b/src/librustc_mir/build/expr/stmt.rs index 7336da654c184..ff46ac462e398 100644 --- a/src/librustc_mir/build/expr/stmt.rs +++ b/src/librustc_mir/build/expr/stmt.rs @@ -12,10 +12,11 @@ use build::{BlockAnd, BlockAndExtension, Builder}; use build::scope::BreakableScope; use hair::*; use rustc::mir::*; +use rustc::mir::Block; impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { - pub fn stmt_expr(&mut self, mut block: BasicBlock, expr: Expr<'tcx>) -> BlockAnd<()> { + pub fn stmt_expr(&mut self, mut block: Block, expr: Expr<'tcx>) -> BlockAnd<()> { let this = self; let expr_span = expr.span; let source_info = this.source_info(expr.span); diff --git a/src/librustc_mir/build/into.rs b/src/librustc_mir/build/into.rs index 5c133780e433b..14c8218caabd7 100644 --- a/src/librustc_mir/build/into.rs +++ b/src/librustc_mir/build/into.rs @@ -17,19 +17,20 @@ use build::{BlockAnd, Builder}; use hair::*; use rustc::mir::*; +use rustc::mir::Block; pub trait EvalInto<'tcx> { fn eval_into<'a, 'gcx>(self, builder: &mut Builder<'a, 'gcx, 'tcx>, destination: &Lvalue<'tcx>, - block: BasicBlock) + block: Block) -> BlockAnd<()>; } impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub fn into(&mut self, destination: &Lvalue<'tcx>, - block: BasicBlock, + block: Block, expr: E) -> BlockAnd<()> where E: EvalInto<'tcx> @@ -42,7 +43,7 @@ impl<'tcx> EvalInto<'tcx> for ExprRef<'tcx> { fn eval_into<'a, 'gcx>(self, builder: &mut Builder<'a, 'gcx, 'tcx>, destination: &Lvalue<'tcx>, - block: BasicBlock) + block: Block) -> BlockAnd<()> { let expr = builder.hir.mirror(self); builder.into_expr(destination, block, expr) @@ -53,7 +54,7 @@ impl<'tcx> EvalInto<'tcx> for Expr<'tcx> { fn eval_into<'a, 'gcx>(self, builder: &mut Builder<'a, 'gcx, 'tcx>, destination: &Lvalue<'tcx>, - block: BasicBlock) + block: Block) -> BlockAnd<()> { builder.into_expr(destination, block, self) } diff --git a/src/librustc_mir/build/matches/mod.rs b/src/librustc_mir/build/matches/mod.rs index 705eb1f56608e..95fff7b849401 100644 --- a/src/librustc_mir/build/matches/mod.rs +++ 
b/src/librustc_mir/build/matches/mod.rs @@ -19,6 +19,7 @@ use rustc_data_structures::bitvec::BitVector; use rustc::middle::const_val::ConstVal; use rustc::ty::{AdtDef, Ty}; use rustc::mir::*; +use rustc::mir::Block; use rustc::hir; use hair::*; use syntax::ast::{Name, NodeId}; @@ -33,7 +34,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub fn match_expr(&mut self, destination: &Lvalue<'tcx>, span: Span, - mut block: BasicBlock, + mut block: Block, discriminant: ExprRef<'tcx>, arms: Vec>) -> BlockAnd<()> { @@ -114,7 +115,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } pub fn expr_into_pattern(&mut self, - mut block: BasicBlock, + mut block: Block, irrefutable_pat: Pattern<'tcx>, initializer: ExprRef<'tcx>) -> BlockAnd<()> { @@ -136,7 +137,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } pub fn lvalue_into_pattern(&mut self, - mut block: BasicBlock, + mut block: Block, irrefutable_pat: Pattern<'tcx>, initializer: &Lvalue<'tcx>) -> BlockAnd<()> { @@ -187,7 +188,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { var_scope } - pub fn storage_live_binding(&mut self, block: BasicBlock, var: NodeId, span: Span) + pub fn storage_live_binding(&mut self, block: Block, var: NodeId, span: Span) -> Lvalue<'tcx> { let local_id = self.var_indices[&var]; @@ -241,7 +242,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// List of blocks for each arm (and potentially other metadata in the /// future). struct ArmBlocks { - blocks: Vec, + blocks: Vec, } #[derive(Clone, Debug)] @@ -359,8 +360,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { span: Span, arm_blocks: &mut ArmBlocks, mut candidates: Vec>, - mut block: BasicBlock) - -> Vec + mut block: Block) + -> Vec { debug!("matched_candidate(span={:?}, block={:?}, candidates={:?})", span, block, candidates); @@ -421,8 +422,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { fn join_otherwise_blocks(&mut self, span: Span, - mut otherwise: Vec) - -> BasicBlock + mut otherwise: Vec) + -> Block { let source_info = self.source_info(span); otherwise.sort(); @@ -554,8 +555,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { span: Span, arm_blocks: &mut ArmBlocks, candidates: &[Candidate<'pat, 'tcx>], - block: BasicBlock) - -> (Vec, usize) + block: Block) + -> (Vec, usize) { // extract the match-pair from the highest priority candidate let match_pair = &candidates.first().unwrap().match_pairs[0]; @@ -643,10 +644,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// in turn be detected by the borrowck code that runs on the /// MIR). 
fn bind_and_guard_matched_candidate<'pat>(&mut self, - mut block: BasicBlock, + mut block: Block, arm_blocks: &mut ArmBlocks, candidate: Candidate<'pat, 'tcx>) - -> Option { + -> Option { debug!("bind_and_guard_matched_candidate(block={:?}, candidate={:?})", block, candidate); @@ -675,7 +676,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } fn bind_matched_candidate(&mut self, - block: BasicBlock, + block: Block, bindings: Vec>) { debug!("bind_matched_candidate(block={:?}, bindings={:?})", block, bindings); diff --git a/src/librustc_mir/build/matches/simplify.rs b/src/librustc_mir/build/matches/simplify.rs index efddee2c933f4..7eed3223d5b9d 100644 --- a/src/librustc_mir/build/matches/simplify.rs +++ b/src/librustc_mir/build/matches/simplify.rs @@ -25,14 +25,14 @@ use build::{BlockAnd, BlockAndExtension, Builder}; use build::matches::{Binding, MatchPair, Candidate}; use hair::*; -use rustc::mir::*; +use rustc::mir::Block; use rustc_data_structures::fx::FxHashMap; use std::mem; impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub fn simplify_candidate<'pat>(&mut self, - block: BasicBlock, + block: Block, candidate: &mut Candidate<'pat, 'tcx>) -> BlockAnd<()> { // repeatedly simplify match pairs until fixed point is reached diff --git a/src/librustc_mir/build/matches/test.rs b/src/librustc_mir/build/matches/test.rs index f4fdf8ade900a..7ceb3f192e4f7 100644 --- a/src/librustc_mir/build/matches/test.rs +++ b/src/librustc_mir/build/matches/test.rs @@ -24,6 +24,7 @@ use rustc::middle::const_val::ConstVal; use rustc::ty::{self, Ty}; use rustc::ty::util::IntTypeExt; use rustc::mir::*; +use rustc::mir::Block; use rustc::hir::RangeEnd; use syntax_pos::Span; use std::cmp::Ordering; @@ -176,10 +177,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Generates the code to perform a test. 
pub fn perform_test(&mut self, - block: BasicBlock, + block: Block, lvalue: &Lvalue<'tcx>, test: &Test<'tcx>) - -> Vec { + -> Vec { let source_info = self.source_info(test.span); match test.kind { TestKind::Switch { adt_def, ref variants } => { @@ -305,26 +306,28 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let bool_ty = self.hir.bool_ty(); let eq_result = self.temp(bool_ty); - let eq_block = self.cfg.start_new_block(); let cleanup = self.diverge_cleanup(); - self.cfg.terminate(block, source_info, TerminatorKind::Call { - func: Operand::Constant(Constant { - span: test.span, - ty: mty, - literal: method - }), - args: vec![val, expect], - destination: Some((eq_result.clone(), eq_block)), - cleanup: cleanup, + self.cfg.push(block, Statement { + source_info: source_info, + kind: StatementKind::Call { + func: Operand::Constant(Constant { + span: test.span, + ty: mty, + literal: method + }), + args: vec![val, expect], + destination: eq_result.clone(), + cleanup: cleanup, + }, }); // check the result - let block = self.cfg.start_new_block(); - self.cfg.terminate(eq_block, source_info, + let succ = self.cfg.start_new_block(); + self.cfg.terminate(block, source_info, TerminatorKind::if_(self.hir.tcx(), Operand::Consume(eq_result), - block, fail)); - vec![block, fail] + succ, fail)); + vec![succ, fail] } else { let block = self.compare(block, fail, test.span, BinOp::Eq, expect, val); vec![block, fail] @@ -376,12 +379,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } fn compare(&mut self, - block: BasicBlock, - fail_block: BasicBlock, + block: Block, + fail_block: Block, span: Span, op: BinOp, left: Operand<'tcx>, - right: Operand<'tcx>) -> BasicBlock { + right: Operand<'tcx>) -> Block { let bool_ty = self.hir.bool_ty(); let result = self.temp(bool_ty); diff --git a/src/librustc_mir/build/misc.rs b/src/librustc_mir/build/misc.rs index 99aa5cb0fa86e..3dd9b085cb50d 100644 --- a/src/librustc_mir/build/misc.rs +++ b/src/librustc_mir/build/misc.rs @@ -101,7 +101,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } pub fn push_usize(&mut self, - block: BasicBlock, + block: Block, source_info: SourceInfo, value: u64) -> Lvalue<'tcx> { diff --git a/src/librustc_mir/build/mod.rs b/src/librustc_mir/build/mod.rs index db03a1c68f715..c601020ccfad6 100644 --- a/src/librustc_mir/build/mod.rs +++ b/src/librustc_mir/build/mod.rs @@ -52,13 +52,13 @@ pub struct Builder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { /// cached block with the RESUME terminator; this is created /// when first set of cleanups are built. - cached_resume_block: Option, + cached_resume_block: Option, /// cached block with the RETURN terminator - cached_return_block: Option, + cached_return_block: Option, } struct CFG<'tcx> { - basic_blocks: IndexVec>, + basic_blocks: IndexVec>, } #[derive(Copy, Clone, Debug, PartialEq, Eq)] @@ -82,14 +82,14 @@ impl Idx for ScopeId { /// convenient. 
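The call sites rewritten above (`expr/into.rs` and `matches/test.rs`) follow one recipe: a call is no longer a block terminator carrying its own success target, so `destination` shrinks to a plain lvalue and the control flow that used to be implicit in `TerminatorKind::Call` becomes an ordinary terminator. A hedged sketch of the shape, using the names from the `into.rs` hunk:

    // The call itself is now just a statement in the current block.
    this.cfg.push(block, Statement {
        source_info: source_info,
        kind: StatementKind::Call {
            func: fun,
            args: args,
            destination: destination.clone(), // an lvalue only, no target block
            cleanup: cleanup,
        }
    });
    // Control flow is expressed separately: jump to the success block, or
    // mark the block unreachable for a diverging callee.
    if diverges {
        this.cfg.terminate(block, source_info, TerminatorKind::Unreachable);
    } else {
        this.cfg.terminate(block, source_info, TerminatorKind::Goto { target: success });
    }

In `perform_test` the same split lets the string-equality call and the `if_` switch on its result share a single block with one terminator, instead of the former call-then-branch pair of blocks.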
#[must_use] // if you don't use one of these results, you're leaving a dangling edge -pub struct BlockAnd(BasicBlock, T); +pub struct BlockAnd(Block, T); trait BlockAndExtension { fn and(self, v: T) -> BlockAnd; fn unit(self) -> BlockAnd<()>; } -impl BlockAndExtension for BasicBlock { +impl BlockAndExtension for Block { fn and(self, v: T) -> BlockAnd { BlockAnd(self, v) } @@ -285,7 +285,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } fn args_and_body(&mut self, - mut block: BasicBlock, + mut block: Block, arguments: &[(Ty<'gcx>, Option<&'gcx hir::Pat>)], argument_extent: CodeExtent, ast_body: &'gcx hir::Expr) @@ -348,7 +348,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } } - fn return_block(&mut self) -> BasicBlock { + fn return_block(&mut self) -> Block { match self.cached_return_block { Some(rb) => rb, None => { diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index dd4190a412dac..94bb23555600f 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -136,8 +136,8 @@ pub struct Scope<'tcx> { /// stage. free: Option>, - /// The cache for drop chain on “normal” exit into a particular BasicBlock. - cached_exits: FxHashMap<(BasicBlock, CodeExtent), BasicBlock>, + /// The cache for drop chain on “normal” exit into a particular Block. + cached_exits: FxHashMap<(Block, CodeExtent), Block>, } struct DropData<'tcx> { @@ -157,7 +157,7 @@ enum DropKind { /// contains code to run the current drop and all the preceding /// drops (i.e. those having lower index in Drop’s Scope drop /// array) - cached_block: Option + cached_block: Option }, Storage } @@ -174,7 +174,7 @@ struct FreeData<'tcx> { /// The cached block containing code to run the free. The block will also execute all the drops /// in the scope. - cached_block: Option + cached_block: Option } #[derive(Clone, Debug)] @@ -182,10 +182,10 @@ pub struct BreakableScope<'tcx> { /// Extent of the loop pub extent: CodeExtent, /// Where the body of the loop begins. `None` if block - pub continue_block: Option, + pub continue_block: Option, /// Block to branch into when the loop or block terminates (either by being `break`-en out /// from, or by having its condition to become false) - pub break_block: BasicBlock, + pub break_block: Block, /// The destination of the loop/block expression itself (i.e. where to put the result of a /// `break` expression) pub break_destination: Lvalue<'tcx>, @@ -215,7 +215,7 @@ impl<'tcx> Scope<'tcx> { /// /// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for /// this method to work correctly. - fn cached_block(&self) -> Option { + fn cached_block(&self) -> Option { let mut drops = self.drops.iter().rev().filter_map(|data| { match data.kind { DropKind::Value { cached_block } => Some(cached_block), @@ -248,10 +248,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// /// Returns the might_break attribute of the BreakableScope used. pub fn in_breakable_scope(&mut self, - loop_block: Option, - break_block: BasicBlock, - break_destination: Lvalue<'tcx>, - f: F) -> R + loop_block: Option, + break_block: Block, + break_destination: Lvalue<'tcx>, + f: F) -> R where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> R { let extent = self.topmost_scope(); @@ -270,7 +270,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Convenience wrapper that pushes a scope and then executes `f` /// to build its contents, popping the scope afterwards. 
- pub fn in_scope(&mut self, extent: CodeExtent, mut block: BasicBlock, f: F) -> BlockAnd + pub fn in_scope(&mut self, extent: CodeExtent, mut block: Block, f: F) -> BlockAnd where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd { debug!("in_scope(extent={:?}, block={:?})", extent, block); @@ -303,7 +303,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// match 1-to-1 with `push_scope`. pub fn pop_scope(&mut self, extent: CodeExtent, - mut block: BasicBlock) + mut block: Block) -> BlockAnd<()> { debug!("pop_scope({:?}, {:?})", extent, block); // We need to have `cached_block`s available for all the drops, so we call diverge_cleanup @@ -327,8 +327,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub fn exit_scope(&mut self, span: Span, extent: CodeExtent, - mut block: BasicBlock, - target: BasicBlock) { + mut block: Block, + target: Block) { debug!("exit_scope(extent={:?}, block={:?}, target={:?})", extent, block, target); let scope_count = 1 + self.scopes.iter().rev().position(|scope| scope.extent == extent) .unwrap_or_else(||{ @@ -358,10 +358,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { block, self.arg_count)); if let Some(ref free_data) = scope.free { - let next = self.cfg.start_new_block(); - let free = build_free(self.hir.tcx(), &tmp, free_data, next); - self.cfg.terminate(block, scope.source_info(span), free); - block = next; + self.cfg.push(block, Statement { + source_info: scope.source_info(span), + kind: build_free(self.hir.tcx(), &tmp, free_data) + }); } } } @@ -550,7 +550,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// This path terminates in Resume. Returns the start of the path. /// See module comment for more details. None indicates there’s no /// cleanup to do at this point. - pub fn diverge_cleanup(&mut self) -> Option { + pub fn diverge_cleanup(&mut self) -> Option { if !self.scopes.iter().any(|scope| scope.needs_cleanup) { return None; } @@ -591,7 +591,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Utility function for *non*-scope code to build their own drops pub fn build_drop(&mut self, - block: BasicBlock, + block: Block, span: Span, location: Lvalue<'tcx>, ty: Ty<'tcx>) -> BlockAnd<()> { @@ -612,7 +612,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Utility function for *non*-scope code to build their own drops pub fn build_drop_and_replace(&mut self, - block: BasicBlock, + block: Block, span: Span, location: Lvalue<'tcx>, value: Operand<'tcx>) -> BlockAnd<()> { @@ -628,39 +628,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { }); next_target.unit() } - - /// Create an Assert terminator and return the success block. - /// If the boolean condition operand is not the expected value, - /// a runtime panic will be caused with the given message. - pub fn assert(&mut self, block: BasicBlock, - cond: Operand<'tcx>, - expected: bool, - msg: AssertMessage<'tcx>, - span: Span) - -> BasicBlock { - let source_info = self.source_info(span); - - let success_block = self.cfg.start_new_block(); - let cleanup = self.diverge_cleanup(); - - self.cfg.terminate(block, source_info, - TerminatorKind::Assert { - cond: cond, - expected: expected, - msg: msg, - target: success_block, - cleanup: cleanup - }); - - success_block - } } /// Builds drops for pop_scope and exit_scope. 
fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>, scope: &Scope<'tcx>, earlier_scopes: &[Scope<'tcx>], - mut block: BasicBlock, + mut block: Block, arg_count: usize) -> BlockAnd<()> { let mut iter = scope.drops.iter().rev().peekable(); @@ -711,8 +685,8 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, cfg: &mut CFG<'tcx>, unit_temp: &Lvalue<'tcx>, scope: &mut Scope<'tcx>, - mut target: BasicBlock) - -> BasicBlock + mut target: Block) + -> Block { // Build up the drops in **reverse** order. The end result will // look like: @@ -739,8 +713,12 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, cached_block } else { let into = cfg.start_new_cleanup_block(); + cfg.push(into, Statement { + source_info: source_info(free_data.span), + kind: build_free(tcx, unit_temp, free_data) + }); cfg.terminate(into, source_info(free_data.span), - build_free(tcx, unit_temp, free_data, target)); + TerminatorKind::Goto { target: target }); free_data.cached_block = Some(into); into }; @@ -776,12 +754,11 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, fn build_free<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, unit_temp: &Lvalue<'tcx>, - data: &FreeData<'tcx>, - target: BasicBlock) - -> TerminatorKind<'tcx> { + data: &FreeData<'tcx>) + -> StatementKind<'tcx> { let free_func = tcx.require_lang_item(lang_items::BoxFreeFnLangItem); let substs = tcx.intern_substs(&[Kind::from(data.item_ty)]); - TerminatorKind::Call { + StatementKind::Call { func: Operand::Constant(Constant { span: data.span, ty: tcx.item_type(free_func).subst(tcx, substs), @@ -790,7 +767,7 @@ fn build_free<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, } }), args: vec![Operand::Consume(data.value.clone())], - destination: Some((unit_temp.clone(), target)), + destination: unit_temp.clone(), cleanup: None } } diff --git a/src/librustc_mir/callgraph.rs b/src/librustc_mir/callgraph.rs index 69416289d8e26..1c56217c5dfd3 100644 --- a/src/librustc_mir/callgraph.rs +++ b/src/librustc_mir/callgraph.rs @@ -79,11 +79,11 @@ struct CallVisitor<'a> { } impl<'a, 'tcx> Visitor<'tcx> for CallVisitor<'a> { - fn visit_terminator_kind(&mut self, _block: BasicBlock, - kind: &TerminatorKind<'tcx>, _loc: Location) { - if let TerminatorKind::Call { + fn visit_statement(&mut self, _block: Block, + statement: &Statement<'tcx>, _loc: Location) { + if let StatementKind::Call { func: Operand::Constant(ref f) - , .. } = *kind { + , .. 
} = statement.kind { if let ty::TyFnDef(def_id, _, _) = f.ty.sty { let callee = self.graph.add_node(def_id); self.graph.graph.add_edge(self.caller, callee, ()); diff --git a/src/librustc_mir/shim.rs b/src/librustc_mir/shim.rs index 63d20be88feee..8516983eee446 100644 --- a/src/librustc_mir/shim.rs +++ b/src/librustc_mir/shim.rs @@ -169,10 +169,10 @@ fn build_drop_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, let source_info = SourceInfo { span, scope: ARGUMENT_VISIBILITY_SCOPE }; - let return_block = BasicBlock::new(1); + let return_block = Block::new(1); let mut blocks = IndexVec::new(); let block = |blocks: &mut IndexVec<_, _>, kind| { - blocks.push(BasicBlockData { + blocks.push(BlockData { statements: vec![], terminator: Some(Terminator { source_info, kind }), is_cleanup: false @@ -360,31 +360,38 @@ fn build_call_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, let mut blocks = IndexVec::new(); let block = |blocks: &mut IndexVec<_, _>, statements, kind, is_cleanup| { - blocks.push(BasicBlockData { + blocks.push(BlockData { statements, terminator: Some(Terminator { source_info, kind }), is_cleanup }) }; + statements.push(Statement { + source_info: source_info, + kind: StatementKind::Call { + func: callee, + args: args, + destination: Lvalue::Local(RETURN_POINTER), + cleanup: if let Adjustment::RefMut = rcvr_adjustment { + Some(Block::new(3)) + } else { + None + } + }, + }); + // BB #0 - block(&mut blocks, statements, TerminatorKind::Call { - func: callee, - args: args, - destination: Some((Lvalue::Local(RETURN_POINTER), - BasicBlock::new(1))), - cleanup: if let Adjustment::RefMut = rcvr_adjustment { - Some(BasicBlock::new(3)) - } else { - None - } + // FIXME: This block shouldn't be necessary. + block(&mut blocks, statements, TerminatorKind::Goto { + target: Block::new(1) }, false); if let Adjustment::RefMut = rcvr_adjustment { // BB #1 - drop for Self block(&mut blocks, vec![], TerminatorKind::Drop { location: Lvalue::Local(rcvr_arg), - target: BasicBlock::new(2), + target: Block::new(2), unwind: None }, false); } @@ -394,7 +401,7 @@ fn build_call_shim<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, // BB #3 - drop if closure panics block(&mut blocks, vec![], TerminatorKind::Drop { location: Lvalue::Local(rcvr_arg), - target: BasicBlock::new(4), + target: Block::new(4), unwind: None }, true); @@ -456,7 +463,7 @@ pub fn build_adt_ctor<'a, 'gcx, 'tcx>(infcx: &infer::InferCtxt<'a, 'gcx, 'tcx>, }; // return = ADT(arg0, arg1, ...); return - let start_block = BasicBlockData { + let start_block = BlockData { statements: vec![Statement { source_info: source_info, kind: StatementKind::Assign( diff --git a/src/librustc_mir/transform/add_call_guards.rs b/src/librustc_mir/transform/add_call_guards.rs index 80b17c6a008f5..7504eb005afc2 100644 --- a/src/librustc_mir/transform/add_call_guards.rs +++ b/src/librustc_mir/transform/add_call_guards.rs @@ -11,7 +11,7 @@ use rustc::ty::TyCtxt; use rustc::mir::*; use rustc::mir::transform::{MirPass, MirSource, Pass}; -use rustc_data_structures::indexed_vec::{Idx, IndexVec}; +use rustc_data_structures::indexed_vec::Idx; pub struct AddCallGuards; @@ -42,40 +42,74 @@ impl<'tcx> MirPass<'tcx> for AddCallGuards { } pub fn add_call_guards(mir: &mut Mir) { - let pred_count: IndexVec<_, _> = - mir.predecessors().iter().map(|ps| ps.len()).collect(); - // We need a place to store the new blocks generated let mut new_blocks = Vec::new(); let cur_len = mir.basic_blocks().len(); for block in mir.basic_blocks_mut() { - match block.terminator { - Some(Terminator { - kind: 
TerminatorKind::Call { - destination: Some((_, ref mut destination)), - cleanup: Some(_), - .. - }, source_info - }) if pred_count[*destination] > 1 => { - // It's a critical edge, break it - let call_guard = BasicBlockData { - statements: vec![], - is_cleanup: block.is_cleanup, - terminator: Some(Terminator { - source_info: source_info, - kind: TerminatorKind::Goto { target: *destination } - }) - }; - - // Get the index it will be when inserted into the MIR - let idx = cur_len + new_blocks.len(); - new_blocks.push(call_guard); - *destination = BasicBlock::new(idx); + // Call statement indices, since the last call. + let mut calls = Vec::new(); + // Iterate in reverse to allow draining from the end of statements, not the middle + for i in (0..block.statements.len()).rev() { + if let StatementKind::Call { .. } = block.statements[i].kind { + calls.push(i); } - _ => {} } + + let first_new_block_idx = cur_len + new_blocks.len(); + let mut new_blocks_iter = Vec::new(); + + debug!("original statements = {:#?}", block.statements); + + let mut is_first = true; + + for &el in calls.iter() { + let after_call = block.statements.split_off(el + 1); + + let next_block_idx = first_new_block_idx + new_blocks_iter.len(); + let terminator = if is_first { + block.terminator.take().expect("invalid terminator state") + } else { + Terminator { + source_info: after_call[0].source_info, + kind: TerminatorKind::Goto { target: Block::new(next_block_idx - 1) } + } + }; + + debug!("cg: statements = {:?}", after_call); + let call_guard = BlockData { + statements: after_call, + is_cleanup: block.is_cleanup, + terminator: Some(terminator) + }; + + new_blocks_iter.push(call_guard); + is_first = false; + } + + debug!("after blocks = {:#?}", new_blocks_iter); + + for bb_data in &new_blocks_iter { + let c = bb_data.statements.iter().filter(|stmt| { + match stmt.kind { + StatementKind::Call { .. } => true, + _ => false, + } + }).count(); + assert!(c <= 1, "{} calls in {:?}", c, bb_data); + } + + if !new_blocks_iter.is_empty() { + block.terminator = Some(Terminator { + source_info: new_blocks_iter[0].terminator().source_info, + kind: TerminatorKind::Goto { + target: Block::new(first_new_block_idx + new_blocks_iter.len() - 1) + } + }); + } + + new_blocks.extend(new_blocks_iter); } debug!("Broke {} N edges", new_blocks.len()); diff --git a/src/librustc_mir/transform/inline.rs b/src/librustc_mir/transform/inline.rs index 80a9c06f11b28..e577182992e18 100644 --- a/src/librustc_mir/transform/inline.rs +++ b/src/librustc_mir/transform/inline.rs @@ -15,6 +15,7 @@ use rustc::hir::def_id::DefId; use rustc_data_structures::bitvec::BitVector; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc_data_structures::graph; +use rustc_data_structures::control_flow_graph::ControlFlowGraph; use rustc::dep_graph::DepNode; use rustc::mir::*; @@ -111,8 +112,8 @@ struct CallSite<'tcx> { caller: DefId, callee: DefId, substs: &'tcx Substs<'tcx>, - bb: BasicBlock, - location: SourceInfo, + source_info: SourceInfo, + location: Location, } impl<'a, 'tcx> Inliner<'a, 'tcx> { @@ -139,17 +140,18 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { if bb_data.is_cleanup { continue; } // Only consider direct calls to functions - let terminator = bb_data.terminator(); - if let TerminatorKind::Call { - func: Operand::Constant(ref f), .. 
} = terminator.kind { - if let ty::TyFnDef(callee_def_id, substs, _) = f.ty.sty { - callsites.push(CallSite { - caller: def_id, - callee: callee_def_id, - substs: substs, - bb: bb, - location: terminator.source_info - }); + for (i, statement) in bb_data.statements.iter().enumerate().rev() { + if let StatementKind::Call { + func: Operand::Constant(ref f), .. } = statement.kind { + if let ty::TyFnDef(callee_def_id, substs, _) = f.ty.sty { + callsites.push(CallSite { + caller: def_id, + callee: callee_def_id, + substs: substs, + source_info: statement.source_info, + location: Location { block: bb, statement_index: i } + }); + } } } } @@ -217,19 +219,20 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { // Add callsites from inlined function for (bb, bb_data) in caller_mir.basic_blocks().iter_enumerated().skip(start) { // Only consider direct calls to functions - let terminator = bb_data.terminator(); - if let TerminatorKind::Call { - func: Operand::Constant(ref f), .. } = terminator.kind { - if let ty::TyFnDef(callee_def_id, substs, _) = f.ty.sty { - // Don't inline the same function multiple times. - if callsite.callee != callee_def_id { - callsites.push(CallSite { - caller: callsite.caller, - callee: callee_def_id, - substs: substs, - bb: bb, - location: terminator.source_info - }); + for (i, statement) in bb_data.statements.iter().enumerate().rev() { + if let StatementKind::Call { + func: Operand::Constant(ref f), .. } = statement.kind { + if let ty::TyFnDef(callee_def_id, substs, _) = f.ty.sty { + // Don't inline the same function multiple times. + if callsite.callee != callee_def_id { + callsites.push(CallSite { + caller: callsite.caller, + callee: callee_def_id, + substs: substs, + source_info: statement.source_info, + location: Location { block: bb, statement_index: i } + }); + } } } } @@ -342,7 +345,26 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { match stmt.kind { StatementKind::StorageLive(_) | StatementKind::StorageDead(_) | - StatementKind::Nop => {} + StatementKind::Nop => {}, + StatementKind::Assert { .. } => cost += CALL_PENALTY - INSTR_COST, + StatementKind::Call { ref func, .. } => { + if first_block && + func.ty(&callee_mir, tcx).fn_ret().skip_binder().is_never() { + threshold = 0; + } + + if let Operand::Constant(ref f) = *func { + if let ty::TyFnDef(.., f) = f.ty.sty { + // Don't give intrinsics the extra penalty for calls + if f.abi() == Abi::RustIntrinsic || + f.abi() == Abi::PlatformIntrinsic { + cost += INSTR_COST; + } else { + cost += CALL_PENALTY; + } + } + } + } _ => cost += INSTR_COST } } @@ -367,29 +389,17 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { } } - TerminatorKind::Unreachable | - TerminatorKind::Call { destination: None, .. } if first_block => { + TerminatorKind::Unreachable if first_block => { // If the function always diverges, don't inline // unless the cost is zero threshold = 0; } - TerminatorKind::Call {func: Operand::Constant(ref f), .. } => { - if let ty::TyFnDef(.., f) = f.ty.sty { - // Don't give intrinsics the extra penalty for calls - if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic { - cost += INSTR_COST; - } else { - cost += CALL_PENALTY; - } - } - } - TerminatorKind::Assert { .. } => cost += CALL_PENALTY, _ => cost += INSTR_COST } if !is_drop { - for &succ in &term.successors()[..] 
{ + for succ in ControlFlowGraph::successors(callee_mir, bb) { work_list.push(succ); } } @@ -425,19 +435,30 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { fn inline_call(&self, callsite: CallSite<'tcx>, - caller_mir: &mut Mir<'tcx>, mut callee_mir: Mir<'tcx>) -> bool { + caller_mir: &mut Mir<'tcx>, mut callee_mir: Mir<'tcx>) -> bool { // Don't inline a function into itself if callsite.caller == callsite.callee { return false; } let _task = self.tcx.dep_graph.in_task(DepNode::Mir(callsite.caller)); + match caller_mir[callsite.location.block] + .statements[callsite.location.statement_index].kind { + StatementKind::Call { .. } => {}, + _ => return false, + } - let terminator = caller_mir[callsite.bb].terminator.take().unwrap(); - match terminator.kind { - // FIXME: Handle inlining of diverging calls - TerminatorKind::Call { args, destination: Some(destination), cleanup, .. } => { + let statements_after = caller_mir[callsite.location.block].statements.split_off( + callsite.location.statement_index + 1 + ); + let mut after_block = BlockData::new(caller_mir[callsite.location.block].terminator.take()); + after_block.statements = statements_after; + let new_block_idx = caller_mir.basic_blocks_mut().push(after_block); + let statement = caller_mir[callsite.location.block].statements.pop().unwrap(); + match statement.kind { + // FIXME: Handle inlining of diverging calls + StatementKind::Call { args, destination, cleanup, .. } => { debug!("Inlined {:?} into {:?}", callsite.callee, callsite.caller); let is_box_free = Some(callsite.callee) == self.tcx.lang_items.box_free_fn(); @@ -448,11 +469,11 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { for mut scope in callee_mir.visibility_scopes.iter().cloned() { if scope.parent_scope.is_none() { - scope.parent_scope = Some(callsite.location.scope); + scope.parent_scope = Some(callsite.source_info.scope); scope.span = callee_mir.span; } - scope.span = callsite.location.span; + scope.span = callsite.source_info.span; let idx = caller_mir.visibility_scopes.push(scope); scope_map.push(idx); @@ -463,8 +484,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { if let Some(ref mut source_info) = local.source_info { source_info.scope = scope_map[source_info.scope]; - - source_info.span = callsite.location.span; + source_info.span = callsite.source_info.span; } let idx = caller_mir.local_decls.push(local); @@ -497,12 +517,12 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { } } - let dest = if dest_needs_borrow(&destination.0) { + let dest = if dest_needs_borrow(&destination) { debug!("Creating temp for return destination"); let dest = Rvalue::Ref( self.tcx.mk_region(ty::ReErased), BorrowKind::Mut, - destination.0); + destination); let ty = dest.ty(caller_mir, self.tcx); @@ -512,18 +532,15 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { let tmp = Lvalue::Local(tmp); let stmt = Statement { - source_info: callsite.location, - kind: StatementKind::Assign(tmp.clone(), dest) + source_info: callsite.source_info, + kind: StatementKind::Assign(tmp.clone(), dest), }; - caller_mir[callsite.bb] - .statements.push(stmt); - tmp.deref() + caller_mir[callsite.location.block].statements.push(stmt); + tmp } else { - destination.0 + destination }; - let return_block = destination.1; - let args : Vec<_> = if is_box_free { assert!(args.len() == 1); // box_free takes a Box, but is defined with a *mut T, inlining @@ -552,33 +569,24 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { promoted_map: promoted_map, _callsite: callsite, destination: dest, - return_block: return_block, + return_block: new_block_idx, cleanup_block: cleanup, in_cleanup_block: false 
}; - for (bb, mut block) in callee_mir.basic_blocks_mut().drain_enumerated(..) { integrator.visit_basic_block_data(bb, &mut block); caller_mir.basic_blocks_mut().push(block); } - let terminator = Terminator { - source_info: callsite.location, - kind: TerminatorKind::Goto { target: BasicBlock::new(bb_len) } - }; - - caller_mir[callsite.bb].terminator = Some(terminator); + caller_mir[callsite.location.block].terminator = Some(Terminator { + source_info: callsite.source_info, + kind: TerminatorKind::Goto { target: Block::new(bb_len) } + }); true } - kind => { - caller_mir[callsite.bb].terminator = Some(Terminator { - source_info: terminator.source_info, - kind: kind - }); - false - } + _ => bug!() } } @@ -595,11 +603,11 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { let ref_tmp = Lvalue::Local(ref_tmp); let ref_stmt = Statement { - source_info: callsite.location, + source_info: callsite.source_info, kind: StatementKind::Assign(ref_tmp.clone(), arg) }; - caller_mir[callsite.bb] + caller_mir[callsite.location.block] .statements.push(ref_stmt); let pointee_ty = match ptr_ty.sty { @@ -616,11 +624,11 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { let cast_tmp = Lvalue::Local(cast_tmp); let cast_stmt = Statement { - source_info: callsite.location, + source_info: callsite.source_info, kind: StatementKind::Assign(cast_tmp.clone(), raw_ptr) }; - caller_mir[callsite.bb] + caller_mir[callsite.location.block] .statements.push(cast_stmt); Operand::Consume(cast_tmp) @@ -650,10 +658,10 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { let arg_tmp = Lvalue::Local(arg_tmp); let stmt = Statement { - source_info: callsite.location, + source_info: callsite.source_info, kind: StatementKind::Assign(arg_tmp.clone(), arg) }; - caller_mir[callsite.bb].statements.push(stmt); + caller_mir[callsite.location.block].statements.push(stmt); Operand::Consume(arg_tmp) }).collect() } @@ -683,14 +691,14 @@ struct Integrator<'a, 'tcx: 'a> { promoted_map: IndexVec, _callsite: CallSite<'tcx>, destination: Lvalue<'tcx>, - return_block: BasicBlock, - cleanup_block: Option, + return_block: Block, + cleanup_block: Option, in_cleanup_block: bool, } impl<'a, 'tcx> Integrator<'a, 'tcx> { - fn update_target(&self, tgt: BasicBlock) -> BasicBlock { - let new = BasicBlock::new(tgt.index() + self.block_idx); + fn update_target(&self, tgt: Block) -> Block { + let new = Block::new(tgt.index() + self.block_idx); debug!("Updating target `{:?}`, new: `{:?}`", tgt, new); new } @@ -756,13 +764,13 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> { self.super_operand(operand, location); } - fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) { + fn visit_basic_block_data(&mut self, block: Block, data: &mut BlockData<'tcx>) { self.in_cleanup_block = data.is_cleanup; self.super_basic_block_data(block, data); self.in_cleanup_block = false; } - fn visit_terminator_kind(&mut self, block: BasicBlock, + fn visit_terminator_kind(&mut self, block: Block, kind: &mut TerminatorKind<'tcx>, loc: Location) { self.super_terminator_kind(block, kind, loc); @@ -786,20 +794,35 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> { *unwind = self.cleanup_block; } } - TerminatorKind::Call { ref mut destination, ref mut cleanup, .. 
} => { - if let Some((_, ref mut tgt)) = *destination { - *tgt = self.update_target(*tgt); - } - if let Some(tgt) = *cleanup { - *cleanup = Some(self.update_target(tgt)); - } else if !self.in_cleanup_block { - // Unless this call is in a cleanup block, add an unwind edge to - // the orignal call's cleanup block - *cleanup = self.cleanup_block; + TerminatorKind::Return => { + *kind = TerminatorKind::Goto { target: self.return_block }; + } + TerminatorKind::Resume => { + if let Some(tgt) = self.cleanup_block { + *kind = TerminatorKind::Goto { target: tgt } } } - TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => { - *target = self.update_target(*target); + TerminatorKind::Unreachable => { } + } + } + + fn visit_statement( + &mut self, + block: Block, + statement: &mut Statement<'tcx>, + location: Location + ) { + self.super_statement(block, statement, location); + + match statement.kind { + StatementKind::Assign(..) | + StatementKind::SetDiscriminant { .. } | + StatementKind::StorageLive(..) | + StatementKind::StorageDead(..) | + StatementKind::InlineAsm { .. } | + StatementKind::Nop => {}, + StatementKind::Assert { ref mut cleanup, .. } | + StatementKind::Call { ref mut cleanup, .. } => { if let Some(tgt) = *cleanup { *cleanup = Some(self.update_target(tgt)); } else if !self.in_cleanup_block { @@ -808,15 +831,6 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> { *cleanup = self.cleanup_block; } } - TerminatorKind::Return => { - *kind = TerminatorKind::Goto { target: self.return_block }; - } - TerminatorKind::Resume => { - if let Some(tgt) = self.cleanup_block { - *kind = TerminatorKind::Goto { target: tgt } - } - } - TerminatorKind::Unreachable => { } } } diff --git a/src/librustc_mir/transform/no_landing_pads.rs b/src/librustc_mir/transform/no_landing_pads.rs index 3654ae6940c52..5ef2c7968006f 100644 --- a/src/librustc_mir/transform/no_landing_pads.rs +++ b/src/librustc_mir/transform/no_landing_pads.rs @@ -20,7 +20,7 @@ pub struct NoLandingPads; impl<'tcx> MutVisitor<'tcx> for NoLandingPads { fn visit_terminator(&mut self, - bb: BasicBlock, + bb: Block, terminator: &mut Terminator<'tcx>, location: Location) { match terminator.kind { @@ -31,8 +31,6 @@ impl<'tcx> MutVisitor<'tcx> for NoLandingPads { TerminatorKind::SwitchInt { .. } => { /* nothing to do */ }, - TerminatorKind::Call { cleanup: ref mut unwind, .. } | - TerminatorKind::Assert { cleanup: ref mut unwind, .. } | TerminatorKind::DropAndReplace { ref mut unwind, .. } | TerminatorKind::Drop { ref mut unwind, .. } => { unwind.take(); @@ -40,6 +38,28 @@ impl<'tcx> MutVisitor<'tcx> for NoLandingPads { } self.super_terminator(bb, terminator, location); } + + fn visit_statement( + &mut self, + bb: Block, + statement: &mut Statement<'tcx>, + location: Location) { + match statement.kind { + StatementKind::Assign(..) | + StatementKind::SetDiscriminant { .. } | + StatementKind::StorageLive(..) | + StatementKind::StorageDead(..) | + StatementKind::InlineAsm { .. } | + StatementKind::Nop => { + /* nothing to do */ + }, + StatementKind::Call { ref mut cleanup, .. } | + StatementKind::Assert { ref mut cleanup, .. 
} => { + cleanup.take(); + } + } + self.super_statement(bb, statement, location); + } } pub fn no_landing_pads<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &mut Mir<'tcx>) { diff --git a/src/librustc_mir/transform/promote_consts.rs b/src/librustc_mir/transform/promote_consts.rs index 57cf4b1e8b02b..74d22f812a6e1 100644 --- a/src/librustc_mir/transform/promote_consts.rs +++ b/src/librustc_mir/transform/promote_consts.rs @@ -73,7 +73,7 @@ pub enum Candidate { /// Array of indices found in the third argument of /// a call to one of the simd_shuffleN intrinsics. - ShuffleIndices(BasicBlock) + ShuffleIndices(Location) } struct TempCollector<'tcx> { @@ -159,9 +159,9 @@ struct Promoter<'a, 'tcx: 'a> { } impl<'a, 'tcx> Promoter<'a, 'tcx> { - fn new_block(&mut self) -> BasicBlock { + fn new_block(&mut self) -> Block { let span = self.promoted.span; - self.promoted.basic_blocks_mut().push(BasicBlockData { + self.promoted.basic_blocks_mut().push(BlockData { statements: vec![], terminator: Some(Terminator { source_info: SourceInfo { @@ -174,18 +174,6 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { }) } - fn assign(&mut self, dest: Local, rvalue: Rvalue<'tcx>, span: Span) { - let last = self.promoted.basic_blocks().last().unwrap(); - let data = &mut self.promoted[last]; - data.statements.push(Statement { - source_info: SourceInfo { - span: span, - scope: ARGUMENT_VISIBILITY_SCOPE - }, - kind: StatementKind::Assign(Lvalue::Local(dest), rvalue) - }); - } - /// Copy the initialization of this temp to the /// promoted MIR, recursing through temps. fn promote_temp(&mut self, temp: Local) -> Local { @@ -215,72 +203,46 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { // First, take the Rvalue or Call out of the source MIR, // or duplicate it, depending on keep_original. - if loc.statement_index < no_stmts { - let (mut rvalue, source_info) = { - let statement = &mut self.source[loc.block].statements[loc.statement_index]; - let rhs = match statement.kind { - StatementKind::Assign(_, ref mut rhs) => rhs, - _ => { - span_bug!(statement.source_info.span, "{:?} is not an assignment", - statement); - } - }; - - (if self.keep_original { - rhs.clone() - } else { - let unit = Rvalue::Aggregate(AggregateKind::Tuple, vec![]); - mem::replace(rhs, unit) - }, statement.source_info) - }; - - self.visit_rvalue(&mut rvalue, loc); - self.assign(new_temp, rvalue, source_info.span); + let kind = if self.keep_original { + self.source[loc.block].statements[loc.statement_index].kind.clone() } else { - let terminator = if self.keep_original { - self.source[loc.block].terminator().clone() - } else { - let terminator = self.source[loc.block].terminator_mut(); - let target = match terminator.kind { - TerminatorKind::Call { destination: Some((_, target)), .. } => target, - ref kind => { - span_bug!(terminator.source_info.span, "{:?} not promotable", kind); - } - }; - Terminator { - source_info: terminator.source_info, - kind: mem::replace(&mut terminator.kind, TerminatorKind::Goto { - target: target - }) + mem::replace(&mut self.source[loc.block].statements[loc.statement_index].kind, + StatementKind::Nop) + }; + let kind = match kind { + StatementKind::Assign(_, mut rhs) => { + self.visit_rvalue(&mut rhs, loc); + StatementKind::Assign(Lvalue::Local(new_temp), rhs) + } + StatementKind::Call { mut func, mut args, .. } => { + self.visit_operand(&mut func, loc); + for arg in &mut args { + self.visit_operand(arg, loc); } - }; - - match terminator.kind { - TerminatorKind::Call { mut func, mut args, .. 
} => { - self.visit_operand(&mut func, loc); - for arg in &mut args { - self.visit_operand(arg, loc); - } - let last = self.promoted.basic_blocks().last().unwrap(); - let new_target = self.new_block(); - - *self.promoted[last].terminator_mut() = Terminator { - kind: TerminatorKind::Call { - func: func, - args: args, - cleanup: None, - destination: Some((Lvalue::Local(new_temp), new_target)) - }, - ..terminator - }; + StatementKind::Call { + func: func, + args: args, + cleanup: None, + destination: Lvalue::Local(new_temp), } - ref kind => { - span_bug!(terminator.source_info.span, "{:?} not promotable", kind); - } - }; + } + _ => { + let statement = &self.source[loc.block].statements[loc.statement_index]; + span_bug!(statement.source_info.span, "{:?} not promotable", statement.kind); + } }; + let last = self.promoted.basic_blocks().last().unwrap(); + let data = &mut self.promoted[last]; + data.statements.push(Statement { + source_info: SourceInfo { + span: self.source[loc.block].statements[loc.statement_index].source_info.span, + scope: ARGUMENT_VISIBILITY_SCOPE, + }, + kind: kind + }); + self.keep_original = old_keep_original; new_temp } @@ -304,21 +266,28 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { _ => bug!() } } - Candidate::ShuffleIndices(bb) => { - match self.source[bb].terminator_mut().kind { - TerminatorKind::Call { ref mut args, .. } => { - Rvalue::Use(mem::replace(&mut args[2], new_operand)) - } - _ => bug!() + Candidate::ShuffleIndices(Location { block: bb, statement_index: stmt_idx }) => { + let statement = &mut self.source[bb].statements[stmt_idx]; + if let StatementKind::Call { ref mut args, .. } = statement.kind { + Rvalue::Use(mem::replace(&mut args[2], new_operand)) + } else { + bug!() } } }; self.visit_rvalue(&mut rvalue, Location { - block: BasicBlock::new(0), + block: Block::new(0), statement_index: usize::MAX }); - self.assign(RETURN_POINTER, rvalue, span); + let last = self.promoted.basic_blocks().last().unwrap(); + self.promoted[last].statements.push(Statement { + source_info: SourceInfo { + span: span, + scope: ARGUMENT_VISIBILITY_SCOPE + }, + kind: StatementKind::Assign(Lvalue::Local(RETURN_POINTER), rvalue) + }); self.source.promoted.push(self.promoted); } } @@ -350,31 +319,26 @@ pub fn promote_candidates<'a, 'tcx>(mir: &mut Mir<'tcx>, let statement = &mir[bb].statements[stmt_idx]; let dest = match statement.kind { StatementKind::Assign(ref dest, _) => dest, + StatementKind::Nop => { + // Already promoted. + continue; + } _ => { span_bug!(statement.source_info.span, "expected assignment to promote"); } }; - if let Lvalue::Local(index) = *dest { - if temps[index] == TempState::PromotedOut { - // Already promoted. - continue; - } - } (statement.source_info.span, dest.ty(mir, tcx).to_ty(tcx)) } - Candidate::ShuffleIndices(bb) => { - let terminator = mir[bb].terminator(); - let ty = match terminator.kind { - TerminatorKind::Call { ref args, .. } => { - args[2].ty(mir, tcx) - } - _ => { - span_bug!(terminator.source_info.span, - "expected simd_shuffleN call to promote"); - } + Candidate::ShuffleIndices(Location { block: bb, statement_index: stmt_idx }) => { + let statement = &mir[bb].statements[stmt_idx]; + let ty = if let StatementKind::Call { ref args, .. 
} = statement.kind { + args[2].ty(mir, tcx) + } else { + span_bug!(statement.source_info.span, + "expected simd_shuffleN call to promote"); }; - (terminator.source_info.span, ty) + (statement.source_info.span, ty) } }; @@ -406,16 +370,17 @@ pub fn promote_candidates<'a, 'tcx>(mir: &mut Mir<'tcx>, // Eliminate assignments to, and drops of promoted temps. let promoted = |index: Local| temps[index] == TempState::PromotedOut; for block in mir.basic_blocks_mut() { - block.statements.retain(|statement| { + for statement in &mut block.statements { match statement.kind { - StatementKind::Assign(Lvalue::Local(index), _) | StatementKind::StorageLive(Lvalue::Local(index)) | StatementKind::StorageDead(Lvalue::Local(index)) => { - !promoted(index) + if promoted(index) { + statement.kind = StatementKind::Nop; + } } - _ => true + _ => {} } - }); + } let terminator = block.terminator_mut(); match terminator.kind { TerminatorKind::Drop { location: Lvalue::Local(index), target, .. } => { diff --git a/src/librustc_mir/transform/qualify_consts.rs b/src/librustc_mir/transform/qualify_consts.rs index ba42804c9262f..ff6dd3be3d606 100644 --- a/src/librustc_mir/transform/qualify_consts.rs +++ b/src/librustc_mir/transform/qualify_consts.rs @@ -376,17 +376,10 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { let target = match mir[bb].terminator().kind { TerminatorKind::Goto { target } | // Drops are considered noops. - TerminatorKind::Drop { target, .. } | - TerminatorKind::Assert { target, .. } | - TerminatorKind::Call { destination: Some((_, target)), .. } => { + TerminatorKind::Drop { target, .. } => { Some(target) } - // Non-terminating calls cannot produce any value. - TerminatorKind::Call { destination: None, .. } => { - return Qualif::empty(); - } - TerminatorKind::SwitchInt {..} | TerminatorKind::DropAndReplace { .. } | TerminatorKind::Resume | @@ -761,113 +754,8 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { } } - fn visit_terminator_kind(&mut self, - bb: BasicBlock, - kind: &TerminatorKind<'tcx>, - location: Location) { - if let TerminatorKind::Call { ref func, ref args, ref destination, .. } = *kind { - self.visit_operand(func, location); - - let fn_ty = func.ty(self.mir, self.tcx); - let (is_shuffle, is_const_fn) = match fn_ty.sty { - ty::TyFnDef(def_id, _, f) => { - (f.abi() == Abi::PlatformIntrinsic && - self.tcx.item_name(def_id).as_str().starts_with("simd_shuffle"), - is_const_fn(self.tcx, def_id)) - } - _ => (false, false) - }; - - for (i, arg) in args.iter().enumerate() { - self.nest(|this| { - this.visit_operand(arg, location); - if is_shuffle && i == 2 && this.mode == Mode::Fn { - let candidate = Candidate::ShuffleIndices(bb); - if !this.qualif.intersects(Qualif::NEVER_PROMOTE) { - this.promotion_candidates.push(candidate); - } else { - span_err!(this.tcx.sess, this.span, E0526, - "shuffle indices are not constant"); - } - } - }); - } - - // Const fn calls. 
- if is_const_fn { - // We are in a const or static initializer, - if self.mode != Mode::Fn && - - // feature-gate is not enabled, - !self.tcx.sess.features.borrow().const_fn && - - // this doesn't come from a crate with the feature-gate enabled, - self.def_id.is_local() && - - // this doesn't come from a macro that has #[allow_internal_unstable] - !self.tcx.sess.codemap().span_allows_unstable(self.span) - { - let mut err = self.tcx.sess.struct_span_err(self.span, - "const fns are an unstable feature"); - help!(&mut err, - "in Nightly builds, add `#![feature(const_fn)]` \ - to the crate attributes to enable"); - err.emit(); - } - } else { - self.qualif = Qualif::NOT_CONST; - if self.mode != Mode::Fn { - // FIXME(#24111) Remove this check when const fn stabilizes - let (msg, note) = if let UnstableFeatures::Disallow = - self.tcx.sess.opts.unstable_features { - (format!("calls in {}s are limited to \ - struct and enum constructors", - self.mode), - Some("a limited form of compile-time function \ - evaluation is available on a nightly \ - compiler via `const fn`")) - } else { - (format!("calls in {}s are limited \ - to constant functions, \ - struct and enum constructors", - self.mode), - None) - }; - let mut err = struct_span_err!(self.tcx.sess, self.span, E0015, "{}", msg); - if let Some(note) = note { - err.span_note(self.span, note); - } - err.emit(); - } - } - - if let Some((ref dest, _)) = *destination { - // Avoid propagating irrelevant callee/argument qualifications. - if self.qualif.intersects(Qualif::CONST_ERROR) { - self.qualif = Qualif::NOT_CONST; - } else { - // Be conservative about the returned value of a const fn. - let tcx = self.tcx; - let ty = dest.ty(self.mir, tcx).to_ty(tcx); - self.qualif = Qualif::empty(); - self.add_type(ty); - - // Let `const fn` transitively have destructors, - // but they do get stopped in `const` or `static`. - if self.mode != Mode::ConstFn { - self.deny_drop(); - } - } - self.assign(dest, location); - } - } else { - // Qualify any operands inside other terminators. - self.super_terminator_kind(bb, kind, location); - } - } - fn visit_assign(&mut self, - _: BasicBlock, + _: Block, dest: &Lvalue<'tcx>, rvalue: &Rvalue<'tcx>, location: Location) { @@ -903,24 +791,120 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { self.span = source_info.span; } - fn visit_statement(&mut self, bb: BasicBlock, statement: &Statement<'tcx>, location: Location) { + fn visit_statement(&mut self, bb: Block, statement: &Statement<'tcx>, location: Location) { self.nest(|this| { this.visit_source_info(&statement.source_info); match statement.kind { StatementKind::Assign(ref lvalue, ref rvalue) => { this.visit_assign(bb, lvalue, rvalue, location); } + StatementKind::Call { ref func, ref args, ref destination, .. 
} => { + this.visit_operand(func, location); + + let fn_ty = func.ty(this.mir, this.tcx); + let (is_shuffle, is_const_fn) = match fn_ty.sty { + ty::TyFnDef(def_id, _, f) => { + (f.abi() == Abi::PlatformIntrinsic && + this.tcx.item_name(def_id).as_str().starts_with("simd_shuffle"), + is_const_fn(this.tcx, def_id)) + } + _ => (false, false) + }; + + for (i, arg) in args.iter().enumerate() { + this.nest(|this| { + this.visit_operand(arg, location); + if is_shuffle && i == 2 && this.mode == Mode::Fn { + let candidate = Candidate::ShuffleIndices(location); + if !this.qualif.intersects(Qualif::NEVER_PROMOTE) { + this.promotion_candidates.push(candidate); + } else { + span_err!(this.tcx.sess, this.span, E0526, + "shuffle indices are not constant"); + } + } + }); + } + + // Const fn calls. + if is_const_fn { + // We are in a const or static initializer, + if this.mode != Mode::Fn && + + // feature-gate is not enabled, + !this.tcx.sess.features.borrow().const_fn && + + // this doesn't come from a crate with the feature-gate enabled, + this.def_id.is_local() && + + // this doesn't come from a macro that has #[allow_internal_unstable] + !this.tcx.sess.codemap().span_allows_unstable(this.span) + { + let mut err = this.tcx.sess.struct_span_err(this.span, + "const fns are an unstable feature"); + help!(&mut err, + "in Nightly builds, add `#![feature(const_fn)]` \ + to the crate attributes to enable"); + err.emit(); + } + } else { + this.qualif = Qualif::NOT_CONST; + if this.mode != Mode::Fn { + // FIXME(#24111) Remove this check when const fn stabilizes + let (msg, note) = if let UnstableFeatures::Disallow = + this.tcx.sess.opts.unstable_features { + (format!("calls in {}s are limited to \ + struct and enum constructors", + this.mode), + Some("a limited form of compile-time function \ + evaluation is available on a nightly \ + compiler via `const fn`")) + } else { + (format!("calls in {}s are limited \ + to constant functions, \ + struct and enum constructors", + this.mode), + None) + }; + let mut err = struct_span_err!(this.tcx.sess, this.span, E0015, "{}", + msg); + if let Some(note) = note { + err.span_note(this.span, note); + } + err.emit(); + } + } + + // Avoid propagating irrelevant callee/argument qualifications. + if this.qualif.intersects(Qualif::CONST_ERROR) { + this.qualif = Qualif::NOT_CONST; + } else { + // Be conservative about the returned value of a const fn. + let tcx = this.tcx; + let ty = destination.ty(this.mir, tcx).to_ty(tcx); + this.qualif = Qualif::empty(); + this.add_type(ty); + + // Let `const fn` transitively have destructors, + // but they do get stopped in `const` or `static`. + if this.mode != Mode::ConstFn { + this.deny_drop(); + } + } + this.assign(destination, location); + } StatementKind::SetDiscriminant { .. } | StatementKind::StorageLive(_) | StatementKind::StorageDead(_) | StatementKind::InlineAsm {..} | + StatementKind::Assert { .. 
} | StatementKind::Nop => {} } }); } fn visit_terminator(&mut self, - bb: BasicBlock, + bb: Block, terminator: &Terminator<'tcx>, location: Location) { self.nest(|this| this.super_terminator(bb, terminator, location)); diff --git a/src/librustc_mir/transform/simplify.rs b/src/librustc_mir/transform/simplify.rs index 0a8f147b21410..cba0102f72d1f 100644 --- a/src/librustc_mir/transform/simplify.rs +++ b/src/librustc_mir/transform/simplify.rs @@ -78,8 +78,8 @@ impl<'l> Pass for SimplifyCfg<'l> { } pub struct CfgSimplifier<'a, 'tcx: 'a> { - basic_blocks: &'a mut IndexVec>, - pred_count: IndexVec + basic_blocks: &'a mut IndexVec>, + pred_count: IndexVec } impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> { @@ -90,11 +90,9 @@ impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> { // dead blocks, which we don't want to. pred_count[START_BLOCK] = 1; - for (_, data) in traversal::preorder(mir) { - if let Some(ref term) = data.terminator { - for &tgt in term.successors().iter() { - pred_count[tgt] += 1; - } + for (bb, _) in traversal::preorder(mir) { + for &tgt in mir.successors_for(bb).iter() { + pred_count[tgt] += 1; } } @@ -110,7 +108,7 @@ impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> { loop { let mut changed = false; - for bb in (0..self.basic_blocks.len()).map(BasicBlock::new) { + for bb in (0..self.basic_blocks.len()).map(Block::new) { if self.pred_count[bb] == 0 { continue } @@ -120,6 +118,8 @@ impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> { let mut terminator = self.basic_blocks[bb].terminator.take() .expect("invalid terminator state"); + // NB: No need to call collapse_goto_chain on statement successors because + // the statements aren't empty, so we cannot collapse blocks. for successor in terminator.successors_mut() { self.collapse_goto_chain(successor, &mut changed); } @@ -146,9 +146,9 @@ impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> { } // Collapse a goto chain starting from `start` - fn collapse_goto_chain(&mut self, start: &mut BasicBlock, changed: &mut bool) { + fn collapse_goto_chain(&mut self, start: &mut Block, changed: &mut bool) { let mut terminator = match self.basic_blocks[*start] { - BasicBlockData { + BlockData { ref statements, terminator: ref mut terminator @ Some(Terminator { kind: TerminatorKind::Goto { .. }, .. @@ -258,10 +258,10 @@ pub fn remove_dead_blocks(mir: &mut Mir) { let basic_blocks = mir.basic_blocks_mut(); let num_blocks = basic_blocks.len(); - let mut replacements : Vec<_> = (0..num_blocks).map(BasicBlock::new).collect(); + let mut replacements : Vec<_> = (0..num_blocks).map(Block::new).collect(); let mut used_blocks = 0; for alive_index in seen.iter() { - replacements[alive_index] = BasicBlock::new(used_blocks); + replacements[alive_index] = Block::new(used_blocks); if alive_index != used_blocks { // Swap the next alive block data with the current available slot. Since alive_index is // non-decreasing this is a valid operation. 
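Because calls and asserts are now statements, a block's outgoing edges are no longer determined by its terminator alone: any statement carrying a cleanup target contributes an unwind edge of its own. That is why the pred_count loop above asks the cache for successors_for(bb) instead of walking only the terminator, and why remove_dead_blocks in the next hunk also has to retarget statement cleanup edges. The following is a minimal, self-contained sketch of that successor computation; the Stmt/Term/BlockData types here are invented stand-ins for illustration, not the compiler's MIR types.

#![allow(dead_code)]

// Toy stand-in for a MIR block index (not rustc's newtype_index! type).
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Block(usize);

// A statement either carries an optional cleanup edge (think Call/Assert)
// or does not (Assign, StorageLive, Nop, ...).
enum Stmt {
    WithCleanup(Option<Block>),
    Plain,
}

enum Term {
    Goto(Block),
    Switch(Vec<Block>),
    Return,
}

struct BlockData {
    statements: Vec<Stmt>,
    terminator: Term,
}

// Successors now come from both statement cleanup edges and the terminator,
// mirroring what calculate_successors in cache.rs does.
fn successors(data: &BlockData) -> Vec<Block> {
    let mut succ = Vec::new();
    for stmt in &data.statements {
        if let Stmt::WithCleanup(Some(tgt)) = *stmt {
            succ.push(tgt);
        }
    }
    match data.terminator {
        Term::Goto(t) => succ.push(t),
        Term::Switch(ref ts) => succ.extend(ts.iter().copied()),
        Term::Return => {}
    }
    succ
}

fn main() {
    let block = BlockData {
        statements: vec![Stmt::WithCleanup(Some(Block(3))), Stmt::Plain],
        terminator: Term::Goto(Block(1)),
    };
    // Predecessor counting (as in CfgSimplifier::new) would walk exactly
    // these edges: the statement's cleanup target and the Goto target.
    assert_eq!(successors(&block), vec![Block(3), Block(1)]);
    println!("successors of the example block: {:?}", successors(&block));
}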
@@ -272,6 +272,12 @@ pub fn remove_dead_blocks(mir: &mut Mir) { basic_blocks.raw.truncate(used_blocks); for block in basic_blocks { + for stmt in &mut block.statements { + if let Some(cleanup) = stmt.cleanup_target_mut() { + *cleanup = replacements[cleanup.index()]; + } + } + for target in block.terminator_mut().successors_mut() { *target = replacements[target.index()]; } @@ -338,7 +344,7 @@ struct LocalUpdater { } impl<'tcx> MutVisitor<'tcx> for LocalUpdater { - fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) { + fn visit_basic_block_data(&mut self, block: Block, data: &mut BlockData<'tcx>) { // Remove unnecessary StorageLive and StorageDead annotations. data.statements.retain(|stmt| { match stmt.kind { diff --git a/src/librustc_mir/transform/simplify_branches.rs b/src/librustc_mir/transform/simplify_branches.rs index 3d5106c4b06f7..9b61899dd3b0a 100644 --- a/src/librustc_mir/transform/simplify_branches.rs +++ b/src/librustc_mir/transform/simplify_branches.rs @@ -11,7 +11,6 @@ //! A pass that simplifies branches when their condition is known. use rustc::ty::TyCtxt; -use rustc::middle::const_val::ConstVal; use rustc::mir::transform::{MirPass, MirSource, Pass}; use rustc::mir::*; @@ -47,13 +46,6 @@ impl<'l, 'tcx> MirPass<'tcx> for SimplifyBranches<'l> { continue } }, - TerminatorKind::Assert { target, cond: Operand::Constant(Constant { - literal: Literal::Value { - value: ConstVal::Bool(cond) - }, .. - }), expected, .. } if cond == expected => { - TerminatorKind::Goto { target: target } - }, _ => continue }; } diff --git a/src/librustc_mir/transform/type_check.rs b/src/librustc_mir/transform/type_check.rs index 3d604affbfea9..2894e25a05183 100644 --- a/src/librustc_mir/transform/type_check.rs +++ b/src/librustc_mir/transform/type_check.rs @@ -397,6 +397,41 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { } } } + StatementKind::Assert { ref cond, ref msg, .. } => { + let cond_ty = cond.ty(mir, tcx); + if cond_ty != tcx.types.bool { + span_mirbug!(self, stmt, "bad Assert ({:?}, not bool", cond_ty); + } + + if let AssertMessage::BoundsCheck { ref len, ref index } = *msg { + if len.ty(mir, tcx) != tcx.types.usize { + span_mirbug!(self, len, "bounds-check length non-usize {:?}", len) + } + if index.ty(mir, tcx) != tcx.types.usize { + span_mirbug!(self, index, "bounds-check index non-usize {:?}", index) + } + } + } + StatementKind::Call { ref func, ref args, ref destination, .. } => { + let func_ty = func.ty(mir, tcx); + debug!("check_stmt: call, func_ty={:?}", func_ty); + let sig = match func_ty.sty { + ty::TyFnDef(.., sig) | ty::TyFnPtr(sig) => sig, + _ => { + span_mirbug!(self, stmt, "call to non-function {:?}", func_ty); + return; + } + }; + let sig = tcx.erase_late_bound_regions(&sig); + let sig = self.normalize(&sig); + self.check_call_dest(mir, stmt, &sig, destination); + + if self.is_box_free(func) { + self.check_box_free_inputs(mir, stmt, &sig, args); + } else { + self.check_call_inputs(mir, stmt, &sig, args); + } + } StatementKind::InlineAsm { .. } | StatementKind::Nop => {} } @@ -442,83 +477,38 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { } // FIXME: check the values } - TerminatorKind::Call { ref func, ref args, ref destination, .. 
} => { - let func_ty = func.ty(mir, tcx); - debug!("check_terminator: call, func_ty={:?}", func_ty); - let sig = match func_ty.sty { - ty::TyFnDef(.., sig) | ty::TyFnPtr(sig) => sig, - _ => { - span_mirbug!(self, term, "call to non-function {:?}", func_ty); - return; - } - }; - let sig = tcx.erase_late_bound_regions(&sig); - let sig = self.normalize(&sig); - self.check_call_dest(mir, term, &sig, destination); - - if self.is_box_free(func) { - self.check_box_free_inputs(mir, term, &sig, args); - } else { - self.check_call_inputs(mir, term, &sig, args); - } - } - TerminatorKind::Assert { ref cond, ref msg, .. } => { - let cond_ty = cond.ty(mir, tcx); - if cond_ty != tcx.types.bool { - span_mirbug!(self, term, "bad Assert ({:?}, not bool", cond_ty); - } - - if let AssertMessage::BoundsCheck { ref len, ref index } = *msg { - if len.ty(mir, tcx) != tcx.types.usize { - span_mirbug!(self, len, "bounds-check length non-usize {:?}", len) - } - if index.ty(mir, tcx) != tcx.types.usize { - span_mirbug!(self, index, "bounds-check index non-usize {:?}", index) - } - } - } } } fn check_call_dest(&mut self, mir: &Mir<'tcx>, - term: &Terminator<'tcx>, + stmt: &Statement<'tcx>, sig: &ty::FnSig<'tcx>, - destination: &Option<(Lvalue<'tcx>, BasicBlock)>) { + destination: &Lvalue<'tcx>) { let tcx = self.tcx(); - match *destination { - Some((ref dest, _)) => { - let dest_ty = dest.ty(mir, tcx).to_ty(tcx); - if let Err(terr) = self.sub_types(sig.output(), dest_ty) { - span_mirbug!(self, term, - "call dest mismatch ({:?} <- {:?}): {:?}", - dest_ty, sig.output(), terr); - } - }, - None => { - // FIXME(canndrew): This is_never should probably be an is_uninhabited - if !sig.output().is_never() { - span_mirbug!(self, term, "call to converging function {:?} w/o dest", sig); - } - }, + let dest_ty = destination.ty(mir, tcx).to_ty(tcx); + if let Err(terr) = self.sub_types(sig.output(), dest_ty) { + span_mirbug!(self, stmt, + "call dest mismatch ({:?} <- {:?}): {:?}", + dest_ty, sig.output(), terr); } } fn check_call_inputs(&mut self, mir: &Mir<'tcx>, - term: &Terminator<'tcx>, + stmt: &Statement<'tcx>, sig: &ty::FnSig<'tcx>, args: &[Operand<'tcx>]) { debug!("check_call_inputs({:?}, {:?})", sig, args); if args.len() < sig.inputs().len() || (args.len() > sig.inputs().len() && !sig.variadic) { - span_mirbug!(self, term, "call to {:?} with wrong # of args", sig); + span_mirbug!(self, stmt, "call to {:?} with wrong # of args", sig); } for (n, (fn_arg, op_arg)) in sig.inputs().iter().zip(args).enumerate() { let op_arg_ty = op_arg.ty(mir, self.tcx()); if let Err(terr) = self.sub_types(op_arg_ty, fn_arg) { - span_mirbug!(self, term, "bad arg #{:?} ({:?} <- {:?}): {:?}", + span_mirbug!(self, stmt, "bad arg #{:?} ({:?} <- {:?}): {:?}", n, fn_arg, op_arg_ty, terr); } } @@ -539,7 +529,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { fn check_box_free_inputs(&mut self, mir: &Mir<'tcx>, - term: &Terminator<'tcx>, + stmt: &Statement<'tcx>, sig: &ty::FnSig<'tcx>, args: &[Operand<'tcx>]) { @@ -548,20 +538,20 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { // box_free takes a Box as a pointer. Allow for that. 
if sig.inputs().len() != 1 { - span_mirbug!(self, term, "box_free should take 1 argument"); + span_mirbug!(self, stmt, "box_free should take 1 argument"); return; } let pointee_ty = match sig.inputs()[0].sty { ty::TyRawPtr(mt) => mt.ty, _ => { - span_mirbug!(self, term, "box_free should take a raw ptr"); + span_mirbug!(self, stmt, "box_free should take a raw ptr"); return; } }; if args.len() != 1 { - span_mirbug!(self, term, "box_free called with wrong # of args"); + span_mirbug!(self, stmt, "box_free called with wrong # of args"); return; } @@ -570,20 +560,36 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { ty::TyRawPtr(mt) => mt.ty, ty::TyAdt(def, _) if def.is_box() => ty.boxed_ty(), _ => { - span_mirbug!(self, term, "box_free called with bad arg ty"); + span_mirbug!(self, stmt, "box_free called with bad arg ty"); return; } }; if let Err(terr) = self.sub_types(arg_ty, pointee_ty) { - span_mirbug!(self, term, "bad box_free arg ({:?} <- {:?}): {:?}", + span_mirbug!(self, stmt, "bad box_free arg ({:?} <- {:?}): {:?}", pointee_ty, arg_ty, terr); } } - fn check_iscleanup(&mut self, mir: &Mir<'tcx>, block: &BasicBlockData<'tcx>) + fn check_iscleanup(&mut self, mir: &Mir<'tcx>, block: &BlockData<'tcx>) { let is_cleanup = block.is_cleanup; + + for stmt in &block.statements { + match stmt.kind { + StatementKind::Assert { cleanup, .. } | + StatementKind::Call { cleanup, .. } => { + if let Some(cleanup) = cleanup { + if is_cleanup { + span_mirbug!(self, block, "unwind on cleanup block"); + } + self.assert_iscleanup(mir, block, cleanup, true); + } + } + _ => {} + } + } + self.last_span = block.terminator().source_info.span; match block.terminator().kind { TerminatorKind::Goto { target } => @@ -605,8 +611,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { } TerminatorKind::Unreachable => {} TerminatorKind::Drop { target, unwind, .. } | - TerminatorKind::DropAndReplace { target, unwind, .. } | - TerminatorKind::Assert { target, cleanup: unwind, .. } => { + TerminatorKind::DropAndReplace { target, unwind, .. } => { self.assert_iscleanup(mir, block, target, is_cleanup); if let Some(unwind) = unwind { if is_cleanup { @@ -615,24 +620,13 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { self.assert_iscleanup(mir, block, unwind, true); } } - TerminatorKind::Call { ref destination, cleanup, .. 
} => { - if let &Some((_, target)) = destination { - self.assert_iscleanup(mir, block, target, is_cleanup); - } - if let Some(cleanup) = cleanup { - if is_cleanup { - span_mirbug!(self, block, "cleanup on cleanup block") - } - self.assert_iscleanup(mir, block, cleanup, true); - } - } } } fn assert_iscleanup(&mut self, mir: &Mir<'tcx>, ctxt: &fmt::Debug, - bb: BasicBlock, + bb: Block, iscleanuppad: bool) { if mir[bb].is_cleanup != iscleanuppad { diff --git a/src/librustc_mir/util/elaborate_drops.rs b/src/librustc_mir/util/elaborate_drops.rs index ccbc6700d89c1..c21cae263f811 100644 --- a/src/librustc_mir/util/elaborate_drops.rs +++ b/src/librustc_mir/util/elaborate_drops.rs @@ -79,8 +79,8 @@ struct DropCtxt<'l, 'b: 'l, 'tcx: 'b, D> lvalue: &'l Lvalue<'tcx>, path: D::Path, - succ: BasicBlock, - unwind: Option, + succ: Block, + unwind: Option, } pub fn elaborate_drop<'b, 'tcx, D>( @@ -89,9 +89,9 @@ pub fn elaborate_drop<'b, 'tcx, D>( is_cleanup: bool, lvalue: &Lvalue<'tcx>, path: D::Path, - succ: BasicBlock, - unwind: Option, - bb: BasicBlock) + succ: Block, + unwind: Option, + bb: Block) where D: DropElaborator<'b, 'tcx> { assert_eq!(unwind.is_none(), is_cleanup); @@ -129,7 +129,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> /// /// FIXME: I think we should just control the flags externally /// and then we do not need this machinery. - pub fn elaborate_drop<'a>(&mut self, bb: BasicBlock) { + pub fn elaborate_drop<'a>(&mut self, bb: Block) { debug!("elaborate_drop({:?})", self); let style = self.elaborator.drop_style(self.path, DropFlagMode::Deep); debug!("elaborate_drop({:?}): live - {:?}", self, style); @@ -192,9 +192,9 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> is_cleanup: bool, lvalue: &Lvalue<'tcx>, path: Option, - succ: BasicBlock, - unwind: Option) - -> BasicBlock + succ: Block, + unwind: Option) + -> Block { if let Some(path) = path { debug!("drop_subpath: for std field {:?}", lvalue); @@ -226,11 +226,11 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> /// (the first field is never reached). If it is `None`, all /// unwind targets are left blank. 
fn drop_halfladder<'a>(&mut self, - unwind_ladder: Option<&[BasicBlock]>, - succ: BasicBlock, + unwind_ladder: Option<&[Block]>, + succ: Block, fields: &[(Lvalue<'tcx>, Option)], is_cleanup: bool) - -> Vec + -> Vec { let mut unwind_succ = if is_cleanup { None @@ -271,7 +271,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> /// ELAB(drop location.2 [target=`self.unwind`]) fn drop_ladder<'a>(&mut self, fields: Vec<(Lvalue<'tcx>, Option)>) - -> (BasicBlock, Option) + -> (Block, Option) { debug!("drop_ladder({:?}, {:?})", self, fields); @@ -301,7 +301,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> } fn open_drop_for_tuple<'a>(&mut self, tys: &[Ty<'tcx>]) - -> BasicBlock + -> Block { debug!("open_drop_for_tuple({:?}, {:?})", self, tys); @@ -313,7 +313,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> self.drop_ladder(fields).0 } - fn open_drop_for_box<'a>(&mut self, ty: Ty<'tcx>) -> BasicBlock + fn open_drop_for_box<'a>(&mut self, ty: Ty<'tcx>) -> Block { debug!("open_drop_for_box({:?}, {:?})", self, ty); @@ -331,10 +331,10 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> } fn open_drop_for_adt<'a>(&mut self, adt: &'tcx ty::AdtDef, substs: &'tcx Substs<'tcx>) - -> BasicBlock { + -> Block { debug!("open_drop_for_adt({:?}, {:?}, {:?})", self, adt, substs); if adt.variants.len() == 0 { - return self.elaborator.patch().new_block(BasicBlockData { + return self.elaborator.patch().new_block(BlockData { statements: vec![], terminator: Some(Terminator { source_info: self.source_info, @@ -359,7 +359,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> fn open_drop_for_adt_contents<'a>(&mut self, adt: &'tcx ty::AdtDef, substs: &'tcx Substs<'tcx>) - -> (BasicBlock, Option) { + -> (Block, Option) { match adt.variants.len() { 1 => { let fields = self.move_paths_for_fields( @@ -462,10 +462,10 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> fn adt_switch_block(&mut self, is_cleanup: bool, adt: &'tcx ty::AdtDef, - blocks: Vec, + blocks: Vec, values: &[ConstInt], - succ: BasicBlock) - -> BasicBlock { + succ: Block) + -> Block { // If there are multiple variants, then if something // is present within the enum the discriminant, tracked // by the rest path, must be initialized. 
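The drop-glue builders in the hunks just below (destructor_call_block and, later, unelaborated_free_block) show the new block shape this PR produces: the destructor or box_free call is pushed as an ordinary statement with its own cleanup edge, and the block is finished with a plain Goto to the success block rather than a TerminatorKind::Call. A rough, self-contained sketch of that shape follows; every name and type in it is invented for illustration and is not the compiler's actual MIR API.

#![allow(dead_code)]

// Toy block index and toy statement/terminator kinds (not rustc's types).
#[derive(Clone, Copy, Debug)]
struct Block(usize);

#[derive(Debug)]
enum Statement {
    // _ref = &mut <value being dropped>
    AssignRef { temp: &'static str, source: &'static str },
    // destination = func(arg), unwinding to `cleanup` on panic
    Call {
        func: &'static str,
        arg: &'static str,
        destination: &'static str,
        cleanup: Option<Block>,
    },
}

#[derive(Debug)]
enum Terminator {
    Goto { target: Block },
}

#[derive(Debug)]
struct BlockData {
    statements: Vec<Statement>,
    terminator: Terminator,
    is_cleanup: bool,
}

// Mirrors the shape built by destructor_call_block: borrow the value,
// call the destructor as a statement, then fall through with Goto.
fn destructor_call_block(succ: Block, unwind: Option<Block>) -> BlockData {
    BlockData {
        statements: vec![
            Statement::AssignRef { temp: "_ref", source: "dropped_lvalue" },
            Statement::Call {
                func: "Drop::drop",
                arg: "_ref",
                destination: "unit_temp",
                cleanup: unwind,
            },
        ],
        // Control leaves through the terminator only if the call returns.
        terminator: Terminator::Goto { target: succ },
        is_cleanup: false,
    }
}

fn main() {
    let block = destructor_call_block(Block(2), Some(Block(7)));
    println!("{:#?}", block);
}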
@@ -476,7 +476,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let discr_ty = adt.repr.discr_type().to_ty(self.tcx()); let discr = Lvalue::Local(self.new_temp(discr_ty)); let discr_rv = Rvalue::Discriminant(self.lvalue.clone()); - let switch_block = self.elaborator.patch().new_block(BasicBlockData { + let switch_block = self.elaborator.patch().new_block(BlockData { statements: vec![ Statement { source_info: self.source_info, @@ -497,8 +497,8 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> self.drop_flag_test_block(is_cleanup, switch_block, succ) } - fn destructor_call_block<'a>(&mut self, (succ, unwind): (BasicBlock, Option)) - -> BasicBlock + fn destructor_call_block<'a>(&mut self, (succ, unwind): (Block, Option)) + -> Block { debug!("destructor_call_block({:?}, {:?})", self, succ); let tcx = self.tcx(); @@ -515,22 +515,26 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let ref_lvalue = self.new_temp(ref_ty); let unit_temp = Lvalue::Local(self.new_temp(tcx.mk_nil())); - self.elaborator.patch().new_block(BasicBlockData { + self.elaborator.patch().new_block(BlockData { statements: vec![Statement { source_info: self.source_info, kind: StatementKind::Assign( Lvalue::Local(ref_lvalue), Rvalue::Ref(re_erased, BorrowKind::Mut, self.lvalue.clone()) ) - }], - terminator: Some(Terminator { - kind: TerminatorKind::Call { + }, + Statement { + source_info: self.source_info, + kind: StatementKind::Call { func: Operand::function_handle(tcx, drop_fn.def_id, substs, self.source_info.span), args: vec![Operand::Consume(Lvalue::Local(ref_lvalue))], - destination: Some((unit_temp, succ)), + destination: unit_temp, cleanup: unwind, - }, + } + }], + terminator: Some(Terminator { + kind: TerminatorKind::Goto { target: succ }, source_info: self.source_info }), is_cleanup: self.is_cleanup, @@ -545,7 +549,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> /// /// This creates a "drop ladder" that drops the needed fields of the /// ADT, both in the success case or if one of the destructors fail. 
- fn open_drop<'a>(&mut self) -> BasicBlock { + fn open_drop<'a>(&mut self) -> Block { let ty = self.lvalue_ty(self.lvalue); let is_cleanup = self.is_cleanup; // FIXME(#6393) let succ = self.succ; @@ -585,7 +589,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> fn complete_drop<'a>(&mut self, is_cleanup: bool, drop_mode: Option, - succ: BasicBlock) -> BasicBlock + succ: Block) -> Block { debug!("complete_drop({:?},{:?})", self, drop_mode); @@ -598,7 +602,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> self.drop_flag_test_block(is_cleanup, drop_block, succ) } - fn elaborated_drop_block<'a>(&mut self) -> BasicBlock { + fn elaborated_drop_block<'a>(&mut self) -> Block { debug!("elaborated_drop_block({:?})", self); let is_cleanup = self.is_cleanup; // FIXME(#6393) let succ = self.succ; @@ -610,9 +614,9 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> fn box_free_block<'a>( &mut self, ty: Ty<'tcx>, - target: BasicBlock, + target: Block, is_cleanup: bool - ) -> BasicBlock { + ) -> Block { let block = self.unelaborated_free_block(ty, target, is_cleanup); self.drop_flag_test_block(is_cleanup, block, target) } @@ -620,28 +624,38 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> fn unelaborated_free_block<'a>( &mut self, ty: Ty<'tcx>, - target: BasicBlock, + target: Block, is_cleanup: bool - ) -> BasicBlock { + ) -> Block { let tcx = self.tcx(); let unit_temp = Lvalue::Local(self.new_temp(tcx.mk_nil())); let free_func = tcx.require_lang_item(lang_items::BoxFreeFnLangItem); let substs = tcx.mk_substs(iter::once(Kind::from(ty))); - let call = TerminatorKind::Call { + let call = StatementKind::Call { func: Operand::function_handle(tcx, free_func, substs, self.source_info.span), args: vec![Operand::Consume(self.lvalue.clone())], - destination: Some((unit_temp, target)), + destination: unit_temp, cleanup: None }; // FIXME(#6393) - let free_block = self.new_block(is_cleanup, call); + let free_block = self.elaborator.patch().new_block(BlockData { + statements: vec![Statement { + kind: call, + source_info: self.source_info, + }], + terminator: Some(Terminator { + source_info: self.source_info, + kind: TerminatorKind::Goto { target: target }, + }), + is_cleanup: is_cleanup + }); let block_start = Location { block: free_block, statement_index: 0 }; self.elaborator.clear_drop_flag(block_start, self.path, DropFlagMode::Shallow); free_block } - fn drop_block<'a>(&mut self, is_cleanup: bool, succ: BasicBlock) -> BasicBlock { + fn drop_block<'a>(&mut self, is_cleanup: bool, succ: Block) -> Block { let block = TerminatorKind::Drop { location: self.lvalue.clone(), target: succ, @@ -652,9 +666,9 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> fn drop_flag_test_block(&mut self, is_cleanup: bool, - on_set: BasicBlock, - on_unset: BasicBlock) - -> BasicBlock + on_set: Block, + on_unset: Block) + -> Block { let style = self.elaborator.drop_style(self.path, DropFlagMode::Shallow); debug!("drop_flag_test_block({:?},{:?},{:?}) - {:?}", @@ -674,9 +688,9 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> fn new_block<'a>(&mut self, is_cleanup: bool, k: TerminatorKind<'tcx>) - -> BasicBlock + -> Block { - self.elaborator.patch().new_block(BasicBlockData { + self.elaborator.patch().new_block(BlockData { statements: vec![], terminator: Some(Terminator { source_info: self.source_info, kind: k @@ -689,7 +703,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> self.elaborator.patch().new_temp(ty) } - fn terminator_loc(&mut self, bb: BasicBlock) -> Location { + fn terminator_loc(&mut self, bb: Block) -> Location { let 
mir = self.elaborator.mir(); self.elaborator.patch().terminator_loc(mir, bb) } diff --git a/src/librustc_mir/util/graphviz.rs b/src/librustc_mir/util/graphviz.rs index 91600b947c610..f1fff9efe2f18 100644 --- a/src/librustc_mir/util/graphviz.rs +++ b/src/librustc_mir/util/graphviz.rs @@ -60,7 +60,7 @@ pub fn write_mir_graphviz<'a, 'b, 'tcx, W, I>(tcx: TyCtxt<'b, 'tcx, 'tcx>, /// /// `init` and `fini` are callbacks for emitting additional rows of /// data (using HTML enclosed with `` in the emitted text). -pub fn write_node_label(block: BasicBlock, +pub fn write_node_label(block: Block, mir: &Mir, w: &mut W, num_cols: u32, @@ -103,7 +103,7 @@ pub fn write_node_label(block: BasicBlock, } /// Write a graphviz DOT node for the given basic block. -fn write_node(block: BasicBlock, mir: &Mir, w: &mut W) -> io::Result<()> { +fn write_node(block: Block, mir: &Mir, w: &mut W) -> io::Result<()> { // Start a new node with the label to follow, in one of DOT's pseudo-HTML tables. write!(w, r#" {} [shape="none", label=<"#, node(block))?; write_node_label(block, mir, w, 1, |_| Ok(()), |_| Ok(()))?; @@ -112,10 +112,12 @@ fn write_node(block: BasicBlock, mir: &Mir, w: &mut W) -> io::Result<( } /// Write graphviz DOT edges with labels between the given basic block and all of its successors. -fn write_edges(source: BasicBlock, mir: &Mir, w: &mut W) -> io::Result<()> { +fn write_edges(source: Block, mir: &Mir, w: &mut W) -> io::Result<()> { let terminator = mir[source].terminator(); let labels = terminator.kind.fmt_successor_labels(); + // FIXME: This should utilize ControlFlowGraph::successors(mir, source) but the + // fmt_successor_labels is only implemented for terminators as of right now. for (&target, label) in terminator.successors().iter().zip(labels) { writeln!(w, r#" {} -> {} [label="{}"];"#, node(source), node(target), label)?; } @@ -152,19 +154,17 @@ fn write_graph_label<'a, 'tcx, W: Write>(tcx: TyCtxt<'a, 'tcx, 'tcx>, write!(w, "mut ")?; } + write!(w, "{:?}: {};", Lvalue::Local(local), escape(&decl.ty))?; if let Some(name) = decl.name { - write!(w, r#"{:?}: {}; // {}
"#, - Lvalue::Local(local), escape(&decl.ty), name)?; - } else { - write!(w, r#"let mut {:?}: {};
"#, - Lvalue::Local(local), escape(&decl.ty))?; + write!(w, " // {}", name)?; } + write!(w, r#"
"#)?; } writeln!(w, ">;") } -fn node(block: BasicBlock) -> String { +fn node(block: Block) -> String { format!("bb{}", block.index()) } diff --git a/src/librustc_mir/util/patch.rs b/src/librustc_mir/util/patch.rs index 19f240da73059..f26a8bd38b039 100644 --- a/src/librustc_mir/util/patch.rs +++ b/src/librustc_mir/util/patch.rs @@ -16,11 +16,11 @@ use rustc_data_structures::indexed_vec::{IndexVec, Idx}; /// new statements and basic blocks and patch over block /// terminators. pub struct MirPatch<'tcx> { - patch_map: IndexVec>>, - new_blocks: Vec>, + patch_map: IndexVec>>, + new_blocks: Vec>, new_statements: Vec<(Location, StatementKind<'tcx>)>, new_locals: Vec>, - resume_block: BasicBlock, + resume_block: Block, next_local: usize, } @@ -53,7 +53,7 @@ impl<'tcx> MirPatch<'tcx> { } } let resume_block = resume_block.unwrap_or_else(|| { - result.new_block(BasicBlockData { + result.new_block(BlockData { statements: vec![], terminator: Some(Terminator { source_info: SourceInfo { @@ -66,6 +66,8 @@ impl<'tcx> MirPatch<'tcx> { })}); result.resume_block = resume_block; if let Some(resume_stmt_block) = resume_stmt_block { + debug!("MirPatch: patching resume_stmt_block, statements: {:?}", + mir.basic_blocks()[resume_stmt_block].statements); result.patch_terminator(resume_stmt_block, TerminatorKind::Goto { target: resume_block }); @@ -73,15 +75,15 @@ impl<'tcx> MirPatch<'tcx> { result } - pub fn resume_block(&self) -> BasicBlock { + pub fn resume_block(&self) -> Block { self.resume_block } - pub fn is_patched(&self, bb: BasicBlock) -> bool { + pub fn is_patched(&self, bb: Block) -> bool { self.patch_map[bb].is_some() } - pub fn terminator_loc(&self, mir: &Mir<'tcx>, bb: BasicBlock) -> Location { + pub fn terminator_loc(&self, mir: &Mir<'tcx>, bb: Block) -> Location { let offset = match bb.index().checked_sub(mir.basic_blocks().len()) { Some(index) => self.new_blocks[index].statements.len(), None => mir[bb].statements.len() @@ -99,15 +101,15 @@ impl<'tcx> MirPatch<'tcx> { Local::new(index as usize) } - pub fn new_block(&mut self, data: BasicBlockData<'tcx>) -> BasicBlock { - let block = BasicBlock::new(self.patch_map.len()); + pub fn new_block(&mut self, data: BlockData<'tcx>) -> Block { + let block = Block::new(self.patch_map.len()); debug!("MirPatch: new_block: {:?}: {:?}", block, data); self.new_blocks.push(data); self.patch_map.push(None); block } - pub fn patch_terminator(&mut self, block: BasicBlock, new: TerminatorKind<'tcx>) { + pub fn patch_terminator(&mut self, block: Block, new: TerminatorKind<'tcx>) { assert!(self.patch_map[block].is_none()); debug!("MirPatch: patch_terminator({:?}, {:?})", block, new); self.patch_map[block] = Some(new); @@ -161,7 +163,7 @@ impl<'tcx> MirPatch<'tcx> { } } - pub fn source_info_for_index(data: &BasicBlockData, loc: Location) -> SourceInfo { + pub fn source_info_for_index(data: &BlockData, loc: Location) -> SourceInfo { match data.statements.get(loc.statement_index) { Some(stmt) => stmt.source_info, None => data.terminator().source_info diff --git a/src/librustc_mir/util/pretty.rs b/src/librustc_mir/util/pretty.rs index ef2bf6e543420..297b0afdef06b 100644 --- a/src/librustc_mir/util/pretty.rs +++ b/src/librustc_mir/util/pretty.rs @@ -136,7 +136,7 @@ pub fn write_mir_fn<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, /// Write out a human-readable textual representation for the given basic block. 
fn write_basic_block(tcx: TyCtxt, - block: BasicBlock, + block: Block, mir: &Mir, w: &mut Write) -> io::Result<()> { diff --git a/src/librustc_passes/mir_stats.rs b/src/librustc_passes/mir_stats.rs index ce02cb0e83643..c1e7b00a338a9 100644 --- a/src/librustc_passes/mir_stats.rs +++ b/src/librustc_passes/mir_stats.rs @@ -14,7 +14,7 @@ use rustc_const_math::{ConstUsize}; use rustc::middle::const_val::{ConstVal}; -use rustc::mir::{AggregateKind, AssertMessage, BasicBlock, BasicBlockData}; +use rustc::mir::{AggregateKind, AssertMessage, Block, BlockData}; use rustc::mir::{Constant, Literal, Location, LocalDecl}; use rustc::mir::{Lvalue, LvalueElem, LvalueProjection}; use rustc::mir::{Mir, Operand, ProjectionElem}; @@ -106,9 +106,9 @@ impl<'a, 'tcx> mir_visit::Visitor<'tcx> for StatCollector<'a, 'tcx> { } fn visit_basic_block_data(&mut self, - block: BasicBlock, - data: &BasicBlockData<'tcx>) { - self.record("BasicBlockData", data); + block: Block, + data: &BlockData<'tcx>) { + self.record("BlockData", data); self.super_basic_block_data(block, data); } @@ -119,7 +119,7 @@ impl<'a, 'tcx> mir_visit::Visitor<'tcx> for StatCollector<'a, 'tcx> { } fn visit_statement(&mut self, - block: BasicBlock, + block: Block, statement: &Statement<'tcx>, location: Location) { self.record("Statement", statement); @@ -129,13 +129,15 @@ impl<'a, 'tcx> mir_visit::Visitor<'tcx> for StatCollector<'a, 'tcx> { StatementKind::StorageLive(..) => "StatementKind::StorageLive", StatementKind::StorageDead(..) => "StatementKind::StorageDead", StatementKind::InlineAsm { .. } => "StatementKind::InlineAsm", + StatementKind::Assert { .. } => "StatementKind::Assert", + StatementKind::Call { .. } => "StatementKind::Call", StatementKind::Nop => "StatementKind::Nop", }, &statement.kind); self.super_statement(block, statement, location); } fn visit_terminator(&mut self, - block: BasicBlock, + block: Block, terminator: &Terminator<'tcx>, location: Location) { self.record("Terminator", terminator); @@ -143,7 +145,7 @@ impl<'a, 'tcx> mir_visit::Visitor<'tcx> for StatCollector<'a, 'tcx> { } fn visit_terminator_kind(&mut self, - block: BasicBlock, + block: Block, kind: &TerminatorKind<'tcx>, location: Location) { self.record("TerminatorKind", kind); @@ -155,8 +157,6 @@ impl<'a, 'tcx> mir_visit::Visitor<'tcx> for StatCollector<'a, 'tcx> { TerminatorKind::Unreachable => "TerminatorKind::Unreachable", TerminatorKind::Drop { .. } => "TerminatorKind::Drop", TerminatorKind::DropAndReplace { .. } => "TerminatorKind::DropAndReplace", - TerminatorKind::Call { .. } => "TerminatorKind::Call", - TerminatorKind::Assert { .. } => "TerminatorKind::Assert", }, kind); self.super_terminator_kind(block, kind, location); } diff --git a/src/librustc_trans/collector.rs b/src/librustc_trans/collector.rs index 500802a4135d0..6fceb9508ecfd 100644 --- a/src/librustc_trans/collector.rs +++ b/src/librustc_trans/collector.rs @@ -542,17 +542,10 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { } fn visit_terminator_kind(&mut self, - block: mir::BasicBlock, + block: mir::Block, kind: &mir::TerminatorKind<'tcx>, location: Location) { - let tcx = self.scx.tcx(); match *kind { - mir::TerminatorKind::Call { ref func, .. } => { - let callee_ty = func.ty(self.mir, tcx); - let callee_ty = monomorphize::apply_param_substs( - self.scx, self.param_substs, &callee_ty); - visit_fn_use(self.scx, callee_ty, true, &mut self.output); - } mir::TerminatorKind::Drop { ref location, .. } | mir::TerminatorKind::DropAndReplace { ref location, .. 
} => { let ty = location.ty(self.mir, self.scx.tcx()) @@ -566,12 +559,36 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { mir::TerminatorKind::SwitchInt { .. } | mir::TerminatorKind::Resume | mir::TerminatorKind::Return | - mir::TerminatorKind::Unreachable | - mir::TerminatorKind::Assert { .. } => {} + mir::TerminatorKind::Unreachable => {} } self.super_terminator_kind(block, kind, location); } + + fn visit_statement( + &mut self, + _block: mir::Block, + statement: &mir::Statement<'tcx>, + _location: Location, + ) { + let tcx = self.scx.tcx(); + match statement.kind { + mir::StatementKind::Call { ref func, .. } => { + let callee_ty = func.ty(self.mir, tcx); + let callee_ty = monomorphize::apply_param_substs( + self.scx, self.param_substs, &callee_ty); + visit_fn_use(self.scx, callee_ty, true, &mut self.output); + } + mir::StatementKind::Assign(..) | + mir::StatementKind::SetDiscriminant { .. } | + mir::StatementKind::StorageLive(..) | + mir::StatementKind::StorageDead(..) | + mir::StatementKind::InlineAsm { .. } | + mir::StatementKind::Assert { .. } | + mir::StatementKind::Nop => {} + } + } + } fn visit_drop_use<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index a3968650043ba..169d8fd6cadc5 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -14,7 +14,7 @@ use rustc_data_structures::bitvec::BitVector; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc::middle::const_val::ConstVal; -use rustc::mir::{self, Location, TerminatorKind, Literal}; +use rustc::mir::{self, Location, TerminatorKind, Literal, StatementKind}; use rustc::mir::visit::{Visitor, LvalueContext}; use rustc::mir::traversal; use common; @@ -85,7 +85,7 @@ impl<'mir, 'a, 'tcx> LocalAnalyzer<'mir, 'a, 'tcx> { impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { fn visit_assign(&mut self, - block: mir::BasicBlock, + block: mir::Block, lvalue: &mir::Lvalue<'tcx>, rvalue: &mir::Rvalue<'tcx>, location: Location) { @@ -103,12 +103,14 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { self.visit_rvalue(rvalue, location); } - fn visit_terminator_kind(&mut self, - block: mir::BasicBlock, - kind: &mir::TerminatorKind<'tcx>, - location: Location) { - match *kind { - mir::TerminatorKind::Call { + fn visit_statement( + &mut self, + block: mir::Block, + statement: &mir::Statement<'tcx>, + location: Location + ) { + match statement.kind { + mir::StatementKind::Call { func: mir::Operand::Constant(mir::Constant { literal: Literal::Value { value: ConstVal::Function(def_id, _), .. @@ -126,7 +128,7 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { _ => {} } - self.super_terminator_kind(block, kind, location); + self.super_statement(block, statement, location); } fn visit_lvalue(&mut self, @@ -195,13 +197,34 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { pub enum CleanupKind { NotCleanup, Funclet, - Internal { funclet: mir::BasicBlock } + Internal { funclet: mir::Block } } -pub fn cleanup_kinds<'a, 'tcx>(mir: &mir::Mir<'tcx>) -> IndexVec { - fn discover_masters<'tcx>(result: &mut IndexVec, +pub fn cleanup_kinds<'a, 'tcx>(mir: &mir::Mir<'tcx>) -> IndexVec { + fn discover_masters<'tcx>(result: &mut IndexVec, mir: &mir::Mir<'tcx>) { for (bb, data) in mir.basic_blocks().iter_enumerated() { + for stmt in data.statements.iter() { + match stmt.kind { + StatementKind::Assign(..) 
| + StatementKind::SetDiscriminant { .. } | + StatementKind::StorageLive(..) | + StatementKind::StorageDead(..) | + StatementKind::InlineAsm { .. } | + StatementKind::Nop => { + /* nothing to do */ + }, + StatementKind::Call { cleanup: unwind, .. } | + StatementKind::Assert { cleanup: unwind, .. } => { + if let Some(unwind) = unwind { + debug!("cleanup_kinds: {:?}/{:?} registering {:?} as funclet", + bb, data, unwind); + result[unwind] = CleanupKind::Funclet; + } + } + } + } + match data.terminator().kind { TerminatorKind::Goto { .. } | TerminatorKind::Resume | @@ -210,8 +233,6 @@ pub fn cleanup_kinds<'a, 'tcx>(mir: &mir::Mir<'tcx>) -> IndexVec { /* nothing to do */ } - TerminatorKind::Call { cleanup: unwind, .. } | - TerminatorKind::Assert { cleanup: unwind, .. } | TerminatorKind::DropAndReplace { unwind, .. } | TerminatorKind::Drop { unwind, .. } => { if let Some(unwind) = unwind { @@ -224,11 +245,11 @@ pub fn cleanup_kinds<'a, 'tcx>(mir: &mir::Mir<'tcx>) -> IndexVec(result: &mut IndexVec, + fn propagate<'tcx>(result: &mut IndexVec, mir: &mir::Mir<'tcx>) { let mut funclet_succs = IndexVec::from_elem(None, mir.basic_blocks()); - let mut set_successor = |funclet: mir::BasicBlock, succ| { + let mut set_successor = |funclet: mir::Block, succ| { match funclet_succs[funclet] { ref mut s @ None => { debug!("set_successor: updating successor of {:?} to {:?}", @@ -252,7 +273,7 @@ pub fn cleanup_kinds<'a, 'tcx>(mir: &mir::Mir<'tcx>) -> IndexVec MirContext<'a, 'tcx> { - pub fn trans_block(&mut self, bb: mir::BasicBlock, - funclets: &IndexVec>) { + pub fn trans_block(&mut self, bb: mir::Block, + funclets: &IndexVec>) { let mut bcx = self.get_builder(bb); let data = &self.mir[bb]; @@ -57,7 +43,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let cleanup_pad = funclet.map(|lp| lp.cleanuppad()); let cleanup_bundle = funclet.map(|l| l.bundle()); - let funclet_br = |this: &Self, bcx: Builder, bb: mir::BasicBlock| { + let funclet_br = |this: &Self, bcx: Builder, bb: mir::Block| { let lltarget = this.blocks[bb]; if let Some(cp) = cleanup_pad { match this.cleanup_kinds[bb] { @@ -74,7 +60,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } }; - let llblock = |this: &mut Self, target: mir::BasicBlock| { + let llblock = |this: &mut Self, target: mir::Block| { let lltarget = this.blocks[target]; if let Some(cp) = cleanup_pad { @@ -105,13 +91,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; for statement in &data.statements { - bcx = self.trans_statement(bcx, statement); + bcx = self.trans_statement(bcx, statement, cleanup_bundle); } let terminator = data.terminator(); debug!("trans_block: terminator: {:?}", terminator); - let span = terminator.source_info.span; self.set_debug_loc(&bcx, terminator.source_info); match terminator.kind { mir::TerminatorKind::Resume => { @@ -207,570 +192,23 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } mir::TerminatorKind::Drop { ref location, target, unwind } => { - let ty = location.ty(&self.mir, bcx.tcx()).to_ty(bcx.tcx()); - let ty = self.monomorphize(&ty); - let drop_fn = monomorphize::resolve_drop_in_place(bcx.ccx.shared(), ty); - - if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def { - // we don't actually need to drop anything. - funclet_br(self, bcx, target); - return - } - - let lvalue = self.trans_lvalue(&bcx, location); - let (drop_fn, need_extra) = match ty.sty { - ty::TyDynamic(..) 
=> (meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra), - false), - ty::TyArray(ety, _) | ty::TySlice(ety) => { - // FIXME: handle panics - let drop_fn = monomorphize::resolve_drop_in_place( - bcx.ccx.shared(), ety); - let drop_fn = callee::get_fn(bcx.ccx, drop_fn); - let bcx = tvec::slice_for_each( - &bcx, - lvalue.project_index(&bcx, C_uint(bcx.ccx, 0u64)), - ety, - lvalue.len(bcx.ccx), - |bcx, llval, loop_bb| { - self.set_debug_loc(&bcx, terminator.source_info); - if let Some(unwind) = unwind { - bcx.invoke( - drop_fn, - &[llval], - loop_bb, - llblock(self, unwind), - cleanup_bundle - ); - } else { - bcx.call(drop_fn, &[llval], cleanup_bundle); - bcx.br(loop_bb); - } - }); - funclet_br(self, bcx, target); - return - } - _ => (callee::get_fn(bcx.ccx, drop_fn), lvalue.has_extra()) - }; - let args = &[lvalue.llval, lvalue.llextra][..1 + need_extra as usize]; - if let Some(unwind) = unwind { - bcx.invoke( - drop_fn, - args, - self.blocks[target], - llblock(self, unwind), - cleanup_bundle - ); - } else { - bcx.call(drop_fn, args, cleanup_bundle); - funclet_br(self, bcx, target); - } - } - - mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => { - let cond = self.trans_operand(&bcx, cond).immediate(); - let mut const_cond = common::const_to_opt_u128(cond, false).map(|c| c == 1); - - // This case can currently arise only from functions marked - // with #[rustc_inherit_overflow_checks] and inlined from - // another crate (mostly core::num generic/#[inline] fns), - // while the current crate doesn't use overflow checks. - // NOTE: Unlike binops, negation doesn't have its own - // checked operation, just a comparison with the minimum - // value, so we have to check for the assert message. - if !bcx.ccx.check_overflow() { - use rustc_const_math::ConstMathErr::Overflow; - use rustc_const_math::Op::Neg; - - if let mir::AssertMessage::Math(Overflow(Neg)) = *msg { - const_cond = Some(expected); - } - } - - // Don't translate the panic block if success if known. - if const_cond == Some(expected) { - funclet_br(self, bcx, target); - return; - } - - // Pass the condition through llvm.expect for branch hinting. - let expect = bcx.ccx.get_intrinsic(&"llvm.expect.i1"); - let cond = bcx.call(expect, &[cond, C_bool(bcx.ccx, expected)], None); - - // Create the failure block and the conditional branch to it. - let lltarget = llblock(self, target); - let panic_block = self.new_block("panic"); - if expected { - bcx.cond_br(cond, lltarget, panic_block.llbb()); - } else { - bcx.cond_br(cond, panic_block.llbb(), lltarget); - } - - // After this point, bcx is the block for the call to panic. - bcx = panic_block; - self.set_debug_loc(&bcx, terminator.source_info); - - // Get the location information. - let loc = bcx.sess().codemap().lookup_char_pos(span.lo); - let filename = Symbol::intern(&loc.file.name).as_str(); - let filename = C_str_slice(bcx.ccx, filename); - let line = C_u32(bcx.ccx, loc.line as u32); - - // Put together the arguments to the panic entry point. 
- let (lang_item, args, const_err) = match *msg { - mir::AssertMessage::BoundsCheck { ref len, ref index } => { - let len = self.trans_operand(&mut bcx, len).immediate(); - let index = self.trans_operand(&mut bcx, index).immediate(); - - let const_err = common::const_to_opt_u128(len, false) - .and_then(|len| common::const_to_opt_u128(index, false) - .map(|index| ErrKind::IndexOutOfBounds { - len: len as u64, - index: index as u64 - })); - - let file_line = C_struct(bcx.ccx, &[filename, line], false); - let align = llalign_of_min(bcx.ccx, common::val_ty(file_line)); - let file_line = consts::addr_of(bcx.ccx, - file_line, - align, - "panic_bounds_check_loc"); - (lang_items::PanicBoundsCheckFnLangItem, - vec![file_line, index, len], - const_err) - } - mir::AssertMessage::Math(ref err) => { - let msg_str = Symbol::intern(err.description()).as_str(); - let msg_str = C_str_slice(bcx.ccx, msg_str); - let msg_file_line = C_struct(bcx.ccx, - &[msg_str, filename, line], - false); - let align = llalign_of_min(bcx.ccx, common::val_ty(msg_file_line)); - let msg_file_line = consts::addr_of(bcx.ccx, - msg_file_line, - align, - "panic_loc"); - (lang_items::PanicFnLangItem, - vec![msg_file_line], - Some(ErrKind::Math(err.clone()))) - } - }; - - // If we know we always panic, and the error message - // is also constant, then we can produce a warning. - if const_cond == Some(!expected) { - if let Some(err) = const_err { - let err = ConstEvalErr{ span: span, kind: err }; - let mut diag = bcx.tcx().sess.struct_span_warn( - span, "this expression will panic at run-time"); - note_const_eval_err(bcx.tcx(), &err, span, "expression", &mut diag); - diag.emit(); - } - } - - // Obtain the panic entry point. - let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item); - let instance = ty::Instance::mono(bcx.tcx(), def_id); - let llfn = callee::get_fn(bcx.ccx, instance); - - // Translate the actual panic invoke/call. - if let Some(unwind) = cleanup { - bcx.invoke(llfn, - &args, - self.unreachable_block(), - llblock(self, unwind), - cleanup_bundle); - } else { - bcx.call(llfn, &args, cleanup_bundle); - bcx.unreachable(); - } + bcx = self.trans_drop( + bcx, location, unwind, cleanup_bundle, terminator.source_info + ); + funclet_br(self, bcx, target); } mir::TerminatorKind::DropAndReplace { .. } => { bug!("undesugared DropAndReplace in trans: {:?}", data); } - mir::TerminatorKind::Call { ref func, ref args, ref destination, ref cleanup } => { - // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar. - let callee = self.trans_operand(&bcx, func); - - let (instance, mut llfn, sig) = match callee.ty.sty { - ty::TyFnDef(def_id, substs, sig) => { - (Some(monomorphize::resolve(bcx.ccx.shared(), def_id, substs)), - None, - sig) - } - ty::TyFnPtr(sig) => { - (None, - Some(callee.immediate()), - sig) - } - _ => bug!("{} is not callable", callee.ty) - }; - let def = instance.map(|i| i.def); - let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig); - let abi = sig.abi; - - // Handle intrinsics old trans wants Expr's for, ourselves. - let intrinsic = match def { - Some(ty::InstanceDef::Intrinsic(def_id)) - => Some(bcx.tcx().item_name(def_id).as_str()), - _ => None - }; - let intrinsic = intrinsic.as_ref().map(|s| &s[..]); - - if intrinsic == Some("move_val_init") { - let &(_, target) = destination.as_ref().unwrap(); - // The first argument is a thin destination pointer. 
- let llptr = self.trans_operand(&bcx, &args[0]).immediate(); - let val = self.trans_operand(&bcx, &args[1]); - self.store_operand(&bcx, llptr, None, val); - funclet_br(self, bcx, target); - return; - } - - if intrinsic == Some("transmute") { - let &(ref dest, target) = destination.as_ref().unwrap(); - self.trans_transmute(&bcx, &args[0], dest); - funclet_br(self, bcx, target); - return; - } - - let extra_args = &args[sig.inputs().len()..]; - let extra_args = extra_args.iter().map(|op_arg| { - let op_ty = op_arg.ty(&self.mir, bcx.tcx()); - self.monomorphize(&op_ty) - }).collect::>(); - - let fn_ty = match def { - Some(ty::InstanceDef::Virtual(..)) => { - FnType::new_vtable(bcx.ccx, sig, &extra_args) - } - Some(ty::InstanceDef::DropGlue(_, None)) => { - // empty drop glue - a nop. - let &(_, target) = destination.as_ref().unwrap(); - funclet_br(self, bcx, target); - return; - } - _ => FnType::new(bcx.ccx, sig, &extra_args) - }; - - // The arguments we'll be passing. Plus one to account for outptr, if used. - let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize; - let mut llargs = Vec::with_capacity(arg_count); - - // Prepare the return value destination - let ret_dest = if let Some((ref dest, _)) = *destination { - let is_intrinsic = intrinsic.is_some(); - self.make_return_dest(&bcx, dest, &fn_ty.ret, &mut llargs, - is_intrinsic) - } else { - ReturnDest::Nothing - }; - - // Split the rust-call tupled arguments off. - let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() { - let (tup, args) = args.split_last().unwrap(); - (args, Some(tup)) - } else { - (&args[..], None) - }; - - let is_shuffle = intrinsic.map_or(false, |name| { - name.starts_with("simd_shuffle") - }); - let mut idx = 0; - for arg in first_args { - // The indices passed to simd_shuffle* in the - // third argument must be constant. This is - // checked by const-qualification, which also - // promotes any complex rvalues to constants. 
- if is_shuffle && idx == 2 { - match *arg { - mir::Operand::Consume(_) => { - span_bug!(span, "shuffle indices must be constant"); - } - mir::Operand::Constant(ref constant) => { - let val = self.trans_constant(&bcx, constant); - llargs.push(val.llval); - idx += 1; - continue; - } - } - } - - let op = self.trans_operand(&bcx, arg); - self.trans_argument(&bcx, op, &mut llargs, &fn_ty, - &mut idx, &mut llfn, &def); - } - if let Some(tup) = untuple { - self.trans_arguments_untupled(&bcx, tup, &mut llargs, &fn_ty, - &mut idx, &mut llfn, &def) - } - - if intrinsic.is_some() && intrinsic != Some("drop_in_place") { - use intrinsic::trans_intrinsic_call; - - let (dest, llargs) = match ret_dest { - _ if fn_ty.ret.is_indirect() => { - (llargs[0], &llargs[1..]) - } - ReturnDest::Nothing => { - (C_undef(fn_ty.ret.original_ty.ptr_to()), &llargs[..]) - } - ReturnDest::IndirectOperand(dst, _) | - ReturnDest::Store(dst) => (dst, &llargs[..]), - ReturnDest::DirectOperand(_) => - bug!("Cannot use direct operand with an intrinsic call") - }; - - let callee_ty = common::instance_ty( - bcx.ccx.shared(), instance.as_ref().unwrap()); - trans_intrinsic_call(&bcx, callee_ty, &fn_ty, &llargs, dest, - terminator.source_info.span); - - if let ReturnDest::IndirectOperand(dst, _) = ret_dest { - // Make a fake operand for store_return - let op = OperandRef { - val: Ref(dst, Alignment::AbiAligned), - ty: sig.output(), - }; - self.store_return(&bcx, ret_dest, fn_ty.ret, op); - } - - if let Some((_, target)) = *destination { - funclet_br(self, bcx, target); - } else { - bcx.unreachable(); - } - - return; - } - - let fn_ptr = match (llfn, instance) { - (Some(llfn), _) => llfn, - (None, Some(instance)) => callee::get_fn(bcx.ccx, instance), - _ => span_bug!(span, "no llfn for call"), - }; - - // Many different ways to call a function handled here - if let &Some(cleanup) = cleanup { - let ret_bcx = if let Some((_, target)) = *destination { - self.blocks[target] - } else { - self.unreachable_block() - }; - let invokeret = bcx.invoke(fn_ptr, - &llargs, - ret_bcx, - llblock(self, cleanup), - cleanup_bundle); - fn_ty.apply_attrs_callsite(invokeret); - - if let Some((_, target)) = *destination { - let ret_bcx = self.get_builder(target); - self.set_debug_loc(&ret_bcx, terminator.source_info); - let op = OperandRef { - val: Immediate(invokeret), - ty: sig.output(), - }; - self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op); - } - } else { - let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle); - fn_ty.apply_attrs_callsite(llret); - if let Some((_, target)) = *destination { - let op = OperandRef { - val: Immediate(llret), - ty: sig.output(), - }; - self.store_return(&bcx, ret_dest, fn_ty.ret, op); - funclet_br(self, bcx, target); - } else { - bcx.unreachable(); - } - } - } - } - } - - fn trans_argument(&mut self, - bcx: &Builder<'a, 'tcx>, - op: OperandRef<'tcx>, - llargs: &mut Vec, - fn_ty: &FnType, - next_idx: &mut usize, - llfn: &mut Option, - def: &Option>) { - if let Pair(a, b) = op.val { - // Treat the values in a fat pointer separately. - if common::type_is_fat_ptr(bcx.ccx, op.ty) { - let (ptr, meta) = (a, b); - if *next_idx == 0 { - if let Some(ty::InstanceDef::Virtual(_, idx)) = *def { - let llmeth = meth::VirtualIndex::from_index(idx).get_fn(bcx, meta); - let llty = fn_ty.llvm_type(bcx.ccx).ptr_to(); - *llfn = Some(bcx.pointercast(llmeth, llty)); - } - } - - let imm_op = |x| OperandRef { - val: Immediate(x), - // We won't be checking the type again. 
- ty: bcx.tcx().types.err - }; - self.trans_argument(bcx, imm_op(ptr), llargs, fn_ty, next_idx, llfn, def); - self.trans_argument(bcx, imm_op(meta), llargs, fn_ty, next_idx, llfn, def); - return; - } - } - - let arg = &fn_ty.args[*next_idx]; - *next_idx += 1; - - // Fill padding with undef value, where applicable. - if let Some(ty) = arg.pad { - llargs.push(C_undef(ty)); - } - - if arg.is_ignore() { - return; - } - - // Force by-ref if we have to load through a cast pointer. - let (mut llval, align, by_ref) = match op.val { - Immediate(_) | Pair(..) => { - if arg.is_indirect() || arg.cast.is_some() { - let llscratch = bcx.alloca(arg.original_ty, "arg"); - self.store_operand(bcx, llscratch, None, op); - (llscratch, Alignment::AbiAligned, true) - } else { - (op.pack_if_pair(bcx).immediate(), Alignment::AbiAligned, false) - } - } - Ref(llval, Alignment::Packed) if arg.is_indirect() => { - // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I - // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't - // have scary latent bugs around. - - let llscratch = bcx.alloca(arg.original_ty, "arg"); - base::memcpy_ty(bcx, llscratch, llval, op.ty, Some(1)); - (llscratch, Alignment::AbiAligned, true) - } - Ref(llval, align) => (llval, align, true) - }; - - if by_ref && !arg.is_indirect() { - // Have to load the argument, maybe while casting it. - if arg.original_ty == Type::i1(bcx.ccx) { - // We store bools as i8 so we need to truncate to i1. - llval = bcx.load_range_assert(llval, 0, 2, llvm::False, None); - llval = bcx.trunc(llval, arg.original_ty); - } else if let Some(ty) = arg.cast { - llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()), - align.min_with(llalign_of_min(bcx.ccx, arg.ty))); - } else { - llval = bcx.load(llval, align.to_align()); - } - } - - llargs.push(llval); - } - - fn trans_arguments_untupled(&mut self, - bcx: &Builder<'a, 'tcx>, - operand: &mir::Operand<'tcx>, - llargs: &mut Vec, - fn_ty: &FnType, - next_idx: &mut usize, - llfn: &mut Option, - def: &Option>) { - let tuple = self.trans_operand(bcx, operand); - - let arg_types = match tuple.ty.sty { - ty::TyTuple(ref tys, _) => tys, - _ => span_bug!(self.mir.span, - "bad final argument to \"rust-call\" fn {:?}", tuple.ty) - }; - - // Handle both by-ref and immediate tuples. - match tuple.val { - Ref(llval, align) => { - for (n, &ty) in arg_types.iter().enumerate() { - let ptr = LvalueRef::new_sized_ty(llval, tuple.ty, align); - let (ptr, align) = ptr.trans_field_ptr(bcx, n); - let val = if common::type_is_fat_ptr(bcx.ccx, ty) { - let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, align, ty); - Pair(lldata, llextra) - } else { - // trans_argument will load this if it needs to - Ref(ptr, align) - }; - let op = OperandRef { - val: val, - ty: ty - }; - self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def); - } - - } - Immediate(llval) => { - let l = bcx.ccx.layout_of(tuple.ty); - let v = if let layout::Univariant { ref variant, .. 
} = *l { - variant - } else { - bug!("Not a tuple."); - }; - for (n, &ty) in arg_types.iter().enumerate() { - let mut elem = bcx.extract_value(llval, v.memory_index[n] as usize); - // Truncate bools to i1, if needed - if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) { - elem = bcx.trunc(elem, Type::i1(bcx.ccx)); - } - // If the tuple is immediate, the elements are as well - let op = OperandRef { - val: Immediate(elem), - ty: ty - }; - self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def); - } - } - Pair(a, b) => { - let elems = [a, b]; - for (n, &ty) in arg_types.iter().enumerate() { - let mut elem = elems[n]; - // Truncate bools to i1, if needed - if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) { - elem = bcx.trunc(elem, Type::i1(bcx.ccx)); - } - // Pair is always made up of immediates - let op = OperandRef { - val: Immediate(elem), - ty: ty - }; - self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def); - } - } - } - - } - - fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> ValueRef { - let ccx = bcx.ccx; - if let Some(slot) = self.llpersonalityslot { - slot - } else { - let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let slot = bcx.alloca(llretty, "personalityslot"); - self.llpersonalityslot = Some(slot); - Lifetime::Start.call(bcx, slot); - slot } } /// Return the landingpad wrapper around the given basic block /// /// No-op in MSVC SEH scheme. - fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> BasicBlockRef { + pub fn landing_pad_to(&mut self, target_bb: mir::Block) -> BasicBlockRef { if let Some(block) = self.landing_pads[target_bb] { return block; } @@ -799,159 +237,26 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bcx.llbb() } - fn unreachable_block(&mut self) -> BasicBlockRef { - self.unreachable_block.unwrap_or_else(|| { - let bl = self.new_block("unreachable"); - bl.unreachable(); - self.unreachable_block = Some(bl.llbb()); - bl.llbb() - }) - } - pub fn new_block(&self, name: &str) -> Builder<'a, 'tcx> { Builder::new_block(self.ccx, self.llfn, name) } - pub fn get_builder(&self, bb: mir::BasicBlock) -> Builder<'a, 'tcx> { + pub fn get_builder(&self, bb: mir::Block) -> Builder<'a, 'tcx> { let builder = Builder::with_ccx(self.ccx); builder.position_at_end(self.blocks[bb]); builder } - fn make_return_dest(&mut self, bcx: &Builder<'a, 'tcx>, - dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType, - llargs: &mut Vec, is_intrinsic: bool) -> ReturnDest { - // If the return is ignored, we can just return a do-nothing ReturnDest - if fn_ret_ty.is_ignore() { - return ReturnDest::Nothing; - } - let dest = if let mir::Lvalue::Local(index) = *dest { - let ret_ty = self.monomorphized_lvalue_ty(dest); - match self.locals[index] { - LocalRef::Lvalue(dest) => dest, - LocalRef::Operand(None) => { - // Handle temporary lvalues, specifically Operand ones, as - // they don't have allocas - return if fn_ret_ty.is_indirect() { - // Odd, but possible, case, we have an operand temporary, - // but the calling convention has an indirect return. - let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret"); - llargs.push(tmp.llval); - ReturnDest::IndirectOperand(tmp.llval, index) - } else if is_intrinsic { - // Currently, intrinsics always need a location to store - // the result. 
so we create a temporary alloca for the - // result - let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret"); - ReturnDest::IndirectOperand(tmp.llval, index) - } else { - ReturnDest::DirectOperand(index) - }; - } - LocalRef::Operand(Some(_)) => { - bug!("lvalue local already assigned to"); - } - } - } else { - self.trans_lvalue(bcx, dest) - }; - if fn_ret_ty.is_indirect() { - match dest.alignment { - Alignment::AbiAligned => { - llargs.push(dest.llval); - ReturnDest::Nothing - }, - Alignment::Packed => { - // Currently, MIR code generation does not create calls - // that store directly to fields of packed structs (in - // fact, the calls it creates write only to temps), - // - // If someone changes that, please update this code path - // to create a temporary. - span_bug!(self.mir.span, "can't directly store to unaligned value"); - } - } - } else { - ReturnDest::Store(dest.llval) - } - } - - fn trans_transmute(&mut self, bcx: &Builder<'a, 'tcx>, - src: &mir::Operand<'tcx>, - dst: &mir::Lvalue<'tcx>) { - if let mir::Lvalue::Local(index) = *dst { - match self.locals[index] { - LocalRef::Lvalue(lvalue) => self.trans_transmute_into(bcx, src, &lvalue), - LocalRef::Operand(None) => { - let lvalue_ty = self.monomorphized_lvalue_ty(dst); - assert!(!lvalue_ty.has_erasable_regions()); - let lvalue = LvalueRef::alloca(bcx, lvalue_ty, "transmute_temp"); - self.trans_transmute_into(bcx, src, &lvalue); - let op = self.trans_load(bcx, lvalue.llval, lvalue.alignment, lvalue_ty); - self.locals[index] = LocalRef::Operand(Some(op)); - } - LocalRef::Operand(Some(_)) => { - let ty = self.monomorphized_lvalue_ty(dst); - assert!(common::type_is_zero_size(bcx.ccx, ty), - "assigning to initialized SSAtemp"); - } - } + fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> ValueRef { + let ccx = bcx.ccx; + if let Some(slot) = self.llpersonalityslot { + slot } else { - let dst = self.trans_lvalue(bcx, dst); - self.trans_transmute_into(bcx, src, &dst); - } - } - - fn trans_transmute_into(&mut self, bcx: &Builder<'a, 'tcx>, - src: &mir::Operand<'tcx>, - dst: &LvalueRef<'tcx>) { - let val = self.trans_operand(bcx, src); - let llty = type_of::type_of(bcx.ccx, val.ty); - let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to()); - let in_type = val.ty; - let out_type = dst.ty.to_ty(bcx.tcx());; - let llalign = cmp::min(align_of(bcx.ccx, in_type), align_of(bcx.ccx, out_type)); - self.store_operand(bcx, cast_ptr, Some(llalign), val); - } - - - // Stores the return value of a function call into it's final location. - fn store_return(&mut self, - bcx: &Builder<'a, 'tcx>, - dest: ReturnDest, - ret_ty: ArgType, - op: OperandRef<'tcx>) { - use self::ReturnDest::*; - - match dest { - Nothing => (), - Store(dst) => ret_ty.store(bcx, op.immediate(), dst), - IndirectOperand(tmp, index) => { - let op = self.trans_load(bcx, tmp, Alignment::AbiAligned, op.ty); - self.locals[index] = LocalRef::Operand(Some(op)); - } - DirectOperand(index) => { - // If there is a cast, we have to store and reload. 
- let op = if ret_ty.cast.is_some() { - let tmp = LvalueRef::alloca(bcx, op.ty, "tmp_ret"); - ret_ty.store(bcx, op.immediate(), tmp.llval); - self.trans_load(bcx, tmp.llval, tmp.alignment, op.ty) - } else { - op.unpack_if_pair(bcx) - }; - self.locals[index] = LocalRef::Operand(Some(op)); - } + let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); + let slot = bcx.alloca(llretty, "personalityslot"); + self.llpersonalityslot = Some(slot); + Lifetime::Start.call(bcx, slot); + slot } } } - -enum ReturnDest { - // Do nothing, the return value is indirect or ignored - Nothing, - // Store the return value to the pointer - Store(ValueRef), - // Stores an indirect return value to an operand local lvalue - IndirectOperand(ValueRef, mir::Local), - // Stores a direct return value to an operand local lvalue - DirectOperand(mir::Local) -} diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index dbd928194c032..9dc729d4b0245 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -286,6 +286,53 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { Err(err) => if failure.is_ok() { failure = Err(err); } } } + mir::StatementKind::Assert { ref cond, expected, ref msg, .. } => { + let cond = self.const_operand(cond, span)?; + let cond_bool = common::const_to_uint(cond.llval) != 0; + if cond_bool != expected { + let err = match *msg { + mir::AssertMessage::BoundsCheck { ref len, ref index } => { + let len = self.const_operand(len, span)?; + let index = self.const_operand(index, span)?; + ErrKind::IndexOutOfBounds { + len: common::const_to_uint(len.llval), + index: common::const_to_uint(index.llval) + } + } + mir::AssertMessage::Math(ref err) => { + ErrKind::Math(err.clone()) + } + }; + + let err = ConstEvalErr{ span: span, kind: err }; + report_const_eval_err(tcx, &err, span, "expression"); + failure = Err(err); + } + } + + mir::StatementKind::Call { ref func, ref args, ref destination, .. } => { + let fn_ty = func.ty(self.mir, tcx); + let fn_ty = self.monomorphize(&fn_ty); + let (def_id, substs) = match fn_ty.sty { + ty::TyFnDef(def_id, substs, _) => (def_id, substs), + _ => span_bug!(span, "calling {:?} (of type {}) in constant", + func, fn_ty) + }; + + let mut const_args = IndexVec::with_capacity(args.len()); + for arg in args { + match self.const_operand(arg, span) { + Ok(arg) => { const_args.push(arg); }, + Err(err) => if failure.is_ok() { failure = Err(err); } + } + } + + match MirConstContext::trans_def(self.ccx, def_id, substs, const_args) { + Ok(value) => self.store(destination, value, span), + Err(err) => if failure.is_ok() { failure = Err(err); } + } + } + mir::StatementKind::StorageLive(_) | mir::StatementKind::StorageDead(_) | mir::StatementKind::Nop => {} @@ -308,57 +355,6 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { })); } - mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, .. 
} => { - let cond = self.const_operand(cond, span)?; - let cond_bool = common::const_to_uint(cond.llval) != 0; - if cond_bool != expected { - let err = match *msg { - mir::AssertMessage::BoundsCheck { ref len, ref index } => { - let len = self.const_operand(len, span)?; - let index = self.const_operand(index, span)?; - ErrKind::IndexOutOfBounds { - len: common::const_to_uint(len.llval), - index: common::const_to_uint(index.llval) - } - } - mir::AssertMessage::Math(ref err) => { - ErrKind::Math(err.clone()) - } - }; - - let err = ConstEvalErr{ span: span, kind: err }; - report_const_eval_err(tcx, &err, span, "expression"); - failure = Err(err); - } - target - } - - mir::TerminatorKind::Call { ref func, ref args, ref destination, .. } => { - let fn_ty = func.ty(self.mir, tcx); - let fn_ty = self.monomorphize(&fn_ty); - let (def_id, substs) = match fn_ty.sty { - ty::TyFnDef(def_id, substs, _) => (def_id, substs), - _ => span_bug!(span, "calling {:?} (of type {}) in constant", - func, fn_ty) - }; - - let mut const_args = IndexVec::with_capacity(args.len()); - for arg in args { - match self.const_operand(arg, span) { - Ok(arg) => { const_args.push(arg); }, - Err(err) => if failure.is_ok() { failure = Err(err); } - } - } - if let Some((ref dest, target)) = *destination { - match MirConstContext::trans_def(self.ccx, def_id, substs, const_args) { - Ok(value) => self.store(dest, value, span), - Err(err) => if failure.is_ok() { failure = Err(err); } - } - target - } else { - span_bug!(span, "diverging {:?} in constant", terminator.kind); - } - } _ => span_bug!(span, "{:?} in constant", terminator.kind) }; } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 6419f41f86b6d..463e6900f58ca 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -63,18 +63,15 @@ pub struct MirContext<'a, 'tcx:'a> { /// then later loaded when generating the DIVERGE_BLOCK. llpersonalityslot: Option, - /// A `Block` for each MIR `BasicBlock` - blocks: IndexVec, + /// A `BasicBlock` for each MIR `Block` + blocks: IndexVec, /// The funclet status of each basic block - cleanup_kinds: IndexVec, + cleanup_kinds: IndexVec, /// This stores the landing-pad block for a given BB, computed lazily on GNU /// and eagerly on MSVC. - landing_pads: IndexVec>, - - /// Cached unreachable block - unreachable_block: Option, + landing_pads: IndexVec>, /// The location where each MIR arg/var/tmp/ret is stored. This is /// usually an `LvalueRef` representing an alloca, but not always: @@ -217,7 +214,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( let cleanup_kinds = analyze::cleanup_kinds(&mir); // Allocate a `Block` for every basic block - let block_bcxs: IndexVec = + let block_bcxs: IndexVec = mir.basic_blocks().indices().map(|bb| { if bb == mir::START_BLOCK { bcx.build_sibling_block("start").llbb() @@ -236,7 +233,6 @@ pub fn trans_mir<'a, 'tcx: 'a>( ccx: ccx, llpersonalityslot: None, blocks: block_bcxs, - unreachable_block: None, cleanup_kinds: cleanup_kinds, landing_pads: IndexVec::from_elem(None, mir.basic_blocks()), scopes: scopes, @@ -316,7 +312,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( // emitting should be enabled. 
debuginfo::start_emitting_source_locations(&mircx.debug_context); - let funclets: IndexVec> = + let funclets: IndexVec> = mircx.cleanup_kinds.iter_enumerated().map(|(bb, cleanup_kind)| { if let CleanupKind::Funclet = *cleanup_kind { let bcx = mircx.get_builder(bb); diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index d487aa6cd5be6..84ca3b277d2f2 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -99,8 +99,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let size = C_uint(bcx.ccx, size); let base = base::get_dataptr(&bcx, dest.llval); tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot, loop_bb| { - self.store_operand(bcx, llslot, dest.alignment.to_align(), tr_elem); + self.store_operand(&bcx, llslot, dest.alignment.to_align(), tr_elem); bcx.br(loop_bb); + bcx }) } diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index 29a0648c8f8f8..921e87c4bffa9 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -8,22 +8,42 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use llvm::{self, ValueRef, OperandBundleDef}; use rustc::mir; +use rustc::ty::{self, layout, TypeFoldable}; +use rustc_const_eval::{ErrKind, ConstEvalErr, note_const_eval_err}; +use rustc::middle::lang_items; use base; use asm; -use common; +use common::{self, C_bool, C_str_slice, C_u32, C_struct, C_undef, C_uint}; use builder::Builder; +use syntax::symbol::Symbol; +use machine::llalign_of_min; +use consts; +use callee; +use monomorphize; +use meth; +use tvec; +use abi::{Abi, FnType, ArgType}; +use type_::Type; +use type_of::{self, align_of}; + +use std::cmp; use super::MirContext; use super::LocalRef; use super::super::adt; use super::super::disr::Disr; +use super::lvalue::{Alignment, LvalueRef}; +use super::operand::OperandRef; +use super::operand::OperandValue::{Pair, Ref, Immediate}; impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_statement(&mut self, bcx: Builder<'a, 'tcx>, - statement: &mir::Statement<'tcx>) + statement: &mir::Statement<'tcx>, + cleanup_bundle: Option<&OperandBundleDef>) -> Builder<'a, 'tcx> { debug!("trans_statement(statement={:?})", statement); @@ -87,6 +107,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { asm::trans_inline_asm(&bcx, asm, outputs, input_vals); bcx } + mir::StatementKind::Assert { ref cond, expected, ref msg, cleanup } => { + self.trans_assert( + bcx, cond, expected, msg, cleanup, cleanup_bundle, statement.source_info + ) + } + mir::StatementKind::Call { ref func, ref args, ref destination, ref cleanup } => { + self.trans_call( + bcx, func, args, destination, cleanup, cleanup_bundle, statement.source_info, + ) + } mir::StatementKind::Nop => bcx, } } @@ -103,4 +133,685 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } bcx } + + pub fn trans_assert( + &mut self, + mut bcx: Builder<'a, 'tcx>, + cond: &mir::Operand<'tcx>, + expected: bool, + msg: &mir::AssertMessage<'tcx>, + cleanup: Option, + cleanup_bundle: Option<&OperandBundleDef>, + source_info: mir::SourceInfo, + ) -> Builder<'a, 'tcx> { + let cond = self.trans_operand(&bcx, cond).immediate(); + let mut const_cond = common::const_to_opt_u128(cond, false).map(|c| c == 1); + + // This case can currently arise only from functions marked + // with #[rustc_inherit_overflow_checks] and inlined from + // another crate (mostly core::num generic/#[inline] fns), + // while the current crate doesn't use overflow checks. 
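+ // When overflow checks are off here, such an inlined assert should be
+ // treated as always succeeding rather than translated into a panic branch.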
+ // NOTE: Unlike binops, negation doesn't have its own + // checked operation, just a comparison with the minimum + // value, so we have to check for the assert message. + if !bcx.ccx.check_overflow() { + use rustc_const_math::ConstMathErr::Overflow; + use rustc_const_math::Op::Neg; + + if let mir::AssertMessage::Math(Overflow(Neg)) = *msg { + const_cond = Some(expected); + } + } + + // Don't translate the panic block if success if known. + if const_cond == Some(expected) { + return bcx; + } + + // Pass the condition through llvm.expect for branch hinting. + let expect = bcx.ccx.get_intrinsic(&"llvm.expect.i1"); + let cond = bcx.call(expect, &[cond, C_bool(bcx.ccx, expected)], None); + + // Create the failure block and the conditional branch to it. + let success_block = self.new_block("success"); + let panic_block = self.new_block("panic"); + if expected { + bcx.cond_br(cond, success_block.llbb(), panic_block.llbb()); + } else { + bcx.cond_br(cond, panic_block.llbb(), success_block.llbb()); + } + + // After this point, bcx is the block for the call to panic. + bcx = panic_block; + self.set_debug_loc(&bcx, source_info); + + // Get the location information. + let loc = bcx.sess().codemap().lookup_char_pos(source_info.span.lo); + let filename = Symbol::intern(&loc.file.name).as_str(); + let filename = C_str_slice(bcx.ccx, filename); + let line = C_u32(bcx.ccx, loc.line as u32); + + // Put together the arguments to the panic entry point. + let (lang_item, args, const_err) = match *msg { + mir::AssertMessage::BoundsCheck { ref len, ref index } => { + let len = self.trans_operand(&mut bcx, len).immediate(); + let index = self.trans_operand(&mut bcx, index).immediate(); + + let const_err = common::const_to_opt_u128(len, false) + .and_then(|len| common::const_to_opt_u128(index, false) + .map(|index| ErrKind::IndexOutOfBounds { + len: len as u64, + index: index as u64 + })); + + let file_line = C_struct(bcx.ccx, &[filename, line], false); + let align = llalign_of_min(bcx.ccx, common::val_ty(file_line)); + let file_line = consts::addr_of(bcx.ccx, + file_line, + align, + "panic_bounds_check_loc"); + (lang_items::PanicBoundsCheckFnLangItem, + vec![file_line, index, len], + const_err) + } + mir::AssertMessage::Math(ref err) => { + let msg_str = Symbol::intern(err.description()).as_str(); + let msg_str = C_str_slice(bcx.ccx, msg_str); + let msg_file_line = C_struct(bcx.ccx, + &[msg_str, filename, line], + false); + let align = llalign_of_min(bcx.ccx, common::val_ty(msg_file_line)); + let msg_file_line = consts::addr_of(bcx.ccx, + msg_file_line, + align, + "panic_loc"); + (lang_items::PanicFnLangItem, + vec![msg_file_line], + Some(ErrKind::Math(err.clone()))) + } + }; + + // If we know we always panic, and the error message + // is also constant, then we can produce a warning. + if const_cond == Some(!expected) { + if let Some(err) = const_err { + let err = ConstEvalErr { span: source_info.span, kind: err }; + let mut diag = bcx.tcx().sess.struct_span_warn( + source_info.span, "this expression will panic at run-time"); + note_const_eval_err(bcx.tcx(), &err, source_info.span, "expression", &mut diag); + diag.emit(); + } + } + + // Obtain the panic entry point. + let def_id = common::langcall(bcx.tcx(), Some(source_info.span), "", lang_item); + let instance = ty::Instance::mono(bcx.tcx(), def_id); + let llfn = callee::get_fn(bcx.ccx, instance); + + // Translate the actual panic invoke/call. 
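+ // With a cleanup target we `invoke` so that unwinding out of the panic
+ // entry point reaches the landing pad; otherwise a plain `call` is enough.
+ // The panic block then ends in `unreachable` and the success block is
+ // handed back to the caller.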
+ if let Some(unwind) = cleanup { + let old_bcx = bcx; + bcx = old_bcx.build_sibling_block("assert-next"); + old_bcx.invoke( + llfn, + &args, + bcx.llbb(), + self.landing_pad_to(unwind), + cleanup_bundle + ); + } else { + bcx.call(llfn, &args, cleanup_bundle); + } + bcx.unreachable(); + + success_block + } + + pub fn trans_drop( + &mut self, + mut bcx: Builder<'a, 'tcx>, + location: &mir::Lvalue<'tcx>, + unwind: Option, + cleanup_bundle: Option<&OperandBundleDef>, + source_info: mir::SourceInfo, + ) -> Builder<'a, 'tcx> { + let ty = location.ty(&self.mir, bcx.tcx()).to_ty(bcx.tcx()); + let ty = self.monomorphize(&ty); + let drop_fn = monomorphize::resolve_drop_in_place(bcx.ccx.shared(), ty); + + if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def { + // we don't actually need to drop anything. + return bcx; + } + + let lvalue = self.trans_lvalue(&bcx, location); + let (drop_fn, need_extra) = match ty.sty { + ty::TyDynamic(..) => (meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra), + false), + ty::TyArray(ety, _) | ty::TySlice(ety) => { + // FIXME: handle panics + let drop_fn = monomorphize::resolve_drop_in_place( + bcx.ccx.shared(), ety); + let drop_fn = callee::get_fn(bcx.ccx, drop_fn); + return tvec::slice_for_each( + &bcx, + lvalue.project_index(&bcx, C_uint(bcx.ccx, 0u64)), + ety, + lvalue.len(bcx.ccx), + |mut bcx, llval, loop_bb| { + self.set_debug_loc(&bcx, source_info); + if let Some(unwind) = unwind { + let old_bcx = bcx; + bcx = old_bcx.build_sibling_block("drop-next"); + old_bcx.invoke( + drop_fn, + &[llval], + bcx.llbb(), + self.landing_pad_to(unwind), + cleanup_bundle + ); + } else { + bcx.call(drop_fn, &[llval], cleanup_bundle); + } + bcx.br(loop_bb); + bcx + }); + } + _ => (callee::get_fn(bcx.ccx, drop_fn), lvalue.has_extra()) + }; + let args = &[lvalue.llval, lvalue.llextra][..1 + need_extra as usize]; + if let Some(unwind) = unwind { + let old_bcx = bcx; + bcx = old_bcx.build_sibling_block("drop-next"); + old_bcx.invoke( + drop_fn, + args, + bcx.llbb(), + self.landing_pad_to(unwind), + cleanup_bundle + ); + } else { + bcx.call(drop_fn, args, cleanup_bundle); + } + + bcx + } + + pub fn trans_call( + &mut self, + mut bcx: Builder<'a, 'tcx>, + func: &mir::Operand<'tcx>, + args: &[mir::Operand<'tcx>], + destination: &mir::Lvalue<'tcx>, + cleanup: &Option, + cleanup_bundle: Option<&OperandBundleDef>, + source_info: mir::SourceInfo, + ) -> Builder<'a, 'tcx> { + // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar. + let callee = self.trans_operand(&bcx, func); + + let (instance, mut llfn, sig) = match callee.ty.sty { + ty::TyFnDef(def_id, substs, sig) => { + (Some(monomorphize::resolve(bcx.ccx.shared(), def_id, substs)), + None, + sig) + } + ty::TyFnPtr(sig) => { + (None, + Some(callee.immediate()), + sig) + } + _ => bug!("{} is not callable", callee.ty) + }; + let def = instance.map(|i| i.def); + let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig); + let abi = sig.abi; + + // Handle intrinsics old trans wants Expr's for, ourselves. + let intrinsic = match def { + Some(ty::InstanceDef::Intrinsic(def_id)) + => Some(bcx.tcx().item_name(def_id).as_str()), + _ => None + }; + let intrinsic = intrinsic.as_ref().map(|s| &s[..]); + + if intrinsic == Some("move_val_init") { + // The first argument is a thin destination pointer. 
+ let llptr = self.trans_operand(&bcx, &args[0]).immediate(); + let val = self.trans_operand(&bcx, &args[1]); + self.store_operand(&bcx, llptr, None, val); + return bcx; + } + + if intrinsic == Some("transmute") { + self.trans_transmute(&bcx, &args[0], destination); + return bcx; + } + + let extra_args = &args[sig.inputs().len()..]; + let extra_args = extra_args.iter().map(|op_arg| { + let op_ty = op_arg.ty(&self.mir, bcx.tcx()); + self.monomorphize(&op_ty) + }).collect::>(); + + let fn_ty = match def { + Some(ty::InstanceDef::Virtual(..)) => { + FnType::new_vtable(bcx.ccx, sig, &extra_args) + } + Some(ty::InstanceDef::DropGlue(_, None)) => { + // empty drop glue - a nop. + return bcx; + } + _ => FnType::new(bcx.ccx, sig, &extra_args) + }; + + // The arguments we'll be passing. Plus one to account for outptr, if used. + let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize; + let mut llargs = Vec::with_capacity(arg_count); + + // Prepare the return value destination + let ret_dest = self.make_return_dest(&bcx, destination, &fn_ty.ret, &mut llargs, + intrinsic.is_some()); + + // Split the rust-call tupled arguments off. + let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() { + let (tup, args) = args.split_last().unwrap(); + (args, Some(tup)) + } else { + (&args[..], None) + }; + + let is_shuffle = intrinsic.map_or(false, |name| { + name.starts_with("simd_shuffle") + }); + let mut idx = 0; + for arg in first_args { + // The indices passed to simd_shuffle* in the + // third argument must be constant. This is + // checked by const-qualification, which also + // promotes any complex rvalues to constants. + if is_shuffle && idx == 2 { + match *arg { + mir::Operand::Consume(_) => { + span_bug!(source_info.span, "shuffle indices must be constant"); + } + mir::Operand::Constant(ref constant) => { + let val = self.trans_constant(&bcx, constant); + llargs.push(val.llval); + idx += 1; + continue; + } + } + } + + let op = self.trans_operand(&bcx, arg); + self.trans_argument(&bcx, op, &mut llargs, &fn_ty, + &mut idx, &mut llfn, &def); + } + if let Some(tup) = untuple { + self.trans_arguments_untupled(&bcx, tup, &mut llargs, &fn_ty, + &mut idx, &mut llfn, &def) + } + + if intrinsic.is_some() && intrinsic != Some("drop_in_place") { + use intrinsic::trans_intrinsic_call; + + let (dest, llargs) = match ret_dest { + _ if fn_ty.ret.is_indirect() => { + (llargs[0], &llargs[1..]) + } + ReturnDest::Nothing => { + (C_undef(fn_ty.ret.original_ty.ptr_to()), &llargs[..]) + } + ReturnDest::IndirectOperand(dst, _) | + ReturnDest::Store(dst) => (dst, &llargs[..]), + ReturnDest::DirectOperand(_) => + bug!("Cannot use direct operand with an intrinsic call") + }; + + let callee_ty = common::instance_ty( + bcx.ccx.shared(), instance.as_ref().unwrap()); + trans_intrinsic_call(&bcx, callee_ty, &fn_ty, &llargs, dest, + source_info.span); + + if let ReturnDest::IndirectOperand(dst, _) = ret_dest { + // Make a fake operand for store_return + let op = OperandRef { + val: Ref(dst, Alignment::AbiAligned), + ty: sig.output(), + }; + self.store_return(&bcx, ret_dest, fn_ty.ret, op); + } + + return bcx; + } + + let fn_ptr = match (llfn, instance) { + (Some(llfn), _) => llfn, + (None, Some(instance)) => callee::get_fn(bcx.ccx, instance), + _ => span_bug!(source_info.span, "no llfn for call"), + }; + + let llret = if let &Some(cleanup) = cleanup { + let old_bcx = bcx; + bcx = old_bcx.build_sibling_block("call-next"); + self.set_debug_loc(&bcx, source_info); + old_bcx.invoke( + fn_ptr, + &llargs, + 
bcx.llbb(), + self.landing_pad_to(cleanup), + cleanup_bundle, + ) + } else { + bcx.call(fn_ptr, &llargs, cleanup_bundle) + }; + + fn_ty.apply_attrs_callsite(llret); + + let op = OperandRef { + val: Immediate(llret), + ty: sig.output(), + }; + self.store_return(&bcx, ret_dest, fn_ty.ret, op); + + bcx + } + + fn trans_argument(&mut self, + bcx: &Builder<'a, 'tcx>, + op: OperandRef<'tcx>, + llargs: &mut Vec, + fn_ty: &FnType, + next_idx: &mut usize, + llfn: &mut Option, + def: &Option>) { + if let Pair(a, b) = op.val { + // Treat the values in a fat pointer separately. + if common::type_is_fat_ptr(bcx.ccx, op.ty) { + let (ptr, meta) = (a, b); + if *next_idx == 0 { + if let Some(ty::InstanceDef::Virtual(_, idx)) = *def { + let llmeth = meth::VirtualIndex::from_index(idx).get_fn(bcx, meta); + let llty = fn_ty.llvm_type(bcx.ccx).ptr_to(); + *llfn = Some(bcx.pointercast(llmeth, llty)); + } + } + + let imm_op = |x| OperandRef { + val: Immediate(x), + // We won't be checking the type again. + ty: bcx.tcx().types.err + }; + self.trans_argument(bcx, imm_op(ptr), llargs, fn_ty, next_idx, llfn, def); + self.trans_argument(bcx, imm_op(meta), llargs, fn_ty, next_idx, llfn, def); + return; + } + } + + let arg = &fn_ty.args[*next_idx]; + *next_idx += 1; + + // Fill padding with undef value, where applicable. + if let Some(ty) = arg.pad { + llargs.push(C_undef(ty)); + } + + if arg.is_ignore() { + return; + } + + // Force by-ref if we have to load through a cast pointer. + let (mut llval, align, by_ref) = match op.val { + Immediate(_) | Pair(..) => { + if arg.is_indirect() || arg.cast.is_some() { + let llscratch = bcx.alloca(arg.original_ty, "arg"); + self.store_operand(bcx, llscratch, None, op); + (llscratch, Alignment::AbiAligned, true) + } else { + (op.pack_if_pair(bcx).immediate(), Alignment::AbiAligned, false) + } + } + Ref(llval, Alignment::Packed) if arg.is_indirect() => { + // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I + // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't + // have scary latent bugs around. + + let llscratch = bcx.alloca(arg.original_ty, "arg"); + base::memcpy_ty(bcx, llscratch, llval, op.ty, Some(1)); + (llscratch, Alignment::AbiAligned, true) + } + Ref(llval, align) => (llval, align, true) + }; + + if by_ref && !arg.is_indirect() { + // Have to load the argument, maybe while casting it. + if arg.original_ty == Type::i1(bcx.ccx) { + // We store bools as i8 so we need to truncate to i1. + llval = bcx.load_range_assert(llval, 0, 2, llvm::False, None); + llval = bcx.trunc(llval, arg.original_ty); + } else if let Some(ty) = arg.cast { + llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()), + align.min_with(llalign_of_min(bcx.ccx, arg.ty))); + } else { + llval = bcx.load(llval, align.to_align()); + } + } + + llargs.push(llval); + } + + fn trans_arguments_untupled(&mut self, + bcx: &Builder<'a, 'tcx>, + operand: &mir::Operand<'tcx>, + llargs: &mut Vec, + fn_ty: &FnType, + next_idx: &mut usize, + llfn: &mut Option, + def: &Option>) { + let tuple = self.trans_operand(bcx, operand); + + let arg_types = match tuple.ty.sty { + ty::TyTuple(ref tys, _) => tys, + _ => span_bug!(self.mir.span, + "bad final argument to \"rust-call\" fn {:?}", tuple.ty) + }; + + // Handle both by-ref and immediate tuples. 
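+ // A by-ref tuple hands each element to trans_argument as a field pointer
+ // (loaded lazily there if needed); an immediate tuple is split apart with
+ // extract_value using the univariant layout's memory_index; a pair is
+ // already two immediates.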
+ match tuple.val { + Ref(llval, align) => { + for (n, &ty) in arg_types.iter().enumerate() { + let ptr = LvalueRef::new_sized_ty(llval, tuple.ty, align); + let (ptr, align) = ptr.trans_field_ptr(bcx, n); + let val = if common::type_is_fat_ptr(bcx.ccx, ty) { + let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, align, ty); + Pair(lldata, llextra) + } else { + // trans_argument will load this if it needs to + Ref(ptr, align) + }; + let op = OperandRef { + val: val, + ty: ty + }; + self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def); + } + + } + Immediate(llval) => { + let l = bcx.ccx.layout_of(tuple.ty); + let v = if let layout::Univariant { ref variant, .. } = *l { + variant + } else { + bug!("Not a tuple."); + }; + for (n, &ty) in arg_types.iter().enumerate() { + let mut elem = bcx.extract_value(llval, v.memory_index[n] as usize); + // Truncate bools to i1, if needed + if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) { + elem = bcx.trunc(elem, Type::i1(bcx.ccx)); + } + // If the tuple is immediate, the elements are as well + let op = OperandRef { + val: Immediate(elem), + ty: ty + }; + self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def); + } + } + Pair(a, b) => { + let elems = [a, b]; + for (n, &ty) in arg_types.iter().enumerate() { + let mut elem = elems[n]; + // Truncate bools to i1, if needed + if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) { + elem = bcx.trunc(elem, Type::i1(bcx.ccx)); + } + // Pair is always made up of immediates + let op = OperandRef { + val: Immediate(elem), + ty: ty + }; + self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def); + } + } + } + } + + fn make_return_dest(&mut self, bcx: &Builder<'a, 'tcx>, + dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType, + llargs: &mut Vec, is_intrinsic: bool) -> ReturnDest { + // If the return is ignored, we can just return a do-nothing ReturnDest + if fn_ret_ty.is_ignore() { + return ReturnDest::Nothing; + } + let dest = if let mir::Lvalue::Local(index) = *dest { + let ret_ty = self.monomorphized_lvalue_ty(dest); + match self.locals[index] { + LocalRef::Lvalue(dest) => dest, + LocalRef::Operand(None) => { + // Handle temporary lvalues, specifically Operand ones, as + // they don't have allocas + return if fn_ret_ty.is_indirect() { + // Odd, but possible, case, we have an operand temporary, + // but the calling convention has an indirect return. + let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret"); + llargs.push(tmp.llval); + ReturnDest::IndirectOperand(tmp.llval, index) + } else if is_intrinsic { + // Currently, intrinsics always need a location to store + // the result. so we create a temporary alloca for the + // result + let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret"); + ReturnDest::IndirectOperand(tmp.llval, index) + } else { + ReturnDest::DirectOperand(index) + }; + } + LocalRef::Operand(Some(_)) => { + bug!("lvalue local already assigned to"); + } + } + } else { + self.trans_lvalue(bcx, dest) + }; + if fn_ret_ty.is_indirect() { + match dest.alignment { + Alignment::AbiAligned => { + llargs.push(dest.llval); + ReturnDest::Nothing + }, + Alignment::Packed => { + // Currently, MIR code generation does not create calls + // that store directly to fields of packed structs (in + // fact, the calls it creates write only to temps), + // + // If someone changes that, please update this code path + // to create a temporary. 
+ span_bug!(self.mir.span, "can't directly store to unaligned value"); + } + } + } else { + ReturnDest::Store(dest.llval) + } + } + + fn trans_transmute(&mut self, bcx: &Builder<'a, 'tcx>, + src: &mir::Operand<'tcx>, + dst: &mir::Lvalue<'tcx>) { + if let mir::Lvalue::Local(index) = *dst { + match self.locals[index] { + LocalRef::Lvalue(lvalue) => self.trans_transmute_into(bcx, src, &lvalue), + LocalRef::Operand(None) => { + let lvalue_ty = self.monomorphized_lvalue_ty(dst); + assert!(!lvalue_ty.has_erasable_regions()); + let lvalue = LvalueRef::alloca(bcx, lvalue_ty, "transmute_temp"); + self.trans_transmute_into(bcx, src, &lvalue); + let op = self.trans_load(bcx, lvalue.llval, lvalue.alignment, lvalue_ty); + self.locals[index] = LocalRef::Operand(Some(op)); + } + LocalRef::Operand(Some(_)) => { + let ty = self.monomorphized_lvalue_ty(dst); + assert!(common::type_is_zero_size(bcx.ccx, ty), + "assigning to initialized SSAtemp"); + } + } + } else { + let dst = self.trans_lvalue(bcx, dst); + self.trans_transmute_into(bcx, src, &dst); + } + } + + fn trans_transmute_into(&mut self, bcx: &Builder<'a, 'tcx>, + src: &mir::Operand<'tcx>, + dst: &LvalueRef<'tcx>) { + let val = self.trans_operand(bcx, src); + let llty = type_of::type_of(bcx.ccx, val.ty); + let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to()); + let in_type = val.ty; + let out_type = dst.ty.to_ty(bcx.tcx());; + let llalign = cmp::min(align_of(bcx.ccx, in_type), align_of(bcx.ccx, out_type)); + self.store_operand(bcx, cast_ptr, Some(llalign), val); + } + + + // Stores the return value of a function call into it's final location. + fn store_return(&mut self, + bcx: &Builder<'a, 'tcx>, + dest: ReturnDest, + ret_ty: ArgType, + op: OperandRef<'tcx>) { + use self::ReturnDest::*; + + match dest { + Nothing => (), + Store(dst) => ret_ty.store(bcx, op.immediate(), dst), + IndirectOperand(tmp, index) => { + let op = self.trans_load(bcx, tmp, Alignment::AbiAligned, op.ty); + self.locals[index] = LocalRef::Operand(Some(op)); + } + DirectOperand(index) => { + // If there is a cast, we have to store and reload. 
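+ // The value is spilled through `ret_ty.store` into a temporary alloca and
+ // read back as the Rust type, so the operand local gets the representation
+ // it expects rather than the cast ABI type.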
+ let op = if ret_ty.cast.is_some() { + let tmp = LvalueRef::alloca(bcx, op.ty, "tmp_ret"); + ret_ty.store(bcx, op.immediate(), tmp.llval); + self.trans_load(bcx, tmp.llval, tmp.alignment, op.ty) + } else { + op.unpack_if_pair(bcx) + }; + self.locals[index] = LocalRef::Operand(Some(op)); + } + } + } +} + +enum ReturnDest { + // Do nothing, the return value is indirect or ignored + Nothing, + // Store the return value to the pointer + Store(ValueRef), + // Stores an indirect return value to an operand local lvalue + IndirectOperand(ValueRef, mir::Local), + // Stores a direct return value to an operand local lvalue + DirectOperand(mir::Local) } diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index 4216a73a8dd85..cef7eab4c01c9 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -20,7 +20,9 @@ pub fn slice_for_each<'a, 'tcx, F>( unit_ty: Ty<'tcx>, len: ValueRef, f: F -) -> Builder<'a, 'tcx> where F: FnOnce(&Builder<'a, 'tcx>, ValueRef, BasicBlockRef) { +) -> Builder<'a, 'tcx> + where F: FnOnce(Builder<'a, 'tcx>, ValueRef, BasicBlockRef) -> Builder<'a, 'tcx> +{ // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) let zst = type_is_zero_size(bcx.ccx, unit_ty); let add = |bcx: &Builder, a, b| if zst { @@ -47,7 +49,7 @@ pub fn slice_for_each<'a, 'tcx, F>( header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb()); let next = add(&body_bcx, current, C_uint(bcx.ccx, 1usize)); - f(&body_bcx, if zst { data_ptr } else { current }, header_bcx.llbb()); + let body_bcx = f(body_bcx, if zst { data_ptr } else { current }, header_bcx.llbb()); header_bcx.add_incoming_to_phi(current, next, body_bcx.llbb()); next_bcx }
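The `slice_for_each` change above is what lets a loop body introduce new basic blocks: the body closure now takes the `Builder` by value and returns the builder to keep emitting into, so a body that has to `invoke` (for a drop or a call with an unwind edge) can hand back its "-next" continuation block instead of leaving the caller positioned in a stale block. Below is a minimal sketch of that pattern; `Builder`, `BlockId`, `sibling_block`, and `for_each_element` are simplified stand-ins for illustration, not the real librustc_trans API.

// Illustrative sketch only: simplified stand-in types, not librustc_trans.
#[derive(Debug, Clone, Copy)]
struct BlockId(usize);

struct Builder {
    // The block this builder is currently emitting into.
    block: BlockId,
    next_fresh: usize,
}

impl Builder {
    fn new() -> Builder {
        Builder { block: BlockId(0), next_fresh: 1 }
    }

    // Stand-in for `build_sibling_block`: continue in a fresh block,
    // e.g. the "drop-next"/"call-next" block after an invoke.
    fn sibling_block(self) -> Builder {
        let id = self.next_fresh;
        Builder { block: BlockId(id), next_fresh: id + 1 }
    }
}

// Stand-in for the new `slice_for_each` contract: the body closure consumes
// the builder and returns the builder the loop should continue with.
fn for_each_element<F>(mut bcx: Builder, len: usize, mut body: F) -> Builder
    where F: FnMut(Builder, usize) -> Builder
{
    for i in 0..len {
        bcx = body(bcx, i);
    }
    bcx
}

fn main() {
    let done = for_each_element(Builder::new(), 3, |bcx, i| {
        // Pretend each element's drop glue is invoked with an unwind edge,
        // so every iteration continues in a fresh sibling block.
        println!("element {} handled in {:?}", i, bcx.block);
        bcx.sibling_block()
    });
    println!("loop finished in {:?}", done.block);
}

Threading the builder through by value, rather than lending it by reference, makes the "current block" explicit: whatever block the closure ends in is the one the loop's back edge and phi bookkeeping see next.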