// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, BasicBlockRef, ValueRef, OperandBundleDef};
use rustc::ty;
use rustc::mir::repr as mir;
use trans::abi::{Abi, FnType};
use trans::adt;
use trans::base;
use trans::build;
use trans::callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual};
use trans::common::{self, Block, BlockAndBuilder, C_undef};
use trans::debuginfo::DebugLoc;
use trans::Disr;
use trans::machine::{llalign_of_min, llbitsize_of_real};
use trans::meth;
use trans::type_of;
use trans::glue;
use trans::type_::Type;

use super::{MirContext, drop};
use super::lvalue::{LvalueRef, load_fat_ptr};
use super::operand::OperandRef;
use super::operand::OperandValue::{self, FatPtr, Immediate, Ref};

impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
    pub fn trans_block(&mut self, bb: mir::BasicBlock) {
        debug!("trans_block({:?})", bb);

        let mut bcx = self.bcx(bb);
        let mir = self.mir.clone();
        let data = mir.basic_block_data(bb);

        // MSVC SEH bits
        let (cleanup_pad, cleanup_bundle) = if let Some((cp, cb)) = self.make_cleanup_pad(bb) {
            (Some(cp), Some(cb))
        } else {
            (None, None)
        };
        let funclet_br = |bcx: BlockAndBuilder, llbb: BasicBlockRef| if let Some(cp) = cleanup_pad {
            bcx.cleanup_ret(cp, Some(llbb));
        } else {
            bcx.br(llbb);
        };

        for statement in &data.statements {
            bcx = self.trans_statement(bcx, statement);
        }

        debug!("trans_block: terminator: {:?}", data.terminator());

        match data.terminator().kind {
            mir::TerminatorKind::Resume => {
                if let Some(cleanup_pad) = cleanup_pad {
                    bcx.cleanup_ret(cleanup_pad, None);
                } else {
                    let ps = self.get_personality_slot(&bcx);
                    let lp = bcx.load(ps);
                    bcx.with_block(|bcx| {
                        base::call_lifetime_end(bcx, ps);
                        base::trans_unwind_resume(bcx, lp);
                    });
                }
            }

            mir::TerminatorKind::Goto { target } => {
                funclet_br(bcx, self.llblock(target));
            }

            mir::TerminatorKind::If { ref cond, targets: (true_bb, false_bb) } => {
                let cond = self.trans_operand(&bcx, cond);
                let lltrue = self.llblock(true_bb);
                let llfalse = self.llblock(false_bb);
                bcx.cond_br(cond.immediate(), lltrue, llfalse);
            }

            mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => {
                let discr_lvalue = self.trans_lvalue(&bcx, discr);
                let ty = discr_lvalue.ty.to_ty(bcx.tcx());
                let repr = adt::represent_type(bcx.ccx(), ty);
                let discr = bcx.with_block(|bcx|
                    adt::trans_get_discr(bcx, &repr, discr_lvalue.llval, None, true)
                );

                // The else branch of the Switch can't be hit, so branch to an unreachable
                // instruction so LLVM knows that.
                let unreachable_blk = self.unreachable_block();
                let switch = bcx.switch(discr, unreachable_blk.llbb, targets.len());
                assert_eq!(adt_def.variants.len(), targets.len());
                for (adt_variant, target) in adt_def.variants.iter().zip(targets) {
                    let llval = bcx.with_block(|bcx|
                        adt::trans_case(bcx, &repr, Disr::from(adt_variant.disr_val))
                    );
                    let llbb = self.llblock(*target);
                    build::AddCase(switch, llval, llbb)
                }
            }

            mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
                let (otherwise, targets) = targets.split_last().unwrap();
                let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval);
                let discr = bcx.with_block(|bcx| base::to_immediate(bcx, discr, switch_ty));
                let switch = bcx.switch(discr, self.llblock(*otherwise), values.len());
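                // Emit one switch case per listed value; the split-off final target
                // serves as the `otherwise` destination set up above.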
                for (value, target) in values.iter().zip(targets) {
                    let llval = self.trans_constval(&bcx, value, switch_ty).immediate();
                    let llbb = self.llblock(*target);
                    build::AddCase(switch, llval, llbb)
                }
            }

            mir::TerminatorKind::Return => {
                bcx.with_block(|bcx| {
                    self.fcx.build_return_block(bcx, DebugLoc::None);
                })
            }

            mir::TerminatorKind::Drop { ref value, target, unwind } => {
                let lvalue = self.trans_lvalue(&bcx, value);
                let ty = lvalue.ty.to_ty(bcx.tcx());
                // Double check for necessity to drop
                if !glue::type_needs_drop(bcx.tcx(), ty) {
                    funclet_br(bcx, self.llblock(target));
                    return;
                }
                let drop_fn = glue::get_drop_glue(bcx.ccx(), ty);
                let drop_ty = glue::get_drop_glue_type(bcx.ccx(), ty);
                let llvalue = if drop_ty != ty {
                    bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx(), drop_ty).ptr_to())
                } else {
                    lvalue.llval
                };
                if let Some(unwind) = unwind {
                    let uwbcx = self.bcx(unwind);
                    let unwind = self.make_landing_pad(uwbcx);
                    bcx.invoke(drop_fn,
                               &[llvalue],
                               self.llblock(target),
                               unwind.llbb(),
                               cleanup_bundle.as_ref());
                    self.bcx(target).at_start(|bcx| drop::drop_fill(bcx, lvalue.llval, ty));
                } else {
                    bcx.call(drop_fn, &[llvalue], cleanup_bundle.as_ref());
                    drop::drop_fill(&bcx, lvalue.llval, ty);
                    funclet_br(bcx, self.llblock(target));
                }
            }

            mir::TerminatorKind::Call { ref func, ref args, ref destination, ref cleanup } => {
                // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
                let callee = self.trans_operand(&bcx, func);

                let (mut callee, abi, sig) = match callee.ty.sty {
                    ty::TyFnDef(def_id, substs, f) => {
                        (Callee::def(bcx.ccx(), def_id, substs), f.abi, &f.sig)
                    }
                    ty::TyFnPtr(f) => {
                        (Callee {
                            data: Fn(callee.immediate()),
                            ty: callee.ty
                        }, f.abi, &f.sig)
                    }
                    _ => unreachable!("{} is not callable", callee.ty)
                };

                // Handle intrinsics old trans wants Expr's for, ourselves.
                let intrinsic = match (&callee.ty.sty, &callee.data) {
                    (&ty::TyFnDef(def_id, _, _), &Intrinsic) => {
                        Some(bcx.tcx().item_name(def_id).as_str())
                    }
                    _ => None
                };
                let intrinsic = intrinsic.as_ref().map(|s| &s[..]);

                if intrinsic == Some("move_val_init") {
                    let &(_, target) = destination.as_ref().unwrap();
                    // The first argument is a thin destination pointer.
                    let llptr = self.trans_operand(&bcx, &args[0]).immediate();
                    let val = self.trans_operand(&bcx, &args[1]);
                    self.store_operand(&bcx, llptr, val);
                    self.set_operand_dropped(&bcx, &args[1]);
                    funclet_br(bcx, self.llblock(target));
                    return;
                }

                if intrinsic == Some("transmute") {
                    let &(ref dest, target) = destination.as_ref().unwrap();
                    let dst = self.trans_lvalue(&bcx, dest);
                    let mut val = self.trans_operand(&bcx, &args[0]);
                    if let ty::TyFnDef(def_id, substs, _) = val.ty.sty {
                        let llouttype = type_of::type_of(bcx.ccx(), dst.ty.to_ty(bcx.tcx()));
                        let out_type_size = llbitsize_of_real(bcx.ccx(), llouttype);
                        if out_type_size != 0 {
                            // FIXME #19925 Remove this hack after a release cycle.
                            let f = Callee::def(bcx.ccx(), def_id, substs);
                            let datum = f.reify(bcx.ccx());
                            val = OperandRef {
                                val: OperandValue::Immediate(datum.val),
                                ty: datum.ty
                            };
                        }
                    }

                    let llty = type_of::type_of(bcx.ccx(), val.ty);
                    let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to());
                    self.store_operand(&bcx, cast_ptr, val);
                    self.set_operand_dropped(&bcx, &args[0]);
                    funclet_br(bcx, self.llblock(target));
                    return;
                }

                let extra_args = &args[sig.0.inputs.len()..];
                let extra_args = extra_args.iter().map(|op_arg| {
                    self.mir.operand_ty(bcx.tcx(), op_arg)
                }).collect::<Vec<_>>();
                let fn_ty = callee.direct_fn_type(bcx.ccx(), &extra_args);

                // The arguments we'll be passing. Plus one to account for outptr, if used.
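                // (An indirect return is passed as an extra leading out-pointer argument.)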
                let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize;
                let mut llargs = Vec::with_capacity(arg_count);

                // Prepare the return value destination
                let ret_dest = if let Some((ref d, _)) = *destination {
                    let dest = self.trans_lvalue(&bcx, d);
                    if fn_ty.ret.is_indirect() {
                        llargs.push(dest.llval);
                        None
                    } else if fn_ty.ret.is_ignore() {
                        None
                    } else {
                        Some(dest)
                    }
                } else {
                    None
                };

                // Split the rust-call tupled arguments off.
                let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
                    let (tup, args) = args.split_last().unwrap();
                    (args, Some(tup))
                } else {
                    (&args[..], None)
                };

                let mut idx = 0;
                for arg in first_args {
                    let val = self.trans_operand(&bcx, arg).val;
                    self.trans_argument(&bcx, val, &mut llargs, &fn_ty,
                                        &mut idx, &mut callee.data);
                }
                if let Some(tup) = untuple {
                    self.trans_arguments_untupled(&bcx, tup, &mut llargs, &fn_ty,
                                                  &mut idx, &mut callee.data)
                }

                let fn_ptr = match callee.data {
                    NamedTupleConstructor(_) => {
                        // FIXME translate this like mir::Rvalue::Aggregate.
                        callee.reify(bcx.ccx()).val
                    }
                    Intrinsic => {
                        use trans::callee::ArgVals;
                        use trans::expr::{Ignore, SaveIn};
                        use trans::intrinsic::trans_intrinsic_call;

                        let (dest, llargs) = if fn_ty.ret.is_indirect() {
                            (SaveIn(llargs[0]), &llargs[1..])
                        } else if let Some(dest) = ret_dest {
                            (SaveIn(dest.llval), &llargs[..])
                        } else {
                            (Ignore, &llargs[..])
                        };

                        bcx.with_block(|bcx| {
                            let res = trans_intrinsic_call(bcx, callee.ty, &fn_ty,
                                                           ArgVals(llargs), dest,
                                                           DebugLoc::None);
                            let bcx = res.bcx.build();
                            if let Some((_, target)) = *destination {
                                for op in args {
                                    self.set_operand_dropped(&bcx, op);
                                }
                                funclet_br(bcx, self.llblock(target));
                            } else {
                                // trans_intrinsic_call already used Unreachable.
                                // bcx.unreachable();
                            }
                        });
                        return;
                    }
                    Fn(f) => f,
                    Virtual(_) => unreachable!("Virtual fn ptr not extracted")
                };

                // Many different ways to call a function are handled here.
                if let Some(cleanup) = cleanup.map(|bb| self.bcx(bb)) {
                    // We translate the copy into a temporary block. The temporary block is
                    // necessary because the current block has already been terminated (by
                    // `invoke`) and we cannot really translate into the target block
                    // because:
                    //  * The target block may have more than a single predecessor;
                    //  * Some LLVM insns cannot have a preceding store insn (phi,
                    //    cleanuppad), and adding/prepending the store now may render
                    //    those other instructions invalid.
                    //
                    // NB: This approach still may break some LLVM code.
                    // For example, if the target block starts with a `phi` (which may only match
                    // on immediate predecessors), it cannot know about this temporary block, thus
                    // resulting in invalid code:
                    //
                    // this:
                    //     …
                    //     %0 = …
                    //     %1 = invoke to label %temp …
                    // temp:
                    //     store ty %1, ty* %dest
                    //     br label %actualtargetblock
                    // actualtargetblock:            ; preds: %temp, …
                    //     phi … [%this, …], [%0, …] ; ERROR: phi requires to match only on
                    //                               ; immediate predecessors
                    let ret_bcx = if destination.is_some() {
                        self.fcx.new_block("", None)
                    } else {
                        self.unreachable_block()
                    };
                    let landingpad = self.make_landing_pad(cleanup);

                    let invokeret = bcx.invoke(fn_ptr,
                                               &llargs,
                                               ret_bcx.llbb,
                                               landingpad.llbb(),
                                               cleanup_bundle.as_ref());
                    fn_ty.apply_attrs_callsite(invokeret);

                    landingpad.at_start(|bcx| for op in args {
                        self.set_operand_dropped(bcx, op);
                    });

                    if let Some((_, target)) = *destination {
                        let ret_bcx = ret_bcx.build();
                        if let Some(ret_dest) = ret_dest {
                            fn_ty.ret.store(&ret_bcx, invokeret, ret_dest.llval);
                        }
                        for op in args {
                            self.set_operand_dropped(&ret_bcx, op);
                        }
                        ret_bcx.br(self.llblock(target));
                    }
                } else {
                    let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle.as_ref());
                    fn_ty.apply_attrs_callsite(llret);
                    if let Some((_, target)) = *destination {
                        if let Some(ret_dest) = ret_dest {
                            fn_ty.ret.store(&bcx, llret, ret_dest.llval);
                        }
                        for op in args {
                            self.set_operand_dropped(&bcx, op);
                        }
                        funclet_br(bcx, self.llblock(target));
                    } else {
                        // no need to drop args, because the call never returns
                        bcx.unreachable();
                    }
                }
            }
        }
    }

    fn trans_argument(&mut self,
                      bcx: &BlockAndBuilder<'bcx, 'tcx>,
                      val: OperandValue,
                      llargs: &mut Vec<ValueRef>,
                      fn_ty: &FnType,
                      next_idx: &mut usize,
                      callee: &mut CalleeData) {
        // Treat the values in a fat pointer separately.
        if let FatPtr(ptr, meta) = val {
            if *next_idx == 0 {
                if let Virtual(idx) = *callee {
                    let llfn = bcx.with_block(|bcx| {
                        meth::get_virtual_method(bcx, meta, idx)
                    });
                    let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
                    *callee = Fn(bcx.pointercast(llfn, llty));
                }
            }
            self.trans_argument(bcx, Immediate(ptr), llargs, fn_ty, next_idx, callee);
            self.trans_argument(bcx, Immediate(meta), llargs, fn_ty, next_idx, callee);
            return;
        }

        let arg = &fn_ty.args[*next_idx];
        *next_idx += 1;

        // Fill padding with undef value, where applicable.
        if let Some(ty) = arg.pad {
            llargs.push(C_undef(ty));
        }

        if arg.is_ignore() {
            return;
        }

        // Force by-ref if we have to load through a cast pointer.
        let (mut llval, by_ref) = match val {
            Immediate(llval) if arg.is_indirect() || arg.cast.is_some() => {
                let llscratch = build::AllocaFcx(bcx.fcx(), arg.original_ty, "arg");
                bcx.store(llval, llscratch);
                (llscratch, true)
            }
            Immediate(llval) => (llval, false),
            Ref(llval) => (llval, true),
            FatPtr(_, _) => unreachable!("fat pointers handled above")
        };

        if by_ref && !arg.is_indirect() {
            // Have to load the argument, maybe while casting it.
            if arg.original_ty == Type::i1(bcx.ccx()) {
                // We store bools as i8 so we need to truncate to i1.
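                // The range assertion tells LLVM the loaded byte is only ever 0 or 1.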
                llval = bcx.load_range_assert(llval, 0, 2, llvm::False);
                llval = bcx.trunc(llval, arg.original_ty);
            } else if let Some(ty) = arg.cast {
                llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()));
                let llalign = llalign_of_min(bcx.ccx(), arg.ty);
                unsafe {
                    llvm::LLVMSetAlignment(llval, llalign);
                }
            } else {
                llval = bcx.load(llval);
            }
        }

        llargs.push(llval);
    }

    fn trans_arguments_untupled(&mut self,
                                bcx: &BlockAndBuilder<'bcx, 'tcx>,
                                operand: &mir::Operand<'tcx>,
                                llargs: &mut Vec<ValueRef>,
                                fn_ty: &FnType,
                                next_idx: &mut usize,
                                callee: &mut CalleeData) {
        // FIXME: consider having some optimization to avoid tupling/untupling
        // (and storing/loading in the case of immediates)

        // avoid trans_operand for pointless copying
        let lv = match *operand {
            mir::Operand::Consume(ref lvalue) => self.trans_lvalue(bcx, lvalue),
            mir::Operand::Constant(ref constant) => {
                // FIXME: consider being less pessimized
                if constant.ty.is_nil() {
                    return;
                }

                let ty = bcx.monomorphize(&constant.ty);
                let lv = LvalueRef::alloca(bcx, ty, "__untuple_alloca");
                let constant = self.trans_constant(bcx, constant);
                self.store_operand(bcx, lv.llval, constant);
                lv
            }
        };

        let lv_ty = lv.ty.to_ty(bcx.tcx());
        let result_types = match lv_ty.sty {
            ty::TyTuple(ref tys) => tys,
            _ => bcx.tcx().sess.span_bug(
                self.mir.span,
                &format!("bad final argument to \"rust-call\" fn {:?}", lv_ty))
        };

        let base_repr = adt::represent_type(bcx.ccx(), lv_ty);
        let base = adt::MaybeSizedValue::sized(lv.llval);
        for (n, &ty) in result_types.iter().enumerate() {
            let ptr = adt::trans_field_ptr_builder(bcx, &base_repr, base, Disr(0), n);
            let val = if common::type_is_fat_ptr(bcx.tcx(), ty) {
                let (lldata, llextra) = load_fat_ptr(bcx, ptr);
                FatPtr(lldata, llextra)
            } else {
                // Don't bother loading the value, trans_argument will.
                Ref(ptr)
            };
            self.trans_argument(bcx, val, llargs, fn_ty, next_idx, callee);
        }
    }

    fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRef {
        let ccx = bcx.ccx();
        if let Some(slot) = self.llpersonalityslot {
            slot
        } else {
            let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
            bcx.with_block(|bcx| {
                let slot = base::alloca(bcx, llretty, "personalityslot");
                self.llpersonalityslot = Some(slot);
                base::call_lifetime_start(bcx, slot);
                slot
            })
        }
    }

    /// Create a landingpad wrapper around the given Block.
    ///
    /// No-op in MSVC SEH scheme.
    fn make_landing_pad(&mut self,
                        cleanup: BlockAndBuilder<'bcx, 'tcx>)
                        -> BlockAndBuilder<'bcx, 'tcx> {
        if base::wants_msvc_seh(cleanup.sess()) {
            return cleanup;
        }
        let bcx = self.fcx.new_block("cleanup", None).build();
        let ccx = bcx.ccx();
        let llpersonality = self.fcx.eh_personality();
        let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
        let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.fcx.llfn);
        bcx.set_cleanup(llretval);
        let slot = self.get_personality_slot(&bcx);
        bcx.store(llretval, slot);
        bcx.br(cleanup.llbb());
        bcx
    }

    /// Create prologue cleanuppad instruction under MSVC SEH handling scheme.
    ///
    /// Also handles setting some state for the original trans and creating an operand bundle for
    /// function calls.
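    ///
    /// Returns the `cleanuppad` value paired with a `funclet` operand bundle when funclets are
    /// in use, and `None` otherwise.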
    fn make_cleanup_pad(&mut self, bb: mir::BasicBlock) -> Option<(ValueRef, OperandBundleDef)> {
        let bcx = self.bcx(bb);
        let data = self.mir.basic_block_data(bb);
        let use_funclets = base::wants_msvc_seh(bcx.sess()) && data.is_cleanup;
        let cleanup_pad = if use_funclets {
            bcx.set_personality_fn(self.fcx.eh_personality());
            bcx.at_start(|bcx| Some(bcx.cleanup_pad(None, &[])))
        } else {
            None
        };
        // Set the landingpad global-state for old translator, so it knows about the SEH used.
        bcx.set_lpad(if let Some(cleanup_pad) = cleanup_pad {
            Some(common::LandingPad::msvc(cleanup_pad))
        } else if data.is_cleanup {
            Some(common::LandingPad::gnu())
        } else {
            None
        });
        cleanup_pad.map(|f| (f, OperandBundleDef::new("funclet", &[f])))
    }

    fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> {
        self.unreachable_block.unwrap_or_else(|| {
            let bl = self.fcx.new_block("unreachable", None);
            bl.build().unreachable();
            self.unreachable_block = Some(bl);
            bl
        })
    }

    fn bcx(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'bcx, 'tcx> {
        self.blocks[bb.index()].build()
    }

    pub fn llblock(&self, bb: mir::BasicBlock) -> BasicBlockRef {
        self.blocks[bb.index()].llbb
    }
}