Revert back to using FunctionCx's Body

Paul Daniel Faria 2019-10-14 01:38:38 -04:00
parent 16952cce01
commit 66279d12f9
7 changed files with 118 additions and 147 deletions
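The change is mechanical across all seven files: FunctionCx already stores a reference to the MIR Body it is compiling, so the mir: &Body<'tcx> parameter that was previously threaded through every codegen helper is redundant, and each use can read self.mir (or fx.mir) instead. Below is a minimal sketch of the shape of this refactor, using simplified stand-in types and hypothetical method names rather than the real rustc definitions (the actual FunctionCx has many more fields):

// Simplified stand-ins, for illustration only.
struct Body {
    arg_count: usize,
}

struct FunctionCx<'a> {
    mir: &'a Body,
}

impl<'a> FunctionCx<'a> {
    // Before: every helper took the Body as an extra parameter,
    // even though the struct already holds it.
    fn arg_count_before(&self, mir: &Body) -> usize {
        mir.arg_count
    }

    // After: helpers read the Body the struct already stores.
    fn arg_count_after(&self) -> usize {
        self.mir.arg_count
    }
}

fn main() {
    let body = Body { arg_count: 2 };
    let fx = FunctionCx { mir: &body };
    assert_eq!(fx.arg_count_before(&body), fx.arg_count_after());
}

Dropping the parameter shortens every signature and call site, which is where the imbalance of 147 deletions against 118 additions comes from.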


@@ -245,7 +245,7 @@ impl<'mir, 'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
if let Some(index) = place.as_local() {
self.assign(index, location);
let decl_span = self.fx.mir.local_decls[index].source_info.span;
-if !self.fx.rvalue_creates_operand(rvalue, decl_span, self.fx.mir) {
+if !self.fx.rvalue_creates_operand(rvalue, decl_span) {
self.not_ssa(index);
}
} else {


@@ -2,7 +2,7 @@ use rustc_index::vec::Idx;
use rustc::middle::lang_items;
use rustc::ty::{self, Ty, TypeFoldable, Instance};
use rustc::ty::layout::{self, LayoutOf, HasTyCtxt, FnAbiExt};
-use rustc::mir::{self, Body, PlaceBase, Static, StaticKind};
+use rustc::mir::{self, PlaceBase, Static, StaticKind};
use rustc::mir::interpret::PanicInfo;
use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
use rustc_target::spec::abi::Abi;
@@ -46,7 +46,6 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
fn lltarget<'b, 'c, 'd, Bx: BuilderMethods<'b, 'tcx>>(
&self,
fx: &'d mut FunctionCx<'b, 'c, 'tcx, Bx>,
-mir: &Body<'tcx>,
target: mir::BasicBlock
) -> (Bx::BasicBlock, bool) {
let span = self.terminator.source_info.span;
@@ -57,9 +56,9 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
(Some(f), Some(t_f)) if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) =>
(lltarget, false),
// jump *into* cleanup - need a landing pad if GNU
-(None, Some(_)) => (fx.landing_pad_to(target, mir), false),
+(None, Some(_)) => (fx.landing_pad_to(target), false),
(Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator),
-(Some(_), Some(_)) => (fx.landing_pad_to(target, mir), true),
+(Some(_), Some(_)) => (fx.landing_pad_to(target), true),
}
}
@@ -67,10 +66,9 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
fn llblock<'d, 'c, 'b, Bx: BuilderMethods<'b, 'tcx>>(
&self,
fx: &'d mut FunctionCx<'b, 'c, 'tcx, Bx>,
-mir: &Body<'tcx>,
target: mir::BasicBlock
) -> Bx::BasicBlock {
-let (lltarget, is_cleanupret) = self.lltarget(fx, mir, target);
+let (lltarget, is_cleanupret) = self.lltarget(fx, target);
if is_cleanupret {
// MSVC cross-funclet jump - need a trampoline
@@ -88,11 +86,10 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
fn funclet_br<'d, 'c, 'b, Bx: BuilderMethods<'b, 'tcx>>(
&self,
fx: &'d mut FunctionCx<'b, 'c, 'tcx, Bx>,
-mir: &Body<'tcx>,
bx: &mut Bx,
target: mir::BasicBlock,
) {
-let (lltarget, is_cleanupret) = self.lltarget(fx, mir, target);
+let (lltarget, is_cleanupret) = self.lltarget(fx, target);
if is_cleanupret {
// micro-optimization: generate a `ret` rather than a jump
// to a trampoline.
@@ -107,7 +104,6 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
fn do_call<'d, 'c, 'b, Bx: BuilderMethods<'b, 'tcx>>(
&self,
fx: &'d mut FunctionCx<'b, 'c, 'tcx, Bx>,
-mir: &Body<'tcx>,
bx: &mut Bx,
fn_abi: FnAbi<'tcx, Ty<'tcx>>,
fn_ptr: Bx::Value,
@@ -124,19 +120,19 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
let invokeret = bx.invoke(fn_ptr,
&llargs,
ret_bx,
-self.llblock(fx, mir, cleanup),
+self.llblock(fx, cleanup),
self.funclet(fx));
bx.apply_attrs_callsite(&fn_abi, invokeret);
if let Some((ret_dest, target)) = destination {
let mut ret_bx = fx.build_block(target);
-fx.set_debug_loc(&mut ret_bx, self.terminator.source_info, mir);
+fx.set_debug_loc(&mut ret_bx, self.terminator.source_info);
fx.store_return(&mut ret_bx, ret_dest, &fn_abi.ret, invokeret);
}
} else {
let llret = bx.call(fn_ptr, &llargs, self.funclet(fx));
bx.apply_attrs_callsite(&fn_abi, llret);
-if mir[*self.bb].is_cleanup {
+if fx.mir[*self.bb].is_cleanup {
// Cleanup is always the cold path. Don't inline
// drop glue. Also, when there is a deeply-nested
// struct, there are "symmetry" issues that cause
@@ -146,7 +142,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
if let Some((ret_dest, target)) = destination {
fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
-self.funclet_br(fx, mir, bx, target);
+self.funclet_br(fx, bx, target);
} else {
bx.unreachable();
}
@@ -208,20 +204,19 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
fn codegen_switchint_terminator<'c>(
&mut self,
helper: TerminatorCodegenHelper<'c, 'tcx>,
-mir: &Body<'tcx>,
mut bx: Bx,
discr: &mir::Operand<'tcx>,
switch_ty: Ty<'tcx>,
values: &Cow<'tcx, [u128]>,
targets: &Vec<mir::BasicBlock>,
) {
-let discr = self.codegen_operand(mir, &mut bx, &discr);
+let discr = self.codegen_operand(&mut bx, &discr);
if targets.len() == 2 {
// If there are two targets, emit br instead of switch
-let lltrue = helper.llblock(self, mir, targets[0]);
-let llfalse = helper.llblock(self, mir, targets[1]);
+let lltrue = helper.llblock(self, targets[0]);
+let llfalse = helper.llblock(self, targets[1]);
if switch_ty == bx.tcx().types.bool {
-helper.maybe_sideeffect(mir, &mut bx, targets.as_slice());
+helper.maybe_sideeffect(self.mir, &mut bx, targets.as_slice());
// Don't generate trivial icmps when switching on bool
if let [0] = values[..] {
bx.cond_br(discr.immediate(), llfalse, lltrue);
@@ -243,15 +238,15 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
let (otherwise, targets) = targets.split_last().unwrap();
bx.switch(
discr.immediate(),
-helper.llblock(self, mir, *otherwise),
+helper.llblock(self, *otherwise),
values.iter().zip(targets).map(|(&value, target)| {
-(value, helper.llblock(self, mir, *target))
+(value, helper.llblock(self, *target))
})
);
}
}
-fn codegen_return_terminator(&mut self, mir: &Body<'tcx>, mut bx: Bx) {
+fn codegen_return_terminator(&mut self, mut bx: Bx) {
// Call `va_end` if this is the definition of a C-variadic function.
if self.fn_abi.c_variadic {
// The `VaList` "spoofed" argument is just after all the real arguments.
@@ -278,7 +273,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
PassMode::Direct(_) | PassMode::Pair(..) => {
let op =
-self.codegen_consume(mir, &mut bx, &mir::Place::return_place().as_ref());
+self.codegen_consume(&mut bx, &mir::Place::return_place().as_ref());
if let Ref(llval, _, align) = op.val {
bx.load(llval, align)
} else {
@@ -324,24 +319,23 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
fn codegen_drop_terminator<'c>(
&mut self,
helper: TerminatorCodegenHelper<'c, 'tcx>,
-mir: &Body<'tcx>,
mut bx: Bx,
location: &mir::Place<'tcx>,
target: mir::BasicBlock,
unwind: Option<mir::BasicBlock>,
) {
-let ty = location.ty(mir, bx.tcx()).ty;
+let ty = location.ty(self.mir, bx.tcx()).ty;
let ty = self.monomorphize(&ty);
let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty);
if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
// we don't actually need to drop anything.
-helper.maybe_sideeffect(mir, &mut bx, &[target]);
-helper.funclet_br(self, mir, &mut bx, target);
+helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+helper.funclet_br(self, &mut bx, target);
return
}
-let place = self.codegen_place(mir, &mut bx, &location.as_ref());
+let place = self.codegen_place(&mut bx, &location.as_ref());
let (args1, args2);
let mut args = if let Some(llextra) = place.llextra {
args2 = [place.llval, llextra];
@@ -367,7 +361,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
FnAbi::of_instance(&bx, drop_fn))
}
};
-helper.maybe_sideeffect(mir, &mut bx, &[target]);
+helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
helper.do_call(self, &mut bx, fn_ty, drop_fn, args,
Some((ReturnDest::Nothing, target)),
unwind);
@@ -376,7 +370,6 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
fn codegen_assert_terminator<'c>(
&mut self,
helper: TerminatorCodegenHelper<'c, 'tcx>,
-mir: &Body<'tcx>,
mut bx: Bx,
terminator: &mir::Terminator<'tcx>,
cond: &mir::Operand<'tcx>,
@@ -386,7 +379,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
cleanup: Option<mir::BasicBlock>,
) {
let span = terminator.source_info.span;
-let cond = self.codegen_operand(mir, &mut bx, cond).immediate();
+let cond = self.codegen_operand(&mut bx, cond).immediate();
let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);
// This case can currently arise only from functions marked
@@ -404,8 +397,8 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
// Don't codegen the panic block if success if known.
if const_cond == Some(expected) {
-helper.maybe_sideeffect(mir, &mut bx, &[target]);
-helper.funclet_br(self, mir, &mut bx, target);
+helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+helper.funclet_br(self, &mut bx, target);
return;
}
@@ -413,9 +406,9 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
let cond = bx.expect(cond, expected);
// Create the failure block and the conditional branch to it.
-let lltarget = helper.llblock(self, mir, target);
+let lltarget = helper.llblock(self, target);
let panic_block = self.new_block("panic");
-helper.maybe_sideeffect(mir, &mut bx, &[target]);
+helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
if expected {
bx.cond_br(cond, lltarget, panic_block.llbb());
} else {
@@ -424,7 +417,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
// After this point, bx is the block for the call to panic.
bx = panic_block;
-self.set_debug_loc(&mut bx, terminator.source_info, mir);
+self.set_debug_loc(&mut bx, terminator.source_info);
// Get the location information.
let location = self.get_caller_location(&mut bx, span).immediate();
@@ -432,8 +425,8 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
// Put together the arguments to the panic entry point.
let (lang_item, args) = match msg {
PanicInfo::BoundsCheck { ref len, ref index } => {
-let len = self.codegen_operand(mir, &mut bx, len).immediate();
-let index = self.codegen_operand(mir, &mut bx, index).immediate();
+let len = self.codegen_operand(&mut bx, len).immediate();
+let index = self.codegen_operand(&mut bx, index).immediate();
(lang_items::PanicBoundsCheckFnLangItem, vec![location, index, len])
}
_ => {
@@ -450,13 +443,12 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
let llfn = bx.get_fn_addr(instance);
// Codegen the actual panic invoke/call.
-helper.do_call(self, mir, &mut bx, fn_abi, llfn, &args, None, cleanup);
+helper.do_call(self, &mut bx, fn_abi, llfn, &args, None, cleanup);
}
fn codegen_call_terminator<'c>(
&mut self,
helper: TerminatorCodegenHelper<'c, 'tcx>,
-mir: &Body<'tcx>,
mut bx: Bx,
terminator: &mir::Terminator<'tcx>,
func: &mir::Operand<'tcx>,
@@ -466,7 +458,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
) {
let span = terminator.source_info.span;
// Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
-let callee = self.codegen_operand(mir, &mut bx, func);
+let callee = self.codegen_operand(&mut bx, func);
let (instance, mut llfn) = match callee.layout.ty.kind {
ty::FnDef(def_id, substs) => {
@@ -500,9 +492,9 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
if intrinsic == Some("transmute") {
if let Some(destination_ref) = destination.as_ref() {
let &(ref dest, target) = destination_ref;
-self.codegen_transmute(mir, &mut bx, &args[0], dest);
-helper.maybe_sideeffect(mir, &mut bx, &[target]);
-helper.funclet_br(self, mir, &mut bx, target);
+self.codegen_transmute(&mut bx, &args[0], dest);
+helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+helper.funclet_br(self, &mut bx, target);
} else {
// If we are trying to transmute to an uninhabited type,
// it is likely there is no allotted destination. In fact,
@@ -518,7 +510,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
let extra_args = &args[sig.inputs().len()..];
let extra_args = extra_args.iter().map(|op_arg| {
-let op_ty = op_arg.ty(mir, bx.tcx());
+let op_ty = op_arg.ty(self.mir, bx.tcx());
self.monomorphize(&op_ty)
}).collect::<Vec<_>>();
@@ -529,8 +521,8 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
Some(ty::InstanceDef::DropGlue(_, None)) => {
// Empty drop glue; a no-op.
let &(_, target) = destination.as_ref().unwrap();
-helper.maybe_sideeffect(mir, &mut bx, &[target]);
-helper.funclet_br(self, mir, &mut bx, target);
+helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+helper.funclet_br(self, &mut bx, target);
return;
}
_ => FnAbi::new(&bx, sig, &extra_args)
@@ -566,7 +558,6 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
// Codegen the actual panic invoke/call.
helper.do_call(
self,
-mir,
&mut bx,
fn_abi,
llfn,
@@ -590,8 +581,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
// Prepare the return value destination
let ret_dest = if let Some((ref dest, _)) = *destination {
let is_intrinsic = intrinsic.is_some();
-self.make_return_dest(&mut bx, dest, &fn_abi.ret, &mut llargs,
-is_intrinsic, mir)
+self.make_return_dest(&mut bx, dest, &fn_abi.ret, &mut llargs, is_intrinsic)
} else {
ReturnDest::Nothing
};
@@ -680,7 +670,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
}
}
-self.codegen_operand(mir, &mut bx, arg)
+self.codegen_operand(&mut bx, arg)
}).collect();
@@ -692,8 +682,8 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
}
if let Some((_, target)) = *destination {
-helper.maybe_sideeffect(mir, &mut bx, &[target]);
-helper.funclet_br(self, mir, &mut bx, target);
+helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+helper.funclet_br(self, &mut bx, target);
} else {
bx.unreachable();
}
@@ -710,7 +700,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
};
'make_args: for (i, arg) in first_args.iter().enumerate() {
-let mut op = self.codegen_operand(mir, &mut bx, arg);
+let mut op = self.codegen_operand(&mut bx, arg);
if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
if let Pair(..) = op.val {
@@ -775,7 +765,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
self.codegen_argument(&mut bx, op, &mut llargs, &fn_abi.args[i]);
}
if let Some(tup) = untuple {
-self.codegen_arguments_untupled(mir, &mut bx, tup, &mut llargs,
+self.codegen_arguments_untupled(&mut bx, tup, &mut llargs,
&fn_abi.args[first_args.len()..])
}
@@ -786,7 +776,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
};
if let Some((_, target)) = destination.as_ref() {
-helper.maybe_sideeffect(mir, &mut bx, &[*target]);
+helper.maybe_sideeffect(self.mir, &mut bx, &[*target]);
}
helper.do_call(self, &mut bx, fn_ty, fn_ptr, &llargs,
destination.as_ref().map(|&(_, target)| (ret_dest, target)),
@@ -798,18 +788,17 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
pub fn codegen_block(
&mut self,
bb: mir::BasicBlock,
-mir: &Body<'tcx>,
) {
let mut bx = self.build_block(bb);
-let data = &mir[bb];
+let data = &self.mir[bb];
debug!("codegen_block({:?}={:?})", bb, data);
for statement in &data.statements {
-bx = self.codegen_statement(mir, bx, statement);
+bx = self.codegen_statement(bx, statement);
}
-self.codegen_terminator(bx, bb, data.terminator(), mir);
+self.codegen_terminator(bx, bb, data.terminator());
}
fn codegen_terminator(
@@ -817,7 +806,6 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
mut bx: Bx,
bb: mir::BasicBlock,
terminator: &mir::Terminator<'tcx>,
-mir: &Body<'tcx>
) {
debug!("codegen_terminator: {:?}", terminator);
@@ -827,7 +815,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
bb: &bb, terminator, funclet_bb
};
-self.set_debug_loc(&mut bx, terminator.source_info, mir);
+self.set_debug_loc(&mut bx, terminator.source_info);
match terminator.kind {
mir::TerminatorKind::Resume => {
self.codegen_resume_terminator(helper, bx)
@@ -839,19 +827,18 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
}
mir::TerminatorKind::Goto { target } => {
-helper.maybe_sideeffect(mir, &mut bx, &[target]);
-helper.funclet_br(self, mir, &mut bx, target);
+helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
+helper.funclet_br(self, &mut bx, target);
}
mir::TerminatorKind::SwitchInt {
ref discr, switch_ty, ref values, ref targets
} => {
-self.codegen_switchint_terminator(helper, mir, bx, discr, switch_ty,
-values, targets);
+self.codegen_switchint_terminator(helper, bx, discr, switch_ty, values, targets);
}
mir::TerminatorKind::Return => {
-self.codegen_return_terminator(mir, bx);
+self.codegen_return_terminator(bx);
}
mir::TerminatorKind::Unreachable => {
@@ -859,11 +846,11 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
}
mir::TerminatorKind::Drop { ref location, target, unwind } => {
-self.codegen_drop_terminator(helper, mir, bx, location, target, unwind);
+self.codegen_drop_terminator(helper, bx, location, target, unwind);
}
mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
-self.codegen_assert_terminator(helper, mir, bx, terminator, cond,
+self.codegen_assert_terminator(helper, bx, terminator, cond,
expected, msg, target, cleanup);
}
@@ -878,7 +865,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
cleanup,
from_hir_call: _
} => {
-self.codegen_call_terminator(helper, mir, bx, terminator, func,
+self.codegen_call_terminator(helper, bx, terminator, func,
args, destination, cleanup);
}
mir::TerminatorKind::GeneratorDrop |
@@ -983,13 +970,12 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
fn codegen_arguments_untupled(
&mut self,
-mir: &Body<'tcx>,
bx: &mut Bx,
operand: &mir::Operand<'tcx>,
llargs: &mut Vec<Bx::Value>,
args: &[ArgAbi<'tcx, Ty<'tcx>>]
) {
-let tuple = self.codegen_operand(mir, bx, operand);
+let tuple = self.codegen_operand(bx, operand);
// Handle both by-ref and immediate tuples.
if let Ref(llval, None, align) = tuple.val {
@@ -1049,14 +1035,13 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
fn landing_pad_to(
&mut self,
target_bb: mir::BasicBlock,
-mir: &Body<'tcx>
) -> Bx::BasicBlock {
if let Some(block) = self.landing_pads[target_bb] {
return block;
}
let block = self.blocks[target_bb];
-let landing_pad = self.landing_pad_uncached(block, mir);
+let landing_pad = self.landing_pad_uncached(block);
self.landing_pads[target_bb] = Some(landing_pad);
landing_pad
}
@@ -1064,10 +1049,9 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
fn landing_pad_uncached(
&mut self,
target_bb: Bx::BasicBlock,
-mir: &Body<'tcx>
) -> Bx::BasicBlock {
if base::wants_msvc_seh(self.cx.sess()) {
-span_bug!(mir.span, "landing pad was not inserted?")
+span_bug!(self.mir.span, "landing pad was not inserted?")
}
let mut bx = self.new_block("cleanup");
@@ -1120,7 +1104,6 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
dest: &mir::Place<'tcx>,
fn_ret: &ArgAbi<'tcx, Ty<'tcx>>,
llargs: &mut Vec<Bx::Value>, is_intrinsic: bool,
-mir: &Body<'tcx>
) -> ReturnDest<'tcx, Bx::Value> {
// If the return is ignored, we can just return a do-nothing `ReturnDest`.
if fn_ret.is_ignore() {
@@ -1156,7 +1139,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
}
}
} else {
-self.codegen_place(mir, bx, &mir::PlaceRef {
+self.codegen_place(bx, &mir::PlaceRef {
base: &dest.base,
projection: &dest.projection,
})
@@ -1169,7 +1152,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
//
// If someone changes that, please update this code path
// to create a temporary.
-span_bug!(mir.span, "can't directly store to unaligned value");
+span_bug!(self.mir.span, "can't directly store to unaligned value");
}
llargs.push(dest.llval);
ReturnDest::Nothing
@@ -1180,21 +1163,20 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
fn codegen_transmute(
&mut self,
-mir: &Body<'tcx>,
bx: &mut Bx,
src: &mir::Operand<'tcx>,
dst: &mir::Place<'tcx>
) {
if let Some(index) = dst.as_local() {
match self.locals[index] {
-LocalRef::Place(place) => self.codegen_transmute_into(mir, bx, src, place),
+LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"),
LocalRef::Operand(None) => {
-let dst_layout = bx.layout_of(self.monomorphized_place_ty(&dst.as_ref(), mir));
+let dst_layout = bx.layout_of(self.monomorphized_place_ty(&dst.as_ref()));
assert!(!dst_layout.ty.has_erasable_regions());
let place = PlaceRef::alloca(bx, dst_layout);
place.storage_live(bx);
-self.codegen_transmute_into(mir, bx, src, place);
+self.codegen_transmute_into(bx, src, place);
let op = bx.load_operand(place);
place.storage_dead(bx);
self.locals[index] = LocalRef::Operand(Some(op));
@@ -1205,19 +1187,18 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
}
}
} else {
-let dst = self.codegen_place(mir, bx, &dst.as_ref());
-self.codegen_transmute_into(mir, bx, src, dst);
+let dst = self.codegen_place(bx, &dst.as_ref());
+self.codegen_transmute_into(bx, src, dst);
}
}
fn codegen_transmute_into(
&mut self,
-mir: &Body<'tcx>,
bx: &mut Bx,
src: &mir::Operand<'tcx>,
dst: PlaceRef<'tcx, Bx::Value>
) {
-let src = self.codegen_operand(mir, bx, src);
+let src = self.codegen_operand(bx, src);
let llty = bx.backend_type(src.layout);
let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
let align = src.layout.align.abi.min(dst.align);


@@ -178,7 +178,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// Allocate variable and temp allocas
fx.locals = {
-let args = arg_local_refs(&mut bx, &fx, &mir, &memory_locals);
+let args = arg_local_refs(&mut bx, &fx, &memory_locals);
let mut allocate_local = |local| {
let decl = &mir.local_decls[local];
@@ -232,7 +232,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// Codegen the body of each block using reverse postorder
for (bb, _) in rpo {
visited.insert(bb.index());
-fx.codegen_block(bb, &mir);
+fx.codegen_block(bb);
}
// Remove blocks that haven't been visited, or have no
@@ -321,16 +321,15 @@ fn create_funclets<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
fn arg_local_refs<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
fx: &FunctionCx<'a, 'b, 'tcx, Bx>,
-mir: &Body<'tcx>,
memory_locals: &BitSet<mir::Local>,
) -> Vec<LocalRef<'tcx, Bx::Value>> {
let mut idx = 0;
let mut llarg_idx = fx.fn_abi.ret.is_indirect() as usize;
-mir.args_iter().enumerate().map(|(arg_index, local)| {
-let arg_decl = &mir.local_decls[local];
+fx.mir.args_iter().enumerate().map(|(arg_index, local)| {
+let arg_decl = &fx.mir.local_decls[local];
-if Some(local) == mir.spread_arg {
+if Some(local) == fx.mir.spread_arg {
// This argument (e.g., the last argument in the "rust-call" ABI)
// is a tuple that was spread at the ABI level and now we have
// to reconstruct it into a tuple local variable, from multiple


@@ -7,7 +7,7 @@ use crate::glue;
use crate::traits::*;
use rustc::mir::interpret::{ConstValue, ErrorHandled, Pointer, Scalar};
-use rustc::mir::{self, Body};
+use rustc::mir;
use rustc::ty;
use rustc::ty::layout::{self, Align, LayoutOf, TyLayout, Size};
@@ -428,13 +428,12 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
pub fn codegen_consume(
&mut self,
-mir: &Body<'tcx>,
bx: &mut Bx,
place_ref: &mir::PlaceRef<'_, 'tcx>
) -> OperandRef<'tcx, Bx::Value> {
debug!("codegen_consume(place_ref={:?})", place_ref);
-let ty = self.monomorphized_place_ty(place_ref, mir);
+let ty = self.monomorphized_place_ty(place_ref);
let layout = bx.cx().layout_of(ty);
// ZSTs don't require any actual memory access.
@@ -448,13 +447,12 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
// for most places, to consume them we just load them
// out from their home
-let place = self.codegen_place(mir, bx, place_ref);
+let place = self.codegen_place(bx, place_ref);
bx.load_operand(place)
}
pub fn codegen_operand(
&mut self,
-mir: &Body<'tcx>,
bx: &mut Bx,
operand: &mir::Operand<'tcx>
) -> OperandRef<'tcx, Bx::Value> {
@@ -463,7 +461,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
match *operand {
mir::Operand::Copy(ref place) |
mir::Operand::Move(ref place) => {
-self.codegen_consume(mir, bx, &place.as_ref())
+self.codegen_consume(bx, &place.as_ref())
}
mir::Operand::Constant(ref constant) => {


@@ -8,7 +8,7 @@ use crate::traits::*;
use rustc::ty::{self, Instance, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
-use rustc::mir::{self, Body};
+use rustc::mir;
use rustc::mir::tcx::PlaceTy;
#[derive(Copy, Clone, Debug)]
@@ -438,7 +438,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
pub fn codegen_place(
&mut self,
-mir: &Body<'tcx>,
bx: &mut Bx,
place_ref: &mir::PlaceRef<'_, 'tcx>
) -> PlaceRef<'tcx, Bx::Value> {
@@ -519,7 +518,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
projection: [proj_base @ .., mir::ProjectionElem::Deref],
} => {
// Load the pointer from its location.
-self.codegen_consume(mir, bx, &mir::PlaceRef {
+self.codegen_consume(bx, &mir::PlaceRef {
base,
projection: proj_base,
}).deref(bx.cx())
@@ -529,7 +528,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
projection: [proj_base @ .., elem],
} => {
// FIXME turn this recursion into iteration
-let cg_base = self.codegen_place(mir, bx, &mir::PlaceRef {
+let cg_base = self.codegen_place(bx, &mir::PlaceRef {
base,
projection: proj_base,
});
@@ -543,7 +542,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
let index = &mir::Operand::Copy(
mir::Place::from(*index)
);
-let index = self.codegen_operand(mir, bx, index);
+let index = self.codegen_operand(bx, index);
let llindex = index.immediate();
cg_base.project_index(bx, llindex)
}
@@ -590,9 +589,9 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
result
}
-pub fn monomorphized_place_ty(&self, place_ref: &mir::PlaceRef<'_, 'tcx>, mir: &Body<'tcx>) -> Ty<'tcx> {
+pub fn monomorphized_place_ty(&self, place_ref: &mir::PlaceRef<'_, 'tcx>) -> Ty<'tcx> {
let tcx = self.cx.tcx();
-let place_ty = mir::Place::ty_from(place_ref.base, place_ref.projection, mir, tcx);
+let place_ty = mir::Place::ty_from(place_ref.base, place_ref.projection, self.mir, tcx);
self.monomorphize(&place_ty.ty)
}
}


@@ -10,7 +10,7 @@ use crate::traits::*;
use rustc::ty::{self, Ty, adjustment::{PointerCast}, Instance};
use rustc::ty::cast::{CastTy, IntTy};
use rustc::ty::layout::{self, LayoutOf, HasTyCtxt};
-use rustc::mir::{self, Body};
+use rustc::mir;
use rustc::middle::lang_items::ExchangeMallocFnLangItem;
use rustc_apfloat::{ieee, Float, Status, Round};
use syntax::symbol::sym;
@@ -21,7 +21,6 @@ use std::{u128, i128};
impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
pub fn codegen_rvalue(
&mut self,
-mir: &Body<'tcx>,
mut bx: Bx,
dest: PlaceRef<'tcx, Bx::Value>,
rvalue: &mir::Rvalue<'tcx>
@@ -31,7 +30,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
match *rvalue {
mir::Rvalue::Use(ref operand) => {
-let cg_operand = self.codegen_operand(mir, &mut bx, operand);
+let cg_operand = self.codegen_operand(&mut bx, operand);
// FIXME: consider not copying constants through stack. (Fixable by codegen'ing
// constants into `OperandValue::Ref`; why dont we do that yet if we dont?)
cg_operand.val.store(&mut bx, dest);
@@ -44,7 +43,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
if bx.cx().is_backend_scalar_pair(dest.layout) {
// Into-coerce of a thin pointer to a fat pointer -- just
// use the operand path.
-let (mut bx, temp) = self.codegen_rvalue_operand(mir, bx, rvalue);
+let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
temp.val.store(&mut bx, dest);
return bx;
}
@@ -53,7 +52,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
// this to be eliminated by MIR building, but
// `CoerceUnsized` can be passed by a where-clause,
// so the (generic) MIR may not be able to expand it.
-let operand = self.codegen_operand(mir, &mut bx, source);
+let operand = self.codegen_operand(&mut bx, source);
match operand.val {
OperandValue::Pair(..) |
OperandValue::Immediate(_) => {
@@ -82,7 +81,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
}
mir::Rvalue::Repeat(ref elem, count) => {
-let cg_elem = self.codegen_operand(mir, &mut bx, elem);
+let cg_elem = self.codegen_operand(&mut bx, elem);
// Do not generate the loop for zero-sized elements or empty arrays.
if dest.layout.is_zst() {
@@ -125,7 +124,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
_ => (dest, None)
};
for (i, operand) in operands.iter().enumerate() {
-let op = self.codegen_operand(mir, &mut bx, operand);
+let op = self.codegen_operand(&mut bx, operand);
// Do not generate stores and GEPis for zero-sized fields.
if !op.layout.is_zst() {
let field_index = active_field_index.unwrap_or(i);
@@ -137,8 +136,8 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
}
_ => {
-assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP, mir));
-let (mut bx, temp) = self.codegen_rvalue_operand(mir, bx, rvalue);
+assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
+let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
temp.val.store(&mut bx, dest);
bx
}
@@ -147,7 +146,6 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
pub fn codegen_rvalue_unsized(
&mut self,
-mir: &Body<'tcx>,
mut bx: Bx,
indirect_dest: PlaceRef<'tcx, Bx::Value>,
rvalue: &mir::Rvalue<'tcx>,
@@ -157,7 +155,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
match *rvalue {
mir::Rvalue::Use(ref operand) => {
-let cg_operand = self.codegen_operand(mir, &mut bx, operand);
+let cg_operand = self.codegen_operand(&mut bx, operand);
cg_operand.val.store_unsized(&mut bx, indirect_dest);
bx
}
@@ -168,19 +166,18 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
pub fn codegen_rvalue_operand(
&mut self,
-mir: &Body<'tcx>,
mut bx: Bx,
rvalue: &mir::Rvalue<'tcx>
) -> (Bx, OperandRef<'tcx, Bx::Value>) {
assert!(
-self.rvalue_creates_operand(rvalue, DUMMY_SP, mir),
+self.rvalue_creates_operand(rvalue, DUMMY_SP),
"cannot codegen {:?} to operand",
rvalue,
);
match *rvalue {
mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
-let operand = self.codegen_operand(mir, &mut bx, source);
+let operand = self.codegen_operand(&mut bx, source);
debug!("cast operand is {:?}", operand);
let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty));
@@ -373,7 +370,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
}
mir::Rvalue::Ref(_, bk, ref place) => {
-let cg_place = self.codegen_place(mir, &mut bx, &place.as_ref());
+let cg_place = self.codegen_place(&mut bx, &place.as_ref());
let ty = cg_place.layout.ty;
@@ -394,7 +391,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
}
mir::Rvalue::Len(ref place) => {
-let size = self.evaluate_array_len(mir, &mut bx, place);
+let size = self.evaluate_array_len(&mut bx, place);
let operand = OperandRef {
val: OperandValue::Immediate(size),
layout: bx.cx().layout_of(bx.tcx().types.usize),
@@ -403,8 +400,8 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
}
mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
-let lhs = self.codegen_operand(mir, &mut bx, lhs);
-let rhs = self.codegen_operand(mir, &mut bx, rhs);
+let lhs = self.codegen_operand(&mut bx, lhs);
+let rhs = self.codegen_operand(&mut bx, rhs);
let llresult = match (lhs.val, rhs.val) {
(OperandValue::Pair(lhs_addr, lhs_extra),
OperandValue::Pair(rhs_addr, rhs_extra)) => {
@@ -429,8 +426,8 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
(bx, operand)
}
mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
-let lhs = self.codegen_operand(mir, &mut bx, lhs);
-let rhs = self.codegen_operand(mir, &mut bx, rhs);
+let lhs = self.codegen_operand(&mut bx, lhs);
+let rhs = self.codegen_operand(&mut bx, rhs);
let result = self.codegen_scalar_checked_binop(&mut bx, op,
lhs.immediate(), rhs.immediate(),
lhs.layout.ty);
@@ -445,7 +442,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
}
mir::Rvalue::UnaryOp(op, ref operand) => {
-let operand = self.codegen_operand(mir, &mut bx, operand);
+let operand = self.codegen_operand(&mut bx, operand);
let lloperand = operand.immediate();
let is_float = operand.layout.ty.is_floating_point();
let llval = match op {
@@ -463,8 +460,8 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
}
mir::Rvalue::Discriminant(ref place) => {
-let discr_ty = rvalue.ty(mir, bx.tcx());
-let discr = self.codegen_place(mir, &mut bx, &place.as_ref())
+let discr_ty = rvalue.ty(self.mir, bx.tcx());
+let discr = self.codegen_place(&mut bx, &place.as_ref())
.codegen_get_discr(&mut bx, discr_ty);
(bx, OperandRef {
val: OperandValue::Immediate(discr),
@@ -509,14 +506,14 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
(bx, operand)
}
mir::Rvalue::Use(ref operand) => {
-let operand = self.codegen_operand(mir, &mut bx, operand);
+let operand = self.codegen_operand(&mut bx, operand);
(bx, operand)
}
mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) => {
// According to `rvalue_creates_operand`, only ZST
// aggregate rvalues are allowed to be operands.
-let ty = rvalue.ty(mir, self.cx.tcx());
+let ty = rvalue.ty(self.mir, self.cx.tcx());
let operand = OperandRef::new_zst(
&mut bx,
self.cx.layout_of(self.monomorphize(&ty)),
@@ -528,7 +525,6 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
fn evaluate_array_len(
&mut self,
-mir: &Body<'tcx>,
bx: &mut Bx,
place: &mir::Place<'tcx>,
) -> Bx::Value {
@@ -543,7 +539,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
}
}
// use common size calculation for non zero-sized types
-let cg_value = self.codegen_place(mir, bx, &place.as_ref());
+let cg_value = self.codegen_place(bx, &place.as_ref());
cg_value.len(bx.cx())
}
@@ -704,7 +700,6 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
&self,
rvalue: &mir::Rvalue<'tcx>,
span: Span,
-mir: &Body<'tcx>
) -> bool {
match *rvalue {
mir::Rvalue::Ref(..) |
@@ -719,7 +714,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
true,
mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) => {
-let ty = rvalue.ty(mir, self.cx.tcx());
+let ty = rvalue.ty(self.mir, self.cx.tcx());
let ty = self.monomorphize(&ty);
self.cx.spanned_layout_of(ty, span).is_zst()
}


@@ -1,4 +1,4 @@
-use rustc::mir::{self, Body};
+use rustc::mir;
use crate::traits::BuilderMethods;
use super::FunctionCx;
@@ -11,25 +11,24 @@ use rustc_error_codes::*;
impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
pub fn codegen_statement(
&mut self,
-mir: &Body<'tcx>,
mut bx: Bx,
statement: &mir::Statement<'tcx>
) -> Bx {
debug!("codegen_statement(statement={:?})", statement);
-self.set_debug_loc(&mut bx, statement.source_info, mir);
+self.set_debug_loc(&mut bx, statement.source_info);
match statement.kind {
mir::StatementKind::Assign(box(ref place, ref rvalue)) => {
if let Some(index) = place.as_local() {
match self.locals[index] {
LocalRef::Place(cg_dest) => {
-self.codegen_rvalue(mir, bx, cg_dest, rvalue)
+self.codegen_rvalue(bx, cg_dest, rvalue)
}
LocalRef::UnsizedPlace(cg_indirect_dest) => {
-self.codegen_rvalue_unsized(mir, bx, cg_indirect_dest, rvalue)
+self.codegen_rvalue_unsized(bx, cg_indirect_dest, rvalue)
}
LocalRef::Operand(None) => {
-let (mut bx, operand) = self.codegen_rvalue_operand(mir, bx, rvalue);
+let (mut bx, operand) = self.codegen_rvalue_operand(bx, rvalue);
self.locals[index] = LocalRef::Operand(Some(operand));
self.debug_introduce_local(&mut bx, index);
bx
@@ -43,16 +42,16 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
// If the type is zero-sized, it's already been set here,
// but we still need to make sure we codegen the operand
-self.codegen_rvalue_operand(mir, bx, rvalue).0
+self.codegen_rvalue_operand(bx, rvalue).0
}
}
} else {
-let cg_dest = self.codegen_place(mir, &mut bx, &place.as_ref());
-self.codegen_rvalue(mir, bx, cg_dest, rvalue)
+let cg_dest = self.codegen_place(&mut bx, &place.as_ref());
+self.codegen_rvalue(bx, cg_dest, rvalue)
}
}
mir::StatementKind::SetDiscriminant{box ref place, variant_index} => {
-self.codegen_place(mir, &mut bx, &place.as_ref())
+self.codegen_place(&mut bx, &place.as_ref())
.codegen_set_discr(&mut bx, variant_index);
bx
}
@@ -74,12 +73,12 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'b, 'tcx, Bx> {
}
mir::StatementKind::InlineAsm(ref asm) => {
let outputs = asm.outputs.iter().map(|output| {
-self.codegen_place(mir, &mut bx, &output.as_ref())
+self.codegen_place(&mut bx, &output.as_ref())
}).collect();
let input_vals = asm.inputs.iter()
.fold(Vec::with_capacity(asm.inputs.len()), |mut acc, (span, input)| {
-let op = self.codegen_operand(mir, &mut bx, input);
+let op = self.codegen_operand(&mut bx, input);
if let OperandValue::Immediate(_) = op.val {
acc.push(op.immediate());
} else {