Refactor the MIR translator to use LLVM Builder directly

This commit is contained in:
Piotr Czarnecki 2016-02-01 11:04:46 +01:00
parent 8b776834a4
commit 06266eb3bd
9 changed files with 394 additions and 315 deletions

View File

@ -2003,7 +2003,7 @@ pub fn trans_closure<'a, 'b, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
let mut bcx = init_function(&fcx, false, output_type);
if attributes.iter().any(|item| item.check_name("rustc_mir")) {
mir::trans_mir(bcx);
mir::trans_mir(bcx.build());
fcx.cleanup();
return;
}

View File

@ -16,7 +16,7 @@ use trans::adt;
use trans::attributes;
use trans::base;
use trans::build;
use trans::common::{self, Block, LandingPad};
use trans::common::{self, Block, BlockAndBuilder, LandingPad};
use trans::debuginfo::DebugLoc;
use trans::Disr;
use trans::foreign;
@ -42,94 +42,97 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
match *data.terminator() {
mir::Terminator::Goto { target } => {
build::Br(bcx, self.llblock(target), DebugLoc::None)
bcx.br(self.llblock(target));
}
mir::Terminator::If { ref cond, targets: (true_bb, false_bb) } => {
let cond = self.trans_operand(bcx, cond);
let cond = self.trans_operand(&bcx, cond);
let lltrue = self.llblock(true_bb);
let llfalse = self.llblock(false_bb);
build::CondBr(bcx, cond.immediate(), lltrue, llfalse, DebugLoc::None);
bcx.cond_br(cond.immediate(), lltrue, llfalse);
}
mir::Terminator::Switch { ref discr, ref adt_def, ref targets } => {
let discr_lvalue = self.trans_lvalue(bcx, discr);
let discr_lvalue = self.trans_lvalue(&bcx, discr);
let ty = discr_lvalue.ty.to_ty(bcx.tcx());
let repr = adt::represent_type(bcx.ccx(), ty);
let discr = adt::trans_get_discr(bcx, &repr, discr_lvalue.llval,
None, true);
let discr = bcx.with_block(|bcx|
adt::trans_get_discr(bcx, &repr, discr_lvalue.llval, None, true)
);
// The else branch of the Switch can't be hit, so branch to an unreachable
// instruction so LLVM knows that
let unreachable_blk = self.unreachable_block();
let switch = build::Switch(bcx, discr, unreachable_blk.llbb, targets.len());
let switch = bcx.switch(discr, unreachable_blk.llbb, targets.len());
assert_eq!(adt_def.variants.len(), targets.len());
for (adt_variant, target) in adt_def.variants.iter().zip(targets) {
let llval = adt::trans_case(bcx, &*repr, Disr::from(adt_variant.disr_val));
let llval = bcx.with_block(|bcx|
adt::trans_case(bcx, &*repr, Disr::from(adt_variant.disr_val))
);
let llbb = self.llblock(*target);
build::AddCase(switch, llval, llbb)
}
}
mir::Terminator::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
let (otherwise, targets) = targets.split_last().unwrap();
let discr = build::Load(bcx, self.trans_lvalue(bcx, discr).llval);
let switch = build::Switch(bcx, discr, self.llblock(*otherwise), values.len());
let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval);
let switch = bcx.switch(discr, self.llblock(*otherwise), values.len());
for (value, target) in values.iter().zip(targets) {
let llval = self.trans_constval(bcx, value, switch_ty).immediate();
let llval = self.trans_constval(&bcx, value, switch_ty).immediate();
let llbb = self.llblock(*target);
build::AddCase(switch, llval, llbb)
}
}
mir::Terminator::Resume => {
let ps = self.get_personality_slot(bcx);
let lp = build::Load(bcx, ps);
base::call_lifetime_end(bcx, ps);
base::trans_unwind_resume(bcx, lp);
let ps = self.get_personality_slot(&bcx);
let lp = bcx.load(ps);
bcx.with_block(|bcx| {
base::call_lifetime_end(bcx, ps);
base::trans_unwind_resume(bcx, lp);
});
}
mir::Terminator::Return => {
let return_ty = bcx.monomorphize(&self.mir.return_ty);
base::build_return_block(bcx.fcx, bcx, return_ty, DebugLoc::None);
bcx.with_block(|bcx| {
base::build_return_block(bcx.fcx, bcx, return_ty, DebugLoc::None);
})
}
mir::Terminator::Drop { ref value, target, unwind } => {
let lvalue = self.trans_lvalue(bcx, value);
let lvalue = self.trans_lvalue(&bcx, value);
let ty = lvalue.ty.to_ty(bcx.tcx());
// Double check for necessity to drop
if !glue::type_needs_drop(bcx.tcx(), ty) {
build::Br(bcx, self.llblock(target), DebugLoc::None);
bcx.br(self.llblock(target));
return;
}
let drop_fn = glue::get_drop_glue(bcx.ccx(), ty);
let drop_ty = glue::get_drop_glue_type(bcx.ccx(), ty);
let llvalue = if drop_ty != ty {
build::PointerCast(bcx, lvalue.llval,
type_of::type_of(bcx.ccx(), drop_ty).ptr_to())
bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx(), drop_ty).ptr_to())
} else {
lvalue.llval
};
if let Some(unwind) = unwind {
let uwbcx = self.bcx(unwind);
let unwind = self.make_landing_pad(uwbcx);
build::Invoke(bcx,
drop_fn,
&[llvalue],
self.llblock(target),
unwind.llbb,
None,
DebugLoc::None);
bcx.invoke(drop_fn,
&[llvalue],
self.llblock(target),
unwind.llbb(),
None);
} else {
build::Call(bcx, drop_fn, &[llvalue], None, DebugLoc::None);
build::Br(bcx, self.llblock(target), DebugLoc::None);
bcx.call(drop_fn, &[llvalue], None);
bcx.br(self.llblock(target));
}
}
mir::Terminator::Call { ref func, ref args, ref destination, ref cleanup } => {
// Create the callee. This will always be a fn ptr and hence a kind of scalar.
let callee = self.trans_operand(bcx, func);
let callee = self.trans_operand(&bcx, func);
let attrs = attributes::from_fn_type(bcx.ccx(), callee.ty);
let debugloc = DebugLoc::None;
// The arguments we'll be passing. Plus one to account for outptr, if used.
@ -149,7 +152,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// Prepare the return value destination
let (ret_dest_ty, must_copy_dest) = if let Some((ref d, _)) = *destination {
let dest = self.trans_lvalue(bcx, d);
let dest = self.trans_lvalue(&bcx, d);
let ret_ty = dest.ty.to_ty(bcx.tcx());
if !is_foreign && type_of::return_uses_outptr(bcx.ccx(), ret_ty) {
llargs.push(dest.llval);
@ -163,7 +166,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// Process the rest of the args.
for arg in args {
let operand = self.trans_operand(bcx, arg);
let operand = self.trans_operand(&bcx, arg);
match operand.val {
Ref(llval) | Immediate(llval) => llargs.push(llval),
FatPtr(b, e) => {
@ -176,38 +179,35 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
}
let avoid_invoke = bcx.with_block(|bcx| base::avoid_invoke(bcx));
// Many different ways to call a function handled here
match (is_foreign, base::avoid_invoke(bcx), cleanup, destination) {
match (is_foreign, avoid_invoke, cleanup, destination) {
// The two cases below are the only ones to use LLVMs `invoke`.
(false, false, &Some(cleanup), &None) => {
let cleanup = self.bcx(cleanup);
let landingpad = self.make_landing_pad(cleanup);
let unreachable_blk = self.unreachable_block();
build::Invoke(bcx,
callee.immediate(),
&llargs[..],
unreachable_blk.llbb,
landingpad.llbb,
Some(attrs),
debugloc);
bcx.invoke(callee.immediate(),
&llargs[..],
unreachable_blk.llbb,
landingpad.llbb(),
Some(attrs));
},
(false, false, &Some(cleanup), &Some((_, success))) => {
let cleanup = self.bcx(cleanup);
let landingpad = self.make_landing_pad(cleanup);
let (target, postinvoke) = if must_copy_dest {
(bcx.fcx.new_block("", None), Some(self.bcx(success)))
(bcx.fcx().new_block("", None), Some(self.bcx(success)))
} else {
(self.bcx(success), None)
};
let invokeret = build::Invoke(bcx,
callee.immediate(),
&llargs[..],
target.llbb,
landingpad.llbb,
Some(attrs),
debugloc);
let invokeret = bcx.invoke(callee.immediate(),
&llargs[..],
target.llbb(),
landingpad.llbb(),
Some(attrs));
if let Some(postinvoketarget) = postinvoke {
// We translate the copy into a temoprary block. The temporary block is
// We translate the copy into a temporary block. The temporary block is
// necessary because the current block has already been terminated (by
// `invoke`) and we cannot really translate into the target block
// because:
@ -233,40 +233,46 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// ; immediate predecessors
let (ret_dest, ret_ty) = ret_dest_ty
.expect("return destination and type not set");
base::store_ty(target, invokeret, ret_dest.llval, ret_ty);
build::Br(target, postinvoketarget.llbb, debugloc);
target.with_block(|target| {
base::store_ty(target, invokeret, ret_dest.llval, ret_ty);
});
target.br(postinvoketarget.llbb());
}
},
(false, _, _, &None) => {
build::Call(bcx, callee.immediate(), &llargs[..], Some(attrs), debugloc);
build::Unreachable(bcx);
bcx.call(callee.immediate(), &llargs[..], Some(attrs));
bcx.unreachable();
}
(false, _, _, &Some((_, target))) => {
let llret = build::Call(bcx,
callee.immediate(),
&llargs[..],
Some(attrs),
debugloc);
let bundle = bcx.lpad().and_then(|b| b.bundle());
let llret = bcx.call(callee.immediate(),
&llargs[..],
bundle,
Some(attrs));
if must_copy_dest {
let (ret_dest, ret_ty) = ret_dest_ty
.expect("return destination and type not set");
base::store_ty(bcx, llret, ret_dest.llval, ret_ty);
bcx.with_block(|bcx| {
base::store_ty(bcx, llret, ret_dest.llval, ret_ty);
});
}
build::Br(bcx, self.llblock(target), debugloc);
bcx.br(self.llblock(target));
}
// Foreign functions
(true, _, _, destination) => {
let (dest, _) = ret_dest_ty
.expect("return destination is not set");
bcx = foreign::trans_native_call(bcx,
callee.ty,
callee.immediate(),
dest.llval,
&llargs[..],
arg_tys,
debugloc);
bcx = bcx.map_block(|bcx| {
foreign::trans_native_call(bcx,
callee.ty,
callee.immediate(),
dest.llval,
&llargs[..],
arg_tys,
debugloc)
});
if let Some((_, target)) = *destination {
build::Br(bcx, self.llblock(target), debugloc);
bcx.br(self.llblock(target));
}
},
}
@ -274,48 +280,55 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
}
fn get_personality_slot(&mut self, bcx: Block<'bcx, 'tcx>) -> ValueRef {
fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRef {
let ccx = bcx.ccx();
if let Some(slot) = self.llpersonalityslot {
slot
} else {
let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
let slot = base::alloca(bcx, llretty, "personalityslot");
let slot = bcx.with_block(|bcx| {
base::alloca(bcx, llretty, "personalityslot")
});
self.llpersonalityslot = Some(slot);
base::call_lifetime_start(bcx, slot);
bcx.with_block(|bcx| {
base::call_lifetime_start(bcx, slot);
});
slot
}
}
fn make_landing_pad(&mut self, cleanup: Block<'bcx, 'tcx>) -> Block<'bcx, 'tcx> {
let bcx = cleanup.fcx.new_block("cleanup", None);
fn make_landing_pad(&mut self,
cleanup: BlockAndBuilder<'bcx, 'tcx>)
-> BlockAndBuilder<'bcx, 'tcx>
{
let bcx = cleanup.map_block(|cleanup| {
cleanup.fcx.new_block("cleanup", None)
});
// FIXME(#30941) this doesn't handle msvc-style exceptions
*bcx.lpad.borrow_mut() = Some(LandingPad::gnu());
let bcx = bcx.build();
let ccx = bcx.ccx();
let llpersonality = bcx.fcx.eh_personality();
let llpersonality = bcx.fcx().eh_personality();
let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
let llretval = build::LandingPad(bcx, llretty, llpersonality, 1);
build::SetCleanup(bcx, llretval);
let slot = self.get_personality_slot(bcx);
build::Store(bcx, llretval, slot);
build::Br(bcx, cleanup.llbb, DebugLoc::None);
let llretval = bcx.landing_pad(llretty, llpersonality, 1, bcx.fcx().llfn);
bcx.set_cleanup(llretval);
let slot = self.get_personality_slot(&bcx);
bcx.store(llretval, slot);
bcx.br(cleanup.llbb());
bcx
}
fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> {
match self.unreachable_block {
Some(b) => b,
None => {
let bl = self.fcx.new_block("unreachable", None);
build::Unreachable(bl);
self.unreachable_block = Some(bl);
bl
}
}
self.unreachable_block.unwrap_or_else(|| {
let bl = self.fcx.new_block("unreachable", None);
bl.build().unreachable();
self.unreachable_block = Some(bl);
bl
})
}
fn bcx(&self, bb: mir::BasicBlock) -> Block<'bcx, 'tcx> {
self.blocks[bb.index()]
fn bcx(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'bcx, 'tcx> {
self.blocks[bb.index()].build()
}
fn llblock(&self, bb: mir::BasicBlock) -> BasicBlockRef {

View File

@ -14,7 +14,8 @@ use middle::subst::Substs;
use middle::ty::{Ty, TypeFoldable};
use rustc::middle::const_eval::ConstVal;
use rustc::mir::repr as mir;
use trans::common::{self, Block, C_bool, C_bytes, C_floating_f64, C_integral, C_str_slice};
use trans::common::{self, BlockAndBuilder, C_bool, C_bytes, C_floating_f64, C_integral,
C_str_slice};
use trans::consts;
use trans::expr;
use trans::type_of;
@ -25,13 +26,13 @@ use super::MirContext;
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_constval(&mut self,
bcx: Block<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
cv: &ConstVal,
ty: Ty<'tcx>)
-> OperandRef<'tcx>
{
let ccx = bcx.ccx();
let val = self.trans_constval_inner(bcx, cv, ty, bcx.fcx.param_substs);
let val = self.trans_constval_inner(bcx, cv, ty, bcx.fcx().param_substs);
let val = if common::type_is_immediate(ccx, ty) {
OperandValue::Immediate(val)
} else if common::type_is_fat_ptr(bcx.tcx(), ty) {
@ -52,7 +53,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
/// Translate ConstVal into a bare LLVM ValueRef.
fn trans_constval_inner(&mut self,
bcx: common::Block<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
cv: &ConstVal,
ty: Ty<'tcx>,
param_substs: &'tcx Substs<'tcx>)
@ -70,7 +71,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
ConstVal::Struct(id) | ConstVal::Tuple(id) |
ConstVal::Array(id, _) | ConstVal::Repeat(id, _) => {
let expr = bcx.tcx().map.expect_expr(id);
expr::trans(bcx, expr).datum.val
bcx.with_block(|bcx| {
expr::trans(bcx, expr).datum.val
})
},
ConstVal::Function(did) =>
self.trans_fn_ref(bcx, ty, param_substs, did).immediate()
@ -78,7 +81,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
pub fn trans_constant(&mut self,
bcx: Block<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
constant: &mir::Constant<'tcx>)
-> OperandRef<'tcx>
{

View File

@ -18,7 +18,7 @@ use rustc::middle::const_eval;
use rustc::middle::def_id::DefId;
use rustc::middle::traits;
use rustc::mir::repr::ItemKind;
use trans::common::{Block, fulfill_obligation};
use trans::common::{BlockAndBuilder, fulfill_obligation};
use trans::base;
use trans::closure;
use trans::expr;
@ -32,7 +32,7 @@ use super::operand::{OperandRef, OperandValue};
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
/// Translate reference to item.
pub fn trans_item_ref(&mut self,
bcx: Block<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
ty: Ty<'tcx>,
kind: ItemKind,
substs: &'tcx Substs<'tcx>,
@ -53,7 +53,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
.expect("def was const, but lookup_const_by_id failed");
// FIXME: this is falling back to translating from HIR. This is not easy to fix,
// because we would have somehow adapt const_eval to work on MIR rather than HIR.
let d = expr::trans(bcx, expr);
let d = bcx.with_block(|bcx| {
expr::trans(bcx, expr)
});
OperandRef::from_rvalue_datum(d.datum.to_rvalue_datum(d.bcx, "").datum)
}
}
@ -66,7 +68,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
///
/// This is an adaptation of callee::trans_fn_ref_with_substs.
pub fn trans_fn_ref(&mut self,
bcx: Block<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
ty: Ty<'tcx>,
substs: &'tcx Substs<'tcx>,
did: DefId)
@ -101,7 +103,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
///
/// This is an adaptation of meth::trans_static_method_callee
pub fn trans_trait_method(&mut self,
bcx: Block<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
ty: Ty<'tcx>,
method_id: DefId,
trait_id: DefId,

View File

@ -14,9 +14,7 @@ use rustc::mir::repr as mir;
use rustc::mir::tcx::LvalueTy;
use trans::adt;
use trans::base;
use trans::build;
use trans::common::{self, Block};
use trans::debuginfo::DebugLoc;
use trans::common::{self, BlockAndBuilder};
use trans::machine;
use trans::type_of;
use llvm;
@ -43,20 +41,20 @@ impl<'tcx> LvalueRef<'tcx> {
LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty }
}
pub fn alloca<'bcx>(bcx: Block<'bcx, 'tcx>,
pub fn alloca<'bcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
ty: Ty<'tcx>,
name: &str)
-> LvalueRef<'tcx>
{
assert!(!ty.has_erasable_regions());
let lltemp = base::alloc_ty(bcx, ty, name);
let lltemp = bcx.with_block(|bcx| base::alloc_ty(bcx, ty, name));
LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty))
}
}
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn lvalue_len(&mut self,
bcx: Block<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
lvalue: LvalueRef<'tcx>)
-> ValueRef {
match lvalue.ty.to_ty(bcx.tcx()).sty {
@ -70,13 +68,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
pub fn trans_lvalue(&mut self,
bcx: Block<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
lvalue: &mir::Lvalue<'tcx>)
-> LvalueRef<'tcx> {
debug!("trans_lvalue(lvalue={:?})", lvalue);
let fcx = bcx.fcx;
let ccx = fcx.ccx;
let fcx = bcx.fcx();
let ccx = bcx.ccx();
let tcx = bcx.tcx();
match *lvalue {
mir::Lvalue::Var(index) => self.vars[index as usize],
@ -97,7 +95,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let fn_return_ty = bcx.monomorphize(&self.mir.return_ty);
let return_ty = fn_return_ty.unwrap();
let llval = if !common::return_type_is_void(bcx.ccx(), return_ty) {
fcx.get_ret_slot(bcx, fn_return_ty, "")
bcx.with_block(|bcx| {
fcx.get_ret_slot(bcx, fn_return_ty, "")
})
} else {
// This is a void return; that is, there's no place to store the value and
// there cannot really be one (or storing into it doesn't make sense, anyway).
@ -117,12 +117,14 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let (llprojected, llextra) = match projection.elem {
mir::ProjectionElem::Deref => {
let base_ty = tr_base.ty.to_ty(tcx);
if common::type_is_sized(tcx, projected_ty.to_ty(tcx)) {
(base::load_ty(bcx, tr_base.llval, base_ty),
ptr::null_mut())
} else {
base::load_fat_ptr(bcx, tr_base.llval, base_ty)
}
bcx.with_block(|bcx| {
if common::type_is_sized(tcx, projected_ty.to_ty(tcx)) {
(base::load_ty(bcx, tr_base.llval, base_ty),
ptr::null_mut())
} else {
base::load_fat_ptr(bcx, tr_base.llval, base_ty)
}
})
}
mir::ProjectionElem::Field(ref field) => {
let base_ty = tr_base.ty.to_ty(tcx);
@ -138,18 +140,21 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
} else {
adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra)
};
(adt::trans_field_ptr(bcx, &base_repr, base, Disr(discr), field.index()),
if is_sized {
ptr::null_mut()
} else {
tr_base.llextra
})
let llprojected = bcx.with_block(|bcx| {
adt::trans_field_ptr(bcx, &base_repr, base, Disr(discr), field.index())
});
let llextra = if is_sized {
ptr::null_mut()
} else {
tr_base.llextra
};
(llprojected, llextra)
}
mir::ProjectionElem::Index(ref index) => {
let index = self.trans_operand(bcx, index);
let llindex = self.prepare_index(bcx, index.immediate());
let zero = common::C_uint(bcx.ccx(), 0u64);
(build::InBoundsGEP(bcx, tr_base.llval, &[zero, llindex]),
(bcx.inbounds_gep(tr_base.llval, &[zero, llindex]),
ptr::null_mut())
}
mir::ProjectionElem::ConstantIndex { offset,
@ -158,7 +163,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let lloffset = common::C_u32(bcx.ccx(), offset);
let llindex = self.prepare_index(bcx, lloffset);
let zero = common::C_uint(bcx.ccx(), 0u64);
(build::InBoundsGEP(bcx, tr_base.llval, &[zero, llindex]),
(bcx.inbounds_gep(tr_base.llval, &[zero, llindex]),
ptr::null_mut())
}
mir::ProjectionElem::ConstantIndex { offset,
@ -166,10 +171,10 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
min_length: _ } => {
let lloffset = common::C_u32(bcx.ccx(), offset);
let lllen = self.lvalue_len(bcx, tr_base);
let llindex = build::Sub(bcx, lllen, lloffset, DebugLoc::None);
let llindex = bcx.sub(lllen, lloffset);
let llindex = self.prepare_index(bcx, llindex);
let zero = common::C_uint(bcx.ccx(), 0u64);
(build::InBoundsGEP(bcx, tr_base.llval, &[zero, llindex]),
(bcx.inbounds_gep(tr_base.llval, &[zero, llindex]),
ptr::null_mut())
}
mir::ProjectionElem::Downcast(..) => {
@ -190,7 +195,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
///
/// nmatsakis: is this still necessary? Not sure.
fn prepare_index(&mut self,
bcx: Block<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
llindex: ValueRef)
-> ValueRef
{
@ -198,9 +203,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let index_size = machine::llbitsize_of_real(bcx.ccx(), common::val_ty(llindex));
let int_size = machine::llbitsize_of_real(bcx.ccx(), ccx.int_type());
if index_size < int_size {
build::ZExt(bcx, llindex, ccx.int_type())
bcx.zext(llindex, ccx.int_type())
} else if index_size > int_size {
build::Trunc(bcx, llindex, ccx.int_type())
bcx.trunc(llindex, ccx.int_type())
} else {
llindex
}

View File

@ -13,9 +13,7 @@ use llvm::{self, ValueRef};
use rustc::mir::repr as mir;
use rustc::mir::tcx::LvalueTy;
use trans::base;
use trans::build;
use trans::common::{self, Block, LandingPad};
use trans::debuginfo::DebugLoc;
use trans::common::{self, Block, BlockAndBuilder, LandingPad};
use trans::expr;
use trans::type_of;
@ -79,26 +77,28 @@ enum TempRef<'tcx> {
///////////////////////////////////////////////////////////////////////////
pub fn trans_mir<'bcx, 'tcx>(bcx: Block<'bcx, 'tcx>) {
let fcx = bcx.fcx;
pub fn trans_mir<'bcx, 'tcx>(bcx: BlockAndBuilder<'bcx, 'tcx>) {
let fcx = bcx.fcx();
let mir = bcx.mir();
let mir_blocks = bcx.mir().all_basic_blocks();
// Analyze the temps to determine which must be lvalues
// FIXME
let lvalue_temps = analyze::lvalue_temps(bcx, mir);
let lvalue_temps = bcx.with_block(|bcx| {
analyze::lvalue_temps(bcx, mir)
});
// Allocate variable and temp allocas
let vars = mir.var_decls.iter()
.map(|decl| (bcx.monomorphize(&decl.ty), decl.name))
.map(|(mty, name)| LvalueRef::alloca(bcx, mty, &name.as_str()))
.map(|(mty, name)| LvalueRef::alloca(&bcx, mty, &name.as_str()))
.collect();
let temps = mir.temp_decls.iter()
.map(|decl| bcx.monomorphize(&decl.ty))
.enumerate()
.map(|(i, mty)| if lvalue_temps.contains(i) {
TempRef::Lvalue(LvalueRef::alloca(bcx,
TempRef::Lvalue(LvalueRef::alloca(&bcx,
mty,
&format!("temp{:?}", i)))
} else {
@ -108,7 +108,7 @@ pub fn trans_mir<'bcx, 'tcx>(bcx: Block<'bcx, 'tcx>) {
TempRef::Operand(None)
})
.collect();
let args = arg_value_refs(bcx, mir);
let args = arg_value_refs(&bcx, mir);
// Allocate a `Block` for every basic block
let block_bcxs: Vec<Block<'bcx,'tcx>> =
@ -125,7 +125,7 @@ pub fn trans_mir<'bcx, 'tcx>(bcx: Block<'bcx, 'tcx>) {
// Branch to the START block
let start_bcx = block_bcxs[mir::START_BLOCK.index()];
build::Br(bcx, start_bcx.llbb, DebugLoc::None);
bcx.br(start_bcx.llbb);
let mut mircx = MirContext {
mir: mir,
@ -147,11 +147,11 @@ pub fn trans_mir<'bcx, 'tcx>(bcx: Block<'bcx, 'tcx>) {
/// Produce, for each argument, a `ValueRef` pointing at the
/// argument's value. As arguments are lvalues, these are always
/// indirect.
fn arg_value_refs<'bcx, 'tcx>(bcx: Block<'bcx, 'tcx>,
fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
mir: &mir::Mir<'tcx>)
-> Vec<LvalueRef<'tcx>> {
// FIXME tupled_args? I think I'd rather that mapping is done in MIR land though
let fcx = bcx.fcx;
let fcx = bcx.fcx();
let tcx = bcx.tcx();
let mut idx = fcx.arg_offset() as c_uint;
mir.arg_decls
@ -174,17 +174,26 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: Block<'bcx, 'tcx>,
let lldata = llvm::get_param(fcx.llfn, idx);
let llextra = llvm::get_param(fcx.llfn, idx + 1);
idx += 2;
let lltemp = base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index));
build::Store(bcx, lldata, expr::get_dataptr(bcx, lltemp));
build::Store(bcx, llextra, expr::get_meta(bcx, lltemp));
let lltemp = bcx.with_block(|bcx| {
base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index))
});
let (dataptr, meta) = bcx.with_block(|bcx| {
(expr::get_dataptr(bcx, lltemp), expr::get_meta(bcx, lltemp))
});
bcx.store(lldata, dataptr);
bcx.store(llextra, meta);
lltemp
} else {
// otherwise, arg is passed by value, so make a
// temporary and store it there
let llarg = llvm::get_param(fcx.llfn, idx);
idx += 1;
let lltemp = base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index));
base::store_ty(bcx, llarg, lltemp, arg_ty);
let lltemp = bcx.with_block(|bcx| {
base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index))
});
bcx.with_block(|bcx| {
base::store_ty(bcx, llarg, lltemp, arg_ty)
});
lltemp
};
LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty))

View File

@ -12,7 +12,7 @@ use llvm::ValueRef;
use rustc::middle::ty::{Ty, TypeFoldable};
use rustc::mir::repr as mir;
use trans::base;
use trans::common::{self, Block};
use trans::common::{self, Block, BlockAndBuilder};
use trans::datum;
use super::{MirContext, TempRef};
@ -37,8 +37,9 @@ pub enum OperandValue {
///
/// NOTE: unless you know a value's type exactly, you should not
/// generate LLVM opcodes acting on it and instead act via methods,
/// to avoid nasty edge cases. In particular, using `build::Store`
/// directly is sure to cause problems - use `store_operand` instead.
/// to avoid nasty edge cases. In particular, using `Builder.store`
/// directly is sure to cause problems -- use `MirContext.store_operand`
/// instead.
#[derive(Copy, Clone)]
pub struct OperandRef<'tcx> {
// The value.
@ -58,7 +59,7 @@ impl<'tcx> OperandRef<'tcx> {
}
}
pub fn repr<'bcx>(self, bcx: Block<'bcx, 'tcx>) -> String {
pub fn repr<'bcx>(self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> String {
match self.val {
OperandValue::Ref(r) => {
format!("OperandRef(Ref({}) @ {:?})",
@ -90,7 +91,7 @@ impl<'tcx> OperandRef<'tcx> {
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_operand(&mut self,
bcx: Block<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
operand: &mir::Operand<'tcx>)
-> OperandRef<'tcx>
{
@ -124,10 +125,14 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
ty);
let val = match datum::appropriate_rvalue_mode(bcx.ccx(), ty) {
datum::ByValue => {
OperandValue::Immediate(base::load_ty(bcx, tr_lvalue.llval, ty))
bcx.with_block(|bcx| {
OperandValue::Immediate(base::load_ty(bcx, tr_lvalue.llval, ty))
})
}
datum::ByRef if common::type_is_fat_ptr(bcx.tcx(), ty) => {
let (lldata, llextra) = base::load_fat_ptr(bcx, tr_lvalue.llval, ty);
let (lldata, llextra) = bcx.with_block(|bcx| {
base::load_fat_ptr(bcx, tr_lvalue.llval, ty)
});
OperandValue::FatPtr(lldata, llextra)
}
datum::ByRef => OperandValue::Ref(tr_lvalue.llval)
@ -148,7 +153,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
pub fn trans_operand_into(&mut self,
bcx: Block<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
lldest: ValueRef,
operand: &mir::Operand<'tcx>)
{
@ -164,11 +169,21 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
pub fn store_operand(&mut self,
bcx: Block<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
lldest: ValueRef,
operand: OperandRef<'tcx>)
{
debug!("store_operand: operand={}", operand.repr(bcx));
bcx.with_block(|bcx| {
self.store_operand_direct(bcx, lldest, operand)
})
}
pub fn store_operand_direct(&mut self,
bcx: Block<'bcx, 'tcx>,
lldest: ValueRef,
operand: OperandRef<'tcx>)
{
// Avoid generating stores of zero-sized values, because the only way to have a zero-sized
// value is through `undef`, and store itself is useless.
if common::type_is_zero_size(bcx.ccx(), operand.ty) {

View File

@ -15,8 +15,7 @@ use rustc::mir::repr as mir;
use trans::asm;
use trans::base;
use trans::build;
use trans::common::{self, Block, Result};
use trans::common::{self, BlockAndBuilder, Result};
use trans::debuginfo::DebugLoc;
use trans::declare;
use trans::expr;
@ -33,10 +32,10 @@ use super::lvalue::LvalueRef;
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_rvalue(&mut self,
bcx: Block<'bcx, 'tcx>,
bcx: BlockAndBuilder<'bcx, 'tcx>,
dest: LvalueRef<'tcx>,
rvalue: &mir::Rvalue<'tcx>)
-> Block<'bcx, 'tcx>
-> BlockAndBuilder<'bcx, 'tcx>
{
debug!("trans_rvalue(dest.llval={}, rvalue={:?})",
bcx.val_to_string(dest.llval),
@ -44,7 +43,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
match *rvalue {
mir::Rvalue::Use(ref operand) => {
self.trans_operand_into(bcx, dest.llval, operand);
self.trans_operand_into(&bcx, dest.llval, operand);
bcx
}
@ -53,7 +52,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// into-coerce of a thin pointer to a fat pointer - just
// use the operand path.
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
self.store_operand(bcx, dest.llval, temp);
self.store_operand(&bcx, dest.llval, temp);
return bcx;
}
@ -61,39 +60,43 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// this to be eliminated by MIR translation, but
// `CoerceUnsized` can be passed by a where-clause,
// so the (generic) MIR may not be able to expand it.
let operand = self.trans_operand(bcx, operand);
match operand.val {
OperandValue::FatPtr(..) => unreachable!(),
OperandValue::Immediate(llval) => {
// unsize from an immediate structure. We don't
// really need a temporary alloca here, but
// avoiding it would require us to have
// `coerce_unsized_into` use extractvalue to
// index into the struct, and this case isn't
// important enough for it.
debug!("trans_rvalue: creating ugly alloca");
let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp");
base::store_ty(bcx, llval, lltemp, operand.ty);
base::coerce_unsized_into(bcx,
lltemp, operand.ty,
dest.llval, cast_ty);
let operand = self.trans_operand(&bcx, operand);
bcx.with_block(|bcx| {
match operand.val {
OperandValue::FatPtr(..) => unreachable!(),
OperandValue::Immediate(llval) => {
// unsize from an immediate structure. We don't
// really need a temporary alloca here, but
// avoiding it would require us to have
// `coerce_unsized_into` use extractvalue to
// index into the struct, and this case isn't
// important enough for it.
debug!("trans_rvalue: creating ugly alloca");
let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp");
base::store_ty(bcx, llval, lltemp, operand.ty);
base::coerce_unsized_into(bcx,
lltemp, operand.ty,
dest.llval, cast_ty);
}
OperandValue::Ref(llref) => {
base::coerce_unsized_into(bcx,
llref, operand.ty,
dest.llval, cast_ty);
}
}
OperandValue::Ref(llref) => {
base::coerce_unsized_into(bcx,
llref, operand.ty,
dest.llval, cast_ty);
}
}
});
bcx
}
mir::Rvalue::Repeat(ref elem, ref count) => {
let elem = self.trans_operand(bcx, elem);
let size = self.trans_constval(bcx, &count.value, count.ty).immediate();
let base = expr::get_dataptr(bcx, dest.llval);
tvec::iter_vec_raw(bcx, base, elem.ty, size, |bcx, llslot, _| {
self.store_operand(bcx, llslot, elem);
bcx
let elem = self.trans_operand(&bcx, elem);
let size = self.trans_constval(&bcx, &count.value, count.ty).immediate();
bcx.map_block(|block| {
let base = expr::get_dataptr(block, dest.llval);
tvec::iter_vec_raw(block, base, elem.ty, size, |block, llslot, _| {
self.store_operand_direct(block, llslot, elem);
block
})
})
}
@ -102,27 +105,31 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::AggregateKind::Adt(adt_def, index, _) => {
let repr = adt::represent_type(bcx.ccx(), dest.ty.to_ty(bcx.tcx()));
let disr = Disr::from(adt_def.variants[index].disr_val);
adt::trans_set_discr(bcx, &*repr, dest.llval, Disr::from(disr));
bcx.with_block(|bcx| {
adt::trans_set_discr(bcx, &*repr, dest.llval, Disr::from(disr));
});
for (i, operand) in operands.iter().enumerate() {
let op = self.trans_operand(bcx, operand);
let op = self.trans_operand(&bcx, operand);
// Do not generate stores and GEPs for zero-sized fields.
if !common::type_is_zero_size(bcx.ccx(), op.ty) {
let val = adt::MaybeSizedValue::sized(dest.llval);
let lldest_i = adt::trans_field_ptr(bcx, &*repr, val, disr, i);
self.store_operand(bcx, lldest_i, op);
let lldest_i = bcx.with_block(|bcx| {
adt::trans_field_ptr(bcx, &*repr, val, disr, i)
});
self.store_operand(&bcx, lldest_i, op);
}
}
},
_ => {
for (i, operand) in operands.iter().enumerate() {
let op = self.trans_operand(bcx, operand);
let op = self.trans_operand(&bcx, operand);
// Do not generate stores and GEPs for zero-sized fields.
if !common::type_is_zero_size(bcx.ccx(), op.ty) {
// Note: perhaps this should be StructGep, but
// note that in some cases the values here will
// not be structs but arrays.
let dest = build::GEPi(bcx, dest.llval, &[0, i]);
self.store_operand(bcx, dest, op);
let dest = bcx.gepi(dest.llval, &[0, i]);
self.store_operand(&bcx, dest, op);
}
}
}
@ -132,49 +139,54 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::Rvalue::Slice { ref input, from_start, from_end } => {
let ccx = bcx.ccx();
let input = self.trans_lvalue(bcx, input);
let (llbase, lllen) = tvec::get_base_and_len(bcx,
input.llval,
input.ty.to_ty(bcx.tcx()));
let llbase1 = build::GEPi(bcx, llbase, &[from_start]);
let input = self.trans_lvalue(&bcx, input);
let (llbase, lllen) = bcx.with_block(|bcx| {
tvec::get_base_and_len(bcx,
input.llval,
input.ty.to_ty(bcx.tcx()))
});
let llbase1 = bcx.gepi(llbase, &[from_start]);
let adj = common::C_uint(ccx, from_start + from_end);
let lllen1 = build::Sub(bcx, lllen, adj, DebugLoc::None);
let lladdrdest = expr::get_dataptr(bcx, dest.llval);
build::Store(bcx, llbase1, lladdrdest);
let llmetadest = expr::get_meta(bcx, dest.llval);
build::Store(bcx, lllen1, llmetadest);
let lllen1 = bcx.sub(lllen, adj);
let (lladdrdest, llmetadest) = bcx.with_block(|bcx| {
(expr::get_dataptr(bcx, dest.llval), expr::get_meta(bcx, dest.llval))
});
bcx.store(llbase1, lladdrdest);
bcx.store(lllen1, llmetadest);
bcx
}
mir::Rvalue::InlineAsm(ref inline_asm) => {
asm::trans_inline_asm(bcx, inline_asm)
bcx.map_block(|bcx| {
asm::trans_inline_asm(bcx, inline_asm)
})
}
_ => {
assert!(rvalue_creates_operand(rvalue));
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
self.store_operand(bcx, dest.llval, temp);
self.store_operand(&bcx, dest.llval, temp);
bcx
}
}
}
pub fn trans_rvalue_operand(&mut self,
bcx: Block<'bcx, 'tcx>,
bcx: BlockAndBuilder<'bcx, 'tcx>,
rvalue: &mir::Rvalue<'tcx>)
-> (Block<'bcx, 'tcx>, OperandRef<'tcx>)
-> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>)
{
assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);
match *rvalue {
mir::Rvalue::Use(ref operand) => {
let operand = self.trans_operand(bcx, operand);
let operand = self.trans_operand(&bcx, operand);
(bcx, operand)
}
mir::Rvalue::Cast(ref kind, ref operand, cast_ty) => {
let operand = self.trans_operand(bcx, operand);
debug!("cast operand is {}", operand.repr(bcx));
let operand = self.trans_operand(&bcx, operand);
debug!("cast operand is {}", operand.repr(&bcx));
let cast_ty = bcx.monomorphize(&cast_ty);
let val = match *kind {
@ -199,15 +211,16 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
OperandValue::Immediate(lldata) => {
// "standard" unsize
let (lldata, llextra) =
let (lldata, llextra) = bcx.with_block(|bcx| {
base::unsize_thin_ptr(bcx, lldata,
operand.ty, cast_ty);
operand.ty, cast_ty)
});
OperandValue::FatPtr(lldata, llextra)
}
OperandValue::Ref(_) => {
bcx.sess().bug(
&format!("by-ref operand {} in trans_rvalue_operand",
operand.repr(bcx)));
operand.repr(&bcx)));
}
}
}
@ -220,8 +233,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let (llval, ll_t_in, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in {
let repr = adt::represent_type(bcx.ccx(), operand.ty);
let llval = operand.immediate();
let discr = adt::trans_get_discr(bcx, &*repr, llval,
None, true);
let discr = bcx.with_block(|bcx| {
adt::trans_get_discr(bcx, &*repr, llval, None, true)
});
(discr, common::val_ty(discr), adt::is_discr_signed(&*repr))
} else {
(operand.immediate(), ll_t_in, operand.ty.is_signed())
@ -232,22 +246,22 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let srcsz = ll_t_in.int_width();
let dstsz = ll_t_out.int_width();
if srcsz == dstsz {
build::BitCast(bcx, llval, ll_t_out)
bcx.bitcast(llval, ll_t_out)
} else if srcsz > dstsz {
build::Trunc(bcx, llval, ll_t_out)
bcx.trunc(llval, ll_t_out)
} else if signed {
build::SExt(bcx, llval, ll_t_out)
bcx.sext(llval, ll_t_out)
} else {
build::ZExt(bcx, llval, ll_t_out)
bcx.zext(llval, ll_t_out)
}
}
(CastTy::Float, CastTy::Float) => {
let srcsz = ll_t_in.float_width();
let dstsz = ll_t_out.float_width();
if dstsz > srcsz {
build::FPExt(bcx, llval, ll_t_out)
bcx.fpext(llval, ll_t_out)
} else if srcsz > dstsz {
build::FPTrunc(bcx, llval, ll_t_out)
bcx.fptrunc(llval, ll_t_out)
} else {
llval
}
@ -255,20 +269,20 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
(CastTy::Ptr(_), CastTy::Ptr(_)) |
(CastTy::FnPtr, CastTy::Ptr(_)) |
(CastTy::RPtr(_), CastTy::Ptr(_)) =>
build::PointerCast(bcx, llval, ll_t_out),
bcx.pointercast(llval, ll_t_out),
(CastTy::Ptr(_), CastTy::Int(_)) |
(CastTy::FnPtr, CastTy::Int(_)) =>
build::PtrToInt(bcx, llval, ll_t_out),
bcx.ptrtoint(llval, ll_t_out),
(CastTy::Int(_), CastTy::Ptr(_)) =>
build::IntToPtr(bcx, llval, ll_t_out),
bcx.inttoptr(llval, ll_t_out),
(CastTy::Int(_), CastTy::Float) if signed =>
build::SIToFP(bcx, llval, ll_t_out),
bcx.sitofp(llval, ll_t_out),
(CastTy::Int(_), CastTy::Float) =>
build::UIToFP(bcx, llval, ll_t_out),
bcx.uitofp(llval, ll_t_out),
(CastTy::Float, CastTy::Int(IntTy::I)) =>
build::FPToSI(bcx, llval, ll_t_out),
bcx.fptosi(llval, ll_t_out),
(CastTy::Float, CastTy::Int(_)) =>
build::FPToUI(bcx, llval, ll_t_out),
bcx.fptoui(llval, ll_t_out),
_ => bcx.ccx().sess().bug(
&format!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
)
@ -282,13 +296,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
let ll_cft = ll_cast_ty.field_types();
let ll_fft = ll_from_ty.field_types();
let data_cast = build::PointerCast(bcx, data_ptr, ll_cft[0]);
let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
OperandValue::FatPtr(data_cast, meta_ptr)
} else { // cast to thin-ptr
// Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
// pointer-cast of that pointer to desired pointer type.
let llval = build::PointerCast(bcx, data_ptr, ll_cast_ty);
let llval = bcx.pointercast(data_ptr, ll_cast_ty);
OperandValue::Immediate(llval)
}
} else {
@ -296,14 +310,15 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
}
};
(bcx, OperandRef {
let operand = OperandRef {
val: val,
ty: cast_ty
})
};
(bcx, operand)
}
mir::Rvalue::Ref(_, bk, ref lvalue) => {
let tr_lvalue = self.trans_lvalue(bcx, lvalue);
let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
let ty = tr_lvalue.ty.to_ty(bcx.tcx());
let ref_ty = bcx.tcx().mk_ref(
@ -313,66 +328,70 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// Note: lvalues are indirect, so storing the `llval` into the
// destination effectively creates a reference.
if common::type_is_sized(bcx.tcx(), ty) {
(bcx, OperandRef {
let operand = if common::type_is_sized(bcx.tcx(), ty) {
OperandRef {
val: OperandValue::Immediate(tr_lvalue.llval),
ty: ref_ty,
})
}
} else {
(bcx, OperandRef {
OperandRef {
val: OperandValue::FatPtr(tr_lvalue.llval,
tr_lvalue.llextra),
ty: ref_ty,
})
}
}
};
(bcx, operand)
}
mir::Rvalue::Len(ref lvalue) => {
let tr_lvalue = self.trans_lvalue(bcx, lvalue);
(bcx, OperandRef {
val: OperandValue::Immediate(self.lvalue_len(bcx, tr_lvalue)),
let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
let operand = OperandRef {
val: OperandValue::Immediate(self.lvalue_len(&bcx, tr_lvalue)),
ty: bcx.tcx().types.usize,
})
};
(bcx, operand)
}
mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
let lhs = self.trans_operand(bcx, lhs);
let rhs = self.trans_operand(bcx, rhs);
let lhs = self.trans_operand(&bcx, lhs);
let rhs = self.trans_operand(&bcx, rhs);
let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) {
match (lhs.val, rhs.val) {
(OperandValue::FatPtr(lhs_addr, lhs_extra),
OperandValue::FatPtr(rhs_addr, rhs_extra)) => {
base::compare_fat_ptrs(bcx,
lhs_addr, lhs_extra,
rhs_addr, rhs_extra,
lhs.ty, op.to_hir_binop(),
DebugLoc::None)
bcx.with_block(|bcx| {
base::compare_fat_ptrs(bcx,
lhs_addr, lhs_extra,
rhs_addr, rhs_extra,
lhs.ty, op.to_hir_binop(),
DebugLoc::None)
})
}
_ => unreachable!()
}
} else {
self.trans_scalar_binop(bcx, op,
self.trans_scalar_binop(&bcx, op,
lhs.immediate(), rhs.immediate(),
lhs.ty, DebugLoc::None)
lhs.ty)
};
(bcx, OperandRef {
let operand = OperandRef {
val: OperandValue::Immediate(llresult),
ty: self.mir.binop_ty(bcx.tcx(), op, lhs.ty, rhs.ty),
})
};
(bcx, operand)
}
mir::Rvalue::UnaryOp(op, ref operand) => {
let operand = self.trans_operand(bcx, operand);
let operand = self.trans_operand(&bcx, operand);
let lloperand = operand.immediate();
let is_float = operand.ty.is_fp();
let debug_loc = DebugLoc::None;
let llval = match op {
mir::UnOp::Not => build::Not(bcx, lloperand, debug_loc),
mir::UnOp::Not => bcx.not(lloperand),
mir::UnOp::Neg => if is_float {
build::FNeg(bcx, lloperand, debug_loc)
bcx.fneg(lloperand)
} else {
build::Neg(bcx, lloperand, debug_loc)
bcx.neg(lloperand)
}
};
(bcx, OperandRef {
@ -389,16 +408,22 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let llalign = common::C_uint(bcx.ccx(), align);
let llty_ptr = llty.ptr_to();
let box_ty = bcx.tcx().mk_box(content_ty);
let Result { bcx, val: llval } = base::malloc_raw_dyn(bcx,
llty_ptr,
box_ty,
llsize,
llalign,
DebugLoc::None);
(bcx, OperandRef {
val: OperandValue::Immediate(llval),
let mut llval = None;
let bcx = bcx.map_block(|bcx| {
let Result { bcx, val } = base::malloc_raw_dyn(bcx,
llty_ptr,
box_ty,
llsize,
llalign,
DebugLoc::None);
llval = Some(val);
bcx
});
let operand = OperandRef {
val: OperandValue::Immediate(llval.unwrap()),
ty: box_ty,
})
};
(bcx, operand)
}
mir::Rvalue::Repeat(..) |
@ -411,36 +436,35 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
pub fn trans_scalar_binop(&mut self,
bcx: Block<'bcx, 'tcx>,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
op: mir::BinOp,
lhs: ValueRef,
rhs: ValueRef,
input_ty: Ty<'tcx>,
debug_loc: DebugLoc) -> ValueRef {
input_ty: Ty<'tcx>) -> ValueRef {
let is_float = input_ty.is_fp();
let is_signed = input_ty.is_signed();
match op {
mir::BinOp::Add => if is_float {
build::FAdd(bcx, lhs, rhs, debug_loc)
bcx.fadd(lhs, rhs)
} else {
build::Add(bcx, lhs, rhs, debug_loc)
bcx.add(lhs, rhs)
},
mir::BinOp::Sub => if is_float {
build::FSub(bcx, lhs, rhs, debug_loc)
bcx.fsub(lhs, rhs)
} else {
build::Sub(bcx, lhs, rhs, debug_loc)
bcx.sub(lhs, rhs)
},
mir::BinOp::Mul => if is_float {
build::FMul(bcx, lhs, rhs, debug_loc)
bcx.fmul(lhs, rhs)
} else {
build::Mul(bcx, lhs, rhs, debug_loc)
bcx.mul(lhs, rhs)
},
mir::BinOp::Div => if is_float {
build::FDiv(bcx, lhs, rhs, debug_loc)
bcx.fdiv(lhs, rhs)
} else if is_signed {
build::SDiv(bcx, lhs, rhs, debug_loc)
bcx.sdiv(lhs, rhs)
} else {
build::UDiv(bcx, lhs, rhs, debug_loc)
bcx.udiv(lhs, rhs)
},
mir::BinOp::Rem => if is_float {
// LLVM currently always lowers the `frem` instructions appropriate
@ -471,39 +495,47 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty,
tcx.types.f64);
if input_ty == tcx.types.f32 {
let lllhs = build::FPExt(bcx, lhs, f64t);
let llrhs = build::FPExt(bcx, rhs, f64t);
let llres = build::Call(bcx, llfn, &[lllhs, llrhs],
None, debug_loc);
build::FPTrunc(bcx, llres, Type::f32(bcx.ccx()))
let lllhs = bcx.fpext(lhs, f64t);
let llrhs = bcx.fpext(rhs, f64t);
let llres = bcx.call(llfn, &[lllhs, llrhs], None);
bcx.fptrunc(llres, Type::f32(bcx.ccx()))
} else {
build::Call(bcx, llfn, &[lhs, rhs],
None, debug_loc)
bcx.call(llfn, &[lhs, rhs], None)
}
} else {
build::FRem(bcx, lhs, rhs, debug_loc)
bcx.frem(lhs, rhs)
}
} else if is_signed {
build::SRem(bcx, lhs, rhs, debug_loc)
bcx.srem(lhs, rhs)
} else {
build::URem(bcx, lhs, rhs, debug_loc)
bcx.urem(lhs, rhs)
},
mir::BinOp::BitOr => build::Or(bcx, lhs, rhs, debug_loc),
mir::BinOp::BitAnd => build::And(bcx, lhs, rhs, debug_loc),
mir::BinOp::BitXor => build::Xor(bcx, lhs, rhs, debug_loc),
mir::BinOp::Shl => common::build_unchecked_lshift(bcx,
lhs,
rhs,
debug_loc),
mir::BinOp::Shr => common::build_unchecked_rshift(bcx,
input_ty,
lhs,
rhs,
debug_loc),
mir::BinOp::BitOr => bcx.or(lhs, rhs),
mir::BinOp::BitAnd => bcx.and(lhs, rhs),
mir::BinOp::BitXor => bcx.xor(lhs, rhs),
mir::BinOp::Shl => {
bcx.with_block(|bcx| {
common::build_unchecked_lshift(bcx,
lhs,
rhs,
DebugLoc::None)
})
}
mir::BinOp::Shr => {
bcx.with_block(|bcx| {
common::build_unchecked_rshift(bcx,
input_ty,
lhs,
rhs,
DebugLoc::None)
})
}
mir::BinOp::Eq | mir::BinOp::Lt | mir::BinOp::Gt |
mir::BinOp::Ne | mir::BinOp::Le | mir::BinOp::Ge => {
base::compare_scalar_types(bcx, lhs, rhs, input_ty,
op.to_hir_binop(), debug_loc)
bcx.with_block(|bcx| {
base::compare_scalar_types(bcx, lhs, rhs, input_ty,
op.to_hir_binop(), DebugLoc::None)
})
}
}
}

View File

@ -9,16 +9,16 @@
// except according to those terms.
use rustc::mir::repr as mir;
use trans::common::Block;
use trans::common::BlockAndBuilder;
use super::MirContext;
use super::TempRef;
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_statement(&mut self,
bcx: Block<'bcx, 'tcx>,
bcx: BlockAndBuilder<'bcx, 'tcx>,
statement: &mir::Statement<'tcx>)
-> Block<'bcx, 'tcx> {
-> BlockAndBuilder<'bcx, 'tcx> {
debug!("trans_statement(statement={:?})", statement);
match statement.kind {
@ -43,7 +43,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
}
_ => {
let tr_dest = self.trans_lvalue(bcx, lvalue);
let tr_dest = self.trans_lvalue(&bcx, lvalue);
self.trans_rvalue(bcx, tr_dest, rvalue)
}
}