trans: Handle calls for all ABIs through FnType.

Eduard Burtescu 2016-03-06 13:23:20 +02:00
parent 9e036c0ff0
commit 03993882d6
11 changed files with 477 additions and 702 deletions

View File

@@ -9,6 +9,8 @@
// except according to those terms.
use llvm::{self, ValueRef};
use trans::base;
use trans::build::*;
use trans::common::{type_is_fat_ptr, Block};
use trans::context::CrateContext;
use trans::cabi_x86;
@@ -126,6 +128,34 @@ impl ArgType {
pub fn is_ignore(&self) -> bool {
self.kind == ArgKind::Ignore
}
/// Store a direct/indirect value described by this ArgType into an
/// lvalue for the original Rust type of this argument/return.
/// Can be used both for storing formal arguments into Rust variables
/// and for storing the results of call/invoke instructions into their
/// destinations.
pub fn store(&self, bcx: Block, mut val: ValueRef, dst: ValueRef) {
if self.is_ignore() {
return;
}
if self.is_indirect() {
let llsz = llsize_of(bcx.ccx(), self.ty);
let llalign = llalign_of_min(bcx.ccx(), self.ty);
base::call_memcpy(bcx, dst, val, llsz, llalign as u32);
} else if let Some(ty) = self.cast {
let store = Store(bcx, val, PointerCast(bcx, dst, ty.ptr_to()));
let llalign = llalign_of_min(bcx.ccx(), self.ty);
if !bcx.unreachable.get() {
unsafe {
llvm::LLVMSetAlignment(store, llalign);
}
}
} else {
if self.original_ty == Type::i1(bcx.ccx()) {
val = ZExt(bcx, val, Type::i8(bcx.ccx()));
}
Store(bcx, val, dst);
}
}
}
/// Metadata describing how the arguments to a native function
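A rough sketch of how the new store helper is meant to be consumed at a call site (condensed from the callee.rs and MIR changes later in this commit; llfn, llargs, opt_llretslot and debug_loc are stand-ins, not a fixed API):

    // Lower the call, then let the ArgType of the return value decide
    // how the result reaches its Rust destination.
    let (llret, bcx) = base::invoke(bcx, llfn, &llargs, debug_loc);
    if let Some(llretslot) = opt_llretslot {
        if !fn_ty.ret.is_indirect() {
            // Direct return: plain Store (zero-extending i1 to i8 for
            // bool) or a Store through a pointer-cast of the destination
            // when the ABI chose a cast type (e.g. i64 for {i32,i32}).
            // An indirect (sret) result was already written through the
            // out-pointer; the memcpy branch of store() serves the other
            // use, spilling indirect formal arguments into locals.
            fn_ty.ret.store(bcx, llret, llretslot);
        }
    }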

View File

@@ -10,12 +10,12 @@
//! # Translation of inline assembly.
use llvm;
use llvm::{self, ValueRef};
use trans::build::*;
use trans::callee;
use trans::common::*;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::datum::{Datum, Expr};
use trans::expr;
use trans::type_of;
use trans::type_::Type;
@@ -35,6 +35,29 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm)
let temp_scope = fcx.push_custom_cleanup_scope();
let take_datum = |mut bcx: Block<'blk, 'tcx>,
arg_datum: Datum<'tcx, Expr>,
llargs: &mut Vec<ValueRef>|
-> Block<'blk, 'tcx> {
// Make this an rvalue, since we are going to be
// passing ownership.
let arg_datum = unpack_datum!(
bcx, arg_datum.to_rvalue_datum(bcx, "arg"));
// Now that arg_datum is owned, get it into the appropriate
// mode (ref vs value).
let arg_datum = unpack_datum!(
bcx, arg_datum.to_appropriate_datum(bcx));
// Technically, ownership of val passes to the callee.
// However, we must clean up should we panic before the
// callee is actually invoked.
let val = arg_datum.add_clean(bcx.fcx,
cleanup::CustomScope(temp_scope));
llargs.push(val);
bcx
};
let mut ext_inputs = Vec::new();
let mut ext_constraints = Vec::new();
@@ -46,11 +69,7 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm)
let out_datum = unpack_datum!(bcx, expr::trans(bcx, &out.expr));
if out.is_indirect {
bcx = callee::trans_arg_datum(bcx,
expr_ty(bcx, &out.expr),
out_datum,
cleanup::CustomScope(temp_scope),
&mut inputs);
bcx = take_datum(bcx, out_datum, &mut inputs);
if out.is_rw {
ext_inputs.push(*inputs.last().unwrap());
ext_constraints.push(i.to_string());
@@ -59,11 +78,7 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm)
output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty));
outputs.push(out_datum.val);
if out.is_rw {
bcx = callee::trans_arg_datum(bcx,
expr_ty(bcx, &out.expr),
out_datum,
cleanup::CustomScope(temp_scope),
&mut ext_inputs);
bcx = take_datum(bcx, out_datum, &mut ext_inputs);
ext_constraints.push(i.to_string());
}
}
@@ -74,11 +89,7 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm)
constraints.push((*c).clone());
let in_datum = unpack_datum!(bcx, expr::trans(bcx, &input));
bcx = callee::trans_arg_datum(bcx,
expr_ty(bcx, &input),
in_datum,
cleanup::CustomScope(temp_scope),
&mut inputs);
bcx = take_datum(bcx, in_datum, &mut inputs);
}
inputs.extend_from_slice(&ext_inputs[..]);

View File

@@ -1488,7 +1488,9 @@ pub fn new_fn_ctxt<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
}
ty::FnDiverging => false,
};
let debug_context = debuginfo::create_function_debug_context(ccx, id, param_substs, llfndecl);
let debug_context = debuginfo::create_function_debug_context(ccx, id,
param_substs,
llfndecl);
let (blk_id, cfg) = build_cfg(ccx.tcx(), id);
let nested_returns = if let Some(ref cfg) = cfg {
has_nested_returns(ccx.tcx(), cfg, blk_id)

View File

@@ -12,7 +12,7 @@
#![allow(non_snake_case)]
use llvm;
use llvm::{CallConv, AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
use llvm::{AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
use llvm::{Opcode, IntPredicate, RealPredicate};
use llvm::{ValueRef, BasicBlockRef};
use trans::common::*;
@@ -920,20 +920,6 @@ pub fn Call(cx: Block,
B(cx).call(fn_, args, bundle)
}
pub fn CallWithConv(cx: Block,
fn_: ValueRef,
args: &[ValueRef],
conv: CallConv,
debug_loc: DebugLoc)
-> ValueRef {
if cx.unreachable.get() {
return _UndefReturn(cx, fn_);
}
debug_loc.apply(cx.fcx);
let bundle = cx.lpad.get().and_then(|b| b.bundle());
B(cx).call_with_conv(fn_, args, conv, bundle)
}
pub fn AtomicFence(cx: Block, order: AtomicOrdering, scope: SynchronizationScope) {
if cx.unreachable.get() { return; }
B(cx).atomic_fence(order, scope)
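The removed CallWithConv has no direct replacement: the calling convention is no longer set ad hoc per call, but applied together with the other ABI attributes derived from the callee's FnType. The pattern used throughout this commit (a sketch of usage, not new API surface) is:

    let llret = Call(bcx, llfn, &llargs, debug_loc);
    // Sets the calling convention and ABI-relevant attributes on the
    // call instruction itself, which matters for function pointers
    // where no declaration is available.
    fn_ty.apply_attrs_callsite(llret);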

View File

@@ -11,7 +11,7 @@
#![allow(dead_code)] // FFI wrappers
use llvm;
use llvm::{CallConv, AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
use llvm::{AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
use llvm::{Opcode, IntPredicate, RealPredicate, False, OperandBundleDef};
use llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef};
use trans::base;
@@ -843,15 +843,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
pub fn call_with_conv(&self, llfn: ValueRef, args: &[ValueRef],
conv: CallConv,
bundle: Option<&OperandBundleDef>) -> ValueRef {
self.count_insn("callwithconv");
let v = self.call(llfn, args, bundle);
llvm::SetInstructionCallConv(v, conv);
v
}
pub fn select(&self, cond: ValueRef, then_val: ValueRef, else_val: ValueRef) -> ValueRef {
self.count_insn("select");
unsafe {

View File

@@ -37,6 +37,7 @@ use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::closure;
use trans::common::{self, Block, Result, NodeIdAndSpan, CrateContext, FunctionContext};
use trans::common::{C_uint, C_undef};
use trans::consts;
use trans::datum::*;
use trans::debuginfo::DebugLoc;
@@ -46,6 +47,7 @@ use trans::glue;
use trans::inline;
use trans::foreign;
use trans::intrinsic;
use trans::machine::{llalign_of_min, llsize_of_store};
use trans::meth;
use trans::monomorphize::{self, Instance};
use trans::type_::Type;
@@ -60,6 +62,9 @@ use syntax::codemap::DUMMY_SP;
use syntax::errors;
use syntax::ptr::P;
use std::cmp;
#[derive(Debug)]
pub enum CalleeData {
/// Constructor for enum variant/tuple-like-struct.
NamedTupleConstructor(Disr),
@@ -73,6 +78,7 @@ pub enum CalleeData {
Virtual(usize)
}
#[derive(Debug)]
pub struct Callee<'tcx> {
pub data: CalleeData,
pub ty: Ty<'tcx>
@@ -591,16 +597,22 @@ fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
let fcx = bcx.fcx;
let ccx = fcx.ccx;
let (abi, ret_ty) = match callee.ty.sty {
ty::TyFnDef(_, _, ref f) | ty::TyFnPtr(ref f) => {
let sig = bcx.tcx().erase_late_bound_regions(&f.sig);
let sig = infer::normalize_associated_type(bcx.tcx(), &sig);
(f.abi, sig.output)
}
_ => panic!("expected fn item or ptr in Callee::call")
};
let abi = callee.ty.fn_abi();
let sig = callee.ty.fn_sig();
let output = bcx.tcx().erase_late_bound_regions(&sig.output());
let output = infer::normalize_associated_type(bcx.tcx(), &output);
match callee.data {
let extra_args = match args {
ArgExprs(args) if abi != Abi::RustCall => {
args[sig.0.inputs.len()..].iter().map(|expr| {
common::expr_ty_adjusted(bcx, expr)
}).collect()
}
_ => vec![]
};
let fn_ty = callee.direct_fn_type(ccx, &extra_args);
let mut callee = match callee.data {
Intrinsic => {
assert!(abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic);
assert!(dest.is_some());
@@ -613,7 +625,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
};
let arg_cleanup_scope = fcx.push_custom_cleanup_scope();
return intrinsic::trans_intrinsic_call(bcx, callee.ty,
return intrinsic::trans_intrinsic_call(bcx, callee.ty, &fn_ty,
arg_cleanup_scope, args,
dest.unwrap(),
call_info);
@@ -628,34 +640,25 @@ fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
dest.unwrap(),
debug_loc);
}
_ => {}
}
// Intrinsics should not become actual functions.
// We trans them in place in `trans_intrinsic_call`
assert!(abi != Abi::RustIntrinsic && abi != Abi::PlatformIntrinsic);
let is_rust_fn = abi == Abi::Rust || abi == Abi::RustCall;
f => f
};
// Generate a location to store the result. If the user does
// not care about the result, just make a stack slot.
let opt_llretslot = dest.and_then(|dest| match dest {
expr::SaveIn(dst) => Some(dst),
expr::Ignore => {
let ret_ty = match ret_ty {
ty::FnConverging(ret_ty) => ret_ty,
ty::FnDiverging => ccx.tcx().mk_nil()
let needs_drop = || match output {
ty::FnConverging(ret_ty) => bcx.fcx.type_needs_drop(ret_ty),
ty::FnDiverging => false
};
if !is_rust_fn ||
type_of::return_uses_outptr(ccx, ret_ty) ||
bcx.fcx.type_needs_drop(ret_ty) {
if fn_ty.ret.is_indirect() || fn_ty.ret.cast.is_some() || needs_drop() {
// Push the out-pointer if we use an out-pointer for this
// return type, otherwise push "undef".
if common::type_is_zero_size(ccx, ret_ty) {
let llty = type_of::type_of(ccx, ret_ty);
Some(common::C_undef(llty.ptr_to()))
if fn_ty.ret.is_ignore() {
Some(C_undef(fn_ty.ret.original_ty.ptr_to()))
} else {
let llresult = alloc_ty(bcx, ret_ty, "__llret");
let llresult = alloca(bcx, fn_ty.ret.original_ty, "__llret");
call_lifetime_start(bcx, llresult);
Some(llresult)
}
@@ -665,134 +668,95 @@ fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
}
});
let mut llresult = unsafe {
llvm::LLVMGetUndef(Type::nil(ccx).ptr_to().to_ref())
};
// If there is no destination, the return must be direct, with no cast.
if opt_llretslot.is_none() {
assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none());
}
let mut llargs = Vec::new();
if fn_ty.ret.is_indirect() {
let mut llretslot = opt_llretslot.unwrap();
if let Some(ty) = fn_ty.ret.cast {
llretslot = PointerCast(bcx, llretslot, ty.ptr_to());
}
llargs.push(llretslot);
}
let arg_cleanup_scope = fcx.push_custom_cleanup_scope();
bcx = trans_args(bcx, abi, &fn_ty, &mut callee, args, &mut llargs,
cleanup::CustomScope(arg_cleanup_scope));
fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
// The code below invokes the function, using either the Rust
// conventions (if it is a rust fn) or the native conventions
// (otherwise). The important part is that, when all is said
// and done, either the return value of the function will have been
// written in opt_llretslot (if it is Some) or `llresult` will be
// set appropriately (otherwise).
if is_rust_fn {
let mut llargs = Vec::new();
let llfn = match callee {
Fn(f) => f,
_ => unreachable!("expected fn pointer callee, found {:?}", callee)
};
if let (ty::FnConverging(ret_ty), Some(mut llretslot)) = (ret_ty, opt_llretslot) {
if type_of::return_uses_outptr(ccx, ret_ty) {
let llformal_ret_ty = type_of::type_of(ccx, ret_ty).ptr_to();
let llret_ty = common::val_ty(llretslot);
if llformal_ret_ty != llret_ty {
// this could happen due to e.g. subtyping
debug!("casting actual return type ({:?}) to match formal ({:?})",
llret_ty, llformal_ret_ty);
llretslot = PointerCast(bcx, llretslot, llformal_ret_ty);
}
llargs.push(llretslot);
}
let (llret, mut bcx) = base::invoke(bcx, llfn, &llargs, debug_loc);
if !bcx.unreachable.get() {
fn_ty.apply_attrs_callsite(llret);
}
// If the function we just called does not use an outpointer,
// store the result into the Rust outpointer. Cast the outpointer
// type to match, because some ABIs will use a different type than
// the Rust type; e.g., a {u32,u32} struct could be returned as
// u64.
if !fn_ty.ret.is_ignore() && !fn_ty.ret.is_indirect() {
if let Some(llforeign_ret_ty) = fn_ty.ret.cast {
let llrust_ret_ty = fn_ty.ret.original_ty;
let llretslot = opt_llretslot.unwrap();
// The actual return type is a struct, but the ABI
// adaptation code has cast it into some scalar type. The
// code that follows is the only reliable way I have
// found to do a transform like i64 -> {i32,i32}.
// Basically we dump the data onto the stack then memcpy it.
//
// Other approaches I tried:
// - Casting rust ret pointer to the foreign type and using Store
// is (a) unsafe if size of foreign type > size of rust type and
// (b) runs afoul of strict aliasing rules, yielding invalid
// assembly under -O (specifically, the store gets removed).
// - Truncating foreign type to correct integral type and then
// bitcasting to the struct type yields invalid cast errors.
let llscratch = base::alloca(bcx, llforeign_ret_ty, "__cast");
base::call_lifetime_start(bcx, llscratch);
Store(bcx, llret, llscratch);
let llscratch_i8 = PointerCast(bcx, llscratch, Type::i8(ccx).ptr_to());
let llretptr_i8 = PointerCast(bcx, llretslot, Type::i8(ccx).ptr_to());
let llrust_size = llsize_of_store(ccx, llrust_ret_ty);
let llforeign_align = llalign_of_min(ccx, llforeign_ret_ty);
let llrust_align = llalign_of_min(ccx, llrust_ret_ty);
let llalign = cmp::min(llforeign_align, llrust_align);
debug!("llrust_size={}", llrust_size);
base::call_memcpy(bcx, llretptr_i8, llscratch_i8,
C_uint(ccx, llrust_size), llalign as u32);
base::call_lifetime_end(bcx, llscratch);
} else if let Some(llretslot) = opt_llretslot {
base::store_ty(bcx, llret, llretslot, output.unwrap());
}
let arg_start = llargs.len();
// Push the arguments.
bcx = trans_args(bcx,
args,
callee.ty,
&mut llargs,
cleanup::CustomScope(arg_cleanup_scope),
abi);
fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
let datum = match callee.data {
Fn(f) => immediate_rvalue(f, callee.ty),
Virtual(idx) => {
// The data and vtable pointers were split by trans_arg_datum.
let vtable = llargs.remove(arg_start + 1);
meth::get_virtual_method(bcx, vtable, idx, callee.ty)
}
_ => unreachable!()
};
// Invoke the actual Rust fn and update bcx/llresult.
let (llret, b) = base::invoke(bcx, datum.val, &llargs, debug_loc);
let fn_ty = match datum.ty.sty {
ty::TyFnDef(_, _, f) | ty::TyFnPtr(f) => {
let sig = bcx.tcx().erase_late_bound_regions(&f.sig);
let sig = infer::normalize_associated_type(bcx.tcx(), &sig);
FnType::new(bcx.ccx(), f.abi, &sig, &[])
}
_ => unreachable!("expected fn type")
};
if !bcx.unreachable.get() {
fn_ty.apply_attrs_callsite(llret);
}
bcx = b;
llresult = llret;
// If the Rust convention for this type is to return via
// the return value, copy it into llretslot.
if let Some(llretslot) = opt_llretslot {
let llty = fn_ty.ret.original_ty;
if !fn_ty.ret.is_indirect() && llty != Type::void(bcx.ccx()) {
store_ty(bcx, llret, llretslot, ret_ty.unwrap())
}
}
} else {
// Lang items are the only case where dest is None, and
// they are always Rust fns.
assert!(dest.is_some());
let mut llargs = Vec::new();
let (llfn, arg_tys) = match (callee.data, &args) {
(Fn(f), &ArgExprs(a)) => {
(f, a.iter().map(|x| common::expr_ty_adjusted(bcx, &x)).collect())
}
_ => panic!("expected fn ptr and arg exprs.")
};
bcx = trans_args(bcx,
args,
callee.ty,
&mut llargs,
cleanup::CustomScope(arg_cleanup_scope),
abi);
fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
bcx = foreign::trans_native_call(bcx,
callee.ty,
llfn,
opt_llretslot.unwrap(),
&llargs[..],
arg_tys,
debug_loc);
}
fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_cleanup_scope);
// If the caller doesn't care about the result of this fn call,
// drop the temporary slot we made.
match (dest, opt_llretslot, ret_ty) {
match (dest, opt_llretslot, output) {
(Some(expr::Ignore), Some(llretslot), ty::FnConverging(ret_ty)) => {
// drop the value if it is not being saved.
bcx = glue::drop_ty(bcx,
llretslot,
ret_ty,
debug_loc);
bcx = glue::drop_ty(bcx, llretslot, ret_ty, debug_loc);
call_lifetime_end(bcx, llretslot);
}
_ => {}
}
if ret_ty == ty::FnDiverging {
if output == ty::FnDiverging {
Unreachable(bcx);
}
Result::new(bcx, llresult)
Result::new(bcx, llret)
}
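Taken together, trans_call_inner now lowers every ABI through one sequence; condensed (a schematic of the code above, with error paths and details elided):

    // 1. ABI-aware signature, including any variadic extra arguments.
    let fn_ty = callee.direct_fn_type(ccx, &extra_args);
    let mut llargs = Vec::new();
    // 2. The out-pointer goes first if the return is indirect (sret).
    if fn_ty.ret.is_indirect() { llargs.push(llretslot); }
    // 3. Each argument is adapted per fn_ty.args[i]: padding, ignore,
    //    cast, direct vs. indirect, fat-pointer splitting.
    bcx = trans_args(bcx, abi, &fn_ty, &mut callee, args, &mut llargs,
                     cleanup::CustomScope(arg_cleanup_scope));
    // 4. One call/invoke for all ABIs; the calling convention and
    //    attributes are applied at the call site.
    let (llret, bcx) = base::invoke(bcx, llfn, &llargs, debug_loc);
    fn_ty.apply_attrs_callsite(llret);
    // 5. A direct (possibly ABI-cast) result is stored into the slot.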
pub enum CallArgs<'a, 'tcx> {
@@ -818,20 +782,19 @@ pub enum CallArgs<'a, 'tcx> {
fn trans_args_under_call_abi<'blk, 'tcx>(
mut bcx: Block<'blk, 'tcx>,
arg_exprs: &[P<hir::Expr>],
fn_ty: Ty<'tcx>,
callee: &mut CalleeData,
fn_ty: &FnType,
llargs: &mut Vec<ValueRef>,
arg_cleanup_scope: cleanup::ScopeId)
-> Block<'blk, 'tcx>
{
let sig = bcx.tcx().erase_late_bound_regions(&fn_ty.fn_sig());
let sig = infer::normalize_associated_type(bcx.tcx(), &sig);
let args = sig.inputs;
let mut arg_idx = 0;
// Translate the `self` argument first.
let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
bcx = trans_arg_datum(bcx,
args[0],
arg_datum,
callee, fn_ty, &mut arg_idx,
arg_cleanup_scope,
llargs);
@@ -858,8 +821,8 @@ fn trans_args_under_call_abi<'blk, 'tcx>(
adt::trans_field_ptr(bcx, repr_ptr, srcval, Disr(0), i)
}).to_expr_datum();
bcx = trans_arg_datum(bcx,
field_type,
arg_datum,
callee, fn_ty, &mut arg_idx,
arg_cleanup_scope,
llargs);
}
@@ -873,64 +836,20 @@ fn trans_args_under_call_abi<'blk, 'tcx>(
bcx
}
fn trans_overloaded_call_args<'blk, 'tcx>(
mut bcx: Block<'blk, 'tcx>,
arg_exprs: Vec<&hir::Expr>,
fn_ty: Ty<'tcx>,
llargs: &mut Vec<ValueRef>,
arg_cleanup_scope: cleanup::ScopeId)
-> Block<'blk, 'tcx> {
// Translate the `self` argument first.
let sig = bcx.tcx().erase_late_bound_regions(&fn_ty.fn_sig());
let sig = infer::normalize_associated_type(bcx.tcx(), &sig);
let arg_tys = sig.inputs;
let arg_datum = unpack_datum!(bcx, expr::trans(bcx, arg_exprs[0]));
bcx = trans_arg_datum(bcx,
arg_tys[0],
arg_datum,
arg_cleanup_scope,
llargs);
// Now untuple the rest of the arguments.
let tuple_type = arg_tys[1];
match tuple_type.sty {
ty::TyTuple(ref field_types) => {
for (i, &field_type) in field_types.iter().enumerate() {
let arg_datum =
unpack_datum!(bcx, expr::trans(bcx, arg_exprs[i + 1]));
bcx = trans_arg_datum(bcx,
field_type,
arg_datum,
arg_cleanup_scope,
llargs);
}
}
_ => {
bcx.sess().span_bug(arg_exprs[0].span,
"argument to `.call()` wasn't a tuple?!")
}
};
bcx
}
pub fn trans_args<'a, 'blk, 'tcx>(cx: Block<'blk, 'tcx>,
pub fn trans_args<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
abi: Abi,
fn_ty: &FnType,
callee: &mut CalleeData,
args: CallArgs<'a, 'tcx>,
fn_ty: Ty<'tcx>,
llargs: &mut Vec<ValueRef>,
arg_cleanup_scope: cleanup::ScopeId,
abi: Abi)
arg_cleanup_scope: cleanup::ScopeId)
-> Block<'blk, 'tcx> {
debug!("trans_args(abi={})", abi);
let _icx = push_ctxt("trans_args");
let sig = cx.tcx().erase_late_bound_regions(&fn_ty.fn_sig());
let sig = infer::normalize_associated_type(cx.tcx(), &sig);
let arg_tys = sig.inputs;
let variadic = sig.variadic;
let mut bcx = cx;
let mut bcx = bcx;
let mut arg_idx = 0;
// First we figure out the caller's view of the types of the arguments.
// This will be needed if this is a generic call, because the callee has
@@ -940,78 +859,90 @@ pub fn trans_args<'a, 'blk, 'tcx>(cx: Block<'blk, 'tcx>,
if abi == Abi::RustCall {
// This is only used for direct calls to the `call`,
// `call_mut` or `call_once` functions.
return trans_args_under_call_abi(cx,
arg_exprs,
fn_ty,
return trans_args_under_call_abi(bcx,
arg_exprs, callee, fn_ty,
llargs,
arg_cleanup_scope)
}
let num_formal_args = arg_tys.len();
for (i, arg_expr) in arg_exprs.iter().enumerate() {
let arg_ty = if i >= num_formal_args {
assert!(variadic);
common::expr_ty_adjusted(cx, &arg_expr)
} else {
arg_tys[i]
};
for arg_expr in arg_exprs {
let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &arg_expr));
bcx = trans_arg_datum(bcx, arg_ty, arg_datum,
bcx = trans_arg_datum(bcx,
arg_datum,
callee, fn_ty, &mut arg_idx,
arg_cleanup_scope,
llargs);
}
}
ArgOverloadedCall(arg_exprs) => {
return trans_overloaded_call_args(cx,
arg_exprs,
fn_ty,
llargs,
arg_cleanup_scope)
for expr in arg_exprs {
let arg_datum =
unpack_datum!(bcx, expr::trans(bcx, expr));
bcx = trans_arg_datum(bcx,
arg_datum,
callee, fn_ty, &mut arg_idx,
arg_cleanup_scope,
llargs);
}
}
ArgOverloadedOp(lhs, rhs) => {
assert!(!variadic);
bcx = trans_arg_datum(bcx, arg_tys[0], lhs,
bcx = trans_arg_datum(bcx, lhs,
callee, fn_ty, &mut arg_idx,
arg_cleanup_scope,
llargs);
if let Some(rhs) = rhs {
assert_eq!(arg_tys.len(), 2);
bcx = trans_arg_datum(bcx, arg_tys[1], rhs,
bcx = trans_arg_datum(bcx, rhs,
callee, fn_ty, &mut arg_idx,
arg_cleanup_scope,
llargs);
} else {
assert_eq!(arg_tys.len(), 1);
}
}
ArgVals(vs) => {
llargs.extend_from_slice(vs);
match *callee {
Virtual(idx) => {
llargs.push(vs[0]);
let fn_ptr = meth::get_virtual_method(bcx, vs[1], idx);
let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
*callee = Fn(PointerCast(bcx, fn_ptr, llty));
llargs.extend_from_slice(&vs[2..]);
}
_ => llargs.extend_from_slice(vs)
}
}
}
bcx
}
pub fn trans_arg_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
formal_arg_ty: Ty<'tcx>,
arg_datum: Datum<'tcx, Expr>,
arg_cleanup_scope: cleanup::ScopeId,
llargs: &mut Vec<ValueRef>)
-> Block<'blk, 'tcx> {
fn trans_arg_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
arg_datum: Datum<'tcx, Expr>,
callee: &mut CalleeData,
fn_ty: &FnType,
next_idx: &mut usize,
arg_cleanup_scope: cleanup::ScopeId,
llargs: &mut Vec<ValueRef>)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_arg_datum");
let mut bcx = bcx;
let ccx = bcx.ccx();
debug!("trans_arg_datum({:?})", formal_arg_ty);
debug!("trans_arg_datum({:?})", arg_datum);
let arg_datum_ty = arg_datum.ty;
let arg = &fn_ty.args[*next_idx];
*next_idx += 1;
debug!(" arg datum: {:?}", arg_datum);
// Fill padding with undef value, where applicable.
if let Some(ty) = arg.pad {
llargs.push(C_undef(ty));
}
let mut val = if common::type_is_fat_ptr(bcx.tcx(), arg_datum_ty) &&
!bcx.fcx.type_needs_drop(arg_datum_ty) {
arg_datum.val
// Determine whether we want a by-ref datum even if the
// value would normally be passed by value.
let want_by_ref = arg.is_indirect() || arg.cast.is_some();
let fat_ptr = common::type_is_fat_ptr(bcx.tcx(), arg_datum.ty);
let (by_ref, val) = if fat_ptr && !bcx.fcx.type_needs_drop(arg_datum.ty) {
(true, arg_datum.val)
} else {
// Make this an rvalue, since we are going to be
// passing ownership.
@@ -1020,33 +951,70 @@ pub fn trans_arg_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// Now that arg_datum is owned, get it into the appropriate
// mode (ref vs value).
let arg_datum = unpack_datum!(
bcx, arg_datum.to_appropriate_datum(bcx));
let arg_datum = unpack_datum!(bcx, if want_by_ref {
arg_datum.to_ref_datum(bcx)
} else {
arg_datum.to_appropriate_datum(bcx)
});
// Technically, ownership of val passes to the callee.
// However, we must clean up should we panic before the
// callee is actually invoked.
arg_datum.add_clean(bcx.fcx, arg_cleanup_scope)
(arg_datum.kind.is_by_ref(),
arg_datum.add_clean(bcx.fcx, arg_cleanup_scope))
};
if type_of::arg_is_indirect(ccx, formal_arg_ty) && formal_arg_ty != arg_datum_ty {
// this could happen due to e.g. subtyping
let llformal_arg_ty = type_of::type_of_explicit_arg(ccx, formal_arg_ty);
debug!("casting actual type ({:?}) to match formal ({:?})",
Value(val), llformal_arg_ty);
debug!("Rust types: {:?}; {:?}", arg_datum_ty,
formal_arg_ty);
val = PointerCast(bcx, val, llformal_arg_ty);
if arg.is_ignore() {
return bcx;
}
debug!("--- trans_arg_datum passing {:?}", Value(val));
if common::type_is_fat_ptr(bcx.tcx(), formal_arg_ty) {
if fat_ptr {
// Fat pointers should be passed without any transformations.
assert!(!arg.is_indirect() && arg.cast.is_none());
llargs.push(Load(bcx, expr::get_dataptr(bcx, val)));
llargs.push(Load(bcx, expr::get_meta(bcx, val)));
} else {
llargs.push(val);
let info_arg = &fn_ty.args[*next_idx];
*next_idx += 1;
assert!(!info_arg.is_indirect() && info_arg.cast.is_none());
let info = Load(bcx, expr::get_meta(bcx, val));
if let Virtual(idx) = *callee {
// We have to grab the fn pointer from the vtable while
// handling the first argument; the assert below checks
// that we are indeed at that point.
assert_eq!(*next_idx, 2);
assert!(info_arg.is_ignore());
let fn_ptr = meth::get_virtual_method(bcx, info, idx);
let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
*callee = Fn(PointerCast(bcx, fn_ptr, llty));
} else {
assert!(!info_arg.is_ignore());
llargs.push(info);
}
return bcx;
}
let mut val = val;
if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
if arg.original_ty == Type::i1(bcx.ccx()) {
// We store bools as i8 so we need to truncate to i1.
val = LoadRangeAssert(bcx, val, 0, 2, llvm::False);
val = Trunc(bcx, val, arg.original_ty);
} else if let Some(ty) = arg.cast {
val = Load(bcx, PointerCast(bcx, val, ty.ptr_to()));
if !bcx.unreachable.get() {
let llalign = llalign_of_min(bcx.ccx(), arg.ty);
unsafe {
llvm::LLVMSetAlignment(val, llalign);
}
}
} else {
val = Load(bcx, val);
}
}
llargs.push(val);
bcx
}
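Two representation details the rewritten trans_arg_datum relies on can be checked from ordinary Rust (a standalone sanity check, not compiler code):

    use std::mem::size_of;

    fn main() {
        // A fat pointer is two pointer-sized words (data + length or
        // vtable), which is why a single Rust-level argument can occupy
        // two consecutive ArgType slots in fn_ty.args.
        assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());

        // bool occupies a full byte in memory (i8 at the LLVM level) but
        // is passed by value as i1 -- hence the LoadRangeAssert(0, 2)
        // followed by Trunc when loading a by-ref bool argument.
        assert_eq!(size_of::<bool>(), 1);
        assert_eq!(true as u8, 1);
    }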

View File

@@ -107,211 +107,6 @@ pub fn register_static(ccx: &CrateContext,
return c;
}
/// Prepares a call to a native function. This requires adapting
/// from the Rust argument passing rules to the native rules.
///
/// # Parameters
///
/// - `callee_ty`: Rust type for the function we are calling
/// - `llfn`: the function pointer we are calling
/// - `llretptr`: where to store the return value of the function
/// - `llargs_rust`: a list of the argument values, prepared
/// as they would be if calling a Rust function
/// - `passed_arg_tys`: Rust types for the arguments. Normally we
/// can derive these from callee_ty, but for variadic functions
/// passed_arg_tys will include the Rust types of all the arguments,
/// including the ones not specified in the fn's signature.
pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
callee_ty: Ty<'tcx>,
llfn: ValueRef,
llretptr: ValueRef,
llargs_rust: &[ValueRef],
passed_arg_tys: Vec<Ty<'tcx>>,
call_debug_loc: DebugLoc)
-> Block<'blk, 'tcx>
{
let ccx = bcx.ccx();
debug!("trans_native_call(callee_ty={:?}, llfn={:?}, llretptr={:?})",
callee_ty, Value(llfn), Value(llretptr));
let (fn_abi, fn_sig) = match callee_ty.sty {
ty::TyFnDef(_, _, ref fn_ty) |
ty::TyFnPtr(ref fn_ty) => (fn_ty.abi, &fn_ty.sig),
_ => ccx.sess().bug("trans_native_call called on non-function type")
};
let fn_sig = ccx.tcx().erase_late_bound_regions(fn_sig);
let fn_sig = infer::normalize_associated_type(ccx.tcx(), &fn_sig);
let extra_args = &passed_arg_tys[fn_sig.inputs.len()..];
let fn_type = FnType::new(ccx, fn_abi, &fn_sig, extra_args);
let mut llargs_foreign = Vec::new();
// If the foreign ABI expects the return value by pointer, supply the
// pointer that Rust gave us. Sometimes we have to bitcast
// because foreign fns return slightly different (but equivalent)
// views on the same type (e.g., i64 in place of {i32,i32}).
if fn_type.ret.is_indirect() {
match fn_type.ret.cast {
Some(ty) => {
let llcastedretptr =
BitCast(bcx, llretptr, ty.ptr_to());
llargs_foreign.push(llcastedretptr);
}
None => {
llargs_foreign.push(llretptr);
}
}
}
let mut i = 0;
for &passed_arg_ty in &passed_arg_tys {
let arg_ty = fn_type.args[i];
if arg_ty.is_ignore() {
i += 1;
continue;
}
if type_is_fat_ptr(ccx.tcx(), passed_arg_ty) {
// Fat pointers are one pointer and one integer or pointer.
let (a, b) = (fn_type.args[i], fn_type.args[i + 1]);
assert_eq!((a.cast, b.cast), (None, None));
assert!(!a.is_indirect() && !b.is_indirect());
if let Some(ty) = a.pad {
llargs_foreign.push(C_undef(ty));
}
llargs_foreign.push(llargs_rust[i]);
i += 1;
if let Some(ty) = b.pad {
llargs_foreign.push(C_undef(ty));
}
llargs_foreign.push(llargs_rust[i]);
i += 1;
continue;
}
// Does Rust pass this argument by pointer?
let rust_indirect = type_of::arg_is_indirect(ccx, passed_arg_ty);
let mut llarg_rust = llargs_rust[i];
i += 1;
debug!("argument {}, llarg_rust={:?}, rust_indirect={}, arg_ty={:?}",
i,
Value(llarg_rust),
rust_indirect,
arg_ty);
// Ensure that we always have the Rust value indirectly,
// because it makes bitcasting easier.
if !rust_indirect {
let scratch = base::alloc_ty(bcx, passed_arg_ty, "__arg");
base::store_ty(bcx, llarg_rust, scratch, passed_arg_ty);
llarg_rust = scratch;
}
debug!("llarg_rust={:?} (after indirection)",
Value(llarg_rust));
// Check whether we need to do any casting
if let Some(ty) = arg_ty.cast {
llarg_rust = BitCast(bcx, llarg_rust, ty.ptr_to());
}
debug!("llarg_rust={:?} (after casting)",
Value(llarg_rust));
// Finally, load the value if needed for the foreign ABI
let foreign_indirect = arg_ty.is_indirect();
let llarg_foreign = if foreign_indirect {
llarg_rust
} else if passed_arg_ty.is_bool() {
let val = LoadRangeAssert(bcx, llarg_rust, 0, 2, llvm::False);
Trunc(bcx, val, Type::i1(bcx.ccx()))
} else {
Load(bcx, llarg_rust)
};
debug!("argument {}, llarg_foreign={:?}",
i, Value(llarg_foreign));
// fill padding with undef value
if let Some(ty) = arg_ty.pad {
llargs_foreign.push(C_undef(ty));
}
llargs_foreign.push(llarg_foreign);
}
// A function pointer is called without the declaration available, so we have to apply
// any attributes with ABI implications directly to the call instruction.
let llforeign_retval = CallWithConv(bcx,
llfn,
&llargs_foreign[..],
fn_type.cconv,
call_debug_loc);
if !bcx.unreachable.get() {
fn_type.apply_attrs_callsite(llforeign_retval);
}
// If the function we just called does not use an outpointer,
// store the result into the Rust outpointer. Cast the outpointer
// type to match, because some ABIs will use a different type than
// the Rust type; e.g., a {u32,u32} struct could be returned as
// u64.
let llrust_ret_ty = fn_type.ret.original_ty;
if llrust_ret_ty != Type::void(ccx) && !fn_type.ret.is_indirect() {
let llforeign_ret_ty = fn_type.ret.cast.unwrap_or(llrust_ret_ty);
debug!("llretptr={:?}", Value(llretptr));
debug!("llforeign_retval={:?}", Value(llforeign_retval));
debug!("llrust_ret_ty={:?}", llrust_ret_ty);
debug!("llforeign_ret_ty={:?}", llforeign_ret_ty);
if llrust_ret_ty == llforeign_ret_ty {
match fn_sig.output {
ty::FnConverging(result_ty) => {
base::store_ty(bcx, llforeign_retval, llretptr, result_ty)
}
ty::FnDiverging => {}
}
} else {
// The actual return type is a struct, but the ABI
// adaptation code has cast it into some scalar type. The
// code that follows is the only reliable way I have
// found to do a transform like i64 -> {i32,i32}.
// Basically we dump the data onto the stack then memcpy it.
//
// Other approaches I tried:
// - Casting rust ret pointer to the foreign type and using Store
// is (a) unsafe if size of foreign type > size of rust type and
// (b) runs afoul of strict aliasing rules, yielding invalid
// assembly under -O (specifically, the store gets removed).
// - Truncating foreign type to correct integral type and then
// bitcasting to the struct type yields invalid cast errors.
let llscratch = base::alloca(bcx, llforeign_ret_ty, "__cast");
base::call_lifetime_start(bcx, llscratch);
Store(bcx, llforeign_retval, llscratch);
let llscratch_i8 = BitCast(bcx, llscratch, Type::i8(ccx).ptr_to());
let llretptr_i8 = BitCast(bcx, llretptr, Type::i8(ccx).ptr_to());
let llrust_size = machine::llsize_of_store(ccx, llrust_ret_ty);
let llforeign_align = machine::llalign_of_min(ccx, llforeign_ret_ty);
let llrust_align = machine::llalign_of_min(ccx, llrust_ret_ty);
let llalign = cmp::min(llforeign_align, llrust_align);
debug!("llrust_size={}", llrust_size);
base::call_memcpy(bcx, llretptr_i8, llscratch_i8,
C_uint(ccx, llrust_size), llalign as u32);
base::call_lifetime_end(bcx, llscratch);
}
}
return bcx;
}
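The "dump to stack then memcpy" trick described in the removed comment above (and kept alive in callee.rs) can be illustrated with plain Rust; the values here are made up for the example:

    fn main() {
        // Suppose the ABI returned a {u32,u32} struct as a single i64.
        let raw: u64 = 0x0000_0002_0000_0001;
        // Store the scalar into a scratch slot...
        let scratch = raw.to_ne_bytes();
        // ...then memcpy the bytes into storage of the aggregate type,
        // avoiding both oversized stores and strict-aliasing trouble.
        let mut dest = [0u32; 2];
        unsafe {
            std::ptr::copy_nonoverlapping(scratch.as_ptr(),
                                          dest.as_mut_ptr() as *mut u8,
                                          scratch.len());
        }
        println!("{:?}", dest); // [1, 2] on little-endian targets
    }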
///////////////////////////////////////////////////////////////////////////
// Rust functions with foreign ABIs
//

View File

@@ -18,7 +18,7 @@ use llvm::{ValueRef, TypeKind};
use middle::infer;
use middle::subst;
use middle::subst::FnSpace;
use trans::abi::Abi;
use trans::abi::{Abi, FnType};
use trans::adt;
use trans::attributes;
use trans::base::*;
@@ -172,6 +172,7 @@ pub fn check_intrinsics(ccx: &CrateContext) {
/// add them to librustc_trans/trans/context.rs
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
callee_ty: Ty<'tcx>,
fn_ty: &FnType,
cleanup_scope: cleanup::CustomScopeIndex,
args: callee::CallArgs<'a, 'tcx>,
dest: expr::Dest,
@@ -396,11 +397,12 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
// Push the arguments.
let mut llargs = Vec::new();
bcx = callee::trans_args(bcx,
Abi::RustIntrinsic,
fn_ty,
&mut callee::Intrinsic,
args,
callee_ty,
&mut llargs,
cleanup::CustomScope(cleanup_scope),
Abi::RustIntrinsic);
cleanup::CustomScope(cleanup_scope));
fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
@@ -973,7 +975,15 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
if val_ty(llval) != Type::void(ccx) &&
machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
store_ty(bcx, llval, llresult, ret_ty);
if let Some(ty) = fn_ty.ret.cast {
let ptr = PointerCast(bcx, llresult, ty.ptr_to());
let store = Store(bcx, llval, ptr);
unsafe {
llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty));
}
} else {
store_ty(bcx, llval, llresult, ret_ty);
}
}
// If we made a temporary stack slot, let's clean it up

View File

@@ -41,29 +41,16 @@ use syntax::codemap::DUMMY_SP;
// drop_glue pointer, size, align.
const VTABLE_OFFSET: usize = 3;
/// Extracts a method from a trait object's vtable, at the
/// specified index, and casts it to the given type.
/// Extracts a method from a trait object's vtable, at the specified index.
pub fn get_virtual_method<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
llvtable: ValueRef,
vtable_index: usize,
method_ty: Ty<'tcx>)
-> Datum<'tcx, Rvalue> {
let _icx = push_ctxt("meth::get_virtual_method");
let ccx = bcx.ccx();
vtable_index: usize)
-> ValueRef {
// Load the data pointer from the object.
debug!("get_virtual_method(callee_ty={}, vtable_index={}, llvtable={:?})",
method_ty, vtable_index, Value(llvtable));
debug!("get_virtual_method(vtable_index={}, llvtable={:?})",
vtable_index, Value(llvtable));
let mptr = Load(bcx, GEPi(bcx, llvtable, &[vtable_index + VTABLE_OFFSET]));
// Replace the self type (&Self or Box<Self>) with an opaque pointer.
if let ty::TyFnDef(_, _, fty) = method_ty.sty {
let opaque_ty = opaque_method_ty(ccx.tcx(), fty);
immediate_rvalue(PointerCast(bcx, mptr, type_of(ccx, opaque_ty)), opaque_ty)
} else {
immediate_rvalue(mptr, method_ty)
}
Load(bcx, GEPi(bcx, llvtable, &[vtable_index + VTABLE_OFFSET]))
}
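For reference, the layout this indexes into (per the VTABLE_OFFSET comment above), schematically:

    // vtable = [ drop_glue, size, align, method 0, method 1, ... ]
    //            |<-- VTABLE_OFFSET = 3 -->|
    // get_virtual_method(bcx, llvtable, i) loads vtable[i + VTABLE_OFFSET];
    // the caller then PointerCasts the raw fn pointer to the LLVM type
    // computed from FnType (see trans_arg_datum and the MIR changes).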
/// Generate a shim function that allows an object type like `SomeTrait` to
@@ -323,23 +310,6 @@ pub fn get_vtable_methods<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
.collect()
}
/// Replace the self type (&Self or Box<Self>) with an opaque pointer.
fn opaque_method_ty<'tcx>(tcx: &TyCtxt<'tcx>, method_ty: &ty::BareFnTy<'tcx>)
-> Ty<'tcx> {
let mut inputs = method_ty.sig.0.inputs.clone();
inputs[0] = tcx.mk_mut_ptr(tcx.mk_mach_int(ast::IntTy::I8));
tcx.mk_fn_ptr(ty::BareFnTy {
unsafety: method_ty.unsafety,
abi: method_ty.abi,
sig: ty::Binder(ty::FnSig {
inputs: inputs,
output: method_ty.sig.0.output,
variadic: method_ty.sig.0.variadic,
}),
})
}
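The opaque-self rewriting removed here is obsolete: callers now cast the raw method pointer to the LLVM function type they computed via FnType. The replacement pattern, verbatim from trans_arg_datum earlier in this commit:

    let fn_ptr = meth::get_virtual_method(bcx, info, idx);
    let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
    *callee = Fn(PointerCast(bcx, fn_ptr, llty));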
#[derive(Debug)]
pub struct ImplMethod<'tcx> {
pub method: Rc<ty::Method<'tcx>>,

View File

@@ -8,25 +8,26 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::{BasicBlockRef, ValueRef, OperandBundleDef};
use rustc::middle::{infer, ty};
use llvm::{self, BasicBlockRef, ValueRef, OperandBundleDef};
use rustc::middle::ty;
use rustc::mir::repr as mir;
use trans::abi::{Abi, FnType};
use trans::adt;
use trans::base;
use trans::build;
use trans::callee::{Callee, Fn, Virtual};
use trans::common::{self, Block, BlockAndBuilder};
use trans::callee::{Callee, CalleeData, Fn, Virtual};
use trans::common::{self, Block, BlockAndBuilder, C_undef};
use trans::debuginfo::DebugLoc;
use trans::Disr;
use trans::foreign;
use trans::machine::llalign_of_min;
use trans::meth;
use trans::type_of;
use trans::glue;
use trans::type_::Type;
use super::{MirContext, drop};
use super::operand::OperandValue::{FatPtr, Immediate, Ref};
use super::lvalue::LvalueRef;
use super::operand::OperandValue::{self, FatPtr, Immediate, Ref};
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_block(&mut self, bb: mir::BasicBlock) {
@@ -152,115 +153,78 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::Terminator::Call { ref func, ref args, ref destination, ref cleanup } => {
// Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
let callee = self.trans_operand(&bcx, func);
let debugloc = DebugLoc::None;
// The arguments we'll be passing. Plus one to account for outptr, if used.
let mut llargs = Vec::with_capacity(args.len() + 1);
// Types of the arguments. We do not preallocate, because this vector is only
// filled when `is_foreign` is `true`, and foreign calls are a minority of the cases.
let mut arg_tys = Vec::new();
let (callee, fty) = match callee.ty.sty {
let (mut callee, abi, sig) = match callee.ty.sty {
ty::TyFnDef(def_id, substs, f) => {
(Callee::def(bcx.ccx(), def_id, substs), f)
(Callee::def(bcx.ccx(), def_id, substs), f.abi, &f.sig)
}
ty::TyFnPtr(f) => {
(Callee {
data: Fn(callee.immediate()),
ty: callee.ty
}, f)
}, f.abi, &f.sig)
}
_ => unreachable!("{} is not callable", callee.ty)
};
// We do not translate intrinsics here (they shouldn't be functions)
assert!(fty.abi != Abi::RustIntrinsic && fty.abi != Abi::PlatformIntrinsic);
// Foreign-ABI functions are translated differently
let is_foreign = fty.abi != Abi::Rust && fty.abi != Abi::RustCall;
assert!(abi != Abi::RustIntrinsic && abi != Abi::PlatformIntrinsic);
let extra_args = &args[sig.0.inputs.len()..];
let extra_args = extra_args.iter().map(|op_arg| {
self.mir.operand_ty(bcx.tcx(), op_arg)
}).collect::<Vec<_>>();
let fn_ty = callee.direct_fn_type(bcx.ccx(), &extra_args);
// The arguments we'll be passing. Plus one to account for outptr, if used.
let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize;
let mut llargs = Vec::with_capacity(arg_count);
// Prepare the return value destination
let (ret_dest_ty, must_copy_dest) = if let Some((ref d, _)) = *destination {
let ret_dest = if let Some((ref d, _)) = *destination {
let dest = self.trans_lvalue(&bcx, d);
let ret_ty = dest.ty.to_ty(bcx.tcx());
if !is_foreign && type_of::return_uses_outptr(bcx.ccx(), ret_ty) {
if fn_ty.ret.is_indirect() {
llargs.push(dest.llval);
(Some((dest, ret_ty)), false)
None
} else if fn_ty.ret.is_ignore() {
None
} else {
(Some((dest, ret_ty)), !common::type_is_zero_size(bcx.ccx(), ret_ty))
Some(dest)
}
} else {
(None, false)
None
};
// Split the rust-call tupled arguments off.
let (args, rest) = if fty.abi == Abi::RustCall && !args.is_empty() {
let (args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
let (tup, args) = args.split_last().unwrap();
// we can reorder safely because of MIR
(args, self.trans_operand_untupled(&bcx, tup))
(args, Some(tup))
} else {
(&args[..], vec![])
(&args[..], None)
};
let datum = {
let mut arg_ops = args.iter().map(|arg| {
self.trans_operand(&bcx, arg)
}).chain(rest.into_iter());
let mut idx = 0;
for arg in args {
let val = self.trans_operand(&bcx, arg).val;
self.trans_argument(&bcx, val, &mut llargs, &fn_ty,
&mut idx, &mut callee.data);
}
if let Some(tup) = untuple {
self.trans_arguments_untupled(&bcx, tup, &mut llargs, &fn_ty,
&mut idx, &mut callee.data)
}
// Get the actual pointer we can call.
// This can involve vtable accesses or reification.
let datum = if let Virtual(idx) = callee.data {
assert!(!is_foreign);
// Grab the first argument which is a trait object.
let vtable = match arg_ops.next().unwrap().val {
FatPtr(data, vtable) => {
llargs.push(data);
vtable
}
_ => unreachable!("expected FatPtr for Virtual call")
};
bcx.with_block(|bcx| {
meth::get_virtual_method(bcx, vtable, idx, callee.ty)
})
} else {
callee.reify(bcx.ccx())
};
// Process the rest of the args.
for operand in arg_ops {
match operand.val {
Ref(llval) | Immediate(llval) => llargs.push(llval),
FatPtr(b, e) => {
llargs.push(b);
llargs.push(e);
}
}
if is_foreign {
arg_tys.push(operand.ty);
}
}
datum
};
let fn_ty = match datum.ty.sty {
ty::TyFnDef(_, _, f) | ty::TyFnPtr(f) => {
let sig = bcx.tcx().erase_late_bound_regions(&f.sig);
let sig = infer::normalize_associated_type(bcx.tcx(), &sig);
FnType::new(bcx.ccx(), f.abi, &sig, &[])
}
_ => unreachable!("expected fn type")
};
let fn_ptr = callee.reify(bcx.ccx()).val;
// The many different ways to call a function are handled here.
match (is_foreign, cleanup, destination) {
match (cleanup, destination) {
// The two cases below are the only ones to use LLVM's `invoke`.
(false, &Some(cleanup), &None) => {
(&Some(cleanup), &None) => {
let cleanup = self.bcx(cleanup);
let landingpad = self.make_landing_pad(cleanup);
let unreachable_blk = self.unreachable_block();
let cs = bcx.invoke(datum.val,
&llargs[..],
let cs = bcx.invoke(fn_ptr,
&llargs,
unreachable_blk.llbb,
landingpad.llbb(),
cleanup_bundle.as_ref());
@@ -269,22 +233,20 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
self.set_operand_dropped(bcx, op);
});
},
(false, &Some(cleanup), &Some((_, success))) => {
(&Some(cleanup), &Some((_, success))) => {
let cleanup = self.bcx(cleanup);
let landingpad = self.make_landing_pad(cleanup);
let invokeret = bcx.invoke(datum.val,
&llargs[..],
let invokeret = bcx.invoke(fn_ptr,
&llargs,
self.llblock(success),
landingpad.llbb(),
cleanup_bundle.as_ref());
fn_ty.apply_attrs_callsite(invokeret);
if must_copy_dest {
let (ret_dest, ret_ty) = ret_dest_ty
.expect("return destination and type not set");
if let Some(ret_dest) = ret_dest {
// We translate the copy straight into the beginning of the target
// block.
self.bcx(success).at_start(|bcx| bcx.with_block( |bcx| {
base::store_ty(bcx, invokeret, ret_dest.llval, ret_ty);
fn_ty.ret.store(bcx, invokeret, ret_dest.llval);
}));
}
self.bcx(success).at_start(|bcx| for op in args {
@@ -294,24 +256,18 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
self.set_operand_dropped(bcx, op);
});
},
(false, _, &None) => {
let cs = bcx.call(datum.val,
&llargs[..],
cleanup_bundle.as_ref());
(&None, &None) => {
let cs = bcx.call(fn_ptr, &llargs, cleanup_bundle.as_ref());
fn_ty.apply_attrs_callsite(cs);
// no need to drop args, because the call never returns
bcx.unreachable();
}
(false, _, &Some((_, target))) => {
let llret = bcx.call(datum.val,
&llargs[..],
cleanup_bundle.as_ref());
(&None, &Some((_, target))) => {
let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle.as_ref());
fn_ty.apply_attrs_callsite(llret);
if must_copy_dest {
let (ret_dest, ret_ty) = ret_dest_ty
.expect("return destination and type not set");
if let Some(ret_dest) = ret_dest {
bcx.with_block(|bcx| {
base::store_ty(bcx, llret, ret_dest.llval, ret_ty);
fn_ty.ret.store(bcx, llret, ret_dest.llval);
});
}
for op in args {
@@ -319,31 +275,132 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
funclet_br(bcx, self.llblock(target));
}
// Foreign functions
(true, _, destination) => {
let (dest, _) = ret_dest_ty
.expect("return destination is not set");
bcx = bcx.map_block(|bcx| {
foreign::trans_native_call(bcx,
datum.ty,
datum.val,
dest.llval,
&llargs[..],
arg_tys,
debugloc)
});
if let Some((_, target)) = *destination {
for op in args {
self.set_operand_dropped(&bcx, op);
}
funclet_br(bcx, self.llblock(target));
}
},
}
}
}
}
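The (cleanup, destination) match above covers the four call shapes; schematically:

    // (Some(cleanup), None)           invoke; normal edge -> unreachable
    //                                 (diverging call that may unwind)
    // (Some(cleanup), Some(success))  invoke; normal edge -> success,
    //                                 unwind edge -> landing pad
    // (None, None)                    plain call + bcx.unreachable()
    // (None, Some(target))            plain call + branch to target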
fn trans_argument(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
val: OperandValue,
llargs: &mut Vec<ValueRef>,
fn_ty: &FnType,
next_idx: &mut usize,
callee: &mut CalleeData) {
// Treat the values in a fat pointer separately.
if let FatPtr(ptr, meta) = val {
if *next_idx == 0 {
if let Virtual(idx) = *callee {
let llfn = bcx.with_block(|bcx| {
meth::get_virtual_method(bcx, meta, idx)
});
let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
*callee = Fn(bcx.pointercast(llfn, llty));
}
}
self.trans_argument(bcx, Immediate(ptr), llargs, fn_ty, next_idx, callee);
self.trans_argument(bcx, Immediate(meta), llargs, fn_ty, next_idx, callee);
return;
}
let arg = &fn_ty.args[*next_idx];
*next_idx += 1;
// Fill padding with undef value, where applicable.
if let Some(ty) = arg.pad {
llargs.push(C_undef(ty));
}
if arg.is_ignore() {
return;
}
// Force by-ref if we have to load through a cast pointer.
let (mut llval, by_ref) = match val {
Immediate(llval) if arg.cast.is_some() => {
let llscratch = build::AllocaFcx(bcx.fcx(), arg.original_ty, "arg");
bcx.store(llval, llscratch);
(llscratch, true)
}
Immediate(llval) => (llval, false),
Ref(llval) => (llval, true),
FatPtr(_, _) => unreachable!("fat pointers handled above")
};
if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
if arg.original_ty == Type::i1(bcx.ccx()) {
// We store bools as i8 so we need to truncate to i1.
llval = bcx.load_range_assert(llval, 0, 2, llvm::False);
llval = bcx.trunc(llval, arg.original_ty);
} else if let Some(ty) = arg.cast {
llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()));
let llalign = llalign_of_min(bcx.ccx(), arg.ty);
unsafe {
llvm::LLVMSetAlignment(llval, llalign);
}
} else {
llval = bcx.load(llval);
}
}
llargs.push(llval);
}
fn trans_arguments_untupled(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
operand: &mir::Operand<'tcx>,
llargs: &mut Vec<ValueRef>,
fn_ty: &FnType,
next_idx: &mut usize,
callee: &mut CalleeData) {
// FIXME: consider having some optimization to avoid tupling/untupling
// (and storing/loading in the case of immediates)
// avoid trans_operand for pointless copying
let lv = match *operand {
mir::Operand::Consume(ref lvalue) => self.trans_lvalue(bcx, lvalue),
mir::Operand::Constant(ref constant) => {
// FIXME: consider being less pessimized
if constant.ty.is_nil() {
return;
}
let ty = bcx.monomorphize(&constant.ty);
let lv = LvalueRef::alloca(bcx, ty, "__untuple_alloca");
let constant = self.trans_constant(bcx, constant);
self.store_operand(bcx, lv.llval, constant);
lv
}
};
let lv_ty = lv.ty.to_ty(bcx.tcx());
let result_types = match lv_ty.sty {
ty::TyTuple(ref tys) => tys,
_ => bcx.tcx().sess.span_bug(
self.mir.span,
&format!("bad final argument to \"rust-call\" fn {:?}", lv_ty))
};
let base_repr = adt::represent_type(bcx.ccx(), lv_ty);
let base = adt::MaybeSizedValue::sized(lv.llval);
for (n, &ty) in result_types.iter().enumerate() {
let ptr = bcx.with_block(|bcx| {
adt::trans_field_ptr(bcx, &base_repr, base, Disr(0), n)
});
let val = if common::type_is_fat_ptr(bcx.tcx(), ty) {
let (lldata, llextra) = bcx.with_block(|bcx| {
base::load_fat_ptr(bcx, ptr, ty)
});
FatPtr(lldata, llextra)
} else {
// Don't bother loading the value, trans_argument will.
Ref(ptr)
};
self.trans_argument(bcx, val, llargs, fn_ty, next_idx, callee);
}
}
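What the untupling corresponds to at the source level can be shown with ordinary Rust (a standalone illustration; the closure stands in for any rust-call callee):

    fn main() {
        // A Fn-style call conceptually passes its arguments as one tuple...
        let args = (1i32, "two");
        let f = |a: i32, b: &str| format!("{} {}", a, b);
        // ...which trans_arguments_untupled flattens into individual
        // llargs, one slot per field (two for fat pointers like &str).
        assert_eq!(f(args.0, args.1), "1 two");
    }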
fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRef {
let ccx = bcx.ccx();
if let Some(slot) = self.llpersonalityslot {

View File

@@ -9,20 +9,17 @@
// except according to those terms.
use llvm::ValueRef;
use rustc::middle::ty::{self, Ty};
use rustc::middle::ty::Ty;
use rustc::mir::repr as mir;
use trans::adt;
use trans::base;
use trans::common::{self, Block, BlockAndBuilder};
use trans::datum;
use trans::value::Value;
use trans::Disr;
use trans::glue;
use std::fmt;
use super::{MirContext, TempRef, drop};
use super::lvalue::LvalueRef;
/// The representation of a Rust value. The enum variant is in fact
/// uniquely determined by the value's type, but is kept as a
@@ -190,48 +187,6 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
}
pub fn trans_operand_untupled(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
operand: &mir::Operand<'tcx>)
-> Vec<OperandRef<'tcx>>
{
// FIXME: consider having some optimization to avoid tupling/untupling
// (and storing/loading in the case of immediates)
// avoid trans_operand for pointless copying
let lv = match *operand {
mir::Operand::Consume(ref lvalue) => self.trans_lvalue(bcx, lvalue),
mir::Operand::Constant(ref constant) => {
// FIXME: consider being less pessimized
if constant.ty.is_nil() {
return vec![];
}
let ty = bcx.monomorphize(&constant.ty);
let lv = LvalueRef::alloca(bcx, ty, "__untuple_alloca");
let constant = self.trans_constant(bcx, constant);
self.store_operand(bcx, lv.llval, constant);
lv
}
};
let lv_ty = lv.ty.to_ty(bcx.tcx());
let result_types = match lv_ty.sty {
ty::TyTuple(ref tys) => tys,
_ => bcx.tcx().sess.span_bug(
self.mir.span,
&format!("bad final argument to \"rust-call\" fn {:?}", lv_ty))
};
let base_repr = adt::represent_type(bcx.ccx(), lv_ty);
let base = adt::MaybeSizedValue::sized(lv.llval);
result_types.iter().enumerate().map(|(n, &ty)| {
self.trans_load(bcx, bcx.with_block(|bcx| {
adt::trans_field_ptr(bcx, &base_repr, base, Disr(0), n)
}), ty)
}).collect()
}
pub fn set_operand_dropped(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
operand: &mir::Operand<'tcx>) {