trans: Handle all function setup for all ABIs via FnType.

This commit is contained in:
Eduard Burtescu 2016-03-06 16:30:21 +02:00
parent bd0a849f3b
commit aec63821d0
20 changed files with 712 additions and 1342 deletions

View File

@ -29,6 +29,8 @@ use trans::type_of;
use rustc_front::hir;
use middle::ty::{self, Ty};
use libc::c_uint;
pub use syntax::abi::Abi;
/// The first half of a fat pointer.
@ -129,6 +131,16 @@ impl ArgType {
self.kind == ArgKind::Ignore
}
/// Get the LLVM type for an lvalue of the original Rust type of
/// this argument/return, i.e. the result of `type_of::type_of`.
pub fn memory_ty(&self, ccx: &CrateContext) -> Type {
// LLVM's `i1` is a single bit and is widened to `i8` when stored in
// memory, so an in-memory slot for a bool-like value uses `i8` instead.
if self.original_ty == Type::i1(ccx) {
Type::i8(ccx)
} else {
// All other types are stored in memory as-is.
self.original_ty
}
}
/// Store a direct/indirect value described by this ArgType into a
/// lvalue for the original Rust type of this argument/return.
/// Can be used for both storing formal arguments into Rust variables
@ -156,6 +168,18 @@ impl ArgType {
Store(bcx, val, dst);
}
}
// Store the incoming LLVM function parameter(s) described by this
// ArgType into `dst`, advancing `*idx` past every LLVM-level parameter
// consumed (including any ABI padding argument that precedes the value).
// Ignored (zero-sized) arguments consume no parameter and store nothing.
pub fn store_fn_arg(&self, bcx: Block, idx: &mut usize, dst: ValueRef) {
// A padding argument occupies an extra LLVM parameter slot; skip it.
if self.pad.is_some() {
*idx += 1;
}
// Ignored args have no LLVM representation — nothing to load or store.
if self.is_ignore() {
return;
}
// Fetch the raw LLVM parameter and delegate to `store`, which handles
// direct vs. indirect (by-pointer) passing for this ArgType.
let val = llvm::get_param(bcx.fcx.llfn, *idx as c_uint);
*idx += 1;
self.store(bcx, val, dst);
}
}
/// Metadata describing how the arguments to a native function

File diff suppressed because it is too large Load Diff

View File

@ -255,15 +255,18 @@ impl<'tcx> Callee<'tcx> {
/// Turn the callee into a function pointer.
pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>)
-> Datum<'tcx, Rvalue> {
let fn_ptr_ty = match self.ty.sty {
ty::TyFnDef(_, _, f) => ccx.tcx().mk_ty(ty::TyFnPtr(f)),
_ => self.ty
};
match self.data {
Fn(llfn) => {
let fn_ptr_ty = match self.ty.sty {
ty::TyFnDef(_, _, f) => ccx.tcx().mk_ty(ty::TyFnPtr(f)),
_ => self.ty
};
immediate_rvalue(llfn, fn_ptr_ty)
}
Virtual(idx) => meth::trans_object_shim(ccx, self.ty, idx),
Virtual(idx) => {
let llfn = meth::trans_object_shim(ccx, self.ty, idx);
immediate_rvalue(llfn, fn_ptr_ty)
}
NamedTupleConstructor(_) => match self.ty.sty {
ty::TyFnDef(def_id, substs, _) => {
return get_fn(ccx, def_id, substs);
@ -313,6 +316,21 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>(
ty::ClosureKind::Fn | ty::ClosureKind::FnMut => true,
ty::ClosureKind::FnOnce => false,
};
let llfnpointer = match bare_fn_ty.sty {
ty::TyFnDef(def_id, substs, _) => {
// Function definitions have to be turned into a pointer.
let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
if !is_by_ref {
// A by-value fn item is ignored, so the shim has
// the same signature as the original function.
return llfn;
}
Some(llfn)
}
_ => None
};
let bare_fn_ty_maybe_ref = if is_by_ref {
tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), bare_fn_ty)
} else {
@ -347,15 +365,17 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>(
let sig = tcx.erase_late_bound_regions(sig);
let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
let tuple_input_ty = tcx.mk_tup(sig.inputs.to_vec());
let sig = ty::FnSig {
inputs: vec![bare_fn_ty_maybe_ref,
tuple_input_ty],
output: sig.output,
variadic: false
};
let fn_ty = FnType::new(ccx, Abi::RustCall, &sig, &[]);
let tuple_fn_ty = tcx.mk_fn_ptr(ty::BareFnTy {
unsafety: hir::Unsafety::Normal,
abi: Abi::RustCall,
sig: ty::Binder(ty::FnSig {
inputs: vec![bare_fn_ty_maybe_ref,
tuple_input_ty],
output: sig.output,
variadic: false
})
sig: ty::Binder(sig)
});
debug!("tuple_fn_ty: {:?}", tuple_fn_ty);
@ -368,37 +388,26 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>(
let empty_substs = tcx.mk_substs(Substs::trans_empty());
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = new_fn_ctxt(ccx,
llfn,
ast::DUMMY_NODE_ID,
false,
sig.output,
empty_substs,
None,
&block_arena);
let mut bcx = init_function(&fcx, false, sig.output);
fcx = FunctionContext::new(ccx, llfn, fn_ty, ast::DUMMY_NODE_ID,
empty_substs, None, &block_arena);
let mut bcx = fcx.init(false);
let llargs = get_params(fcx.llfn);
let self_idx = fcx.arg_offset();
let llfnpointer = match bare_fn_ty.sty {
ty::TyFnDef(def_id, substs, _) => {
// Function definitions have to be turned into a pointer.
Callee::def(ccx, def_id, substs).reify(ccx).val
}
let self_idx = fcx.fn_ty.ret.is_indirect() as usize;
let llfnpointer = llfnpointer.unwrap_or_else(|| {
// the first argument (`self`) will be ptr to the fn pointer
_ => if is_by_ref {
if is_by_ref {
Load(bcx, llargs[self_idx])
} else {
llargs[self_idx]
}
};
});
assert!(!fcx.needs_ret_allocas);
let dest = fcx.llretslotptr.get().map(|_|
expr::SaveIn(fcx.get_ret_slot(bcx, sig.output, "ret_slot"))
expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot"))
);
let callee = Callee {
@ -407,7 +416,7 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>(
};
bcx = callee.call(bcx, DebugLoc::None, ArgVals(&llargs[(self_idx + 1)..]), dest).bcx;
finish_fn(&fcx, bcx, sig.output, DebugLoc::None);
fcx.finish(bcx, DebugLoc::None);
ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn);

View File

@ -10,11 +10,11 @@
use arena::TypedArena;
use back::link::{self, mangle_internal_name_by_path_and_seq};
use llvm::{ValueRef, get_params};
use llvm::{ValueRef, get_param, get_params};
use middle::def_id::DefId;
use middle::infer;
use middle::traits::ProjectionMode;
use trans::abi::Abi;
use trans::abi::{Abi, FnType};
use trans::adt;
use trans::attributes;
use trans::base::*;
@ -22,12 +22,12 @@ use trans::build::*;
use trans::callee::{self, ArgVals, Callee};
use trans::cleanup::{CleanupMethods, CustomScope, ScopeId};
use trans::common::*;
use trans::datum::{self, Datum, rvalue_scratch_datum, Rvalue};
use trans::datum::{ByRef, Datum, lvalue_scratch_datum};
use trans::datum::{rvalue_scratch_datum, Rvalue};
use trans::debuginfo::{self, DebugLoc};
use trans::declare;
use trans::expr;
use trans::monomorphize::{Instance};
use trans::type_of::*;
use trans::value::Value;
use trans::Disr;
use middle::ty::{self, Ty, TyCtxt};
@ -38,28 +38,26 @@ use syntax::attr::{ThinAttributes, ThinAttributesExt};
use rustc_front::hir;
use libc::c_uint;
fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
closure_def_id: DefId,
arg_scope_id: ScopeId,
freevars: &[ty::Freevar])
-> Block<'blk, 'tcx>
{
freevars: &[ty::Freevar]) {
let _icx = push_ctxt("closure::load_closure_environment");
let kind = kind_for_closure(bcx.ccx(), closure_def_id);
let env_arg = &bcx.fcx.fn_ty.args[0];
let mut env_idx = bcx.fcx.fn_ty.ret.is_indirect() as usize;
// Special case for small by-value selfs.
let closure_ty = node_id_type(bcx, bcx.fcx.id);
let self_type = get_self_type(bcx.tcx(), closure_def_id, closure_ty);
let kind = kind_for_closure(bcx.ccx(), closure_def_id);
let llenv = if kind == ty::ClosureKind::FnOnce &&
!arg_is_indirect(bcx.ccx(), self_type) {
let datum = rvalue_scratch_datum(bcx,
self_type,
"closure_env");
store_ty(bcx, bcx.fcx.llenv.unwrap(), datum.val, self_type);
datum.val
let llenv = if kind == ty::ClosureKind::FnOnce && !env_arg.is_indirect() {
let closure_ty = node_id_type(bcx, bcx.fcx.id);
let llenv = rvalue_scratch_datum(bcx, closure_ty, "closure_env").val;
env_arg.store_fn_arg(bcx, &mut env_idx, llenv);
llenv
} else {
bcx.fcx.llenv.unwrap()
get_param(bcx.fcx.llfn, env_idx as c_uint)
};
// Store the pointer to closure data in an alloca for debug info because that's what the
@ -105,8 +103,6 @@ fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
freevar.span);
}
}
bcx
}
pub enum ClosureEnv<'a> {
@ -115,26 +111,19 @@ pub enum ClosureEnv<'a> {
}
impl<'a> ClosureEnv<'a> {
pub fn load<'blk,'tcx>(self, bcx: Block<'blk, 'tcx>, arg_scope: ScopeId)
-> Block<'blk, 'tcx>
{
match self {
ClosureEnv::NotClosure => bcx,
ClosureEnv::Closure(def_id, freevars) => {
if freevars.is_empty() {
bcx
} else {
load_closure_environment(bcx, def_id, arg_scope, freevars)
}
pub fn load<'blk,'tcx>(self, bcx: Block<'blk, 'tcx>, arg_scope: ScopeId) {
if let ClosureEnv::Closure(def_id, freevars) = self {
if !freevars.is_empty() {
load_closure_environment(bcx, def_id, arg_scope, freevars);
}
}
}
}
pub fn get_self_type<'tcx>(tcx: &TyCtxt<'tcx>,
closure_id: DefId,
fn_ty: Ty<'tcx>)
-> Ty<'tcx> {
fn get_self_type<'tcx>(tcx: &TyCtxt<'tcx>,
closure_id: DefId,
fn_ty: Ty<'tcx>)
-> Ty<'tcx> {
match tcx.closure_kind(closure_id) {
ty::ClosureKind::Fn => {
tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), fn_ty)
@ -148,10 +137,10 @@ pub fn get_self_type<'tcx>(tcx: &TyCtxt<'tcx>,
/// Returns the LLVM function declaration for a closure, creating it if
/// necessary. If the ID does not correspond to a closure ID, returns None.
pub fn get_or_create_closure_declaration<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
closure_id: DefId,
substs: &ty::ClosureSubsts<'tcx>)
-> ValueRef {
fn get_or_create_closure_declaration<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
closure_id: DefId,
substs: &ty::ClosureSubsts<'tcx>)
-> ValueRef {
// Normalize type so differences in regions and typedefs don't cause
// duplicate declarations
let tcx = ccx.tcx();
@ -246,6 +235,16 @@ pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>,
let sig = tcx.erase_late_bound_regions(&function_type.sig);
let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
let closure_type = tcx.mk_closure_from_closure_substs(closure_def_id,
Box::new(closure_substs.clone()));
let sig = ty::FnSig {
inputs: Some(get_self_type(tcx, closure_def_id, closure_type))
.into_iter().chain(sig.inputs).collect(),
output: sig.output,
variadic: false
};
let fn_ty = FnType::new(ccx, Abi::RustCall, &sig, &[]);
trans_closure(ccx,
decl,
body,
@ -253,8 +252,8 @@ pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>,
param_substs,
id,
closure_expr_attrs.as_attr_slice(),
sig.output,
function_type.abi,
fn_ty,
Abi::RustCall,
ClosureEnv::Closure(closure_def_id, &freevars));
// Don't hoist this to the top of the function. It's perfectly legitimate
@ -373,17 +372,20 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
debug!("trans_fn_once_adapter_shim: llref_fn_ty={:?}",
llref_fn_ty);
let ret_ty = tcx.erase_late_bound_regions(&sig.output());
let ret_ty = infer::normalize_associated_type(ccx.tcx(), &ret_ty);
// Make a version of the closure type with the same arguments, but
// with argument #0 being by value.
assert_eq!(abi, Abi::RustCall);
sig.0.inputs[0] = closure_ty;
let sig = tcx.erase_late_bound_regions(&sig);
let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
let fn_ty = FnType::new(ccx, abi, &sig, &[]);
let llonce_fn_ty = tcx.mk_fn_ptr(ty::BareFnTy {
unsafety: unsafety,
abi: abi,
sig: sig
sig: ty::Binder(sig)
});
// Create the by-value helper.
@ -392,36 +394,49 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = new_fn_ctxt(ccx,
lloncefn,
ast::DUMMY_NODE_ID,
false,
ret_ty,
substs.func_substs,
None,
&block_arena);
let mut bcx = init_function(&fcx, false, ret_ty);
fcx = FunctionContext::new(ccx, lloncefn, fn_ty, ast::DUMMY_NODE_ID,
substs.func_substs, None, &block_arena);
let mut bcx = fcx.init(false);
let mut llargs = get_params(fcx.llfn);
// the first argument (`self`) will be the (by value) closure env.
let self_scope = fcx.push_custom_cleanup_scope();
let self_scope_id = CustomScope(self_scope);
let rvalue_mode = datum::appropriate_rvalue_mode(ccx, closure_ty);
let self_idx = fcx.arg_offset();
let llself = llargs[self_idx];
let env_datum = Datum::new(llself, closure_ty, Rvalue::new(rvalue_mode));
let env_datum = unpack_datum!(bcx,
env_datum.to_lvalue_datum_in_scope(bcx, "self",
self_scope_id));
debug!("trans_fn_once_adapter_shim: env_datum={:?}",
Value(env_datum.val));
llargs[self_idx] = env_datum.val;
let mut llargs = get_params(fcx.llfn);
let mut self_idx = fcx.fn_ty.ret.is_indirect() as usize;
let env_arg = &fcx.fn_ty.args[0];
let llenv = if env_arg.is_indirect() {
Datum::new(llargs[self_idx], closure_ty, Rvalue::new(ByRef))
.add_clean(&fcx, self_scope_id)
} else {
unpack_datum!(bcx, lvalue_scratch_datum(bcx, closure_ty, "self",
InitAlloca::Dropped,
self_scope_id, |bcx, llval| {
let mut llarg_idx = self_idx;
env_arg.store_fn_arg(bcx, &mut llarg_idx, llval);
bcx.fcx.schedule_lifetime_end(self_scope_id, llval);
bcx
})).val
};
debug!("trans_fn_once_adapter_shim: env={:?}", Value(llenv));
// Adjust llargs such that llargs[self_idx..] has the call arguments.
// For zero-sized closures that means sneaking in a new argument.
if env_arg.is_ignore() {
if self_idx > 0 {
self_idx -= 1;
llargs[self_idx] = llenv;
} else {
llargs.insert(0, llenv);
}
} else {
llargs[self_idx] = llenv;
}
let dest =
fcx.llretslotptr.get().map(
|_| expr::SaveIn(fcx.get_ret_slot(bcx, ret_ty, "ret_slot")));
|_| expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot")));
let callee = Callee {
data: callee::Fn(llreffn),
@ -431,7 +446,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope);
finish_fn(&fcx, bcx, ret_ty, DebugLoc::None);
fcx.finish(bcx, DebugLoc::None);
lloncefn
}

View File

@ -22,7 +22,7 @@ use middle::def_id::DefId;
use middle::infer;
use middle::lang_items::LangItem;
use middle::subst::Substs;
use trans::abi::Abi;
use trans::abi::{Abi, FnType};
use trans::base;
use trans::build;
use trans::builder::Builder;
@ -35,7 +35,6 @@ use trans::declare;
use trans::machine;
use trans::monomorphize;
use trans::type_::Type;
use trans::type_of;
use trans::value::Value;
use middle::ty::{self, Ty, TyCtxt};
use middle::traits::{self, SelectionContext, ProjectionMode};
@ -107,12 +106,6 @@ pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -
llsize_of_alloc(ccx, llty) == 0
}
/// Identifies types which we declare to be equivalent to `void` in C for the purpose of function
/// return types. These are `()`, bot, uninhabited enums and all other zero-sized types.
pub fn return_type_is_void<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
ty.is_nil() || ty.is_empty(ccx.tcx()) || type_is_zero_size(ccx, ty)
}
/// Generates a unique symbol based off the name given. This is used to create
/// unique symbols for things like closures.
pub fn gensym_name(name: &str) -> ast::Name {
@ -291,9 +284,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
// always an empty parameter-environment NOTE: @jroesch another use of ParamEnv
pub param_env: ty::ParameterEnvironment<'a, 'tcx>,
// The environment argument in a closure.
pub llenv: Option<ValueRef>,
// A pointer to where to store the return value. If the return type is
// immediate, this points to an alloca in the function. Otherwise, it's a
// pointer to the hidden first parameter of the function. After function
@ -321,11 +311,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
// Note that for cleanuppad-based exceptions this is not used.
pub landingpad_alloca: Cell<Option<ValueRef>>,
// True if the caller expects this fn to use the out pointer to
// return. Either way, your code should write into the slot llretslotptr
// points to, but if this value is false, that slot will be a local alloca.
pub caller_expects_out_pointer: bool,
// Maps the DefId's for local variables to the allocas created for
// them in llallocas.
pub lllocals: RefCell<NodeMap<LvalueDatum<'tcx>>>,
@ -337,6 +322,9 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
// paths) for the code being compiled.
pub lldropflag_hints: RefCell<DropFlagHintsMap<'tcx>>,
// Describes the return/argument LLVM types and their ABI handling.
pub fn_ty: FnType,
// The NodeId of the function, or -1 if it doesn't correspond to
// a user-defined function.
pub id: ast::NodeId,
@ -372,18 +360,6 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
self.mir.unwrap()
}
pub fn arg_offset(&self) -> usize {
self.env_arg_pos() + if self.llenv.is_some() { 1 } else { 0 }
}
pub fn env_arg_pos(&self) -> usize {
if self.caller_expects_out_pointer {
1
} else {
0
}
}
pub fn cleanup(&self) {
unsafe {
llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt
@ -404,14 +380,9 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
self.llreturn.get().unwrap()
}
pub fn get_ret_slot(&self, bcx: Block<'a, 'tcx>,
output: ty::FnOutput<'tcx>,
name: &str) -> ValueRef {
pub fn get_ret_slot(&self, bcx: Block<'a, 'tcx>, name: &str) -> ValueRef {
if self.needs_ret_allocas {
base::alloca(bcx, match output {
ty::FnConverging(output_type) => type_of::type_of(bcx.ccx(), output_type),
ty::FnDiverging => Type::void(bcx.ccx())
}, name)
base::alloca(bcx, self.fn_ty.ret.memory_ty(self.ccx), name)
} else {
self.llretslotptr.get().unwrap()
}

View File

@ -25,7 +25,6 @@ use trans::debuginfo::{DebugLoc, ToDebugLoc};
use trans::expr;
use trans::machine;
use trans;
use middle::ty;
use rustc_front::hir;
use rustc_front::util as ast_util;
@ -363,14 +362,12 @@ pub fn trans_ret<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let fcx = bcx.fcx;
let mut bcx = bcx;
let dest = match (fcx.llretslotptr.get(), retval_expr) {
(Some(_), Some(retval_expr)) => {
let ret_ty = expr_ty_adjusted(bcx, &retval_expr);
expr::SaveIn(fcx.get_ret_slot(bcx, ty::FnConverging(ret_ty), "ret_slot"))
}
_ => expr::Ignore,
};
if let Some(x) = retval_expr {
let dest = if fcx.llretslotptr.get().is_some() {
expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot"))
} else {
expr::Ignore
};
bcx = expr::trans_into(bcx, &x, dest);
match dest {
expr::SaveIn(slot) if fcx.needs_ret_allocas => {

View File

@ -35,7 +35,7 @@ use rustc_front::hir;
use trans::abi::Abi;
use trans::common::{NodeIdAndSpan, CrateContext, FunctionContext, Block};
use trans;
use trans::{monomorphize, type_of};
use trans::monomorphize;
use middle::infer;
use middle::ty::{self, Ty};
use session::config::{self, FullDebugInfo, LimitedDebugInfo, NoDebugInfo};
@ -456,10 +456,10 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
ty::FnDiverging => diverging_type_metadata(cx)
});
let inputs = &if abi == Abi::RustCall {
type_of::untuple_arguments(cx, &sig.inputs)
let inputs = if abi == Abi::RustCall {
&sig.inputs[..sig.inputs.len()-1]
} else {
sig.inputs
&sig.inputs[..]
};
// Arguments types
@ -467,6 +467,14 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
signature.push(type_metadata(cx, argument_type, codemap::DUMMY_SP));
}
if abi == Abi::RustCall && !sig.inputs.is_empty() {
if let ty::TyTuple(ref args) = sig.inputs[sig.inputs.len() - 1].sty {
for &argument_type in args {
signature.push(type_metadata(cx, argument_type, codemap::DUMMY_SP));
}
}
}
return create_DIArray(DIB(cx), &signature[..]);
}

View File

@ -92,24 +92,19 @@ pub fn declare_cfn(ccx: &CrateContext, name: &str, fn_type: Type) -> ValueRef {
pub fn declare_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str,
fn_type: ty::Ty<'tcx>) -> ValueRef {
debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, fn_type);
let f = match fn_type.sty {
ty::TyFnDef(_, _, f) | ty::TyFnPtr(f) => f,
_ => unreachable!("expected fn type for {:?}, found {:?}", name, fn_type)
};
let sig = ccx.tcx().erase_late_bound_regions(&f.sig);
let abi = fn_type.fn_abi();
let sig = ccx.tcx().erase_late_bound_regions(fn_type.fn_sig());
let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
debug!("declare_rust_fn (after region erasure) sig={:?}", sig);
let fty = FnType::new(ccx, f.abi, &sig, &[]);
let fty = FnType::new(ccx, abi, &sig, &[]);
let llfn = declare_raw_fn(ccx, name, fty.cconv, fty.llvm_type(ccx));
if sig.output == ty::FnDiverging {
llvm::SetFunctionAttribute(llfn, llvm::Attribute::NoReturn);
}
if f.abi != Abi::Rust && f.abi != Abi::RustCall {
if abi != Abi::Rust && abi != Abi::RustCall {
attributes::unwind(llfn, false);
}

View File

@ -1898,8 +1898,8 @@ fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let t_out = node_id_type(bcx, id);
debug!("trans_cast({:?} as {:?})", t_in, t_out);
let mut ll_t_in = type_of::arg_type_of(ccx, t_in);
let ll_t_out = type_of::arg_type_of(ccx, t_out);
let mut ll_t_in = type_of::immediate_type_of(ccx, t_in);
let ll_t_out = type_of::immediate_type_of(ccx, t_out);
// Convert the value to be cast into a ValueRef, either by-ref or
// by-value as appropriate given its type:
let mut datum = unpack_datum!(bcx, trans(bcx, expr));

View File

@ -9,32 +9,17 @@
// except according to those terms.
use back::link;
use llvm::{ValueRef, get_param};
use llvm::{ValueRef};
use llvm;
use middle::weak_lang_items;
use trans::abi::{Abi, FnType};
use trans::attributes;
use trans::base::{llvm_linkage_by_name, push_ctxt};
use trans::base;
use trans::build::*;
use trans::base::{llvm_linkage_by_name};
use trans::common::*;
use trans::debuginfo::DebugLoc;
use trans::declare;
use trans::machine;
use trans::monomorphize;
use trans::type_::Type;
use trans::type_of;
use trans::value::Value;
use middle::infer;
use middle::ty::{self, Ty, TyCtxt};
use middle::subst::Substs;
use middle::ty;
use std::cmp;
use std::iter::once;
use libc::c_uint;
use syntax::attr;
use syntax::parse::token::{InternedString, special_idents};
use syntax::parse::token::{InternedString};
use syntax::ast;
use syntax::attr::AttrMetaMethods;
@ -95,369 +80,6 @@ pub fn register_static(ccx: &CrateContext,
}
None => // Generate an external declaration.
declare::declare_global(ccx, &ident[..], llty),
};
// Handle thread-local external statics.
for attr in foreign_item.attrs.iter() {
if attr.check_name("thread_local") {
llvm::set_thread_local(c, true);
}
}
return c;
}
///////////////////////////////////////////////////////////////////////////
// Rust functions with foreign ABIs
//
// These are normal Rust functions defined with foreign ABIs. For
// now, and perhaps forever, we translate these using a "layer of
// indirection". That is, given a Rust declaration like:
//
// extern "C" fn foo(i: u32) -> u32 { ... }
//
// we will generate a function like:
//
// S foo(T i) {
// S r;
// foo0(&r, NULL, i);
// return r;
// }
//
// #[inline_always]
// void foo0(uint32_t *r, void *env, uint32_t i) { ... }
//
// Here the (internal) `foo0` function follows the Rust ABI as normal,
// where the `foo` function follows the C ABI. We rely on LLVM to
// inline the one into the other. Of course we could just generate the
// correct code in the first place, but this is much simpler.
pub fn trans_rust_fn_with_foreign_abi<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
decl: &hir::FnDecl,
body: &hir::Block,
attrs: &[ast::Attribute],
llwrapfn: ValueRef,
param_substs: &'tcx Substs<'tcx>,
id: ast::NodeId,
hash: Option<&str>) {
let _icx = push_ctxt("foreign::build_foreign_fn");
let fnty = ccx.tcx().node_id_to_type(id);
let mty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &fnty);
let f = match mty.sty {
ty::TyFnDef(_, _, f) => f,
_ => ccx.sess().bug("trans_rust_fn_with_foreign_abi called on non-function type")
};
assert!(f.abi != Abi::Rust);
assert!(f.abi != Abi::RustIntrinsic);
assert!(f.abi != Abi::PlatformIntrinsic);
let fn_sig = ccx.tcx().erase_late_bound_regions(&f.sig);
let fn_sig = infer::normalize_associated_type(ccx.tcx(), &fn_sig);
let rust_fn_ty = ccx.tcx().mk_fn_ptr(ty::BareFnTy {
unsafety: f.unsafety,
abi: Abi::Rust,
sig: ty::Binder(fn_sig.clone())
});
let fty = FnType::new(ccx, f.abi, &fn_sig, &[]);
let rust_fty = FnType::new(ccx, Abi::Rust, &fn_sig, &[]);
unsafe { // unsafe because we call LLVM operations
// Build up the Rust function (`foo0` above).
let llrustfn = build_rust_fn(ccx, decl, body, param_substs,
attrs, id, rust_fn_ty, hash);
// Build up the foreign wrapper (`foo` above).
return build_wrap_fn(ccx, llrustfn, llwrapfn, &fn_sig, &fty, &rust_fty);
}
fn build_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
decl: &hir::FnDecl,
body: &hir::Block,
param_substs: &'tcx Substs<'tcx>,
attrs: &[ast::Attribute],
id: ast::NodeId,
rust_fn_ty: Ty<'tcx>,
hash: Option<&str>)
-> ValueRef
{
let _icx = push_ctxt("foreign::foreign::build_rust_fn");
let tcx = ccx.tcx();
let path =
tcx.map.def_path(tcx.map.local_def_id(id))
.into_iter()
.map(|e| e.data.as_interned_str())
.chain(once(special_idents::clownshoe_abi.name.as_str()));
let ps = link::mangle(path, hash);
debug!("build_rust_fn: path={} id={} ty={:?}",
ccx.tcx().map.path_to_string(id),
id, rust_fn_ty);
let llfn = declare::define_internal_fn(ccx, &ps, rust_fn_ty);
attributes::from_fn_attrs(ccx, attrs, llfn);
base::trans_fn(ccx, decl, body, llfn, param_substs, id, attrs);
llfn
}
unsafe fn build_wrap_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
llrustfn: ValueRef,
llwrapfn: ValueRef,
fn_sig: &ty::FnSig<'tcx>,
fn_ty: &FnType,
rust_fty: &FnType) {
let _icx = push_ctxt(
"foreign::trans_rust_fn_with_foreign_abi::build_wrap_fn");
debug!("build_wrap_fn(llrustfn={:?}, llwrapfn={:?})",
Value(llrustfn),
Value(llwrapfn));
// Avoid all the Rust generation stuff and just generate raw
// LLVM here.
//
// We want to generate code like this:
//
// S foo(T i) {
// S r;
// foo0(&r, NULL, i);
// return r;
// }
if llvm::LLVMCountBasicBlocks(llwrapfn) != 0 {
ccx.sess().bug("wrapping a function inside non-empty wrapper, most likely cause is \
multiple functions being wrapped");
}
let ptr = "the block\0".as_ptr();
let the_block = llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llwrapfn,
ptr as *const _);
let builder = ccx.builder();
builder.position_at_end(the_block);
// Array for the arguments we will pass to the rust function.
let mut llrust_args = Vec::new();
let mut next_foreign_arg_counter: c_uint = 0;
let mut next_foreign_arg = |pad: bool| -> c_uint {
next_foreign_arg_counter += if pad {
2
} else {
1
};
next_foreign_arg_counter - 1
};
// If there is an out pointer on the foreign function
let foreign_outptr = {
if fn_ty.ret.is_indirect() {
Some(get_param(llwrapfn, next_foreign_arg(false)))
} else {
None
}
};
let rustfn_ty = Type::from_ref(llvm::LLVMTypeOf(llrustfn)).element_type();
let mut rust_param_tys = rustfn_ty.func_params().into_iter();
// Push Rust return pointer, using null if it will be unused.
let rust_uses_outptr = match fn_sig.output {
ty::FnConverging(ret_ty) => type_of::return_uses_outptr(ccx, ret_ty),
ty::FnDiverging => false
};
let return_alloca: Option<ValueRef>;
let llrust_ret_ty = if rust_uses_outptr {
rust_param_tys.next().expect("Missing return type!").element_type()
} else {
rustfn_ty.return_type()
};
if rust_uses_outptr {
// Rust expects to use an outpointer. If the foreign fn
// also uses an outpointer, we can reuse it, but the types
// may vary, so cast first to the Rust type. If the
// foreign fn does NOT use an outpointer, we will have to
// alloca some scratch space on the stack.
match foreign_outptr {
Some(llforeign_outptr) => {
debug!("out pointer, foreign={:?}",
Value(llforeign_outptr));
let llrust_retptr =
builder.bitcast(llforeign_outptr, llrust_ret_ty.ptr_to());
debug!("out pointer, foreign={:?} (casted)",
Value(llrust_retptr));
llrust_args.push(llrust_retptr);
return_alloca = None;
}
None => {
let slot = builder.alloca(llrust_ret_ty, "return_alloca");
debug!("out pointer, \
allocad={:?}, \
llrust_ret_ty={:?}, \
return_ty={:?}",
Value(slot),
llrust_ret_ty,
fn_sig.output);
llrust_args.push(slot);
return_alloca = Some(slot);
}
}
} else {
// Rust does not expect an outpointer. If the foreign fn
// does use an outpointer, then we will do a store of the
// value that the Rust fn returns.
return_alloca = None;
};
// Build up the arguments to the call to the rust function.
// Careful to adapt for cases where the native convention uses
// a pointer and Rust does not or vice versa.
let mut tys = fn_ty.args.iter().zip(rust_param_tys);
for i in 0..fn_sig.inputs.len() {
let rust_ty = fn_sig.inputs[i];
let rust_indirect = type_of::arg_is_indirect(ccx, rust_ty);
let (llforeign_arg_ty, llty) = tys.next().expect("Not enough parameter types!");
let llrust_ty = if rust_indirect {
llty.element_type()
} else {
llty
};
let foreign_indirect = llforeign_arg_ty.is_indirect();
if llforeign_arg_ty.is_ignore() {
debug!("skipping ignored arg #{}", i);
llrust_args.push(C_undef(llrust_ty));
continue;
}
// skip padding
let foreign_index = next_foreign_arg(llforeign_arg_ty.pad.is_some());
let mut llforeign_arg = get_param(llwrapfn, foreign_index);
if type_is_fat_ptr(ccx.tcx(), rust_ty) {
// Fat pointers are one pointer and one integer or pointer.
let a = llforeign_arg_ty;
let (b, _) = tys.next().expect("Not enough parameter types!");
assert_eq!((a.cast, b.cast), (None, None));
assert!(!a.is_indirect() && !b.is_indirect());
llrust_args.push(llforeign_arg);
let foreign_index = next_foreign_arg(llforeign_arg_ty.pad.is_some());
llrust_args.push(get_param(llwrapfn, foreign_index));
continue;
}
debug!("llforeign_arg {}{}: {:?}", "#",
i, Value(llforeign_arg));
debug!("rust_indirect = {}, foreign_indirect = {}",
rust_indirect, foreign_indirect);
// Ensure that the foreign argument is indirect (by
// pointer). It makes adapting types easier, since we can
// always just bitcast pointers.
if !foreign_indirect {
llforeign_arg = if rust_ty.is_bool() {
let lltemp = builder.alloca(Type::bool(ccx), "");
builder.store(builder.zext(llforeign_arg, Type::bool(ccx)), lltemp);
lltemp
} else {
let lltemp = builder.alloca(val_ty(llforeign_arg), "");
builder.store(llforeign_arg, lltemp);
lltemp
}
}
// If the types in the ABI and the Rust types don't match,
// bitcast the llforeign_arg pointer so it matches the types
// Rust expects.
if llforeign_arg_ty.cast.is_some() && !type_is_fat_ptr(ccx.tcx(), rust_ty){
assert!(!foreign_indirect);
llforeign_arg = builder.bitcast(llforeign_arg, llrust_ty.ptr_to());
}
let llrust_arg = if rust_indirect || type_is_fat_ptr(ccx.tcx(), rust_ty) {
llforeign_arg
} else {
if rust_ty.is_bool() {
let tmp = builder.load_range_assert(llforeign_arg, 0, 2, llvm::False);
builder.trunc(tmp, Type::i1(ccx))
} else if type_of::type_of(ccx, rust_ty).is_aggregate() {
// We want to pass small aggregates as immediate values, but using an aggregate
// LLVM type for this leads to bad optimizations, so its arg type is an
// appropriately sized integer and we have to convert it
let tmp = builder.bitcast(llforeign_arg,
type_of::arg_type_of(ccx, rust_ty).ptr_to());
let load = builder.load(tmp);
llvm::LLVMSetAlignment(load, type_of::align_of(ccx, rust_ty));
load
} else {
builder.load(llforeign_arg)
}
};
debug!("llrust_arg {}{}: {:?}", "#",
i, Value(llrust_arg));
llrust_args.push(llrust_arg);
}
// Perform the call itself
debug!("calling llrustfn = {:?}", Value(llrustfn));
let llrust_ret_val = builder.call(llrustfn, &llrust_args, None);
rust_fty.apply_attrs_callsite(llrust_ret_val);
// Get the return value where the foreign fn expects it.
let llforeign_ret_ty = fn_ty.ret.cast.unwrap_or(fn_ty.ret.original_ty);
match foreign_outptr {
None if llforeign_ret_ty == Type::void(ccx) => {
// Function returns `()` or `bot`, which in Rust is the LLVM
// type "{}" but in foreign ABIs is "Void".
builder.ret_void();
}
None if rust_uses_outptr => {
// Rust uses an outpointer, but the foreign ABI does not. Load.
let llrust_outptr = return_alloca.unwrap();
let llforeign_outptr_casted =
builder.bitcast(llrust_outptr, llforeign_ret_ty.ptr_to());
let llforeign_retval = builder.load(llforeign_outptr_casted);
builder.ret(llforeign_retval);
}
None if llforeign_ret_ty != llrust_ret_ty => {
// Neither ABI uses an outpointer, but the types don't
// quite match. Must cast. Probably we should try and
// examine the types and use a concrete llvm cast, but
// right now we just use a temp memory location and
// bitcast the pointer, which is the same thing the
// old wrappers used to do.
let lltemp = builder.alloca(llforeign_ret_ty, "");
let lltemp_casted = builder.bitcast(lltemp, llrust_ret_ty.ptr_to());
builder.store(llrust_ret_val, lltemp_casted);
let llforeign_retval = builder.load(lltemp);
builder.ret(llforeign_retval);
}
None => {
// Neither ABI uses an outpointer, and the types
// match. Easy peasy.
builder.ret(llrust_ret_val);
}
Some(llforeign_outptr) if !rust_uses_outptr => {
// Foreign ABI requires an out pointer, but Rust doesn't.
// Store Rust return value.
let llforeign_outptr_casted =
builder.bitcast(llforeign_outptr, llrust_ret_ty.ptr_to());
builder.store(llrust_ret_val, llforeign_outptr_casted);
builder.ret_void();
}
Some(_) => {
// Both ABIs use outpointers. Easy peasy.
builder.ret_void();
}
}
}
}

View File

@ -21,6 +21,7 @@ use middle::lang_items::ExchangeFreeFnLangItem;
use middle::subst::{Substs};
use middle::traits;
use middle::ty::{self, Ty, TyCtxt};
use trans::abi::{Abi, FnType};
use trans::adt;
use trans::adt::GetDtorType; // for tcx.dtor_type()
use trans::base::*;
@ -40,7 +41,6 @@ use trans::type_::Type;
use trans::value::Value;
use arena::TypedArena;
use libc::c_uint;
use syntax::ast;
use syntax::codemap::DUMMY_SP;
@ -240,13 +240,17 @@ fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
}
let t = g.ty();
let llty = if type_is_sized(ccx.tcx(), t) {
type_of(ccx, t).ptr_to()
} else {
type_of(ccx, ccx.tcx().mk_box(t)).ptr_to()
let tcx = ccx.tcx();
let sig = ty::FnSig {
inputs: vec![tcx.mk_mut_ptr(tcx.types.i8)],
output: ty::FnOutput::FnConverging(tcx.mk_nil()),
variadic: false,
};
let llfnty = Type::glue_fn(ccx, llty);
// Create a FnType for fn(*mut i8) and substitute the real type in
// later - that prevents FnType from splitting fat pointers up.
let mut fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);
fn_ty.args[0].original_ty = type_of(ccx, t).ptr_to();
let llfnty = fn_ty.llvm_type(ccx);
// To avoid infinite recursion, don't `make_drop_glue` until after we've
// added the entry to the `drop_glues` cache.
@ -260,17 +264,17 @@ fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
assert!(declare::get_defined_value(ccx, &fn_nm).is_none());
let llfn = declare::declare_cfn(ccx, &fn_nm, llfnty);
ccx.available_drop_glues().borrow_mut().insert(g, fn_nm);
ccx.drop_glues().borrow_mut().insert(g, llfn);
let _s = StatRecorder::new(ccx, format!("drop {:?}", t));
let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
let empty_substs = tcx.mk_substs(Substs::trans_empty());
let (arena, fcx): (TypedArena<_>, FunctionContext);
arena = TypedArena::new();
fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false,
ty::FnConverging(ccx.tcx().mk_nil()),
empty_substs, None, &arena);
fcx = FunctionContext::new(ccx, llfn, fn_ty, ast::DUMMY_NODE_ID,
empty_substs, None, &arena);
let bcx = init_function(&fcx, false, ty::FnConverging(ccx.tcx().mk_nil()));
let bcx = fcx.init(false);
update_linkage(ccx, llfn, None, OriginalTranslation);
@ -283,9 +287,8 @@ fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
// llfn is expected be declared to take a parameter of the appropriate
// type, so we don't need to explicitly cast the function parameter.
let llrawptr0 = get_param(llfn, fcx.arg_offset() as c_uint);
let bcx = make_drop_glue(bcx, llrawptr0, g);
finish_fn(&fcx, bcx, ty::FnConverging(ccx.tcx().mk_nil()), DebugLoc::None);
let bcx = make_drop_glue(bcx, get_param(llfn, 0), g);
fcx.finish(bcx, DebugLoc::None);
llfn
}

View File

@ -270,7 +270,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
let val = if datum.kind.is_by_ref() {
load_ty(bcx, datum.val, datum.ty)
} else {
from_arg_ty(bcx, datum.val, datum.ty)
from_immediate(bcx, datum.val)
};
let cast_val = BitCast(bcx, val, llret_ty);
@ -509,14 +509,14 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
}
(_, "init_dropped") => {
let tp_ty = *substs.types.get(FnSpace, 0);
if !return_type_is_void(ccx, tp_ty) {
if !type_is_zero_size(ccx, tp_ty) {
drop_done_fill_mem(bcx, llresult, tp_ty);
}
C_nil(ccx)
}
(_, "init") => {
let tp_ty = *substs.types.get(FnSpace, 0);
if !return_type_is_void(ccx, tp_ty) {
if !type_is_zero_size(ccx, tp_ty) {
// Just zero out the stack slot. (See comment on base::memzero for explanation)
init_zero_mem(bcx, llresult, tp_ty);
}
@ -603,21 +603,24 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
}
(_, "volatile_load") => {
let tp_ty = *substs.types.get(FnSpace, 0);
let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
let mut ptr = llargs[0];
if let Some(ty) = fn_ty.ret.cast {
ptr = PointerCast(bcx, ptr, ty.ptr_to());
}
let load = VolatileLoad(bcx, ptr);
unsafe {
llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
}
to_arg_ty(bcx, load, tp_ty)
to_immediate(bcx, load, tp_ty)
},
(_, "volatile_store") => {
let tp_ty = *substs.types.get(FnSpace, 0);
let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
let val = if type_is_immediate(bcx.ccx(), tp_ty) {
from_arg_ty(bcx, llargs[1], tp_ty)
from_immediate(bcx, llargs[1])
} else {
Load(bcx, llargs[1])
};
let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to());
let store = VolatileStore(bcx, val, ptr);
unsafe {
llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
@ -684,7 +687,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
(_, "return_address") => {
if !fcx.caller_expects_out_pointer {
if !fcx.fn_ty.ret.is_indirect() {
span_err!(tcx.sess, call_info.span, E0510,
"invalid use of `return_address` intrinsic: function \
does not use out pointer");
@ -746,19 +749,17 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
match split[1] {
"cxchg" => {
let tp_ty = *substs.types.get(FnSpace, 0);
let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
let cmp = from_arg_ty(bcx, llargs[1], tp_ty);
let src = from_arg_ty(bcx, llargs[2], tp_ty);
let cmp = from_immediate(bcx, llargs[1]);
let src = from_immediate(bcx, llargs[2]);
let ptr = PointerCast(bcx, llargs[0], val_ty(src).ptr_to());
let res = AtomicCmpXchg(bcx, ptr, cmp, src, order, failorder, llvm::False);
ExtractValue(bcx, res, 0)
}
"cxchgweak" => {
let tp_ty = *substs.types.get(FnSpace, 0);
let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
let cmp = from_arg_ty(bcx, llargs[1], tp_ty);
let src = from_arg_ty(bcx, llargs[2], tp_ty);
let cmp = from_immediate(bcx, llargs[1]);
let src = from_immediate(bcx, llargs[2]);
let ptr = PointerCast(bcx, llargs[0], val_ty(src).ptr_to());
let val = AtomicCmpXchg(bcx, ptr, cmp, src, order, failorder, llvm::True);
let result = ExtractValue(bcx, val, 0);
let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
@ -769,13 +770,15 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
"load" => {
let tp_ty = *substs.types.get(FnSpace, 0);
let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
to_arg_ty(bcx, AtomicLoad(bcx, ptr, order), tp_ty)
let mut ptr = llargs[0];
if let Some(ty) = fn_ty.ret.cast {
ptr = PointerCast(bcx, ptr, ty.ptr_to());
}
to_immediate(bcx, AtomicLoad(bcx, ptr, order), tp_ty)
}
"store" => {
let tp_ty = *substs.types.get(FnSpace, 0);
let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
let val = from_arg_ty(bcx, llargs[1], tp_ty);
let val = from_immediate(bcx, llargs[1]);
let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to());
AtomicStore(bcx, val, ptr, order);
C_nil(ccx)
}
@ -807,9 +810,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
_ => ccx.sess().fatal("unknown atomic operation")
};
let tp_ty = *substs.types.get(FnSpace, 0);
let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
let val = from_arg_ty(bcx, llargs[1], tp_ty);
let val = from_immediate(bcx, llargs[1]);
let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to());
AtomicRMW(bcx, atom_op, ptr, val, order)
}
}
@ -1279,21 +1281,33 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
name: &str,
ty: Ty<'tcx>,
inputs: Vec<Ty<'tcx>>,
output: ty::FnOutput<'tcx>,
trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
-> ValueRef {
let ccx = fcx.ccx;
let llfn = declare::define_internal_fn(ccx, name, ty);
let sig = ty::FnSig {
inputs: inputs,
output: output,
variadic: false,
};
let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);
let rust_fn_ty = ccx.tcx().mk_fn_ptr(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: Abi::Rust,
sig: ty::Binder(sig)
});
let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
let (fcx, block_arena);
block_arena = TypedArena::new();
fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false,
output, ccx.tcx().mk_substs(Substs::trans_empty()),
None, &block_arena);
let bcx = init_function(&fcx, true, output);
fcx = FunctionContext::new(ccx, llfn, fn_ty, ast::DUMMY_NODE_ID,
ccx.tcx().mk_substs(Substs::trans_empty()),
None, &block_arena);
let bcx = fcx.init(true);
trans(bcx);
fcx.cleanup();
return llfn
llfn
}
// Helper function used to get a handle to the `__rust_try` function used to
@ -1321,17 +1335,7 @@ fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
}),
});
let output = ty::FnOutput::FnConverging(tcx.types.i32);
let try_fn_ty = ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: Abi::Rust,
sig: ty::Binder(ty::FnSig {
inputs: vec![fn_ty, i8p, i8p],
output: output,
variadic: false,
}),
};
let rust_try = gen_fn(fcx, "__rust_try", tcx.mk_fn_ptr(try_fn_ty), output,
trans);
let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
ccx.rust_try_fn().set(Some(rust_try));
return rust_try
}
@ -1399,16 +1403,7 @@ fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
// going on here, all I can say is that there's a few tests cases in
// LLVM's test suite which follow this pattern of instructions, so we
// just do the same.
let filter_fn_ty = tcx.mk_fn_ptr(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: Abi::Rust,
sig: ty::Binder(ty::FnSig {
inputs: vec![],
output: output,
variadic: false,
}),
});
gen_fn(fcx, "__rustc_try_filter", filter_fn_ty, output, &mut |bcx| {
gen_fn(fcx, "__rustc_try_filter", vec![], output, &mut |bcx| {
let ebp = Call(bcx, frameaddress, &[C_i32(ccx, 1)], dloc);
let exn = InBoundsGEP(bcx, ebp, &[C_i32(ccx, -20)]);
let exn = Load(bcx, BitCast(bcx, exn, Type::i8p(ccx).ptr_to()));
@ -1418,16 +1413,7 @@ fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
// Conveniently on x86_64 the EXCEPTION_POINTERS handle and base pointer
// are passed in as arguments to the filter function, so we just pass
// those along.
let filter_fn_ty = tcx.mk_fn_ptr(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: Abi::Rust,
sig: ty::Binder(ty::FnSig {
inputs: vec![i8p, i8p],
output: output,
variadic: false,
}),
});
gen_fn(fcx, "__rustc_try_filter", filter_fn_ty, output, &mut |bcx| {
gen_fn(fcx, "__rustc_try_filter", vec![i8p, i8p], output, &mut |bcx| {
let exn = llvm::get_param(bcx.fcx.llfn, 0);
let rbp = llvm::get_param(bcx.fcx.llfn, 1);
do_trans(bcx, exn, rbp);

View File

@ -18,13 +18,13 @@ use middle::infer;
use middle::subst::{Subst, Substs};
use middle::subst;
use middle::traits::{self, ProjectionMode};
use trans::abi::FnType;
use trans::base::*;
use trans::build::*;
use trans::callee::{Callee, Virtual, ArgVals, trans_fn_pointer_shim};
use trans::closure;
use trans::common::*;
use trans::consts;
use trans::datum::*;
use trans::debuginfo::DebugLoc;
use trans::declare;
use trans::expr;
@ -77,7 +77,7 @@ pub fn get_virtual_method<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
method_ty: Ty<'tcx>,
vtable_index: usize)
-> Datum<'tcx, Rvalue> {
-> ValueRef {
let _icx = push_ctxt("trans_object_shim");
let tcx = ccx.tcx();
@ -85,58 +85,41 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
vtable_index,
method_ty);
let ret_ty = tcx.erase_late_bound_regions(&method_ty.fn_ret());
let ret_ty = infer::normalize_associated_type(tcx, &ret_ty);
let sig = tcx.erase_late_bound_regions(&method_ty.fn_sig());
let sig = infer::normalize_associated_type(tcx, &sig);
let fn_ty = FnType::new(ccx, method_ty.fn_abi(), &sig, &[]);
let shim_fn_ty = match method_ty.sty {
ty::TyFnDef(_, _, fty) => tcx.mk_ty(ty::TyFnPtr(fty)),
_ => unreachable!("expected fn item type, found {}", method_ty)
};
//
let function_name = link::mangle_internal_name_by_type_and_seq(ccx, shim_fn_ty, "object_shim");
let llfn = declare::define_internal_fn(ccx, &function_name, shim_fn_ty);
let function_name = link::mangle_internal_name_by_type_and_seq(ccx, method_ty, "object_shim");
let llfn = declare::define_internal_fn(ccx, &function_name, method_ty);
let empty_substs = tcx.mk_substs(Substs::trans_empty());
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = new_fn_ctxt(ccx,
llfn,
ast::DUMMY_NODE_ID,
false,
ret_ty,
empty_substs,
None,
&block_arena);
let mut bcx = init_function(&fcx, false, ret_ty);
let llargs = get_params(fcx.llfn);
let self_idx = fcx.arg_offset();
let llself = llargs[self_idx];
let llvtable = llargs[self_idx + 1];
debug!("trans_object_shim: llself={:?}, llvtable={:?}",
Value(llself), Value(llvtable));
fcx = FunctionContext::new(ccx, llfn, fn_ty, ast::DUMMY_NODE_ID,
empty_substs, None, &block_arena);
let mut bcx = fcx.init(false);
assert!(!fcx.needs_ret_allocas);
let dest =
fcx.llretslotptr.get().map(
|_| expr::SaveIn(fcx.get_ret_slot(bcx, ret_ty, "ret_slot")));
|_| expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot")));
debug!("trans_object_shim: method_offset_in_vtable={}",
vtable_index);
let llargs = get_params(fcx.llfn);
let args = ArgVals(&llargs[fcx.fn_ty.ret.is_indirect() as usize..]);
let callee = Callee {
data: Virtual(vtable_index),
ty: method_ty
};
bcx = callee.call(bcx, DebugLoc::None, ArgVals(&llargs[self_idx..]), dest).bcx;
bcx = callee.call(bcx, DebugLoc::None, args, dest).bcx;
finish_fn(&fcx, bcx, ret_ty, DebugLoc::None);
fcx.finish(bcx, DebugLoc::None);
immediate_rvalue(llfn, shim_fn_ty)
llfn
}
/// Creates and returns a dynamic vtable for the given type and vtable origin.

View File

@ -113,9 +113,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
mir::Terminator::Return => {
let return_ty = bcx.monomorphize(&self.mir.return_ty);
bcx.with_block(|bcx| {
base::build_return_block(self.fcx, bcx, return_ty, DebugLoc::None);
self.fcx.build_return_block(bcx, DebugLoc::None);
})
}

View File

@ -17,7 +17,6 @@ use trans::base;
use trans::common::{self, BlockAndBuilder};
use trans::consts;
use trans::machine;
use trans::type_of;
use trans::mir::drop;
use llvm;
use trans::Disr;
@ -93,11 +92,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
LvalueRef::new_sized(consts::get_static(ccx, def_id).val, const_ty)
},
mir::Lvalue::ReturnPointer => {
let fn_return_ty = bcx.monomorphize(&self.mir.return_ty);
let return_ty = fn_return_ty.unwrap();
let llval = if !common::return_type_is_void(bcx.ccx(), return_ty) {
let llval = if !fcx.fn_ty.ret.is_ignore() {
bcx.with_block(|bcx| {
fcx.get_ret_slot(bcx, fn_return_ty, "")
fcx.get_ret_slot(bcx, "")
})
} else {
// This is a void return; that is, theres no place to store the value and
@ -105,11 +102,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// Ergo, we return an undef ValueRef, so we do not have to special-case every
// place using lvalues, and could use it the same way you use a regular
// ReturnPointer LValue (i.e. store into it, load from it etc).
let llty = type_of::type_of(bcx.ccx(), return_ty).ptr_to();
let llty = fcx.fn_ty.ret.original_ty.ptr_to();
unsafe {
llvm::LLVMGetUndef(llty.to_ref())
}
};
let fn_return_ty = bcx.monomorphize(&self.mir.return_ty);
let return_ty = fn_return_ty.unwrap();
LvalueRef::new_sized(llval, LvalueTy::from_ty(return_ty))
},
mir::Lvalue::Projection(ref projection) => {

View File

@ -13,9 +13,8 @@ use llvm::{self, ValueRef};
use rustc::mir::repr as mir;
use rustc::mir::tcx::LvalueTy;
use trans::base;
use trans::common::{self, Block, BlockAndBuilder};
use trans::common::{self, Block, BlockAndBuilder, FunctionContext};
use trans::expr;
use trans::type_of;
use self::lvalue::LvalueRef;
use self::operand::OperandRef;
@ -77,11 +76,11 @@ enum TempRef<'tcx> {
///////////////////////////////////////////////////////////////////////////
pub fn trans_mir<'bcx, 'tcx>(bcx: BlockAndBuilder<'bcx, 'tcx>) {
let fcx = bcx.fcx();
pub fn trans_mir<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
let bcx = fcx.init(false).build();
let mir = bcx.mir();
let mir_blocks = bcx.mir().all_basic_blocks();
let mir_blocks = mir.all_basic_blocks();
// Analyze the temps to determine which must be lvalues
// FIXME
@ -111,7 +110,7 @@ pub fn trans_mir<'bcx, 'tcx>(bcx: BlockAndBuilder<'bcx, 'tcx>) {
let args = arg_value_refs(&bcx, mir);
// Allocate a `Block` for every basic block
let block_bcxs: Vec<Block<'bcx,'tcx>> =
let block_bcxs: Vec<Block<'blk,'tcx>> =
mir_blocks.iter()
.map(|&bb|{
// FIXME(#30941) this doesn't handle msvc-style exceptions
@ -138,6 +137,8 @@ pub fn trans_mir<'bcx, 'tcx>(bcx: BlockAndBuilder<'bcx, 'tcx>) {
for &bb in &mir_blocks {
mircx.trans_block(bb);
}
fcx.cleanup();
}
/// Produce, for each argument, a `ValueRef` pointing at the
@ -149,48 +150,41 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
// FIXME tupled_args? I think I'd rather that mapping is done in MIR land though
let fcx = bcx.fcx();
let tcx = bcx.tcx();
let mut idx = fcx.arg_offset() as c_uint;
mir.arg_decls
.iter()
.enumerate()
.map(|(arg_index, arg_decl)| {
let arg_ty = bcx.monomorphize(&arg_decl.ty);
let llval = if type_of::arg_is_indirect(bcx.ccx(), arg_ty) {
// Don't copy an indirect argument to an alloca, the caller
// already put it in a temporary alloca and gave it up, unless
// we emit extra-debug-info, which requires local allocas :(.
// FIXME: lifetimes, debug info
let llarg = llvm::get_param(fcx.llfn, idx);
idx += 1;
llarg
} else if common::type_is_fat_ptr(tcx, arg_ty) {
// we pass fat pointers as two words, but we want to
// represent them internally as a pointer to two words,
// so make an alloca to store them in.
let lldata = llvm::get_param(fcx.llfn, idx);
let llextra = llvm::get_param(fcx.llfn, idx + 1);
idx += 2;
let (lltemp, dataptr, meta) = bcx.with_block(|bcx| {
let lltemp = base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index));
(lltemp, expr::get_dataptr(bcx, lltemp), expr::get_meta(bcx, lltemp))
});
bcx.store(lldata, dataptr);
bcx.store(llextra, meta);
lltemp
} else {
// otherwise, arg is passed by value, so make a
// temporary and store it there
let llarg = llvm::get_param(fcx.llfn, idx);
idx += 1;
bcx.with_block(|bcx| {
let lltemp = base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index));
base::store_ty(bcx, llarg, lltemp, arg_ty);
lltemp
})
};
LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty))
})
.collect()
let mut idx = 0;
let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize;
mir.arg_decls.iter().enumerate().map(|(arg_index, arg_decl)| {
let arg = &fcx.fn_ty.args[idx];
idx += 1;
let arg_ty = bcx.monomorphize(&arg_decl.ty);
let llval = if arg.is_indirect() {
// Don't copy an indirect argument to an alloca, the caller
// already put it in a temporary alloca and gave it up, unless
// we emit extra-debug-info, which requires local allocas :(.
// FIXME: lifetimes, debug info
let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint);
llarg_idx += 1;
llarg
} else {
bcx.with_block(|bcx| {
let lltemp = base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index));
if common::type_is_fat_ptr(tcx, arg_ty) {
// we pass fat pointers as two words, but we want to
// represent them internally as a pointer to two words,
// so make an alloca to store them in.
let meta = &fcx.fn_ty.args[idx];
idx += 1;
arg.store_fn_arg(bcx, &mut llarg_idx, expr::get_dataptr(bcx, lltemp));
meta.store_fn_arg(bcx, &mut llarg_idx, expr::get_meta(bcx, lltemp));
} else {
// otherwise, arg is passed by value, so make a
// temporary and store it there
arg.store_fn_arg(bcx, &mut llarg_idx, lltemp);
}
lltemp
})
};
LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty))
}).collect()
}
mod analyze;

View File

@ -246,8 +246,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty));
let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
let ll_t_in = type_of::arg_type_of(bcx.ccx(), operand.ty);
let ll_t_out = type_of::arg_type_of(bcx.ccx(), cast_ty);
let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty);
let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty);
let (llval, ll_t_in, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in {
let repr = adt::represent_type(bcx.ccx(), operand.ty);
let llval = operand.immediate();
@ -308,8 +308,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
OperandValue::Immediate(newval)
}
mir::CastKind::Misc => { // Casts from a fat-ptr.
let ll_cast_ty = type_of::arg_type_of(bcx.ccx(), cast_ty);
let ll_from_ty = type_of::arg_type_of(bcx.ccx(), operand.ty);
let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty);
let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty);
if let OperandValue::FatPtr(data_ptr, meta_ptr) = operand.val {
if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
let ll_cft = ll_cast_ty.field_types();

View File

@ -16,14 +16,12 @@ use middle::infer::normalize_associated_type;
use middle::subst;
use middle::subst::{Subst, Substs};
use middle::ty::fold::{TypeFolder, TypeFoldable};
use trans::abi::Abi;
use trans::attributes;
use trans::base::{push_ctxt};
use trans::base::trans_fn;
use trans::base;
use trans::common::*;
use trans::declare;
use trans::foreign;
use middle::ty::{self, Ty, TyCtxt};
use trans::Disr;
use rustc::front::map as hir_map;
@ -129,15 +127,15 @@ pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
});
match map_node {
hir_map::NodeItem(&hir::Item {
ref attrs, node: hir::ItemFn(ref decl, _, _, abi, _, ref body), ..
ref attrs, node: hir::ItemFn(ref decl, _, _, _, _, ref body), ..
}) |
hir_map::NodeTraitItem(&hir::TraitItem {
ref attrs, node: hir::MethodTraitItem(
hir::MethodSig { abi, ref decl, .. }, Some(ref body)), ..
hir::MethodSig { ref decl, .. }, Some(ref body)), ..
}) |
hir_map::NodeImplItem(&hir::ImplItem {
ref attrs, node: hir::ImplItemKind::Method(
hir::MethodSig { abi, ref decl, .. }, ref body), ..
hir::MethodSig { ref decl, .. }, ref body), ..
}) => {
base::update_linkage(ccx, lldecl, None, base::OriginalTranslation);
attributes::from_fn_attrs(ccx, attrs, lldecl);
@ -153,13 +151,7 @@ pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
}
if trans_everywhere || is_first {
if abi != Abi::Rust && abi != Abi::RustCall {
foreign::trans_rust_fn_with_foreign_abi(
ccx, decl, body, attrs, lldecl, psubsts, fn_node_id,
Some(&hash));
} else {
trans_fn(ccx, decl, body, lldecl, psubsts, fn_node_id, attrs);
}
trans_fn(ccx, decl, body, lldecl, psubsts, fn_node_id, attrs);
}
}

View File

@ -183,10 +183,6 @@ impl Type {
Type::struct_(ccx, &[], false)
}
pub fn glue_fn(ccx: &CrateContext, t: Type) -> Type {
Type::func(&[t], &Type::void(ccx))
}
pub fn array(ty: &Type, len: u64) -> Type {
ty!(llvm::LLVMRustArrayType(ty.to_ref(), len))
}
@ -206,7 +202,7 @@ impl Type {
}
pub fn vtable_ptr(ccx: &CrateContext) -> Type {
Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to().ptr_to()
Type::func(&[Type::i8p(ccx)], &Type::void(ccx)).ptr_to().ptr_to()
}
pub fn kind(&self) -> TypeKind {

View File

@ -35,58 +35,6 @@ fn ensure_array_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
}
}
pub fn arg_is_indirect<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
arg_ty: Ty<'tcx>) -> bool {
!type_is_immediate(ccx, arg_ty) && !type_is_fat_ptr(ccx.tcx(), arg_ty)
}
pub fn return_uses_outptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
ty: Ty<'tcx>) -> bool {
arg_is_indirect(ccx, ty)
}
pub fn type_of_explicit_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
arg_ty: Ty<'tcx>) -> Type {
let llty = arg_type_of(ccx, arg_ty);
if arg_is_indirect(ccx, arg_ty) {
llty.ptr_to()
} else {
llty
}
}
/// Yields the types of the "real" arguments for a function using the `RustCall`
/// ABI by untupling the arguments of the function.
pub fn untuple_arguments<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
inputs: &[Ty<'tcx>])
-> Vec<Ty<'tcx>> {
if inputs.is_empty() {
return Vec::new()
}
let mut result = Vec::new();
for (i, &arg_prior_to_tuple) in inputs.iter().enumerate() {
if i < inputs.len() - 1 {
result.push(arg_prior_to_tuple);
}
}
match inputs[inputs.len() - 1].sty {
ty::TyTuple(ref tupled_arguments) => {
debug!("untuple_arguments(): untupling arguments");
for &tupled_argument in tupled_arguments {
result.push(tupled_argument);
}
}
_ => {
ccx.tcx().sess.bug("argument to function with \"rust-call\" ABI \
is neither a tuple nor unit")
}
}
result
}
// A "sizing type" is an LLVM type, the size and alignment of which are
// guaranteed to be equivalent to what you would get out of `type_of()`. It's
// useful because:
@ -189,16 +137,9 @@ fn unsized_info_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type
}
}
pub fn arg_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
pub fn immediate_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
if t.is_bool() {
Type::i1(cx)
} else if type_is_immediate(cx, t) && type_of(cx, t).is_aggregate() {
// We want to pass small aggregates as immediate values, but using an aggregate LLVM type
// for this leads to bad optimizations, so its arg type is an appropriately sized integer
match machine::llsize_of_alloc(cx, sizing_type_of(cx, t)) {
0 => type_of(cx, t),
n => Type::ix(cx, n * 8),
}
} else {
type_of(cx, t)
}