Issue #3678: Remove wrappers and call foreign functions directly

Niko Matsakis 2013-05-21 15:25:44 -04:00
parent c178b52fe5
commit 303f650ecf
28 changed files with 1751 additions and 1620 deletions

View File

@ -16,8 +16,6 @@ use lib::llvm::{ModuleRef, ValueRef};
pub struct Upcalls {
trace: ValueRef,
call_shim_on_c_stack: ValueRef,
call_shim_on_rust_stack: ValueRef,
rust_personality: ValueRef,
reset_stack_limit: ValueRef
}
@ -47,9 +45,6 @@ pub fn declare_upcalls(targ_cfg: @session::config, llmod: ModuleRef) -> @Upcalls
@Upcalls {
trace: upcall!(fn trace(opaque_ptr, opaque_ptr, int_ty) -> Type::void()),
call_shim_on_c_stack: upcall!(fn call_shim_on_c_stack(opaque_ptr, opaque_ptr) -> int_ty),
call_shim_on_rust_stack:
upcall!(fn call_shim_on_rust_stack(opaque_ptr, opaque_ptr) -> int_ty),
rust_personality: upcall!(nothrow fn rust_personality -> Type::i32()),
reset_stack_limit: upcall!(nothrow fn reset_stack_limit -> Type::void())
}

View File

@ -265,6 +265,9 @@ pub fn phase_3_run_analysis_passes(sess: Session,
time(time_passes, ~"loop checking", ||
middle::check_loop::check_crate(ty_cx, crate));
time(time_passes, ~"stack checking", ||
middle::stack_check::stack_check_crate(ty_cx, crate));
let middle::moves::MoveMaps {moves_map, moved_variables_set,
capture_map} =
time(time_passes, ~"compute moves", ||

View File

@ -2246,6 +2246,11 @@ impl TypeNames {
self.type_to_str_depth(ty, 30)
}
pub fn types_to_str(&self, tys: &[Type]) -> ~str {
let strs = tys.map(|t| self.type_to_str(*t));
fmt!("[%s]", strs.connect(","))
}
pub fn val_to_str(&self, val: ValueRef) -> ~str {
unsafe {
let ty = Type::from_ref(llvm::LLVMTypeOf(val));

View File

@ -73,6 +73,7 @@ use syntax::{ast, oldvisit, ast_util, visit};
#[deriving(Clone, Eq)]
pub enum lint {
ctypes,
cstack,
unused_imports,
unnecessary_qualification,
while_true,
@ -146,6 +147,13 @@ static lint_table: &'static [(&'static str, LintSpec)] = &[
default: warn
}),
("cstack",
LintSpec {
lint: cstack,
desc: "only invoke foreign functions from fixedstacksegment fns",
default: deny
}),
("unused_imports",
LintSpec {
lint: unused_imports,

View File

@ -0,0 +1,110 @@
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Lint mode to detect cases where we call non-Rust fns, which do not
have a stack growth check, from locations not annotated to request
large stacks.
*/
use middle::lint;
use middle::ty;
use syntax::ast;
use syntax::attr;
use syntax::codemap::span;
use visit = syntax::oldvisit;
use util::ppaux::Repr;
#[deriving(Clone)]
struct Context {
tcx: ty::ctxt,
safe_stack: bool
}
pub fn stack_check_crate(tcx: ty::ctxt,
crate: &ast::Crate) {
let new_cx = Context {
tcx: tcx,
safe_stack: false
};
let visitor = visit::mk_vt(@visit::Visitor {
visit_item: stack_check_item,
visit_fn: stack_check_fn,
visit_expr: stack_check_expr,
..*visit::default_visitor()
});
visit::visit_crate(crate, (new_cx, visitor));
}
fn stack_check_item(item: @ast::item,
(in_cx, v): (Context, visit::vt<Context>)) {
let safe_stack = match item.node {
ast::item_fn(*) => {
attr::contains_name(item.attrs, "fixed_stack_segment")
}
_ => {
false
}
};
let new_cx = Context {
tcx: in_cx.tcx,
safe_stack: safe_stack
};
visit::visit_item(item, (new_cx, v));
}
fn stack_check_fn<'a>(fk: &visit::fn_kind,
decl: &ast::fn_decl,
body: &ast::Block,
sp: span,
id: ast::NodeId,
(in_cx, v): (Context, visit::vt<Context>)) {
let safe_stack = match *fk {
visit::fk_item_fn(*) => in_cx.safe_stack, // see stack_check_item above
visit::fk_anon(*) | visit::fk_fn_block | visit::fk_method(*) => false,
};
let new_cx = Context {
tcx: in_cx.tcx,
safe_stack: safe_stack
};
debug!("stack_check_fn(safe_stack=%b, id=%?)", safe_stack, id);
visit::visit_fn(fk, decl, body, sp, id, (new_cx, v));
}
fn stack_check_expr<'a>(expr: @ast::expr,
(cx, v): (Context, visit::vt<Context>)) {
debug!("stack_check_expr(safe_stack=%b, expr=%s)",
cx.safe_stack, expr.repr(cx.tcx));
if !cx.safe_stack {
match expr.node {
ast::expr_call(callee, _, _) => {
let callee_ty = ty::expr_ty(cx.tcx, callee);
debug!("callee_ty=%s", callee_ty.repr(cx.tcx));
match ty::get(callee_ty).sty {
ty::ty_bare_fn(ref fty) => {
if !fty.abis.is_rust() && !fty.abis.is_intrinsic() {
cx.tcx.sess.add_lint(
lint::cstack,
callee.id,
callee.span,
fmt!("invoking non-Rust fn in fn without \
#[fixed_stack_segment]"));
}
}
_ => {}
}
}
_ => {}
}
}
visit::visit_expr(expr, (cx, v));
}
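For orientation, a minimal sketch of the pattern the new cstack lint enforces (illustrative code, not part of the patch; strlen is borrowed from the ctypes test further down): foreign fns perform no stack-growth check of their own, so calls to them are only accepted inside fns marked #[fixed_stack_segment].

extern {
    fn strlen(s: *u8) -> uint;          // C function: no stack-growth check
}

#[fixed_stack_segment]
fn len_ok(s: *u8) -> uint {
    unsafe { strlen(s) }                // ok: a large stack segment was requested
}

fn len_bad(s: *u8) -> uint {
    unsafe { strlen(s) }                // flagged: "invoking non-Rust fn in fn
                                        //           without #[fixed_stack_segment]"
}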

View File

@ -203,28 +203,28 @@ pub fn decl_internal_cdecl_fn(llmod: ModuleRef, name: &str, ty: Type) -> ValueRe
return llfn;
}
pub fn get_extern_fn(externs: &mut ExternMap, llmod: ModuleRef, name: @str,
pub fn get_extern_fn(externs: &mut ExternMap, llmod: ModuleRef, name: &str,
cc: lib::llvm::CallConv, ty: Type) -> ValueRef {
match externs.find_copy(&name) {
Some(n) => return n,
match externs.find_equiv(&name) {
Some(n) => return *n,
None => ()
}
let f = decl_fn(llmod, name, cc, ty);
externs.insert(name, f);
externs.insert(name.to_owned(), f);
return f;
}
pub fn get_extern_const(externs: &mut ExternMap, llmod: ModuleRef,
name: @str, ty: Type) -> ValueRef {
match externs.find_copy(&name) {
Some(n) => return n,
name: &str, ty: Type) -> ValueRef {
match externs.find_equiv(&name) {
Some(n) => return *n,
None => ()
}
unsafe {
let c = do name.with_c_str |buf| {
llvm::LLVMAddGlobal(llmod, ty.to_ref(), buf)
};
externs.insert(name, c);
externs.insert(name.to_owned(), c);
return c;
}
}
@ -511,7 +511,6 @@ pub fn get_res_dtor(ccx: @mut CrateContext,
None,
ty::lookup_item_type(tcx, parent_id).ty);
let llty = type_of_dtor(ccx, class_ty);
let name = name.to_managed(); // :-(
get_extern_fn(&mut ccx.externs,
ccx.llmod,
name,
@ -798,13 +797,13 @@ pub fn fail_if_zero(cx: @mut Block, span: span, divrem: ast::binop,
}
}
pub fn null_env_ptr(bcx: @mut Block) -> ValueRef {
C_null(Type::opaque_box(bcx.ccx()).ptr_to())
pub fn null_env_ptr(ccx: &CrateContext) -> ValueRef {
C_null(Type::opaque_box(ccx).ptr_to())
}
pub fn trans_external_path(ccx: &mut CrateContext, did: ast::def_id, t: ty::t)
-> ValueRef {
let name = csearch::get_symbol(ccx.sess.cstore, did).to_managed(); // Sad
let name = csearch::get_symbol(ccx.sess.cstore, did);
match ty::get(t).sty {
ty::ty_bare_fn(_) | ty::ty_closure(_) => {
let llty = type_of_fn_from_ty(ccx, t);
@ -1572,7 +1571,7 @@ pub fn mk_return_basic_block(llfn: ValueRef) -> BasicBlockRef {
// slot where the return value of the function must go.
pub fn make_return_pointer(fcx: @mut FunctionContext, output_type: ty::t) -> ValueRef {
unsafe {
if !ty::type_is_immediate(fcx.ccx.tcx, output_type) {
if type_of::return_uses_outptr(fcx.ccx.tcx, output_type) {
llvm::LLVMGetParam(fcx.llfn, 0)
} else {
let lloutputtype = type_of::type_of(fcx.ccx, output_type);
@ -1612,7 +1611,7 @@ pub fn new_fn_ctxt_w_id(ccx: @mut CrateContext,
ty::subst_tps(ccx.tcx, substs.tys, substs.self_ty, output_type)
}
};
let is_immediate = ty::type_is_immediate(ccx.tcx, substd_output_type);
let uses_outptr = type_of::return_uses_outptr(ccx.tcx, substd_output_type);
let fcx = @mut FunctionContext {
llfn: llfndecl,
llenv: unsafe {
@ -1624,7 +1623,7 @@ pub fn new_fn_ctxt_w_id(ccx: @mut CrateContext,
llreturn: None,
llself: None,
personality: None,
has_immediate_return_value: is_immediate,
caller_expects_out_pointer: uses_outptr,
llargs: @mut HashMap::new(),
lllocals: @mut HashMap::new(),
llupvars: @mut HashMap::new(),
@ -1647,8 +1646,15 @@ pub fn new_fn_ctxt_w_id(ccx: @mut CrateContext,
fcx.alloca_insert_pt = Some(llvm::LLVMGetFirstInstruction(entry_bcx.llbb));
}
if !ty::type_is_nil(substd_output_type) && !(is_immediate && skip_retptr) {
fcx.llretptr = Some(make_return_pointer(fcx, substd_output_type));
if !ty::type_is_voidish(substd_output_type) {
// If the function returns nil/bot, there is no real return
// value, so do not set `llretptr`.
if !skip_retptr || uses_outptr {
// Otherwise, we normally allocate the llretptr, unless we
// have been instructed to skip it for immediate return
// values.
fcx.llretptr = Some(make_return_pointer(fcx, substd_output_type));
}
}
fcx
}
@ -1796,7 +1802,7 @@ pub fn finish_fn(fcx: @mut FunctionContext, last_bcx: @mut Block) {
// Builds the return block for a function.
pub fn build_return_block(fcx: &FunctionContext, ret_cx: @mut Block) {
// Return the value if this function immediate; otherwise, return void.
if fcx.llretptr.is_none() || !fcx.has_immediate_return_value {
if fcx.llretptr.is_none() || fcx.caller_expects_out_pointer {
return RetVoid(ret_cx);
}
@ -1882,9 +1888,7 @@ pub fn trans_closure(ccx: @mut CrateContext,
// translation calls that don't have a return value (trans_crate,
// trans_mod, trans_item, et cetera) and those that do
// (trans_block, trans_expr, et cetera).
if body.expr.is_none() || ty::type_is_bot(block_ty) ||
ty::type_is_nil(block_ty)
{
if body.expr.is_none() || ty::type_is_voidish(block_ty) {
bcx = controlflow::trans_block(bcx, body, expr::Ignore);
} else {
let dest = expr::SaveIn(fcx.llretptr.unwrap());
@ -2129,13 +2133,14 @@ pub fn trans_item(ccx: @mut CrateContext, item: &ast::item) {
ast::item_fn(ref decl, purity, _abis, ref generics, ref body) => {
if purity == ast::extern_fn {
let llfndecl = get_item_val(ccx, item.id);
foreign::trans_foreign_fn(ccx,
vec::append((*path).clone(),
[path_name(item.ident)]),
decl,
body,
llfndecl,
item.id);
foreign::trans_rust_fn_with_foreign_abi(
ccx,
&vec::append((*path).clone(),
[path_name(item.ident)]),
decl,
body,
llfndecl,
item.id);
} else if !generics.is_type_parameterized() {
let llfndecl = get_item_val(ccx, item.id);
trans_fn(ccx,
@ -2196,7 +2201,7 @@ pub fn trans_item(ccx: @mut CrateContext, item: &ast::item) {
}
},
ast::item_foreign_mod(ref foreign_mod) => {
foreign::trans_foreign_mod(ccx, path, foreign_mod);
foreign::trans_foreign_mod(ccx, foreign_mod);
}
ast::item_struct(struct_def, ref generics) => {
if !generics.is_type_parameterized() {
@ -2291,8 +2296,7 @@ pub fn create_entry_wrapper(ccx: @mut CrateContext,
fn create_main(ccx: @mut CrateContext, main_llfn: ValueRef) -> ValueRef {
let nt = ty::mk_nil();
let llfty = type_of_fn(ccx, [], nt);
let llfty = type_of_rust_fn(ccx, [], nt);
let llfdecl = decl_fn(ccx.llmod, "_rust_main",
lib::llvm::CCallConv, llfty);
@ -2300,7 +2304,7 @@ pub fn create_entry_wrapper(ccx: @mut CrateContext,
// the args vector built in create_entry_fn will need
// be updated if this assertion starts to fail.
assert!(fcx.has_immediate_return_value);
assert!(!fcx.caller_expects_out_pointer);
let bcx = fcx.entry_bcx.unwrap();
// Call main.
@ -2463,7 +2467,10 @@ pub fn get_item_val(ccx: @mut CrateContext, id: ast::NodeId) -> ValueRef {
let llfn = if purity != ast::extern_fn {
register_fn(ccx, i.span, sym, i.id, ty)
} else {
foreign::register_foreign_fn(ccx, i.span, sym, i.id)
foreign::register_rust_fn_with_foreign_abi(ccx,
i.span,
sym,
i.id)
};
set_inline_hint_if_appr(i.attrs, llfn);
llfn
@ -2509,9 +2516,7 @@ pub fn get_item_val(ccx: @mut CrateContext, id: ast::NodeId) -> ValueRef {
match ni.node {
ast::foreign_item_fn(*) => {
let path = vec::append((*pth).clone(), [path_name(ni.ident)]);
let sym = exported_name(ccx, path, ty, ni.attrs);
register_fn(ccx, ni.span, sym, ni.id, ty)
foreign::register_foreign_item_fn(ccx, abis, &path, ni);
}
ast::foreign_item_static(*) => {
let ident = token::ident_to_str(&ni.ident);

View File

@ -8,19 +8,15 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use lib::llvm::{llvm, ValueRef, Attribute, Void};
use middle::trans::base::*;
use middle::trans::build::*;
use middle::trans::common::*;
use middle::trans::type_::Type;
use std::libc::c_uint;
use lib::llvm::Attribute;
use std::option;
pub trait ABIInfo {
fn compute_info(&self, atys: &[Type], rty: Type, ret_def: bool) -> FnType;
}
use middle::trans::context::CrateContext;
use middle::trans::cabi_x86;
use middle::trans::cabi_x86_64;
use middle::trans::cabi_arm;
use middle::trans::cabi_mips;
use middle::trans::type_::Type;
use syntax::abi::{X86, X86_64, Arm, Mips};
#[deriving(Clone)]
pub struct LLVMType {
@ -28,149 +24,38 @@ pub struct LLVMType {
ty: Type
}
/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
pub struct FnType {
/// The LLVM types of each argument. If the cast flag is true,
/// then the argument should be cast, typically because the
/// official argument type will be an int and the rust type is i8
/// or something like that.
arg_tys: ~[LLVMType],
ret_ty: LLVMType,
/// A list of attributes to be attached to each argument (parallel
/// the `arg_tys` array). If the attribute for a given argument is Some,
/// then the argument should be passed by reference.
attrs: ~[option::Option<Attribute>],
/// LLVM return type.
ret_ty: LLVMType,
/// If true, then an implicit pointer should be added for the result.
sret: bool
}
impl FnType {
pub fn decl_fn(&self, decl: &fn(fnty: Type) -> ValueRef) -> ValueRef {
let atys = self.arg_tys.iter().map(|t| t.ty).collect::<~[Type]>();
let rty = self.ret_ty.ty;
let fnty = Type::func(atys, &rty);
let llfn = decl(fnty);
for (i, a) in self.attrs.iter().enumerate() {
match *a {
option::Some(attr) => {
unsafe {
let llarg = get_param(llfn, i);
llvm::LLVMAddAttribute(llarg, attr as c_uint);
}
}
_ => ()
}
}
return llfn;
}
pub fn build_shim_args(&self, bcx: @mut Block, arg_tys: &[Type], llargbundle: ValueRef)
-> ~[ValueRef] {
let mut atys: &[LLVMType] = self.arg_tys;
let mut attrs: &[option::Option<Attribute>] = self.attrs;
let mut llargvals = ~[];
let mut i = 0u;
let n = arg_tys.len();
if self.sret {
let llretptr = GEPi(bcx, llargbundle, [0u, n]);
let llretloc = Load(bcx, llretptr);
llargvals = ~[llretloc];
atys = atys.tail();
attrs = attrs.tail();
}
while i < n {
let llargval = if atys[i].cast {
let arg_ptr = GEPi(bcx, llargbundle, [0u, i]);
let arg_ptr = BitCast(bcx, arg_ptr, atys[i].ty.ptr_to());
Load(bcx, arg_ptr)
} else if attrs[i].is_some() {
GEPi(bcx, llargbundle, [0u, i])
} else {
load_inbounds(bcx, llargbundle, [0u, i])
};
llargvals.push(llargval);
i += 1u;
}
return llargvals;
}
pub fn build_shim_ret(&self, bcx: @mut Block, arg_tys: &[Type], ret_def: bool,
llargbundle: ValueRef, llretval: ValueRef) {
for (i, a) in self.attrs.iter().enumerate() {
match *a {
option::Some(attr) => {
unsafe {
llvm::LLVMAddInstrAttribute(llretval, (i + 1u) as c_uint, attr as c_uint);
}
}
_ => ()
}
}
if self.sret || !ret_def {
return;
}
let n = arg_tys.len();
// R** llretptr = &args->r;
let llretptr = GEPi(bcx, llargbundle, [0u, n]);
// R* llretloc = *llretptr; /* (args->r) */
let llretloc = Load(bcx, llretptr);
if self.ret_ty.cast {
let tmp_ptr = BitCast(bcx, llretloc, self.ret_ty.ty.ptr_to());
// *args->r = r;
Store(bcx, llretval, tmp_ptr);
} else {
// *args->r = r;
Store(bcx, llretval, llretloc);
};
}
pub fn build_wrap_args(&self, bcx: @mut Block, ret_ty: Type,
llwrapfn: ValueRef, llargbundle: ValueRef) {
let mut atys: &[LLVMType] = self.arg_tys;
let mut attrs: &[option::Option<Attribute>] = self.attrs;
let mut j = 0u;
let llretptr = if self.sret {
atys = atys.tail();
attrs = attrs.tail();
j = 1u;
get_param(llwrapfn, 0u)
} else if self.ret_ty.cast {
let retptr = alloca(bcx, self.ret_ty.ty, "");
BitCast(bcx, retptr, ret_ty.ptr_to())
} else {
alloca(bcx, ret_ty, "")
};
let mut i = 0u;
let n = atys.len();
while i < n {
let mut argval = get_param(llwrapfn, i + j);
if attrs[i].is_some() {
argval = Load(bcx, argval);
store_inbounds(bcx, argval, llargbundle, [0u, i]);
} else if atys[i].cast {
let argptr = GEPi(bcx, llargbundle, [0u, i]);
let argptr = BitCast(bcx, argptr, atys[i].ty.ptr_to());
Store(bcx, argval, argptr);
} else {
store_inbounds(bcx, argval, llargbundle, [0u, i]);
}
i += 1u;
}
store_inbounds(bcx, llretptr, llargbundle, [0u, n]);
}
pub fn build_wrap_ret(&self, bcx: @mut Block, arg_tys: &[Type], llargbundle: ValueRef) {
if self.ret_ty.ty.kind() == Void {
return;
}
if bcx.fcx.llretptr.is_some() {
let llretval = load_inbounds(bcx, llargbundle, [ 0, arg_tys.len() ]);
let llretval = if self.ret_ty.cast {
let retptr = BitCast(bcx, llretval, self.ret_ty.ty.ptr_to());
Load(bcx, retptr)
} else {
Load(bcx, llretval)
};
let llretptr = BitCast(bcx, bcx.fcx.llretptr.unwrap(), self.ret_ty.ty.ptr_to());
Store(bcx, llretval, llretptr);
}
pub fn compute_abi_info(ccx: &mut CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
match ccx.sess.targ_cfg.arch {
X86 => cabi_x86::compute_abi_info(ccx, atys, rty, ret_def),
X86_64 => cabi_x86_64::compute_abi_info(ccx, atys, rty, ret_def),
Arm => cabi_arm::compute_abi_info(ccx, atys, rty, ret_def),
Mips => cabi_mips::compute_abi_info(ccx, atys, rty, ret_def),
}
}
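To make the FnType fields described above concrete, here is a rough, assumption-laden sketch (not taken from the patch) of what compute_abi_info might produce for a foreign fn of type fn(i32) -> SomeStruct on a target that returns structs through a hidden pointer: the result slot is prepended to arg_tys, marked with StructRetAttribute, and the declared return type collapses to void.

// somestruct_ptr is a placeholder for the LLVM pointer-to-SomeStruct type.
FnType {
    arg_tys: ~[LLVMType { cast: false, ty: somestruct_ptr },   // hidden sret slot
               LLVMType { cast: false, ty: Type::i32() }],
    attrs:   ~[Some(StructRetAttribute), None],
    ret_ty:  LLVMType { cast: false, ty: Type::void() },
    sret:    true
}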

View File

@ -10,7 +10,8 @@
use lib::llvm::{llvm, Integer, Pointer, Float, Double, Struct, Array};
use lib::llvm::{Attribute, StructRetAttribute};
use middle::trans::cabi::{ABIInfo, FnType, LLVMType};
use middle::trans::cabi::{FnType, LLVMType};
use middle::trans::context::CrateContext;
use middle::trans::type_::Type;
@ -124,45 +125,37 @@ fn is_reg_ty(ty: Type) -> bool {
}
}
enum ARM_ABIInfo { ARM_ABIInfo }
impl ABIInfo for ARM_ABIInfo {
fn compute_info(&self,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
let mut arg_tys = ~[];
let mut attrs = ~[];
for &aty in atys.iter() {
let (ty, attr) = classify_arg_ty(aty);
arg_tys.push(ty);
attrs.push(attr);
}
let (ret_ty, ret_attr) = if ret_def {
classify_ret_ty(rty)
} else {
(LLVMType { cast: false, ty: Type::void() }, None)
};
let mut ret_ty = ret_ty;
let sret = ret_attr.is_some();
if sret {
arg_tys.unshift(ret_ty);
attrs.unshift(ret_attr);
ret_ty = LLVMType { cast: false, ty: Type::void() };
}
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
attrs: attrs,
sret: sret
};
pub fn compute_abi_info(_ccx: &mut CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
let mut arg_tys = ~[];
let mut attrs = ~[];
for &aty in atys.iter() {
let (ty, attr) = classify_arg_ty(aty);
arg_tys.push(ty);
attrs.push(attr);
}
}
pub fn abi_info() -> @ABIInfo {
return @ARM_ABIInfo as @ABIInfo;
let (ret_ty, ret_attr) = if ret_def {
classify_ret_ty(rty)
} else {
(LLVMType { cast: false, ty: Type::void() }, None)
};
let mut ret_ty = ret_ty;
let sret = ret_attr.is_some();
if sret {
arg_tys.unshift(ret_ty);
attrs.unshift(ret_attr);
ret_ty = LLVMType { cast: false, ty: Type::void() };
}
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
attrs: attrs,
sret: sret
};
}

View File

@ -14,6 +14,7 @@ use std::num;
use std::vec;
use lib::llvm::{llvm, Integer, Pointer, Float, Double, Struct, Array};
use lib::llvm::{Attribute, StructRetAttribute};
use middle::trans::context::CrateContext;
use middle::trans::context::task_llcx;
use middle::trans::cabi::*;
@ -170,47 +171,39 @@ fn struct_ty(ty: Type,
return Type::struct_(fields, false);
}
enum MIPS_ABIInfo { MIPS_ABIInfo }
pub fn compute_abi_info(_ccx: &mut CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
let (ret_ty, ret_attr) = if ret_def {
classify_ret_ty(rty)
} else {
(LLVMType { cast: false, ty: Type::void() }, None)
};
impl ABIInfo for MIPS_ABIInfo {
fn compute_info(&self,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
let (ret_ty, ret_attr) = if ret_def {
classify_ret_ty(rty)
} else {
(LLVMType { cast: false, ty: Type::void() }, None)
};
let mut ret_ty = ret_ty;
let mut ret_ty = ret_ty;
let sret = ret_attr.is_some();
let mut arg_tys = ~[];
let mut attrs = ~[];
let mut offset = if sret { 4 } else { 0 };
let sret = ret_attr.is_some();
let mut arg_tys = ~[];
let mut attrs = ~[];
let mut offset = if sret { 4 } else { 0 };
for aty in atys.iter() {
let (ty, attr) = classify_arg_ty(*aty, &mut offset);
arg_tys.push(ty);
attrs.push(attr);
};
for aty in atys.iter() {
let (ty, attr) = classify_arg_ty(*aty, &mut offset);
arg_tys.push(ty);
attrs.push(attr);
};
if sret {
arg_tys = vec::append(~[ret_ty], arg_tys);
attrs = vec::append(~[ret_attr], attrs);
ret_ty = LLVMType { cast: false, ty: Type::void() };
}
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
attrs: attrs,
sret: sret
};
if sret {
arg_tys = vec::append(~[ret_ty], arg_tys);
attrs = vec::append(~[ret_attr], attrs);
ret_ty = LLVMType { cast: false, ty: Type::void() };
}
}
pub fn abi_info() -> @ABIInfo {
return @MIPS_ABIInfo as @ABIInfo;
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
attrs: attrs,
sret: sret
};
}

View File

@ -14,70 +14,87 @@ use lib::llvm::*;
use super::cabi::*;
use super::common::*;
use super::machine::*;
use middle::trans::type_::Type;
struct X86_ABIInfo {
ccx: @mut CrateContext
}
pub fn compute_abi_info(ccx: &mut CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
let mut arg_tys = ~[];
let mut attrs = ~[];
impl ABIInfo for X86_ABIInfo {
fn compute_info(&self,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
let mut arg_tys = do atys.map |a| {
LLVMType { cast: false, ty: *a }
let ret_ty;
let sret;
if !ret_def {
ret_ty = LLVMType {
cast: false,
ty: Type::void(),
};
let mut ret_ty = LLVMType {
sret = false;
} else if rty.kind() == Struct {
// Returning a structure. Most often, this will use
// a hidden first argument. On some platforms, though,
// small structs are returned as integers.
//
// Some links:
// http://www.angelcode.com/dev/callconv/callconv.html
// Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp
enum Strategy { RetValue(Type), RetPointer }
let strategy = match ccx.sess.targ_cfg.os {
os_win32 | os_macos => {
match llsize_of_alloc(ccx, rty) {
1 => RetValue(Type::i8()),
2 => RetValue(Type::i16()),
4 => RetValue(Type::i32()),
8 => RetValue(Type::i64()),
_ => RetPointer
}
}
_ => {
RetPointer
}
};
match strategy {
RetValue(t) => {
ret_ty = LLVMType {
cast: true,
ty: t
};
sret = false;
}
RetPointer => {
arg_tys.push(LLVMType {
cast: false,
ty: rty.ptr_to()
});
attrs.push(Some(StructRetAttribute));
ret_ty = LLVMType {
cast: false,
ty: Type::void(),
};
sret = true;
}
}
} else {
ret_ty = LLVMType {
cast: false,
ty: rty
};
let mut attrs = do atys.map |_| {
None
};
// Rules for returning structs taken from
// http://www.angelcode.com/dev/callconv/callconv.html
// Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp
let sret = {
let returning_a_struct = rty.kind() == Struct && ret_def;
let big_struct = match self.ccx.sess.targ_cfg.os {
os_win32 | os_macos => llsize_of_alloc(self.ccx, rty) > 8,
_ => true
};
returning_a_struct && big_struct
};
if sret {
let ret_ptr_ty = LLVMType {
cast: false,
ty: ret_ty.ty.ptr_to()
};
arg_tys = ~[ret_ptr_ty] + arg_tys;
attrs = ~[Some(StructRetAttribute)] + attrs;
ret_ty = LLVMType {
cast: false,
ty: Type::void(),
};
} else if !ret_def {
ret_ty = LLVMType {
cast: false,
ty: Type::void()
};
}
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
attrs: attrs,
sret: sret
};
sret = false;
}
}
pub fn abi_info(ccx: @mut CrateContext) -> @ABIInfo {
return @X86_ABIInfo {
ccx: ccx
} as @ABIInfo;
for &a in atys.iter() {
arg_tys.push(LLVMType { cast: false, ty: a });
attrs.push(None);
}
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
attrs: attrs,
sret: sret
};
}
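To make the win32/macos return strategy above concrete (an illustrative reading of the code, with assumed struct layouts): structs whose allocation size is 1, 2, 4, or 8 bytes are returned directly in an integer of that width, while anything larger falls back to the hidden sret pointer.

// struct Pair { a: i32, b: i32 }            // 8 bytes  -> RetValue(Type::i64()), sret = false
// struct Triple { a: i32, b: i32, c: i32 }  // 12 bytes -> RetPointer,            sret = true
// any struct on the other OSes              //          -> RetPointer,            sret = true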

View File

@ -15,6 +15,7 @@ use lib::llvm::{llvm, Integer, Pointer, Float, Double};
use lib::llvm::{Struct, Array, Attribute};
use lib::llvm::{StructRetAttribute, ByValAttribute};
use middle::trans::cabi::*;
use middle::trans::context::CrateContext;
use middle::trans::type_::Type;
@ -331,10 +332,10 @@ fn llreg_ty(cls: &[RegClass]) -> Type {
return Type::struct_(tys, false);
}
fn x86_64_tys(atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
pub fn compute_abi_info(_ccx: &mut CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
fn x86_64_ty(ty: Type,
is_mem_cls: &fn(cls: &[RegClass]) -> bool,
attr: Attribute) -> (LLVMType, Option<Attribute>) {
@ -384,18 +385,3 @@ fn x86_64_tys(atys: &[Type],
sret: sret
};
}
enum X86_64_ABIInfo { X86_64_ABIInfo }
impl ABIInfo for X86_64_ABIInfo {
fn compute_info(&self,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
return x86_64_tys(atys, rty, ret_def);
}
}
pub fn abi_info() -> @ABIInfo {
return @X86_64_ABIInfo as @ABIInfo;
}

View File

@ -37,6 +37,7 @@ use middle::trans::inline;
use middle::trans::meth;
use middle::trans::monomorphize;
use middle::trans::type_of;
use middle::trans::foreign;
use middle::ty;
use middle::subst::Subst;
use middle::typeck;
@ -46,6 +47,7 @@ use util::ppaux::Repr;
use middle::trans::type_::Type;
use syntax::ast;
use syntax::abi::AbiSet;
use syntax::ast_map;
use syntax::oldvisit;
@ -240,20 +242,20 @@ pub fn trans_fn_ref_with_vtables(
type_params: &[ty::t], // values for fn's ty params
vtables: Option<typeck::vtable_res>) // vtables for the call
-> FnData {
//!
//
// Translates a reference to a fn/method item, monomorphizing and
// inlining as it goes.
//
// # Parameters
//
// - `bcx`: the current block where the reference to the fn occurs
// - `def_id`: def id of the fn or method item being referenced
// - `ref_id`: node id of the reference to the fn/method, if applicable.
// This parameter may be zero; but, if so, the resulting value may not
// have the right type, so it must be cast before being used.
// - `type_params`: values for each of the fn/method's type parameters
// - `vtables`: values for each bound on each of the type parameters
/*!
* Translates a reference to a fn/method item, monomorphizing and
* inlining as it goes.
*
* # Parameters
*
* - `bcx`: the current block where the reference to the fn occurs
* - `def_id`: def id of the fn or method item being referenced
* - `ref_id`: node id of the reference to the fn/method, if applicable.
* This parameter may be zero; but, if so, the resulting value may not
* have the right type, so it must be cast before being used.
* - `type_params`: values for each of the fn/method's type parameters
* - `vtables`: values for each bound on each of the type parameters
*/
let _icx = push_ctxt("trans_fn_ref_with_vtables");
let ccx = bcx.ccx();
@ -386,7 +388,7 @@ pub fn trans_fn_ref_with_vtables(
}
// Find the actual function pointer.
let val = {
let mut val = {
if def_id.crate == ast::LOCAL_CRATE {
// Internal reference.
get_item_val(ccx, def_id.node)
@ -396,6 +398,35 @@ pub fn trans_fn_ref_with_vtables(
}
};
// This is subtle and surprising, but sometimes we have to bitcast
// the resulting fn pointer. The reason has to do with external
// functions. If you have two crates that both bind the same C
// library, they may not use precisely the same types: for
// example, they will probably each declare their own structs,
// which are distinct types from LLVM's point of view (nominal
// types).
//
// Now, if those two crates are linked into an application, and
// they contain inlined code, you can wind up with a situation
// where both of those functions wind up being loaded into this
// application simultaneously. In that case, the same function
// (from LLVM's point of view) requires two types. But of course
// LLVM won't allow one function to have two types.
//
// What we currently do, therefore, is declare the function with
// one of the two types (whichever happens to come first) and then
// bitcast as needed when the function is referenced to make sure
// it has the type we expect.
//
// This can occur on either a crate-local or crate-external
// reference. It also occurs when testing libcore and in some
// other weird situations. Annoying.
let llty = type_of::type_of_fn_from_ty(ccx, fn_tpt.ty);
let llptrty = llty.ptr_to();
if val_ty(val) != llptrty {
val = BitCast(bcx, val, llptrty);
}
return FnData {llfn: val};
}
@ -543,16 +574,26 @@ pub fn body_contains_ret(body: &ast::Block) -> bool {
*cx
}
// See [Note-arg-mode]
pub fn trans_call_inner(in_cx: @mut Block,
call_info: Option<NodeInfo>,
fn_expr_ty: ty::t,
callee_ty: ty::t,
ret_ty: ty::t,
get_callee: &fn(@mut Block) -> Callee,
args: CallArgs,
dest: Option<expr::Dest>,
autoref_arg: AutorefArg)
-> Result {
/*!
* This behemoth of a function translates function calls.
* Unfortunately, in order to generate more efficient LLVM
* output at -O0, it has quite a complex signature (refactoring
* this into two functions seems like a good idea).
*
* In particular, for lang items, it is invoked with a dest of
* None, and
*/
do base::with_scope_result(in_cx, call_info, "call") |cx| {
let callee = get_callee(cx);
let mut bcx = callee.bcx;
@ -580,98 +621,125 @@ pub fn trans_call_inner(in_cx: @mut Block,
}
};
let llretslot = trans_ret_slot(bcx, fn_expr_ty, dest);
let abi = match ty::get(callee_ty).sty {
ty::ty_bare_fn(ref f) => f.abis,
_ => AbiSet::Rust()
};
let is_rust_fn =
abi.is_rust() ||
abi.is_intrinsic();
let mut llargs = ~[];
if !ty::type_is_immediate(bcx.tcx(), ret_ty) {
llargs.push(llretslot);
}
llargs.push(llenv);
bcx = trans_args(bcx, args, fn_expr_ty, autoref_arg, &mut llargs);
// Now that the arguments have finished evaluating, we need to revoke
// the cleanup for the self argument
match callee.data {
Method(d) => {
for &v in d.temp_cleanup.iter() {
revoke_clean(bcx, v);
}
// Generate a location to store the result. If the user does
// not care about the result, just make a stack slot.
let opt_llretslot = match dest {
None => {
assert!(!type_of::return_uses_outptr(in_cx.tcx(), ret_ty));
None
}
_ => {}
}
// Uncomment this to debug calls.
/*
printfln!("calling: %s", bcx.val_to_str(llfn));
for llarg in llargs.iter() {
printfln!("arg: %s", bcx.val_to_str(*llarg));
}
io::println("---");
*/
// If the block is terminated, then one or more of the args
// has type _|_. Since that means it diverges, the code for
// the call itself is unreachable.
let (llresult, new_bcx) = base::invoke(bcx, llfn, llargs);
bcx = new_bcx;
match dest {
None => { assert!(ty::type_is_immediate(bcx.tcx(), ret_ty)) }
Some(expr::SaveIn(dst)) => Some(dst),
Some(expr::Ignore) => {
// drop the value if it is not being saved.
if ty::type_needs_drop(bcx.tcx(), ret_ty) {
if ty::type_is_immediate(bcx.tcx(), ret_ty) {
let llscratchptr = alloc_ty(bcx, ret_ty, "__ret");
Store(bcx, llresult, llscratchptr);
bcx = glue::drop_ty(bcx, llscratchptr, ret_ty);
} else {
bcx = glue::drop_ty(bcx, llretslot, ret_ty);
if !ty::type_is_voidish(ret_ty) {
Some(alloc_ty(bcx, ret_ty, "__llret"))
} else {
unsafe {
Some(llvm::LLVMGetUndef(Type::nil().ptr_to().to_ref()))
}
}
}
Some(expr::SaveIn(lldest)) => {
// If this is an immediate, store into the result location.
// (If this was not an immediate, the result will already be
// directly written into the output slot.)
if ty::type_is_immediate(bcx.tcx(), ret_ty) {
Store(bcx, llresult, lldest);
}
};
let mut llresult = unsafe {
llvm::LLVMGetUndef(Type::nil().ptr_to().to_ref())
};
// The code below invokes the function, using either the Rust
// conventions (if it is a rust fn) or the native conventions
// (otherwise). The important part is that, when all is said
// and done, either the return value of the function will have been
// written in opt_llretslot (if it is Some) or `llresult` will be
// set appropriately (otherwise).
if is_rust_fn {
let mut llargs = ~[];
// Push the out-pointer if we use an out-pointer for this
// return type, otherwise push "undef".
if type_of::return_uses_outptr(in_cx.tcx(), ret_ty) {
llargs.push(opt_llretslot.unwrap());
}
// Push the environment.
llargs.push(llenv);
// Push the arguments.
bcx = trans_args(bcx, args, callee_ty,
autoref_arg, &mut llargs);
// Now that the arguments have finished evaluating, we
// need to revoke the cleanup for the self argument
match callee.data {
Method(d) => {
for &v in d.temp_cleanup.iter() {
revoke_clean(bcx, v);
}
}
_ => {}
}
// Invoke the actual rust fn and update bcx/llresult.
let (llret, b) = base::invoke(bcx, llfn, llargs);
bcx = b;
llresult = llret;
// If the Rust convention for this type is return via
// the return value, copy it into llretslot.
match opt_llretslot {
Some(llretslot) => {
if !type_of::return_uses_outptr(bcx.tcx(), ret_ty) &&
!ty::type_is_voidish(ret_ty)
{
Store(bcx, llret, llretslot);
}
}
None => {}
}
} else {
// Lang items are the only case where dest is None, and
// they are always Rust fns.
assert!(dest.is_some());
let mut llargs = ~[];
bcx = trans_args(bcx, args, callee_ty,
autoref_arg, &mut llargs);
bcx = foreign::trans_native_call(bcx, callee_ty,
llfn, opt_llretslot.unwrap(), llargs);
}
// If the caller doesn't care about the result of this fn call,
// drop the temporary slot we made.
match dest {
None => {
assert!(!type_of::return_uses_outptr(bcx.tcx(), ret_ty));
}
Some(expr::Ignore) => {
// drop the value if it is not being saved.
bcx = glue::drop_ty(bcx, opt_llretslot.unwrap(), ret_ty);
}
Some(expr::SaveIn(_)) => { }
}
if ty::type_is_bot(ret_ty) {
Unreachable(bcx);
}
rslt(bcx, llresult)
}
}
pub enum CallArgs<'self> {
ArgExprs(&'self [@ast::expr]),
ArgVals(&'self [ValueRef])
}
pub fn trans_ret_slot(bcx: @mut Block, fn_ty: ty::t, dest: Option<expr::Dest>)
-> ValueRef {
let retty = ty::ty_fn_ret(fn_ty);
match dest {
Some(expr::SaveIn(dst)) => dst,
_ => {
if ty::type_is_immediate(bcx.tcx(), retty) {
unsafe {
llvm::LLVMGetUndef(Type::nil().ptr_to().to_ref())
}
} else {
alloc_ty(bcx, retty, "__trans_ret_slot")
}
}
}
}
pub fn trans_args(cx: @mut Block,
args: CallArgs,
fn_ty: ty::t,
@ -795,7 +863,7 @@ pub fn trans_arg_expr(bcx: @mut Block,
if formal_arg_ty != arg_datum.ty {
// this could happen due to e.g. subtyping
let llformal_arg_ty = type_of::type_of_explicit_arg(ccx, &formal_arg_ty);
let llformal_arg_ty = type_of::type_of_explicit_arg(ccx, formal_arg_ty);
debug!("casting actual type (%s) to match formal (%s)",
bcx.val_to_str(val), bcx.llty_str(llformal_arg_ty));
val = PointerCast(bcx, val, llformal_arg_ty);

View File

@ -121,7 +121,7 @@ pub fn BuilderRef_res(B: BuilderRef) -> BuilderRef_res {
}
}
pub type ExternMap = HashMap<@str, ValueRef>;
pub type ExternMap = HashMap<~str, ValueRef>;
// Types used for llself.
pub struct ValSelfData {
@ -197,10 +197,10 @@ pub struct FunctionContext {
// outputting the resume instruction.
personality: Option<ValueRef>,
// True if this function has an immediate return value, false otherwise.
// If this is false, the llretptr will alias the first argument of the
// function.
has_immediate_return_value: bool,
// True if the caller expects this fn to use the out pointer to
// return. Either way, your code should write into llretptr, but if
// this value is false, llretptr will be a local alloca.
caller_expects_out_pointer: bool,
// Maps arguments to allocas created for them in llallocas.
llargs: @mut HashMap<ast::NodeId, ValueRef>,
@ -232,20 +232,20 @@ pub struct FunctionContext {
impl FunctionContext {
pub fn arg_pos(&self, arg: uint) -> uint {
if self.has_immediate_return_value {
arg + 1u
} else {
if self.caller_expects_out_pointer {
arg + 2u
} else {
arg + 1u
}
}
pub fn out_arg_pos(&self) -> uint {
assert!(!self.has_immediate_return_value);
assert!(self.caller_expects_out_pointer);
0u
}
pub fn env_arg_pos(&self) -> uint {
if !self.has_immediate_return_value {
if self.caller_expects_out_pointer {
1u
} else {
0u
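As a quick orientation on the renamed flag (a sketch inferred from the indices above, not text from the patch): when caller_expects_out_pointer is set, slot 0 holds the out-pointer and slot 1 the environment, so every explicit argument shifts by one.

// Implied argument layout of a Rust fn:
//   caller_expects_out_pointer == true :  (outptr, env, arg0, arg1, ...)   // arg_pos(i) = i + 2
//   caller_expects_out_pointer == false:  (env, arg0, arg1, ...)           // arg_pos(i) = i + 1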

View File

@ -190,12 +190,12 @@ pub fn scratch_datum(bcx: @mut Block, ty: ty::t, name: &str, zero: bool) -> Datu
pub fn appropriate_mode(tcx: ty::ctxt, ty: ty::t) -> DatumMode {
/*!
*
* Indicates the "appropriate" mode for this value,
* which is either by ref or by value, depending
* on whether type is immediate or not. */
* Indicates the "appropriate" mode for this value,
* which is either by ref or by value, depending
* on whether type is immediate or not.
*/
if ty::type_is_nil(ty) || ty::type_is_bot(ty) {
if ty::type_is_voidish(ty) {
ByValue
} else if ty::type_is_immediate(tcx, ty) {
ByValue
@ -271,7 +271,7 @@ impl Datum {
let _icx = push_ctxt("copy_to");
if ty::type_is_nil(self.ty) || ty::type_is_bot(self.ty) {
if ty::type_is_voidish(self.ty) {
return bcx;
}
@ -343,7 +343,7 @@ impl Datum {
debug!("move_to(self=%s, action=%?, dst=%s)",
self.to_str(bcx.ccx()), action, bcx.val_to_str(dst));
if ty::type_is_nil(self.ty) || ty::type_is_bot(self.ty) {
if ty::type_is_voidish(self.ty) {
return bcx;
}
@ -432,7 +432,7 @@ impl Datum {
*
* Yields the value itself. */
if ty::type_is_nil(self.ty) || ty::type_is_bot(self.ty) {
if ty::type_is_voidish(self.ty) {
C_nil()
} else {
match self.mode {
@ -469,7 +469,7 @@ impl Datum {
match self.mode {
ByRef(_) => self.val,
ByValue => {
if ty::type_is_nil(self.ty) || ty::type_is_bot(self.ty) {
if ty::type_is_voidish(self.ty) {
C_null(type_of::type_of(bcx.ccx(), self.ty).ptr_to())
} else {
let slot = alloc_ty(bcx, self.ty, "");

View File

@ -290,7 +290,7 @@ pub fn trans_to_datum(bcx: @mut Block, expr: @ast::expr) -> DatumBlock {
assert_eq!(datum.appropriate_mode(tcx), ByValue);
Store(bcx, datum.to_appropriate_llval(bcx), llfn);
let llenv = GEPi(bcx, scratch.val, [0u, abi::fn_field_box]);
Store(bcx, base::null_env_ptr(bcx), llenv);
Store(bcx, base::null_env_ptr(bcx.ccx()), llenv);
DatumBlock {bcx: bcx, datum: scratch}
}
@ -416,7 +416,7 @@ pub fn trans_into(bcx: @mut Block, expr: @ast::expr, dest: Dest) -> @mut Block {
debuginfo::update_source_pos(bcx.fcx, expr.id, expr.span);
let dest = {
if ty::type_is_nil(ty) || ty::type_is_bot(ty) {
if ty::type_is_voidish(ty) {
Ignore
} else {
dest
@ -507,7 +507,7 @@ fn trans_to_datum_unadjusted(bcx: @mut Block, expr: @ast::expr) -> DatumBlock {
ty::RvalueDpsExpr => {
let ty = expr_ty(bcx, expr);
if ty::type_is_nil(ty) || ty::type_is_bot(ty) {
if ty::type_is_voidish(ty) {
bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore);
return nil(bcx, ty);
} else {

File diff suppressed because it is too large

View File

@ -0,0 +1,503 @@
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use back::{abi};
use lib::llvm::{SequentiallyConsistent, Acquire, Release, Xchg};
use lib::llvm::{ValueRef, Pointer};
use lib;
use middle::trans::base::*;
use middle::trans::build::*;
use middle::trans::callee::*;
use middle::trans::common::*;
use middle::trans::datum::*;
use middle::trans::type_of::*;
use middle::trans::type_of;
use middle::trans::expr::Ignore;
use middle::trans::machine;
use middle::trans::glue;
use middle::ty::FnSig;
use middle::ty;
use syntax::ast;
use syntax::ast_map;
use syntax::attr;
use syntax::opt_vec;
use util::ppaux::{ty_to_str};
use middle::trans::machine::llsize_of;
use middle::trans::type_::Type;
pub fn trans_intrinsic(ccx: @mut CrateContext,
decl: ValueRef,
item: &ast::foreign_item,
path: ast_map::path,
substs: @param_substs,
attributes: &[ast::Attribute],
ref_id: Option<ast::NodeId>) {
debug!("trans_intrinsic(item.ident=%s)", ccx.sess.str_of(item.ident));
fn simple_llvm_intrinsic(bcx: @mut Block, name: &'static str, num_args: uint) {
assert!(num_args <= 4);
let mut args = [0 as ValueRef, ..4];
let first_real_arg = bcx.fcx.arg_pos(0u);
for i in range(0u, num_args) {
args[i] = get_param(bcx.fcx.llfn, first_real_arg + i);
}
let llfn = bcx.ccx().intrinsics.get_copy(&name);
Ret(bcx, Call(bcx, llfn, args.slice(0, num_args)));
}
fn with_overflow_instrinsic(bcx: @mut Block, name: &'static str) {
let first_real_arg = bcx.fcx.arg_pos(0u);
let a = get_param(bcx.fcx.llfn, first_real_arg);
let b = get_param(bcx.fcx.llfn, first_real_arg + 1);
let llfn = bcx.ccx().intrinsics.get_copy(&name);
// convert `i1` to a `bool`, and write to the out parameter
let val = Call(bcx, llfn, [a, b]);
let result = ExtractValue(bcx, val, 0);
let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool());
let retptr = get_param(bcx.fcx.llfn, bcx.fcx.out_arg_pos());
let ret = Load(bcx, retptr);
let ret = InsertValue(bcx, ret, result, 0);
let ret = InsertValue(bcx, ret, overflow, 1);
Store(bcx, ret, retptr);
RetVoid(bcx)
}
fn memcpy_intrinsic(bcx: @mut Block, name: &'static str, tp_ty: ty::t, sizebits: u8) {
let ccx = bcx.ccx();
let lltp_ty = type_of::type_of(ccx, tp_ty);
let align = C_i32(machine::llalign_of_min(ccx, lltp_ty) as i32);
let size = match sizebits {
32 => C_i32(machine::llsize_of_real(ccx, lltp_ty) as i32),
64 => C_i64(machine::llsize_of_real(ccx, lltp_ty) as i64),
_ => ccx.sess.fatal("Invalid value for sizebits")
};
let decl = bcx.fcx.llfn;
let first_real_arg = bcx.fcx.arg_pos(0u);
let dst_ptr = PointerCast(bcx, get_param(decl, first_real_arg), Type::i8p());
let src_ptr = PointerCast(bcx, get_param(decl, first_real_arg + 1), Type::i8p());
let count = get_param(decl, first_real_arg + 2);
let volatile = C_i1(false);
let llfn = bcx.ccx().intrinsics.get_copy(&name);
Call(bcx, llfn, [dst_ptr, src_ptr, Mul(bcx, size, count), align, volatile]);
RetVoid(bcx);
}
fn memset_intrinsic(bcx: @mut Block, name: &'static str, tp_ty: ty::t, sizebits: u8) {
let ccx = bcx.ccx();
let lltp_ty = type_of::type_of(ccx, tp_ty);
let align = C_i32(machine::llalign_of_min(ccx, lltp_ty) as i32);
let size = match sizebits {
32 => C_i32(machine::llsize_of_real(ccx, lltp_ty) as i32),
64 => C_i64(machine::llsize_of_real(ccx, lltp_ty) as i64),
_ => ccx.sess.fatal("Invalid value for sizebits")
};
let decl = bcx.fcx.llfn;
let first_real_arg = bcx.fcx.arg_pos(0u);
let dst_ptr = PointerCast(bcx, get_param(decl, first_real_arg), Type::i8p());
let val = get_param(decl, first_real_arg + 1);
let count = get_param(decl, first_real_arg + 2);
let volatile = C_i1(false);
let llfn = bcx.ccx().intrinsics.get_copy(&name);
Call(bcx, llfn, [dst_ptr, val, Mul(bcx, size, count), align, volatile]);
RetVoid(bcx);
}
fn count_zeros_intrinsic(bcx: @mut Block, name: &'static str) {
let x = get_param(bcx.fcx.llfn, bcx.fcx.arg_pos(0u));
let y = C_i1(false);
let llfn = bcx.ccx().intrinsics.get_copy(&name);
Ret(bcx, Call(bcx, llfn, [x, y]));
}
let output_type = ty::ty_fn_ret(ty::node_id_to_type(ccx.tcx, item.id));
let fcx = new_fn_ctxt_w_id(ccx,
path,
decl,
item.id,
output_type,
true,
Some(substs),
None,
Some(item.span));
set_always_inline(fcx.llfn);
// Set the fixed stack segment flag if necessary.
if attr::contains_name(attributes, "fixed_stack_segment") {
set_fixed_stack_segment(fcx.llfn);
}
let mut bcx = fcx.entry_bcx.unwrap();
let first_real_arg = fcx.arg_pos(0u);
let nm = ccx.sess.str_of(item.ident);
let name = nm.as_slice();
// This requires that atomic intrinsics follow a specific naming pattern:
// "atomic_<operation>[_<ordering>], and no ordering means SeqCst
if name.starts_with("atomic_") {
let split : ~[&str] = name.split_iter('_').collect();
assert!(split.len() >= 2, "Atomic intrinsic not correct format");
let order = if split.len() == 2 {
lib::llvm::SequentiallyConsistent
} else {
match split[2] {
"relaxed" => lib::llvm::Monotonic,
"acq" => lib::llvm::Acquire,
"rel" => lib::llvm::Release,
"acqrel" => lib::llvm::AcquireRelease,
_ => ccx.sess.fatal("Unknown ordering in atomic intrinsic")
}
};
match split[1] {
"cxchg" => {
let old = AtomicCmpXchg(bcx, get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
get_param(decl, first_real_arg + 2u),
order);
Ret(bcx, old);
}
"load" => {
let old = AtomicLoad(bcx, get_param(decl, first_real_arg),
order);
Ret(bcx, old);
}
"store" => {
AtomicStore(bcx, get_param(decl, first_real_arg + 1u),
get_param(decl, first_real_arg),
order);
RetVoid(bcx);
}
"fence" => {
AtomicFence(bcx, order);
RetVoid(bcx);
}
op => {
// These are all AtomicRMW ops
let atom_op = match op {
"xchg" => lib::llvm::Xchg,
"xadd" => lib::llvm::Add,
"xsub" => lib::llvm::Sub,
"and" => lib::llvm::And,
"nand" => lib::llvm::Nand,
"or" => lib::llvm::Or,
"xor" => lib::llvm::Xor,
"max" => lib::llvm::Max,
"min" => lib::llvm::Min,
"umax" => lib::llvm::UMax,
"umin" => lib::llvm::UMin,
_ => ccx.sess.fatal("Unknown atomic operation")
};
let old = AtomicRMW(bcx, atom_op, get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
order);
Ret(bcx, old);
}
}
fcx.cleanup();
return;
}
match name {
"size_of" => {
let tp_ty = substs.tys[0];
let lltp_ty = type_of::type_of(ccx, tp_ty);
Ret(bcx, C_uint(ccx, machine::llsize_of_real(ccx, lltp_ty)));
}
"move_val" => {
// Create a datum reflecting the value being moved.
// Use `appropriate_mode` so that the datum is by ref
// if the value is non-immediate. Note that, with
// intrinsics, there are no argument cleanups to
// concern ourselves with.
let tp_ty = substs.tys[0];
let mode = appropriate_mode(ccx.tcx, tp_ty);
let src = Datum {val: get_param(decl, first_real_arg + 1u),
ty: tp_ty, mode: mode};
bcx = src.move_to(bcx, DROP_EXISTING,
get_param(decl, first_real_arg));
RetVoid(bcx);
}
"move_val_init" => {
// See comments for `"move_val"`.
let tp_ty = substs.tys[0];
let mode = appropriate_mode(ccx.tcx, tp_ty);
let src = Datum {val: get_param(decl, first_real_arg + 1u),
ty: tp_ty, mode: mode};
bcx = src.move_to(bcx, INIT, get_param(decl, first_real_arg));
RetVoid(bcx);
}
"min_align_of" => {
let tp_ty = substs.tys[0];
let lltp_ty = type_of::type_of(ccx, tp_ty);
Ret(bcx, C_uint(ccx, machine::llalign_of_min(ccx, lltp_ty)));
}
"pref_align_of"=> {
let tp_ty = substs.tys[0];
let lltp_ty = type_of::type_of(ccx, tp_ty);
Ret(bcx, C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty)));
}
"get_tydesc" => {
let tp_ty = substs.tys[0];
let static_ti = get_tydesc(ccx, tp_ty);
glue::lazily_emit_all_tydesc_glue(ccx, static_ti);
// FIXME (#3730): ideally this shouldn't need a cast,
// but there's a circularity between translating rust types to llvm
// types and having a tydesc type available. So I can't directly access
// the llvm type of intrinsic::TyDesc struct.
let userland_tydesc_ty = type_of::type_of(ccx, output_type);
let td = PointerCast(bcx, static_ti.tydesc, userland_tydesc_ty);
Ret(bcx, td);
}
"init" => {
let tp_ty = substs.tys[0];
let lltp_ty = type_of::type_of(ccx, tp_ty);
match bcx.fcx.llretptr {
Some(ptr) => { Store(bcx, C_null(lltp_ty), ptr); RetVoid(bcx); }
None if ty::type_is_nil(tp_ty) => RetVoid(bcx),
None => Ret(bcx, C_null(lltp_ty)),
}
}
"uninit" => {
// Do nothing, this is effectively a no-op
let retty = substs.tys[0];
if ty::type_is_immediate(ccx.tcx, retty) && !ty::type_is_nil(retty) {
unsafe {
Ret(bcx, lib::llvm::llvm::LLVMGetUndef(type_of(ccx, retty).to_ref()));
}
} else {
RetVoid(bcx)
}
}
"forget" => {
RetVoid(bcx);
}
"transmute" => {
let (in_type, out_type) = (substs.tys[0], substs.tys[1]);
let llintype = type_of::type_of(ccx, in_type);
let llouttype = type_of::type_of(ccx, out_type);
let in_type_size = machine::llbitsize_of_real(ccx, llintype);
let out_type_size = machine::llbitsize_of_real(ccx, llouttype);
if in_type_size != out_type_size {
let sp = match ccx.tcx.items.get_copy(&ref_id.unwrap()) {
ast_map::node_expr(e) => e.span,
_ => fail!("transmute has non-expr arg"),
};
let pluralize = |n| if 1u == n { "" } else { "s" };
ccx.sess.span_fatal(sp,
fmt!("transmute called on types with \
different sizes: %s (%u bit%s) to \
%s (%u bit%s)",
ty_to_str(ccx.tcx, in_type),
in_type_size,
pluralize(in_type_size),
ty_to_str(ccx.tcx, out_type),
out_type_size,
pluralize(out_type_size)));
}
if !ty::type_is_voidish(out_type) {
let llsrcval = get_param(decl, first_real_arg);
if ty::type_is_immediate(ccx.tcx, in_type) {
match fcx.llretptr {
Some(llretptr) => {
Store(bcx, llsrcval, PointerCast(bcx, llretptr, llintype.ptr_to()));
RetVoid(bcx);
}
None => match (llintype.kind(), llouttype.kind()) {
(Pointer, other) | (other, Pointer) if other != Pointer => {
let tmp = Alloca(bcx, llouttype, "");
Store(bcx, llsrcval, PointerCast(bcx, tmp, llintype.ptr_to()));
Ret(bcx, Load(bcx, tmp));
}
_ => Ret(bcx, BitCast(bcx, llsrcval, llouttype))
}
}
} else if ty::type_is_immediate(ccx.tcx, out_type) {
let llsrcptr = PointerCast(bcx, llsrcval, llouttype.ptr_to());
Ret(bcx, Load(bcx, llsrcptr));
} else {
// NB: Do not use a Load and Store here. This causes massive
// code bloat when `transmute` is used on large structural
// types.
let lldestptr = fcx.llretptr.unwrap();
let lldestptr = PointerCast(bcx, lldestptr, Type::i8p());
let llsrcptr = PointerCast(bcx, llsrcval, Type::i8p());
let llsize = llsize_of(ccx, llintype);
call_memcpy(bcx, lldestptr, llsrcptr, llsize, 1);
RetVoid(bcx);
};
} else {
RetVoid(bcx);
}
}
"needs_drop" => {
let tp_ty = substs.tys[0];
Ret(bcx, C_bool(ty::type_needs_drop(ccx.tcx, tp_ty)));
}
"contains_managed" => {
let tp_ty = substs.tys[0];
Ret(bcx, C_bool(ty::type_contents(ccx.tcx, tp_ty).contains_managed()));
}
"visit_tydesc" => {
let td = get_param(decl, first_real_arg);
let visitor = get_param(decl, first_real_arg + 1u);
let td = PointerCast(bcx, td, ccx.tydesc_type.ptr_to());
glue::call_tydesc_glue_full(bcx, visitor, td,
abi::tydesc_field_visit_glue, None);
RetVoid(bcx);
}
"frame_address" => {
let frameaddress = ccx.intrinsics.get_copy(& &"llvm.frameaddress");
let frameaddress_val = Call(bcx, frameaddress, [C_i32(0i32)]);
let star_u8 = ty::mk_imm_ptr(
bcx.tcx(),
ty::mk_mach_uint(ast::ty_u8));
let fty = ty::mk_closure(bcx.tcx(), ty::ClosureTy {
purity: ast::impure_fn,
sigil: ast::BorrowedSigil,
onceness: ast::Many,
region: ty::re_bound(ty::br_anon(0)),
bounds: ty::EmptyBuiltinBounds(),
sig: FnSig {
bound_lifetime_names: opt_vec::Empty,
inputs: ~[ star_u8 ],
output: ty::mk_nil()
}
});
let datum = Datum {val: get_param(decl, first_real_arg),
mode: ByRef(ZeroMem), ty: fty};
let arg_vals = ~[frameaddress_val];
bcx = trans_call_inner(
bcx, None, fty, ty::mk_nil(),
|bcx| Callee {bcx: bcx, data: Closure(datum)},
ArgVals(arg_vals), Some(Ignore), DontAutorefArg).bcx;
RetVoid(bcx);
}
"morestack_addr" => {
// XXX This is a hack to grab the address of this particular
// native function. There should be a general in-language
// way to do this
let llfty = type_of_rust_fn(bcx.ccx(), [], ty::mk_nil());
let morestack_addr = decl_cdecl_fn(
bcx.ccx().llmod, "__morestack", llfty);
let morestack_addr = PointerCast(bcx, morestack_addr, Type::nil().ptr_to());
Ret(bcx, morestack_addr);
}
"offset" => {
let ptr = get_param(decl, first_real_arg);
let offset = get_param(decl, first_real_arg + 1);
Ret(bcx, GEP(bcx, ptr, [offset]));
}
"offset_inbounds" => {
let ptr = get_param(decl, first_real_arg);
let offset = get_param(decl, first_real_arg + 1);
Ret(bcx, InBoundsGEP(bcx, ptr, [offset]));
}
"memcpy32" => memcpy_intrinsic(bcx, "llvm.memcpy.p0i8.p0i8.i32", substs.tys[0], 32),
"memcpy64" => memcpy_intrinsic(bcx, "llvm.memcpy.p0i8.p0i8.i64", substs.tys[0], 64),
"memmove32" => memcpy_intrinsic(bcx, "llvm.memmove.p0i8.p0i8.i32", substs.tys[0], 32),
"memmove64" => memcpy_intrinsic(bcx, "llvm.memmove.p0i8.p0i8.i64", substs.tys[0], 64),
"memset32" => memset_intrinsic(bcx, "llvm.memset.p0i8.i32", substs.tys[0], 32),
"memset64" => memset_intrinsic(bcx, "llvm.memset.p0i8.i64", substs.tys[0], 64),
"sqrtf32" => simple_llvm_intrinsic(bcx, "llvm.sqrt.f32", 1),
"sqrtf64" => simple_llvm_intrinsic(bcx, "llvm.sqrt.f64", 1),
"powif32" => simple_llvm_intrinsic(bcx, "llvm.powi.f32", 2),
"powif64" => simple_llvm_intrinsic(bcx, "llvm.powi.f64", 2),
"sinf32" => simple_llvm_intrinsic(bcx, "llvm.sin.f32", 1),
"sinf64" => simple_llvm_intrinsic(bcx, "llvm.sin.f64", 1),
"cosf32" => simple_llvm_intrinsic(bcx, "llvm.cos.f32", 1),
"cosf64" => simple_llvm_intrinsic(bcx, "llvm.cos.f64", 1),
"powf32" => simple_llvm_intrinsic(bcx, "llvm.pow.f32", 2),
"powf64" => simple_llvm_intrinsic(bcx, "llvm.pow.f64", 2),
"expf32" => simple_llvm_intrinsic(bcx, "llvm.exp.f32", 1),
"expf64" => simple_llvm_intrinsic(bcx, "llvm.exp.f64", 1),
"exp2f32" => simple_llvm_intrinsic(bcx, "llvm.exp2.f32", 1),
"exp2f64" => simple_llvm_intrinsic(bcx, "llvm.exp2.f64", 1),
"logf32" => simple_llvm_intrinsic(bcx, "llvm.log.f32", 1),
"logf64" => simple_llvm_intrinsic(bcx, "llvm.log.f64", 1),
"log10f32" => simple_llvm_intrinsic(bcx, "llvm.log10.f32", 1),
"log10f64" => simple_llvm_intrinsic(bcx, "llvm.log10.f64", 1),
"log2f32" => simple_llvm_intrinsic(bcx, "llvm.log2.f32", 1),
"log2f64" => simple_llvm_intrinsic(bcx, "llvm.log2.f64", 1),
"fmaf32" => simple_llvm_intrinsic(bcx, "llvm.fma.f32", 3),
"fmaf64" => simple_llvm_intrinsic(bcx, "llvm.fma.f64", 3),
"fabsf32" => simple_llvm_intrinsic(bcx, "llvm.fabs.f32", 1),
"fabsf64" => simple_llvm_intrinsic(bcx, "llvm.fabs.f64", 1),
"floorf32" => simple_llvm_intrinsic(bcx, "llvm.floor.f32", 1),
"floorf64" => simple_llvm_intrinsic(bcx, "llvm.floor.f64", 1),
"ceilf32" => simple_llvm_intrinsic(bcx, "llvm.ceil.f32", 1),
"ceilf64" => simple_llvm_intrinsic(bcx, "llvm.ceil.f64", 1),
"truncf32" => simple_llvm_intrinsic(bcx, "llvm.trunc.f32", 1),
"truncf64" => simple_llvm_intrinsic(bcx, "llvm.trunc.f64", 1),
"ctpop8" => simple_llvm_intrinsic(bcx, "llvm.ctpop.i8", 1),
"ctpop16" => simple_llvm_intrinsic(bcx, "llvm.ctpop.i16", 1),
"ctpop32" => simple_llvm_intrinsic(bcx, "llvm.ctpop.i32", 1),
"ctpop64" => simple_llvm_intrinsic(bcx, "llvm.ctpop.i64", 1),
"ctlz8" => count_zeros_intrinsic(bcx, "llvm.ctlz.i8"),
"ctlz16" => count_zeros_intrinsic(bcx, "llvm.ctlz.i16"),
"ctlz32" => count_zeros_intrinsic(bcx, "llvm.ctlz.i32"),
"ctlz64" => count_zeros_intrinsic(bcx, "llvm.ctlz.i64"),
"cttz8" => count_zeros_intrinsic(bcx, "llvm.cttz.i8"),
"cttz16" => count_zeros_intrinsic(bcx, "llvm.cttz.i16"),
"cttz32" => count_zeros_intrinsic(bcx, "llvm.cttz.i32"),
"cttz64" => count_zeros_intrinsic(bcx, "llvm.cttz.i64"),
"bswap16" => simple_llvm_intrinsic(bcx, "llvm.bswap.i16", 1),
"bswap32" => simple_llvm_intrinsic(bcx, "llvm.bswap.i32", 1),
"bswap64" => simple_llvm_intrinsic(bcx, "llvm.bswap.i64", 1),
"i8_add_with_overflow" => with_overflow_instrinsic(bcx, "llvm.sadd.with.overflow.i8"),
"i16_add_with_overflow" => with_overflow_instrinsic(bcx, "llvm.sadd.with.overflow.i16"),
"i32_add_with_overflow" => with_overflow_instrinsic(bcx, "llvm.sadd.with.overflow.i32"),
"i64_add_with_overflow" => with_overflow_instrinsic(bcx, "llvm.sadd.with.overflow.i64"),
"u8_add_with_overflow" => with_overflow_instrinsic(bcx, "llvm.uadd.with.overflow.i8"),
"u16_add_with_overflow" => with_overflow_instrinsic(bcx, "llvm.uadd.with.overflow.i16"),
"u32_add_with_overflow" => with_overflow_instrinsic(bcx, "llvm.uadd.with.overflow.i32"),
"u64_add_with_overflow" => with_overflow_instrinsic(bcx, "llvm.uadd.with.overflow.i64"),
"i8_sub_with_overflow" => with_overflow_instrinsic(bcx, "llvm.ssub.with.overflow.i8"),
"i16_sub_with_overflow" => with_overflow_instrinsic(bcx, "llvm.ssub.with.overflow.i16"),
"i32_sub_with_overflow" => with_overflow_instrinsic(bcx, "llvm.ssub.with.overflow.i32"),
"i64_sub_with_overflow" => with_overflow_instrinsic(bcx, "llvm.ssub.with.overflow.i64"),
"u8_sub_with_overflow" => with_overflow_instrinsic(bcx, "llvm.usub.with.overflow.i8"),
"u16_sub_with_overflow" => with_overflow_instrinsic(bcx, "llvm.usub.with.overflow.i16"),
"u32_sub_with_overflow" => with_overflow_instrinsic(bcx, "llvm.usub.with.overflow.i32"),
"u64_sub_with_overflow" => with_overflow_instrinsic(bcx, "llvm.usub.with.overflow.i64"),
"i8_mul_with_overflow" => with_overflow_instrinsic(bcx, "llvm.smul.with.overflow.i8"),
"i16_mul_with_overflow" => with_overflow_instrinsic(bcx, "llvm.smul.with.overflow.i16"),
"i32_mul_with_overflow" => with_overflow_instrinsic(bcx, "llvm.smul.with.overflow.i32"),
"i64_mul_with_overflow" => with_overflow_instrinsic(bcx, "llvm.smul.with.overflow.i64"),
"u8_mul_with_overflow" => with_overflow_instrinsic(bcx, "llvm.umul.with.overflow.i8"),
"u16_mul_with_overflow" => with_overflow_instrinsic(bcx, "llvm.umul.with.overflow.i16"),
"u32_mul_with_overflow" => with_overflow_instrinsic(bcx, "llvm.umul.with.overflow.i32"),
"u64_mul_with_overflow" => with_overflow_instrinsic(bcx, "llvm.umul.with.overflow.i64"),
_ => {
// Could we make this an enum rather than a string? does it get
// checked earlier?
ccx.sess.span_bug(item.span, "unknown intrinsic");
}
}
fcx.cleanup();
}
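A small worked example of the atomic naming convention handled above (names assembled from the match arms shown; illustrative only):

// "atomic_xchg"      -> split = ["atomic", "xchg"]          -> Xchg RMW,         SequentiallyConsistent
// "atomic_cxchg_acq" -> split = ["atomic", "cxchg", "acq"]  -> compare-exchange, Acquire
// "atomic_store_rel" -> split = ["atomic", "store", "rel"]  -> store,            Release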

View File

@ -35,6 +35,7 @@ pub mod cabi_x86_64;
pub mod cabi_arm;
pub mod cabi_mips;
pub mod foreign;
pub mod intrinsic;
pub mod reflect;
pub mod debuginfo;
pub mod type_use;

View File

@ -19,12 +19,12 @@ use middle::trans::base::{get_item_val, no_self};
use middle::trans::base;
use middle::trans::common::*;
use middle::trans::datum;
use middle::trans::foreign;
use middle::trans::machine;
use middle::trans::meth;
use middle::trans::type_of::type_of_fn_from_ty;
use middle::trans::type_of;
use middle::trans::type_use;
use middle::trans::intrinsic;
use middle::ty;
use middle::ty::{FnSig};
use middle::typeck;
@ -239,8 +239,8 @@ pub fn monomorphic_fn(ccx: @mut CrateContext,
}
ast_map::node_foreign_item(i, _, _, _) => {
let d = mk_lldecl();
foreign::trans_intrinsic(ccx, d, i, pt, psubsts, i.attrs,
ref_id);
intrinsic::trans_intrinsic(ccx, d, i, pt, psubsts, i.attrs,
ref_id);
d
}
ast_map::node_variant(ref v, enum_item, _) => {

View File

@ -284,7 +284,7 @@ impl Reflector {
sub_path,
"get_disr");
let llfty = type_of_fn(ccx, [opaqueptrty], ty::mk_int());
let llfty = type_of_rust_fn(ccx, [opaqueptrty], ty::mk_int());
let llfdecl = decl_internal_cdecl_fn(ccx.llmod, sym, llfty);
let fcx = new_fn_ctxt(ccx,
~[],

View File

@ -11,6 +11,7 @@
use middle::trans::adt;
use middle::trans::common::*;
use middle::trans::foreign;
use middle::ty;
use util::ppaux;
@ -19,12 +20,16 @@ use middle::trans::type_::Type;
use syntax::ast;
use syntax::opt_vec;
pub fn arg_is_indirect(ccx: &CrateContext, arg_ty: &ty::t) -> bool {
!ty::type_is_immediate(ccx.tcx, *arg_ty)
pub fn arg_is_indirect(ccx: &CrateContext, arg_ty: ty::t) -> bool {
!ty::type_is_immediate(ccx.tcx, arg_ty)
}
pub fn type_of_explicit_arg(ccx: &mut CrateContext, arg_ty: &ty::t) -> Type {
let llty = type_of(ccx, *arg_ty);
pub fn return_uses_outptr(tcx: ty::ctxt, ty: ty::t) -> bool {
!ty::type_is_immediate(tcx, ty)
}
pub fn type_of_explicit_arg(ccx: &mut CrateContext, arg_ty: ty::t) -> Type {
let llty = type_of(ccx, arg_ty);
if arg_is_indirect(ccx, arg_ty) {
llty.ptr_to()
} else {
@ -34,17 +39,19 @@ pub fn type_of_explicit_arg(ccx: &mut CrateContext, arg_ty: &ty::t) -> Type {
pub fn type_of_explicit_args(ccx: &mut CrateContext,
inputs: &[ty::t]) -> ~[Type] {
inputs.map(|arg_ty| type_of_explicit_arg(ccx, arg_ty))
inputs.map(|&arg_ty| type_of_explicit_arg(ccx, arg_ty))
}
pub fn type_of_fn(cx: &mut CrateContext, inputs: &[ty::t], output: ty::t) -> Type {
pub fn type_of_rust_fn(cx: &mut CrateContext,
inputs: &[ty::t],
output: ty::t) -> Type {
let mut atys: ~[Type] = ~[];
// Arg 0: Output pointer.
// (if the output type is non-immediate)
let output_is_immediate = ty::type_is_immediate(cx.tcx, output);
let use_out_pointer = return_uses_outptr(cx.tcx, output);
let lloutputtype = type_of(cx, output);
if !output_is_immediate {
if use_out_pointer {
atys.push(lloutputtype.ptr_to());
}
@ -55,7 +62,7 @@ pub fn type_of_fn(cx: &mut CrateContext, inputs: &[ty::t], output: ty::t) -> Typ
atys.push_all(type_of_explicit_args(cx, inputs));
// Use the output as the actual return value if it's immediate.
if output_is_immediate && !ty::type_is_nil(output) {
if !use_out_pointer && !ty::type_is_voidish(output) {
Type::func(atys, &lloutputtype)
} else {
Type::func(atys, &Type::void())
@ -64,13 +71,21 @@ pub fn type_of_fn(cx: &mut CrateContext, inputs: &[ty::t], output: ty::t) -> Typ
// Given a function type and a count of ty params, construct an llvm type
pub fn type_of_fn_from_ty(cx: &mut CrateContext, fty: ty::t) -> Type {
match ty::get(fty).sty {
ty::ty_closure(ref f) => type_of_fn(cx, f.sig.inputs, f.sig.output),
ty::ty_bare_fn(ref f) => type_of_fn(cx, f.sig.inputs, f.sig.output),
return match ty::get(fty).sty {
ty::ty_closure(ref f) => {
type_of_rust_fn(cx, f.sig.inputs, f.sig.output)
}
ty::ty_bare_fn(ref f) => {
if f.abis.is_rust() || f.abis.is_intrinsic() {
type_of_rust_fn(cx, f.sig.inputs, f.sig.output)
} else {
foreign::lltype_for_foreign_fn(cx, fty)
}
}
_ => {
cx.sess.bug("type_of_fn_from_ty given non-closure, non-bare-fn")
}
}
};
}
// A "sizing type" is an LLVM type, the size and alignment of which are
@ -250,7 +265,9 @@ pub fn type_of(cx: &mut CrateContext, t: ty::t) -> Type {
Type::array(&type_of(cx, mt.ty), n as u64)
}
ty::ty_bare_fn(_) => type_of_fn_from_ty(cx, t).ptr_to(),
ty::ty_bare_fn(_) => {
type_of_fn_from_ty(cx, t).ptr_to()
}
ty::ty_closure(_) => {
let ty = type_of_fn_from_ty(cx, t);
Type::func_pair(cx, &ty)
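For illustration (a sketch, not from the patch): with return_uses_outptr selecting the convention, type_of_rust_fn yields signatures along these lines, where T_* stand for the LLVM translations of the Rust types and env is the opaque environment pointer.

// fn(int) -> int        (immediate return)   -> Type::func([env, T_int], &T_int)
// fn(int) -> BigStruct  (out-pointer return) -> Type::func([T_big.ptr_to(), env, T_int], &Type::void())
// fn(int) -> ()         ("voidish" return)   -> Type::func([env, T_int], &Type::void())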

View File

@ -1545,6 +1545,11 @@ pub fn subst(cx: ctxt,
// Type utilities
pub fn type_is_voidish(ty: t) -> bool {
//! "nil" and "bot" are void types in that they represent 0 bits of information
type_is_nil(ty) || type_is_bot(ty)
}
pub fn type_is_nil(ty: t) -> bool { get(ty).sty == ty_nil }
pub fn type_is_bot(ty: t) -> bool {

View File

@ -68,6 +68,7 @@ pub mod middle {
pub mod reachable;
pub mod graph;
pub mod cfg;
pub mod stack_check;
}
pub mod front {

View File

@ -862,3 +862,15 @@ impl UserString for ty::t {
ty_to_str(tcx, *self)
}
}
impl Repr for AbiSet {
fn repr(&self, _tcx: ctxt) -> ~str {
self.to_str()
}
}
impl UserString for AbiSet {
fn user_string(&self, _tcx: ctxt) -> ~str {
self.to_str()
}
}

View File

@ -8,15 +8,15 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//error-pattern:libc::c_int or libc::c_long should be used
#[forbid(ctypes)];
mod xx {
extern {
pub fn strlen(str: *u8) -> uint;
pub fn foo(x: int, y: uint);
pub fn strlen(str: *u8) -> uint; //~ ERROR found rust type `uint`
pub fn foo(x: int, y: uint); //~ ERROR found rust type `int`
//~^ ERROR found rust type `uint`
}
}
fn main() {
// let it fail to verify warning message
fail!()
}

View File

@ -8,9 +8,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-win32 #5745
// xfail-macos Broken on mac i686
struct TwoU16s {
one: u16, two: u16
}

View File

@ -8,9 +8,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-win32 #5745
// xfail-macos Broken on mac i686
struct TwoU8s {
one: u8, two: u8
}

View File

@ -8,6 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-test - FIXME(#8538) some kind of problem linking induced by extern "C" fns that I do not understand
// xfail-fast - windows doesn't like this
// Smallest hello world with no runtime