rustc: Make build_wrap_ret compile on Windows and unrevert ABI patch

This reverts commit 625405562c.
Tim Chevalier 2013-01-25 14:56:56 -08:00
parent 198b513fc0
commit 41adf9d8ef
4 changed files with 660 additions and 605 deletions
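The unreverted ABI patch pulls the x86_64 classification code out of middle::trans::foreign into two new modules: cabi, which defines the ABIInfo trait together with LLVMType and FnType, and cabi_x86_64, which implements it for the x86_64 C ABI. foreign.rs then keeps a single cabi::FnType per foreign signature instead of the old Option<x86_64_tys> record, and selects the implementation by target architecture through abi_info. A minimal sketch of the resulting call pattern, using only names that appear in this diff (ccx, llargtys, llretty, ret_def, lname and cc are assumed to be in scope as they are in foreign.rs):

    let fn_ty = abi_info(ccx.sess.targ_cfg.arch).
        compute_info(llargtys, llretty, ret_def);
    // Declare the foreign function with the ABI-adjusted LLVM type.
    let llfn = do fn_ty.decl_fn |fnty| {
        decl_fn(ccx.llmod, /*bad*/copy lname, cc, fnty)
    };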

View File

@@ -0,0 +1,215 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use lib::llvm::{llvm, TypeRef, ValueRef, Attribute, Void};
use middle::trans::base::*;
use middle::trans::build::*;
use middle::trans::common::*;
export ABIInfo, LLVMType, FnType;
export llvm_abi_info;
trait ABIInfo {
fn compute_info(&self,
atys: &[TypeRef],
rty: TypeRef,
ret_def: bool) -> FnType;
}
struct LLVMType {
cast: bool,
ty: TypeRef
}
struct FnType {
arg_tys: ~[LLVMType],
ret_ty: LLVMType,
attrs: ~[Option<Attribute>],
sret: bool
}
impl FnType {
fn decl_fn(&self, decl: fn(fnty: TypeRef) -> ValueRef) -> ValueRef {
let atys = vec::map(self.arg_tys, |t| t.ty);
let rty = self.ret_ty.ty;
let fnty = T_fn(atys, rty);
let llfn = decl(fnty);
for vec::eachi(self.attrs) |i, a| {
match *a {
option::Some(attr) => {
unsafe {
let llarg = get_param(llfn, i);
llvm::LLVMAddAttribute(llarg, attr as c_uint);
}
}
_ => ()
}
}
return llfn;
}
fn build_shim_args(&self, bcx: block,
arg_tys: &[TypeRef],
llargbundle: ValueRef) -> ~[ValueRef] {
let mut atys = /*bad*/copy self.arg_tys;
let mut attrs = /*bad*/copy self.attrs;
let mut llargvals = ~[];
let mut i = 0u;
let n = vec::len(arg_tys);
if self.sret {
let llretptr = GEPi(bcx, llargbundle, [0u, n]);
let llretloc = Load(bcx, llretptr);
llargvals = ~[llretloc];
atys = vec::tail(atys);
attrs = vec::tail(attrs);
}
while i < n {
let llargval = if atys[i].cast {
let arg_ptr = GEPi(bcx, llargbundle, [0u, i]);
let arg_ptr = BitCast(bcx, arg_ptr, T_ptr(atys[i].ty));
Load(bcx, arg_ptr)
} else if attrs[i].is_some() {
GEPi(bcx, llargbundle, [0u, i])
} else {
load_inbounds(bcx, llargbundle, [0u, i])
};
llargvals.push(llargval);
i += 1u;
}
return llargvals;
}
fn build_shim_ret(&self, bcx: block,
arg_tys: &[TypeRef], ret_def: bool,
llargbundle: ValueRef, llretval: ValueRef) {
for vec::eachi(self.attrs) |i, a| {
match *a {
Some(attr) => {
unsafe {
llvm::LLVMAddInstrAttribute(
llretval, (i + 1u) as c_uint,
attr as c_uint);
}
}
_ => ()
}
}
if self.sret || !ret_def {
return;
}
let n = vec::len(arg_tys);
// R** llretptr = &args->r;
let llretptr = GEPi(bcx, llargbundle, [0u, n]);
// R* llretloc = *llretptr; /* (args->r) */
let llretloc = Load(bcx, llretptr);
if self.ret_ty.cast {
let tmp_ptr = BitCast(bcx, llretloc, T_ptr(self.ret_ty.ty));
// *args->r = r;
Store(bcx, llretval, tmp_ptr);
} else {
// *args->r = r;
Store(bcx, llretval, llretloc);
};
}
fn build_wrap_args(&self, bcx: block, ret_ty: TypeRef,
llwrapfn: ValueRef, llargbundle: ValueRef) {
let mut atys = /*bad*/copy self.arg_tys;
let mut attrs = /*bad*/copy self.attrs;
let mut j = 0u;
let llretptr = if self.sret {
atys = vec::tail(atys);
attrs = vec::tail(attrs);
j = 1u;
get_param(llwrapfn, 0u)
} else if self.ret_ty.cast {
let retptr = alloca(bcx, self.ret_ty.ty);
BitCast(bcx, retptr, T_ptr(ret_ty))
} else {
alloca(bcx, ret_ty)
};
let mut i = 0u;
let n = vec::len(atys);
while i < n {
let mut argval = get_param(llwrapfn, i + j);
if attrs[i].is_some() {
argval = Load(bcx, argval);
store_inbounds(bcx, argval, llargbundle, [0u, i]);
} else if atys[i].cast {
let argptr = GEPi(bcx, llargbundle, [0u, i]);
let argptr = BitCast(bcx, argptr, T_ptr(atys[i].ty));
Store(bcx, argval, argptr);
} else {
store_inbounds(bcx, argval, llargbundle, [0u, i]);
}
i += 1u;
}
store_inbounds(bcx, llretptr, llargbundle, [0u, n]);
}
fn build_wrap_ret(&self, bcx: block,
arg_tys: &[TypeRef], llargbundle: ValueRef) {
unsafe {
if llvm::LLVMGetTypeKind(self.ret_ty.ty) == Void {
RetVoid(bcx);
return;
}
}
let n = vec::len(arg_tys);
let llretval = load_inbounds(bcx, llargbundle, ~[0u, n]);
let llretval = if self.ret_ty.cast {
let retptr = BitCast(bcx, llretval, T_ptr(self.ret_ty.ty));
Load(bcx, retptr)
} else {
Load(bcx, llretval)
};
Ret(bcx, llretval);
}
}
enum LLVM_ABIInfo { LLVM_ABIInfo }
impl LLVM_ABIInfo: ABIInfo {
fn compute_info(&self,
atys: &[TypeRef],
rty: TypeRef,
_ret_def: bool) -> FnType {
let arg_tys = do atys.map |a| {
LLVMType { cast: false, ty: *a }
};
let ret_ty = LLVMType {
cast: false,
ty: rty
};
let attrs = do atys.map |_| {
option::None
};
let sret = false;
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
attrs: attrs,
sret: sret
};
}
}
fn llvm_abi_info() -> ABIInfo {
return LLVM_ABIInfo as ABIInfo;
}
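The fallback llvm_abi_info never casts, never attaches attributes, and never uses sret, so every argument and the return value keep the LLVM types the caller passed in. A small illustrative sketch of what compute_info yields for a two-argument function (not part of this commit; T_i64 and T_f64 are the existing type constructors from trans::common):

    let abi = llvm_abi_info();
    let fnty = abi.compute_info(~[T_i64(), T_f64()], T_i64(), true);
    // fnty.arg_tys == ~[LLVMType { cast: false, ty: T_i64() },
    //                   LLVMType { cast: false, ty: T_f64() }]
    // fnty.ret_ty  == LLVMType { cast: false, ty: T_i64() }
    // fnty.attrs   == ~[None, None]
    // fnty.sret    == false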

View File

@@ -0,0 +1,417 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
use lib::llvm::{llvm, TypeRef, ValueRef, Integer, Pointer, Float, Double};
use lib::llvm::{Struct, Array, Attribute};
use lib::llvm::{StructRetAttribute, ByValAttribute};
use middle::trans::common::*;
use middle::trans::cabi::*;
export x86_64_abi_info;
enum x86_64_reg_class {
no_class,
integer_class,
sse_fs_class,
sse_fv_class,
sse_ds_class,
sse_dv_class,
sse_int_class,
sseup_class,
x87_class,
x87up_class,
complex_x87_class,
memory_class
}
impl x86_64_reg_class : cmp::Eq {
pure fn eq(&self, other: &x86_64_reg_class) -> bool {
((*self) as uint) == ((*other) as uint)
}
pure fn ne(&self, other: &x86_64_reg_class) -> bool { !(*self).eq(other) }
}
fn is_sse(++c: x86_64_reg_class) -> bool {
return match c {
sse_fs_class | sse_fv_class |
sse_ds_class | sse_dv_class => true,
_ => false
};
}
fn is_ymm(cls: &[x86_64_reg_class]) -> bool {
let len = vec::len(cls);
return (len > 2u &&
is_sse(cls[0]) &&
cls[1] == sseup_class &&
cls[2] == sseup_class) ||
(len > 3u &&
is_sse(cls[1]) &&
cls[2] == sseup_class &&
cls[3] == sseup_class);
}
fn classify_ty(ty: TypeRef) -> ~[x86_64_reg_class] {
fn align(off: uint, ty: TypeRef) -> uint {
let a = ty_align(ty);
return (off + a - 1u) / a * a;
}
fn struct_tys(ty: TypeRef) -> ~[TypeRef] {
unsafe {
let n = llvm::LLVMCountStructElementTypes(ty);
if (n == 0) {
return ~[];
}
let mut elts = vec::from_elem(n as uint, ptr::null());
llvm::LLVMGetStructElementTypes(ty,
ptr::to_mut_unsafe_ptr(&mut elts[0]));
return elts;
}
}
fn ty_align(ty: TypeRef) -> uint {
unsafe {
return match llvm::LLVMGetTypeKind(ty) {
Integer => {
((llvm::LLVMGetIntTypeWidth(ty) as uint) + 7) / 8
}
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
do vec::foldl(1, struct_tys(ty)) |a, t| {
uint::max(a, ty_align(*t))
}
}
Array => {
let elt = llvm::LLVMGetElementType(ty);
ty_align(elt)
}
_ => fail ~"ty_size: unhandled type"
};
}
}
fn ty_size(ty: TypeRef) -> uint {
unsafe {
return match llvm::LLVMGetTypeKind(ty) {
Integer => {
((llvm::LLVMGetIntTypeWidth(ty) as uint) + 7) / 8
}
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
let size = do vec::foldl(0, struct_tys(ty)) |s, t| {
align(s, *t) + ty_size(*t)
};
align(size, ty)
}
Array => {
let len = llvm::LLVMGetArrayLength(ty) as uint;
let elt = llvm::LLVMGetElementType(ty);
let eltsz = ty_size(elt);
len * eltsz
}
_ => fail ~"ty_size: unhandled type"
};
}
}
fn all_mem(cls: &[mut x86_64_reg_class]) {
for uint::range(0, cls.len()) |i| {
cls[i] = memory_class;
}
}
fn unify(cls: &[mut x86_64_reg_class],
i: uint,
newv: x86_64_reg_class) {
if cls[i] == newv {
return;
} else if cls[i] == no_class {
cls[i] = newv;
} else if newv == no_class {
return;
} else if cls[i] == memory_class || newv == memory_class {
cls[i] = memory_class;
} else if cls[i] == integer_class || newv == integer_class {
cls[i] = integer_class;
} else if cls[i] == x87_class ||
cls[i] == x87up_class ||
cls[i] == complex_x87_class ||
newv == x87_class ||
newv == x87up_class ||
newv == complex_x87_class {
cls[i] = memory_class;
} else {
cls[i] = newv;
}
}
fn classify_struct(tys: &[TypeRef],
cls: &[mut x86_64_reg_class], i: uint,
off: uint) {
let mut field_off = off;
for vec::each(tys) |ty| {
field_off = align(field_off, *ty);
classify(*ty, cls, i, field_off);
field_off += ty_size(*ty);
}
}
fn classify(ty: TypeRef,
cls: &[mut x86_64_reg_class], ix: uint,
off: uint) {
unsafe {
let t_align = ty_align(ty);
let t_size = ty_size(ty);
let misalign = off % t_align;
if misalign != 0u {
let mut i = off / 8u;
let e = (off + t_size + 7u) / 8u;
while i < e {
unify(cls, ix + i, memory_class);
i += 1u;
}
return;
}
match llvm::LLVMGetTypeKind(ty) as int {
8 /* integer */ |
12 /* pointer */ => {
unify(cls, ix + off / 8u, integer_class);
}
2 /* float */ => {
if off % 8u == 4u {
unify(cls, ix + off / 8u, sse_fv_class);
} else {
unify(cls, ix + off / 8u, sse_fs_class);
}
}
3 /* double */ => {
unify(cls, ix + off / 8u, sse_ds_class);
}
10 /* struct */ => {
classify_struct(struct_tys(ty), cls, ix, off);
}
11 /* array */ => {
let elt = llvm::LLVMGetElementType(ty);
let eltsz = ty_size(elt);
let len = llvm::LLVMGetArrayLength(ty) as uint;
let mut i = 0u;
while i < len {
classify(elt, cls, ix, off + i * eltsz);
i += 1u;
}
}
_ => fail ~"classify: unhandled type"
}
}
}
fn fixup(ty: TypeRef, cls: &[mut x86_64_reg_class]) {
unsafe {
let mut i = 0u;
let llty = llvm::LLVMGetTypeKind(ty) as int;
let e = vec::len(cls);
if vec::len(cls) > 2u &&
(llty == 10 /* struct */ ||
llty == 11 /* array */) {
if is_sse(cls[i]) {
i += 1u;
while i < e {
if cls[i] != sseup_class {
all_mem(cls);
return;
}
i += 1u;
}
} else {
all_mem(cls);
return
}
} else {
while i < e {
if cls[i] == memory_class {
all_mem(cls);
return;
}
if cls[i] == x87up_class {
// for darwin
// cls[i] = sse_ds_class;
all_mem(cls);
return;
}
if cls[i] == sseup_class {
cls[i] = sse_int_class;
} else if is_sse(cls[i]) {
i += 1;
while cls[i] == sseup_class { i += 1u; }
} else if cls[i] == x87_class {
i += 1;
while cls[i] == x87up_class { i += 1u; }
} else {
i += 1;
}
}
}
}
}
let words = (ty_size(ty) + 7) / 8;
let cls = vec::cast_to_mut(vec::from_elem(words, no_class));
if words > 4 {
all_mem(cls);
return vec::cast_from_mut(move cls);
}
classify(ty, cls, 0, 0);
fixup(ty, cls);
return vec::cast_from_mut(move cls);
}
fn llreg_ty(cls: &[x86_64_reg_class]) -> TypeRef {
fn llvec_len(cls: &[x86_64_reg_class]) -> uint {
let mut len = 1u;
for vec::each(cls) |c| {
if *c != sseup_class {
break;
}
len += 1u;
}
return len;
}
unsafe {
let mut tys = ~[];
let mut i = 0u;
let e = vec::len(cls);
while i < e {
match cls[i] {
integer_class => {
tys.push(T_i64());
}
sse_fv_class => {
let vec_len = llvec_len(vec::tailn(cls, i + 1u)) * 2u;
let vec_ty = llvm::LLVMVectorType(T_f32(),
vec_len as c_uint);
tys.push(vec_ty);
i += vec_len;
loop;
}
sse_fs_class => {
tys.push(T_f32());
}
sse_ds_class => {
tys.push(T_f64());
}
_ => fail ~"llregtype: unhandled class"
}
i += 1u;
}
return T_struct(tys);
}
}
fn x86_64_tys(atys: &[TypeRef],
rty: TypeRef,
ret_def: bool) -> FnType {
fn is_reg_ty(ty: TypeRef) -> bool {
unsafe {
return match llvm::LLVMGetTypeKind(ty) as int {
8 /* integer */ |
12 /* pointer */ |
2 /* float */ |
3 /* double */ => true,
_ => false
};
}
}
fn is_pass_byval(cls: &[x86_64_reg_class]) -> bool {
return cls.len() > 0 &&
(cls[0] == memory_class ||
cls[0] == x87_class ||
cls[0] == complex_x87_class);
}
fn is_ret_bysret(cls: &[x86_64_reg_class]) -> bool {
return cls.len() > 0 && cls[0] == memory_class;
}
fn x86_64_ty(ty: TypeRef,
is_mem_cls: fn(cls: &[x86_64_reg_class]) -> bool,
attr: Attribute) -> (LLVMType, Option<Attribute>) {
let mut cast = false;
let mut ty_attr = option::None;
let mut llty = ty;
if !is_reg_ty(ty) {
let cls = classify_ty(ty);
if is_mem_cls(cls) {
llty = T_ptr(ty);
ty_attr = option::Some(attr);
} else {
cast = true;
llty = llreg_ty(cls);
}
}
return (LLVMType { cast: cast, ty: llty }, ty_attr);
}
let mut arg_tys = ~[];
let mut attrs = ~[];
for vec::each(atys) |t| {
let (ty, attr) = x86_64_ty(*t, is_pass_byval, ByValAttribute);
arg_tys.push(ty);
attrs.push(attr);
}
let mut (ret_ty, ret_attr) = x86_64_ty(rty, is_ret_bysret,
StructRetAttribute);
let sret = ret_attr.is_some();
if sret {
arg_tys = vec::append(~[ret_ty], arg_tys);
ret_ty = LLVMType {
cast: false,
ty: T_void()
};
attrs = vec::append(~[ret_attr], attrs);
} else if !ret_def {
ret_ty = LLVMType {
cast: false,
ty: T_void()
};
}
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
attrs: attrs,
sret: sret
};
}
enum X86_64_ABIInfo { X86_64_ABIInfo }
impl X86_64_ABIInfo: ABIInfo {
fn compute_info(&self,
atys: &[TypeRef],
rty: TypeRef,
ret_def: bool) -> FnType {
return x86_64_tys(atys, rty, ret_def);
}
}
fn x86_64_abi_info() -> ABIInfo {
return X86_64_ABIInfo as ABIInfo;
}
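For orientation, here is roughly what the classification machinery above does with two representative aggregates; the values are read off from classify_ty, llreg_ty and x86_64_ty and are illustrative only, not part of the commit:

    // A 16-byte struct of two i64 fields occupies two eightbytes:
    let pair = T_struct(~[T_i64(), T_i64()]);
    // classify_ty(pair) -> ~[integer_class, integer_class]
    // llreg_ty(..)      -> T_struct(~[T_i64(), T_i64()]); x86_64_ty passes it
    //                      with cast: true and no attribute.

    // A 40-byte struct needs more than four eightbytes, so classify_ty marks
    // every class memory_class and x86_64_ty passes T_ptr(big) with
    // ByValAttribute instead:
    let big = T_struct(~[T_i64(), T_i64(), T_i64(), T_i64(), T_i64()]);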

View File

@@ -8,9 +8,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
use core::prelude::*;
use back::{link, abi};
@@ -22,6 +19,8 @@
use lib::llvm::{llvm, TypeRef, ValueRef, Integer, Pointer, Float, Double};
use lib;
use middle::trans::base::*;
use middle::trans::cabi;
use middle::trans::cabi_x86_64::*;
use middle::trans::build::*;
use middle::trans::callee::*;
use middle::trans::common::*;
@@ -44,417 +43,11 @@
export link_name, trans_foreign_mod, register_foreign_fn, trans_foreign_fn,
trans_intrinsic;
enum x86_64_reg_class {
no_class,
integer_class,
sse_fs_class,
sse_fv_class,
sse_ds_class,
sse_dv_class,
sse_int_class,
sseup_class,
x87_class,
x87up_class,
complex_x87_class,
memory_class
}
impl x86_64_reg_class : cmp::Eq {
pure fn eq(&self, other: &x86_64_reg_class) -> bool {
((*self) as uint) == ((*other) as uint)
}
pure fn ne(&self, other: &x86_64_reg_class) -> bool { !(*self).eq(other) }
}
fn abi_info(arch: session::arch) -> cabi::ABIInfo {
return match arch {
arch_x86_64 => x86_64_abi_info(),
_ => cabi::llvm_abi_info()
};
}
fn is_sse(++c: x86_64_reg_class) -> bool {
return match c {
sse_fs_class | sse_fv_class |
sse_ds_class | sse_dv_class => true,
_ => false
};
}
fn is_ymm(cls: &[x86_64_reg_class]) -> bool {
let len = vec::len(cls);
return (len > 2u &&
is_sse(cls[0]) &&
cls[1] == sseup_class &&
cls[2] == sseup_class) ||
(len > 3u &&
is_sse(cls[1]) &&
cls[2] == sseup_class &&
cls[3] == sseup_class);
}
fn classify_ty(ty: TypeRef) -> ~[x86_64_reg_class] {
fn align(off: uint, ty: TypeRef) -> uint {
let a = ty_align(ty);
return (off + a - 1u) / a * a;
}
fn struct_tys(ty: TypeRef) -> ~[TypeRef] {
unsafe {
let n = llvm::LLVMCountStructElementTypes(ty);
if (n == 0) {
return ~[];
}
let mut elts = vec::from_elem(n as uint, ptr::null());
llvm::LLVMGetStructElementTypes(ty,
ptr::to_mut_unsafe_ptr(&mut elts[0]));
return elts;
}
}
fn ty_align(ty: TypeRef) -> uint {
unsafe {
return match llvm::LLVMGetTypeKind(ty) {
Integer => {
((llvm::LLVMGetIntTypeWidth(ty) as uint) + 7) / 8
}
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
do vec::foldl(1, struct_tys(ty)) |a, t| {
uint::max(a, ty_align(*t))
}
}
Array => {
let elt = llvm::LLVMGetElementType(ty);
ty_align(elt)
}
_ => fail ~"ty_size: unhandled type"
};
}
}
fn ty_size(ty: TypeRef) -> uint {
unsafe {
return match llvm::LLVMGetTypeKind(ty) {
Integer => {
((llvm::LLVMGetIntTypeWidth(ty) as uint) + 7) / 8
}
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
let size = do vec::foldl(0, struct_tys(ty)) |s, t| {
align(s, *t) + ty_size(*t)
};
align(size, ty)
}
Array => {
let len = llvm::LLVMGetArrayLength(ty) as uint;
let elt = llvm::LLVMGetElementType(ty);
let eltsz = ty_size(elt);
len * eltsz
}
_ => fail ~"ty_size: unhandled type"
};
}
}
fn all_mem(cls: &[mut x86_64_reg_class]) {
for uint::range(0, cls.len()) |i| {
cls[i] = memory_class;
}
}
fn unify(cls: &[mut x86_64_reg_class],
i: uint,
newv: x86_64_reg_class) {
if cls[i] == newv {
return;
} else if cls[i] == no_class {
cls[i] = newv;
} else if newv == no_class {
return;
} else if cls[i] == memory_class || newv == memory_class {
cls[i] = memory_class;
} else if cls[i] == integer_class || newv == integer_class {
cls[i] = integer_class;
} else if cls[i] == x87_class ||
cls[i] == x87up_class ||
cls[i] == complex_x87_class ||
newv == x87_class ||
newv == x87up_class ||
newv == complex_x87_class {
cls[i] = memory_class;
} else {
cls[i] = newv;
}
}
fn classify_struct(tys: &[TypeRef],
cls: &[mut x86_64_reg_class], i: uint,
off: uint) {
let mut field_off = off;
for vec::each(tys) |ty| {
field_off = align(field_off, *ty);
classify(*ty, cls, i, field_off);
field_off += ty_size(*ty);
}
}
fn classify(ty: TypeRef,
cls: &[mut x86_64_reg_class], ix: uint,
off: uint) {
unsafe {
let t_align = ty_align(ty);
let t_size = ty_size(ty);
let misalign = off % t_align;
if misalign != 0u {
let mut i = off / 8u;
let e = (off + t_size + 7u) / 8u;
while i < e {
unify(cls, ix + i, memory_class);
i += 1u;
}
return;
}
match llvm::LLVMGetTypeKind(ty) as int {
8 /* integer */ |
12 /* pointer */ => {
unify(cls, ix + off / 8u, integer_class);
}
2 /* float */ => {
if off % 8u == 4u {
unify(cls, ix + off / 8u, sse_fv_class);
} else {
unify(cls, ix + off / 8u, sse_fs_class);
}
}
3 /* double */ => {
unify(cls, ix + off / 8u, sse_ds_class);
}
10 /* struct */ => {
classify_struct(struct_tys(ty), cls, ix, off);
}
11 /* array */ => {
let elt = llvm::LLVMGetElementType(ty);
let eltsz = ty_size(elt);
let len = llvm::LLVMGetArrayLength(ty) as uint;
let mut i = 0u;
while i < len {
classify(elt, cls, ix, off + i * eltsz);
i += 1u;
}
}
_ => fail ~"classify: unhandled type"
}
}
}
fn fixup(ty: TypeRef, cls: &[mut x86_64_reg_class]) {
unsafe {
let mut i = 0u;
let llty = llvm::LLVMGetTypeKind(ty) as int;
let e = vec::len(cls);
if vec::len(cls) > 2u &&
(llty == 10 /* struct */ ||
llty == 11 /* array */) {
if is_sse(cls[i]) {
i += 1u;
while i < e {
if cls[i] != sseup_class {
all_mem(cls);
return;
}
i += 1u;
}
} else {
all_mem(cls);
return
}
} else {
while i < e {
if cls[i] == memory_class {
all_mem(cls);
return;
}
if cls[i] == x87up_class {
// for darwin
// cls[i] = sse_ds_class;
all_mem(cls);
return;
}
if cls[i] == sseup_class {
cls[i] = sse_int_class;
} else if is_sse(cls[i]) {
i += 1;
while cls[i] == sseup_class { i += 1u; }
} else if cls[i] == x87_class {
i += 1;
while cls[i] == x87up_class { i += 1u; }
} else {
i += 1;
}
}
}
}
}
let words = (ty_size(ty) + 7) / 8;
let cls = vec::cast_to_mut(vec::from_elem(words, no_class));
if words > 4 {
all_mem(cls);
return vec::cast_from_mut(move cls);
}
classify(ty, cls, 0, 0);
fixup(ty, cls);
return vec::cast_from_mut(move cls);
}
fn llreg_ty(cls: &[x86_64_reg_class]) -> TypeRef {
fn llvec_len(cls: &[x86_64_reg_class]) -> uint {
let mut len = 1u;
for vec::each(cls) |c| {
if *c != sseup_class {
break;
}
len += 1u;
}
return len;
}
unsafe {
let mut tys = ~[];
let mut i = 0u;
let e = vec::len(cls);
while i < e {
match cls[i] {
integer_class => {
tys.push(T_i64());
}
sse_fv_class => {
let vec_len = llvec_len(vec::tailn(cls, i + 1u)) * 2u;
let vec_ty = llvm::LLVMVectorType(T_f32(),
vec_len as c_uint);
tys.push(vec_ty);
i += vec_len;
loop;
}
sse_fs_class => {
tys.push(T_f32());
}
sse_ds_class => {
tys.push(T_f64());
}
_ => fail ~"llregtype: unhandled class"
}
i += 1u;
}
return T_struct(tys);
}
}
type x86_64_llty = {
cast: bool,
ty: TypeRef
};
type x86_64_tys = {
arg_tys: ~[x86_64_llty],
ret_ty: x86_64_llty,
attrs: ~[Option<Attribute>],
sret: bool
};
fn x86_64_tys(atys: &[TypeRef],
rty: TypeRef,
ret_def: bool) -> x86_64_tys {
fn is_reg_ty(ty: TypeRef) -> bool {
unsafe {
return match llvm::LLVMGetTypeKind(ty) as int {
8 /* integer */ |
12 /* pointer */ |
2 /* float */ |
3 /* double */ => true,
_ => false
};
}
}
fn is_pass_byval(cls: &[x86_64_reg_class]) -> bool {
return cls.len() > 0 &&
(cls[0] == memory_class ||
cls[0] == x87_class ||
cls[0] == complex_x87_class);
}
fn is_ret_bysret(cls: &[x86_64_reg_class]) -> bool {
return cls.len() > 0 && cls[0] == memory_class;
}
fn x86_64_ty(ty: TypeRef,
is_mem_cls: fn(cls: &[x86_64_reg_class]) -> bool,
attr: Attribute) -> (x86_64_llty, Option<Attribute>) {
let mut cast = false;
let mut ty_attr = option::None;
let mut llty = ty;
if !is_reg_ty(ty) {
let cls = classify_ty(ty);
if is_mem_cls(cls) {
llty = T_ptr(ty);
ty_attr = option::Some(attr);
} else {
cast = true;
llty = llreg_ty(cls);
}
}
return ({ cast: cast, ty: llty }, ty_attr);
}
let mut arg_tys = ~[];
let mut attrs = ~[];
for vec::each(atys) |t| {
let (ty, attr) = x86_64_ty(*t, is_pass_byval, ByValAttribute);
arg_tys.push(ty);
attrs.push(attr);
}
let mut (ret_ty, ret_attr) = x86_64_ty(rty, is_ret_bysret,
StructRetAttribute);
let sret = ret_attr.is_some();
if sret {
arg_tys = vec::append(~[ret_ty], arg_tys);
ret_ty = { cast: false,
ty: T_void()
};
attrs = vec::append(~[ret_attr], attrs);
} else if !ret_def {
ret_ty = { cast: false,
ty: T_void()
};
}
return {
arg_tys: arg_tys,
ret_ty: ret_ty,
attrs: attrs,
sret: sret
};
}
fn decl_x86_64_fn(tys: x86_64_tys,
decl: fn(fnty: TypeRef) -> ValueRef) -> ValueRef {
let atys = vec::map(tys.arg_tys, |t| t.ty);
let rty = tys.ret_ty.ty;
let fnty = T_fn(atys, rty);
let llfn = decl(fnty);
for vec::eachi(tys.attrs) |i, a| {
match *a {
option::Some(attr) => {
unsafe {
let llarg = get_param(llfn, i);
llvm::LLVMAddAttribute(llarg, attr as c_uint);
}
}
_ => ()
}
}
return llfn;
}
fn link_name(ccx: @crate_ctxt, i: @ast::foreign_item) -> ~str {
@@ -470,7 +63,7 @@ fn link_name(ccx: @crate_ctxt, i: @ast::foreign_item) -> ~str {
ret_def: bool,
bundle_ty: TypeRef,
shim_fn_ty: TypeRef,
x86_64_tys: Option<x86_64_tys>
fn_ty: cabi::FnType
};
fn c_arg_and_ret_lltys(ccx: @crate_ctxt,
@@ -493,20 +86,15 @@ fn c_stack_tys(ccx: @crate_ctxt,
// XXX: Bad copy.
let bundle_ty = T_struct(vec::append_one(copy llargtys, T_ptr(llretty)));
let ret_def = !ty::type_is_bot(ret_ty) && !ty::type_is_nil(ret_ty);
let x86_64 = if ccx.sess.targ_cfg.arch == arch_x86_64 {
option::Some(x86_64_tys(llargtys, llretty, ret_def))
} else if ccx.sess.targ_cfg.arch == arch_arm {
option::Some(x86_64_tys(llargtys, llretty, ret_def))
} else {
option::None
};
let fn_ty = abi_info(ccx.sess.targ_cfg.arch).
compute_info(llargtys, llretty, ret_def);
return @{
arg_tys: llargtys,
ret_ty: llretty,
ret_def: ret_def,
bundle_ty: bundle_ty,
shim_fn_ty: T_fn(~[T_ptr(bundle_ty)], T_void()),
x86_64_tys: x86_64
fn_ty: fn_ty
};
}
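Every shim and wrap builder, both the FnType methods in cabi.rs above and the build_args/build_ret wrappers below, indexes into the same argument bundle whose shape c_stack_tys fixes here: one field per foreign argument followed by a pointer to the return slot, which is why the builders use GEPi(.., [0u, i]) for arguments and GEPi(.., [0u, n]) for the return pointer. A hedged sketch of that layout for a two-argument call (lla, llb and llretty are hypothetical LLVM types standing in for real ones):

    let llargtys = ~[lla, llb];
    let n = vec::len(llargtys);                 // == 2
    let bundle_ty = T_struct(vec::append_one(copy llargtys, T_ptr(llretty)));
    // fields 0 .. n-1 : argument slots
    // field  n        : the R* return pointer ("args->r" in the comments above)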
@@ -633,92 +221,14 @@ fn build_shim_fn(ccx: @crate_ctxt,
fn build_args(bcx: block, tys: @c_stack_tys,
llargbundle: ValueRef) -> ~[ValueRef] {
let _icx = bcx.insn_ctxt("foreign::shim::build_args");
let mut llargvals = ~[];
let mut i = 0u;
let n = vec::len(tys.arg_tys);
match tys.x86_64_tys {
Some(ref x86_64) => {
let mut atys = /*bad*/copy (*x86_64).arg_tys;
let mut attrs = /*bad*/copy (*x86_64).attrs;
if (*x86_64).sret {
let llretptr = GEPi(bcx, llargbundle, [0u, n]);
let llretloc = Load(bcx, llretptr);
llargvals = ~[llretloc];
atys = vec::tail(atys);
attrs = vec::tail(attrs);
}
while i < n {
let llargval = if atys[i].cast {
let arg_ptr = GEPi(bcx, llargbundle, [0u, i]);
let arg_ptr = BitCast(bcx, arg_ptr,
T_ptr(atys[i].ty));
Load(bcx, arg_ptr)
} else if attrs[i].is_some() {
GEPi(bcx, llargbundle, [0u, i])
} else {
load_inbounds(bcx, llargbundle, [0u, i])
};
llargvals.push(llargval);
i += 1u;
}
}
_ => {
while i < n {
let llargval = load_inbounds(bcx, llargbundle,
[0u, i]);
llargvals.push(llargval);
i += 1u;
}
}
}
return llargvals;
return tys.fn_ty.build_shim_args(bcx, tys.arg_tys, llargbundle);
}
fn build_ret(bcx: block, tys: @c_stack_tys,
llargbundle: ValueRef, llretval: ValueRef) {
let _icx = bcx.insn_ctxt("foreign::shim::build_ret");
match tys.x86_64_tys {
Some(ref x86_64) => {
for vec::eachi((*x86_64).attrs) |i, a| {
match *a {
Some(attr) => {
unsafe {
llvm::LLVMAddInstrAttribute(
llretval, (i + 1u) as c_uint,
attr as c_uint);
}
}
_ => ()
}
}
if (*x86_64).sret || !tys.ret_def {
return;
}
let n = vec::len(tys.arg_tys);
let llretptr = GEPi(bcx, llargbundle, [0u, n]);
let llretloc = Load(bcx, llretptr);
if (*x86_64).ret_ty.cast {
let tmp_ptr = BitCast(bcx,
llretloc,
T_ptr((*x86_64).ret_ty.ty));
Store(bcx, llretval, tmp_ptr);
} else {
Store(bcx, llretval, llretloc);
};
}
_ => {
if tys.ret_def {
let n = vec::len(tys.arg_tys);
// R** llretptr = &args->r;
let llretptr = GEPi(bcx, llargbundle, [0u, n]);
// R* llretloc = *llretptr; /* (args->r) */
let llretloc = Load(bcx, llretptr);
// *args->r = r;
Store(bcx, llretval, llretloc);
}
}
}
tys.fn_ty.build_shim_ret(bcx, tys.arg_tys, tys.ret_def,
llargbundle, llretval);
}
let lname = link_name(ccx, foreign_item);
@@ -732,16 +242,8 @@ fn build_ret(bcx: block, tys: @c_stack_tys,
fn base_fn(ccx: @crate_ctxt, +lname: ~str, tys: @c_stack_tys,
cc: lib::llvm::CallConv) -> ValueRef {
// Declare the "prototype" for the base function F:
match tys.x86_64_tys {
Some(ref x86_64) => {
do decl_x86_64_fn((*x86_64)) |fnty| {
decl_fn(ccx.llmod, /*bad*/copy lname, cc, fnty)
}
}
_ => {
let llbasefnty = T_fn(/*bad*/copy tys.arg_tys, tys.ret_ty);
decl_fn(ccx.llmod, lname, cc, llbasefnty)
}
do tys.fn_ty.decl_fn |fnty| {
decl_fn(ccx.llmod, /*bad*/copy lname, cc, fnty)
}
}
@@ -1383,84 +885,14 @@ fn build_wrap_fn(ccx: @crate_ctxt, llshimfn: ValueRef,
fn build_args(bcx: block, tys: @c_stack_tys,
llwrapfn: ValueRef, llargbundle: ValueRef) {
let _icx = bcx.insn_ctxt("foreign::foreign::wrap::build_args");
match tys.x86_64_tys {
option::Some(ref x86_64) => {
let mut atys = /*bad*/copy (*x86_64).arg_tys;
let mut attrs = /*bad*/copy (*x86_64).attrs;
let mut j = 0u;
let llretptr = if (*x86_64).sret {
atys = vec::tail(atys);
attrs = vec::tail(attrs);
j = 1u;
get_param(llwrapfn, 0u)
} else if (*x86_64).ret_ty.cast {
let retptr = alloca(bcx, (*x86_64).ret_ty.ty);
BitCast(bcx, retptr, T_ptr(tys.ret_ty))
} else {
alloca(bcx, tys.ret_ty)
};
let mut i = 0u;
let n = vec::len(atys);
while i < n {
let mut argval = get_param(llwrapfn, i + j);
if attrs[i].is_some() {
argval = Load(bcx, argval);
store_inbounds(bcx, argval, llargbundle,
[0u, i]);
} else if atys[i].cast {
let argptr = GEPi(bcx, llargbundle, [0u, i]);
let argptr = BitCast(bcx, argptr,
T_ptr(atys[i].ty));
Store(bcx, argval, argptr);
} else {
store_inbounds(bcx, argval, llargbundle,
[0u, i]);
}
i += 1u;
}
store_inbounds(bcx, llretptr, llargbundle, [0u, n]);
}
_ => {
let llretptr = alloca(bcx, tys.ret_ty);
let n = vec::len(tys.arg_tys);
for uint::range(0u, n) |i| {
let llargval = get_param(llwrapfn, i);
store_inbounds(bcx, llargval, llargbundle,
[0u, i]);
};
store_inbounds(bcx, llretptr, llargbundle, [0u, n]);
}
}
tys.fn_ty.build_wrap_args(bcx, tys.ret_ty,
llwrapfn, llargbundle);
}
fn build_ret(bcx: block, tys: @c_stack_tys,
llargbundle: ValueRef) {
let _icx = bcx.insn_ctxt("foreign::foreign::wrap::build_ret");
match tys.x86_64_tys {
option::Some(ref x86_64) => {
if (*x86_64).sret || !tys.ret_def {
RetVoid(bcx);
return;
}
let n = vec::len(tys.arg_tys);
let llretval = load_inbounds(bcx, llargbundle, ~[0u, n]);
let llretval = if (*x86_64).ret_ty.cast {
let retptr = BitCast(bcx, llretval,
T_ptr((*x86_64).ret_ty.ty));
Load(bcx, retptr)
} else {
Load(bcx, llretval)
};
Ret(bcx, llretval);
}
_ => {
let n = vec::len(tys.arg_tys);
let llretval = load_inbounds(bcx, llargbundle, ~[0u, n]);
let llretval = Load(bcx, llretval);
Ret(bcx, llretval);
}
}
tys.fn_ty.build_wrap_ret(bcx, tys.arg_tys, llargbundle);
}
build_wrap_fn_(ccx, tys, llshimfn, llwrapfn,
@@ -1487,25 +919,12 @@ fn register_foreign_fn(ccx: @crate_ctxt,
let _icx = ccx.insn_ctxt("foreign::register_foreign_fn");
let t = ty::node_id_to_type(ccx.tcx, node_id);
let (llargtys, llretty, ret_ty) = c_arg_and_ret_lltys(ccx, node_id);
return if ccx.sess.targ_cfg.arch == arch_x86_64 {
let ret_def = !ty::type_is_bot(ret_ty) && !ty::type_is_nil(ret_ty);
let x86_64 = x86_64_tys(llargtys, llretty, ret_def);
do decl_x86_64_fn(x86_64) |fnty| {
register_fn_fuller(ccx, sp, /*bad*/copy path, node_id, attrs,
t, lib::llvm::CCallConv, fnty)
}
} else if ccx.sess.targ_cfg.arch == arch_arm {
let ret_def = !ty::type_is_bot(ret_ty) && !ty::type_is_nil(ret_ty);
let x86_64 = x86_64_tys(llargtys, llretty, ret_def);
do decl_x86_64_fn(x86_64) |fnty| {
register_fn_fuller(ccx, sp, /*bad*/copy path, node_id, attrs,
t, lib::llvm::CCallConv, fnty)
}
} else {
let llfty = T_fn(llargtys, llretty);
register_fn_fuller(ccx, sp, path, node_id, attrs,
t, lib::llvm::CCallConv, llfty)
let ret_def = !ty::type_is_bot(ret_ty) && !ty::type_is_nil(ret_ty);
let fn_ty = abi_info(ccx.sess.targ_cfg.arch).
compute_info(llargtys, llretty, ret_def);
do fn_ty.decl_fn |fnty| {
register_fn_fuller(ccx, sp, /*bad*/copy path, node_id, attrs,
t, lib::llvm::CCallConv, fnty)
}
}

View File

@@ -86,6 +86,10 @@ mod middle {
#[legacy_exports]
mod meth;
#[legacy_exports]
mod cabi;
#[legacy_exports]
mod cabi_x86_64;
#[legacy_exports]
mod foreign;
#[legacy_exports]
mod reflect;