// rust/src/comp/middle/trans.rs
import std.Int;
import std.Str;
import std.UInt;
import std.Vec;
import std.Str.rustrt.sbuf;
import std.Vec.rustrt.vbuf;
import std.Map;
import std.Map.hashmap;
import std.Option;
import std.Option.some;
import std.Option.none;
import front.ast;
import front.creader;
import driver.session;
import middle.ty;
import middle.metadata;
import back.Link;
import back.x86;
import back.abi;
import middle.ty.pat_ty;
import util.common;
import util.common.istr;
import util.common.new_def_hash;
import util.common.new_str_hash;
import lib.llvm.llvm;
import lib.llvm.builder;
import lib.llvm.target_data;
import lib.llvm.type_handle;
import lib.llvm.type_names;
import lib.llvm.mk_target_data;
import lib.llvm.mk_type_handle;
import lib.llvm.mk_type_names;
import lib.llvm.llvm.ModuleRef;
import lib.llvm.llvm.ValueRef;
import lib.llvm.llvm.TypeRef;
import lib.llvm.llvm.TypeHandleRef;
import lib.llvm.llvm.BuilderRef;
import lib.llvm.llvm.BasicBlockRef;
import lib.llvm.False;
import lib.llvm.True;
import lib.llvm.Bool;
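// A stateful name generator: next(prefix) returns the prefix with a
// monotonically increasing integer appended, yielding fresh symbol names.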
state obj namegen(mutable int i) {
fn next(str prefix) -> str {
i += 1;
ret prefix + istr(i);
}
}
type glue_fns = rec(ValueRef activate_glue,
ValueRef yield_glue,
ValueRef exit_task_glue,
vec[ValueRef] native_glues_rust,
vec[ValueRef] native_glues_pure_rust,
vec[ValueRef] native_glues_cdecl,
ValueRef no_op_type_glue,
ValueRef memcpy_glue,
ValueRef bzero_glue,
ValueRef vec_append_glue);
type tydesc_info = rec(ValueRef tydesc,
ValueRef take_glue,
ValueRef drop_glue,
ValueRef cmp_glue);
/*
* A note on nomenclature of linking: "upcall", "extern" and "native".
*
* An "extern" is an LLVM symbol we wind up emitting an undefined external
* reference to. This means "we don't have the thing in this compilation unit,
* please make sure you link it in at runtime". This could be a reference to
* C code found in a C library, or rust code found in a rust crate.
*
* A "native" is a combination of an extern that references C code, plus a
* glue-code stub that "looks like" a rust function, emitted here, plus a
* generic N-ary bit of asm glue (found over in back/x86.rs) that performs a
* control transfer into C from rust. Natives may be normal C library code.
*
* An upcall is a native call generated by the compiler (not corresponding to
* any user-written call in the code) into librustrt, to perform some helper
* task such as bringing a task to life, allocating memory, etc.
*
*/
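// One crate_ctxt is shared across the whole crate; it carries the LLVM
// module plus all the crate-wide caches (types, tydescs, externs, etc.).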
state type crate_ctxt = rec(session.session sess,
ModuleRef llmod,
target_data td,
type_names tn,
ValueRef crate_ptr,
hashmap[str, ValueRef] externs,
hashmap[str, ValueRef] intrinsics,
hashmap[ast.def_id, ValueRef] item_ids,
hashmap[ast.def_id, @ast.item] items,
hashmap[ast.def_id,
@ast.native_item] native_items,
ty.type_cache type_cache,
hashmap[ast.def_id, str] item_symbols,
// TODO: hashmap[tup(tag_id,subtys), @tag_info]
hashmap[ty.t, uint] tag_sizes,
hashmap[ast.def_id, ValueRef] discrims,
hashmap[ast.def_id, str] discrim_symbols,
hashmap[ast.def_id, ValueRef] fn_pairs,
hashmap[ast.def_id, ValueRef] consts,
hashmap[ast.def_id,()] obj_methods,
hashmap[ty.t, @tydesc_info] tydescs,
hashmap[str, ValueRef] module_data,
hashmap[ty.t, TypeRef] lltypes,
@glue_fns glues,
namegen names,
std.SHA1.sha1 sha,
hashmap[ty.t, str] type_sha1s,
hashmap[ty.t, metadata.ty_abbrev] type_abbrevs,
ty.ctxt tcx);
type local_ctxt = rec(vec[str] path,
vec[str] module_path,
vec[ast.ty_param] obj_typarams,
vec[ast.obj_field] obj_fields,
@crate_ctxt ccx);
type self_vt = rec(ValueRef v, ty.t t);
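// Per-function state: the in-progress LLVM function, its implicit task /
// environment / return-slot arguments, and tables for its local slots.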
state type fn_ctxt = rec(ValueRef llfn,
ValueRef lltaskptr,
ValueRef llenv,
ValueRef llretptr,
mutable BasicBlockRef llallocas,
mutable Option.t[self_vt] llself,
mutable Option.t[ValueRef] lliterbody,
hashmap[ast.def_id, ValueRef] llargs,
hashmap[ast.def_id, ValueRef] llobjfields,
hashmap[ast.def_id, ValueRef] lllocals,
hashmap[ast.def_id, ValueRef] llupvars,
mutable vec[ValueRef] lltydescs,
@local_ctxt lcx);
tag cleanup {
clean(fn(@block_ctxt cx) -> result);
}
tag block_kind {
SCOPE_BLOCK;
LOOP_SCOPE_BLOCK(Option.t[@block_ctxt], @block_ctxt);
NON_SCOPE_BLOCK;
}
state type block_ctxt = rec(BasicBlockRef llbb,
builder build,
block_parent parent,
block_kind kind,
mutable vec[cleanup] cleanups,
@fn_ctxt fcx);
// FIXME: we should be able to use Option.t[@block_parent] here but
// the infinite-tag check in rustboot gets upset.
tag block_parent {
parent_none;
parent_some(@block_ctxt);
}
state type result = rec(mutable @block_ctxt bcx,
mutable ValueRef val);
fn sep() -> str {
ret "_";
}
fn extend_path(@local_ctxt cx, str name) -> @local_ctxt {
ret @rec(path = cx.path + vec(name) with *cx);
}
fn path_name(vec[str] path) -> str {
ret Str.connect(path, sep());
}
fn get_type_sha1(@crate_ctxt ccx, ty.t t) -> str {
auto hash = "";
alt (ccx.type_sha1s.find(t)) {
case (some[str](?h)) { hash = h; }
case (none[str]) {
ccx.sha.reset();
auto f = metadata.def_to_str;
// NB: do *not* use abbrevs here as we want the symbol names
// to be independent of one another in the crate.
auto cx = @rec(ds=f, tcx=ccx.tcx, abbrevs=metadata.ac_no_abbrevs);
ccx.sha.input_str(metadata.Encode.ty_str(cx, t));
hash = Str.substr(ccx.sha.result_str(), 0u, 16u);
ccx.type_sha1s.insert(t, hash);
}
}
ret hash;
}
fn mangle_name_by_type(@crate_ctxt ccx, vec[str] path, ty.t t) -> str {
auto hash = get_type_sha1(ccx, t);
ret sep() + "rust" + sep() + hash + sep() + path_name(path);
}
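// For example, with sep() == "_", a 16-hex-digit hash "0123456789abcdef"
// and path ["std", "map"], this yields "_rust_0123456789abcdef_std_map".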
fn mangle_name_by_type_only(@crate_ctxt ccx, ty.t t, str name) -> str {
auto f = metadata.def_to_str;
auto cx = @rec(ds=f, tcx=ccx.tcx, abbrevs=metadata.ac_no_abbrevs);
auto s = metadata.Encode.ty_str(cx, t);
auto hash = get_type_sha1(ccx, t);
ret sep() + "rust" + sep() + hash + sep() + name + "_" + s;
}
fn mangle_name_by_seq(@crate_ctxt ccx, vec[str] path, str flav) -> str {
ret sep() + "rust" + sep()
+ ccx.names.next(flav) + sep()
+ path_name(path);
}
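// Note that names minted by mangle_name_by_seq embed a per-crate counter,
// so they are unique within a compilation but not stable across runs.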
fn res(@block_ctxt bcx, ValueRef val) -> result {
ret rec(mutable bcx = bcx,
mutable val = val);
}
fn ty_str(type_names tn, TypeRef t) -> str {
ret lib.llvm.type_to_str(tn, t);
}
fn val_ty(ValueRef v) -> TypeRef {
ret llvm.LLVMTypeOf(v);
}
fn val_str(type_names tn, ValueRef v) -> str {
ret ty_str(tn, val_ty(v));
}
// LLVM type constructors.
fn T_void() -> TypeRef {
// Note: For the time being llvm is kinda busted here, it has the notion
// of a 'void' type that can only occur as part of the signature of a
// function, but no general unit type of 0-sized value. This is, afaict,
// vestigial from its C heritage, and we'll be attempting to submit a
// patch upstream to fix it. In the mean time we only model function
// outputs (Rust functions and C functions) using T_void, and model the
// Rust general purpose nil type you can construct as 1-bit (always
// zero). This makes the result incorrect for now -- things like a tuple
// of 10 nil values will have 10-bit size -- but it doesn't seem like we
// have any other options until it's fixed upstream.
ret llvm.LLVMVoidType();
}
fn T_nil() -> TypeRef {
// NB: See above in T_void().
ret llvm.LLVMInt1Type();
}
fn T_i1() -> TypeRef {
ret llvm.LLVMInt1Type();
}
fn T_i8() -> TypeRef {
ret llvm.LLVMInt8Type();
}
fn T_i16() -> TypeRef {
ret llvm.LLVMInt16Type();
}
fn T_i32() -> TypeRef {
ret llvm.LLVMInt32Type();
}
fn T_i64() -> TypeRef {
ret llvm.LLVMInt64Type();
}
fn T_f32() -> TypeRef {
ret llvm.LLVMFloatType();
}
fn T_f64() -> TypeRef {
ret llvm.LLVMDoubleType();
}
fn T_bool() -> TypeRef {
ret T_i1();
}
fn T_int() -> TypeRef {
// FIXME: switch on target type.
ret T_i32();
}
fn T_float() -> TypeRef {
// FIXME: switch on target type.
ret T_f64();
}
fn T_char() -> TypeRef {
ret T_i32();
}
fn T_size_t() -> TypeRef {
// FIXME: switch on target type.
ret T_i32();
}
fn T_fn(vec[TypeRef] inputs, TypeRef output) -> TypeRef {
ret llvm.LLVMFunctionType(output,
Vec.buf[TypeRef](inputs),
Vec.len[TypeRef](inputs),
False);
}
fn T_fn_pair(type_names tn, TypeRef tfn) -> TypeRef {
ret T_struct(vec(T_ptr(tfn),
T_opaque_closure_ptr(tn)));
}
fn T_ptr(TypeRef t) -> TypeRef {
ret llvm.LLVMPointerType(t, 0u);
}
fn T_struct(vec[TypeRef] elts) -> TypeRef {
ret llvm.LLVMStructType(Vec.buf[TypeRef](elts),
Vec.len[TypeRef](elts),
False);
}
fn T_opaque() -> TypeRef {
ret llvm.LLVMOpaqueType();
}
fn T_task(type_names tn) -> TypeRef {
auto s = "task";
if (tn.name_has_type(s)) {
ret tn.get_type(s);
}
auto t = T_struct(vec(T_int(), // Refcount
T_int(), // Delegate pointer
T_int(), // Stack segment pointer
T_int(), // Runtime SP
T_int(), // Rust SP
T_int(), // GC chain
T_int(), // Domain pointer
T_int() // Crate cache pointer
));
tn.associate(s, t);
ret t;
}
fn T_tydesc_field(type_names tn, int field) -> TypeRef {
// Bit of a kludge: pick the fn typeref out of the tydesc..
let vec[TypeRef] tydesc_elts =
Vec.init_elt[TypeRef](T_nil(), abi.n_tydesc_fields as uint);
llvm.LLVMGetStructElementTypes(T_tydesc(tn),
Vec.buf[TypeRef](tydesc_elts));
auto t = llvm.LLVMGetElementType(tydesc_elts.(field));
ret t;
}
fn T_glue_fn(type_names tn) -> TypeRef {
auto s = "glue_fn";
if (tn.name_has_type(s)) {
ret tn.get_type(s);
}
auto t = T_tydesc_field(tn, abi.tydesc_field_drop_glue);
tn.associate(s, t);
ret t;
}
fn T_dtor(@crate_ctxt ccx, TypeRef llself_ty) -> TypeRef {
ret type_of_fn_full(ccx, ast.proto_fn, some[TypeRef](llself_ty),
Vec.empty[ty.arg](), ty.mk_nil(ccx.tcx), 0u);
}
fn T_cmp_glue_fn(type_names tn) -> TypeRef {
auto s = "cmp_glue_fn";
if (tn.name_has_type(s)) {
ret tn.get_type(s);
}
auto t = T_tydesc_field(tn, abi.tydesc_field_cmp_glue);
tn.associate(s, t);
ret t;
}
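// The tydesc type is self-referential (its glue functions take a pointer
// to a pointer to a tydesc), so we build it with an opaque type handle
// and refine the handle once the full struct type is known.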
fn T_tydesc(type_names tn) -> TypeRef {
auto s = "tydesc";
if (tn.name_has_type(s)) {
ret tn.get_type(s);
}
auto th = mk_type_handle();
auto abs_tydesc = llvm.LLVMResolveTypeHandle(th.llth);
auto tydescpp = T_ptr(T_ptr(abs_tydesc));
auto pvoid = T_ptr(T_i8());
auto glue_fn_ty = T_ptr(T_fn(vec(T_ptr(T_nil()),
T_taskptr(tn),
T_ptr(T_nil()),
tydescpp,
pvoid), T_void()));
auto cmp_glue_fn_ty = T_ptr(T_fn(vec(T_ptr(T_i1()),
T_taskptr(tn),
T_ptr(T_nil()),
tydescpp,
pvoid,
pvoid,
T_i8()), T_void()));
auto tydesc = T_struct(vec(tydescpp, // first_param
T_int(), // size
T_int(), // align
glue_fn_ty, // take_glue
glue_fn_ty, // drop_glue
glue_fn_ty, // free_glue
glue_fn_ty, // sever_glue
glue_fn_ty, // mark_glue
glue_fn_ty, // obj_drop_glue
glue_fn_ty, // is_stateful
cmp_glue_fn_ty)); // cmp_glue
llvm.LLVMRefineType(abs_tydesc, tydesc);
auto t = llvm.LLVMResolveTypeHandle(th.llth);
tn.associate(s, t);
ret t;
}
fn T_array(TypeRef t, uint n) -> TypeRef {
assert (n != 0u);
ret llvm.LLVMArrayType(t, n);
}
fn T_vec(TypeRef t) -> TypeRef {
ret T_struct(vec(T_int(), // Refcount
T_int(), // Alloc
T_int(), // Fill
T_int(), // Pad
T_array(t, 1u) // Body elements
));
}
fn T_opaque_vec_ptr() -> TypeRef {
ret T_ptr(T_vec(T_int()));
}
fn T_str() -> TypeRef {
ret T_vec(T_i8());
}
fn T_box(TypeRef t) -> TypeRef {
ret T_struct(vec(T_int(), t));
}
fn T_port(TypeRef t) -> TypeRef {
ret T_struct(vec(T_int())); // Refcount
}
fn T_chan(TypeRef t) -> TypeRef {
ret T_struct(vec(T_int())); // Refcount
}
fn T_crate(type_names tn) -> TypeRef {
auto s = "crate";
if (tn.name_has_type(s)) {
ret tn.get_type(s);
}
auto t = T_struct(vec(T_int(), // ptrdiff_t image_base_off
T_int(), // uintptr_t self_addr
T_int(), // ptrdiff_t debug_abbrev_off
T_int(), // size_t debug_abbrev_sz
T_int(), // ptrdiff_t debug_info_off
T_int(), // size_t debug_info_sz
T_int(), // size_t activate_glue
T_int(), // size_t yield_glue
T_int(), // size_t unwind_glue
T_int(), // size_t gc_glue
T_int(), // size_t main_exit_task_glue
T_int(), // int n_rust_syms
T_int(), // int n_c_syms
T_int(), // int n_libs
T_int() // uintptr_t abi_tag
));
tn.associate(s, t);
ret t;
}
fn T_taskptr(type_names tn) -> TypeRef {
ret T_ptr(T_task(tn));
}
// This type must never be used directly; it must always be cast away.
fn T_typaram(type_names tn) -> TypeRef {
auto s = "typaram";
if (tn.name_has_type(s)) {
ret tn.get_type(s);
}
auto t = T_i8();
tn.associate(s, t);
ret t;
}
fn T_typaram_ptr(type_names tn) -> TypeRef {
ret T_ptr(T_typaram(tn));
}
fn T_closure_ptr(type_names tn,
TypeRef lltarget_ty,
TypeRef llbindings_ty,
uint n_ty_params) -> TypeRef {
// NB: keep this in sync with code in trans_bind; we're making
// an LLVM typeref structure that has the same "shape" as the ty.t
// it constructs.
ret T_ptr(T_box(T_struct(vec(T_ptr(T_tydesc(tn)),
lltarget_ty,
llbindings_ty,
T_captured_tydescs(tn, n_ty_params))
)));
}
fn T_opaque_closure_ptr(type_names tn) -> TypeRef {
auto s = "*closure";
if (tn.name_has_type(s)) {
ret tn.get_type(s);
}
auto t = T_closure_ptr(tn, T_struct(vec(T_ptr(T_nil()),
T_ptr(T_nil()))),
T_nil(),
0u);
tn.associate(s, t);
ret t;
}
fn T_tag(type_names tn, uint size) -> TypeRef {
auto s = "tag_" + UInt.to_str(size, 10u);
if (tn.name_has_type(s)) {
ret tn.get_type(s);
}
auto t;
if (size == 0u) {
t = T_struct(vec(T_int()));
} else {
t = T_struct(vec(T_int(), T_array(T_i8(), size)));
}
tn.associate(s, t);
ret t;
}
fn T_opaque_tag(type_names tn) -> TypeRef {
auto s = "opaque_tag";
if (tn.name_has_type(s)) {
ret tn.get_type(s);
}
auto t = T_struct(vec(T_int(), T_i8()));
tn.associate(s, t);
ret t;
}
fn T_opaque_tag_ptr(type_names tn) -> TypeRef {
ret T_ptr(T_opaque_tag(tn));
}
fn T_captured_tydescs(type_names tn, uint n) -> TypeRef {
ret T_struct(Vec.init_elt[TypeRef](T_ptr(T_tydesc(tn)), n));
}
fn T_obj_ptr(type_names tn, uint n_captured_tydescs) -> TypeRef {
// This function is not publicly exposed because it returns an incomplete
// type. The dynamically-sized fields follow the captured tydescs.
fn T_obj(type_names tn, uint n_captured_tydescs) -> TypeRef {
ret T_struct(vec(T_ptr(T_tydesc(tn)),
T_captured_tydescs(tn, n_captured_tydescs)));
}
ret T_ptr(T_box(T_obj(tn, n_captured_tydescs)));
}
fn T_opaque_obj_ptr(type_names tn) -> TypeRef {
ret T_obj_ptr(tn, 0u);
}
fn T_opaque_port_ptr(type_names tn) -> TypeRef {
auto s = "*port";
if (tn.name_has_type(s)) { ret tn.get_type(s); }
auto t = T_ptr(T_i8());
tn.associate(s, t);
ret t;
}
fn T_opaque_chan_ptr(type_names tn) -> TypeRef {
auto s = "*chan";
if (tn.name_has_type(s)) { ret tn.get_type(s); }
auto t = T_ptr(T_i8());
tn.associate(s, t);
ret t;
}
// This function now fails if called on a type with dynamic size (as its
// return value was always meaningless in that case anyhow). Beware!
//
// TODO: Enforce via a predicate.
fn type_of(@crate_ctxt cx, ty.t t) -> TypeRef {
if (ty.type_has_dynamic_size(cx.tcx, t)) {
log_err "type_of() called on a type with dynamic size: " +
ty.ty_to_str(cx.tcx, t);
fail;
}
ret type_of_inner(cx, t);
}
fn type_of_explicit_args(@crate_ctxt cx, vec[ty.arg] inputs) -> vec[TypeRef] {
let vec[TypeRef] atys = vec();
for (ty.arg arg in inputs) {
if (ty.type_has_dynamic_size(cx.tcx, arg.ty)) {
assert (arg.mode == ty.mo_alias);
atys += vec(T_typaram_ptr(cx.tn));
} else {
let TypeRef t;
alt (arg.mode) {
case (ty.mo_alias) {
t = T_ptr(type_of_inner(cx, arg.ty));
}
case (_) {
t = type_of_inner(cx, arg.ty);
}
}
atys += vec(t);
}
}
ret atys;
}
// NB: must keep 4 fns in sync:
//
// - type_of_fn_full
// - create_llargs_for_fn_args
// - new_fn_ctxt
// - trans_args
fn type_of_fn_full(@crate_ctxt cx,
ast.proto proto,
Option.t[TypeRef] obj_self,
vec[ty.arg] inputs,
ty.t output,
uint ty_param_count) -> TypeRef {
let vec[TypeRef] atys = vec();
// Arg 0: Output pointer.
if (ty.type_has_dynamic_size(cx.tcx, output)) {
atys += vec(T_typaram_ptr(cx.tn));
} else {
atys += vec(T_ptr(type_of_inner(cx, output)));
}
// Arg 1: Task pointer.
atys += vec(T_taskptr(cx.tn));
// Arg 2: Env (closure-bindings / self-obj)
alt (obj_self) {
case (some[TypeRef](?t)) {
assert (t as int != 0);
atys += vec(t);
}
case (_) {
atys += vec(T_opaque_closure_ptr(cx.tn));
}
}
// Args >= 3: ty params, if not acquired via capture...
if (obj_self == none[TypeRef]) {
auto i = 0u;
while (i < ty_param_count) {
atys += vec(T_ptr(T_tydesc(cx.tn)));
i += 1u;
}
}
if (proto == ast.proto_iter) {
// If it's an iter, the 'output' type of the iter is actually the
// *input* type of the function we're given as our iter-block
// argument.
atys +=
vec(T_fn_pair(cx.tn,
type_of_fn_full(cx, ast.proto_fn, none[TypeRef],
vec(rec(mode=ty.mo_alias,
ty=output)),
ty.mk_nil(cx.tcx), 0u)));
}
// ... then explicit args.
atys += type_of_explicit_args(cx, inputs);
ret T_fn(atys, llvm.LLVMVoidType());
}
fn type_of_fn(@crate_ctxt cx,
ast.proto proto,
vec[ty.arg] inputs,
ty.t output,
uint ty_param_count) -> TypeRef {
ret type_of_fn_full(cx, proto, none[TypeRef], inputs, output,
ty_param_count);
}
fn type_of_native_fn(@crate_ctxt cx, ast.native_abi abi,
vec[ty.arg] inputs,
ty.t output,
uint ty_param_count) -> TypeRef {
let vec[TypeRef] atys = vec();
if (abi == ast.native_abi_rust) {
atys += vec(T_taskptr(cx.tn));
auto t = ty.ty_native_fn(abi, inputs, output);
auto i = 0u;
while (i < ty_param_count) {
atys += vec(T_ptr(T_tydesc(cx.tn)));
i += 1u;
}
}
atys += type_of_explicit_args(cx, inputs);
ret T_fn(atys, type_of_inner(cx, output));
}
fn type_of_inner(@crate_ctxt cx, ty.t t) -> TypeRef {
// Check the cache.
if (cx.lltypes.contains_key(t)) {
ret cx.lltypes.get(t);
}
let TypeRef llty = 0 as TypeRef;
alt (ty.struct(cx.tcx, t)) {
case (ty.ty_native) { llty = T_ptr(T_i8()); }
case (ty.ty_nil) { llty = T_nil(); }
case (ty.ty_bool) { llty = T_bool(); }
case (ty.ty_int) { llty = T_int(); }
case (ty.ty_float) { llty = T_float(); }
case (ty.ty_uint) { llty = T_int(); }
case (ty.ty_machine(?tm)) {
alt (tm) {
case (common.ty_i8) { llty = T_i8(); }
case (common.ty_u8) { llty = T_i8(); }
case (common.ty_i16) { llty = T_i16(); }
case (common.ty_u16) { llty = T_i16(); }
case (common.ty_i32) { llty = T_i32(); }
case (common.ty_u32) { llty = T_i32(); }
case (common.ty_i64) { llty = T_i64(); }
case (common.ty_u64) { llty = T_i64(); }
case (common.ty_f32) { llty = T_f32(); }
case (common.ty_f64) { llty = T_f64(); }
}
}
case (ty.ty_char) { llty = T_char(); }
case (ty.ty_str) { llty = T_ptr(T_str()); }
case (ty.ty_tag(_, _)) {
if (ty.type_has_dynamic_size(cx.tcx, t)) {
llty = T_opaque_tag(cx.tn);
} else {
auto size = static_size_of_tag(cx, t);
llty = T_tag(cx.tn, size);
}
}
case (ty.ty_box(?mt)) {
llty = T_ptr(T_box(type_of_inner(cx, mt.ty)));
}
case (ty.ty_vec(?mt)) {
llty = T_ptr(T_vec(type_of_inner(cx, mt.ty)));
}
case (ty.ty_port(?t)) {
llty = T_ptr(T_port(type_of_inner(cx, t)));
}
case (ty.ty_chan(?t)) {
llty = T_ptr(T_chan(type_of_inner(cx, t)));
}
case (ty.ty_tup(?elts)) {
let vec[TypeRef] tys = vec();
for (ty.mt elt in elts) {
tys += vec(type_of_inner(cx, elt.ty));
}
llty = T_struct(tys);
}
case (ty.ty_rec(?fields)) {
let vec[TypeRef] tys = vec();
for (ty.field f in fields) {
tys += vec(type_of_inner(cx, f.mt.ty));
}
llty = T_struct(tys);
}
case (ty.ty_fn(?proto, ?args, ?out)) {
llty = T_fn_pair(cx.tn, type_of_fn(cx, proto, args, out, 0u));
}
case (ty.ty_native_fn(?abi, ?args, ?out)) {
auto nft = native_fn_wrapper_type(cx, 0u, t);
llty = T_fn_pair(cx.tn, nft);
}
case (ty.ty_obj(?meths)) {
auto th = mk_type_handle();
auto self_ty = llvm.LLVMResolveTypeHandle(th.llth);
let vec[TypeRef] mtys = vec(T_ptr(T_i8()));
for (ty.method m in meths) {
let TypeRef mty =
type_of_fn_full(cx, m.proto,
some[TypeRef](self_ty),
m.inputs, m.output, 0u);
mtys += vec(T_ptr(mty));
}
let TypeRef vtbl = T_struct(mtys);
let TypeRef pair = T_struct(vec(T_ptr(vtbl),
T_opaque_obj_ptr(cx.tn)));
auto abs_pair = llvm.LLVMResolveTypeHandle(th.llth);
llvm.LLVMRefineType(abs_pair, pair);
abs_pair = llvm.LLVMResolveTypeHandle(th.llth);
llty = abs_pair;
}
case (ty.ty_var(_)) {
log_err "ty_var in trans.type_of";
fail;
}
case (ty.ty_param(_)) {
llty = T_i8();
}
case (ty.ty_bound_param(_)) {
log_err "ty_bound_param in trans.type_of";
fail;
}
case (ty.ty_type) { llty = T_ptr(T_tydesc(cx.tn)); }
}
assert (llty as int != 0);
llvm.LLVMAddTypeName(cx.llmod, Str.buf(ty.ty_to_short_str(cx.tcx, t)),
llty);
cx.lltypes.insert(t, llty);
ret llty;
}
fn type_of_arg(@local_ctxt cx, &ty.arg arg) -> TypeRef {
alt (ty.struct(cx.ccx.tcx, arg.ty)) {
case (ty.ty_param(_)) {
if (arg.mode == ty.mo_alias) {
ret T_typaram_ptr(cx.ccx.tn);
}
}
case (_) {
// fall through
}
}
auto typ;
if (arg.mode == ty.mo_alias) {
typ = T_ptr(type_of_inner(cx.ccx, arg.ty));
} else {
typ = type_of_inner(cx.ccx, arg.ty);
}
ret typ;
}
fn type_of_ty_param_count_and_ty(@local_ctxt lcx,
ty.ty_param_count_and_ty tpt) -> TypeRef {
alt (ty.struct(lcx.ccx.tcx, tpt._1)) {
case (ty.ty_fn(?proto, ?inputs, ?output)) {
auto llfnty = type_of_fn(lcx.ccx, proto, inputs, output, tpt._0);
ret T_fn_pair(lcx.ccx.tn, llfnty);
}
case (_) {
// fall through
}
}
ret type_of(lcx.ccx, tpt._1);
}
// Name sanitation. LLVM will happily accept identifiers with weird names, but
// gas doesn't!
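// For example, sanitize("tup(int,int)") yields "tup_of_int_int".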
fn sanitize(str s) -> str {
auto result = "";
for (u8 c in s) {
if (c == ('@' as u8)) {
result += "boxed_";
} else {
if (c == (',' as u8)) {
result += "_";
} else {
if (c == ('{' as u8) || c == ('(' as u8)) {
result += "_of_";
} else {
if (c != 10u8 && c != ('}' as u8) && c != (')' as u8) &&
c != (' ' as u8) && c != ('\t' as u8) &&
c != (';' as u8)) {
auto v = vec(c);
result += Str.from_bytes(v);
}
}
}
}
}
ret result;
}
// LLVM constant constructors.
fn C_null(TypeRef t) -> ValueRef {
ret llvm.LLVMConstNull(t);
}
fn C_integral(TypeRef t, uint u, Bool sign_extend) -> ValueRef {
// FIXME. We can't use LLVM.ULongLong with our existing minimal native
// API, which only knows word-sized args.
//
// ret llvm.LLVMConstInt(T_int(), t as LLVM.ULongLong, False);
//
ret llvm.LLVMRustConstSmallInt(t, u, sign_extend);
}
fn C_float(str s) -> ValueRef {
ret llvm.LLVMConstRealOfString(T_float(), Str.buf(s));
}
fn C_floating(str s, TypeRef t) -> ValueRef {
ret llvm.LLVMConstRealOfString(t, Str.buf(s));
}
fn C_nil() -> ValueRef {
// NB: See comment above in T_void().
ret C_integral(T_i1(), 0u, False);
}
fn C_bool(bool b) -> ValueRef {
if (b) {
ret C_integral(T_bool(), 1u, False);
} else {
ret C_integral(T_bool(), 0u, False);
}
}
fn C_int(int i) -> ValueRef {
ret C_integral(T_int(), i as uint, True);
}
fn C_u8(uint i) -> ValueRef {
ret C_integral(T_i8(), i, False);
}
// This is a 'c-like' raw string, which differs from
// our boxed-and-length-annotated strings.
fn C_cstr(@crate_ctxt cx, str s) -> ValueRef {
auto sc = llvm.LLVMConstString(Str.buf(s), Str.byte_len(s), False);
auto g = llvm.LLVMAddGlobal(cx.llmod, val_ty(sc),
Str.buf(cx.names.next("str")));
llvm.LLVMSetInitializer(g, sc);
llvm.LLVMSetGlobalConstant(g, True);
llvm.LLVMSetLinkage(g, lib.llvm.LLVMInternalLinkage
as llvm.Linkage);
ret g;
}
// A rust boxed-and-length-annotated string.
fn C_str(@crate_ctxt cx, str s) -> ValueRef {
auto len = Str.byte_len(s);
auto box = C_struct(vec(C_int(abi.const_refcount as int),
C_int(len + 1u as int), // 'alloc'
C_int(len + 1u as int), // 'fill'
C_int(0), // 'pad'
llvm.LLVMConstString(Str.buf(s),
len, False)));
auto g = llvm.LLVMAddGlobal(cx.llmod, val_ty(box),
Str.buf(cx.names.next("str")));
llvm.LLVMSetInitializer(g, box);
llvm.LLVMSetGlobalConstant(g, True);
llvm.LLVMSetLinkage(g, lib.llvm.LLVMInternalLinkage
as llvm.Linkage);
ret llvm.LLVMConstPointerCast(g, T_ptr(T_str()));
}
fn C_zero_byte_arr(uint size) -> ValueRef {
auto i = 0u;
let vec[ValueRef] elts = vec();
while (i < size) {
elts += vec(C_u8(0u));
i += 1u;
}
ret llvm.LLVMConstArray(T_i8(), Vec.buf[ValueRef](elts),
Vec.len[ValueRef](elts));
}
fn C_struct(vec[ValueRef] elts) -> ValueRef {
ret llvm.LLVMConstStruct(Vec.buf[ValueRef](elts),
Vec.len[ValueRef](elts),
False);
}
fn C_array(TypeRef ty, vec[ValueRef] elts) -> ValueRef {
ret llvm.LLVMConstArray(ty, Vec.buf[ValueRef](elts),
Vec.len[ValueRef](elts));
}
fn decl_fn(ModuleRef llmod, str name, uint cc, TypeRef llty) -> ValueRef {
let ValueRef llfn =
llvm.LLVMAddFunction(llmod, Str.buf(name), llty);
llvm.LLVMSetFunctionCallConv(llfn, cc);
ret llfn;
}
fn decl_cdecl_fn(ModuleRef llmod, str name, TypeRef llty) -> ValueRef {
ret decl_fn(llmod, name, lib.llvm.LLVMCCallConv, llty);
}
fn decl_fastcall_fn(ModuleRef llmod, str name, TypeRef llty) -> ValueRef {
ret decl_fn(llmod, name, lib.llvm.LLVMFastCallConv, llty);
}
fn decl_internal_fastcall_fn(ModuleRef llmod,
str name, TypeRef llty) -> ValueRef {
auto llfn = decl_fn(llmod, name, lib.llvm.LLVMFastCallConv, llty);
llvm.LLVMSetLinkage(llfn, lib.llvm.LLVMInternalLinkage as llvm.Linkage);
ret llfn;
}
fn decl_glue(ModuleRef llmod, type_names tn, str s) -> ValueRef {
ret decl_cdecl_fn(llmod, s, T_fn(vec(T_taskptr(tn)), T_void()));
}
fn decl_native_glue(ModuleRef llmod, type_names tn,
abi.native_glue_type ngt, uint _n) -> ValueRef {
let bool pass_task;
alt (ngt) {
case (abi.ngt_rust) { pass_task = true; }
case (abi.ngt_pure_rust) { pass_task = true; }
case (abi.ngt_cdecl) { pass_task = false; }
}
// It doesn't actually matter what type we come up with here, at the
// moment, as we cast the native function pointers to int before passing
// them to the indirect native-invocation glue. But eventually we'd like
// to call them directly, once we have a calling convention worked out.
let int n = _n as int;
let str s = abi.native_glue_name(n, ngt);
let vec[TypeRef] args = vec(T_int()); // callee
if (!pass_task) {
args += vec(T_int()); // taskptr, will not be passed
}
args += Vec.init_elt[TypeRef](T_int(), n as uint);
ret decl_fastcall_fn(llmod, s, T_fn(args, T_int()));
}
fn get_extern_fn(&hashmap[str, ValueRef] externs,
ModuleRef llmod, str name,
uint cc, TypeRef ty) -> ValueRef {
if (externs.contains_key(name)) {
ret externs.get(name);
}
auto f = decl_fn(llmod, name, cc, ty);
externs.insert(name, f);
ret f;
}
fn get_extern_const(&hashmap[str, ValueRef] externs,
ModuleRef llmod, str name, TypeRef ty) -> ValueRef {
if (externs.contains_key(name)) {
ret externs.get(name);
}
auto c = llvm.LLVMAddGlobal(llmod, ty, Str.buf(name));
externs.insert(name, c);
ret c;
}
fn get_simple_extern_fn(&hashmap[str, ValueRef] externs,
ModuleRef llmod, str name, int n_args) -> ValueRef {
auto inputs = Vec.init_elt[TypeRef](T_int(), n_args as uint);
auto output = T_int();
auto t = T_fn(inputs, output);
ret get_extern_fn(externs, llmod, name, lib.llvm.LLVMCCallConv, t);
}
fn trans_upcall(@block_ctxt cx, str name, vec[ValueRef] args, bool pure)
-> result {
auto cxx = cx.fcx.lcx.ccx;
auto lltaskptr = cx.build.PtrToInt(cx.fcx.lltaskptr, T_int());
auto args2 = vec(lltaskptr) + args;
auto t = trans_native_call(cx.build, cxx.glues, lltaskptr,
cxx.externs, cxx.tn, cxx.llmod, name,
true, args2);
ret res(cx, t);
}
fn trans_native_call(builder b, @glue_fns glues, ValueRef lltaskptr,
&hashmap[str, ValueRef] externs,
type_names tn, ModuleRef llmod, str name,
bool pass_task, vec[ValueRef] args) -> ValueRef {
let int n = (Vec.len[ValueRef](args) as int);
let ValueRef llnative = get_simple_extern_fn(externs, llmod, name, n);
llnative = llvm.LLVMConstPointerCast(llnative, T_int());
let ValueRef llglue;
if (pass_task) {
llglue = glues.native_glues_rust.(n);
} else {
llglue = glues.native_glues_cdecl.(n);
}
let vec[ValueRef] call_args = vec(llnative);
if (!pass_task) {
call_args += vec(lltaskptr);
}
for (ValueRef a in args) {
call_args += vec(b.ZExtOrBitCast(a, T_int()));
}
ret b.FastCall(llglue, call_args);
}
fn trans_non_gc_free(@block_ctxt cx, ValueRef v) -> result {
ret trans_upcall(cx, "upcall_free", vec(vp2i(cx, v), C_int(0)), false);
}
fn find_scope_cx(@block_ctxt cx) -> @block_ctxt {
if (cx.kind != NON_SCOPE_BLOCK) {
ret cx;
}
alt (cx.parent) {
case (parent_some(?b)) {
be find_scope_cx(b);
}
case (parent_none) {
fail;
}
}
}
fn find_outer_scope_cx(@block_ctxt cx) -> @block_ctxt {
auto scope_cx = find_scope_cx(cx);
alt (scope_cx.parent) {
case (parent_some(?b)) {
be find_scope_cx(b);
}
case (parent_none) {
fail;
}
}
}
fn umax(@block_ctxt cx, ValueRef a, ValueRef b) -> ValueRef {
auto cond = cx.build.ICmp(lib.llvm.LLVMIntULT, a, b);
ret cx.build.Select(cond, b, a);
}
fn umin(@block_ctxt cx, ValueRef a, ValueRef b) -> ValueRef {
auto cond = cx.build.ICmp(lib.llvm.LLVMIntULT, a, b);
ret cx.build.Select(cond, a, b);
}
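// Round `off` up to the next multiple of `align`. This uses the usual
// (off + align - 1) & !(align - 1) trick, which assumes that `align` is
// a power of two.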
fn align_to(@block_ctxt cx, ValueRef off, ValueRef align) -> ValueRef {
auto mask = cx.build.Sub(align, C_int(1));
auto bumped = cx.build.Add(off, mask);
ret cx.build.And(bumped, cx.build.Not(mask));
}
// Returns the real size of the given type for the current target.
fn llsize_of_real(@crate_ctxt cx, TypeRef t) -> uint {
ret llvm.LLVMStoreSizeOfType(cx.td.lltd, t);
}
fn llsize_of(TypeRef t) -> ValueRef {
ret llvm.LLVMConstIntCast(lib.llvm.llvm.LLVMSizeOf(t), T_int(), False);
}
fn llalign_of(TypeRef t) -> ValueRef {
ret llvm.LLVMConstIntCast(lib.llvm.llvm.LLVMAlignOf(t), T_int(), False);
}
fn size_of(@block_ctxt cx, ty.t t) -> result {
if (!ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, t)) {
ret res(cx, llsize_of(type_of(cx.fcx.lcx.ccx, t)));
}
ret dynamic_size_of(cx, t);
}
fn align_of(@block_ctxt cx, ty.t t) -> result {
if (!ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, t)) {
ret res(cx, llalign_of(type_of(cx.fcx.lcx.ccx, t)));
}
ret dynamic_align_of(cx, t);
}
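// Allocas are always emitted into the function's dedicated llallocas
// block, so every stack slot is created once, at function entry, rather
// than re-allocated each time a (possibly looping) block requests it.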
fn alloca(@block_ctxt cx, TypeRef t) -> ValueRef {
ret new_builder(cx.fcx.llallocas).Alloca(t);
}
fn array_alloca(@block_ctxt cx, TypeRef t, ValueRef n) -> ValueRef {
ret new_builder(cx.fcx.llallocas).ArrayAlloca(t, n);
}
// Creates a simpler, size-equivalent type. The resulting type is
// guaranteed (a) to have the same size as the type that was passed in and
// (b) to be non-recursive. This is done by replacing all boxes in a type
// with boxed unit types.
fn simplify_type(@crate_ctxt ccx, ty.t typ) -> ty.t {
fn simplifier(@crate_ctxt ccx, ty.t typ) -> ty.t {
alt (ty.struct(ccx.tcx, typ)) {
case (ty.ty_box(_)) {
ret ty.mk_imm_box(ccx.tcx, ty.mk_nil(ccx.tcx));
}
case (_) { ret typ; }
}
}
auto f = bind simplifier(ccx, _);
ret ty.fold_ty(ccx.tcx, f, typ);
}
// Computes the size of the data part of a non-dynamically-sized tag.
fn static_size_of_tag(@crate_ctxt cx, ty.t t) -> uint {
if (ty.type_has_dynamic_size(cx.tcx, t)) {
log_err "dynamically sized type passed to static_size_of_tag()";
fail;
}
if (cx.tag_sizes.contains_key(t)) {
ret cx.tag_sizes.get(t);
}
auto tid;
let vec[ty.t] subtys;
alt (ty.struct(cx.tcx, t)) {
case (ty.ty_tag(?tid_, ?subtys_)) {
tid = tid_;
subtys = subtys_;
}
case (_) {
log_err "non-tag passed to static_size_of_tag()";
fail;
}
}
// Compute max(variant sizes).
auto max_size = 0u;
auto variants = tag_variants(cx, tid);
for (variant_info variant in variants) {
auto tup_ty = simplify_type(cx, ty.mk_imm_tup(cx.tcx, variant.args));
// Perform any type parameter substitutions.
tup_ty = ty.bind_params_in_type(cx.tcx, tup_ty);
tup_ty = ty.substitute_type_params(cx.tcx, subtys, tup_ty);
// Here we possibly do a recursive call.
auto this_size = llsize_of_real(cx, type_of(cx, tup_ty));
if (max_size < this_size) {
max_size = this_size;
}
}
cx.tag_sizes.insert(t, max_size);
ret max_size;
}
fn dynamic_size_of(@block_ctxt cx, ty.t t) -> result {
fn align_elements(@block_ctxt cx, vec[ty.t] elts) -> result {
//
// C padding rules:
//
// - Pad after each element so that next element is aligned.
// - Pad after final structure member so that whole structure
// is aligned to max alignment of interior.
//
auto off = C_int(0);
auto max_align = C_int(1);
auto bcx = cx;
for (ty.t e in elts) {
auto elt_align = align_of(bcx, e);
bcx = elt_align.bcx;
auto elt_size = size_of(bcx, e);
bcx = elt_size.bcx;
auto aligned_off = align_to(bcx, off, elt_align.val);
off = bcx.build.Add(aligned_off, elt_size.val);
max_align = umax(bcx, max_align, elt_align.val);
}
off = align_to(bcx, off, max_align);
ret res(bcx, off);
}
alt (ty.struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty.ty_param(?p)) {
auto szptr = field_of_tydesc(cx, t, false, abi.tydesc_field_size);
ret res(szptr.bcx, szptr.bcx.build.Load(szptr.val));
}
case (ty.ty_tup(?elts)) {
let vec[ty.t] tys = vec();
for (ty.mt mt in elts) {
tys += vec(mt.ty);
}
ret align_elements(cx, tys);
}
case (ty.ty_rec(?flds)) {
let vec[ty.t] tys = vec();
for (ty.field f in flds) {
tys += vec(f.mt.ty);
}
ret align_elements(cx, tys);
}
case (ty.ty_tag(?tid, ?tps)) {
auto bcx = cx;
// Compute max(variant sizes).
let ValueRef max_size = alloca(bcx, T_int());
bcx.build.Store(C_int(0), max_size);
auto variants = tag_variants(bcx.fcx.lcx.ccx, tid);
for (variant_info variant in variants) {
// Perform type substitution on the raw argument types.
let vec[ty.t] raw_tys = variant.args;
let vec[ty.t] tys = vec();
for (ty.t raw_ty in raw_tys) {
auto t = ty.bind_params_in_type(cx.fcx.lcx.ccx.tcx,
raw_ty);
t = ty.substitute_type_params(cx.fcx.lcx.ccx.tcx, tps, t);
tys += vec(t);
}
auto rslt = align_elements(bcx, tys);
bcx = rslt.bcx;
auto this_size = rslt.val;
auto old_max_size = bcx.build.Load(max_size);
bcx.build.Store(umax(bcx, this_size, old_max_size), max_size);
}
auto max_size_val = bcx.build.Load(max_size);
auto total_size = bcx.build.Add(max_size_val, llsize_of(T_int()));
ret res(bcx, total_size);
}
}
}
fn dynamic_align_of(@block_ctxt cx, ty.t t) -> result {
alt (ty.struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty.ty_param(?p)) {
auto aptr = field_of_tydesc(cx, t, false, abi.tydesc_field_align);
ret res(aptr.bcx, aptr.bcx.build.Load(aptr.val));
}
case (ty.ty_tup(?elts)) {
auto a = C_int(1);
auto bcx = cx;
for (ty.mt e in elts) {
auto align = align_of(bcx, e.ty);
bcx = align.bcx;
a = umax(bcx, a, align.val);
}
ret res(bcx, a);
}
case (ty.ty_rec(?flds)) {
auto a = C_int(1);
auto bcx = cx;
for (ty.field f in flds) {
auto align = align_of(bcx, f.mt.ty);
bcx = align.bcx;
a = umax(bcx, a, align.val);
}
ret res(bcx, a);
}
case (ty.ty_tag(_, _)) {
ret res(cx, C_int(1)); // FIXME: stub
}
}
}
// Replacement for the LLVM 'GEP' instruction when field-indexing into a
// tuple-like structure (tup, rec) with a static index. This one is driven off
// ty.struct and knows what to do when it runs into a ty_param stuck in the
// middle of the thing it's GEP'ing into. Much like size_of and align_of,
// above.
fn GEP_tup_like(@block_ctxt cx, ty.t t,
ValueRef base, vec[int] ixs) -> result {
assert (ty.type_is_tup_like(cx.fcx.lcx.ccx.tcx, t));
// It might be a static-known type. Handle this.
if (! ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, t)) {
let vec[ValueRef] v = vec();
for (int i in ixs) {
v += vec(C_int(i));
}
ret res(cx, cx.build.GEP(base, v));
}
// It is a dynamic-containing type that, if we convert directly to an LLVM
// TypeRef, will be all wrong; there's no proper LLVM type to represent
// it, and the lowering function will stick in i8* values for each
// ty_param, which is not right; the ty_params are all of some dynamic
// size.
//
// What we must do instead is sadder. We must look through the indices
// manually and split the input type into a prefix and a target. We then
// measure the prefix size, bump the input pointer by that amount, and
// cast to a pointer-to-target type.
// Given a type, an index vector and an element number N in that vector,
// calculate index X and the type that results by taking the first X-1
// elements of the type and splitting the Xth off. Return the prefix as
// well as the innermost Xth type.
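// For example, given t = tup(a, b, c) and ixs = [0, 2], this returns
// prefix = [a, b] and target = c.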
fn split_type(@crate_ctxt ccx, ty.t t, vec[int] ixs, uint n)
-> rec(vec[ty.t] prefix, ty.t target) {
let uint len = Vec.len[int](ixs);
// We don't support 0-index or 1-index GEPs. The former is nonsense
// and the latter would only be meaningful if we supported non-0
// values for the 0th index (we don't).
assert (len > 1u);
if (n == 0u) {
// Since we're starting from a value that's a pointer to a
// *single* structure, the first index (in GEP-ese) should just be
// 0, to yield the pointee.
assert (ixs.(n) == 0);
ret split_type(ccx, t, ixs, n+1u);
}
assert (n < len);
let int ix = ixs.(n);
let vec[ty.t] prefix = vec();
let int i = 0;
while (i < ix) {
Vec.push[ty.t](prefix,
ty.get_element_type(ccx.tcx, t, i as uint));
i += 1 ;
}
auto selected = ty.get_element_type(ccx.tcx, t, i as uint);
if (n == len-1u) {
// We are at the innermost index.
ret rec(prefix=prefix, target=selected);
} else {
// Not the innermost index; call self recursively to dig deeper.
// Once we get an inner result, append it to the current prefix and
// return to caller.
auto inner = split_type(ccx, selected, ixs, n+1u);
prefix += inner.prefix;
ret rec(prefix=prefix with inner);
}
}
// We make a fake prefix tuple-type here; luckily for measuring sizes
// the tuple parens are associative so it doesn't matter that we've
// flattened the incoming structure.
auto s = split_type(cx.fcx.lcx.ccx, t, ixs, 0u);
auto prefix_ty = ty.mk_imm_tup(cx.fcx.lcx.ccx.tcx, s.prefix);
auto bcx = cx;
auto sz = size_of(bcx, prefix_ty);
bcx = sz.bcx;
auto raw = bcx.build.PointerCast(base, T_ptr(T_i8()));
auto bumped = bcx.build.GEP(raw, vec(sz.val));
if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, s.target)) {
ret res(bcx, bumped);
}
auto typ = T_ptr(type_of(bcx.fcx.lcx.ccx, s.target));
ret res(bcx, bcx.build.PointerCast(bumped, typ));
}
// Replacement for the LLVM 'GEP' instruction when field indexing into a tag.
// This function uses GEP_tup_like() above and automatically performs casts as
// appropriate. @llblobptr is the data part of a tag value; its actual type is
// meaningless, as it will be cast away.
fn GEP_tag(@block_ctxt cx,
ValueRef llblobptr,
&ast.def_id tag_id,
&ast.def_id variant_id,
vec[ty.t] ty_substs,
int ix)
-> result {
auto variant = tag_variant_with_id(cx.fcx.lcx.ccx, tag_id, variant_id);
// Synthesize a tuple type so that GEP_tup_like() can work its magic.
// Separately, store the type of the element we're interested in.
auto arg_tys = variant.args;
auto elem_ty = ty.mk_nil(cx.fcx.lcx.ccx.tcx); // typestate infelicity
auto i = 0;
let vec[ty.t] true_arg_tys = vec();
for (ty.t aty in arg_tys) {
auto arg_ty = ty.bind_params_in_type(cx.fcx.lcx.ccx.tcx, aty);
arg_ty = ty.substitute_type_params(cx.fcx.lcx.ccx.tcx, ty_substs,
arg_ty);
true_arg_tys += vec(arg_ty);
if (i == ix) {
elem_ty = arg_ty;
}
i += 1;
}
auto tup_ty = ty.mk_imm_tup(cx.fcx.lcx.ccx.tcx, true_arg_tys);
// Cast the blob pointer to the appropriate type, if we need to (i.e. if
// the tuple type doesn't have dynamic size).
let ValueRef llunionptr;
if (!ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, tup_ty)) {
auto llty = type_of(cx.fcx.lcx.ccx, tup_ty);
llunionptr = cx.build.TruncOrBitCast(llblobptr, T_ptr(llty));
} else {
llunionptr = llblobptr;
}
// Do the GEP_tup_like().
auto rslt = GEP_tup_like(cx, tup_ty, llunionptr, vec(0, ix));
// Cast the result to the appropriate type, if necessary.
auto val;
if (!ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, elem_ty)) {
auto llelemty = type_of(rslt.bcx.fcx.lcx.ccx, elem_ty);
val = rslt.bcx.build.PointerCast(rslt.val, T_ptr(llelemty));
} else {
val = rslt.val;
}
ret res(rslt.bcx, val);
}
fn trans_raw_malloc(@block_ctxt cx, TypeRef llptr_ty, ValueRef llsize)
-> result {
// FIXME: need a table to collect tydesc globals.
auto tydesc = C_int(0);
auto rslt = trans_upcall(cx, "upcall_malloc", vec(llsize, tydesc), false);
rslt = res(rslt.bcx, vi2p(rslt.bcx, rslt.val, llptr_ty));
ret rslt;
}
fn trans_malloc_boxed(@block_ctxt cx, ty.t t) -> result {
// Synthesize a fake box type structurally so we have something
// to measure the size of.
auto boxed_body = ty.mk_imm_tup(cx.fcx.lcx.ccx.tcx,
vec(ty.mk_int(cx.fcx.lcx.ccx.tcx), t));
auto box_ptr = ty.mk_imm_box(cx.fcx.lcx.ccx.tcx, t);
auto sz = size_of(cx, boxed_body);
auto llty = type_of(cx.fcx.lcx.ccx, box_ptr);
ret trans_raw_malloc(sz.bcx, llty, sz.val);
}
// Type descriptor and type glue stuff
// Given a type and a field index into its corresponding type descriptor,
// returns an LLVM ValueRef of that field from the tydesc, generating the
// tydesc if necessary.
fn field_of_tydesc(@block_ctxt cx, ty.t t, bool escapes, int field)
-> result {
auto tydesc = get_tydesc(cx, t, escapes);
ret res(tydesc.bcx,
tydesc.bcx.build.GEP(tydesc.val, vec(C_int(0), C_int(field))));
}
// Given a type containing ty params, build a vector containing a ValueRef for
// each of the ty params it uses (from the current frame) and a vector of the
// indices of the ty params present in the type. This is used solely for
// constructing derived tydescs.
fn linearize_ty_params(@block_ctxt cx, ty.t t) ->
tup(vec[uint], vec[ValueRef]) {
let vec[ValueRef] param_vals = vec();
let vec[uint] param_defs = vec();
type rr = rec(@block_ctxt cx,
mutable vec[ValueRef] vals,
mutable vec[uint] defs);
fn linearizer(@rr r, ty.t t) {
alt(ty.struct(r.cx.fcx.lcx.ccx.tcx, t)) {
case (ty.ty_param(?pid)) {
let bool seen = false;
for (uint d in r.defs) {
if (d == pid) {
seen = true;
}
}
if (!seen) {
r.vals += vec(r.cx.fcx.lltydescs.(pid));
r.defs += vec(pid);
}
}
case (_) { }
}
}
auto x = @rec(cx = cx,
mutable vals = param_vals,
mutable defs = param_defs);
auto f = bind linearizer(x, _);
ty.walk_ty(cx.fcx.lcx.ccx.tcx, f, t);
ret tup(x.defs, x.vals);
}
fn trans_stack_local_derived_tydesc(@block_ctxt cx, ValueRef llsz,
ValueRef llalign, ValueRef llroottydesc,
Option.t[ValueRef] llparamtydescs) -> result {
auto llmyroottydesc = alloca(cx, T_tydesc(cx.fcx.lcx.ccx.tn));
// By convention, desc 0 is the root descriptor.
llroottydesc = cx.build.Load(llroottydesc);
cx.build.Store(llroottydesc, llmyroottydesc);
// Store a pointer to the rest of the descriptors.
auto llrootfirstparam = cx.build.GEP(llmyroottydesc,
vec(C_int(0), C_int(0)));
auto llfirstparam;
alt (llparamtydescs) {
case (none[ValueRef]) {
llfirstparam = C_null(val_ty(llrootfirstparam));
}
case (some[ValueRef](?llparamtydescs)) {
llfirstparam = cx.build.GEP(llparamtydescs,
vec(C_int(0), C_int(0)));
}
}
cx.build.Store(llfirstparam,
cx.build.GEP(llmyroottydesc, vec(C_int(0), C_int(0))));
cx.build.Store(llsz,
cx.build.GEP(llmyroottydesc, vec(C_int(0), C_int(1))));
cx.build.Store(llalign,
cx.build.GEP(llmyroottydesc, vec(C_int(0), C_int(2))));
ret res(cx, llmyroottydesc);
}
fn mk_derived_tydesc(@block_ctxt cx, ty.t t, bool escapes) -> result {
let uint n_params = ty.count_ty_params(cx.fcx.lcx.ccx.tcx, t);
auto tys = linearize_ty_params(cx, t);
assert (n_params == Vec.len[uint](tys._0));
assert (n_params == Vec.len[ValueRef](tys._1));
auto root = get_static_tydesc(cx, t, tys._0).tydesc;
auto bcx = cx;
auto sz = size_of(bcx, t);
bcx = sz.bcx;
auto align = align_of(bcx, t);
bcx = align.bcx;
auto v;
if (escapes) {
auto tydescs = alloca(cx, T_array(T_ptr(T_tydesc(cx.fcx.lcx.ccx.tn)),
1u /* for root*/ + n_params));
auto i = 0;
auto tdp = cx.build.GEP(tydescs, vec(C_int(0), C_int(i)));
cx.build.Store(root, tdp);
i += 1;
for (ValueRef td in tys._1) {
auto tdp = cx.build.GEP(tydescs, vec(C_int(0), C_int(i)));
cx.build.Store(td, tdp);
i += 1;
}
v = trans_upcall(bcx, "upcall_get_type_desc",
vec(p2i(bcx.fcx.lcx.ccx.crate_ptr),
sz.val,
align.val,
C_int((1u + n_params) as int),
vp2i(bcx, tydescs)), true);
} else {
auto llparamtydescs_opt;
if (n_params == 0u) {
llparamtydescs_opt = none[ValueRef];
} else {
auto llparamtydescs = alloca(cx,
T_array(T_ptr(T_tydesc(cx.fcx.lcx.ccx.tn)), n_params));
auto i = 0;
for (ValueRef td in tys._1) {
auto tdp = cx.build.GEP(llparamtydescs,
vec(C_int(0), C_int(i)));
cx.build.Store(td, tdp);
i += 1;
}
llparamtydescs_opt = some[ValueRef](llparamtydescs);
}
v = trans_stack_local_derived_tydesc(bcx, sz.val, align.val, root,
llparamtydescs_opt);
}
ret res(v.bcx, vi2p(v.bcx, v.val, T_ptr(T_tydesc(cx.fcx.lcx.ccx.tn))));
}
fn get_tydesc(&@block_ctxt cx, ty.t t, bool escapes) -> result {
// Is the supplied type a type param? If so, return the passed-in tydesc.
alt (ty.type_param(cx.fcx.lcx.ccx.tcx, t)) {
case (some[uint](?id)) { ret res(cx, cx.fcx.lltydescs.(id)); }
case (none[uint]) { /* fall through */ }
}
// Does it contain a type param? If so, generate a derived tydesc.
if (ty.type_contains_params(cx.fcx.lcx.ccx.tcx, t)) {
ret mk_derived_tydesc(cx, t, escapes);
}
// Otherwise, generate a tydesc if necessary, and return it.
let vec[uint] tps = vec();
auto st = get_static_tydesc(cx, t, tps).tydesc;
ret res(cx, st);
}
fn get_static_tydesc(&@block_ctxt cx,
ty.t t, vec[uint] ty_params) -> @tydesc_info {
alt (cx.fcx.lcx.ccx.tydescs.find(t)) {
case (some[@tydesc_info](?info)) {
ret info;
}
case (none[@tydesc_info]) {
// FIXME: Use of a simplified tydesc (w/o names) removes a lot of
// generated glue, but the compile time goes way up due to
// greatly increasing the miss rate on the type_of cache elsewhere
// in this file. Experiment with other approaches to this.
/*
fn simplifier(ty.t typ) -> ty.t {
ret @rec(cname=none[str] with *typ);
}
auto f = simplifier;
auto t_simplified = ty.fold_ty(cx.fcx.lcx.ccx.tcx, f, t);
auto info = declare_tydesc(cx.fcx.lcx, t_simplified);
cx.fcx.lcx.ccx.tydescs.insert(t_simplified, info);
*/
auto info = declare_tydesc(cx.fcx.lcx, t);
cx.fcx.lcx.ccx.tydescs.insert(t, info);
define_tydesc(cx.fcx.lcx, t, ty_params);
ret info;
}
}
}
// Generates the declaration for (but doesn't fill in) a type descriptor. This
// needs to be separate from define_tydesc() below, because sometimes type
// glue functions need to refer to their own type descriptors.
fn declare_tydesc(@local_ctxt cx, ty.t t) -> @tydesc_info {
auto take_glue = declare_generic_glue(cx, t, T_glue_fn(cx.ccx.tn),
"take");
auto drop_glue = declare_generic_glue(cx, t, T_glue_fn(cx.ccx.tn),
"drop");
auto cmp_glue = declare_generic_glue(cx, t, T_cmp_glue_fn(cx.ccx.tn),
"cmp");
auto ccx = cx.ccx;
auto llsize;
auto llalign;
if (!ty.type_has_dynamic_size(ccx.tcx, t)) {
auto llty = type_of(ccx, t);
llsize = llsize_of(llty);
llalign = llalign_of(llty);
} else {
// These will be overwritten as the derived tydesc is generated, so
// we create placeholder values.
llsize = C_int(0);
llalign = C_int(0);
}
auto glue_fn_ty = T_ptr(T_glue_fn(ccx.tn));
auto name = mangle_name_by_type_only(ccx, t, "tydesc");
auto gvar = llvm.LLVMAddGlobal(ccx.llmod, T_tydesc(ccx.tn),
Str.buf(name));
auto tydesc = C_struct(vec(C_null(T_ptr(T_ptr(T_tydesc(ccx.tn)))),
llsize,
llalign,
take_glue, // take_glue
drop_glue, // drop_glue
C_null(glue_fn_ty), // free_glue
C_null(glue_fn_ty), // sever_glue
C_null(glue_fn_ty), // mark_glue
C_null(glue_fn_ty), // obj_drop_glue
C_null(glue_fn_ty), // is_stateful
cmp_glue)); // cmp_glue
llvm.LLVMSetInitializer(gvar, tydesc);
llvm.LLVMSetGlobalConstant(gvar, True);
llvm.LLVMSetLinkage(gvar, lib.llvm.LLVMInternalLinkage
as llvm.Linkage);
auto info = @rec(
tydesc=gvar,
take_glue=take_glue,
drop_glue=drop_glue,
cmp_glue=cmp_glue
);
ret info;
}
tag make_generic_glue_helper_fn {
mgghf_single(fn(@block_ctxt cx, ValueRef v, ty.t t));
mgghf_cmp;
}
// declare_tydesc() above must have been called first.
fn define_tydesc(@local_ctxt cx, ty.t t, vec[uint] ty_params) {
auto info = cx.ccx.tydescs.get(t);
auto gvar = info.tydesc;
auto tg = make_take_glue;
make_generic_glue(cx, t, info.take_glue, mgghf_single(tg), ty_params);
auto dg = make_drop_glue;
make_generic_glue(cx, t, info.drop_glue, mgghf_single(dg), ty_params);
make_generic_glue(cx, t, info.cmp_glue, mgghf_cmp, ty_params);
}
fn declare_generic_glue(@local_ctxt cx,
ty.t t,
TypeRef llfnty,
str name) -> ValueRef {
auto fn_nm;
if (cx.ccx.sess.get_opts().debuginfo) {
fn_nm = mangle_name_by_type_only(cx.ccx, t, "glue_" + name);
fn_nm = sanitize(fn_nm);
} else {
fn_nm = mangle_name_by_seq(cx.ccx, cx.path, "glue_" + name);
}
auto llfn = decl_internal_fastcall_fn(cx.ccx.llmod, fn_nm, llfnty);
ret llfn;
}
fn make_generic_glue(@local_ctxt cx,
ty.t t,
ValueRef llfn,
make_generic_glue_helper_fn helper,
vec[uint] ty_params) -> ValueRef {
auto fcx = new_fn_ctxt(cx, llfn);
auto bcx = new_top_block_ctxt(fcx);
auto lltop = bcx.llbb;
// Any nontrivial glue works on values passed *by alias*; this is a
// requirement since in many contexts glue is invoked indirectly and
// the caller has no idea if it's dealing with something that can be
// passed by value.
auto llty;
if (ty.type_has_dynamic_size(cx.ccx.tcx, t)) {
llty = T_ptr(T_i8());
} else {
llty = T_ptr(type_of(cx.ccx, t));
}
auto ty_param_count = Vec.len[uint](ty_params);
auto lltyparams = llvm.LLVMGetParam(llfn, 3u);
auto lltydescs = Vec.empty_mut[ValueRef]();
auto p = 0u;
while (p < ty_param_count) {
auto llparam = bcx.build.GEP(lltyparams, vec(C_int(p as int)));
llparam = bcx.build.Load(llparam);
Vec.grow_set[ValueRef](lltydescs, ty_params.(p), 0 as ValueRef,
llparam);
p += 1u;
}
bcx.fcx.lltydescs = Vec.freeze[ValueRef](lltydescs);
auto llrawptr0 = llvm.LLVMGetParam(llfn, 4u);
auto llval0 = bcx.build.BitCast(llrawptr0, llty);
alt (helper) {
case (mgghf_single(?single_fn)) {
single_fn(bcx, llval0, t);
}
case (mgghf_cmp) {
auto llrawptr1 = llvm.LLVMGetParam(llfn, 5u);
auto llval1 = bcx.build.BitCast(llrawptr1, llty);
auto llcmpval = llvm.LLVMGetParam(llfn, 6u);
make_cmp_glue(bcx, llval0, llval1, t, llcmpval);
}
}
// Tie up the llallocas -> lltop edge.
new_builder(fcx.llallocas).Br(lltop);
ret llfn;
}
fn make_take_glue(@block_ctxt cx, ValueRef v, ty.t t) {
// NB: v is an *alias* of type t here, not a direct value.
auto bcx;
if (ty.type_is_boxed(cx.fcx.lcx.ccx.tcx, t)) {
bcx = incr_refcnt_of_boxed(cx, cx.build.Load(v)).bcx;
} else if (ty.type_is_structural(cx.fcx.lcx.ccx.tcx, t)) {
bcx = iter_structural_ty(cx, v, t,
bind take_ty(_, _, _)).bcx;
} else {
bcx = cx;
}
bcx.build.RetVoid();
}
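// Boxes whose refcount equals the const_refcount sentinel are statically
// allocated, so both the increment here and the decrement below skip them.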
fn incr_refcnt_of_boxed(@block_ctxt cx, ValueRef box_ptr) -> result {
auto rc_ptr = cx.build.GEP(box_ptr, vec(C_int(0),
C_int(abi.box_rc_field_refcnt)));
auto rc = cx.build.Load(rc_ptr);
auto rc_adj_cx = new_sub_block_ctxt(cx, "rc++");
auto next_cx = new_sub_block_ctxt(cx, "next");
auto const_test = cx.build.ICmp(lib.llvm.LLVMIntEQ,
C_int(abi.const_refcount as int), rc);
cx.build.CondBr(const_test, next_cx.llbb, rc_adj_cx.llbb);
rc = rc_adj_cx.build.Add(rc, C_int(1));
rc_adj_cx.build.Store(rc, rc_ptr);
rc_adj_cx.build.Br(next_cx.llbb);
ret res(next_cx, C_nil());
}
fn make_drop_glue(@block_ctxt cx, ValueRef v0, ty.t t) {
// NB: v0 is an *alias* of type t here, not a direct value.
auto rslt;
alt (ty.struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty.ty_str) {
auto v = cx.build.Load(v0);
rslt = decr_refcnt_and_if_zero
(cx, v, bind trans_non_gc_free(_, v),
"free string",
T_int(), C_int(0));
}
case (ty.ty_vec(_)) {
fn hit_zero(@block_ctxt cx, ValueRef v,
ty.t t) -> result {
auto res = iter_sequence(cx, v, t,
bind drop_ty(_,_,_));
// FIXME: switch gc/non-gc on layer of the type.
ret trans_non_gc_free(res.bcx, v);
}
auto v = cx.build.Load(v0);
rslt = decr_refcnt_and_if_zero(cx, v,
bind hit_zero(_, v, t),
"free vector",
T_int(), C_int(0));
}
case (ty.ty_box(?body_mt)) {
fn hit_zero(@block_ctxt cx, ValueRef v,
ty.t body_ty) -> result {
auto body = cx.build.GEP(v,
vec(C_int(0),
C_int(abi.box_rc_field_body)));
auto body_val = load_if_immediate(cx, body, body_ty);
auto res = drop_ty(cx, body_val, body_ty);
// FIXME: switch gc/non-gc on layer of the type.
ret trans_non_gc_free(res.bcx, v);
}
auto v = cx.build.Load(v0);
rslt = decr_refcnt_and_if_zero(cx, v,
bind hit_zero(_, v, body_mt.ty),
"free box",
T_int(), C_int(0));
}
case (ty.ty_port(_)) {
fn hit_zero(@block_ctxt cx, ValueRef v) -> result {
ret trans_upcall(cx, "upcall_del_port",
vec(vp2i(cx, v)), true);
}
auto v = cx.build.Load(v0);
rslt = decr_refcnt_and_if_zero(cx, v,
bind hit_zero(_, v),
"free port",
T_int(), C_int(0));
}
case (ty.ty_chan(_)) {
fn hit_zero(@block_ctxt cx, ValueRef v) -> result {
ret trans_upcall(cx, "upcall_del_chan",
vec(vp2i(cx, v)), true);
}
auto v = cx.build.Load(v0);
rslt = decr_refcnt_and_if_zero(cx, v,
bind hit_zero(_, v),
"free chan",
T_int(), C_int(0));
}
case (ty.ty_obj(_)) {
fn hit_zero(@block_ctxt cx, ValueRef b, ValueRef o) -> result {
auto body =
cx.build.GEP(b,
vec(C_int(0),
C_int(abi.box_rc_field_body)));
auto tydescptr =
cx.build.GEP(body,
vec(C_int(0),
C_int(abi.obj_body_elt_tydesc)));
auto tydesc = cx.build.Load(tydescptr);
auto cx_ = maybe_call_dtor(cx, o);
// Call through the obj's own fields-drop glue first.
call_tydesc_glue_full(cx_, body, tydesc,
abi.tydesc_field_drop_glue);
// Then free the body.
// FIXME: switch gc/non-gc on layer of the type.
ret trans_non_gc_free(cx_, b);
}
auto box_cell =
cx.build.GEP(v0,
vec(C_int(0),
C_int(abi.obj_field_box)));
auto boxptr = cx.build.Load(box_cell);
rslt = decr_refcnt_and_if_zero(cx, boxptr,
bind hit_zero(_, boxptr, v0),
"free obj",
T_int(), C_int(0));
}
case (ty.ty_fn(_,_,_)) {
fn hit_zero(@block_ctxt cx, ValueRef v) -> result {
// Call through the closure's own fields-drop glue first.
auto body =
cx.build.GEP(v,
vec(C_int(0),
C_int(abi.box_rc_field_body)));
auto bindings =
cx.build.GEP(body,
vec(C_int(0),
C_int(abi.closure_elt_bindings)));
auto tydescptr =
cx.build.GEP(body,
vec(C_int(0),
C_int(abi.closure_elt_tydesc)));
call_tydesc_glue_full(cx, bindings, cx.build.Load(tydescptr),
abi.tydesc_field_drop_glue);
// Then free the body.
// FIXME: switch gc/non-gc on layer of the type.
ret trans_non_gc_free(cx, v);
}
auto box_cell =
cx.build.GEP(v0,
vec(C_int(0),
C_int(abi.fn_field_box)));
auto boxptr = cx.build.Load(box_cell);
rslt = decr_refcnt_and_if_zero(cx, boxptr,
bind hit_zero(_, boxptr),
"free fn",
T_int(), C_int(0));
}
case (_) {
if (ty.type_is_structural(cx.fcx.lcx.ccx.tcx, t)) {
rslt = iter_structural_ty(cx, v0, t,
bind drop_ty(_, _, _));
} else if (ty.type_is_scalar(cx.fcx.lcx.ccx.tcx, t) ||
ty.type_is_native(cx.fcx.lcx.ccx.tcx, t) ||
ty.type_is_nil(cx.fcx.lcx.ccx.tcx, t)) {
rslt = res(cx, C_nil());
} else {
rslt = res(cx, C_nil());
}
}
}
rslt.bcx.build.RetVoid();
}
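// Emits the common refcount-decrement pattern: if box_ptr is non-null and
// not a const refcount, decrement it; if the count reaches zero, run
// `inner` (the free path). The returned value is a phi that is `v_else`
// on every path except the one through `inner`.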
fn decr_refcnt_and_if_zero(@block_ctxt cx,
ValueRef box_ptr,
fn(@block_ctxt cx) -> result inner,
str inner_name,
TypeRef t_else, ValueRef v_else) -> result {
auto load_rc_cx = new_sub_block_ctxt(cx, "load rc");
auto rc_adj_cx = new_sub_block_ctxt(cx, "rc--");
auto inner_cx = new_sub_block_ctxt(cx, inner_name);
auto next_cx = new_sub_block_ctxt(cx, "next");
auto null_test = cx.build.IsNull(box_ptr);
cx.build.CondBr(null_test, next_cx.llbb, load_rc_cx.llbb);
auto rc_ptr = load_rc_cx.build.GEP(box_ptr,
vec(C_int(0),
C_int(abi.box_rc_field_refcnt)));
auto rc = load_rc_cx.build.Load(rc_ptr);
auto const_test =
load_rc_cx.build.ICmp(lib.llvm.LLVMIntEQ,
C_int(abi.const_refcount as int), rc);
load_rc_cx.build.CondBr(const_test, next_cx.llbb, rc_adj_cx.llbb);
rc = rc_adj_cx.build.Sub(rc, C_int(1));
rc_adj_cx.build.Store(rc, rc_ptr);
auto zero_test = rc_adj_cx.build.ICmp(lib.llvm.LLVMIntEQ, C_int(0), rc);
rc_adj_cx.build.CondBr(zero_test, inner_cx.llbb, next_cx.llbb);
auto inner_res = inner(inner_cx);
inner_res.bcx.build.Br(next_cx.llbb);
auto phi = next_cx.build.Phi(t_else,
vec(v_else, v_else, v_else, inner_res.val),
vec(cx.llbb,
load_rc_cx.llbb,
rc_adj_cx.llbb,
inner_res.bcx.llbb));
ret res(next_cx, phi);
}
// Structural comparison: a rather involved form of glue.
fn make_cmp_glue(@block_ctxt cx,
ValueRef lhs0,
ValueRef rhs0,
ty.t t,
ValueRef llop) {
auto lhs = load_if_immediate(cx, lhs0, t);
auto rhs = load_if_immediate(cx, rhs0, t);
if (ty.type_is_scalar(cx.fcx.lcx.ccx.tcx, t)) {
make_scalar_cmp_glue(cx, lhs, rhs, t, llop);
} else if (ty.type_is_box(cx.fcx.lcx.ccx.tcx, t)) {
lhs = cx.build.GEP(lhs, vec(C_int(0), C_int(abi.box_rc_field_body)));
rhs = cx.build.GEP(rhs, vec(C_int(0), C_int(abi.box_rc_field_body)));
auto rslt = call_cmp_glue(cx, lhs, rhs, t, llop);
rslt.bcx.build.Store(rslt.val, cx.fcx.llretptr);
rslt.bcx.build.RetVoid();
} else if (ty.type_is_structural(cx.fcx.lcx.ccx.tcx, t)
|| ty.type_is_sequence(cx.fcx.lcx.ccx.tcx, t)) {
auto scx = new_sub_block_ctxt(cx, "structural compare start");
auto next = new_sub_block_ctxt(cx, "structural compare end");
cx.build.Br(scx.llbb);
/*
* We're doing lexicographic comparison here. We start with the
* assumption that the two input elements are equal. Depending on
* operator, this means that the result is either true or false;
* equality produces 'true' for ==, <= and >=. It produces 'false' for
* !=, < and >.
*
* We then move one element at a time through the structure checking
* for pairwise element equality. If we have equality, our assumption
* about overall sequence equality is not modified, so we have to move
* to the next element.
*
* If we do not have pairwise element equality, we have reached an
* element that 'decides' the lexicographic comparison. So we exit the
* loop with a flag that indicates the true/false sense of that
* decision, by testing the element again with the operator we're
* interested in.
*
* When we're lucky, LLVM should be able to fold some of these two
* tests together (as they're applied to the same operands and in some
* cases are redundant). But we don't bother trying to
* optimize combinations like that, at this level.
*/
auto flag = alloca(scx, T_i1());
llvm.LLVMSetValueName(flag, Str.buf("flag"));
auto r;
if (ty.type_is_sequence(cx.fcx.lcx.ccx.tcx, t)) {
// If we hit == all the way through the minimum-shared-length
// section, default to judging the relative sequence lengths.
r = compare_integral_values(scx,
vec_fill(scx, lhs),
vec_fill(scx, rhs),
false,
llop);
r.bcx.build.Store(r.val, flag);
} else {
// == and <= default to true if they find == all the way. <
// defaults to false if it finds == all the way.
auto result_if_equal = scx.build.ICmp(lib.llvm.LLVMIntNE, llop,
C_u8(abi.cmp_glue_op_lt));
scx.build.Store(result_if_equal, flag);
r = res(scx, C_nil());
}
fn inner(@block_ctxt last_cx,
bool load_inner,
ValueRef flag,
ValueRef llop,
@block_ctxt cx,
ValueRef av0,
ValueRef bv0,
ty.t t) -> result {
auto cnt_cx = new_sub_block_ctxt(cx, "continue_comparison");
auto stop_cx = new_sub_block_ctxt(cx, "stop_comparison");
auto av = av0;
auto bv = bv0;
if (load_inner) {
// If `load_inner` is true, then the pointer type will always
// be i8, because the data part of a vector always has type
// i8[]. So we need to cast it to the proper type.
if (!ty.type_has_dynamic_size(last_cx.fcx.lcx.ccx.tcx, t)) {
auto llelemty = T_ptr(type_of(last_cx.fcx.lcx.ccx, t));
av = cx.build.PointerCast(av, llelemty);
bv = cx.build.PointerCast(bv, llelemty);
}
av = load_if_immediate(cx, av, t);
bv = load_if_immediate(cx, bv, t);
}
// First 'eq' comparison: if so, continue to next elts.
auto eq_r = call_cmp_glue(cx, av, bv, t,
C_u8(abi.cmp_glue_op_eq));
eq_r.bcx.build.CondBr(eq_r.val, cnt_cx.llbb, stop_cx.llbb);
// Second 'op' comparison: find out how this elt-pair decides.
auto stop_r = call_cmp_glue(stop_cx, av, bv, t, llop);
stop_r.bcx.build.Store(stop_r.val, flag);
stop_r.bcx.build.Br(last_cx.llbb);
ret res(cnt_cx, C_nil());
}
if (ty.type_is_structural(cx.fcx.lcx.ccx.tcx, t)) {
r = iter_structural_ty_full(r.bcx, lhs, rhs, t,
bind inner(next, false, flag, llop,
_, _, _, _));
} else {
auto lhs_p0 = vec_p0(r.bcx, lhs);
auto rhs_p0 = vec_p0(r.bcx, rhs);
auto min_len = umin(r.bcx, vec_fill(r.bcx, lhs),
vec_fill(r.bcx, rhs));
auto rhs_lim = r.bcx.build.GEP(rhs_p0, vec(min_len));
auto elt_ty = ty.sequence_element_type(cx.fcx.lcx.ccx.tcx, t);
r = size_of(r.bcx, elt_ty);
r = iter_sequence_raw(r.bcx, lhs_p0, rhs_p0, rhs_lim, r.val,
bind inner(next, true, flag, llop,
_, _, _, elt_ty));
}
r.bcx.build.Br(next.llbb);
auto v = next.build.Load(flag);
next.build.Store(v, cx.fcx.llretptr);
next.build.RetVoid();
} else {
// FIXME: compare obj, fn by pointer?
trans_fail(cx, none[common.span],
"attempt to compare values of type " +
ty.ty_to_str(cx.fcx.lcx.ccx.tcx, t));
}
}
// A helper function to create scalar comparison glue.
fn make_scalar_cmp_glue(@block_ctxt cx, ValueRef lhs, ValueRef rhs, ty.t t,
ValueRef llop) {
if (ty.type_is_fp(cx.fcx.lcx.ccx.tcx, t)) {
make_fp_cmp_glue(cx, lhs, rhs, t, llop);
ret;
}
if (ty.type_is_integral(cx.fcx.lcx.ccx.tcx, t) ||
ty.type_is_bool(cx.fcx.lcx.ccx.tcx, t)) {
make_integral_cmp_glue(cx, lhs, rhs, t, llop);
ret;
}
if (ty.type_is_nil(cx.fcx.lcx.ccx.tcx, t)) {
cx.build.Store(C_bool(true), cx.fcx.llretptr);
cx.build.RetVoid();
ret;
}
trans_fail(cx, none[common.span],
"attempt to compare values of type " +
ty.ty_to_str(cx.fcx.lcx.ccx.tcx, t));
}
// A helper function to create floating point comparison glue.
fn make_fp_cmp_glue(@block_ctxt cx, ValueRef lhs, ValueRef rhs, ty.t fptype,
ValueRef llop) {
auto last_cx = new_sub_block_ctxt(cx, "last");
auto eq_cx = new_sub_block_ctxt(cx, "eq");
auto eq_result = eq_cx.build.FCmp(lib.llvm.LLVMRealUEQ, lhs, rhs);
eq_cx.build.Br(last_cx.llbb);
auto lt_cx = new_sub_block_ctxt(cx, "lt");
auto lt_result = lt_cx.build.FCmp(lib.llvm.LLVMRealULT, lhs, rhs);
lt_cx.build.Br(last_cx.llbb);
auto le_cx = new_sub_block_ctxt(cx, "le");
auto le_result = le_cx.build.FCmp(lib.llvm.LLVMRealULE, lhs, rhs);
le_cx.build.Br(last_cx.llbb);
auto unreach_cx = new_sub_block_ctxt(cx, "unreach");
unreach_cx.build.Unreachable();
auto llswitch = cx.build.Switch(llop, unreach_cx.llbb, 3u);
llvm.LLVMAddCase(llswitch, C_u8(abi.cmp_glue_op_eq), eq_cx.llbb);
llvm.LLVMAddCase(llswitch, C_u8(abi.cmp_glue_op_lt), lt_cx.llbb);
llvm.LLVMAddCase(llswitch, C_u8(abi.cmp_glue_op_le), le_cx.llbb);
auto last_result =
last_cx.build.Phi(T_i1(), vec(eq_result, lt_result, le_result),
vec(eq_cx.llbb, lt_cx.llbb, le_cx.llbb));
last_cx.build.Store(last_result, cx.fcx.llretptr);
last_cx.build.RetVoid();
}
// A helper function to compare integral values. This is used by both
// `make_integral_cmp_glue` and `make_cmp_glue`.
fn compare_integral_values(@block_ctxt cx, ValueRef lhs, ValueRef rhs,
bool signed, ValueRef llop) -> result {
auto lt_cmp; auto le_cmp;
if (signed) {
lt_cmp = lib.llvm.LLVMIntSLT;
le_cmp = lib.llvm.LLVMIntSLE;
} else {
lt_cmp = lib.llvm.LLVMIntULT;
le_cmp = lib.llvm.LLVMIntULE;
}
auto last_cx = new_sub_block_ctxt(cx, "last");
auto eq_cx = new_sub_block_ctxt(cx, "eq");
auto eq_result = eq_cx.build.ICmp(lib.llvm.LLVMIntEQ, lhs, rhs);
eq_cx.build.Br(last_cx.llbb);
auto lt_cx = new_sub_block_ctxt(cx, "lt");
auto lt_result = lt_cx.build.ICmp(lt_cmp, lhs, rhs);
lt_cx.build.Br(last_cx.llbb);
auto le_cx = new_sub_block_ctxt(cx, "le");
auto le_result = le_cx.build.ICmp(le_cmp, lhs, rhs);
le_cx.build.Br(last_cx.llbb);
auto unreach_cx = new_sub_block_ctxt(cx, "unreach");
unreach_cx.build.Unreachable();
auto llswitch = cx.build.Switch(llop, unreach_cx.llbb, 3u);
llvm.LLVMAddCase(llswitch, C_u8(abi.cmp_glue_op_eq), eq_cx.llbb);
llvm.LLVMAddCase(llswitch, C_u8(abi.cmp_glue_op_lt), lt_cx.llbb);
llvm.LLVMAddCase(llswitch, C_u8(abi.cmp_glue_op_le), le_cx.llbb);
auto last_result =
last_cx.build.Phi(T_i1(), vec(eq_result, lt_result, le_result),
vec(eq_cx.llbb, lt_cx.llbb, le_cx.llbb));
ret res(last_cx, last_result);
}
// A helper function to create integral comparison glue.
fn make_integral_cmp_glue(@block_ctxt cx, ValueRef lhs, ValueRef rhs,
ty.t intype, ValueRef llop) {
auto r = compare_integral_values(cx, lhs, rhs,
ty.type_is_signed(cx.fcx.lcx.ccx.tcx, intype), llop);
r.bcx.build.Store(r.val, r.bcx.fcx.llretptr);
r.bcx.build.RetVoid();
}
// Tag information
type variant_info = rec(vec[ty.t] args, ty.t ctor_ty, ast.def_id id);
// Returns information about the variants in a tag.
fn tag_variants(@crate_ctxt cx, ast.def_id id) -> vec[variant_info] {
if (cx.sess.get_targ_crate_num() != id._0) {
ret creader.get_tag_variants(cx.sess, cx.tcx, id);
}
assert (cx.items.contains_key(id));
alt (cx.items.get(id).node) {
case (ast.item_tag(_, ?variants, _, _, _)) {
let vec[variant_info] result = vec();
for (ast.variant variant in variants) {
auto ctor_ty = node_ann_type(cx, variant.node.ann);
let vec[ty.t] arg_tys = vec();
if (Vec.len[ast.variant_arg](variant.node.args) > 0u) {
for (ty.arg a in ty.ty_fn_args(cx.tcx, ctor_ty)) {
arg_tys += vec(a.ty);
}
}
auto did = variant.node.id;
result += vec(rec(args=arg_tys, ctor_ty=ctor_ty, id=did));
}
ret result;
}
}
fail; // not reached
}
// Returns information about the tag variant with the given ID.
fn tag_variant_with_id(@crate_ctxt cx,
&ast.def_id tag_id,
&ast.def_id variant_id) -> variant_info {
auto variants = tag_variants(cx, tag_id);
auto i = 0u;
while (i < Vec.len[variant_info](variants)) {
auto variant = variants.(i);
if (common.def_eq(variant.id, variant_id)) {
ret variant;
}
i += 1u;
}
log_err "tag_variant_with_id(): no variant exists with that ID";
fail;
}
type val_pair_fn = fn(@block_ctxt cx, ValueRef dst, ValueRef src) -> result;
type val_and_ty_fn = fn(@block_ctxt cx, ValueRef v, ty.t t) -> result;
type val_pair_and_ty_fn =
fn(@block_ctxt cx, ValueRef av, ValueRef bv, ty.t t) -> result;
// Iterates through the elements of a structural type.
fn iter_structural_ty(@block_ctxt cx,
ValueRef v,
ty.t t,
val_and_ty_fn f)
-> result {
fn adaptor_fn(val_and_ty_fn f,
@block_ctxt cx,
ValueRef av,
ValueRef bv,
ty.t t) -> result {
ret f(cx, av, t);
}
be iter_structural_ty_full(cx, v, v, t,
bind adaptor_fn(f, _, _, _, _));
}
fn iter_structural_ty_full(@block_ctxt cx,
ValueRef av,
ValueRef bv,
ty.t t,
val_pair_and_ty_fn f)
-> result {
let result r = res(cx, C_nil());
fn iter_boxpp(@block_ctxt cx,
ValueRef box_a_cell,
ValueRef box_b_cell,
val_pair_and_ty_fn f) -> result {
auto box_a_ptr = cx.build.Load(box_a_cell);
auto box_b_ptr = cx.build.Load(box_b_cell);
auto tnil = ty.mk_nil(cx.fcx.lcx.ccx.tcx);
auto tbox = ty.mk_imm_box(cx.fcx.lcx.ccx.tcx, tnil);
auto inner_cx = new_sub_block_ctxt(cx, "iter box");
auto next_cx = new_sub_block_ctxt(cx, "next");
auto null_test = cx.build.IsNull(box_a_ptr);
cx.build.CondBr(null_test, next_cx.llbb, inner_cx.llbb);
auto r = f(inner_cx, box_a_ptr, box_b_ptr, tbox);
r.bcx.build.Br(next_cx.llbb);
ret res(next_cx, C_nil());
}
alt (ty.struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty.ty_tup(?args)) {
let int i = 0;
for (ty.mt arg in args) {
r = GEP_tup_like(r.bcx, t, av, vec(0, i));
auto elt_a = r.val;
r = GEP_tup_like(r.bcx, t, bv, vec(0, i));
auto elt_b = r.val;
r = f(r.bcx,
load_if_immediate(r.bcx, elt_a, arg.ty),
load_if_immediate(r.bcx, elt_b, arg.ty),
arg.ty);
i += 1;
}
}
case (ty.ty_rec(?fields)) {
let int i = 0;
for (ty.field fld in fields) {
r = GEP_tup_like(r.bcx, t, av, vec(0, i));
auto llfld_a = r.val;
r = GEP_tup_like(r.bcx, t, bv, vec(0, i));
auto llfld_b = r.val;
r = f(r.bcx,
load_if_immediate(r.bcx, llfld_a, fld.mt.ty),
load_if_immediate(r.bcx, llfld_b, fld.mt.ty),
fld.mt.ty);
i += 1;
}
}
case (ty.ty_tag(?tid, ?tps)) {
auto variants = tag_variants(cx.fcx.lcx.ccx, tid);
auto n_variants = Vec.len[variant_info](variants);
// Cast the tags to types we can GEP into.
auto lltagty = T_opaque_tag_ptr(cx.fcx.lcx.ccx.tn);
auto av_tag = cx.build.PointerCast(av, lltagty);
auto bv_tag = cx.build.PointerCast(bv, lltagty);
auto lldiscrim_a_ptr = cx.build.GEP(av_tag,
vec(C_int(0), C_int(0)));
auto llunion_a_ptr = cx.build.GEP(av_tag,
vec(C_int(0), C_int(1)));
auto lldiscrim_a = cx.build.Load(lldiscrim_a_ptr);
auto lldiscrim_b_ptr = cx.build.GEP(bv_tag,
vec(C_int(0), C_int(0)));
auto llunion_b_ptr = cx.build.GEP(bv_tag,
vec(C_int(0), C_int(1)));
auto lldiscrim_b = cx.build.Load(lldiscrim_b_ptr);
            // NB: we must hit the discriminant first so that structural
            // comparison knows not to proceed when the discriminants differ.
auto bcx = cx;
bcx = f(bcx, lldiscrim_a, lldiscrim_b,
ty.mk_int(cx.fcx.lcx.ccx.tcx)).bcx;
auto unr_cx = new_sub_block_ctxt(bcx, "tag-iter-unr");
unr_cx.build.Unreachable();
auto llswitch = bcx.build.Switch(lldiscrim_a, unr_cx.llbb,
n_variants);
auto next_cx = new_sub_block_ctxt(bcx, "tag-iter-next");
auto i = 0u;
for (variant_info variant in variants) {
auto variant_cx = new_sub_block_ctxt(bcx,
"tag-iter-variant-" +
UInt.to_str(i, 10u));
llvm.LLVMAddCase(llswitch, C_int(i as int), variant_cx.llbb);
if (Vec.len[ty.t](variant.args) > 0u) {
// N-ary variant.
auto fn_ty = variant.ctor_ty;
alt (ty.struct(bcx.fcx.lcx.ccx.tcx, fn_ty)) {
case (ty.ty_fn(_, ?args, _)) {
auto j = 0;
for (ty.arg a in args) {
auto v = vec(C_int(0), C_int(j as int));
auto rslt = GEP_tag(variant_cx, llunion_a_ptr,
tid, variant.id, tps, j);
auto llfldp_a = rslt.val;
variant_cx = rslt.bcx;
rslt = GEP_tag(variant_cx, llunion_b_ptr, tid,
variant.id, tps, j);
auto llfldp_b = rslt.val;
variant_cx = rslt.bcx;
auto ty_subst = ty.bind_params_in_type(
cx.fcx.lcx.ccx.tcx, a.ty);
ty_subst = ty.substitute_type_params(
cx.fcx.lcx.ccx.tcx, tps, ty_subst);
auto llfld_a =
load_if_immediate(variant_cx,
llfldp_a,
ty_subst);
auto llfld_b =
load_if_immediate(variant_cx,
llfldp_b,
ty_subst);
auto res = f(variant_cx,
llfld_a, llfld_b, ty_subst);
variant_cx = res.bcx;
j += 1;
}
}
case (_) { fail; }
}
variant_cx.build.Br(next_cx.llbb);
} else {
// Nullary variant; nothing to do.
variant_cx.build.Br(next_cx.llbb);
}
i += 1u;
}
ret res(next_cx, C_nil());
}
case (ty.ty_fn(_,_,_)) {
auto box_cell_a =
cx.build.GEP(av,
vec(C_int(0),
C_int(abi.fn_field_box)));
auto box_cell_b =
cx.build.GEP(bv,
vec(C_int(0),
C_int(abi.fn_field_box)));
ret iter_boxpp(cx, box_cell_a, box_cell_b, f);
}
case (ty.ty_obj(_)) {
auto box_cell_a =
cx.build.GEP(av,
vec(C_int(0),
C_int(abi.obj_field_box)));
auto box_cell_b =
cx.build.GEP(bv,
vec(C_int(0),
C_int(abi.obj_field_box)));
ret iter_boxpp(cx, box_cell_a, box_cell_b, f);
}
case (_) {
cx.fcx.lcx.ccx.sess.unimpl("type in iter_structural_ty_full");
}
}
ret r;
}
// Iterates through a pointer range, until the src* hits the src_lim*.
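// Roughly equivalent to the following C-style loop (a sketch, not the
// emitted IR):
//
//     while (src < src_lim) {
//         f(dst, src);
//         dst += elt_sz;
//         src += elt_sz;
//     }
//
// The two cursors are carried through the cond block as phi nodes.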
fn iter_sequence_raw(@block_ctxt cx,
ValueRef dst, // elt*
ValueRef src, // elt*
ValueRef src_lim, // elt*
ValueRef elt_sz,
val_pair_fn f) -> result {
auto bcx = cx;
let ValueRef dst_int = vp2i(bcx, dst);
let ValueRef src_int = vp2i(bcx, src);
let ValueRef src_lim_int = vp2i(bcx, src_lim);
auto cond_cx = new_scope_block_ctxt(cx, "sequence-iter cond");
auto body_cx = new_scope_block_ctxt(cx, "sequence-iter body");
auto next_cx = new_sub_block_ctxt(cx, "next");
bcx.build.Br(cond_cx.llbb);
let ValueRef dst_curr = cond_cx.build.Phi(T_int(),
vec(dst_int), vec(bcx.llbb));
let ValueRef src_curr = cond_cx.build.Phi(T_int(),
vec(src_int), vec(bcx.llbb));
auto end_test = cond_cx.build.ICmp(lib.llvm.LLVMIntULT,
src_curr, src_lim_int);
cond_cx.build.CondBr(end_test, body_cx.llbb, next_cx.llbb);
auto dst_curr_ptr = vi2p(body_cx, dst_curr, T_ptr(T_i8()));
auto src_curr_ptr = vi2p(body_cx, src_curr, T_ptr(T_i8()));
auto body_res = f(body_cx, dst_curr_ptr, src_curr_ptr);
body_cx = body_res.bcx;
auto dst_next = body_cx.build.Add(dst_curr, elt_sz);
auto src_next = body_cx.build.Add(src_curr, elt_sz);
body_cx.build.Br(cond_cx.llbb);
cond_cx.build.AddIncomingToPhi(dst_curr, vec(dst_next),
vec(body_cx.llbb));
cond_cx.build.AddIncomingToPhi(src_curr, vec(src_next),
vec(body_cx.llbb));
ret res(next_cx, C_nil());
}
fn iter_sequence_inner(@block_ctxt cx,
ValueRef src, // elt*
ValueRef src_lim, // elt*
ty.t elt_ty,
val_and_ty_fn f) -> result {
fn adaptor_fn(val_and_ty_fn f,
ty.t elt_ty,
@block_ctxt cx,
ValueRef dst,
ValueRef src) -> result {
auto llptrty;
if (!ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, elt_ty)) {
auto llty = type_of(cx.fcx.lcx.ccx, elt_ty);
llptrty = T_ptr(llty);
} else {
llptrty = T_ptr(T_ptr(T_i8()));
}
auto p = cx.build.PointerCast(src, llptrty);
ret f(cx, load_if_immediate(cx, p, elt_ty), elt_ty);
}
auto elt_sz = size_of(cx, elt_ty);
be iter_sequence_raw(elt_sz.bcx, src, src, src_lim, elt_sz.val,
bind adaptor_fn(f, elt_ty, _, _, _));
}
// Iterates through the elements of a vec or str.
fn iter_sequence(@block_ctxt cx,
ValueRef v,
ty.t t,
val_and_ty_fn f) -> result {
fn iter_sequence_body(@block_ctxt cx,
ValueRef v,
ty.t elt_ty,
val_and_ty_fn f,
bool trailing_null) -> result {
auto p0 = cx.build.GEP(v, vec(C_int(0),
C_int(abi.vec_elt_data)));
auto lenptr = cx.build.GEP(v, vec(C_int(0),
C_int(abi.vec_elt_fill)));
auto llunit_ty;
if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, elt_ty)) {
llunit_ty = T_i8();
} else {
llunit_ty = type_of(cx.fcx.lcx.ccx, elt_ty);
}
auto bcx = cx;
auto len = bcx.build.Load(lenptr);
if (trailing_null) {
auto unit_sz = size_of(bcx, elt_ty);
bcx = unit_sz.bcx;
len = bcx.build.Sub(len, unit_sz.val);
}
auto p1 = vi2p(bcx, bcx.build.Add(vp2i(bcx, p0), len),
T_ptr(llunit_ty));
ret iter_sequence_inner(bcx, p0, p1, elt_ty, f);
}
alt (ty.struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty.ty_vec(?elt)) {
ret iter_sequence_body(cx, v, elt.ty, f, false);
}
case (ty.ty_str) {
auto et = ty.mk_mach(cx.fcx.lcx.ccx.tcx, common.ty_u8);
ret iter_sequence_body(cx, v, et, f, true);
}
        case (_) {
            cx.fcx.lcx.ccx.sess.bug("bad type in trans.iter_sequence");
            fail;
        }
    }
}
fn call_tydesc_glue_full(@block_ctxt cx, ValueRef v,
ValueRef tydesc, int field) {
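    // Load the tydesc's first-param list and the glue fn stored at the
    // given field index, then invoke it with the glue calling convention
    // used below: an unused (null) retptr slot, the task pointer, a null
    // environment, the ty params, and a raw i8* to the value.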
auto llrawptr = cx.build.BitCast(v, T_ptr(T_i8()));
auto lltydescs = cx.build.GEP(tydesc,
vec(C_int(0),
C_int(abi.tydesc_field_first_param)));
lltydescs = cx.build.Load(lltydescs);
auto llfnptr = cx.build.GEP(tydesc, vec(C_int(0), C_int(field)));
auto llfn = cx.build.Load(llfnptr);
cx.build.FastCall(llfn, vec(C_null(T_ptr(T_nil())),
cx.fcx.lltaskptr,
C_null(T_ptr(T_nil())),
lltydescs,
llrawptr));
}
fn call_tydesc_glue(@block_ctxt cx, ValueRef v,
ty.t t, bool escapes, int field) -> result {
auto td = get_tydesc(cx, t, escapes);
call_tydesc_glue_full(td.bcx,
spill_if_immediate(td.bcx, v, t),
td.val, field);
ret res(td.bcx, C_nil());
}
fn maybe_call_dtor(@block_ctxt cx, ValueRef v) -> @block_ctxt {
auto vtbl = cx.build.GEP(v, vec(C_int(0), C_int(abi.obj_field_vtbl)));
vtbl = cx.build.Load(vtbl);
auto dtor_ptr = cx.build.GEP(vtbl, vec(C_int(0), C_int(0)));
dtor_ptr = cx.build.Load(dtor_ptr);
auto self_t = llvm.LLVMGetElementType(val_ty(v));
dtor_ptr = cx.build.BitCast(dtor_ptr,
T_ptr(T_dtor(cx.fcx.lcx.ccx, self_t)));
auto dtor_cx = new_sub_block_ctxt(cx, "dtor");
auto after_cx = new_sub_block_ctxt(cx, "after_dtor");
auto test = cx.build.ICmp(lib.llvm.LLVMIntNE, dtor_ptr,
C_null(val_ty(dtor_ptr)));
cx.build.CondBr(test, dtor_cx.llbb, after_cx.llbb);
auto me = dtor_cx.build.Load(v);
dtor_cx.build.FastCall(dtor_ptr, vec(C_null(T_ptr(T_nil())),
cx.fcx.lltaskptr, me));
dtor_cx.build.Br(after_cx.llbb);
ret after_cx;
}
fn call_cmp_glue(@block_ctxt cx,
ValueRef lhs,
ValueRef rhs,
ty.t t,
ValueRef llop) -> result {
// We can't use call_tydesc_glue_full() and friends here because compare
// glue has a special signature.
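    // As laid out below, the args are: a pointer to the i1 result slot,
    // the task pointer, a null environment, the ty-param descriptors, raw
    // pointers to the (spilled) operands, and the operation selector byte.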
auto lllhs = spill_if_immediate(cx, lhs, t);
auto llrhs = spill_if_immediate(cx, rhs, t);
auto llrawlhsptr = cx.build.BitCast(lllhs, T_ptr(T_i8()));
auto llrawrhsptr = cx.build.BitCast(llrhs, T_ptr(T_i8()));
auto r = get_tydesc(cx, t, false);
auto lltydescs =
r.bcx.build.GEP(r.val, vec(C_int(0),
C_int(abi.tydesc_field_first_param)));
lltydescs = r.bcx.build.Load(lltydescs);
auto llfnptr =
r.bcx.build.GEP(r.val, vec(C_int(0),
C_int(abi.tydesc_field_cmp_glue)));
auto llfn = r.bcx.build.Load(llfnptr);
auto llcmpresultptr = r.bcx.build.Alloca(T_i1());
let vec[ValueRef] llargs = vec(llcmpresultptr,
r.bcx.fcx.lltaskptr,
C_null(T_ptr(T_nil())),
lltydescs,
llrawlhsptr,
llrawrhsptr,
llop);
r.bcx.build.FastCall(llfn, llargs);
ret res(r.bcx, r.bcx.build.Load(llcmpresultptr));
}
fn take_ty(@block_ctxt cx, ValueRef v, ty.t t) -> result {
if (!ty.type_is_scalar(cx.fcx.lcx.ccx.tcx, t)) {
ret call_tydesc_glue(cx, v, t, false, abi.tydesc_field_take_glue);
}
ret res(cx, C_nil());
}
fn drop_slot(@block_ctxt cx,
ValueRef slot,
ty.t t) -> result {
auto llptr = load_if_immediate(cx, slot, t);
auto re = drop_ty(cx, llptr, t);
auto llty = val_ty(slot);
auto llelemty = lib.llvm.llvm.LLVMGetElementType(llty);
re.bcx.build.Store(C_null(llelemty), slot);
ret re;
}
fn drop_ty(@block_ctxt cx,
ValueRef v,
ty.t t) -> result {
if (!ty.type_is_scalar(cx.fcx.lcx.ccx.tcx, t)) {
ret call_tydesc_glue(cx, v, t, false, abi.tydesc_field_drop_glue);
}
ret res(cx, C_nil());
}
fn call_memcpy(@block_ctxt cx,
ValueRef dst,
ValueRef src,
ValueRef n_bytes) -> result {
auto src_ptr = cx.build.PointerCast(src, T_ptr(T_i8()));
auto dst_ptr = cx.build.PointerCast(dst, T_ptr(T_i8()));
auto size = cx.build.IntCast(n_bytes, T_int());
ret res(cx, cx.build.FastCall(cx.fcx.lcx.ccx.glues.memcpy_glue,
vec(dst_ptr, src_ptr, size)));
}
fn call_bzero(@block_ctxt cx,
ValueRef dst,
ValueRef n_bytes) -> result {
auto dst_ptr = cx.build.PointerCast(dst, T_ptr(T_i8()));
auto size = cx.build.IntCast(n_bytes, T_int());
ret res(cx, cx.build.FastCall(cx.fcx.lcx.ccx.glues.bzero_glue,
vec(dst_ptr, size)));
}
fn memcpy_ty(@block_ctxt cx,
ValueRef dst,
ValueRef src,
ty.t t) -> result {
if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, t)) {
auto llszptr = field_of_tydesc(cx, t, false, abi.tydesc_field_size);
auto llsz = llszptr.bcx.build.Load(llszptr.val);
ret call_memcpy(llszptr.bcx, dst, src, llsz);
} else {
ret res(cx, cx.build.Store(cx.build.Load(src), dst));
}
}
tag copy_action {
INIT;
DROP_EXISTING;
}
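// INIT writes into uninitialized memory; DROP_EXISTING first releases
// whatever the destination already holds. For a boxed t, copy_ty with
// DROP_EXISTING behaves roughly like (a sketch):
//
//     take(src);      // bump the refcount of the source box
//     drop(*dst);     // release the destination's old contents
//     *dst = src;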
fn copy_ty(@block_ctxt cx,
copy_action action,
ValueRef dst,
ValueRef src,
ty.t t) -> result {
if (ty.type_is_scalar(cx.fcx.lcx.ccx.tcx, t) ||
ty.type_is_native(cx.fcx.lcx.ccx.tcx, t)) {
ret res(cx, cx.build.Store(src, dst));
} else if (ty.type_is_nil(cx.fcx.lcx.ccx.tcx, t)) {
ret res(cx, C_nil());
} else if (ty.type_is_boxed(cx.fcx.lcx.ccx.tcx, t)) {
auto r = take_ty(cx, src, t);
if (action == DROP_EXISTING) {
r = drop_ty(r.bcx, r.bcx.build.Load(dst), t);
}
ret res(r.bcx, r.bcx.build.Store(src, dst));
} else if (ty.type_is_structural(cx.fcx.lcx.ccx.tcx, t) ||
ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, t)) {
auto r = take_ty(cx, src, t);
if (action == DROP_EXISTING) {
r = drop_ty(r.bcx, dst, t);
}
ret memcpy_ty(r.bcx, dst, src, t);
}
cx.fcx.lcx.ccx.sess.bug("unexpected type in trans.copy_ty: " +
ty.ty_to_str(cx.fcx.lcx.ccx.tcx, t));
fail;
}
fn trans_lit(@crate_ctxt cx, &ast.lit lit, &ast.ann ann) -> ValueRef {
alt (lit.node) {
case (ast.lit_int(?i)) {
ret C_int(i);
}
case (ast.lit_uint(?u)) {
ret C_int(u as int);
}
case (ast.lit_mach_int(?tm, ?i)) {
// FIXME: the entire handling of mach types falls apart
// if target int width is larger than host, at the moment;
// re-do the mach-int types using 'big' when that works.
auto t = T_int();
auto s = True;
alt (tm) {
case (common.ty_u8) { t = T_i8(); s = False; }
case (common.ty_u16) { t = T_i16(); s = False; }
case (common.ty_u32) { t = T_i32(); s = False; }
case (common.ty_u64) { t = T_i64(); s = False; }
case (common.ty_i8) { t = T_i8(); }
case (common.ty_i16) { t = T_i16(); }
case (common.ty_i32) { t = T_i32(); }
case (common.ty_i64) { t = T_i64(); }
}
ret C_integral(t, i as uint, s);
}
case(ast.lit_float(?fs)) {
ret C_float(fs);
}
case(ast.lit_mach_float(?tm, ?s)) {
auto t = T_float();
alt(tm) {
case(common.ty_f32) { t = T_f32(); }
case(common.ty_f64) { t = T_f64(); }
}
ret C_floating(s, t);
}
case (ast.lit_char(?c)) {
ret C_integral(T_char(), c as uint, False);
}
case (ast.lit_bool(?b)) {
ret C_bool(b);
}
case (ast.lit_nil) {
ret C_nil();
}
case (ast.lit_str(?s)) {
ret C_str(cx, s);
}
}
}
fn target_type(@crate_ctxt cx, ty.t t) -> ty.t {
alt (ty.struct(cx.tcx, t)) {
case (ty.ty_int) {
auto struct_ty = ty.mk_mach(cx.tcx,
cx.sess.get_targ_cfg().int_type);
ret ty.copy_cname(cx.tcx, struct_ty, t);
}
case (ty.ty_uint) {
auto struct_ty = ty.mk_mach(cx.tcx,
cx.sess.get_targ_cfg().uint_type);
ret ty.copy_cname(cx.tcx, struct_ty, t);
}
case (_) { /* fall through */ }
}
ret t;
}
// Converts an annotation to a type
fn node_ann_type(@crate_ctxt cx, &ast.ann a) -> ty.t {
ret target_type(cx, ty.ann_to_monotype(cx.tcx, a));
}
fn node_ann_ty_params(&ast.ann a) -> vec[ty.t] {
alt (a) {
case (ast.ann_none) {
log_err "missing type annotation";
fail;
}
case (ast.ann_type(_, ?tps_opt, _)) {
alt (tps_opt) {
case (none[vec[ty.t]]) {
log_err "type annotation has no ty params";
fail;
}
case (some[vec[ty.t]](?tps)) { ret tps; }
}
}
}
}
fn node_type(@crate_ctxt cx, &ast.ann a) -> TypeRef {
ret type_of(cx, node_ann_type(cx, a));
}
fn trans_unary(@block_ctxt cx, ast.unop op,
@ast.expr e, &ast.ann a) -> result {
auto sub = trans_expr(cx, e);
auto e_ty = ty.expr_ty(cx.fcx.lcx.ccx.tcx, e);
alt (op) {
case (ast.bitnot) {
sub = autoderef(sub.bcx, sub.val,
ty.expr_ty(cx.fcx.lcx.ccx.tcx, e));
ret res(sub.bcx, sub.bcx.build.Not(sub.val));
}
case (ast.not) {
sub = autoderef(sub.bcx, sub.val,
ty.expr_ty(cx.fcx.lcx.ccx.tcx, e));
ret res(sub.bcx, sub.bcx.build.Not(sub.val));
}
case (ast.neg) {
sub = autoderef(sub.bcx, sub.val,
ty.expr_ty(cx.fcx.lcx.ccx.tcx, e));
if(ty.struct(cx.fcx.lcx.ccx.tcx, e_ty) == ty.ty_float) {
ret res(sub.bcx, sub.bcx.build.FNeg(sub.val));
}
else {
ret res(sub.bcx, sub.bcx.build.Neg(sub.val));
}
}
case (ast.box(_)) {
auto e_ty = ty.expr_ty(cx.fcx.lcx.ccx.tcx, e);
auto e_val = sub.val;
auto box_ty = node_ann_type(sub.bcx.fcx.lcx.ccx, a);
sub = trans_malloc_boxed(sub.bcx, e_ty);
find_scope_cx(cx).cleanups +=
vec(clean(bind drop_ty(_, sub.val, box_ty)));
auto box = sub.val;
auto rc = sub.bcx.build.GEP(box,
vec(C_int(0),
C_int(abi.box_rc_field_refcnt)));
auto body = sub.bcx.build.GEP(box,
vec(C_int(0),
C_int(abi.box_rc_field_body)));
sub.bcx.build.Store(C_int(1), rc);
// Cast the body type to the type of the value. This is needed to
// make tags work, since tags have a different LLVM type depending
// on whether they're boxed or not.
if (!ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, e_ty)) {
auto llety = T_ptr(type_of(sub.bcx.fcx.lcx.ccx, e_ty));
body = sub.bcx.build.PointerCast(body, llety);
}
sub = copy_ty(sub.bcx, INIT, body, e_val, e_ty);
ret res(sub.bcx, box);
}
case (ast.deref) {
log_err "deref expressions should have been translated using " +
"trans_lval(), not trans_unary()";
fail;
}
}
fail;
}
fn trans_compare(@block_ctxt cx0, ast.binop op, ty.t t0,
ValueRef lhs0, ValueRef rhs0) -> result {
// Autoderef both sides.
auto cx = cx0;
auto lhs_r = autoderef(cx, lhs0, t0);
auto lhs = lhs_r.val;
cx = lhs_r.bcx;
auto rhs_r = autoderef(cx, rhs0, t0);
auto rhs = rhs_r.val;
cx = rhs_r.bcx;
auto t = autoderefed_ty(cx.fcx.lcx.ccx, t0);
// Determine the operation we need.
// FIXME: Use or-patterns when we have them.
auto llop;
alt (op) {
case (ast.eq) { llop = C_u8(abi.cmp_glue_op_eq); }
case (ast.lt) { llop = C_u8(abi.cmp_glue_op_lt); }
case (ast.le) { llop = C_u8(abi.cmp_glue_op_le); }
case (ast.ne) { llop = C_u8(abi.cmp_glue_op_eq); }
case (ast.ge) { llop = C_u8(abi.cmp_glue_op_lt); }
case (ast.gt) { llop = C_u8(abi.cmp_glue_op_le); }
}
auto rslt = call_cmp_glue(cx, lhs, rhs, t, llop);
    // Invert the result if necessary: ne, ge and gt were evaluated above
    // as eq, lt and le respectively, so e.g. `a > b` is computed here as
    // !(a <= b).
    // FIXME: Use or-patterns when we have them.
alt (op) {
case (ast.eq) { ret res(rslt.bcx, rslt.val); }
case (ast.lt) { ret res(rslt.bcx, rslt.val); }
case (ast.le) { ret res(rslt.bcx, rslt.val); }
case (ast.ne) { ret res(rslt.bcx, rslt.bcx.build.Not(rslt.val)); }
case (ast.ge) { ret res(rslt.bcx, rslt.bcx.build.Not(rslt.val)); }
case (ast.gt) { ret res(rslt.bcx, rslt.bcx.build.Not(rslt.val)); }
}
}
fn trans_vec_append(@block_ctxt cx, ty.t t,
ValueRef lhs, ValueRef rhs) -> result {
auto elt_ty = ty.sequence_element_type(cx.fcx.lcx.ccx.tcx, t);
auto skip_null = C_bool(false);
alt (ty.struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty.ty_str) { skip_null = C_bool(true); }
case (_) { }
}
auto bcx = cx;
auto llvec_tydesc = get_tydesc(bcx, t, false);
bcx = llvec_tydesc.bcx;
auto llelt_tydesc = get_tydesc(bcx, elt_ty, false);
bcx = llelt_tydesc.bcx;
auto dst = bcx.build.PointerCast(lhs, T_ptr(T_opaque_vec_ptr()));
auto src = bcx.build.PointerCast(rhs, T_opaque_vec_ptr());
ret res(bcx, bcx.build.FastCall(cx.fcx.lcx.ccx.glues.vec_append_glue,
vec(cx.fcx.lltaskptr,
llvec_tydesc.val,
llelt_tydesc.val,
dst, src, skip_null)));
}
fn trans_vec_add(@block_ctxt cx, ty.t t,
ValueRef lhs, ValueRef rhs) -> result {
auto r = alloc_ty(cx, t);
auto tmp = r.val;
r = copy_ty(r.bcx, INIT, tmp, lhs, t);
auto bcx = trans_vec_append(r.bcx, t, tmp, rhs).bcx;
tmp = load_if_immediate(bcx, tmp, t);
find_scope_cx(cx).cleanups +=
vec(clean(bind drop_ty(_, tmp, t)));
ret res(bcx, tmp);
}
fn trans_eager_binop(@block_ctxt cx, ast.binop op, ty.t intype,
ValueRef lhs, ValueRef rhs) -> result {
auto is_float = false;
alt (ty.struct(cx.fcx.lcx.ccx.tcx, intype)) {
case (ty.ty_float) {
is_float = true;
}
case (_) {
is_float = false;
}
}
alt (op) {
case (ast.add) {
if (ty.type_is_sequence(cx.fcx.lcx.ccx.tcx, intype)) {
ret trans_vec_add(cx, intype, lhs, rhs);
}
if (is_float) {
ret res(cx, cx.build.FAdd(lhs, rhs));
}
else {
ret res(cx, cx.build.Add(lhs, rhs));
}
}
case (ast.sub) {
if (is_float) {
ret res(cx, cx.build.FSub(lhs, rhs));
}
else {
ret res(cx, cx.build.Sub(lhs, rhs));
}
}
case (ast.mul) {
if (is_float) {
ret res(cx, cx.build.FMul(lhs, rhs));
}
else {
ret res(cx, cx.build.Mul(lhs, rhs));
}
}
case (ast.div) {
if (is_float) {
ret res(cx, cx.build.FDiv(lhs, rhs));
}
if (ty.type_is_signed(cx.fcx.lcx.ccx.tcx, intype)) {
ret res(cx, cx.build.SDiv(lhs, rhs));
} else {
ret res(cx, cx.build.UDiv(lhs, rhs));
}
}
case (ast.rem) {
if (is_float) {
ret res(cx, cx.build.FRem(lhs, rhs));
}
if (ty.type_is_signed(cx.fcx.lcx.ccx.tcx, intype)) {
ret res(cx, cx.build.SRem(lhs, rhs));
} else {
ret res(cx, cx.build.URem(lhs, rhs));
}
}
case (ast.bitor) { ret res(cx, cx.build.Or(lhs, rhs)); }
case (ast.bitand) { ret res(cx, cx.build.And(lhs, rhs)); }
case (ast.bitxor) { ret res(cx, cx.build.Xor(lhs, rhs)); }
case (ast.lsl) { ret res(cx, cx.build.Shl(lhs, rhs)); }
case (ast.lsr) { ret res(cx, cx.build.LShr(lhs, rhs)); }
case (ast.asr) { ret res(cx, cx.build.AShr(lhs, rhs)); }
case (_) {
ret trans_compare(cx, op, intype, lhs, rhs);
}
}
fail;
}
fn autoderef(@block_ctxt cx, ValueRef v, ty.t t) -> result {
let ValueRef v1 = v;
let ty.t t1 = t;
while (true) {
alt (ty.struct(cx.fcx.lcx.ccx.tcx, t1)) {
case (ty.ty_box(?mt)) {
auto body = cx.build.GEP(v1,
vec(C_int(0),
C_int(abi.box_rc_field_body)));
t1 = mt.ty;
// Since we're changing levels of box indirection, we may have
// to cast this pointer, since statically-sized tag types have
// different types depending on whether they're behind a box
// or not.
if (!ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, mt.ty)) {
auto llty = type_of(cx.fcx.lcx.ccx, mt.ty);
v1 = cx.build.PointerCast(body, T_ptr(llty));
} else {
v1 = body;
}
v1 = load_if_immediate(cx, v1, t1);
}
case (_) {
ret res(cx, v1);
}
}
}
}
fn autoderefed_ty(@crate_ctxt ccx, ty.t t) -> ty.t {
let ty.t t1 = t;
while (true) {
alt (ty.struct(ccx.tcx, t1)) {
case (ty.ty_box(?mt)) {
t1 = mt.ty;
}
case (_) {
ret t1;
}
}
}
}
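// For example, a value of type @@int is autoderef'ed by two rounds of
// GEP-to-body plus load, yielding the underlying int; autoderefed_ty()
// performs the matching peel at the type level, @@int -> int.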
fn trans_binary(@block_ctxt cx, ast.binop op,
@ast.expr a, @ast.expr b) -> result {
    // The first couple of cases are lazy:
alt (op) {
case (ast.and) {
// Lazy-eval and
auto lhs_res = trans_expr(cx, a);
lhs_res = autoderef(lhs_res.bcx, lhs_res.val,
ty.expr_ty(cx.fcx.lcx.ccx.tcx, a));
auto rhs_cx = new_scope_block_ctxt(cx, "rhs");
auto rhs_res = trans_expr(rhs_cx, b);
rhs_res = autoderef(rhs_res.bcx, rhs_res.val,
ty.expr_ty(cx.fcx.lcx.ccx.tcx, b));
auto lhs_false_cx = new_scope_block_ctxt(cx, "lhs false");
auto lhs_false_res = res(lhs_false_cx, C_bool(false));
lhs_res.bcx.build.CondBr(lhs_res.val,
rhs_cx.llbb,
lhs_false_cx.llbb);
ret join_results(cx, T_bool(),
vec(lhs_false_res, rhs_res));
}
case (ast.or) {
// Lazy-eval or
auto lhs_res = trans_expr(cx, a);
lhs_res = autoderef(lhs_res.bcx, lhs_res.val,
ty.expr_ty(cx.fcx.lcx.ccx.tcx, a));
auto rhs_cx = new_scope_block_ctxt(cx, "rhs");
auto rhs_res = trans_expr(rhs_cx, b);
rhs_res = autoderef(rhs_res.bcx, rhs_res.val,
ty.expr_ty(cx.fcx.lcx.ccx.tcx, b));
auto lhs_true_cx = new_scope_block_ctxt(cx, "lhs true");
auto lhs_true_res = res(lhs_true_cx, C_bool(true));
lhs_res.bcx.build.CondBr(lhs_res.val,
lhs_true_cx.llbb,
rhs_cx.llbb);
ret join_results(cx, T_bool(),
vec(lhs_true_res, rhs_res));
}
case (_) {
// Remaining cases are eager:
auto lhs = trans_expr(cx, a);
auto lhty = ty.expr_ty(cx.fcx.lcx.ccx.tcx, a);
lhs = autoderef(lhs.bcx, lhs.val, lhty);
auto rhs = trans_expr(lhs.bcx, b);
auto rhty = ty.expr_ty(cx.fcx.lcx.ccx.tcx, b);
rhs = autoderef(rhs.bcx, rhs.val, rhty);
ret trans_eager_binop(rhs.bcx, op,
autoderefed_ty(cx.fcx.lcx.ccx, lhty), lhs.val, rhs.val);
}
}
fail;
}
fn join_results(@block_ctxt parent_cx,
TypeRef t,
vec[result] ins)
-> result {
let vec[result] live = vec();
let vec[ValueRef] vals = vec();
let vec[BasicBlockRef] bbs = vec();
for (result r in ins) {
if (! is_terminated(r.bcx)) {
live += vec(r);
vals += vec(r.val);
bbs += vec(r.bcx.llbb);
}
}
alt (Vec.len[result](live)) {
case (0u) {
// No incoming edges are live, so we're in dead-code-land.
// Arbitrarily pick the first dead edge, since the caller
// is just going to propagate it outward.
assert (Vec.len[result](ins) >= 1u);
ret ins.(0);
}
case (_) { /* fall through */ }
}
    // We have one or more live incoming edges. Make a join block and
    // br+phi them into it.
auto join_cx = new_sub_block_ctxt(parent_cx, "join");
for (result r in live) {
r.bcx.build.Br(join_cx.llbb);
}
auto phi = join_cx.build.Phi(t, vals, bbs);
ret res(join_cx, phi);
}
fn trans_if(@block_ctxt cx, @ast.expr cond,
&ast.block thn, &Option.t[@ast.expr] els) -> result {
auto cond_res = trans_expr(cx, cond);
auto then_cx = new_scope_block_ctxt(cx, "then");
auto then_res = trans_block(then_cx, thn);
auto else_cx = new_scope_block_ctxt(cx, "else");
auto else_res;
auto expr_llty;
alt (els) {
case (some[@ast.expr](?elexpr)) {
alt (elexpr.node) {
case (ast.expr_if(_, _, _, _)) {
else_res = trans_expr(else_cx, elexpr);
}
case (ast.expr_block(?blk, _)) {
// Calling trans_block directly instead of trans_expr
// because trans_expr will create another scope block
// context for the block, but we've already got the
// 'else' context
else_res = trans_block(else_cx, blk);
}
}
// If we have an else expression, then the entire
// if expression can have a non-nil type.
// FIXME: This isn't quite right, particularly re: dynamic types
auto expr_ty = ty.expr_ty(cx.fcx.lcx.ccx.tcx, elexpr);
if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, expr_ty)) {
expr_llty = T_typaram_ptr(cx.fcx.lcx.ccx.tn);
} else {
expr_llty = type_of(else_res.bcx.fcx.lcx.ccx, expr_ty);
if (ty.type_is_structural(cx.fcx.lcx.ccx.tcx, expr_ty)) {
expr_llty = T_ptr(expr_llty);
}
}
}
case (_) {
else_res = res(else_cx, C_nil());
expr_llty = T_nil();
}
}
cond_res.bcx.build.CondBr(cond_res.val,
then_cx.llbb,
else_cx.llbb);
ret join_results(cx, expr_llty,
vec(then_res, else_res));
}
fn trans_for(@block_ctxt cx,
@ast.decl decl,
@ast.expr seq,
&ast.block body) -> result {
fn inner(@block_ctxt cx,
@ast.local local, ValueRef curr,
ty.t t, ast.block body,
@block_ctxt outer_next_cx) -> result {
auto next_cx = new_sub_block_ctxt(cx, "next");
auto scope_cx =
new_loop_scope_block_ctxt(cx, Option.some[@block_ctxt](next_cx),
outer_next_cx, "for loop scope");
cx.build.Br(scope_cx.llbb);
auto local_res = alloc_local(scope_cx, local);
auto bcx = copy_ty(local_res.bcx, INIT, local_res.val, curr, t).bcx;
scope_cx.cleanups +=
vec(clean(bind drop_slot(_, local_res.val, t)));
bcx = trans_block(bcx, body).bcx;
bcx.build.Br(next_cx.llbb);
ret res(next_cx, C_nil());
}
let @ast.local local;
alt (decl.node) {
case (ast.decl_local(?loc)) {
local = loc;
}
}
auto next_cx = new_sub_block_ctxt(cx, "next");
auto seq_ty = ty.expr_ty(cx.fcx.lcx.ccx.tcx, seq);
auto seq_res = trans_expr(cx, seq);
auto it = iter_sequence(seq_res.bcx, seq_res.val, seq_ty,
bind inner(_, local, _, _, body, next_cx));
it.bcx.build.Br(next_cx.llbb);
ret res(next_cx, it.val);
}
// Iterator translation
// Searches through a block for all references to locals or upvars in this
// frame and returns the list of definition IDs thus found.
fn collect_upvars(@block_ctxt cx, &ast.block bloc, &ast.def_id initial_decl)
-> vec[ast.def_id] {
type env = @rec(
mutable vec[ast.def_id] refs,
hashmap[ast.def_id,()] decls
);
fn walk_expr(env e, @ast.expr expr) {
alt (expr.node) {
case (ast.expr_path(?path, ?d, _)) {
alt (Option.get[ast.def](d)) {
case (ast.def_arg(?did)) {
Vec.push[ast.def_id](e.refs, did);
}
case (ast.def_local(?did)) {
Vec.push[ast.def_id](e.refs, did);
}
case (ast.def_upvar(?did)) {
Vec.push[ast.def_id](e.refs, did);
}
case (_) {}
}
}
case (_) {}
}
}
fn walk_decl(env e, @ast.decl decl) {
alt (decl.node) {
case (ast.decl_local(?local)) {
e.decls.insert(local.id, ());
}
case (_) {}
}
}
let vec[ast.def_id] refs = vec();
let hashmap[ast.def_id,()] decls = new_def_hash[()]();
decls.insert(initial_decl, ());
let env e = @rec(mutable refs=refs, decls=decls);
auto visitor = @rec(visit_decl_pre = bind walk_decl(e, _),
visit_expr_pre = bind walk_expr(e, _)
with walk.default_visitor());
walk.walk_block(*visitor, bloc);
// Calculate (refs - decls). This is the set of captured upvars.
let vec[ast.def_id] result = vec();
for (ast.def_id ref_id in e.refs) {
if (!decls.contains_key(ref_id)) {
result += vec(ref_id);
}
}
ret result;
}
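// For example, given (hypothetical source)
//
//     let int q = 0;
//     for each (int v in it()) { let int r = 0; log q + r; }
//
// the body references {q, v, r}; v is the initial_decl and r is declared
// inside the block, so only q survives the (refs - decls) subtraction and
// is captured as an upvar.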
fn trans_for_each(@block_ctxt cx,
@ast.decl decl,
@ast.expr seq,
&ast.block body) -> result {
/*
* The translation is a little .. complex here. Code like:
*
* let ty1 p = ...;
*
     * let ty2 q = ...;
*
* foreach (ty v in foo(a,b)) { body(p,q,v) }
*
*
     * Turns into something like so (C/Rust mishmash):
*
* type env = { *ty1 p, *ty2 q, ... };
*
* let env e = { &p, &q, ... };
*
* fn foreach123_body(env* e, ty v) { body(*(e->p),*(e->q),v) }
*
* foo([foreach123_body, env*], a, b);
*
*/
// Step 1: walk body and figure out which references it makes
// escape. This could be determined upstream, and probably ought
    // to be so, eventually. For the first cut, skip this. Null env.
auto lcx = cx.fcx.lcx;
// FIXME: possibly support alias-mode here?
auto decl_ty = ty.mk_nil(lcx.ccx.tcx);
auto decl_id;
alt (decl.node) {
case (ast.decl_local(?local)) {
decl_ty = node_ann_type(lcx.ccx, local.ann);
decl_id = local.id;
}
}
auto upvars = collect_upvars(cx, body, decl_id);
auto upvar_count = Vec.len[ast.def_id](upvars);
auto llbindingsptr;
if (upvar_count > 0u) {
// Gather up the upvars.
let vec[ValueRef] llbindings = vec();
let vec[TypeRef] llbindingtys = vec();
for (ast.def_id did in upvars) {
auto llbinding;
alt (cx.fcx.lllocals.find(did)) {
case (none[ValueRef]) {
alt (cx.fcx.llupvars.find(did)) {
case (none[ValueRef]) {
llbinding = cx.fcx.llargs.get(did);
}
case (some[ValueRef](?llval)) { llbinding = llval; }
}
}
case (some[ValueRef](?llval)) { llbinding = llval; }
}
llbindings += vec(llbinding);
llbindingtys += vec(val_ty(llbinding));
}
// Create an array of bindings and copy in aliases to the upvars.
llbindingsptr = alloca(cx, T_struct(llbindingtys));
auto i = 0u;
while (i < upvar_count) {
auto llbindingptr = cx.build.GEP(llbindingsptr,
vec(C_int(0), C_int(i as int)));
cx.build.Store(llbindings.(i), llbindingptr);
i += 1u;
}
} else {
// Null bindings.
llbindingsptr = C_null(T_ptr(T_i8()));
}
// Create an environment and populate it with the bindings.
auto tydesc_count = Vec.len[ValueRef](cx.fcx.lltydescs);
auto llenvptrty = T_closure_ptr(lcx.ccx.tn, T_ptr(T_nil()),
val_ty(llbindingsptr), tydesc_count);
auto llenvptr = alloca(cx, llvm.LLVMGetElementType(llenvptrty));
auto llbindingsptrptr = cx.build.GEP(llenvptr,
vec(C_int(0),
C_int(abi.box_rc_field_body),
C_int(2)));
cx.build.Store(llbindingsptr, llbindingsptrptr);
// Copy in our type descriptors, in case the iterator body needs to refer
// to them.
auto lltydescsptr = cx.build.GEP(llenvptr,
vec(C_int(0),
C_int(abi.box_rc_field_body),
C_int(abi.closure_elt_ty_params)));
auto i = 0u;
while (i < tydesc_count) {
auto lltydescptr = cx.build.GEP(lltydescsptr,
vec(C_int(0), C_int(i as int)));
cx.build.Store(cx.fcx.lltydescs.(i), lltydescptr);
i += 1u;
}
// Step 2: Declare foreach body function.
let str s = mangle_name_by_seq(lcx.ccx, lcx.path, "foreach");
// The 'env' arg entering the body function is a fake env member (as in
// the env-part of the normal rust calling convention) that actually
// points to a stack allocated env in this frame. We bundle that env
// pointer along with the foreach-body-fn pointer into a 'normal' fn pair
// and pass it in as a first class fn-arg to the iterator.
auto iter_body_llty =
type_of_fn_full(lcx.ccx, ast.proto_fn,
none[TypeRef],
vec(rec(mode=ty.mo_alias, ty=decl_ty)),
ty.mk_nil(lcx.ccx.tcx), 0u);
let ValueRef lliterbody = decl_internal_fastcall_fn(lcx.ccx.llmod,
s, iter_body_llty);
auto fcx = new_fn_ctxt(lcx, lliterbody);
auto bcx = new_top_block_ctxt(fcx);
auto lltop = bcx.llbb;
// Populate the upvars from the environment.
auto llremoteenvptr = bcx.build.PointerCast(fcx.llenv, llenvptrty);
auto llremotebindingsptrptr =
bcx.build.GEP(llremoteenvptr, vec(C_int(0),
C_int(abi.box_rc_field_body),
C_int(abi.closure_elt_bindings)));
auto llremotebindingsptr = bcx.build.Load(llremotebindingsptrptr);
i = 0u;
while (i < upvar_count) {
auto upvar_id = upvars.(i);
auto llupvarptrptr = bcx.build.GEP(llremotebindingsptr,
vec(C_int(0), C_int(i as int)));
auto llupvarptr = bcx.build.Load(llupvarptrptr);
fcx.llupvars.insert(upvar_id, llupvarptr);
i += 1u;
}
// Populate the type parameters from the environment.
auto llremotetydescsptr =
bcx.build.GEP(llremoteenvptr,
vec(C_int(0),
C_int(abi.box_rc_field_body),
C_int(abi.closure_elt_ty_params)));
i = 0u;
while (i < tydesc_count) {
auto llremotetydescptr = bcx.build.GEP(llremotetydescsptr,
vec(C_int(0),
C_int(i as int)));
auto llremotetydesc = bcx.build.Load(llremotetydescptr);
fcx.lltydescs += vec(llremotetydesc);
i += 1u;
}
// Add an upvar for the loop variable alias.
fcx.llupvars.insert(decl_id, llvm.LLVMGetParam(fcx.llfn, 3u));
auto r = trans_block(bcx, body);
// Tie up the llallocas -> lltop edge.
new_builder(fcx.llallocas).Br(lltop);
r.bcx.build.RetVoid();
// Step 3: Call iter passing [lliterbody, llenv], plus other args.
alt (seq.node) {
case (ast.expr_call(?f, ?args, ?ann)) {
auto pair = alloca(cx, T_fn_pair(lcx.ccx.tn,
iter_body_llty));
auto code_cell = cx.build.GEP(pair,
vec(C_int(0),
C_int(abi.fn_field_code)));
cx.build.Store(lliterbody, code_cell);
auto env_cell = cx.build.GEP(pair, vec(C_int(0),
C_int(abi.fn_field_box)));
auto llenvblobptr = cx.build.PointerCast(llenvptr,
T_opaque_closure_ptr(lcx.ccx.tn));
cx.build.Store(llenvblobptr, env_cell);
// log "lliterbody: " + val_str(lcx.ccx.tn, lliterbody);
r = trans_call(cx, f,
some[ValueRef](cx.build.Load(pair)),
args,
ann);
ret res(r.bcx, C_nil());
}
}
fail;
}
fn trans_while(@block_ctxt cx, @ast.expr cond,
&ast.block body) -> result {
auto cond_cx = new_scope_block_ctxt(cx, "while cond");
auto next_cx = new_sub_block_ctxt(cx, "next");
auto body_cx = new_loop_scope_block_ctxt(cx, Option.none[@block_ctxt],
next_cx, "while loop body");
auto body_res = trans_block(body_cx, body);
auto cond_res = trans_expr(cond_cx, cond);
body_res.bcx.build.Br(cond_cx.llbb);
auto cond_bcx = trans_block_cleanups(cond_res.bcx, cond_cx);
cond_bcx.build.CondBr(cond_res.val, body_cx.llbb, next_cx.llbb);
cx.build.Br(cond_cx.llbb);
ret res(next_cx, C_nil());
}
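// Note the construction order above: the loop body is translated before
// the condition, and the branches are wired up afterwards. The resulting
// control flow is, schematically:
//
//     entry -> cond
//     cond:  CondBr(cond_val, body, next)
//     body:  ... -> cond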
fn trans_do_while(@block_ctxt cx, &ast.block body,
@ast.expr cond) -> result {
auto next_cx = new_sub_block_ctxt(cx, "next");
auto body_cx = new_loop_scope_block_ctxt(cx, Option.none[@block_ctxt],
next_cx, "do-while loop body");
auto body_res = trans_block(body_cx, body);
auto cond_res = trans_expr(body_res.bcx, cond);
cond_res.bcx.build.CondBr(cond_res.val,
body_cx.llbb,
next_cx.llbb);
cx.build.Br(body_cx.llbb);
ret res(next_cx, body_res.val);
}
// Pattern matching translation
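// An alt is lowered arm by arm: each pattern becomes a chain of tests that
// branch to the next arm's block on failure, and bindings are installed
// only once a pattern has fully matched. Schematically (a sketch):
//
//     test arm0 pat -> matched0 | arm1
//     matched0: bind pats, run arm0 block -> join
//     arm1: test arm1 pat -> matched1 | default
//     default: fail "non-exhaustive match failure"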
fn trans_pat_match(@block_ctxt cx, @ast.pat pat, ValueRef llval,
@block_ctxt next_cx) -> result {
alt (pat.node) {
case (ast.pat_wild(_)) { ret res(cx, llval); }
case (ast.pat_bind(_, _, _)) { ret res(cx, llval); }
case (ast.pat_lit(?lt, ?ann)) {
auto lllit = trans_lit(cx.fcx.lcx.ccx, *lt, ann);
auto lltype = ty.ann_to_type(ann);
auto lleq = trans_compare(cx, ast.eq, lltype, llval, lllit);
auto matched_cx = new_sub_block_ctxt(lleq.bcx, "matched_cx");
lleq.bcx.build.CondBr(lleq.val, matched_cx.llbb, next_cx.llbb);
ret res(matched_cx, llval);
}
case (ast.pat_tag(?id, ?subpats, ?vdef_opt, ?ann)) {
auto lltagptr = cx.build.PointerCast(llval,
T_opaque_tag_ptr(cx.fcx.lcx.ccx.tn));
auto lldiscrimptr = cx.build.GEP(lltagptr,
vec(C_int(0), C_int(0)));
auto lldiscrim = cx.build.Load(lldiscrimptr);
auto vdef = Option.get[ast.variant_def](vdef_opt);
auto variant_id = vdef._1;
auto variant_tag = 0;
auto variants = tag_variants(cx.fcx.lcx.ccx, vdef._0);
auto i = 0;
for (variant_info v in variants) {
auto this_variant_id = v.id;
if (variant_id._0 == this_variant_id._0 &&
variant_id._1 == this_variant_id._1) {
variant_tag = i;
}
i += 1;
}
auto matched_cx = new_sub_block_ctxt(cx, "matched_cx");
auto lleq = cx.build.ICmp(lib.llvm.LLVMIntEQ, lldiscrim,
C_int(variant_tag));
cx.build.CondBr(lleq, matched_cx.llbb, next_cx.llbb);
auto ty_params = node_ann_ty_params(ann);
if (Vec.len[@ast.pat](subpats) > 0u) {
auto llblobptr = matched_cx.build.GEP(lltagptr,
vec(C_int(0), C_int(1)));
auto i = 0;
for (@ast.pat subpat in subpats) {
auto rslt = GEP_tag(matched_cx, llblobptr, vdef._0,
vdef._1, ty_params, i);
auto llsubvalptr = rslt.val;
matched_cx = rslt.bcx;
auto llsubval = load_if_immediate(matched_cx,
llsubvalptr, pat_ty(cx.fcx.lcx.ccx.tcx, subpat));
auto subpat_res = trans_pat_match(matched_cx, subpat,
llsubval, next_cx);
matched_cx = subpat_res.bcx;
i += 1;
}
}
ret res(matched_cx, llval);
}
}
fail;
}
fn trans_pat_binding(@block_ctxt cx, @ast.pat pat,
ValueRef llval, bool bind_alias)
-> result {
alt (pat.node) {
case (ast.pat_wild(_)) { ret res(cx, llval); }
case (ast.pat_lit(_, _)) { ret res(cx, llval); }
case (ast.pat_bind(?id, ?def_id, ?ann)) {
if (bind_alias) {
cx.fcx.lllocals.insert(def_id, llval);
ret res(cx, llval);
} else {
auto t = node_ann_type(cx.fcx.lcx.ccx, ann);
auto rslt = alloc_ty(cx, t);
auto dst = rslt.val;
auto bcx = rslt.bcx;
llvm.LLVMSetValueName(dst, Str.buf(id));
bcx.fcx.lllocals.insert(def_id, dst);
bcx.cleanups +=
vec(clean(bind drop_slot(_, dst, t)));
ret copy_ty(bcx, INIT, dst, llval, t);
}
}
case (ast.pat_tag(_, ?subpats, ?vdef_opt, ?ann)) {
if (Vec.len[@ast.pat](subpats) == 0u) { ret res(cx, llval); }
// Get the appropriate variant for this tag.
auto vdef = Option.get[ast.variant_def](vdef_opt);
auto lltagptr = cx.build.PointerCast(llval,
T_opaque_tag_ptr(cx.fcx.lcx.ccx.tn));
auto llblobptr = cx.build.GEP(lltagptr, vec(C_int(0), C_int(1)));
auto ty_param_substs = node_ann_ty_params(ann);
auto this_cx = cx;
auto i = 0;
for (@ast.pat subpat in subpats) {
auto rslt = GEP_tag(this_cx, llblobptr, vdef._0, vdef._1,
ty_param_substs, i);
this_cx = rslt.bcx;
auto subpat_res = trans_pat_binding(this_cx, subpat,
rslt.val, true);
this_cx = subpat_res.bcx;
i += 1;
}
ret res(this_cx, llval);
}
}
}
fn trans_alt(@block_ctxt cx, @ast.expr expr,
vec[ast.arm] arms, ast.ann ann) -> result {
auto expr_res = trans_expr(cx, expr);
auto this_cx = expr_res.bcx;
let vec[result] arm_results = vec();
for (ast.arm arm in arms) {
auto next_cx = new_sub_block_ctxt(expr_res.bcx, "next");
auto match_res = trans_pat_match(this_cx, arm.pat, expr_res.val,
next_cx);
auto binding_cx = new_scope_block_ctxt(match_res.bcx, "binding");
match_res.bcx.build.Br(binding_cx.llbb);
auto binding_res = trans_pat_binding(binding_cx, arm.pat,
expr_res.val, false);
auto block_res = trans_block(binding_res.bcx, arm.block);
arm_results += vec(block_res);
this_cx = next_cx;
}
auto default_cx = this_cx;
auto default_res = trans_fail(default_cx, some[common.span](expr.span),
"non-exhaustive match failure");
// FIXME: This isn't quite right, particularly re: dynamic types
auto expr_ty = ty.ann_to_type(ann);
auto expr_llty;
if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, expr_ty)) {
expr_llty = T_typaram_ptr(cx.fcx.lcx.ccx.tn);
} else {
expr_llty = type_of(cx.fcx.lcx.ccx, expr_ty);
if (ty.type_is_structural(cx.fcx.lcx.ccx.tcx, expr_ty)) {
expr_llty = T_ptr(expr_llty);
}
}
ret join_results(cx, expr_llty, arm_results);
}
type generic_info = rec(ty.t item_type,
vec[ValueRef] tydescs);
type lval_result = rec(result res,
bool is_mem,
Option.t[generic_info] generic,
Option.t[ValueRef] llobj,
Option.t[ty.t] method_ty);
fn lval_mem(@block_ctxt cx, ValueRef val) -> lval_result {
ret rec(res=res(cx, val),
is_mem=true,
generic=none[generic_info],
llobj=none[ValueRef],
method_ty=none[ty.t]);
}
fn lval_val(@block_ctxt cx, ValueRef val) -> lval_result {
ret rec(res=res(cx, val),
is_mem=false,
generic=none[generic_info],
llobj=none[ValueRef],
method_ty=none[ty.t]);
}
fn trans_external_path(@block_ctxt cx, ast.def_id did,
ty.ty_param_count_and_ty tpt) -> lval_result {
auto lcx = cx.fcx.lcx;
auto name = creader.get_symbol(lcx.ccx.sess, did);
auto v = get_extern_const(lcx.ccx.externs, lcx.ccx.llmod,
name, type_of_ty_param_count_and_ty(lcx, tpt));
ret lval_val(cx, v);
}
fn lval_generic_fn(@block_ctxt cx,
ty.ty_param_count_and_ty tpt,
ast.def_id fn_id,
&ast.ann ann)
-> lval_result {
auto lv;
if (cx.fcx.lcx.ccx.sess.get_targ_crate_num() == fn_id._0) {
// Internal reference.
assert (cx.fcx.lcx.ccx.fn_pairs.contains_key(fn_id));
lv = lval_val(cx, cx.fcx.lcx.ccx.fn_pairs.get(fn_id));
} else {
// External reference.
lv = trans_external_path(cx, fn_id, tpt);
}
auto monoty;
let vec[ty.t] tys;
alt (ann) {
case (ast.ann_none) {
cx.fcx.lcx.ccx.sess.bug("no type annotation for path!");
fail;
}
case (ast.ann_type(?monoty_, ?tps, _)) {
monoty = monoty_;
tys = Option.get[vec[ty.t]](tps);
}
}
if (Vec.len[ty.t](tys) != 0u) {
auto bcx = lv.res.bcx;
let vec[ValueRef] tydescs = vec();
for (ty.t t in tys) {
// TODO: Doesn't always escape.
auto td = get_tydesc(bcx, t, true);
bcx = td.bcx;
Vec.push[ValueRef](tydescs, td.val);
}
auto gen = rec( item_type = tpt._1,
tydescs = tydescs );
lv = rec(res = res(bcx, lv.res.val),
generic = some[generic_info](gen)
with lv);
}
ret lv;
}
fn lookup_discriminant(@local_ctxt lcx, ast.def_id tid, ast.def_id vid)
-> ValueRef {
alt (lcx.ccx.discrims.find(vid)) {
case (none[ValueRef]) {
// It's an external discriminant that we haven't seen yet.
assert (lcx.ccx.sess.get_targ_crate_num() != vid._0);
auto sym = creader.get_symbol(lcx.ccx.sess, vid);
auto gvar = llvm.LLVMAddGlobal(lcx.ccx.llmod, T_int(),
Str.buf(sym));
llvm.LLVMSetLinkage(gvar,
lib.llvm.LLVMExternalLinkage as llvm.Linkage);
llvm.LLVMSetGlobalConstant(gvar, True);
lcx.ccx.discrims.insert(vid, gvar);
ret gvar;
}
case (some[ValueRef](?llval)) { ret llval; }
}
}
fn trans_path(@block_ctxt cx, &ast.path p, &Option.t[ast.def] dopt,
&ast.ann ann) -> lval_result {
alt (dopt) {
case (some[ast.def](?def)) {
alt (def) {
case (ast.def_arg(?did)) {
alt (cx.fcx.llargs.find(did)) {
case (none[ValueRef]) {
assert (cx.fcx.llupvars.contains_key(did));
ret lval_mem(cx, cx.fcx.llupvars.get(did));
}
case (some[ValueRef](?llval)) {
ret lval_mem(cx, llval);
}
}
}
case (ast.def_local(?did)) {
alt (cx.fcx.lllocals.find(did)) {
case (none[ValueRef]) {
assert (cx.fcx.llupvars.contains_key(did));
ret lval_mem(cx, cx.fcx.llupvars.get(did));
}
case (some[ValueRef](?llval)) {
ret lval_mem(cx, llval);
}
}
}
case (ast.def_binding(?did)) {
assert (cx.fcx.lllocals.contains_key(did));
ret lval_mem(cx, cx.fcx.lllocals.get(did));
}
case (ast.def_obj_field(?did)) {
assert (cx.fcx.llobjfields.contains_key(did));
ret lval_mem(cx, cx.fcx.llobjfields.get(did));
}
case (ast.def_fn(?did)) {
auto tyt = ty.lookup_item_type(cx.fcx.lcx.ccx.sess,
cx.fcx.lcx.ccx.tcx, cx.fcx.lcx.ccx.type_cache, did);
ret lval_generic_fn(cx, tyt, did, ann);
}
case (ast.def_obj(?did)) {
auto tyt = ty.lookup_item_type(cx.fcx.lcx.ccx.sess,
cx.fcx.lcx.ccx.tcx, cx.fcx.lcx.ccx.type_cache, did);
ret lval_generic_fn(cx, tyt, did, ann);
}
case (ast.def_variant(?tid, ?vid)) {
auto v_tyt = ty.lookup_item_type(cx.fcx.lcx.ccx.sess,
cx.fcx.lcx.ccx.tcx, cx.fcx.lcx.ccx.type_cache, vid);
alt (ty.struct(cx.fcx.lcx.ccx.tcx, v_tyt._1)) {
case (ty.ty_fn(_, _, _)) {
// N-ary variant.
ret lval_generic_fn(cx, v_tyt, vid, ann);
}
case (_) {
// Nullary variant.
auto tag_ty = node_ann_type(cx.fcx.lcx.ccx, ann);
auto lldiscrim_gv =
lookup_discriminant(cx.fcx.lcx, tid, vid);
auto lldiscrim = cx.build.Load(lldiscrim_gv);
auto alloc_result = alloc_ty(cx, tag_ty);
auto lltagblob = alloc_result.val;
auto lltagty;
if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx,
tag_ty)) {
lltagty = T_opaque_tag(cx.fcx.lcx.ccx.tn);
} else {
lltagty = type_of(cx.fcx.lcx.ccx, tag_ty);
}
auto lltagptr = alloc_result.bcx.build.
PointerCast(lltagblob, T_ptr(lltagty));
auto lldiscrimptr = alloc_result.bcx.build.GEP(
lltagptr, vec(C_int(0), C_int(0)));
alloc_result.bcx.build.Store(lldiscrim,
lldiscrimptr);
ret lval_val(alloc_result.bcx, lltagptr);
}
}
}
case (ast.def_const(?did)) {
// TODO: externals
assert (cx.fcx.lcx.ccx.consts.contains_key(did));
ret lval_mem(cx, cx.fcx.lcx.ccx.consts.get(did));
}
case (ast.def_native_fn(?did)) {
auto tyt = ty.lookup_item_type(cx.fcx.lcx.ccx.sess,
cx.fcx.lcx.ccx.tcx,
cx.fcx.lcx.ccx.type_cache, did);
ret lval_generic_fn(cx, tyt, did, ann);
}
case (_) {
cx.fcx.lcx.ccx.sess.unimpl("def variant in trans");
}
}
}
case (none[ast.def]) {
cx.fcx.lcx.ccx.sess.err("unresolved expr_path in trans");
}
}
fail;
}
fn trans_field(@block_ctxt cx, &ast.span sp, ValueRef v, ty.t t0,
&ast.ident field, &ast.ann ann) -> lval_result {
auto r = autoderef(cx, v, t0);
auto t = autoderefed_ty(cx.fcx.lcx.ccx, t0);
alt (ty.struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty.ty_tup(_)) {
let uint ix = ty.field_num(cx.fcx.lcx.ccx.sess, sp, field);
auto v = GEP_tup_like(r.bcx, t, r.val, vec(0, ix as int));
ret lval_mem(v.bcx, v.val);
}
case (ty.ty_rec(?fields)) {
let uint ix = ty.field_idx(cx.fcx.lcx.ccx.sess, sp, field,
fields);
auto v = GEP_tup_like(r.bcx, t, r.val, vec(0, ix as int));
ret lval_mem(v.bcx, v.val);
}
case (ty.ty_obj(?methods)) {
let uint ix = ty.method_idx(cx.fcx.lcx.ccx.sess, sp, field,
methods);
auto vtbl = r.bcx.build.GEP(r.val,
vec(C_int(0),
C_int(abi.obj_field_vtbl)));
vtbl = r.bcx.build.Load(vtbl);
// +1 because slot #0 contains the destructor
auto v = r.bcx.build.GEP(vtbl, vec(C_int(0),
C_int((ix + 1u) as int)));
auto lvo = lval_mem(r.bcx, v);
let ty.t fn_ty = ty.method_ty_to_fn_ty(cx.fcx.lcx.ccx.tcx,
methods.(ix));
ret rec(llobj = some[ValueRef](r.val),
method_ty = some[ty.t](fn_ty)
with lvo);
}
case (_) {cx.fcx.lcx.ccx.sess.unimpl("field variant in trans_field");}
}
fail;
}
fn trans_index(@block_ctxt cx, &ast.span sp, @ast.expr base,
@ast.expr idx, &ast.ann ann) -> lval_result {
auto lv = trans_expr(cx, base);
lv = autoderef(lv.bcx, lv.val, ty.expr_ty(cx.fcx.lcx.ccx.tcx, base));
auto ix = trans_expr(lv.bcx, idx);
auto v = lv.val;
auto bcx = ix.bcx;
// Cast to an LLVM integer. Rust is less strict than LLVM in this regard.
auto ix_val;
auto ix_size = llsize_of_real(cx.fcx.lcx.ccx, val_ty(ix.val));
auto int_size = llsize_of_real(cx.fcx.lcx.ccx, T_int());
if (ix_size < int_size) {
ix_val = bcx.build.ZExt(ix.val, T_int());
} else if (ix_size > int_size) {
ix_val = bcx.build.Trunc(ix.val, T_int());
} else {
ix_val = ix.val;
}
auto unit_ty = node_ann_type(cx.fcx.lcx.ccx, ann);
auto unit_sz = size_of(bcx, unit_ty);
bcx = unit_sz.bcx;
llvm.LLVMSetValueName(unit_sz.val, Str.buf("unit_sz"));
auto scaled_ix = bcx.build.Mul(ix_val, unit_sz.val);
llvm.LLVMSetValueName(scaled_ix, Str.buf("scaled_ix"));
auto lim = bcx.build.GEP(v, vec(C_int(0), C_int(abi.vec_elt_fill)));
lim = bcx.build.Load(lim);
auto bounds_check = bcx.build.ICmp(lib.llvm.LLVMIntULT,
scaled_ix, lim);
auto fail_cx = new_sub_block_ctxt(bcx, "fail");
auto next_cx = new_sub_block_ctxt(bcx, "next");
bcx.build.CondBr(bounds_check, next_cx.llbb, fail_cx.llbb);
    // Fail block: the index was out of bounds.
auto fail_res = trans_fail(fail_cx, some[common.span](sp),
"bounds check");
auto body = next_cx.build.GEP(v, vec(C_int(0), C_int(abi.vec_elt_data)));
auto elt;
if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, unit_ty)) {
body = next_cx.build.PointerCast(body, T_ptr(T_array(T_i8(), 1u)));
elt = next_cx.build.GEP(body, vec(C_int(0), scaled_ix));
} else {
elt = next_cx.build.GEP(body, vec(C_int(0), ix_val));
// We're crossing a box boundary here, so we may need to pointer cast.
auto llunitty = type_of(next_cx.fcx.lcx.ccx, unit_ty);
elt = next_cx.build.PointerCast(elt, T_ptr(llunitty));
}
ret lval_mem(next_cx, elt);
}
// The is_mem field of the returned lval_result indicates whether the value
// is mem (that is, represented as an alloca or in the heap, and hence needs
// a 'load' to be used as an immediate).
fn trans_lval(@block_ctxt cx, @ast.expr e) -> lval_result {
alt (e.node) {
case (ast.expr_path(?p, ?dopt, ?ann)) {
ret trans_path(cx, p, dopt, ann);
}
case (ast.expr_field(?base, ?ident, ?ann)) {
auto r = trans_expr(cx, base);
auto t = ty.expr_ty(cx.fcx.lcx.ccx.tcx, base);
ret trans_field(r.bcx, e.span, r.val, t, ident, ann);
}
case (ast.expr_index(?base, ?idx, ?ann)) {
ret trans_index(cx, e.span, base, idx, ann);
}
case (ast.expr_unary(?unop, ?base, ?ann)) {
assert (unop == ast.deref);
auto sub = trans_expr(cx, base);
auto val = sub.bcx.build.GEP(sub.val,
vec(C_int(0),
C_int(abi.box_rc_field_body)));
ret lval_mem(sub.bcx, val);
}
case (ast.expr_self_method(?ident, ?ann)) {
alt (cx.fcx.llself) {
case (some[self_vt](?s_vt)) {
auto r = s_vt.v;
auto t = s_vt.t;
ret trans_field(cx, e.span, r, t, ident, ann);
}
case (_) {
// Shouldn't happen.
fail;
}
}
}
case (_) {
cx.fcx.lcx.ccx.sess.span_unimpl(e.span,
"expr variant in trans_lval");
}
}
fail;
}
fn trans_cast(@block_ctxt cx, @ast.expr e, &ast.ann ann) -> result {
auto e_res = trans_expr(cx, e);
auto llsrctype = val_ty(e_res.val);
auto t = node_ann_type(cx.fcx.lcx.ccx, ann);
auto lldsttype = type_of(cx.fcx.lcx.ccx, t);
if (!ty.type_is_fp(cx.fcx.lcx.ccx.tcx, t)) {
// TODO: native-to-native casts
if (ty.type_is_native(cx.fcx.lcx.ccx.tcx,
ty.expr_ty(cx.fcx.lcx.ccx.tcx, e))) {
e_res.val = e_res.bcx.build.PtrToInt(e_res.val, lldsttype);
} else if (ty.type_is_native(cx.fcx.lcx.ccx.tcx, t)) {
e_res.val = e_res.bcx.build.IntToPtr(e_res.val, lldsttype);
} else if (llvm.LLVMGetIntTypeWidth(lldsttype) >
llvm.LLVMGetIntTypeWidth(llsrctype)) {
if (ty.type_is_signed(cx.fcx.lcx.ccx.tcx, t)) {
// Widening signed cast.
e_res.val =
e_res.bcx.build.SExtOrBitCast(e_res.val,
lldsttype);
} else {
// Widening unsigned cast.
e_res.val =
e_res.bcx.build.ZExtOrBitCast(e_res.val,
lldsttype);
}
} else {
// Narrowing cast.
e_res.val =
e_res.bcx.build.TruncOrBitCast(e_res.val,
lldsttype);
}
} else {
cx.fcx.lcx.ccx.sess.unimpl("fp cast");
}
ret e_res;
}
fn trans_bind_thunk(@local_ctxt cx,
ty.t incoming_fty,
ty.t outgoing_fty,
vec[Option.t[@ast.expr]] args,
ty.t closure_ty,
vec[ty.t] bound_tys,
uint ty_param_count) -> ValueRef {
    // Construct a thunk function with signature incoming_fty that copies
    // its bound and freshly supplied args forward into a call to a
    // function of type outgoing_fty.
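    // For instance (a hypothetical binding), `bind f(3, _)` where f has
    // type fn(int, int) -> T yields a thunk of incoming type fn(int) -> T:
    // the thunk loads the bound `3` out of its closure, takes its own
    // single parameter for the blank, and forwards both (along with the
    // closure's stored tydescs) to f as a fastcall.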
let str s = mangle_name_by_seq(cx.ccx, cx.path, "thunk");
let TypeRef llthunk_ty = get_pair_fn_ty(type_of(cx.ccx, incoming_fty));
let ValueRef llthunk = decl_internal_fastcall_fn(cx.ccx.llmod,
s, llthunk_ty);
auto fcx = new_fn_ctxt(cx, llthunk);
auto bcx = new_top_block_ctxt(fcx);
auto lltop = bcx.llbb;
auto llclosure_ptr_ty =
type_of(cx.ccx, ty.mk_imm_box(cx.ccx.tcx, closure_ty));
auto llclosure = bcx.build.PointerCast(fcx.llenv, llclosure_ptr_ty);
auto lltarget = GEP_tup_like(bcx, closure_ty, llclosure,
vec(0,
abi.box_rc_field_body,
abi.closure_elt_target));
bcx = lltarget.bcx;
auto lltargetclosure = bcx.build.GEP(lltarget.val,
vec(C_int(0),
C_int(abi.fn_field_box)));
lltargetclosure = bcx.build.Load(lltargetclosure);
auto outgoing_ret_ty = ty.ty_fn_ret(cx.ccx.tcx, outgoing_fty);
auto outgoing_args = ty.ty_fn_args(cx.ccx.tcx, outgoing_fty);
auto llretptr = fcx.llretptr;
if (ty.type_has_dynamic_size(cx.ccx.tcx, outgoing_ret_ty)) {
llretptr = bcx.build.PointerCast(llretptr, T_typaram_ptr(cx.ccx.tn));
}
let vec[ValueRef] llargs = vec(llretptr,
fcx.lltaskptr,
lltargetclosure);
// Copy in the type parameters.
let uint i = 0u;
while (i < ty_param_count) {
auto lltyparam_ptr =
GEP_tup_like(bcx, closure_ty, llclosure,
vec(0,
abi.box_rc_field_body,
abi.closure_elt_ty_params,
(i as int)));
bcx = lltyparam_ptr.bcx;
auto td = bcx.build.Load(lltyparam_ptr.val);
llargs += vec(td);
fcx.lltydescs += vec(td);
i += 1u;
}
let uint a = 3u; // retptr, task ptr, env come first
let int b = 0;
let uint outgoing_arg_index = 0u;
let vec[TypeRef] llout_arg_tys =
type_of_explicit_args(cx.ccx, outgoing_args);
for (Option.t[@ast.expr] arg in args) {
auto out_arg = outgoing_args.(outgoing_arg_index);
auto llout_arg_ty = llout_arg_tys.(outgoing_arg_index);
alt (arg) {
// Arg provided at binding time; thunk copies it from closure.
case (some[@ast.expr](?e)) {
auto e_ty = ty.expr_ty(cx.ccx.tcx, e);
auto bound_arg =
GEP_tup_like(bcx, closure_ty, llclosure,
vec(0,
abi.box_rc_field_body,
abi.closure_elt_bindings,
b));
bcx = bound_arg.bcx;
auto val = bound_arg.val;
if (out_arg.mode == ty.mo_val) {
if (type_is_immediate(cx.ccx, e_ty)) {
val = bcx.build.Load(val);
bcx = take_ty(bcx, val, e_ty).bcx;
} else {
bcx = take_ty(bcx, val, e_ty).bcx;
val = bcx.build.Load(val);
}
} else if (ty.type_contains_params(cx.ccx.tcx,
out_arg.ty)) {
assert (out_arg.mode == ty.mo_alias);
val = bcx.build.PointerCast(val, llout_arg_ty);
}
llargs += vec(val);
b += 1;
}
// Arg will be provided when the thunk is invoked.
case (none[@ast.expr]) {
let ValueRef passed_arg = llvm.LLVMGetParam(llthunk, a);
if (ty.type_contains_params(cx.ccx.tcx, out_arg.ty)) {
assert (out_arg.mode == ty.mo_alias);
passed_arg = bcx.build.PointerCast(passed_arg,
llout_arg_ty);
}
llargs += vec(passed_arg);
a += 1u;
}
}
outgoing_arg_index += 1u;
}
// FIXME: turn this call + ret into a tail call.
auto lltargetfn = bcx.build.GEP(lltarget.val,
vec(C_int(0),
C_int(abi.fn_field_code)));
// Cast the outgoing function to the appropriate type (see the comments in
// trans_bind below for why this is necessary).
auto lltargetty = type_of_fn(bcx.fcx.lcx.ccx,
ty.ty_fn_proto(bcx.fcx.lcx.ccx.tcx,
outgoing_fty),
outgoing_args,
outgoing_ret_ty,
ty_param_count);
lltargetfn = bcx.build.PointerCast(lltargetfn, T_ptr(T_ptr(lltargetty)));
lltargetfn = bcx.build.Load(lltargetfn);
auto r = bcx.build.FastCall(lltargetfn, llargs);
bcx.build.RetVoid();
// Tie up the llallocas -> lltop edge.
new_builder(fcx.llallocas).Br(lltop);
ret llthunk;
}
fn trans_bind(@block_ctxt cx, @ast.expr f,
vec[Option.t[@ast.expr]] args,
&ast.ann ann) -> result {
auto f_res = trans_lval(cx, f);
if (f_res.is_mem) {
cx.fcx.lcx.ccx.sess.unimpl("re-binding existing function");
} else {
let vec[@ast.expr] bound = vec();
for (Option.t[@ast.expr] argopt in args) {
alt (argopt) {
case (none[@ast.expr]) {
}
case (some[@ast.expr](?e)) {
Vec.push[@ast.expr](bound, e);
}
}
}
// Figure out which tydescs we need to pass, if any.
let ty.t outgoing_fty;
let vec[ValueRef] lltydescs;
alt (f_res.generic) {
case (none[generic_info]) {
outgoing_fty = ty.expr_ty(cx.fcx.lcx.ccx.tcx, f);
lltydescs = vec();
}
case (some[generic_info](?ginfo)) {
outgoing_fty = ginfo.item_type;
lltydescs = ginfo.tydescs;
}
}
auto ty_param_count = Vec.len[ValueRef](lltydescs);
if (Vec.len[@ast.expr](bound) == 0u && ty_param_count == 0u) {
// Trivial 'binding': just return the static pair-ptr.
ret f_res.res;
} else {
auto bcx = f_res.res.bcx;
auto pair_t = node_type(cx.fcx.lcx.ccx, ann);
auto pair_v = alloca(bcx, pair_t);
// Translate the bound expressions.
let vec[ty.t] bound_tys = vec();
let vec[ValueRef] bound_vals = vec();
auto i = 0u;
for (@ast.expr e in bound) {
auto arg = trans_expr(bcx, e);
bcx = arg.bcx;
Vec.push[ValueRef](bound_vals, arg.val);
Vec.push[ty.t](bound_tys,
ty.expr_ty(cx.fcx.lcx.ccx.tcx, e));
i += 1u;
}
// Synthesize a closure type.
let ty.t bindings_ty = ty.mk_imm_tup(cx.fcx.lcx.ccx.tcx,
bound_tys);
// NB: keep this in sync with T_closure_ptr; we're making
// a ty.t structure that has the same "shape" as the LLVM type
// it constructs.
let ty.t tydesc_ty = ty.mk_type(cx.fcx.lcx.ccx.tcx);
let vec[ty.t] captured_tys =
Vec.init_elt[ty.t](tydesc_ty, ty_param_count);
let vec[ty.t] closure_tys =
vec(tydesc_ty,
outgoing_fty,
bindings_ty,
ty.mk_imm_tup(cx.fcx.lcx.ccx.tcx, captured_tys));
let ty.t closure_ty = ty.mk_imm_tup(cx.fcx.lcx.ccx.tcx,
closure_tys);
auto r = trans_malloc_boxed(bcx, closure_ty);
auto box = r.val;
bcx = r.bcx;
auto rc = bcx.build.GEP(box,
vec(C_int(0),
C_int(abi.box_rc_field_refcnt)));
auto closure =
bcx.build.GEP(box,
vec(C_int(0),
C_int(abi.box_rc_field_body)));
bcx.build.Store(C_int(1), rc);
// Store bindings tydesc.
auto bound_tydesc =
bcx.build.GEP(closure,
vec(C_int(0),
C_int(abi.closure_elt_tydesc)));
auto bindings_tydesc = get_tydesc(bcx, bindings_ty, true);
bcx = bindings_tydesc.bcx;
bcx.build.Store(bindings_tydesc.val, bound_tydesc);
// Determine the LLVM type for the outgoing function type. This
// may be different from the type returned by trans_malloc_boxed()
// since we have more information than that function does;
// specifically, we know how many type descriptors the outgoing
// function has, which type_of() doesn't, as only we know which
// item the function refers to.
auto llfnty = type_of_fn(bcx.fcx.lcx.ccx,
ty.ty_fn_proto(bcx.fcx.lcx.ccx.tcx, outgoing_fty),
ty.ty_fn_args(bcx.fcx.lcx.ccx.tcx, outgoing_fty),
ty.ty_fn_ret(bcx.fcx.lcx.ccx.tcx, outgoing_fty),
ty_param_count);
auto llclosurety = T_ptr(T_fn_pair(bcx.fcx.lcx.ccx.tn, llfnty));
// Store thunk-target.
auto bound_target =
bcx.build.GEP(closure,
vec(C_int(0),
C_int(abi.closure_elt_target)));
auto src = bcx.build.Load(f_res.res.val);
bound_target = bcx.build.PointerCast(bound_target, llclosurety);
bcx.build.Store(src, bound_target);
// Copy expr values into boxed bindings.
i = 0u;
auto bindings =
bcx.build.GEP(closure,
vec(C_int(0),
C_int(abi.closure_elt_bindings)));
for (ValueRef v in bound_vals) {
auto bound = bcx.build.GEP(bindings,
vec(C_int(0), C_int(i as int)));
bcx = copy_ty(bcx, INIT, bound, v, bound_tys.(i)).bcx;
i += 1u;
}
// If necessary, copy tydescs describing type parameters into the
// appropriate slot in the closure.
alt (f_res.generic) {
case (none[generic_info]) { /* nothing to do */ }
case (some[generic_info](?ginfo)) {
auto ty_params_slot =
bcx.build.GEP(closure,
vec(C_int(0),
C_int(abi.closure_elt_ty_params)));
auto i = 0;
for (ValueRef td in ginfo.tydescs) {
auto ty_param_slot = bcx.build.GEP(ty_params_slot,
vec(C_int(0),
C_int(i)));
bcx.build.Store(td, ty_param_slot);
i += 1;
}
outgoing_fty = ginfo.item_type;
}
}
// Make thunk and store thunk-ptr in outer pair's code slot.
auto pair_code = bcx.build.GEP(pair_v,
vec(C_int(0),
C_int(abi.fn_field_code)));
let ty.t pair_ty = node_ann_type(cx.fcx.lcx.ccx, ann);
let ValueRef llthunk =
trans_bind_thunk(cx.fcx.lcx, pair_ty, outgoing_fty,
args, closure_ty, bound_tys,
ty_param_count);
bcx.build.Store(llthunk, pair_code);
// Store box ptr in outer pair's box slot.
auto pair_box = bcx.build.GEP(pair_v,
vec(C_int(0),
C_int(abi.fn_field_box)));
bcx.build.Store
(bcx.build.PointerCast
(box,
T_opaque_closure_ptr(bcx.fcx.lcx.ccx.tn)),
pair_box);
find_scope_cx(cx).cleanups +=
vec(clean(bind drop_slot(_, pair_v, pair_ty)));
ret res(bcx, pair_v);
}
}
}
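// Translates a single argument expression for a call. Structural values
// already travel by pointer, so they are passed as-is; for alias-mode
// parameters we pass an address, spilling immediates to a temporary
// alloca if needed; by-value arguments are retained with take_ty before
// the call.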
fn trans_arg_expr(@block_ctxt cx,
ty.arg arg,
TypeRef lldestty0,
@ast.expr e) -> result {
auto val;
auto bcx = cx;
auto e_ty = ty.expr_ty(cx.fcx.lcx.ccx.tcx, e);
if (ty.type_is_structural(cx.fcx.lcx.ccx.tcx, e_ty)) {
auto re = trans_expr(bcx, e);
val = re.val;
bcx = re.bcx;
} else if (arg.mode == ty.mo_alias) {
let lval_result lv;
if (ty.is_lval(e)) {
lv = trans_lval(bcx, e);
} else {
auto r = trans_expr(bcx, e);
if (type_is_immediate(cx.fcx.lcx.ccx, e_ty)) {
lv = lval_val(r.bcx, r.val);
} else {
lv = lval_mem(r.bcx, r.val);
}
}
bcx = lv.res.bcx;
if (lv.is_mem) {
val = lv.res.val;
} else {
// Non-mem but we're trying to alias; synthesize an
// alloca, spill to it and pass its address.
val = do_spill(lv.res.bcx, lv.res.val);
}
} else {
auto re = trans_expr(bcx, e);
val = re.val;
bcx = re.bcx;
}
if (arg.mode != ty.mo_alias) {
bcx = take_ty(bcx, val, e_ty).bcx;
}
if (ty.type_contains_params(cx.fcx.lcx.ccx.tcx, arg.ty)) {
auto lldestty = lldestty0;
if (arg.mode == ty.mo_val) {
// FIXME: we'd prefer to use &&, but rustboot doesn't like it
if (ty.type_is_structural(cx.fcx.lcx.ccx.tcx, e_ty)) {
lldestty = T_ptr(lldestty);
}
}
val = bcx.build.PointerCast(val, lldestty);
}
if (arg.mode == ty.mo_val) {
// FIXME: we'd prefer to use &&, but rustboot doesn't like it
if (ty.type_is_structural(cx.fcx.lcx.ccx.tcx, e_ty)) {
// Until here we've been treating structures by pointer;
// we are now passing it as an arg, so need to load it.
val = bcx.build.Load(val);
}
}
ret res(bcx, val);
}
// NB: must keep 4 fns in sync:
//
// - type_of_fn_full
// - create_llargs_for_fn_args.
// - new_fn_ctxt
// - trans_args
fn trans_args(@block_ctxt cx,
ValueRef llenv,
Option.t[ValueRef] llobj,
Option.t[generic_info] gen,
Option.t[ValueRef] lliterbody,
&vec[@ast.expr] es,
ty.t fn_ty)
-> tup(@block_ctxt, vec[ValueRef], ValueRef) {
let vec[ty.arg] args = ty.ty_fn_args(cx.fcx.lcx.ccx.tcx, fn_ty);
let vec[ValueRef] llargs = vec();
let vec[ValueRef] lltydescs = vec();
let @block_ctxt bcx = cx;
// Arg 0: Output pointer.
auto retty = ty.ty_fn_ret(cx.fcx.lcx.ccx.tcx, fn_ty);
auto llretslot_res = alloc_ty(bcx, retty);
bcx = llretslot_res.bcx;
auto llretslot = llretslot_res.val;
alt (gen) {
case (some[generic_info](?g)) {
lltydescs = g.tydescs;
args = ty.ty_fn_args(cx.fcx.lcx.ccx.tcx, g.item_type);
retty = ty.ty_fn_ret(cx.fcx.lcx.ccx.tcx, g.item_type);
}
case (_) {
}
}
if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, retty)) {
llargs += vec(bcx.build.PointerCast
(llretslot, T_typaram_ptr(cx.fcx.lcx.ccx.tn)));
} else if (ty.type_contains_params(cx.fcx.lcx.ccx.tcx, retty)) {
// It's possible that the callee has some generic-ness somewhere in
// its return value -- say a method signature within an obj or a fn
// type deep in a structure -- which the caller has a concrete view
        // of. If so, cast the caller's view of the retslot to the callee's
// view, for the sake of making a type-compatible call.
llargs +=
vec(cx.build.PointerCast(llretslot,
T_ptr(type_of(bcx.fcx.lcx.ccx, retty))));
} else {
llargs += vec(llretslot);
}
// Arg 1: Task pointer.
llargs += vec(bcx.fcx.lltaskptr);
// Arg 2: Env (closure-bindings / self-obj)
alt (llobj) {
case (some[ValueRef](?ob)) {
            // An object is always found in memory and is not yet loaded
            // at this point (it arrived as part of an lval x.y dotted
            // method-call).
llargs += vec(bcx.build.Load(ob));
}
case (_) {
llargs += vec(llenv);
}
}
    // Args 3..n: ty_params ...
llargs += lltydescs;
// ... then possibly an lliterbody argument.
alt (lliterbody) {
case (none[ValueRef]) {}
case (some[ValueRef](?lli)) {
llargs += vec(lli);
}
}
// ... then explicit args.
// First we figure out the caller's view of the types of the arguments.
// This will be needed if this is a generic call, because the callee has
// to cast her view of the arguments to the caller's view.
auto arg_tys = type_of_explicit_args(cx.fcx.lcx.ccx, args);
auto i = 0u;
for (@ast.expr e in es) {
auto r = trans_arg_expr(bcx, args.(i), arg_tys.(i), e);
bcx = r.bcx;
llargs += vec(r.val);
i += 1u;
}
ret tup(bcx, llargs, llretslot);
}
fn trans_call(@block_ctxt cx, @ast.expr f,
Option.t[ValueRef] lliterbody,
vec[@ast.expr] args,
&ast.ann ann) -> result {
// NB: 'f' isn't necessarily a function; it might be an entire self-call
// expression because of the hack that allows us to process self-calls
// with trans_call.
auto f_res = trans_lval(cx, f);
auto faddr = f_res.res.val;
auto llenv = C_null(T_opaque_closure_ptr(cx.fcx.lcx.ccx.tn));
alt (f_res.llobj) {
case (some[ValueRef](_)) {
// It's a vtbl entry.
faddr = f_res.res.bcx.build.Load(faddr);
}
case (none[ValueRef]) {
// It's a closure.
auto bcx = f_res.res.bcx;
auto pair = faddr;
faddr = bcx.build.GEP(pair, vec(C_int(0),
C_int(abi.fn_field_code)));
faddr = bcx.build.Load(faddr);
auto llclosure = bcx.build.GEP(pair,
vec(C_int(0),
C_int(abi.fn_field_box)));
llenv = bcx.build.Load(llclosure);
}
}
let ty.t fn_ty;
alt (f_res.method_ty) {
case (some[ty.t](?meth)) {
// self-call
fn_ty = meth;
}
case (_) {
fn_ty = ty.expr_ty(cx.fcx.lcx.ccx.tcx, f);
}
}
auto ret_ty = ty.ann_to_type(ann);
auto args_res = trans_args(f_res.res.bcx,
llenv, f_res.llobj,
f_res.generic,
lliterbody,
args, fn_ty);
auto bcx = args_res._0;
auto llargs = args_res._1;
auto llretslot = args_res._2;
/*
log "calling: " + val_str(cx.fcx.lcx.ccx.tn, faddr);
for (ValueRef arg in llargs) {
log "arg: " + val_str(cx.fcx.lcx.ccx.tn, arg);
}
*/
bcx.build.FastCall(faddr, llargs);
auto retval = C_nil();
alt (lliterbody) {
case (none[ValueRef]) {
if (!ty.type_is_nil(cx.fcx.lcx.ccx.tcx, ret_ty)) {
retval = load_if_immediate(bcx, llretslot, ret_ty);
// Retval doesn't correspond to anything really tangible in
// the frame, but it's a ref all the same, so we put a note
// here to drop it when we're done in this scope.
find_scope_cx(cx).cleanups +=
vec(clean(bind drop_ty(_, retval, ret_ty)));
}
}
case (some[ValueRef](_)) {
            // If there was an lliterbody, it means we were calling an
            // iter; since we are *not* the party using its 'output'
            // value, we should ignore llretslot.
}
}
ret res(bcx, retval);
}
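// Translates a tuple literal: allocates space for the tuple, schedules a
// cleanup to drop it on scope exit, then copy-initializes each element in
// place via GEP_tup_like.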
fn trans_tup(@block_ctxt cx, vec[ast.elt] elts,
&ast.ann ann) -> result {
auto bcx = cx;
auto t = node_ann_type(bcx.fcx.lcx.ccx, ann);
auto tup_res = alloc_ty(bcx, t);
auto tup_val = tup_res.val;
bcx = tup_res.bcx;
find_scope_cx(cx).cleanups +=
vec(clean(bind drop_ty(_, tup_val, t)));
let int i = 0;
for (ast.elt e in elts) {
auto e_ty = ty.expr_ty(cx.fcx.lcx.ccx.tcx, e.expr);
auto src_res = trans_expr(bcx, e.expr);
bcx = src_res.bcx;
auto dst_res = GEP_tup_like(bcx, t, tup_val, vec(0, i));
bcx = dst_res.bcx;
        bcx = copy_ty(bcx, INIT, dst_res.val, src_res.val, e_ty).bcx;
i += 1;
}
ret res(bcx, tup_val);
}
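// Translates a vector literal. The vector itself is allocated on the heap
// through the upcall_new_vec runtime call; the elements are then
// copy-initialized into its data field, which we treat as a pseudo-tuple
// so that GEP_tup_like can compute (possibly dynamic) element offsets.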
fn trans_vec(@block_ctxt cx, vec[@ast.expr] args,
&ast.ann ann) -> result {
auto t = node_ann_type(cx.fcx.lcx.ccx, ann);
auto unit_ty = t;
alt (ty.struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty.ty_vec(?mt)) {
unit_ty = mt.ty;
}
case (_) {
cx.fcx.lcx.ccx.sess.bug("non-vec type in trans_vec");
}
}
auto bcx = cx;
auto unit_sz = size_of(bcx, unit_ty);
bcx = unit_sz.bcx;
auto data_sz = bcx.build.Mul(C_int(Vec.len[@ast.expr](args) as int),
unit_sz.val);
// FIXME: pass tydesc properly.
auto sub = trans_upcall(bcx, "upcall_new_vec", vec(data_sz, C_int(0)),
false);
bcx = sub.bcx;
auto llty = type_of(bcx.fcx.lcx.ccx, t);
auto vec_val = vi2p(bcx, sub.val, llty);
find_scope_cx(bcx).cleanups +=
vec(clean(bind drop_ty(_, vec_val, t)));
auto body = bcx.build.GEP(vec_val, vec(C_int(0),
C_int(abi.vec_elt_data)));
auto pseudo_tup_ty =
ty.mk_imm_tup(cx.fcx.lcx.ccx.tcx,
Vec.init_elt[ty.t](unit_ty,
Vec.len[@ast.expr](args)));
let int i = 0;
for (@ast.expr e in args) {
auto src_res = trans_expr(bcx, e);
bcx = src_res.bcx;
auto dst_res = GEP_tup_like(bcx, pseudo_tup_ty, body, vec(0, i));
bcx = dst_res.bcx;
// Cast the destination type to the source type. This is needed to
// make tags work, for a subtle combination of reasons:
//
// (1) "dst_res" above is derived from "body", which is in turn
// derived from "vec_val".
// (2) "vec_val" has the LLVM type "llty".
// (3) "llty" is the result of calling type_of() on a vector type.
// (4) For tags, type_of() returns a different type depending on
        //     whether the tag is behind a box or not. Vector types are
// considered boxes.
// (5) "src_res" is derived from "unit_ty", which is not behind a box.
auto dst_val;
if (!ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, unit_ty)) {
auto llunit_ty = type_of(cx.fcx.lcx.ccx, unit_ty);
dst_val = bcx.build.PointerCast(dst_res.val, T_ptr(llunit_ty));
} else {
dst_val = dst_res.val;
}
bcx = copy_ty(bcx, INIT, dst_val, src_res.val, unit_ty).bcx;
i += 1;
}
auto fill = bcx.build.GEP(vec_val,
vec(C_int(0), C_int(abi.vec_elt_fill)));
bcx.build.Store(data_sz, fill);
ret res(bcx, vec_val);
}
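// Translates a record literal, including functional update
// (rec(f=x with base)): for each field in the record's type we use the
// explicitly provided initializer expression if there is one, and
// otherwise copy the corresponding field out of the translated base
// record.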
fn trans_rec(@block_ctxt cx, vec[ast.field] fields,
Option.t[@ast.expr] base, &ast.ann ann) -> result {
auto bcx = cx;
auto t = node_ann_type(bcx.fcx.lcx.ccx, ann);
auto rec_res = alloc_ty(bcx, t);
auto rec_val = rec_res.val;
bcx = rec_res.bcx;
find_scope_cx(cx).cleanups +=
vec(clean(bind drop_ty(_, rec_val, t)));
let int i = 0;
auto base_val = C_nil();
alt (base) {
case (none[@ast.expr]) { }
case (some[@ast.expr](?bexp)) {
auto base_res = trans_expr(bcx, bexp);
bcx = base_res.bcx;
base_val = base_res.val;
}
}
let vec[ty.field] ty_fields = vec();
alt (ty.struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty.ty_rec(?flds)) { ty_fields = flds; }
}
for (ty.field tf in ty_fields) {
auto e_ty = tf.mt.ty;
auto dst_res = GEP_tup_like(bcx, t, rec_val, vec(0, i));
bcx = dst_res.bcx;
auto expr_provided = false;
auto src_res = res(bcx, C_nil());
for (ast.field f in fields) {
if (Str.eq(f.ident, tf.ident)) {
expr_provided = true;
src_res = trans_expr(bcx, f.expr);
}
}
if (!expr_provided) {
            src_res = GEP_tup_like(bcx, t, base_val, vec(0, i));
            src_res = res(src_res.bcx,
                          load_if_immediate(src_res.bcx, src_res.val,
                                            e_ty));
}
bcx = src_res.bcx;
bcx = copy_ty(bcx, INIT, dst_res.val, src_res.val, e_ty).bcx;
i += 1;
}
ret res(bcx, rec_val);
}
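// The top-level expression translator: dispatches on the expression
// variant and hands off to the specialized translators above. Variants
// not handled here are lvalues and fall through to trans_lval at the
// bottom.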
fn trans_expr(@block_ctxt cx, @ast.expr e) -> result {
alt (e.node) {
case (ast.expr_lit(?lit, ?ann)) {
ret res(cx, trans_lit(cx.fcx.lcx.ccx, *lit, ann));
}
case (ast.expr_unary(?op, ?x, ?ann)) {
if (op != ast.deref) {
ret trans_unary(cx, op, x, ann);
}
}
case (ast.expr_binary(?op, ?x, ?y, _)) {
ret trans_binary(cx, op, x, y);
}
case (ast.expr_if(?cond, ?thn, ?els, _)) {
ret trans_if(cx, cond, thn, els);
}
case (ast.expr_for(?decl, ?seq, ?body, _)) {
ret trans_for(cx, decl, seq, body);
}
case (ast.expr_for_each(?decl, ?seq, ?body, _)) {
ret trans_for_each(cx, decl, seq, body);
}
case (ast.expr_while(?cond, ?body, _)) {
ret trans_while(cx, cond, body);
}
case (ast.expr_do_while(?body, ?cond, _)) {
ret trans_do_while(cx, body, cond);
}
case (ast.expr_alt(?expr, ?arms, ?ann)) {
ret trans_alt(cx, expr, arms, ann);
}
case (ast.expr_block(?blk, _)) {
auto sub_cx = new_scope_block_ctxt(cx, "block-expr body");
auto next_cx = new_sub_block_ctxt(cx, "next");
auto sub = trans_block(sub_cx, blk);
cx.build.Br(sub_cx.llbb);
sub.bcx.build.Br(next_cx.llbb);
ret res(next_cx, sub.val);
}
case (ast.expr_assign(?dst, ?src, ?ann)) {
auto lhs_res = trans_lval(cx, dst);
assert (lhs_res.is_mem);
auto rhs_res = trans_expr(lhs_res.res.bcx, src);
auto t = node_ann_type(cx.fcx.lcx.ccx, ann);
// FIXME: calculate copy init-ness in typestate.
ret copy_ty(rhs_res.bcx, DROP_EXISTING,
lhs_res.res.val, rhs_res.val, t);
}
case (ast.expr_assign_op(?op, ?dst, ?src, ?ann)) {
auto t = node_ann_type(cx.fcx.lcx.ccx, ann);
auto lhs_res = trans_lval(cx, dst);
assert (lhs_res.is_mem);
auto rhs_res = trans_expr(lhs_res.res.bcx, src);
if (ty.type_is_sequence(cx.fcx.lcx.ccx.tcx, t)) {
alt (op) {
case (ast.add) {
ret trans_vec_append(rhs_res.bcx, t,
lhs_res.res.val,
rhs_res.val);
}
case (_) { }
}
}
auto lhs_val = load_if_immediate(rhs_res.bcx,
lhs_res.res.val, t);
auto v = trans_eager_binop(rhs_res.bcx, op, t,
lhs_val, rhs_res.val);
// FIXME: calculate copy init-ness in typestate.
ret copy_ty(v.bcx, DROP_EXISTING,
lhs_res.res.val, v.val, t);
}
case (ast.expr_bind(?f, ?args, ?ann)) {
ret trans_bind(cx, f, args, ann);
}
case (ast.expr_call(?f, ?args, ?ann)) {
ret trans_call(cx, f, none[ValueRef], args, ann);
}
case (ast.expr_cast(?e, _, ?ann)) {
ret trans_cast(cx, e, ann);
}
case (ast.expr_vec(?args, _, ?ann)) {
ret trans_vec(cx, args, ann);
}
case (ast.expr_tup(?args, ?ann)) {
ret trans_tup(cx, args, ann);
}
case (ast.expr_rec(?args, ?base, ?ann)) {
ret trans_rec(cx, args, base, ann);
}
case (ast.expr_ext(_, _, _, ?expanded, _)) {
ret trans_expr(cx, expanded);
}
case (ast.expr_fail(_)) {
ret trans_fail(cx, some[common.span](e.span), "explicit failure");
}
case (ast.expr_log(?lvl, ?a, _)) {
ret trans_log(lvl, cx, a);
}
case (ast.expr_assert(?a, _)) {
ret trans_check_expr(cx, a);
}
case (ast.expr_check(?a, _)) {
ret trans_check_expr(cx, a);
}
case (ast.expr_break(?a)) {
ret trans_break(cx);
}
case (ast.expr_cont(?a)) {
ret trans_cont(cx);
}
case (ast.expr_ret(?e, _)) {
ret trans_ret(cx, e);
}
case (ast.expr_put(?e, _)) {
ret trans_put(cx, e);
}
case (ast.expr_be(?e, _)) {
ret trans_be(cx, e);
}
case (ast.expr_port(?ann)) {
ret trans_port(cx, ann);
}
case (ast.expr_chan(?e, ?ann)) {
ret trans_chan(cx, e, ann);
}
case (ast.expr_send(?lhs, ?rhs, ?ann)) {
ret trans_send(cx, lhs, rhs, ann);
}
case (ast.expr_recv(?lhs, ?rhs, ?ann)) {
ret trans_recv(cx, lhs, rhs, ann);
}
case (_) {
// The expression is an lvalue. Fall through.
}
}
// lval cases fall through to trans_lval and then
// possibly load the result (if it's non-structural).
auto t = ty.expr_ty(cx.fcx.lcx.ccx.tcx, e);
auto sub = trans_lval(cx, e);
ret res(sub.res.bcx, load_if_immediate(sub.res.bcx, sub.res.val, t));
}
// We pass structural values around the compiler "by pointer" and
// non-structural values (scalars, boxes, pointers) "by value". We call the
// latter group "immediates" and, in some circumstances when we know we have a
// pointer (or need one), perform load/store operations based on the
// immediate-ness of the type.
fn type_is_immediate(@crate_ctxt ccx, ty.t t) -> bool {
ret ty.type_is_scalar(ccx.tcx, t) ||
ty.type_is_boxed(ccx.tcx, t) ||
ty.type_is_native(ccx.tcx, t);
}
fn do_spill(@block_ctxt cx, ValueRef v) -> ValueRef {
// We have a value but we have to spill it to pass by alias.
auto llptr = alloca(cx, val_ty(v));
cx.build.Store(v, llptr);
ret llptr;
}
fn spill_if_immediate(@block_ctxt cx, ValueRef v, ty.t t) -> ValueRef {
if (type_is_immediate(cx.fcx.lcx.ccx, t)) {
ret do_spill(cx, v);
}
ret v;
}
fn load_if_immediate(@block_ctxt cx, ValueRef v, ty.t t) -> ValueRef {
if (type_is_immediate(cx.fcx.lcx.ccx, t)) {
ret cx.build.Load(v);
}
ret v;
}
fn trans_log(int lvl, @block_ctxt cx, @ast.expr e) -> result {
auto lcx = cx.fcx.lcx;
auto modname = Str.connect(lcx.module_path, ".");
auto global;
if (lcx.ccx.module_data.contains_key(modname)) {
global = lcx.ccx.module_data.get(modname);
} else {
global = llvm.LLVMAddGlobal(lcx.ccx.llmod, T_int(),
Str.buf("_rust_mod_log_" + modname));
llvm.LLVMSetGlobalConstant(global, False);
llvm.LLVMSetInitializer(global, C_null(T_int()));
llvm.LLVMSetLinkage(global, lib.llvm.LLVMInternalLinkage
as llvm.Linkage);
lcx.ccx.module_data.insert(modname, global);
}
auto log_cx = new_scope_block_ctxt(cx, "log");
auto after_cx = new_sub_block_ctxt(cx, "after");
auto load = cx.build.Load(global);
auto test = cx.build.ICmp(lib.llvm.LLVMIntSGE, load, C_int(lvl));
cx.build.CondBr(test, log_cx.llbb, after_cx.llbb);
auto sub = trans_expr(log_cx, e);
auto e_ty = ty.expr_ty(cx.fcx.lcx.ccx.tcx, e);
auto log_bcx = sub.bcx;
if (ty.type_is_fp(cx.fcx.lcx.ccx.tcx, e_ty)) {
let TypeRef tr;
let bool is32bit = false;
alt (ty.struct(cx.fcx.lcx.ccx.tcx, e_ty)) {
case (ty.ty_machine(util.common.ty_f32)) {
tr = T_f32();
is32bit = true;
}
case (ty.ty_machine(util.common.ty_f64)) {
tr = T_f64();
}
case (_) {
tr = T_float();
}
}
if (is32bit) {
auto uval = trans_upcall(log_bcx,
"upcall_log_float",
vec(C_int(lvl), sub.val),
false);
log_bcx = uval.bcx;
} else {
auto tmp = alloca(log_bcx, tr);
sub.bcx.build.Store(sub.val, tmp);
auto uval = trans_upcall(log_bcx,
"upcall_log_double",
vec(C_int(lvl), vp2i(log_bcx, tmp)),
false);
log_bcx = uval.bcx;
}
} else {
alt (ty.struct(cx.fcx.lcx.ccx.tcx, e_ty)) {
case (ty.ty_str) {
auto v = vp2i(log_bcx, sub.val);
log_bcx = trans_upcall(log_bcx,
"upcall_log_str",
vec(C_int(lvl), v),
false).bcx;
}
case (_) {
auto v = vec(C_int(lvl), sub.val);
log_bcx = trans_upcall(log_bcx,
"upcall_log_int",
v, false).bcx;
}
}
}
log_bcx = trans_block_cleanups(log_bcx, log_cx);
log_bcx.build.Br(after_cx.llbb);
ret res(after_cx, C_nil());
}
fn trans_check_expr(@block_ctxt cx, @ast.expr e) -> result {
auto cond_res = trans_expr(cx, e);
auto expr_str = util.common.expr_to_str(e);
auto fail_cx = new_sub_block_ctxt(cx, "fail");
auto fail_res = trans_fail(fail_cx, some[common.span](e.span), expr_str);
auto next_cx = new_sub_block_ctxt(cx, "next");
cond_res.bcx.build.CondBr(cond_res.val,
next_cx.llbb,
fail_cx.llbb);
ret res(next_cx, C_nil());
}
fn trans_fail(@block_ctxt cx, Option.t[common.span] sp_opt, str fail_str)
-> result {
auto V_fail_str = p2i(C_cstr(cx.fcx.lcx.ccx, fail_str));
auto V_filename; auto V_line;
alt (sp_opt) {
case (some[common.span](?sp)) {
auto loc = cx.fcx.lcx.ccx.sess.lookup_pos(sp.lo);
V_filename = p2i(C_cstr(cx.fcx.lcx.ccx, loc.filename));
V_line = loc.line as int;
}
case (none[common.span]) {
V_filename = p2i(C_str(cx.fcx.lcx.ccx, "<runtime>"));
V_line = 0;
}
}
auto args = vec(V_fail_str, V_filename, C_int(V_line));
auto sub = trans_upcall(cx, "upcall_fail", args, false);
sub.bcx.build.Unreachable();
ret res(sub.bcx, C_nil());
}
fn trans_put(@block_ctxt cx, &Option.t[@ast.expr] e) -> result {
auto llcallee = C_nil();
auto llenv = C_nil();
alt (cx.fcx.lliterbody) {
case (some[ValueRef](?lli)) {
auto slot = alloca(cx, val_ty(lli));
cx.build.Store(lli, slot);
llcallee = cx.build.GEP(slot, vec(C_int(0),
C_int(abi.fn_field_code)));
llcallee = cx.build.Load(llcallee);
llenv = cx.build.GEP(slot, vec(C_int(0),
C_int(abi.fn_field_box)));
llenv = cx.build.Load(llenv);
}
}
auto bcx = cx;
auto dummy_retslot = alloca(bcx, T_nil());
let vec[ValueRef] llargs = vec(dummy_retslot, cx.fcx.lltaskptr, llenv);
alt (e) {
case (none[@ast.expr]) { }
case (some[@ast.expr](?x)) {
auto e_ty = ty.expr_ty(cx.fcx.lcx.ccx.tcx, x);
auto arg = rec(mode=ty.mo_alias, ty=e_ty);
auto arg_tys = type_of_explicit_args(cx.fcx.lcx.ccx, vec(arg));
auto r = trans_arg_expr(bcx, arg, arg_tys.(0), x);
bcx = r.bcx;
llargs += vec(r.val);
}
}
ret res(bcx, bcx.build.FastCall(llcallee, llargs));
}
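// Translates 'break' and 'cont' by walking up the chain of enclosing
// block contexts, emitting each scope's cleanups on the way, until the
// nearest loop scope is found. 'break' branches to the loop's break
// block; 'cont' branches to its cont block, or back to the loop header
// when no explicit cont target exists.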
fn trans_break_cont(@block_ctxt cx, bool to_end) -> result {
auto bcx = cx;
// Locate closest loop block, outputting cleanup as we go.
auto cleanup_cx = cx;
while (true) {
bcx = trans_block_cleanups(bcx, cleanup_cx);
alt (cleanup_cx.kind) {
case (LOOP_SCOPE_BLOCK(?_cont, ?_break)) {
if (to_end) {
bcx.build.Br(_break.llbb);
} else {
alt (_cont) {
case (Option.some[@block_ctxt](?_cont)) {
bcx.build.Br(_cont.llbb);
}
case (_) {
bcx.build.Br(cleanup_cx.llbb);
}
}
}
ret res(new_sub_block_ctxt(bcx, "unreachable"), C_nil());
}
case (_) {
alt (cleanup_cx.parent) {
case (parent_some(?cx)) { cleanup_cx = cx; }
}
}
}
}
fail;
}
fn trans_break(@block_ctxt cx) -> result {
ret trans_break_cont(cx, true);
}
fn trans_cont(@block_ctxt cx) -> result {
ret trans_break_cont(cx, false);
}
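// Translates 'ret': copies the result value (if any) into the frame's
// return slot, then runs the cleanups of every enclosing scope before
// emitting RetVoid. Any code following the return lands in a fresh
// "unreachable" block.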
fn trans_ret(@block_ctxt cx, &Option.t[@ast.expr] e) -> result {
auto bcx = cx;
auto val = C_nil();
alt (e) {
case (some[@ast.expr](?x)) {
auto t = ty.expr_ty(cx.fcx.lcx.ccx.tcx, x);
auto r = trans_expr(cx, x);
bcx = r.bcx;
val = r.val;
bcx = copy_ty(bcx, INIT, cx.fcx.llretptr, val, t).bcx;
}
case (_) {
auto t = llvm.LLVMGetElementType(val_ty(cx.fcx.llretptr));
auto null = lib.llvm.llvm.LLVMConstNull(t);
bcx.build.Store(null, cx.fcx.llretptr);
}
}
// Run all cleanups and back out.
let bool more_cleanups = true;
auto cleanup_cx = cx;
while (more_cleanups) {
bcx = trans_block_cleanups(bcx, cleanup_cx);
alt (cleanup_cx.parent) {
case (parent_some(?b)) {
cleanup_cx = b;
}
case (parent_none) {
more_cleanups = false;
}
}
}
bcx.build.RetVoid();
ret res(new_sub_block_ctxt(bcx, "unreachable"), C_nil());
}
fn trans_be(@block_ctxt cx, @ast.expr e) -> result {
// FIXME: This should be a typestate precondition
assert (ast.is_call_expr(e));
// FIXME: Turn this into a real tail call once
// calling convention issues are settled
ret trans_ret(cx, some(e));
}
fn trans_port(@block_ctxt cx, ast.ann ann) -> result {
auto t = node_ann_type(cx.fcx.lcx.ccx, ann);
auto unit_ty;
alt (ty.struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty.ty_port(?t)) {
unit_ty = t;
}
case (_) {
cx.fcx.lcx.ccx.sess.bug("non-port type in trans_port");
fail;
}
}
auto llunit_ty = type_of(cx.fcx.lcx.ccx, unit_ty);
auto bcx = cx;
auto unit_sz = size_of(bcx, unit_ty);
bcx = unit_sz.bcx;
auto sub = trans_upcall(bcx, "upcall_new_port", vec(unit_sz.val), false);
bcx = sub.bcx;
auto llty = type_of(cx.fcx.lcx.ccx, t);
auto port_val = vi2p(bcx, sub.val, llty);
auto dropref = clean(bind drop_ty(_, port_val, t));
find_scope_cx(bcx).cleanups += vec(dropref);
ret res(bcx, port_val);
}
fn trans_chan(@block_ctxt cx, @ast.expr e, ast.ann ann) -> result {
auto bcx = cx;
auto prt = trans_expr(bcx, e);
bcx = prt.bcx;
auto prt_val = vp2i(bcx, prt.val);
auto sub = trans_upcall(bcx, "upcall_new_chan", vec(prt_val), false);
bcx = sub.bcx;
auto chan_ty = node_ann_type(bcx.fcx.lcx.ccx, ann);
auto chan_llty = type_of(bcx.fcx.lcx.ccx, chan_ty);
auto chan_val = vi2p(bcx, sub.val, chan_llty);
auto dropref = clean(bind drop_ty(_, chan_val, chan_ty));
find_scope_cx(bcx).cleanups += vec(dropref);
ret res(bcx, chan_val);
}
fn trans_send(@block_ctxt cx, @ast.expr lhs, @ast.expr rhs,
ast.ann ann) -> result {
auto bcx = cx;
auto chn = trans_expr(bcx, lhs);
bcx = chn.bcx;
auto data = trans_expr(bcx, rhs);
bcx = data.bcx;
auto chan_ty = node_ann_type(cx.fcx.lcx.ccx, ann);
auto unit_ty;
alt (ty.struct(cx.fcx.lcx.ccx.tcx, chan_ty)) {
case (ty.ty_chan(?t)) {
unit_ty = t;
}
case (_) {
bcx.fcx.lcx.ccx.sess.bug("non-chan type in trans_send");
fail;
}
}
auto data_alloc = alloc_ty(bcx, unit_ty);
bcx = data_alloc.bcx;
auto data_tmp = copy_ty(bcx, INIT, data_alloc.val, data.val, unit_ty);
bcx = data_tmp.bcx;
find_scope_cx(bcx).cleanups +=
vec(clean(bind drop_ty(_, data_alloc.val, unit_ty)));
auto sub = trans_upcall(bcx, "upcall_send",
vec(vp2i(bcx, chn.val),
vp2i(bcx, data_alloc.val)), false);
bcx = sub.bcx;
ret res(bcx, chn.val);
}
fn trans_recv(@block_ctxt cx, @ast.expr lhs, @ast.expr rhs,
ast.ann ann) -> result {
auto bcx = cx;
auto data = trans_lval(bcx, lhs);
assert (data.is_mem);
bcx = data.res.bcx;
auto unit_ty = node_ann_type(bcx.fcx.lcx.ccx, ann);
// FIXME: calculate copy init-ness in typestate.
ret recv_val(bcx, data.res.val, rhs, unit_ty, DROP_EXISTING);
}
fn recv_val(@block_ctxt cx, ValueRef lhs, @ast.expr rhs,
ty.t unit_ty, copy_action action) -> result {
auto bcx = cx;
auto prt = trans_expr(bcx, rhs);
bcx = prt.bcx;
auto sub = trans_upcall(bcx, "upcall_recv",
vec(vp2i(bcx, lhs),
vp2i(bcx, prt.val)), false);
bcx = sub.bcx;
auto data_load = load_if_immediate(bcx, lhs, unit_ty);
auto cp = copy_ty(bcx, action, lhs, data_load, unit_ty);
bcx = cp.bcx;
    // TODO: Does any cleanup need to be done here?
ret res(bcx, lhs);
}
fn init_local(@block_ctxt cx, @ast.local local) -> result {
// Make a note to drop this slot on the way out.
assert (cx.fcx.lllocals.contains_key(local.id));
auto llptr = cx.fcx.lllocals.get(local.id);
auto ty = node_ann_type(cx.fcx.lcx.ccx, local.ann);
auto bcx = cx;
find_scope_cx(cx).cleanups +=
vec(clean(bind drop_slot(_, llptr, ty)));
alt (local.init) {
case (some[ast.initializer](?init)) {
alt (init.op) {
case (ast.init_assign) {
auto sub = trans_expr(bcx, init.expr);
bcx = copy_ty(sub.bcx, INIT, llptr, sub.val, ty).bcx;
}
case (ast.init_recv) {
bcx = recv_val(bcx, llptr, init.expr, ty, INIT).bcx;
}
}
}
case (_) {
bcx = zero_alloca(bcx, llptr, ty).bcx;
}
}
ret res(bcx, llptr);
}
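// Zeroes freshly-allocated memory so that drop glue can later run safely
// over a slot that was never initialized: dynamically-sized types are
// cleared with the runtime's bzero glue, statically-sized ones with a
// store of LLVM's null constant for the type.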
fn zero_alloca(@block_ctxt cx, ValueRef llptr, ty.t t) -> result {
auto bcx = cx;
if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, t)) {
auto llsz = size_of(bcx, t);
bcx = call_bzero(llsz.bcx, llptr, llsz.val).bcx;
} else {
auto llty = type_of(bcx.fcx.lcx.ccx, t);
auto null = lib.llvm.llvm.LLVMConstNull(llty);
bcx.build.Store(null, llptr);
}
ret res(bcx, llptr);
}
fn trans_stmt(@block_ctxt cx, &ast.stmt s) -> result {
auto bcx = cx;
alt (s.node) {
case (ast.stmt_expr(?e,_)) {
bcx = trans_expr(cx, e).bcx;
}
case (ast.stmt_decl(?d,_)) {
alt (d.node) {
case (ast.decl_local(?local)) {
bcx = init_local(bcx, local).bcx;
}
case (ast.decl_item(?i)) {
trans_item(cx.fcx.lcx, *i);
}
}
}
case (_) {
cx.fcx.lcx.ccx.sess.unimpl("stmt variant");
}
}
ret res(bcx, C_nil());
}
fn new_builder(BasicBlockRef llbb) -> builder {
let BuilderRef llbuild = llvm.LLVMCreateBuilder();
llvm.LLVMPositionBuilderAtEnd(llbuild, llbb);
ret builder(llbuild, @mutable false);
}
// You probably don't want to use this one. See the
// next three functions instead.
fn new_block_ctxt(@fn_ctxt cx, block_parent parent,
block_kind kind,
str name) -> @block_ctxt {
let vec[cleanup] cleanups = vec();
let BasicBlockRef llbb =
llvm.LLVMAppendBasicBlock(cx.llfn,
Str.buf(cx.lcx.ccx.names.next(name)));
ret @rec(llbb=llbb,
build=new_builder(llbb),
parent=parent,
kind=kind,
mutable cleanups=cleanups,
fcx=cx);
}
// Use this when you're at the top block of a function or the like.
fn new_top_block_ctxt(@fn_ctxt fcx) -> @block_ctxt {
ret new_block_ctxt(fcx, parent_none, SCOPE_BLOCK,
"function top level");
}
// Use this when you're at a curly-brace or similar lexical scope.
fn new_scope_block_ctxt(@block_ctxt bcx, str n) -> @block_ctxt {
ret new_block_ctxt(bcx.fcx, parent_some(bcx), SCOPE_BLOCK, n);
}
fn new_loop_scope_block_ctxt(@block_ctxt bcx, Option.t[@block_ctxt] _cont,
@block_ctxt _break, str n) -> @block_ctxt {
ret new_block_ctxt(bcx.fcx, parent_some(bcx),
LOOP_SCOPE_BLOCK(_cont, _break), n);
}
// Use this when you're making a general CFG BB within a scope.
fn new_sub_block_ctxt(@block_ctxt bcx, str n) -> @block_ctxt {
ret new_block_ctxt(bcx.fcx, parent_some(bcx), NON_SCOPE_BLOCK, n);
}
fn trans_block_cleanups(@block_ctxt cx,
@block_ctxt cleanup_cx) -> @block_ctxt {
auto bcx = cx;
if (cleanup_cx.kind == NON_SCOPE_BLOCK) {
assert (Vec.len[cleanup](cleanup_cx.cleanups) == 0u);
}
auto i = Vec.len[cleanup](cleanup_cx.cleanups);
while (i > 0u) {
i -= 1u;
auto c = cleanup_cx.cleanups.(i);
alt (c) {
case (clean(?cfn)) {
bcx = cfn(bcx).bcx;
}
}
}
ret bcx;
}
iter block_locals(&ast.block b) -> @ast.local {
// FIXME: putting from inside an iter block doesn't work, so we can't
// use the index here.
for (@ast.stmt s in b.node.stmts) {
alt (s.node) {
case (ast.stmt_decl(?d,_)) {
alt (d.node) {
case (ast.decl_local(?local)) {
put local;
}
case (_) { /* fall through */ }
}
}
case (_) { /* fall through */ }
}
}
}
fn llallocas_block_ctxt(@fn_ctxt fcx) -> @block_ctxt {
let vec[cleanup] cleanups = vec();
ret @rec(llbb=fcx.llallocas,
build=new_builder(fcx.llallocas),
parent=parent_none,
kind=SCOPE_BLOCK,
mutable cleanups=cleanups,
fcx=fcx);
}
fn alloc_ty(@block_ctxt cx, ty.t t) -> result {
auto val = C_int(0);
if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, t)) {
// NB: we have to run this particular 'size_of' in a
// block_ctxt built on the llallocas block for the fn,
// so that the size dominates the array_alloca that
// comes next.
auto n = size_of(llallocas_block_ctxt(cx.fcx), t);
cx.fcx.llallocas = n.bcx.llbb;
val = array_alloca(cx, T_i8(), n.val);
} else {
val = alloca(cx, type_of(cx.fcx.lcx.ccx, t));
}
// NB: since we've pushed all size calculations in this
// function up to the alloca block, we actually return the
// block passed into us unmodified; it doesn't really
    // have to be passed-and-returned here, but it matches the
    // conventions of our existing callers and may well make sense
    // again, so we leave it as-is.
ret res(cx, val);
}
fn alloc_local(@block_ctxt cx, @ast.local local) -> result {
auto t = node_ann_type(cx.fcx.lcx.ccx, local.ann);
auto r = alloc_ty(cx, t);
r.bcx.fcx.lllocals.insert(local.id, r.val);
ret r;
}
fn trans_block(@block_ctxt cx, &ast.block b) -> result {
auto bcx = cx;
for each (@ast.local local in block_locals(b)) {
bcx = alloc_local(bcx, local).bcx;
}
auto r = res(bcx, C_nil());
for (@ast.stmt s in b.node.stmts) {
r = trans_stmt(bcx, *s);
bcx = r.bcx;
// If we hit a terminator, control won't go any further so
// we're in dead-code land. Stop here.
if (is_terminated(bcx)) {
ret r;
}
}
alt (b.node.expr) {
case (some[@ast.expr](?e)) {
r = trans_expr(bcx, e);
bcx = r.bcx;
if (is_terminated(bcx)) {
ret r;
} else {
auto r_ty = ty.expr_ty(cx.fcx.lcx.ccx.tcx, e);
if (!ty.type_is_nil(cx.fcx.lcx.ccx.tcx, r_ty)) {
// The value resulting from the block gets copied into an
// alloca created in an outer scope and its refcount
// bumped so that it can escape this block. This means
// that it will definitely live until the end of the
// enclosing scope, even if nobody uses it, which may be
// something of a surprise.
// It's possible we never hit this block, so the alloca
// must be initialized to null, then when the potential
// value finally goes out of scope the drop glue will see
// that it was never used and ignore it.
                // NB: Here we're building and initializing the alloca in
// the alloca context, not this block's context.
auto res_alloca = alloc_ty(bcx, r_ty);
auto llbcx = llallocas_block_ctxt(bcx.fcx);
zero_alloca(llbcx, res_alloca.val, r_ty);
// Now we're working in our own block context again
auto res_copy = copy_ty(bcx, INIT,
res_alloca.val, r.val, r_ty);
bcx = res_copy.bcx;
fn drop_hoisted_ty(@block_ctxt cx,
ValueRef alloca_val,
ty.t t) -> result {
auto reg_val = load_if_immediate(cx,
alloca_val, t);
ret drop_ty(cx, reg_val, t);
}
auto cleanup = bind drop_hoisted_ty(_, res_alloca.val,
r_ty);
find_outer_scope_cx(bcx).cleanups += vec(clean(cleanup));
}
}
}
case (none[@ast.expr]) {
r = res(bcx, C_nil());
}
}
bcx = trans_block_cleanups(bcx, find_scope_cx(bcx));
ret res(bcx, r.val);
}
fn new_local_ctxt(@crate_ctxt ccx) -> @local_ctxt {
let vec[str] pth = vec();
let vec[ast.ty_param] obj_typarams = vec();
let vec[ast.obj_field] obj_fields = vec();
ret @rec(path=pth,
module_path=vec(crate_name(ccx, "main")),
obj_typarams = obj_typarams,
obj_fields = obj_fields,
ccx = ccx);
}
// NB: must keep 4 fns in sync:
//
// - type_of_fn_full
// - create_llargs_for_fn_args.
// - new_fn_ctxt
// - trans_args
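// NB: every rust function takes three implicit leading parameters: param
// 0 is the out-pointer for the return value, param 1 is the task pointer
// and param 2 is the environment (closure bindings or self-obj).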
fn new_fn_ctxt(@local_ctxt cx,
ValueRef llfndecl) -> @fn_ctxt {
let ValueRef llretptr = llvm.LLVMGetParam(llfndecl, 0u);
let ValueRef lltaskptr = llvm.LLVMGetParam(llfndecl, 1u);
let ValueRef llenv = llvm.LLVMGetParam(llfndecl, 2u);
let hashmap[ast.def_id, ValueRef] llargs = new_def_hash[ValueRef]();
let hashmap[ast.def_id, ValueRef] llobjfields = new_def_hash[ValueRef]();
let hashmap[ast.def_id, ValueRef] lllocals = new_def_hash[ValueRef]();
let hashmap[ast.def_id, ValueRef] llupvars = new_def_hash[ValueRef]();
let BasicBlockRef llallocas =
llvm.LLVMAppendBasicBlock(llfndecl, Str.buf("allocas"));
ret @rec(llfn=llfndecl,
lltaskptr=lltaskptr,
llenv=llenv,
llretptr=llretptr,
mutable llallocas = llallocas,
mutable llself=none[self_vt],
mutable lliterbody=none[ValueRef],
llargs=llargs,
llobjfields=llobjfields,
lllocals=lllocals,
llupvars=llupvars,
mutable lltydescs=Vec.empty[ValueRef](),
lcx=cx);
}
// NB: must keep 4 fns in sync:
//
// - type_of_fn_full
// - create_llargs_for_fn_args.
// - new_fn_ctxt
// - trans_args
fn create_llargs_for_fn_args(&@fn_ctxt cx,
ast.proto proto,
Option.t[tup(TypeRef, ty.t)] ty_self,
ty.t ret_ty,
&vec[ast.arg] args,
&vec[ast.ty_param] ty_params) {
auto arg_n = 3u;
alt (ty_self) {
case (some[tup(TypeRef, ty.t)](?tt)) {
cx.llself = some[self_vt](rec(v = cx.llenv, t = tt._1));
}
case (none[tup(TypeRef, ty.t)]) {
auto i = 0u;
for (ast.ty_param tp in ty_params) {
auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
assert (llarg as int != 0);
cx.lltydescs += vec(llarg);
arg_n += 1u;
i += 1u;
}
}
}
if (proto == ast.proto_iter) {
auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
assert (llarg as int != 0);
cx.lliterbody = some[ValueRef](llarg);
arg_n += 1u;
}
for (ast.arg arg in args) {
auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
assert (llarg as int != 0);
cx.llargs.insert(arg.id, llarg);
arg_n += 1u;
}
}
// Recommended LLVM style, strange though this is, is to copy from args to
// allocas immediately upon entry; this permits us to GEP into structures we
// were passed and whatnot. Apparently mem2reg will mop up.
fn copy_any_self_to_alloca(@fn_ctxt fcx,
Option.t[tup(TypeRef, ty.t)] ty_self) {
auto bcx = llallocas_block_ctxt(fcx);
alt (fcx.llself) {
case (some[self_vt](?s_vt)) {
alt (ty_self) {
case (some[tup(TypeRef, ty.t)](?tt)) {
auto a = alloca(bcx, tt._0);
bcx.build.Store(s_vt.v, a);
fcx.llself = some[self_vt](rec(v = a, t = s_vt.t));
}
}
}
case (_) {
}
}
}
fn copy_args_to_allocas(@fn_ctxt fcx,
vec[ast.arg] args,
vec[ty.arg] arg_tys) {
auto bcx = llallocas_block_ctxt(fcx);
let uint arg_n = 0u;
for (ast.arg aarg in args) {
if (aarg.mode != ast.alias) {
auto arg_t = type_of_arg(fcx.lcx, arg_tys.(arg_n));
auto a = alloca(bcx, arg_t);
auto argval = fcx.llargs.get(aarg.id);
bcx.build.Store(argval, a);
// Overwrite the llargs entry for this arg with its alloca.
fcx.llargs.insert(aarg.id, a);
}
arg_n += 1u;
}
fcx.llallocas = bcx.llbb;
}
fn add_cleanups_for_args(@block_ctxt bcx,
vec[ast.arg] args,
vec[ty.arg] arg_tys) {
let uint arg_n = 0u;
for (ast.arg aarg in args) {
if (aarg.mode != ast.alias) {
auto argval = bcx.fcx.llargs.get(aarg.id);
find_scope_cx(bcx).cleanups +=
vec(clean(bind drop_slot(_, argval, arg_tys.(arg_n).ty)));
}
arg_n += 1u;
}
}
fn is_terminated(@block_ctxt cx) -> bool {
auto inst = llvm.LLVMGetLastInstruction(cx.llbb);
ret llvm.LLVMIsATerminatorInst(inst) as int != 0;
}
fn arg_tys_of_fn(@crate_ctxt ccx, ast.ann ann) -> vec[ty.arg] {
alt (ty.struct(ccx.tcx, ty.ann_to_type(ann))) {
case (ty.ty_fn(_, ?arg_tys, _)) {
ret arg_tys;
}
}
fail;
}
fn ret_ty_of_fn_ty(@crate_ctxt ccx, ty.t t) -> ty.t {
alt (ty.struct(ccx.tcx, t)) {
case (ty.ty_fn(_, _, ?ret_ty)) {
ret ret_ty;
}
}
fail;
}
fn ret_ty_of_fn(@crate_ctxt ccx, ast.ann ann) -> ty.t {
ret ret_ty_of_fn_ty(ccx, ty.ann_to_type(ann));
}
fn populate_fn_ctxt_from_llself(@fn_ctxt fcx, self_vt llself) {
auto bcx = llallocas_block_ctxt(fcx);
let vec[ty.t] field_tys = vec();
for (ast.obj_field f in bcx.fcx.lcx.obj_fields) {
field_tys += vec(node_ann_type(bcx.fcx.lcx.ccx, f.ann));
}
// Synthesize a tuple type for the fields so that GEP_tup_like() can work
// its magic.
auto fields_tup_ty = ty.mk_imm_tup(fcx.lcx.ccx.tcx, field_tys);
auto n_typarams = Vec.len[ast.ty_param](bcx.fcx.lcx.obj_typarams);
let TypeRef llobj_box_ty = T_obj_ptr(bcx.fcx.lcx.ccx.tn, n_typarams);
auto box_cell =
bcx.build.GEP(llself.v,
vec(C_int(0),
C_int(abi.obj_field_box)));
auto box_ptr = bcx.build.Load(box_cell);
box_ptr = bcx.build.PointerCast(box_ptr, llobj_box_ty);
auto obj_typarams = bcx.build.GEP(box_ptr,
vec(C_int(0),
C_int(abi.box_rc_field_body),
C_int(abi.obj_body_elt_typarams)));
// The object fields immediately follow the type parameters, so we skip
// over them to get the pointer.
auto obj_fields = bcx.build.Add(vp2i(bcx, obj_typarams),
llsize_of(llvm.LLVMGetElementType(val_ty(obj_typarams))));
// If we can (i.e. the type is statically sized), then cast the resulting
// fields pointer to the appropriate LLVM type. If not, just leave it as
// i8 *.
if (!ty.type_has_dynamic_size(fcx.lcx.ccx.tcx, fields_tup_ty)) {
auto llfields_ty = type_of(fcx.lcx.ccx, fields_tup_ty);
obj_fields = vi2p(bcx, obj_fields, T_ptr(llfields_ty));
} else {
obj_fields = vi2p(bcx, obj_fields, T_ptr(T_i8()));
}
let int i = 0;
for (ast.ty_param p in fcx.lcx.obj_typarams) {
let ValueRef lltyparam = bcx.build.GEP(obj_typarams,
vec(C_int(0),
C_int(i)));
lltyparam = bcx.build.Load(lltyparam);
fcx.lltydescs += vec(lltyparam);
i += 1;
}
i = 0;
for (ast.obj_field f in fcx.lcx.obj_fields) {
auto rslt = GEP_tup_like(bcx, fields_tup_ty, obj_fields, vec(0, i));
bcx = llallocas_block_ctxt(fcx);
auto llfield = rslt.val;
fcx.llobjfields.insert(f.id, llfield);
i += 1;
}
fcx.llallocas = bcx.llbb;
}
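// Translates a fn item: sets up the fn context and the implicit
// arguments, copies self and args into allocas, translates the body, and
// finally ties the llallocas block to the function's top block.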
fn trans_fn(@local_ctxt cx, &ast._fn f, ast.def_id fid,
Option.t[tup(TypeRef, ty.t)] ty_self,
&vec[ast.ty_param] ty_params, &ast.ann ann) {
auto llfndecl = cx.ccx.item_ids.get(fid);
auto fcx = new_fn_ctxt(cx, llfndecl);
create_llargs_for_fn_args(fcx, f.proto,
ty_self, ret_ty_of_fn(cx.ccx, ann),
f.decl.inputs, ty_params);
copy_any_self_to_alloca(fcx, ty_self);
alt (fcx.llself) {
case (some[self_vt](?llself)) {
populate_fn_ctxt_from_llself(fcx, llself);
}
case (_) {
}
}
auto arg_tys = arg_tys_of_fn(fcx.lcx.ccx, ann);
copy_args_to_allocas(fcx, f.decl.inputs, arg_tys);
auto bcx = new_top_block_ctxt(fcx);
add_cleanups_for_args(bcx, f.decl.inputs, arg_tys);
auto lltop = bcx.llbb;
auto res = trans_block(bcx, f.body);
if (!is_terminated(res.bcx)) {
// FIXME: until LLVM has a unit type, we are moving around
// C_nil values rather than their void type.
res.bcx.build.RetVoid();
}
// Tie up the llallocas -> lltop edge.
new_builder(fcx.llallocas).Br(lltop);
}
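// Builds the constant vtable for an obj. Slot 0 holds the dtor (cast to
// an opaque pointer), or null if there is none; one fastcall function per
// method follows, with the methods sorted by name to give them a stable
// slot order.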
fn trans_vtbl(@local_ctxt cx,
TypeRef llself_ty,
ty.t self_ty,
&ast._obj ob,
&vec[ast.ty_param] ty_params) -> ValueRef {
auto dtor = C_null(T_ptr(T_i8()));
alt (ob.dtor) {
case (some[@ast.method](?d)) {
auto dtor_1 = trans_dtor(cx, llself_ty, self_ty, ty_params, d);
dtor = llvm.LLVMConstBitCast(dtor_1, val_ty(dtor));
}
case (none[@ast.method]) {}
}
let vec[ValueRef] methods = vec(dtor);
fn meth_lteq(&@ast.method a, &@ast.method b) -> bool {
ret Str.lteq(a.node.ident, b.node.ident);
}
auto meths = std.Sort.merge_sort[@ast.method](bind meth_lteq(_,_),
ob.methods);
for (@ast.method m in meths) {
auto llfnty = T_nil();
alt (ty.struct(cx.ccx.tcx, node_ann_type(cx.ccx, m.node.ann))) {
case (ty.ty_fn(?proto, ?inputs, ?output)) {
llfnty = type_of_fn_full(cx.ccx, proto,
some[TypeRef](llself_ty),
inputs, output,
Vec.len[ast.ty_param](ty_params));
}
}
let @local_ctxt mcx = extend_path(cx, m.node.ident);
let str s = mangle_name_by_seq(mcx.ccx, mcx.path, "method");
let ValueRef llfn = decl_internal_fastcall_fn(cx.ccx.llmod, s,
llfnty);
cx.ccx.item_ids.insert(m.node.id, llfn);
cx.ccx.item_symbols.insert(m.node.id, s);
trans_fn(mcx, m.node.meth, m.node.id,
some[tup(TypeRef, ty.t)](tup(llself_ty, self_ty)),
ty_params, m.node.ann);
methods += vec(llfn);
}
auto vtbl = C_struct(methods);
auto vtbl_name = mangle_name_by_seq(cx.ccx, cx.path, "vtbl");
auto gvar = llvm.LLVMAddGlobal(cx.ccx.llmod, val_ty(vtbl),
Str.buf(vtbl_name));
llvm.LLVMSetInitializer(gvar, vtbl);
llvm.LLVMSetGlobalConstant(gvar, True);
llvm.LLVMSetLinkage(gvar, lib.llvm.LLVMInternalLinkage
as llvm.Linkage);
ret gvar;
}
fn trans_dtor(@local_ctxt cx,
TypeRef llself_ty,
ty.t self_ty,
&vec[ast.ty_param] ty_params,
&@ast.method dtor) -> ValueRef {
auto llfnty = T_dtor(cx.ccx, llself_ty);
let @local_ctxt dcx = extend_path(cx, "drop");
let str s = mangle_name_by_seq(dcx.ccx, dcx.path, "drop");
let ValueRef llfn = decl_internal_fastcall_fn(cx.ccx.llmod, s, llfnty);
cx.ccx.item_ids.insert(dtor.node.id, llfn);
cx.ccx.item_symbols.insert(dtor.node.id, s);
trans_fn(dcx, dtor.node.meth, dtor.node.id,
some[tup(TypeRef, ty.t)](tup(llself_ty, self_ty)),
ty_params, dtor.node.ann);
ret llfn;
}
fn trans_obj(@local_ctxt cx, &ast._obj ob, ast.def_id oid,
&vec[ast.ty_param] ty_params, &ast.ann ann) {
auto ccx = cx.ccx;
auto llctor_decl = ccx.item_ids.get(oid);
// Translate obj ctor args to function arguments.
let vec[ast.arg] fn_args = vec();
for (ast.obj_field f in ob.fields) {
fn_args += vec(rec(mode=ast.alias, ty=f.ty, ident=f.ident, id=f.id));
}
auto fcx = new_fn_ctxt(cx, llctor_decl);
create_llargs_for_fn_args(fcx, ast.proto_fn,
none[tup(TypeRef, ty.t)],
ret_ty_of_fn(ccx, ann),
fn_args, ty_params);
let vec[ty.arg] arg_tys = arg_tys_of_fn(ccx, ann);
copy_args_to_allocas(fcx, fn_args, arg_tys);
auto bcx = new_top_block_ctxt(fcx);
auto lltop = bcx.llbb;
auto self_ty = ret_ty_of_fn(ccx, ann);
auto llself_ty = type_of(ccx, self_ty);
auto pair = bcx.fcx.llretptr;
auto vtbl = trans_vtbl(cx, llself_ty, self_ty, ob, ty_params);
auto pair_vtbl = bcx.build.GEP(pair,
vec(C_int(0),
C_int(abi.obj_field_vtbl)));
auto pair_box = bcx.build.GEP(pair,
vec(C_int(0),
C_int(abi.obj_field_box)));
bcx.build.Store(vtbl, pair_vtbl);
let TypeRef llbox_ty = T_opaque_obj_ptr(ccx.tn);
// FIXME we should probably also allocate a box for empty objs that have a
// dtor, since otherwise they are never dropped, and the dtor never runs
if (Vec.len[ast.ty_param](ty_params) == 0u &&
Vec.len[ty.arg](arg_tys) == 0u) {
// Store null into pair, if no args or typarams.
bcx.build.Store(C_null(llbox_ty), pair_box);
} else {
// Malloc a box for the body and copy args in.
let vec[ty.t] obj_fields = vec();
for (ty.arg a in arg_tys) {
Vec.push[ty.t](obj_fields, a.ty);
}
// Synthesize an obj body type.
auto tydesc_ty = ty.mk_type(ccx.tcx);
let vec[ty.t] tps = vec();
for (ast.ty_param tp in ty_params) {
Vec.push[ty.t](tps, tydesc_ty);
}
let ty.t typarams_ty = ty.mk_imm_tup(ccx.tcx, tps);
let ty.t fields_ty = ty.mk_imm_tup(ccx.tcx, obj_fields);
let ty.t body_ty = ty.mk_imm_tup(ccx.tcx,
vec(tydesc_ty,
typarams_ty,
fields_ty));
let ty.t boxed_body_ty = ty.mk_imm_box(ccx.tcx, body_ty);
// Malloc a box for the body.
auto box = trans_malloc_boxed(bcx, body_ty);
bcx = box.bcx;
auto rc = GEP_tup_like(bcx, boxed_body_ty, box.val,
vec(0, abi.box_rc_field_refcnt));
bcx = rc.bcx;
auto body = GEP_tup_like(bcx, boxed_body_ty, box.val,
vec(0, abi.box_rc_field_body));
bcx = body.bcx;
bcx.build.Store(C_int(1), rc.val);
// Store body tydesc.
auto body_tydesc =
GEP_tup_like(bcx, body_ty, body.val,
vec(0, abi.obj_body_elt_tydesc));
bcx = body_tydesc.bcx;
auto body_td = get_tydesc(bcx, body_ty, true);
auto dtor = C_null(T_ptr(T_glue_fn(ccx.tn)));
alt (ob.dtor) {
case (some[@ast.method](?d)) {
dtor = trans_dtor(cx, llself_ty, self_ty, ty_params, d);
}
case (none[@ast.method]) {}
}
bcx = body_td.bcx;
bcx.build.Store(body_td.val, body_tydesc.val);
// Copy typarams into captured typarams.
auto body_typarams =
GEP_tup_like(bcx, body_ty, body.val,
vec(0, abi.obj_body_elt_typarams));
bcx = body_typarams.bcx;
let int i = 0;
for (ast.ty_param tp in ty_params) {
auto typaram = bcx.fcx.lltydescs.(i);
auto capture = GEP_tup_like(bcx, typarams_ty, body_typarams.val,
vec(0, i));
bcx = capture.bcx;
bcx = copy_ty(bcx, INIT, capture.val, typaram, tydesc_ty).bcx;
i += 1;
}
// Copy args into body fields.
auto body_fields =
GEP_tup_like(bcx, body_ty, body.val,
vec(0, abi.obj_body_elt_fields));
bcx = body_fields.bcx;
i = 0;
for (ast.obj_field f in ob.fields) {
auto arg = bcx.fcx.llargs.get(f.id);
arg = load_if_immediate(bcx, arg, arg_tys.(i).ty);
auto field = GEP_tup_like(bcx, fields_ty, body_fields.val,
vec(0, i));
bcx = field.bcx;
bcx = copy_ty(bcx, INIT, field.val, arg, arg_tys.(i).ty).bcx;
i += 1;
}
// Store box ptr in outer pair.
auto p = bcx.build.PointerCast(box.val, llbox_ty);
bcx.build.Store(p, pair_box);
}
bcx.build.RetVoid();
// Tie up the llallocas -> lltop edge.
new_builder(fcx.llallocas).Br(lltop);
}
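// Translates the body of a non-nullary tag variant constructor: stores
// the variant's discriminant into the tag and then copies each
// constructor argument into its slot in the variant's blob via GEP_tag.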
fn trans_tag_variant(@local_ctxt cx, ast.def_id tag_id,
&ast.variant variant, int index,
&vec[ast.ty_param] ty_params) {
if (Vec.len[ast.variant_arg](variant.node.args) == 0u) {
ret; // nullary constructors are just constants
}
// Translate variant arguments to function arguments.
let vec[ast.arg] fn_args = vec();
auto i = 0u;
    for (ast.variant_arg varg in variant.node.args) {
        fn_args += vec(rec(mode=ast.alias,
                           ty=varg.ty,
                           ident="arg" + UInt.to_str(i, 10u),
                           id=varg.id));
        i += 1u;
    }
assert (cx.ccx.item_ids.contains_key(variant.node.id));
let ValueRef llfndecl = cx.ccx.item_ids.get(variant.node.id);
auto fcx = new_fn_ctxt(cx, llfndecl);
create_llargs_for_fn_args(fcx, ast.proto_fn,
none[tup(TypeRef, ty.t)],
ret_ty_of_fn(cx.ccx, variant.node.ann),
fn_args, ty_params);
let vec[ty.t] ty_param_substs = vec();
i = 0u;
for (ast.ty_param tp in ty_params) {
ty_param_substs += vec(ty.mk_param(cx.ccx.tcx, i));
i += 1u;
}
auto arg_tys = arg_tys_of_fn(cx.ccx, variant.node.ann);
copy_args_to_allocas(fcx, fn_args, arg_tys);
auto bcx = new_top_block_ctxt(fcx);
auto lltop = bcx.llbb;
// Cast the tag to a type we can GEP into.
auto lltagptr = bcx.build.PointerCast(fcx.llretptr,
T_opaque_tag_ptr(fcx.lcx.ccx.tn));
auto lldiscrimptr = bcx.build.GEP(lltagptr,
vec(C_int(0), C_int(0)));
bcx.build.Store(C_int(index), lldiscrimptr);
auto llblobptr = bcx.build.GEP(lltagptr,
vec(C_int(0), C_int(1)));
i = 0u;
for (ast.variant_arg va in variant.node.args) {
auto rslt = GEP_tag(bcx, llblobptr, tag_id, variant.node.id,
ty_param_substs, i as int);
bcx = rslt.bcx;
auto lldestptr = rslt.val;
        // If this argument is a tag, it will have come into this function
        // as an opaque blob due to the way that type_of() works. So we
        // have to cast to the destination's view of the type.
auto llargptr = bcx.build.PointerCast(fcx.llargs.get(va.id),
val_ty(lldestptr));
auto arg_ty = arg_tys.(i).ty;
auto llargval;
if (ty.type_is_structural(cx.ccx.tcx, arg_ty) ||
ty.type_has_dynamic_size(cx.ccx.tcx, arg_ty)) {
llargval = llargptr;
} else {
llargval = bcx.build.Load(llargptr);
}
rslt = copy_ty(bcx, INIT, lldestptr, llargval, arg_ty);
bcx = rslt.bcx;
i += 1u;
}
bcx = trans_block_cleanups(bcx, find_scope_cx(bcx));
bcx.build.RetVoid();
// Tie up the llallocas -> lltop edge.
new_builder(fcx.llallocas).Br(lltop);
}
// FIXME: this should do some structural hash-consing to avoid
// duplicate constants. I think. Maybe LLVM has a magical mode
// that does so later on?
fn trans_const_expr(@crate_ctxt cx, @ast.expr e) -> ValueRef {
alt (e.node) {
case (ast.expr_lit(?lit, ?ann)) {
ret trans_lit(cx, *lit, ann);
}
}
}
fn trans_const(@crate_ctxt cx, @ast.expr e,
&ast.def_id cid, &ast.ann ann) {
auto t = node_ann_type(cx, ann);
auto v = trans_const_expr(cx, e);
// The scalars come back as 1st class LLVM vals
// which we have to stick into global constants.
auto g = cx.consts.get(cid);
llvm.LLVMSetInitializer(g, v);
llvm.LLVMSetGlobalConstant(g, True);
}
fn trans_item(@local_ctxt cx, &ast.item item) {
alt (item.node) {
case (ast.item_fn(?name, ?f, ?tps, ?fid, ?ann)) {
auto sub_cx = extend_path(cx, name);
trans_fn(sub_cx, f, fid, none[tup(TypeRef, ty.t)], tps, ann);
}
case (ast.item_obj(?name, ?ob, ?tps, ?oid, ?ann)) {
auto sub_cx = @rec(obj_typarams=tps,
obj_fields=ob.fields with
*extend_path(cx, name));
trans_obj(sub_cx, ob, oid.ctor, tps, ann);
}
case (ast.item_mod(?name, ?m, _)) {
auto sub_cx = @rec(path = cx.path + vec(name),
module_path = cx.module_path + vec(name)
with *cx);
trans_mod(sub_cx, m);
}
case (ast.item_tag(?name, ?variants, ?tps, ?tag_id, _)) {
auto sub_cx = extend_path(cx, name);
auto i = 0;
for (ast.variant variant in variants) {
trans_tag_variant(sub_cx, tag_id, variant, i, tps);
i += 1;
}
}
case (ast.item_const(?name, _, ?expr, ?cid, ?ann)) {
trans_const(cx.ccx, expr, cid, ann);
}
case (_) { /* fall through */ }
}
}
fn trans_mod(@local_ctxt cx, &ast._mod m) {
for (@ast.item item in m.items) {
trans_item(cx, *item);
}
}
fn get_pair_fn_ty(TypeRef llpairty) -> TypeRef {
// Bit of a kludge: pick the fn typeref out of the pair.
let vec[TypeRef] pair_tys = vec(T_nil(), T_nil());
llvm.LLVMGetStructElementTypes(llpairty,
Vec.buf[TypeRef](pair_tys));
ret llvm.LLVMGetElementType(pair_tys.(0));
}
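// Declares both halves of a fn item: the fastcall function itself, and
// the constant global "pair" -- a code pointer plus a null closure
// pointer -- that represents the function as a first-class value.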
fn decl_fn_and_pair(@crate_ctxt ccx,
vec[str] path,
str flav,
vec[ast.ty_param] ty_params,
&ast.ann ann,
ast.def_id id) {
auto llfty;
auto llpairty;
alt (ty.struct(ccx.tcx, node_ann_type(ccx, ann))) {
case (ty.ty_fn(?proto, ?inputs, ?output)) {
llfty = type_of_fn(ccx, proto, inputs, output,
Vec.len[ast.ty_param](ty_params));
llpairty = T_fn_pair(ccx.tn, llfty);
}
case (_) {
ccx.sess.bug("decl_fn_and_pair(): fn item doesn't have fn type!");
fail;
}
}
// Declare the function itself.
let str s = mangle_name_by_seq(ccx, path, flav);
let ValueRef llfn = decl_internal_fastcall_fn(ccx.llmod, s, llfty);
// Declare the global constant pair that points to it.
let str ps = mangle_name_by_type(ccx, path, node_ann_type(ccx, ann));
register_fn_pair(ccx, ps, llpairty, llfn, id);
}
fn register_fn_pair(@crate_ctxt cx, str ps, TypeRef llpairty, ValueRef llfn,
ast.def_id id) {
let ValueRef gvar = llvm.LLVMAddGlobal(cx.llmod, llpairty,
Str.buf(ps));
auto pair = C_struct(vec(llfn,
C_null(T_opaque_closure_ptr(cx.tn))));
llvm.LLVMSetInitializer(gvar, pair);
llvm.LLVMSetGlobalConstant(gvar, True);
llvm.LLVMSetVisibility(gvar,
lib.llvm.LLVMProtectedVisibility
as llvm.Visibility);
cx.item_ids.insert(id, llfn);
cx.item_symbols.insert(id, ps);
cx.fn_pairs.insert(id, gvar);
}
// Returns the number of type parameters that the given native function has.
fn native_fn_ty_param_count(@crate_ctxt cx, &ast.def_id id) -> uint {
auto count;
auto native_item = cx.native_items.get(id);
alt (native_item.node) {
case (ast.native_item_ty(_,_)) {
cx.sess.bug("decl_native_fn_and_pair(): native fn isn't " +
"actually a fn?!");
fail;
}
case (ast.native_item_fn(_, _, _, ?tps, _, _)) {
count = Vec.len[ast.ty_param](tps);
}
}
ret count;
}
fn native_fn_wrapper_type(@crate_ctxt cx, uint ty_param_count, ty.t x)
-> TypeRef {
alt (ty.struct(cx.tcx, x)) {
case (ty.ty_native_fn(?abi, ?args, ?out)) {
ret type_of_fn(cx, ast.proto_fn, args, out, ty_param_count);
}
}
fail;
}
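// Declares a native fn and its pair, then builds the wrapper body: a
// rust-ABI function that forwards its arguments to the external native
// symbol. Depending on the declared ABI, the wrapper may pass the task
// pointer as a leading argument and may flatten arguments down to
// word-sized integers before the call.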
fn decl_native_fn_and_pair(@crate_ctxt ccx,
vec[str] path,
str name,
&ast.ann ann,
ast.def_id id) {
auto num_ty_param = native_fn_ty_param_count(ccx, id);
// Declare the wrapper.
auto t = node_ann_type(ccx, ann);
auto wrapper_type = native_fn_wrapper_type(ccx, num_ty_param, t);
let str s = mangle_name_by_seq(ccx, path, "wrapper");
let ValueRef wrapper_fn = decl_internal_fastcall_fn(ccx.llmod, s,
wrapper_type);
// Declare the global constant pair that points to it.
auto wrapper_pair_type = T_fn_pair(ccx.tn, wrapper_type);
let str ps = mangle_name_by_type(ccx, path, node_ann_type(ccx, ann));
register_fn_pair(ccx, ps, wrapper_pair_type, wrapper_fn, id);
// Build the wrapper.
auto fcx = new_fn_ctxt(new_local_ctxt(ccx), wrapper_fn);
auto bcx = new_top_block_ctxt(fcx);
auto lltop = bcx.llbb;
// Declare the function itself.
auto item = ccx.native_items.get(id);
auto fn_type = node_ann_type(ccx, ann); // NB: has no type params
auto abi = ty.ty_fn_abi(ccx.tcx, fn_type);
auto llfnty = type_of_native_fn(ccx, abi,
ty.ty_fn_args(ccx.tcx, fn_type),
ty.ty_fn_ret(ccx.tcx, fn_type), num_ty_param);
// FIXME: If the returned type is not nil, then we assume it's 32 bits
// wide. This is obviously wildly unsafe. We should have a better FFI
// that allows types of different sizes to be returned.
auto rty_is_nil = ty.type_is_nil(ccx.tcx, ty.ty_fn_ret(ccx.tcx, fn_type));
auto pass_task;
auto cast_to_i32;
alt (abi) {
case (ast.native_abi_rust) {
pass_task = true;
cast_to_i32 = true;
}
case (ast.native_abi_rust_intrinsic) {
pass_task = true;
cast_to_i32 = false;
}
case (ast.native_abi_cdecl) {
pass_task = false;
cast_to_i32 = true;
}
case (ast.native_abi_llvm) {
pass_task = false;
cast_to_i32 = false;
}
}
auto lltaskptr;
if (cast_to_i32) {
lltaskptr = vp2i(bcx, fcx.lltaskptr);
} else {
lltaskptr = fcx.lltaskptr;
}
let vec[ValueRef] call_args = vec();
if (pass_task) { call_args += vec(lltaskptr); }
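// Ty param descriptors follow the wrapper's three implicit leading
// params (ret ptr, task ptr, env), so they start at index 3.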
auto arg_n = 3u;
for each (uint i in UInt.range(0u, num_ty_param)) {
auto llarg = llvm.LLVMGetParam(fcx.llfn, arg_n);
fcx.lltydescs += vec(llarg);
assert (llarg as int != 0);
if (cast_to_i32) {
call_args += vec(vp2i(bcx, llarg));
} else {
call_args += vec(llarg);
}
arg_n += 1u;
}
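// Converts a by-value argument to a word-sized integer: integral values
// are zero-extended or truncated to the int width, floats go through
// FPToSI, and anything else is assumed to be a pointer and cast with
// PtrToInt.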
fn convert_arg_to_i32(@block_ctxt cx,
ValueRef v,
ty.t t,
ty.mode mode) -> ValueRef {
if (mode == ty.mo_val) {
if (ty.type_is_integral(cx.fcx.lcx.ccx.tcx, t)) {
auto lldsttype = T_int();
auto llsrctype = type_of(cx.fcx.lcx.ccx, t);
if (llvm.LLVMGetIntTypeWidth(lldsttype) >
llvm.LLVMGetIntTypeWidth(llsrctype)) {
ret cx.build.ZExtOrBitCast(v, T_int());
}
ret cx.build.TruncOrBitCast(v, T_int());
}
if (ty.type_is_fp(cx.fcx.lcx.ccx.tcx, t)) {
ret cx.build.FPToSI(v, T_int());
}
}
ret vp2i(cx, v);
}
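// Emits a direct C call to the named native function, deriving the LLVM
// fn type from the argument values already gathered. Used by the "llvm"
// and "rust-intrinsic" ABIs, which need no asm glue.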
fn trans_simple_native_abi(@block_ctxt bcx,
str name,
&mutable vec[ValueRef] call_args,
ty.t fn_type,
uint first_arg_n) -> tup(ValueRef, ValueRef) {
let vec[TypeRef] call_arg_tys = vec();
for (ValueRef arg in call_args) {
call_arg_tys += vec(val_ty(arg));
}
auto llnativefnty =
T_fn(call_arg_tys,
type_of(bcx.fcx.lcx.ccx,
ty.ty_fn_ret(bcx.fcx.lcx.ccx.tcx, fn_type)));
auto llnativefn = get_extern_fn(bcx.fcx.lcx.ccx.externs,
bcx.fcx.lcx.ccx.llmod,
name,
lib.llvm.LLVMCCallConv,
llnativefnty);
auto r = bcx.build.Call(llnativefn, call_args);
auto rptr = bcx.fcx.llretptr;
ret tup(r, rptr);
}
auto args = ty.ty_fn_args(ccx.tcx, fn_type);
// Build up the list of arguments.
let vec[tup(ValueRef, ty.t)] drop_args = vec();
auto i = arg_n;
for (ty.arg arg in args) {
auto llarg = llvm.LLVMGetParam(fcx.llfn, i);
assert (llarg as int != 0);
if (cast_to_i32) {
auto llarg_i32 = convert_arg_to_i32(bcx, llarg, arg.ty, arg.mode);
call_args += vec(llarg_i32);
} else {
call_args += vec(llarg);
}
if (arg.mode == ty.mo_val) {
drop_args += vec(tup(llarg, arg.ty));
}
i += 1u;
}
auto r;
auto rptr;
alt (abi) {
case (ast.native_abi_llvm) {
auto result = trans_simple_native_abi(bcx, name, call_args,
fn_type, arg_n);
r = result._0; rptr = result._1;
}
case (ast.native_abi_rust_intrinsic) {
auto external_name = "rust_intrinsic_" + name;
auto result = trans_simple_native_abi(bcx, external_name,
call_args, fn_type, arg_n);
r = result._0; rptr = result._1;
}
case (_) {
r = trans_native_call(bcx.build, ccx.glues, lltaskptr,
ccx.externs, ccx.tn, ccx.llmod, name,
pass_task, call_args);
rptr = bcx.build.BitCast(fcx.llretptr, T_ptr(T_i32()));
}
}
// We don't store the return value if it's nil, to avoid stomping on a nil
// pointer. This is the only concession made to non-i32 return values. See
// the FIXME above.
if (!rty_is_nil) { bcx.build.Store(r, rptr); }
for (tup(ValueRef, ty.t) d in drop_args) {
bcx = drop_ty(bcx, d._0, d._1).bcx;
}
bcx.build.RetVoid();
// Tie up the llallocas -> lltop edge.
new_builder(fcx.llallocas).Br(lltop);
}
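// Walk context: tracks the current item path as the AST walker enters
// and leaves items, so that declared symbols can be mangled with their
// full paths.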
type walk_ctxt = rec(mutable vec[str] path);
fn new_walk_ctxt() -> @walk_ctxt {
let vec[str] path = vec();
ret @rec(mutable path=path);
}
fn enter_item(@walk_ctxt cx, @ast.item item) {
alt (item.node) {
case (ast.item_fn(?name, _, _, _, _)) {
Vec.push[str](cx.path, name);
}
case (ast.item_obj(?name, _, _, _, _)) {
Vec.push[str](cx.path, name);
}
case (ast.item_mod(?name, _, _)) {
Vec.push[str](cx.path, name);
}
case (_) { }
}
}
fn leave_item(@walk_ctxt cx, @ast.item item) {
alt (item.node) {
case (ast.item_fn(_, _, _, _, _)) {
Vec.pop[str](cx.path);
}
case (ast.item_obj(_, _, _, _, _)) {
Vec.pop[str](cx.path);
}
case (ast.item_mod(_, _, _)) {
Vec.pop[str](cx.path);
}
case (_) { }
}
}
fn collect_native_item(@crate_ctxt ccx, @walk_ctxt wcx, @ast.native_item i) {
alt (i.node) {
case (ast.native_item_fn(?name, _, _, _, ?fid, ?ann)) {
ccx.native_items.insert(fid, i);
if (!ccx.obj_methods.contains_key(fid)) {
decl_native_fn_and_pair(ccx, wcx.path, name, ann, fid);
}
}
case (ast.native_item_ty(_, ?tid)) {
ccx.native_items.insert(tid, i);
}
}
}
fn collect_item_1(@crate_ctxt ccx, @walk_ctxt wcx, @ast.item i) {
enter_item(wcx, i);
alt (i.node) {
case (ast.item_const(?name, _, _, ?cid, ?ann)) {
auto typ = node_ann_type(ccx, ann);
auto g = llvm.LLVMAddGlobal(ccx.llmod, type_of(ccx, typ),
Str.buf(ccx.names.next(name)));
llvm.LLVMSetLinkage(g, lib.llvm.LLVMInternalLinkage
as llvm.Linkage);
ccx.items.insert(cid, i);
ccx.consts.insert(cid, g);
}
case (ast.item_mod(?name, ?m, ?mid)) {
ccx.items.insert(mid, i);
}
case (ast.item_ty(_, _, _, ?did, _)) {
ccx.items.insert(did, i);
}
case (ast.item_tag(?name, ?variants, ?tps, ?tag_id, _)) {
ccx.items.insert(tag_id, i);
}
case (_) {}
}
}
fn collect_item_2(@crate_ctxt ccx, @walk_ctxt wcx, @ast.item i) {
enter_item(wcx, i);
alt (i.node) {
case (ast.item_fn(?name, ?f, ?tps, ?fid, ?ann)) {
ccx.items.insert(fid, i);
if (!ccx.obj_methods.contains_key(fid)) {
decl_fn_and_pair(ccx, wcx.path, "fn", tps, ann, fid);
}
}
case (ast.item_obj(?name, ?ob, ?tps, ?oid, ?ann)) {
ccx.items.insert(oid.ctor, i);
decl_fn_and_pair(ccx, wcx.path, "obj_ctor", tps, ann, oid.ctor);
for (@ast.method m in ob.methods) {
ccx.obj_methods.insert(m.node.id, ());
}
}
case (_) {}
}
}
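// Walks the crate twice: the first visitor registers native items,
// consts, mods, types and tags; the second declares fns and obj ctors
// and records which fns are obj methods.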
fn collect_items(@crate_ctxt ccx, @ast.crate crate) {
auto wcx = new_walk_ctxt();
auto visitor0 = walk.default_visitor();
auto visitor1 = rec(visit_native_item_pre =
bind collect_native_item(ccx, wcx, _),
visit_item_pre = bind collect_item_1(ccx, wcx, _),
visit_item_post = bind leave_item(wcx, _)
with visitor0);
auto visitor2 = rec(visit_item_pre = bind collect_item_2(ccx, wcx, _),
visit_item_post = bind leave_item(wcx, _)
with visitor0);
walk.walk_crate(visitor1, *crate);
walk.walk_crate(visitor2, *crate);
}
fn collect_tag_ctor(@crate_ctxt ccx, @walk_ctxt wcx, @ast.item i) {
enter_item(wcx, i);
alt (i.node) {
case (ast.item_tag(_, ?variants, ?tps, _, _)) {
for (ast.variant variant in variants) {
if (Vec.len[ast.variant_arg](variant.node.args) != 0u) {
decl_fn_and_pair(ccx, wcx.path + vec(variant.node.name),
"tag", tps, variant.node.ann,
variant.node.id);
}
}
}
case (_) { /* fall through */ }
}
}
fn collect_tag_ctors(@crate_ctxt ccx, @ast.crate crate) {
auto wcx = new_walk_ctxt();
auto visitor = rec(visit_item_pre = bind collect_tag_ctor(ccx, wcx, _),
visit_item_post = bind leave_item(wcx, _)
with walk.default_visitor());
walk.walk_crate(visitor, *crate);
}
// The constant translation pass.
fn trans_constant(@crate_ctxt ccx, @walk_ctxt wcx, @ast.item it) {
enter_item(wcx, it);
alt (it.node) {
case (ast.item_tag(?ident, ?variants, _, ?tag_id, _)) {
auto i = 0u;
auto n_variants = Vec.len[ast.variant](variants);
while (i < n_variants) {
auto variant = variants.(i);
auto discrim_val = C_int(i as int);
auto s = mangle_name_by_seq(ccx, wcx.path,
#fmt("_rust_tag_discrim_%s_%u",
ident, i));
auto discrim_gvar = llvm.LLVMAddGlobal(ccx.llmod, T_int(),
Str.buf(s));
llvm.LLVMSetInitializer(discrim_gvar, discrim_val);
llvm.LLVMSetGlobalConstant(discrim_gvar, True);
ccx.discrims.insert(variant.node.id, discrim_gvar);
ccx.discrim_symbols.insert(variant.node.id, s);
i += 1u;
}
}
case (ast.item_const(?name, _, ?expr, ?cid, ?ann)) {
// FIXME: The whole expr-translation system needs cloning to deal
// with consts.
auto v = C_int(1);
ccx.item_ids.insert(cid, v);
auto s = mangle_name_by_type(ccx, wcx.path + vec(name),
node_ann_type(ccx, ann));
ccx.item_symbols.insert(cid, s);
}
case (_) {}
}
}
fn trans_constants(@crate_ctxt ccx, @ast.crate crate) {
auto wcx = new_walk_ctxt();
auto visitor = rec(visit_item_pre = bind trans_constant(ccx, wcx, _),
visit_item_post = bind leave_item(wcx, _)
with walk.default_visitor());
walk.walk_crate(visitor, *crate);
}
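// Pointer/integer cast helpers: vp2i and vi2p emit casts in a block,
// while p2i and i2p produce constant-folded casts.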
fn vp2i(@block_ctxt cx, ValueRef v) -> ValueRef {
ret cx.build.PtrToInt(v, T_int());
}
fn vi2p(@block_ctxt cx, ValueRef v, TypeRef t) -> ValueRef {
ret cx.build.IntToPtr(v, t);
}
fn p2i(ValueRef v) -> ValueRef {
ret llvm.LLVMConstPtrToInt(v, T_int());
}
fn i2p(ValueRef v, TypeRef t) -> ValueRef {
ret llvm.LLVMConstIntToPtr(v, t);
}
fn trans_exit_task_glue(@glue_fns glues,
&hashmap[str, ValueRef] externs,
type_names tn, ModuleRef llmod) {
let vec[TypeRef] T_args = vec();
let vec[ValueRef] V_args = vec();
auto llfn = glues.exit_task_glue;
auto entrybb = llvm.LLVMAppendBasicBlock(llfn, Str.buf("entry"));
auto build = new_builder(entrybb);
let ValueRef arg1 = llvm.LLVMGetParam(llfn, 0u);
let ValueRef arg2 = llvm.LLVMGetParam(llfn, 1u);
let ValueRef arg3 = llvm.LLVMGetParam(llfn, 2u);
let ValueRef arg4 = llvm.LLVMGetParam(llfn, 3u);
let ValueRef arg5 = llvm.LLVMGetParam(llfn, 4u);
auto main_type = T_fn(vec(T_int(), T_int(), T_int(), T_int()), T_void());
auto fun = build.IntToPtr(arg1, T_ptr(main_type));
auto call_args = vec(arg2, arg3, arg4, arg5);
build.FastCall(fun, call_args);
trans_native_call(build, glues, arg3,
externs, tn, llmod, "upcall_exit", true, vec(arg3));
build.RetVoid();
}
fn create_typedefs(@crate_ctxt cx) {
llvm.LLVMAddTypeName(cx.llmod, Str.buf("crate"), T_crate(cx.tn));
llvm.LLVMAddTypeName(cx.llmod, Str.buf("task"), T_task(cx.tn));
llvm.LLVMAddTypeName(cx.llmod, Str.buf("tydesc"), T_tydesc(cx.tn));
}
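// Fills in the rust_crate record; glue addresses are stored as offsets
// from the crate record's own address (constant-folded pointer
// subtractions), with unused fields left null.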
fn create_crate_constant(ValueRef crate_ptr, @glue_fns glues) {
let ValueRef crate_addr = p2i(crate_ptr);
let ValueRef activate_glue_off =
llvm.LLVMConstSub(p2i(glues.activate_glue), crate_addr);
let ValueRef yield_glue_off =
llvm.LLVMConstSub(p2i(glues.yield_glue), crate_addr);
let ValueRef exit_task_glue_off =
llvm.LLVMConstSub(p2i(glues.exit_task_glue), crate_addr);
let ValueRef crate_val =
C_struct(vec(C_null(T_int()), // ptrdiff_t image_base_off
p2i(crate_ptr), // uintptr_t self_addr
C_null(T_int()), // ptrdiff_t debug_abbrev_off
C_null(T_int()), // size_t debug_abbrev_sz
C_null(T_int()), // ptrdiff_t debug_info_off
C_null(T_int()), // size_t debug_info_sz
activate_glue_off, // size_t activate_glue_off
yield_glue_off, // size_t yield_glue_off
C_null(T_int()), // size_t unwind_glue_off
C_null(T_int()), // size_t gc_glue_off
exit_task_glue_off, // size_t main_exit_task_glue_off
C_null(T_int()), // int n_rust_syms
C_null(T_int()), // int n_c_syms
C_null(T_int()), // int n_libs
C_int(abi.abi_x86_rustc_fastcall) // uintptr_t abi_tag
));
llvm.LLVMSetInitializer(crate_ptr, crate_val);
}
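// Scans the emitted item symbols for the single fn whose mangled name
// ends in "main"; it is an error to find none or more than one.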
fn find_main_fn(@crate_ctxt cx) -> ValueRef {
auto e = sep() + "main";
let ValueRef v = C_nil();
let uint n = 0u;
for each (@tup(ast.def_id, str) i in cx.item_symbols.items()) {
if (Str.ends_with(i._1, e)) {
n += 1u;
v = cx.item_ids.get(i._0);
}
}
alt (n) {
case (0u) {
cx.sess.err("main fn not found");
}
case (1u) {
ret v;
}
case (_) {
cx.sess.err("multiple main fns found");
}
}
fail;
}
fn trans_main_fn(@local_ctxt cx, ValueRef llcrate, ValueRef crate_map) {
auto T_main_args = vec(T_int(), T_int());
auto T_rust_start_args = vec(T_int(), T_int(), T_int(), T_int(), T_int());
auto main_name;
if (Str.eq(std.OS.target_os(), "win32")) {
main_name = "WinMain@16";
} else {
main_name = "main";
}
auto llmain =
decl_cdecl_fn(cx.ccx.llmod, main_name, T_fn(T_main_args, T_int()));
auto llrust_start = decl_cdecl_fn(cx.ccx.llmod, "rust_start",
T_fn(T_rust_start_args, T_int()));
auto llargc = llvm.LLVMGetParam(llmain, 0u);
auto llargv = llvm.LLVMGetParam(llmain, 1u);
auto llrust_main = find_main_fn(cx.ccx);
//
// Emit the moral equivalent of:
//
// main(int argc, char **argv) {
//     return rust_start(&_rust.main, &crate, argc, argv, &crate_map);
// }
//
let BasicBlockRef llbb =
llvm.LLVMAppendBasicBlock(llmain, Str.buf(""));
auto b = new_builder(llbb);
auto start_args = vec(p2i(llrust_main), p2i(llcrate), llargc, llargv,
p2i(crate_map));
b.Ret(b.Call(llrust_start, start_args));
}
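// Declares the LLVM intrinsics we use; just llvm.trap for now.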
fn declare_intrinsics(ModuleRef llmod) -> hashmap[str,ValueRef] {
let vec[TypeRef] T_trap_args = vec();
auto trap = decl_cdecl_fn(llmod, "llvm.trap",
T_fn(T_trap_args, T_void()));
auto intrinsics = new_str_hash[ValueRef]();
intrinsics.insert("llvm.trap", trap);
ret intrinsics;
}
fn trace_str(@block_ctxt cx, str s) {
trans_upcall(cx, "upcall_trace_str", vec(p2i(C_cstr(cx.fcx.lcx.ccx, s))),
false);
}
fn trace_word(@block_ctxt cx, ValueRef v) {
trans_upcall(cx, "upcall_trace_word", vec(v), false);
}
fn trace_ptr(@block_ctxt cx, ValueRef v) {
trace_word(cx, cx.build.PtrToInt(v, T_int()));
}
fn trap(@block_ctxt bcx) {
let vec[ValueRef] v = vec();
bcx.build.Call(bcx.fcx.lcx.ccx.intrinsics.get("llvm.trap"), v);
}
fn decl_no_op_type_glue(ModuleRef llmod, type_names tn) -> ValueRef {
auto ty = T_fn(vec(T_taskptr(tn), T_ptr(T_i8())), T_void());
ret decl_fastcall_fn(llmod, abi.no_op_type_glue_name(), ty);
}
fn make_no_op_type_glue(ValueRef fun) {
auto bb_name = Str.buf("_rust_no_op_type_glue_bb");
auto llbb = llvm.LLVMAppendBasicBlock(fun, bb_name);
new_builder(llbb).RetVoid();
}
fn decl_memcpy_glue(ModuleRef llmod) -> ValueRef {
auto p8 = T_ptr(T_i8());
auto ty = T_fn(vec(p8, p8, T_int()), T_void());
ret decl_fastcall_fn(llmod, abi.memcpy_glue_name(), ty);
}
fn make_memcpy_glue(ValueRef fun) {
// We're not using the LLVM memcpy intrinsic. It appears to call through
// to the platform memcpy in some cases, which is not terribly safe to run
// on a rust stack.
auto initbb = llvm.LLVMAppendBasicBlock(fun, Str.buf("init"));
auto hdrbb = llvm.LLVMAppendBasicBlock(fun, Str.buf("hdr"));
auto loopbb = llvm.LLVMAppendBasicBlock(fun, Str.buf("loop"));
auto endbb = llvm.LLVMAppendBasicBlock(fun, Str.buf("end"));
auto dst = llvm.LLVMGetParam(fun, 0u);
auto src = llvm.LLVMGetParam(fun, 1u);
auto count = llvm.LLVMGetParam(fun, 2u);
// Init block.
auto ib = new_builder(initbb);
auto ip = ib.Alloca(T_int());
ib.Store(C_int(0), ip);
ib.Br(hdrbb);
// Loop-header block
auto hb = new_builder(hdrbb);
auto i = hb.Load(ip);
hb.CondBr(hb.ICmp(lib.llvm.LLVMIntEQ, count, i), endbb, loopbb);
// Loop-body block
auto lb = new_builder(loopbb);
i = lb.Load(ip);
lb.Store(lb.Load(lb.GEP(src, vec(i))),
lb.GEP(dst, vec(i)));
lb.Store(lb.Add(i, C_int(1)), ip);
lb.Br(hdrbb);
// End block
auto eb = new_builder(endbb);
eb.RetVoid();
}
fn decl_bzero_glue(ModuleRef llmod) -> ValueRef {
auto p8 = T_ptr(T_i8());
auto ty = T_fn(vec(p8, T_int()), T_void());
ret decl_fastcall_fn(llmod, abi.bzero_glue_name(), ty);
}
fn make_bzero_glue(ValueRef fun) -> ValueRef {
// We're not using the LLVM memset intrinsic. Same as with memcpy.
auto initbb = llvm.LLVMAppendBasicBlock(fun, Str.buf("init"));
auto hdrbb = llvm.LLVMAppendBasicBlock(fun, Str.buf("hdr"));
auto loopbb = llvm.LLVMAppendBasicBlock(fun, Str.buf("loop"));
auto endbb = llvm.LLVMAppendBasicBlock(fun, Str.buf("end"));
auto dst = llvm.LLVMGetParam(fun, 0u);
auto count = llvm.LLVMGetParam(fun, 1u);
// Init block.
auto ib = new_builder(initbb);
auto ip = ib.Alloca(T_int());
ib.Store(C_int(0), ip);
ib.Br(hdrbb);
// Loop-header block
auto hb = new_builder(hdrbb);
auto i = hb.Load(ip);
hb.CondBr(hb.ICmp(lib.llvm.LLVMIntEQ, count, i), endbb, loopbb);
// Loop-body block
auto lb = new_builder(loopbb);
i = lb.Load(ip);
lb.Store(C_u8(0u), lb.GEP(dst, vec(i)));
lb.Store(lb.Add(i, C_int(1)), ip);
lb.Br(hdrbb);
// End block
auto eb = new_builder(endbb);
eb.RetVoid();
ret fun;
}
fn make_vec_append_glue(ModuleRef llmod, type_names tn) -> ValueRef {
/*
* Args to vec_append_glue:
*
* 0. (Implicit) task ptr
*
* 1. Pointer to the tydesc of the vec, so that we can tell if it's gc
* mem, and have a tydesc to pass to malloc if we're allocating anew.
*
* 2. Pointer to the tydesc of the vec's stored element type, so that
* elements can be copied to a newly alloc'ed vec if one must be
* created.
*
* 3. Dst vec ptr (i.e. ptr to ptr to rust_vec).
*
* 4. Src vec (i.e. ptr to rust_vec).
*
* 5. Flag indicating whether to skip trailing null on dst.
*
*/
auto ty = T_fn(vec(T_taskptr(tn),
T_ptr(T_tydesc(tn)),
T_ptr(T_tydesc(tn)),
T_ptr(T_opaque_vec_ptr()),
T_opaque_vec_ptr(), T_bool()),
T_void());
auto llfn = decl_fastcall_fn(llmod, abi.vec_append_glue_name(), ty);
ret llfn;
}
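// Accessors for the vec-append glue: a rust_vec's fill (bytes in use),
// optionally adjusted to drop the trailing null when appending strs, and
// pointers to the start and end of its data.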
fn vec_fill(@block_ctxt bcx, ValueRef v) -> ValueRef {
ret bcx.build.Load(bcx.build.GEP(v, vec(C_int(0),
C_int(abi.vec_elt_fill))));
}
fn put_vec_fill(@block_ctxt bcx, ValueRef v, ValueRef fill) -> ValueRef {
ret bcx.build.Store(fill,
bcx.build.GEP(v,
vec(C_int(0),
C_int(abi.vec_elt_fill))));
}
fn vec_fill_adjusted(@block_ctxt bcx, ValueRef v,
ValueRef skipnull) -> ValueRef {
auto f = bcx.build.Load(bcx.build.GEP(v,
vec(C_int(0),
C_int(abi.vec_elt_fill))));
ret bcx.build.Select(skipnull, bcx.build.Sub(f, C_int(1)), f);
}
fn vec_p0(@block_ctxt bcx, ValueRef v) -> ValueRef {
auto p = bcx.build.GEP(v, vec(C_int(0),
C_int(abi.vec_elt_data)));
ret bcx.build.PointerCast(p, T_ptr(T_i8()));
}
fn vec_p1(@block_ctxt bcx, ValueRef v) -> ValueRef {
auto len = vec_fill(bcx, v);
ret bcx.build.GEP(vec_p0(bcx, v), vec(len));
}
fn vec_p1_adjusted(@block_ctxt bcx, ValueRef v,
ValueRef skipnull) -> ValueRef {
auto len = vec_fill_adjusted(bcx, v, skipnull);
ret bcx.build.GEP(vec_p0(bcx, v), vec(len));
}
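// Builds the body of the vec-append glue: grow the dst vec via
// upcall_vec_grow, copy dst's elements into the new vec if the upcall
// asked for it, then append src's elements, taking a reference on each
// copied element through the element tydesc's take-glue.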
fn trans_vec_append_glue(@local_ctxt cx) {
auto llfn = cx.ccx.glues.vec_append_glue;
let ValueRef lltaskptr = llvm.LLVMGetParam(llfn, 0u);
let ValueRef llvec_tydesc = llvm.LLVMGetParam(llfn, 1u);
let ValueRef llelt_tydesc = llvm.LLVMGetParam(llfn, 2u);
let ValueRef lldst_vec_ptr = llvm.LLVMGetParam(llfn, 3u);
let ValueRef llsrc_vec = llvm.LLVMGetParam(llfn, 4u);
let ValueRef llskipnull = llvm.LLVMGetParam(llfn, 5u);
let BasicBlockRef llallocas =
llvm.LLVMAppendBasicBlock(llfn, Str.buf("allocas"));
auto fcx = @rec(llfn=llfn,
lltaskptr=lltaskptr,
llenv=C_null(T_ptr(T_nil())),
llretptr=C_null(T_ptr(T_nil())),
mutable llallocas = llallocas,
mutable llself=none[self_vt],
mutable lliterbody=none[ValueRef],
llargs=new_def_hash[ValueRef](),
llobjfields=new_def_hash[ValueRef](),
lllocals=new_def_hash[ValueRef](),
llupvars=new_def_hash[ValueRef](),
mutable lltydescs=Vec.empty[ValueRef](),
lcx=cx);
auto bcx = new_top_block_ctxt(fcx);
auto lltop = bcx.llbb;
auto lldst_vec = bcx.build.Load(lldst_vec_ptr);
// First the dst vec needs to grow to accommodate the src vec.
// To do this we have to figure out how many bytes to add.
auto llcopy_dst_ptr = alloca(bcx, T_int());
auto llnew_vec_res =
trans_upcall(bcx, "upcall_vec_grow",
vec(vp2i(bcx, lldst_vec),
vec_fill_adjusted(bcx, llsrc_vec, llskipnull),
vp2i(bcx, llcopy_dst_ptr),
vp2i(bcx, llvec_tydesc)),
false);
bcx = llnew_vec_res.bcx;
auto llnew_vec = vi2p(bcx, llnew_vec_res.val,
T_opaque_vec_ptr());
llvm.LLVMSetValueName(llnew_vec, Str.buf("llnew_vec"));
auto copy_dst_cx = new_sub_block_ctxt(bcx, "copy new <- dst");
auto copy_src_cx = new_sub_block_ctxt(bcx, "copy new <- src");
auto pp0 = alloca(bcx, T_ptr(T_i8()));
bcx.build.Store(vec_p1_adjusted(bcx, llnew_vec, llskipnull), pp0);
llvm.LLVMSetValueName(pp0, Str.buf("pp0"));
bcx.build.CondBr(bcx.build.TruncOrBitCast
(bcx.build.Load(llcopy_dst_ptr),
T_i1()),
copy_dst_cx.llbb,
copy_src_cx.llbb);
fn copy_elts(@block_ctxt cx,
ValueRef elt_tydesc,
ValueRef dst,
ValueRef src,
ValueRef n_bytes) -> result {
auto src_lim = cx.build.GEP(src, vec(n_bytes));
llvm.LLVMSetValueName(src_lim, Str.buf("src_lim"));
auto elt_llsz =
cx.build.Load(cx.build.GEP(elt_tydesc,
vec(C_int(0),
C_int(abi.tydesc_field_size))));
llvm.LLVMSetValueName(elt_llsz, Str.buf("elt_llsz"));
fn take_one(ValueRef elt_tydesc,
@block_ctxt cx,
ValueRef dst, ValueRef src) -> result {
call_tydesc_glue_full(cx, src,
elt_tydesc,
abi.tydesc_field_take_glue);
ret res(cx, src);
}
auto bcx = iter_sequence_raw(cx, dst, src, src_lim,
elt_llsz, bind take_one(elt_tydesc,
_, _, _)).bcx;
ret call_memcpy(bcx, dst, src, n_bytes);
}
// Copy any dst elements in, omitting null if doing str.
auto n_bytes = vec_fill_adjusted(copy_dst_cx, lldst_vec, llskipnull);
llvm.LLVMSetValueName(n_bytes, Str.buf("n_bytes"));
copy_dst_cx = copy_elts(copy_dst_cx,
llelt_tydesc,
vec_p0(copy_dst_cx, llnew_vec),
vec_p0(copy_dst_cx, lldst_vec),
n_bytes).bcx;
put_vec_fill(copy_dst_cx, llnew_vec, vec_fill(copy_dst_cx, lldst_vec));
copy_dst_cx.build.Store(vec_p1_adjusted(copy_dst_cx, llnew_vec,
llskipnull), pp0);
copy_dst_cx.build.Br(copy_src_cx.llbb);
// Copy any src elements in, carrying along null if doing str.
n_bytes = vec_fill(copy_src_cx, llsrc_vec);
copy_src_cx = copy_elts(copy_src_cx,
llelt_tydesc,
copy_src_cx.build.Load(pp0),
vec_p0(copy_src_cx, llsrc_vec),
n_bytes).bcx;
put_vec_fill(copy_src_cx, llnew_vec,
copy_src_cx.build.Add(vec_fill_adjusted(copy_src_cx,
llnew_vec,
llskipnull),
n_bytes));
// Write new_vec back through the alias we were given.
copy_src_cx.build.Store(llnew_vec, lldst_vec_ptr);
copy_src_cx.build.RetVoid();
// Tie up the llallocas -> lltop edge.
new_builder(fcx.llallocas).Br(lltop);
}
fn make_glues(ModuleRef llmod, type_names tn) -> @glue_fns {
ret @rec(activate_glue = decl_glue(llmod, tn, abi.activate_glue_name()),
yield_glue = decl_glue(llmod, tn, abi.yield_glue_name()),
exit_task_glue = decl_cdecl_fn(llmod, abi.exit_task_glue_name(),
T_fn(vec(T_int(),
T_int(),
T_int(),
T_int(),
T_int()),
T_void())),
native_glues_rust =
Vec.init_fn[ValueRef](bind decl_native_glue(llmod, tn,
abi.ngt_rust, _), abi.n_native_glues + 1 as uint),
native_glues_pure_rust =
Vec.init_fn[ValueRef](bind decl_native_glue(llmod, tn,
abi.ngt_pure_rust, _), abi.n_native_glues + 1 as uint),
native_glues_cdecl =
Vec.init_fn[ValueRef](bind decl_native_glue(llmod, tn,
abi.ngt_cdecl, _), abi.n_native_glues + 1 as uint),
no_op_type_glue = decl_no_op_type_glue(llmod, tn),
memcpy_glue = decl_memcpy_glue(llmod),
bzero_glue = decl_bzero_glue(llmod),
vec_append_glue = make_vec_append_glue(llmod, tn));
}
fn make_common_glue(session.session sess, str output) {
// FIXME: part of this is repetitive and would probably be a good
// candidate for autogeneration, but things like the memcpy
// implementation are not, and it might be better to just check in a
// .ll file.
auto llmod =
llvm.LLVMModuleCreateWithNameInContext(Str.buf("rust_out"),
llvm.LLVMGetGlobalContext());
llvm.LLVMSetDataLayout(llmod, Str.buf(x86.get_data_layout()));
llvm.LLVMSetTarget(llmod, Str.buf(x86.get_target_triple()));
auto td = mk_target_data(x86.get_data_layout());
auto tn = mk_type_names();
let ValueRef crate_ptr =
llvm.LLVMAddGlobal(llmod, T_crate(tn), Str.buf("rust_crate"));
auto intrinsics = declare_intrinsics(llmod);
llvm.LLVMSetModuleInlineAsm(llmod, Str.buf(x86.get_module_asm()));
auto glues = make_glues(llmod, tn);
create_crate_constant(crate_ptr, glues);
make_memcpy_glue(glues.memcpy_glue);
make_bzero_glue(glues.bzero_glue);
trans.trans_exit_task_glue(glues, new_str_hash[ValueRef](), tn,
llmod);
Link.Write.run_passes(sess, llmod, output);
}
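// Emits _rust_mod_map, an internal table of word pairs, one
// (name ptr, data ptr) entry per module-data item, terminated by (0, 0).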
fn create_module_map(@crate_ctxt ccx) -> ValueRef {
auto elttype = T_struct(vec(T_int(), T_int()));
auto maptype = T_array(elttype, ccx.module_data.size() + 1u);
auto map = llvm.LLVMAddGlobal(ccx.llmod, maptype,
Str.buf("_rust_mod_map"));
llvm.LLVMSetLinkage(map, lib.llvm.LLVMInternalLinkage as llvm.Linkage);
let vec[ValueRef] elts = vec();
for each (@tup(str, ValueRef) item in ccx.module_data.items()) {
auto elt = C_struct(vec(p2i(C_cstr(ccx, item._0)), p2i(item._1)));
Vec.push[ValueRef](elts, elt);
}
auto term = C_struct(vec(C_int(0), C_int(0)));
Vec.push[ValueRef](elts, term);
llvm.LLVMSetInitializer(map, C_array(elttype, elts));
ret map;
}
fn crate_name(@crate_ctxt ccx, str deflt) -> str {
for (@ast.meta_item item in ccx.sess.get_metadata()) {
if (Str.eq(item.node.name, "name")) {
ret item.node.value;
}
}
ret deflt;
}
// FIXME use hashed metadata instead of crate names once we have that
fn create_crate_map(@crate_ctxt ccx) -> ValueRef {
let vec[ValueRef] subcrates = vec();
auto i = 1;
while (ccx.sess.has_external_crate(i)) {
auto name = ccx.sess.get_external_crate(i).name;
auto cr = llvm.LLVMAddGlobal(ccx.llmod, T_int(),
Str.buf("_rust_crate_map_" + name));
Vec.push[ValueRef](subcrates, p2i(cr));
i += 1;
}
Vec.push[ValueRef](subcrates, C_int(0));
auto sym_name = "_rust_crate_map_" + crate_name(ccx, "__none__");
auto arrtype = T_array(T_int(), Vec.len[ValueRef](subcrates));
auto maptype = T_struct(vec(T_int(), arrtype));
auto map = llvm.LLVMAddGlobal(ccx.llmod, maptype, Str.buf(sym_name));
llvm.LLVMSetLinkage(map, lib.llvm.LLVMExternalLinkage as llvm.Linkage);
llvm.LLVMSetInitializer(map, C_struct(vec(p2i(create_module_map(ccx)),
C_array(T_int(), subcrates))));
ret map;
}
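// Top-level entry point for translation: sets up the module, crate
// record and crate context, runs the collection and constant passes,
// translates the crate's items, and emits the crate map and metadata.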
fn trans_crate(session.session sess, @ast.crate crate, ty.ctxt tcx,
ty.type_cache type_cache, str output)
-> ModuleRef {
auto llmod =
llvm.LLVMModuleCreateWithNameInContext(Str.buf("rust_out"),
llvm.LLVMGetGlobalContext());
llvm.LLVMSetDataLayout(llmod, Str.buf(x86.get_data_layout()));
llvm.LLVMSetTarget(llmod, Str.buf(x86.get_target_triple()));
auto td = mk_target_data(x86.get_data_layout());
auto tn = mk_type_names();
let ValueRef crate_ptr =
llvm.LLVMAddGlobal(llmod, T_crate(tn), Str.buf("rust_crate"));
auto intrinsics = declare_intrinsics(llmod);
auto glues = make_glues(llmod, tn);
auto hasher = ty.hash_ty;
auto eqer = ty.eq_ty;
auto tag_sizes = Map.mk_hashmap[ty.t,uint](hasher, eqer);
auto tydescs = Map.mk_hashmap[ty.t,@tydesc_info](hasher, eqer);
auto lltypes = Map.mk_hashmap[ty.t,TypeRef](hasher, eqer);
auto sha1s = Map.mk_hashmap[ty.t,str](hasher, eqer);
auto abbrevs = Map.mk_hashmap[ty.t,metadata.ty_abbrev](hasher, eqer);
auto ccx = @rec(sess = sess,
llmod = llmod,
td = td,
tn = tn,
crate_ptr = crate_ptr,
externs = new_str_hash[ValueRef](),
intrinsics = intrinsics,
item_ids = new_def_hash[ValueRef](),
items = new_def_hash[@ast.item](),
native_items = new_def_hash[@ast.native_item](),
type_cache = type_cache,
item_symbols = new_def_hash[str](),
tag_sizes = tag_sizes,
discrims = new_def_hash[ValueRef](),
discrim_symbols = new_def_hash[str](),
fn_pairs = new_def_hash[ValueRef](),
consts = new_def_hash[ValueRef](),
obj_methods = new_def_hash[()](),
tydescs = tydescs,
module_data = new_str_hash[ValueRef](),
lltypes = lltypes,
glues = glues,
names = namegen(0),
sha = std.SHA1.mk_sha1(),
type_sha1s = sha1s,
type_abbrevs = abbrevs,
tcx = tcx);
auto cx = new_local_ctxt(ccx);
create_typedefs(ccx);
collect_items(ccx, crate);
collect_tag_ctors(ccx, crate);
trans_constants(ccx, crate);
trans_mod(cx, crate.node.module);
trans_vec_append_glue(cx);
auto crate_map = create_crate_map(ccx);
if (!sess.get_opts().shared) {
trans_main_fn(cx, crate_ptr, crate_map);
}
// Translate the metadata.
middle.metadata.write_metadata(cx.ccx, crate);
ret llmod;
}
//
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// compile-command: "make -k -C $RBUILD 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
// End:
//