Remove partial DPS back-end

It won't be finished on the short term, and it's already bit-rotting
fast. We can fetch this from git's autumn annals if we need it.
This commit is contained in:
Marijn Haverbeke 2011-08-16 21:34:52 +02:00
parent 3db300b06e
commit 27169387fe
6 changed files with 12 additions and 823 deletions

View File

@ -278,7 +278,6 @@ fn usage(argv0: str) {
--sysroot <path> override the system root (default: rustc's directory)
--no-typestate don't run the typestate pass (unsafe!)
--test build test harness
--dps translate via destination-passing style (experimental)
--gc garbage collect shared data (experimental/temporary)
");
@ -384,7 +383,6 @@ fn build_session_options(binary: str, match: getopts::match, binary_dir: str)
};
let cfg = parse_cfgspecs(getopts::opt_strs(match, "cfg"));
let test = opt_present(match, "test");
let dps = opt_present(match, "dps");
let do_gc = opt_present(match, "gc");
let sopts: @session::options =
@{library: library,
@ -402,7 +400,6 @@ fn build_session_options(binary: str, match: getopts::match, binary_dir: str)
sysroot: sysroot,
cfg: cfg,
test: test,
dps: dps,
parse_only: parse_only,
no_trans: no_trans,
do_gc: do_gc};
@ -437,7 +434,7 @@ fn opts() -> [getopts::opt] {
optopt("sysroot"), optflag("stats"), optflag("time-passes"),
optflag("time-llvm-passes"), optflag("no-typestate"),
optflag("noverify"), optmulti("cfg"), optflag("test"),
optflag("lib"), optflag("static"), optflag("dps"), optflag("gc")];
optflag("lib"), optflag("static"), optflag("gc")];
}
fn main(args: vec[str]) {

View File

@ -41,7 +41,6 @@
sysroot: str,
cfg: ast::crate_cfg,
test: bool,
dps: bool,
parse_only: bool,
no_trans: bool,
do_gc: bool

View File

@ -6021,11 +6021,6 @@ fn trans_closure(bcx_maybe: &option::t[@block_ctxt],
let lltop = bcx.llbb;
let block_ty = node_id_type(cx.ccx, f.body.node.id);
if cx.ccx.sess.get_opts().dps {
// Call into the new destination-passing-style translation engine.
let dest = trans_dps::dest_move(cx.ccx.tcx, fcx.llretptr, block_ty);
bcx = trans_dps::trans_block(bcx, dest, f.body);
} else {
// This call to trans_block is the place where we bridge between
// translation calls that don't have a return value (trans_crate,
// trans_mod, trans_item, trans_obj, et cetera) and those that do
@ -6037,7 +6032,6 @@ fn trans_closure(bcx_maybe: &option::t[@block_ctxt],
trans_block(bcx, f.body, save_in(fcx.llretptr))
} else { trans_block(bcx, f.body, return) };
bcx = rslt.bcx;
}
if !is_terminated(bcx) {
// FIXME: until LLVM has a unit type, we are moving around

View File

@ -1,580 +0,0 @@
// Translates individual functions in the completed AST to the LLVM IR, using
// destination-passing style.
import back::abi;
import back::link;
import lib::llvm::llvm;
import llvm::TypeRef;
import llvm::ValueRef;
import middle::trans_common;
import middle::ty;
import syntax::ast;
import syntax::codemap::span;
import util::ppaux;
import trans_common::*;
import std::vec;
import std::option::none;
import std::option::some;
import std::str;
import std::uint;
import LLFalse = lib::llvm::False;
import LLTrue = lib::llvm::True;
import ll = lib::llvm;
import lltype_of = trans_common::val_ty;
import option = std::option::t;
import tc = trans_common;
import type_of_node = trans::node_id_type;
// LLVM utilities
// Returns the element (pointee) type of the given LLVM pointer or
// aggregate type.
fn llelement_type(llty: TypeRef) -> TypeRef {
    lib::llvm::llvm::LLVMGetElementType(llty)
}

// Preferred alignment, in bytes, of `llty` according to the target data
// layout held in the crate context.
fn llalign_of(ccx: &@crate_ctxt, llty: TypeRef) -> uint {
    ret llvm::LLVMPreferredAlignmentOfType(ccx.td.lltd, llty);
}

// Store size, in bytes, of `llty` according to the target data layout.
fn llsize_of(ccx: &@crate_ctxt, llty: TypeRef) -> uint {
    ret llvm::LLVMStoreSizeOfType(ccx.td.lltd, llty);
}
// Creates a named LLVM global constant in the crate's module, initialized
// to `llval`. When `exported` is false the global is given internal
// linkage, keeping the symbol private to this object file.
fn mk_const(ccx: &@crate_ctxt, name: &str, exported: bool, llval: ValueRef) ->
   ValueRef {
    let llglobal =
        llvm::LLVMAddGlobal(ccx.llmod, tc::val_ty(llval), str::buf(name));
    llvm::LLVMSetInitializer(llglobal, llval);
    llvm::LLVMSetGlobalConstant(llglobal, LLTrue);
    if !exported {
        // Not exported: hide the symbol with internal linkage.
        llvm::LLVMSetLinkage(llglobal,
                             lib::llvm::LLVMInternalLinkage as llvm::Linkage);
    }
    ret llglobal;
}
// Type utilities
// Size in bytes of the static LLVM representation of type `t`. It is a
// compiler bug to call this on a type whose size is not known statically.
fn size_of(ccx: &@crate_ctxt, sp: &span, t: ty::t) -> uint {
    if ty::type_has_dynamic_size(ccx.tcx, t) {
        ccx.sess.bug("trans_dps::size_of() called on a type with dynamic " +
                         "size");
    }
    ret llsize_of(ccx, trans::type_of_inner(ccx, sp, t));
}
// Destination utilities
// A destination that a translated expression writes its result into.
// The immediate and alias variants carry a mutable box that starts out
// `none` and is filled in by the expression's translation.
tag dest {
    dst_nil; // Unit destination; ignore.
    dst_imm(@mutable option[ValueRef]); // Fill with an immediate value.
    dst_alias(@mutable option[ValueRef]); // Fill with an alias pointer.
    dst_copy(ValueRef); // Copy to the given address.
    dst_move(ValueRef); // Move to the given address.
}

// Immediate destination for a value of type `t`. Nil types get the unit
// destination, since they carry no data.
fn dest_imm(tcx: &ty::ctxt, t: ty::t) -> dest {
    if ty::type_is_nil(tcx, t) { dst_nil } else { dst_imm(@mutable none) }
}

// Alias destination for a value of type `t` (nil collapses to dst_nil).
fn dest_alias(tcx: &ty::ctxt, t: ty::t) -> dest {
    if ty::type_is_nil(tcx, t) { dst_nil } else { dst_alias(@mutable none) }
}

// Copy-to-address destination at `llptr` (nil collapses to dst_nil).
fn dest_copy(tcx: &ty::ctxt, llptr: ValueRef, t: ty::t) -> dest {
    if ty::type_is_nil(tcx, t) { dst_nil } else { dst_copy(llptr) }
}

// Move-to-address destination at `llptr` (nil collapses to dst_nil).
fn dest_move(tcx: &ty::ctxt, llptr: ValueRef, t: ty::t) -> dest {
    if ty::type_is_nil(tcx, t) { dst_nil } else { dst_move(llptr) }
}
// Invariant: the type of the destination must be structural (non-immediate).
// Returns the memory address a structural (non-immediate) destination
// refers to. Fails on nil/immediate destinations and on alias
// destinations that have not been filled in yet.
fn dest_ptr(dest: &dest) -> ValueRef {
    alt dest {
      dst_nil. { fail "nil dest in dest_ptr" }
      dst_imm(_) { fail "immediate dest in dest_ptr" }
      dst_alias(box) {
        alt *box {
          none. { fail "alias wasn't filled in prior to dest_ptr" }
          some(llval) { llval }
        }
      }
      dst_copy(llptr) { llptr }
      dst_move(llptr) { llptr }
    }
}
// Returns the LLVM value a destination holds after translation: C_nil for
// the unit destination, the stored immediate, the filled-in alias pointer,
// or the copy/move target address. Fails if an imm/alias box was never
// filled in.
fn dest_llval(dest: &dest) -> ValueRef {
    alt dest {
      dst_nil. { ret tc::C_nil(); }
      dst_imm(box) {
        alt *box {
          none. { fail "immediate wasn't filled in prior to dest_llval"; }
          some(llval) { ret llval; }
        }
      }
      dst_alias(box) {
        alt *box {
          none. { fail "alias wasn't filled in prior to dest_llval"; }
          some(llval) { ret llval; }
        }
      }
      dst_copy(llptr) { ret llptr; }
      dst_move(llptr) { ret llptr; }
    }
}
// True iff the destination is an alias destination.
fn dest_is_alias(dest: &dest) -> bool {
    alt dest { dst_alias(_) { true } _ { false } }
}
// Common operations
// Emits a call to the llvm.memmove intrinsic copying `llsz` bytes from
// `llsrcptr` to `lldestptr`. Both pointers are cast to i8*, and the
// alignment passed to the intrinsic is the smaller of the two pointees'
// preferred alignments.
fn memmove(bcx: &@block_ctxt, lldestptr: ValueRef, llsrcptr: ValueRef,
           llsz: ValueRef) {
    let lldestty = llelement_type(tc::val_ty(lldestptr));
    let llsrcty = llelement_type(tc::val_ty(llsrcptr));
    let dest_align = llalign_of(bcx_ccx(bcx), lldestty);
    let src_align = llalign_of(bcx_ccx(bcx), llsrcty);
    // Use the weaker of the two alignment guarantees.
    let align = uint::min(dest_align, src_align);
    let llfn = bcx_ccx(bcx).intrinsics.get("llvm.memmove.p0i8.p0i8.i32");
    let lldestptr_i8 =
        bcx.build.PointerCast(lldestptr, tc::T_ptr(tc::T_i8()));
    let llsrcptr_i8 = bcx.build.PointerCast(llsrcptr, tc::T_ptr(tc::T_i8()));
    // Final `false` argument is the intrinsic's is-volatile flag.
    bcx.build.Call(llfn,
                   ~[lldestptr_i8, llsrcptr_i8, llsz, tc::C_uint(align),
                     tc::C_bool(false)]);
}
// If "cast" is true, casts dest appropriately before the store.
// Stores the immediate value `llsrc` into `dest`. If `cast` is true, the
// destination pointer is first cast to a pointer to llsrc's type. Spilling
// an immediate into an alias destination is not implemented.
fn store_imm(bcx: &@block_ctxt, dest: &dest, llsrc: ValueRef, cast: bool) ->
   @block_ctxt {
    alt dest {
      dst_nil. {/* no-op */ }
      dst_imm(box) {
        // The box must not have been filled in already.
        assert (std::option::is_none(*box));
        *box = some(llsrc);
      }
      dst_alias(box) {
        bcx_ccx(bcx).sess.unimpl("dst_alias spill in store_imm");
      }
      dst_copy(lldestptr_orig) | dst_move(lldestptr_orig) {
        let lldestptr = lldestptr_orig;
        if cast {
            lldestptr =
                bcx.build.PointerCast(lldestptr, tc::T_ptr(lltype_of(llsrc)));
        }
        bcx.build.Store(llsrc, lldestptr);
      }
    }
    ret bcx;
}
// Stores the value behind `llsrcptr` into `dest`. Immediate destinations
// load the value; alias destinations just record the pointer; copy/move
// destinations memmove the pointee's static size.
fn store_ptr(bcx: &@block_ctxt, dest: &dest, llsrcptr: ValueRef) ->
   @block_ctxt {
    alt dest {
      dst_nil. {/* no-op */ }
      dst_imm(box) {
        assert (std::option::is_none(*box));
        *box = some(bcx.build.Load(llsrcptr));
      }
      dst_alias(box) {
        assert (std::option::is_none(*box));
        // Record the source pointer itself; no data is copied.
        *box = some(llsrcptr);
      }
      dst_copy(lldestptr) | dst_move(lldestptr) {
        let llsrcty = llelement_type(tc::val_ty(llsrcptr));
        let llsz = tc::C_uint(llsize_of(bcx_ccx(bcx), llsrcty));
        memmove(bcx, lldestptr, llsrcptr, llsz);
        ret bcx;
      }
    }
    ret bcx;
}
// Allocates a value of the given LLVM size on either the task heap or the
// shared heap.
//
// TODO: This should *not* use destination-passing style, because doing so
// makes callers incur an extra load.
// Which heap an allocation comes from: the task-local heap or the shared
// (cross-task) heap.
tag heap { hp_task; hp_shared; }

// Allocates on the chosen heap via the matching runtime upcall and stores
// the resulting pointer through `lldest` (which must be a pointer to a
// pointer slot). If `llcustom_size_opt` is `some`, that size is passed to
// the upcall instead of the pointee type's static size. The tydesc
// argument to the upcall is always null here.
fn malloc(bcx: &@block_ctxt, lldest: ValueRef, heap: heap,
          llcustom_size_opt: option[ValueRef]) -> @block_ctxt {
    let llptrty = llelement_type(lltype_of(lldest));
    let llty = llelement_type(llptrty);
    let lltydescptr = tc::C_null(tc::T_ptr(bcx_ccx(bcx).tydesc_type));
    let llsize;
    alt llcustom_size_opt {
      none. { llsize = trans::llsize_of(llty); }
      some(llcustom_size) { llsize = llcustom_size; }
    }
    let llupcall;
    alt heap {
      hp_task. { llupcall = bcx_ccx(bcx).upcalls.malloc; }
      hp_shared. { llupcall = bcx_ccx(bcx).upcalls.shared_malloc; }
    }
    let llresult =
        bcx.build.Call(llupcall,
                       ~[bcx_fcx(bcx).lltaskptr, llsize, lltydescptr]);
    // Upcall returns an untyped pointer; cast to the slot's pointee type.
    llresult = bcx.build.PointerCast(llresult, llptrty);
    bcx.build.Store(llresult, lldest);
    ret bcx;
}
// If the supplied destination is an alias, spills to a temporary. Returns the
// new destination.
// If `dest` is an (unfilled) alias destination, allocates a stack
// temporary of type `t`, records its address in the alias box, and
// returns a dst_move destination pointing at it. Any other destination is
// returned unchanged.
fn spill_alias(cx: &@block_ctxt, dest: &dest, t: ty::t) ->
   {bcx: @block_ctxt, dest: dest} {
    let bcx = cx;
    alt dest {
      dst_alias(box) {
        // TODO: Mark the alias as needing a cleanup.
        assert (std::option::is_none(*box));
        let r = trans::alloc_ty(cx, t);
        bcx = r.bcx;
        let llptr = r.val;
        *box = some(llptr);
        ret {bcx: bcx, dest: dst_move(llptr)};
      }
      _ { ret {bcx: bcx, dest: dest}; }
    }
}
// Makes a scratch destination suitable for a value of type `t`: the unit
// destination for nil, an empty immediate box for immediate types, or a
// fresh stack allocation (as dst_copy) otherwise.
fn mk_temp(cx: &@block_ctxt, t: ty::t) -> {bcx: @block_ctxt, dest: dest} {
    let bcx = cx;
    if ty::type_is_nil(bcx_tcx(bcx), t) { ret {bcx: bcx, dest: dst_nil}; }
    if trans::type_is_immediate(bcx_ccx(bcx), t) {
        ret {bcx: bcx, dest: dst_imm(@mutable none)};
    }
    let r = trans::alloc_ty(cx, t);
    bcx = r.bcx;
    let llptr = r.val;
    ret {bcx: bcx, dest: dst_copy(llptr)};
}
// AST substructure translation, with destinations
// Translates a literal into `dest`. Unique (interior-vector) string
// literals get special handling: the stack part is copied into the
// destination, and if the constant also has a heap part, shared-heap
// space is allocated and the heap part copied there. Every other literal
// is stored as an immediate constant.
fn trans_lit(cx: &@block_ctxt, dest: &dest, lit: &ast::lit) -> @block_ctxt {
    let bcx = cx;
    alt lit.node {
      ast::lit_str(s, ast::sk_unique.) {
        // Build the constant; expand (never heap-spill) when filling an
        // alias destination, since its size isn't fixed.
        let r = trans_lit_str_common(bcx_ccx(bcx), s, dest_is_alias(dest));
        let llstackpart = r.stack;
        let llheappartopt = r.heap;
        bcx = store_ptr(bcx, dest, llstackpart);
        alt llheappartopt {
          none. {/* no-op */ }
          some(llheappart) {
            // Point the ivec's element slot at freshly-allocated shared
            // heap memory and copy the heap part there.
            let lldestptrptr =
                bcx.build.InBoundsGEP(dest_ptr(dest),
                                      ~[tc::C_int(0),
                                        tc::C_uint(abi::ivec_elt_elems)]);
            let llheappartty = lltype_of(llheappart);
            lldestptrptr =
                bcx.build.PointerCast(lldestptrptr,
                                      tc::T_ptr(tc::T_ptr(llheappartty)));
            malloc(bcx, lldestptrptr, hp_shared, none);
            let lldestptr = bcx.build.Load(lldestptrptr);
            store_ptr(bcx, dst_copy(lldestptr), llheappart);
          }
        }
      }
      _ {
        bcx =
            store_imm(bcx, dest, trans_lit_common(bcx_ccx(bcx), lit), false);
      }
    }
    ret bcx;
}
// Translates a binary operation into `dest`. Only vector/string
// concatenation (`+`) is handled so far; the alt is deliberately
// non-exhaustive pending the TODO below.
fn trans_binary(cx: &@block_ctxt, dest: &dest, sp: &span, op: ast::binop,
                lhs: &@ast::expr, rhs: &@ast::expr) -> @block_ctxt {
    let bcx = cx;
    alt op {
      ast::add. {
        bcx =
            trans_vec::trans_concat(bcx, dest, sp,
                                    ty::expr_ty(bcx_tcx(bcx), rhs), lhs, rhs);
      }
    }
    // TODO: Many more to add here.
    ret bcx;
}
// Translates a `log` expression: compares the module's log-level global
// against `level`, and when logging is enabled, translates the operand
// into an alias destination and calls the log_type upcall with its tydesc.
fn trans_log(cx: &@block_ctxt, sp: &span, level: int, expr: &@ast::expr) ->
   @block_ctxt {
    // Returns (creating and caching on first use) the per-module log-level
    // global for the current module path.
    fn trans_log_level(lcx: &@local_ctxt) -> ValueRef {
        let modname = str::connect(lcx.module_path, "::");
        if lcx_ccx(lcx).module_data.contains_key(modname) {
            ret lcx_ccx(lcx).module_data.get(modname);
        }
        let s =
            link::mangle_internal_name_by_path_and_seq(lcx_ccx(lcx),
                                                       lcx.module_path,
                                                       "loglevel");
        let lllevelptr =
            llvm::LLVMAddGlobal(lcx.ccx.llmod, tc::T_int(), str::buf(s));
        // Mutable (the runtime can set it), initialized to 0.
        llvm::LLVMSetGlobalConstant(lllevelptr, LLFalse);
        llvm::LLVMSetInitializer(lllevelptr, tc::C_int(0));
        llvm::LLVMSetLinkage(lllevelptr,
                             lib::llvm::LLVMInternalLinkage as llvm::Linkage);
        lcx_ccx(lcx).module_data.insert(modname, lllevelptr);
        ret lllevelptr;
    }
    let bcx = cx;
    let lllevelptr = trans_log_level(bcx_lcx(bcx));
    let log_bcx = trans::new_scope_block_ctxt(bcx, "log");
    let next_bcx = trans::new_scope_block_ctxt(bcx, "next_log");
    // Branch into the log block only when the module level >= this log's
    // level.
    let should_log =
        bcx.build.ICmp(ll::LLVMIntSGE, bcx.build.Load(lllevelptr),
                       tc::C_int(level));
    bcx.build.CondBr(should_log, log_bcx.llbb, next_bcx.llbb);
    let expr_t = ty::expr_ty(bcx_tcx(log_bcx), expr);
    let arg_dest = dest_alias(bcx_tcx(log_bcx), expr_t);
    log_bcx = trans_expr(log_bcx, arg_dest, expr);
    let llarg = dest_llval(arg_dest);
    // NOTE(review): the PointerCast and get_tydesc below build into `bcx`,
    // whose block already ended with the CondBr above — looks as if they
    // were meant to target `log_bcx`; confirm before reusing this code.
    let llarg_i8 = bcx.build.PointerCast(llarg, T_ptr(T_i8()));
    let ti = none;
    let r2 = trans::get_tydesc(bcx, expr_t, false, ti);
    bcx = r2.bcx;
    let lltydesc = r2.val;
    log_bcx.build.Call(bcx_ccx(log_bcx).upcalls.log_type,
                       ~[bcx_fcx(bcx).lltaskptr, lltydesc, llarg_i8,
                         tc::C_int(level)]);
    log_bcx =
        trans::trans_block_cleanups(log_bcx, tc::find_scope_cx(log_bcx));
    log_bcx.build.Br(next_bcx.llbb);
    ret next_bcx;
}
// Translates a path expression into `dest`. Only locally-bound variables
// with a known stack slot are supported; upvars and all other definition
// kinds are unimplemented.
fn trans_path(bcx: &@block_ctxt, dest: &dest, path: &ast::path,
              id: ast::node_id) -> @block_ctxt {
    alt bcx_tcx(bcx).def_map.get(id) {
      ast::def_local(def_id) {
        alt bcx_fcx(bcx).lllocals.find(def_id.node) {
          none. { bcx_ccx(bcx).sess.unimpl("upvar in trans_path"); }
          some(llptr) {
            // TODO: Copy hooks.
            store_ptr(bcx, dest, llptr);
          }
        }
      }
      _ { bcx_ccx(bcx).sess.unimpl("def variant in trans_dps::trans_path"); }
    }
    ret bcx;
}
// Dispatches expression translation by AST variant. Only literals, log,
// binary ops and paths are handled; everything else fails.
fn trans_expr(bcx: &@block_ctxt, dest: &dest, expr: &@ast::expr) ->
   @block_ctxt {
    alt expr.node {
      // NOTE(review): trans_lit's returned block context is discarded
      // here and the incoming `bcx` returned instead — presumably
      // equivalent because trans_lit never switches blocks; confirm.
      ast::expr_lit(lit) { trans_lit(bcx, dest, *lit); ret bcx; }
      ast::expr_log(level, operand) {
        ret trans_log(bcx, expr.span, level, operand);
      }
      ast::expr_binary(op, lhs, rhs) {
        ret trans_binary(bcx, dest, expr.span, op, lhs, rhs);
      }
      ast::expr_path(path) { ret trans_path(bcx, dest, path, expr.id); }
      _ { fail "unhandled expr type in trans_expr"; }
    }
}

// Stub: translation of receive expressions was never implemented.
fn trans_recv(bcx: &@block_ctxt, dest: &dest, expr: &@ast::expr) ->
   @block_ctxt {
    ret bcx; // TODO
}
// Translates a block: allocates its locals, translates each statement in
// order (stopping at a terminator), translates the optional tail
// expression into `dest`, and finally runs the block's cleanups.
fn trans_block(cx: &@block_ctxt, dest: &dest, blk: &ast::blk) -> @block_ctxt {
    let bcx = cx;
    for each local: @ast::local in trans::block_locals(blk) {
        bcx = trans::alloc_local(bcx, local).bcx;
    }
    for stmt: @ast::stmt in blk.node.stmts {
        bcx = trans_stmt(bcx, stmt);
        // If we hit a terminator, control won't go any further so
        // we're in dead-code land. Stop here.
        if trans::is_terminated(bcx) { ret bcx; }
    }
    // The tail expression, if any, produces the block's value.
    alt blk.node.expr {
      some(e) { bcx = trans_expr(bcx, dest, e); }
      none. {/* no-op */ }
    }
    bcx = trans::trans_block_cleanups(bcx, tc::find_scope_cx(bcx));
    ret bcx;
}
// AST substructure translation, without destinations
// Common setup code shared between the crate-constant literal string case and
// the block-local literal string case. We don't use destination-passing style
// since that doesn't work for crate constants.
//
// If |expand| is true, we never spill to the heap. This should be used
// whenever the destination size isn't fixed.
// Builds the constant data for a unique string literal, returning the
// stack part and, when the string doesn't fit inline, a heap part. With
// `expand` set, the stack part holds the whole string (no heap spill) —
// used when the destination's size isn't fixed. Layout follows the
// interior-vector ABI: (length, alloc-length, elements), with a
// terminating NUL byte counted in the length.
fn trans_lit_str_common(ccx: &@crate_ctxt, s: &str, expand: bool) ->
   {stack: ValueRef, heap: option[ValueRef]} {
    let llstackpart;
    let llheappartopt;
    let len = str::byte_len(s);
    // Constant byte array for the string plus its NUL terminator.
    let array = ~[];
    for ch: u8 in s { array += ~[tc::C_u8(ch as uint)]; }
    array += ~[tc::C_u8(0u)];
    if expand {
        llstackpart =
            tc::C_struct(~[tc::C_uint(len + 1u), tc::C_uint(len + 1u),
                           tc::C_array(tc::T_i8(), array)]);
        llheappartopt = none;
    } else if (len < abi::ivec_default_length - 1u) { // minus one for the null
        // Fits on the stack: pad the element array to the default ivec
        // length.
        while vec::len(array) < abi::ivec_default_length {
            array += ~[tc::C_u8(0u)];
        }
        llstackpart =
            tc::C_struct(~[tc::C_uint(len + 1u),
                           tc::C_uint(abi::ivec_default_length),
                           tc::C_array(tc::T_i8(), array)]);
        llheappartopt = none;
    } else {
        // Too long: stack part is a heapified stub (length 0) whose
        // pointer slot is filled in at runtime; data lives in a separate
        // heap-part constant.
        let llheappart =
            tc::C_struct(~[tc::C_uint(len), tc::C_array(tc::T_i8(), array)]);
        llstackpart =
            tc::C_struct(~[tc::C_uint(0u),
                           tc::C_uint(abi::ivec_default_length),
                           tc::C_null(tc::T_ptr(lltype_of(llheappart)))]);
        llheappartopt =
            some(mk_const(ccx, "const_istr_heap", false, llheappart));
    }
    ret {stack: mk_const(ccx, "const_istr_stack", false, llstackpart),
         heap: llheappartopt};
}
// As above, we don't use destination-passing style here.
// Builds the LLVM constant for a (non-unique-string) literal. Unique
// strings must go through trans_lit_str_common instead.
fn trans_lit_common(ccx: &@crate_ctxt, lit: &ast::lit) -> ValueRef {
    alt lit.node {
      ast::lit_int(i) { ret tc::C_int(i); }
      ast::lit_uint(u) { ret tc::C_int(u as int); }
      ast::lit_mach_int(tm, i) {
        // FIXME: the entire handling of mach types falls apart
        // if target int width is larger than host, at the moment;
        // re-do the mach-int types using 'big' when that works.
        let t = tc::T_int();
        let s = LLTrue; // signedness flag; cleared for the u* types
        alt tm {
          ast::ty_u8. { t = tc::T_i8(); s = LLFalse; }
          ast::ty_u16. { t = tc::T_i16(); s = LLFalse; }
          ast::ty_u32. { t = tc::T_i32(); s = LLFalse; }
          ast::ty_u64. { t = tc::T_i64(); s = LLFalse; }
          ast::ty_i8. { t = tc::T_i8(); }
          ast::ty_i16. { t = tc::T_i16(); }
          ast::ty_i32. { t = tc::T_i32(); }
          ast::ty_i64. { t = tc::T_i64(); }
        }
        ret tc::C_integral(t, i as uint, s);
      }
      ast::lit_float(fs) { ret tc::C_float(fs); }
      ast::lit_mach_float(tm, s) {
        let t = tc::T_float();
        alt tm {
          ast::ty_f32. { t = tc::T_f32(); }
          ast::ty_f64. { t = tc::T_f64(); }
        }
        ret tc::C_floating(s, t);
      }
      ast::lit_char(c) {
        ret tc::C_integral(tc::T_char(), c as uint, LLFalse);
      }
      ast::lit_bool(b) { ret tc::C_bool(b); }
      ast::lit_nil. { ret tc::C_nil(); }
      ast::lit_str(s, ast::sk_rc.) { ret tc::C_str(ccx, s); }
      ast::lit_str(s, ast::sk_unique.) {
        fail "unique str in trans_lit_common";
      }
    }
}
// Translates a local's initializer (if any) directly into the local's
// stack slot, registering a cleanup for the slot first. Assignment
// initializers copy; move initializers move.
fn trans_init_local(bcx: &@block_ctxt, local: &@ast::local) -> @block_ctxt {
    let llptr = bcx_fcx(bcx).lllocals.get(local.node.pat.id); // FIXME DESTR
    let t = type_of_node(bcx_ccx(bcx), local.node.pat.id);
    tc::add_clean(bcx, llptr, t);
    alt local.node.init {
      some(init) {
        alt init.op {
          ast::init_assign. {
            ret trans_expr(bcx, dest_copy(bcx_tcx(bcx), llptr, t), init.expr);
          }
          ast::init_move. {
            ret trans_expr(bcx, dest_move(bcx_tcx(bcx), llptr, t), init.expr);
          }
        }
      }
      none. { ret bcx; }
    }
}
// Translates a single statement: an expression statement goes into a
// throwaway alias destination; declarations initialize locals or
// translate nested items.
fn trans_stmt(cx: &@block_ctxt, stmt: &@ast::stmt) -> @block_ctxt {
    let bcx = cx;
    alt stmt.node {
      ast::stmt_expr(e, _) {
        // Result is unused; give it a temporary alias destination.
        let tmp = dest_alias(bcx_tcx(bcx), ty::expr_ty(bcx_tcx(bcx), e));
        ret trans_expr(bcx, tmp, e);
      }
      ast::stmt_decl(d, _) {
        alt d.node {
          ast::decl_local(locals) {
            for local: @ast::local in locals {
                bcx = trans_init_local(bcx, local);
            }
          }
          ast::decl_item(item) { trans::trans_item(bcx_lcx(bcx), *item); }
        }
        ret bcx;
      }
    }
}

View File

@ -1,219 +0,0 @@
// Translation of vector operations to LLVM IR, in destination-passing style.
import back::abi;
import lib::llvm::llvm;
import llvm::ValueRef;
import middle::trans;
import middle::trans_common;
import middle::trans_dps;
import middle::ty;
import syntax::ast;
import syntax::codemap::span;
import trans::alloca;
import trans::load_inbounds;
import trans::new_sub_block_ctxt;
import trans::type_of_or_i8;
import trans_common::block_ctxt;
import trans_common::struct_elt;
import trans_common::C_int;
import trans_common::C_null;
import trans_common::C_uint;
import trans_common::T_int;
import trans_common::T_ivec_heap;
import trans_common::T_ivec_heap_part;
import trans_common::T_opaque_ivec;
import trans_common::T_ptr;
import trans_common::bcx_ccx;
import trans_common::bcx_tcx;
import trans_dps::dest;
import trans_dps::llsize_of;
import trans_dps::mk_temp;
import std::option::none;
import std::option::some;
import tc = middle::trans_common;
// Returns the length of an interior vector and a pointer to its first
// element, in that order.
//
// TODO: We can optimize this in the cases in which we statically know the
// vector must be on the stack.
fn get_len_and_data(cx: &@block_ctxt, t: ty::t, llvecptr: ValueRef) ->
   {bcx: @block_ctxt, len: ValueRef, data: ValueRef} {
    let bcx = cx;
    // If this interior vector has dynamic size, we can't assume anything
    // about the LLVM type of the value passed in, so we cast it to an
    // opaque vector type.
    let unit_ty = ty::sequence_element_type(bcx_tcx(bcx), t);
    let v;
    if ty::type_has_dynamic_size(bcx_tcx(bcx), unit_ty) {
        v = bcx.build.PointerCast(llvecptr, T_ptr(T_opaque_ivec()));
    } else { v = llvecptr; }
    let llunitty = type_of_or_i8(bcx, unit_ty);
    let stack_len =
        load_inbounds(bcx, v, ~[C_int(0), C_uint(abi::ivec_elt_len)]);
    let stack_elem =
        bcx.build.InBoundsGEP(v,
                              ~[C_int(0), C_uint(abi::ivec_elt_elems),
                                C_int(0)]);
    // A stack length of zero marks a heapified vector (per the ivec ABI's
    // stub encoding).
    let on_heap = bcx.build.ICmp(lib::llvm::LLVMIntEQ, stack_len, C_int(0));
    let on_heap_cx = new_sub_block_ctxt(bcx, "on_heap");
    let next_cx = new_sub_block_ctxt(bcx, "next");
    bcx.build.CondBr(on_heap, on_heap_cx.llbb, next_cx.llbb);
    let heap_stub =
        on_heap_cx.build.PointerCast(v, T_ptr(T_ivec_heap(llunitty)));
    let heap_ptr =
        load_inbounds(on_heap_cx, heap_stub,
                      ~[C_int(0), C_uint(abi::ivec_heap_stub_elt_ptr)]);
    // Check whether the heap pointer is null. If it is, the vector length
    // is truly zero.
    let llstubty = T_ivec_heap(llunitty);
    let llheapptrty = struct_elt(llstubty, abi::ivec_heap_stub_elt_ptr);
    let heap_ptr_is_null =
        on_heap_cx.build.ICmp(lib::llvm::LLVMIntEQ, heap_ptr,
                              C_null(T_ptr(llheapptrty)));
    let zero_len_cx = new_sub_block_ctxt(bcx, "zero_len");
    let nonzero_len_cx = new_sub_block_ctxt(bcx, "nonzero_len");
    on_heap_cx.build.CondBr(heap_ptr_is_null, zero_len_cx.llbb,
                            nonzero_len_cx.llbb);
    // Technically this context is unnecessary, but it makes this function
    // clearer.
    let zero_len = C_int(0);
    let zero_elem = C_null(T_ptr(llunitty));
    zero_len_cx.build.Br(next_cx.llbb);
    // If we're here, then we actually have a heapified vector.
    let heap_len =
        load_inbounds(nonzero_len_cx, heap_ptr,
                      ~[C_int(0), C_uint(abi::ivec_heap_elt_len)]);
    let heap_elem =
        {
            let v = ~[C_int(0), C_uint(abi::ivec_heap_elt_elems), C_int(0)];
            nonzero_len_cx.build.InBoundsGEP(heap_ptr, v)
        };
    nonzero_len_cx.build.Br(next_cx.llbb);
    // Now we can figure out the length of |v| and get a pointer to its
    // first element. Phi nodes merge the three incoming cases: on-stack,
    // heapified-but-empty, and heapified-with-data.
    let len =
        next_cx.build.Phi(T_int(), ~[stack_len, zero_len, heap_len],
                          ~[bcx.llbb, zero_len_cx.llbb, nonzero_len_cx.llbb]);
    let elem =
        next_cx.build.Phi(T_ptr(llunitty),
                          ~[stack_elem, zero_elem, heap_elem],
                          ~[bcx.llbb, zero_len_cx.llbb, nonzero_len_cx.llbb]);
    ret {bcx: next_cx, len: len, data: elem};
}
// Translates vector/string concatenation (`lhs + rhs`) into `in_dest`,
// choosing at runtime between an on-stack result (when the combined byte
// length fits the default ivec inline capacity) and a shared-heap
// allocation, then memmoving both operands into place. For strings, the
// LHS's trailing NUL is dropped before concatenation.
fn trans_concat(cx: &@block_ctxt, in_dest: &dest, sp: &span, t: ty::t,
                lhs: &@ast::expr, rhs: &@ast::expr) -> @block_ctxt {
    let bcx = cx;
    // TODO: Detect "a = a + b" and promote to trans_append.
    // TODO: Detect "a + [ literal ]" and optimize to copying the literal
    // elements in directly.
    // Note: this shadows the `t` parameter with the LHS's type.
    let t = ty::expr_ty(bcx_tcx(bcx), lhs);
    let skip_null = ty::type_is_str(bcx_tcx(bcx), t);
    // Translate the LHS and RHS. Pull out their length and data.
    let lhs_tmp = trans_dps::dest_alias(bcx_tcx(bcx), t);
    bcx = trans_dps::trans_expr(bcx, lhs_tmp, lhs);
    let lllhsptr = trans_dps::dest_ptr(lhs_tmp);
    let rhs_tmp = trans_dps::dest_alias(bcx_tcx(bcx), t);
    bcx = trans_dps::trans_expr(bcx, rhs_tmp, rhs);
    let llrhsptr = trans_dps::dest_ptr(rhs_tmp);
    let r0 = get_len_and_data(bcx, t, lllhsptr);
    bcx = r0.bcx;
    let lllhslen = r0.len;
    let lllhsdata = r0.data;
    r0 = get_len_and_data(bcx, t, llrhsptr);
    bcx = r0.bcx;
    let llrhslen = r0.len;
    let llrhsdata = r0.data;
    // Strings: don't copy the LHS's NUL terminator into the middle.
    if skip_null { lllhslen = bcx.build.Sub(lllhslen, C_int(1)); }
    // Allocate the destination.
    let r1 = trans_dps::spill_alias(bcx, in_dest, t);
    bcx = r1.bcx;
    let dest = r1.dest;
    let unit_t = ty::sequence_element_type(bcx_tcx(bcx), t);
    let unit_sz = trans_dps::size_of(bcx_ccx(bcx), sp, unit_t);
    let stack_elems_sz = unit_sz * abi::ivec_default_length;
    let lldestptr = trans_dps::dest_ptr(dest);
    let llunitty = trans::type_of(bcx_ccx(bcx), sp, unit_t);
    // Decide whether to allocate the result on the stack or on the heap.
    let llnewlen = bcx.build.Add(lllhslen, llrhslen);
    let llonstack =
        bcx.build.ICmp(lib::llvm::LLVMIntULE, llnewlen,
                       C_uint(stack_elems_sz));
    let on_stack_bcx = new_sub_block_ctxt(bcx, "on_stack");
    let on_heap_bcx = new_sub_block_ctxt(bcx, "on_heap");
    bcx.build.CondBr(llonstack, on_stack_bcx.llbb, on_heap_bcx.llbb);
    // On-stack case: write the new length and inline capacity directly.
    let next_bcx = new_sub_block_ctxt(bcx, "next");
    trans::store_inbounds(on_stack_bcx, llnewlen, lldestptr,
                          ~[C_int(0), C_uint(abi::ivec_elt_len)]);
    trans::store_inbounds(on_stack_bcx, C_uint(stack_elems_sz), lldestptr,
                          ~[C_int(0), C_uint(abi::ivec_elt_alen)]);
    let llonstackdataptr =
        on_stack_bcx.build.InBoundsGEP(lldestptr,
                                       ~[C_int(0),
                                         C_uint(abi::ivec_elt_elems),
                                         C_int(0)]);
    on_stack_bcx.build.Br(next_bcx.llbb);
    // On-heap case: mark the stub (length 0), allocate the heap part on
    // the shared heap (payload plus one int for the length word), and
    // store the new length there.
    let llheappartty = tc::T_ivec_heap(llunitty);
    let lldeststubptr =
        on_heap_bcx.build.PointerCast(lldestptr, tc::T_ptr(llheappartty));
    trans::store_inbounds(on_heap_bcx, C_int(0), lldeststubptr,
                          ~[C_int(0), C_uint(abi::ivec_elt_len)]);
    trans::store_inbounds(on_heap_bcx, llnewlen, lldeststubptr,
                          ~[C_int(0), C_uint(abi::ivec_elt_alen)]);
    let llheappartptrptr =
        on_heap_bcx.build.InBoundsGEP(lldeststubptr,
                                      ~[C_int(0),
                                        C_uint(abi::ivec_elt_elems)]);
    let llsizeofint = C_uint(llsize_of(bcx_ccx(bcx), tc::T_int()));
    on_heap_bcx =
        trans_dps::malloc(on_heap_bcx, llheappartptrptr, trans_dps::hp_shared,
                          some(on_heap_bcx.build.Add(llnewlen, llsizeofint)));
    let llheappartptr = on_heap_bcx.build.Load(llheappartptrptr);
    trans::store_inbounds(on_heap_bcx, llnewlen, llheappartptr,
                          ~[C_int(0), C_uint(abi::ivec_heap_elt_len)]);
    let llheapdataptr =
        on_heap_bcx.build.InBoundsGEP(llheappartptr,
                                      ~[C_int(0),
                                        C_uint(abi::ivec_heap_elt_elems),
                                        C_int(0)]);
    on_heap_bcx.build.Br(next_bcx.llbb);
    // Perform the memmove: merge the two data pointers with a phi, then
    // copy LHS followed by RHS.
    let lldataptr =
        next_bcx.build.Phi(T_ptr(llunitty),
                           ~[llonstackdataptr, llheapdataptr],
                           ~[on_stack_bcx.llbb, on_heap_bcx.llbb]);
    trans_dps::memmove(next_bcx, lldataptr, lllhsdata, lllhslen);
    trans_dps::memmove(next_bcx,
                       next_bcx.build.InBoundsGEP(lldataptr, ~[lllhslen]),
                       llrhsdata, llrhslen);
    ret next_bcx;
}

View File

@ -17,9 +17,7 @@ mod middle {
mod trans_common;
mod trans;
mod trans_alt;
mod trans_dps;
mod trans_objects;
mod trans_vec;
mod ty;
mod ast_map;
mod resolve;