// rust/src/comp/middle/shape.rs
// A "shape" is a compact encoding of a type that is used by interpreted glue.
// This substitutes for the runtime tags used by e.g. MLs.
import lib::llvm::llvm;
import lib::llvm::{True, False};
2011-09-12 18:13:28 -05:00
import lib::llvm::llvm::{ModuleRef, TypeRef, ValueRef};
import driver::session;
import driver::session::session;
2011-09-12 18:13:28 -05:00
import middle::{trans, trans_common};
import middle::trans_common::{crate_ctxt, val_ty, C_bytes, C_int,
C_named_struct, C_struct, T_tag_variant,
block_ctxt, result, rslt, bcx_ccx, bcx_tcx,
type_has_static_size, umax, umin, align_to,
tydesc_info};
import back::abi;
2011-08-04 12:46:10 -05:00
import middle::ty;
2011-11-10 10:41:42 -06:00
import middle::ty::field;
2011-08-04 12:46:10 -05:00
import syntax::ast;
import syntax::ast_util::dummy_sp;
2011-08-04 12:46:10 -05:00
import syntax::util::interner;
import util::common;
import trans_build::{Load, Store, Add, GEPi};
import syntax::codemap::span;
2011-08-04 12:46:10 -05:00
import core::{vec, str};
2011-08-04 12:46:10 -05:00
import std::map::hashmap;
import option::{none, some};
2011-08-04 12:46:10 -05:00
import ty_ctxt = middle::ty::ctxt;
// Identity of one resource instantiation: the resource's def_id together
// with its content type after type-parameter substitution. Used as the
// interner key so each distinct instantiation gets one dtor-table slot.
type res_info = {did: ast::def_id, t: ty::t};

// Crate-wide shape-generation state.
type ctxt =
    {mutable next_tag_id: u16,                   // next tag (enum) shape id to assign
     pad: u16,                                   // NOTE(review): appears unused — presumably record padding; confirm
     tag_id_to_index: hashmap<ast::def_id, u16>, // tag def_id -> assigned shape id
     mutable tag_order: [ast::def_id],           // tags in id-assignment order
     resources: interner::interner<res_info>,    // interned resource instantiations
     llshapetablesty: TypeRef,                   // LLVM type of the "shapes" global
     llshapetables: ValueRef};                   // the "shapes" global (initialized later)

// Shape opcodes. A shape is a byte string; these are the opcode bytes,
// some of which are followed by operands (see shape_of below).
const shape_u8: u8 = 0u8;
const shape_u16: u8 = 1u8;
const shape_u32: u8 = 2u8;
const shape_u64: u8 = 3u8;
const shape_i8: u8 = 4u8;
const shape_i16: u8 = 5u8;
const shape_i32: u8 = 6u8;
const shape_i64: u8 = 7u8;
const shape_f32: u8 = 8u8;
const shape_f64: u8 = 9u8;
// (10 is currently unused, was evec)
const shape_vec: u8 = 11u8;
const shape_tag: u8 = 12u8;
const shape_box: u8 = 13u8;
// (14..16 have no constants here)
const shape_struct: u8 = 17u8;
const shape_box_fn: u8 = 18u8;
const shape_UNUSED: u8 = 19u8;
const shape_res: u8 = 20u8;
const shape_var: u8 = 21u8;
const shape_uniq: u8 = 22u8;
const shape_opaque_closure_ptr: u8 = 23u8; // the closure itself.
const shape_iface: u8 = 24u8;
const shape_uniq_fn: u8 = 25u8;
const shape_stack_fn: u8 = 26u8;
const shape_bare_fn: u8 = 27u8;
const shape_tydesc: u8 = 28u8;
const shape_send_tydesc: u8 = 29u8;

// u8-typed wrapper over the uint-taking trans_common::C_u8.
// FIXME: This is a bad API in trans_common.
fn C_u8(n: u8) -> ValueRef { ret trans_common::C_u8(n as uint); }
2011-08-04 12:46:10 -05:00
// Interner hash for res_info: a djb2-style fold (h = h*33 + x) over the
// crate id, node id, and the type handle.
fn hash_res_info(ri: res_info) -> uint {
    let h = 5381u;
    h *= 33u;
    h += ri.did.crate as uint;
    h *= 33u;
    h += ri.did.node as uint;
    h *= 33u;
    h += ri.t as uint;
    ret h;
}

// Interner equality for res_info: same def_id and same content type.
fn eq_res_info(a: res_info, b: res_info) -> bool {
    ret a.did.crate == b.did.crate && a.did.node == b.did.node && a.t == b.t;
}

// Adds a constant global named `name` to ccx.llmod, initialized to
// `llval`. If `internal` is set, the global gets internal linkage so it
// is not exported from the object file.
fn mk_global(ccx: @crate_ctxt, name: str, llval: ValueRef, internal: bool) ->
   ValueRef {
    let llglobal =
        str::as_buf(name,
                    {|buf|
                        lib::llvm::llvm::LLVMAddGlobal(ccx.llmod,
                                                       val_ty(llval), buf)
                    });
    lib::llvm::llvm::LLVMSetInitializer(llglobal, llval);
    lib::llvm::llvm::LLVMSetGlobalConstant(llglobal, True);
    if internal {
        lib::llvm::llvm::LLVMSetLinkage(llglobal,
                                        lib::llvm::LLVMInternalLinkage as
                                            lib::llvm::llvm::Linkage);
    }
    ret llglobal;
}
2012-01-19 16:24:03 -06:00
// Computes a set of variants of a enum that are guaranteed to have size and
// alignment at least as large as any other variant of the enum. This is an
2011-08-04 12:46:10 -05:00
// important performance optimization.
//
// TODO: Use this in dynamic_size_of() as well.
fn largest_variants(ccx: @crate_ctxt, tag_id: ast::def_id) -> [uint] {
2011-08-04 12:46:10 -05:00
// Compute the minimum and maximum size and alignment for each variant.
//
// TODO: We could do better here; e.g. we know that any variant that
// contains (T,T) must be as least as large as any variant that contains
// just T.
let ranges = [];
2011-08-04 12:46:10 -05:00
let variants = ty::tag_variants(ccx.tcx, tag_id);
for variant: ty::variant_info in *variants {
2011-08-04 12:46:10 -05:00
let bounded = true;
let {a: min_size, b: min_align} = {a: 0u, b: 0u};
for elem_t: ty::t in variant.args {
2011-08-04 12:46:10 -05:00
if ty::type_contains_params(ccx.tcx, elem_t) {
// TODO: We could do better here; this causes us to
// conservatively assume that (int, T) has minimum size 0,
// when in fact it has minimum size sizeof(int).
bounded = false;
} else {
// Could avoid this check: the constraint should
// follow from how elem_t doesn't contain params.
// (Could add a postcondition to type_contains_params,
// once we implement Issue #586.)
check (trans_common::type_has_static_size(ccx, elem_t));
let llty = trans::type_of(ccx, dummy_sp(), elem_t);
min_size += llsize_of_real(ccx, llty);
min_align += llalign_of_real(ccx, llty);
2011-08-04 12:46:10 -05:00
}
}
ranges +=
[{size: {min: min_size, bounded: bounded},
align: {min: min_align, bounded: bounded}}];
2011-08-04 12:46:10 -05:00
}
// Initialize the candidate set to contain all variants.
let candidates = [mutable];
for variant in *variants { candidates += [mutable true]; }
2011-08-04 12:46:10 -05:00
// Do a pairwise comparison among all variants still in the candidate set.
// Throw out any variant that we know has size and alignment at least as
// small as some other variant.
let i = 0u;
2011-08-15 18:38:23 -05:00
while i < vec::len(ranges) - 1u {
if candidates[i] {
2011-08-04 12:46:10 -05:00
let j = i + 1u;
while j < vec::len(ranges) {
if candidates[j] {
if ranges[i].size.bounded && ranges[i].align.bounded &&
ranges[j].size.bounded && ranges[j].align.bounded {
if ranges[i].size >= ranges[j].size &&
ranges[i].align >= ranges[j].align {
2011-08-04 12:46:10 -05:00
// Throw out j.
candidates[j] = false;
} else if ranges[j].size >= ranges[i].size &&
ranges[j].align >= ranges[j].align {
2011-08-04 12:46:10 -05:00
// Throw out i.
candidates[i] = false;
2011-08-04 12:46:10 -05:00
}
}
}
j += 1u;
}
}
i += 1u;
}
// Return the resulting set.
let result = [];
2011-08-04 12:46:10 -05:00
i = 0u;
2011-08-15 18:38:23 -05:00
while i < vec::len(candidates) {
if candidates[i] { result += [i]; }
2011-08-04 12:46:10 -05:00
i += 1u;
}
ret result;
}
2012-01-19 16:24:03 -06:00
// Computes the static size of a enum, without using mk_tup(), which is
2011-08-04 12:46:10 -05:00
// bad for performance.
//
// TODO: Migrate trans over to use this.
// Rounds `size` up to the next multiple of `align`. `align` must be a
// power of two for the mask trick below; the assert only checks >= 1.
fn round_up(size: u16, align: u8) -> u16 {
    assert (align >= 1u8);
    let alignment = align as u16;
    // `&` binds more loosely than `+`/`-`, so this parses as
    // (size - 1u16 + alignment) & !(alignment - 1u16) — the standard
    // power-of-two round-up.
    ret size - 1u16 + alignment & !(alignment - 1u16);
}
// A size/alignment pair, in bytes.
type size_align = {size: u16, align: u8};

// Computes the static size and alignment of the enum `did`, looking only
// at the candidate variants in `largest_variants` (as computed by
// largest_variants() above).
fn compute_static_tag_size(ccx: @crate_ctxt, largest_variants: [uint],
                           did: ast::def_id) -> size_align {
    let max_size = 0u16;
    let max_align = 1u8;
    let variants = ty::tag_variants(ccx.tcx, did);
    for vid: uint in largest_variants {
        // We increment a "virtual data pointer" to compute the size.
        let lltys = [];
        for typ: ty::t in variants[vid].args {
            // FIXME: there should really be a postcondition
            // on tag_variants that would obviate the need for
            // this check. (Issue #586)
            check (trans_common::type_has_static_size(ccx, typ));
            lltys += [trans::type_of(ccx, dummy_sp(), typ)];
        }

        // Model the variant body as an LLVM struct of its argument types
        // and take the max over the candidates.
        let llty = trans_common::T_struct(lltys);
        let dp = llsize_of_real(ccx, llty) as u16;
        let variant_align = llalign_of_real(ccx, llty) as u8;
        if max_size < dp { max_size = dp; }
        if max_align < variant_align { max_align = variant_align; }
    }

    // Add space for the enum discriminant if applicable.
    // FIXME (issue #792): This is wrong. If the enum starts with an 8 byte
    // aligned quantity, we don't align it.
    if vec::len(*variants) > 1u {
        let variant_t = T_tag_variant(ccx);
        max_size += llsize_of_real(ccx, variant_t) as u16;
        let align = llalign_of_real(ccx, variant_t) as u8;
        if max_align < align { max_align = align; }
    }

    ret {size: max_size, align: max_align};
}
2012-01-19 19:56:05 -06:00
// Classification of an enum for shape purposes.
enum tag_kind {
    tk_unit,    // exactly one variant, no arguments
    tk_enum,    // several variants, none with arguments (C-like)
    tk_complex, // at least one variant carries arguments (or no variants)
}

// Classifies the enum `did`. Note that a zero-variant enum is reported
// as tk_complex.
fn tag_kind(ccx: @crate_ctxt, did: ast::def_id) -> tag_kind {
    let variants = ty::tag_variants(ccx.tcx, did);
    if vec::len(*variants) == 0u { ret tk_complex; }
    for v: ty::variant_info in *variants {
        if vec::len(v.args) > 0u { ret tk_complex; }
    }
    if vec::len(*variants) == 1u { ret tk_unit; }
    ret tk_enum;
}
// Returns the shape code for a signed int of pointer size on this
// architecture.
fn s_int(tcx: ty_ctxt) -> u8 {
    ret alt tcx.sess.targ_cfg.arch {
          session::arch_x86 { shape_i32 }
          session::arch_x86_64 { shape_i64 }
          session::arch_arm { shape_i32 }
        };
}

// Returns the shape code for an unsigned int of pointer size on this
// architecture.
fn s_uint(tcx: ty_ctxt) -> u8 {
    ret alt tcx.sess.targ_cfg.arch {
          session::arch_x86 { shape_u32 }
          session::arch_x86_64 { shape_u64 }
          session::arch_arm { shape_u32 }
        };
}

// Returns the shape code for the default float type — f64 on every
// supported architecture.
fn s_float(tcx: ty_ctxt) -> u8 {
    ret alt tcx.sess.targ_cfg.arch {
          session::arch_x86 { shape_f64 }
          session::arch_x86_64 { shape_f64 }
          session::arch_arm { shape_f64 }
        };
}

// Shape code for an enum discriminant; currently a pointer-sized int.
fn s_variant_tag_t(tcx: ty_ctxt) -> u8 {
    ret s_int(tcx);
}

// Shape code for a shared type descriptor.
fn s_tydesc(_tcx: ty_ctxt) -> u8 {
    ret shape_tydesc;
}

// Shape code for a sendable type descriptor.
fn s_send_tydesc(_tcx: ty_ctxt) -> u8 {
    ret shape_send_tydesc;
}
// Creates an empty shape context for a crate. The "shapes" global is
// declared here with an opaque named struct type; its real body and
// initializer are filled in by gen_shape_tables once every tag and
// resource has been seen.
fn mk_ctxt(llmod: ModuleRef) -> ctxt {
    let llshapetablesty = trans_common::T_named_struct("shapes");
    let llshapetables =
        str::as_buf("shapes",
                    {|buf|
                        lib::llvm::llvm::LLVMAddGlobal(llmod, llshapetablesty,
                                                       buf)
                    });

    ret {mutable next_tag_id: 0u16,
         pad: 0u16,
         tag_id_to_index: common::new_def_hash(),
         mutable tag_order: [],
         resources: interner::mk(hash_res_info, eq_res_info),
         llshapetablesty: llshapetablesty,
         llshapetables: llshapetables};
}
2011-09-12 05:39:38 -05:00
// Appends a bool to a shape byte string as 1 or 0.
fn add_bool(&dest: [u8], val: bool) { dest += [if val { 1u8 } else { 0u8 }]; }

// Appends a u16 to a shape byte string, low byte first (little-endian).
// NOTE(review): this relies on `as` binding more loosely than `&`/`>>` in
// this dialect, i.e. (val & 0xffu16) as u8 and (val >> 8u16) as u8 —
// confirm against the grammar of this compiler version.
fn add_u16(&dest: [u8], val: u16) {
    dest += [val & 0xffu16 as u8, val >> 8u16 as u8];
}

// Appends a u16-length-prefixed sub-string of shape bytes.
fn add_substr(&dest: [u8], src: [u8]) {
    add_u16(dest, vec::len(src) as u16);
    dest += src;
}
// Computes the shape byte string for the type `t`. `ty_param_map` lists,
// in order, the type-parameter indices that may occur in `t`; a ty_param
// is encoded as shape_var followed by its position in this map.
fn shape_of(ccx: @crate_ctxt, t: ty::t, ty_param_map: [uint]) -> [u8] {
    let s = [];
    alt ty::struct(ccx.tcx, t) {
      // Scalars are a single opcode byte.
      ty::ty_nil | ty::ty_bool | ty::ty_uint(ast::ty_u8) |
      ty::ty_bot { s += [shape_u8]; }
      ty::ty_int(ast::ty_i) { s += [s_int(ccx.tcx)]; }
      ty::ty_float(ast::ty_f) { s += [s_float(ccx.tcx)]; }
      ty::ty_uint(ast::ty_u) | ty::ty_ptr(_) |
      ty::ty_native(_) { s += [s_uint(ccx.tcx)]; }
      ty::ty_type { s += [s_tydesc(ccx.tcx)]; }
      ty::ty_send_type { s += [s_send_tydesc(ccx.tcx)]; }
      ty::ty_int(ast::ty_i8) { s += [shape_i8]; }
      ty::ty_uint(ast::ty_u16) { s += [shape_u16]; }
      ty::ty_int(ast::ty_i16) { s += [shape_i16]; }
      ty::ty_uint(ast::ty_u32) { s += [shape_u32]; }
      ty::ty_int(ast::ty_i32) | ty::ty_int(ast::ty_char) {s += [shape_i32];}
      ty::ty_uint(ast::ty_u64) { s += [shape_u64]; }
      ty::ty_int(ast::ty_i64) { s += [shape_i64]; }
      ty::ty_float(ast::ty_f32) { s += [shape_f32]; }
      ty::ty_float(ast::ty_f64) { s += [shape_f64]; }
      ty::ty_str {
        // A string is encoded as a vector of POD u8 units.
        s += [shape_vec];
        add_bool(s, true); // type is POD
        let unit_ty = ty::mk_mach_uint(ccx.tcx, ast::ty_u8);
        add_substr(s, shape_of(ccx, unit_ty, ty_param_map));
      }
      ty::ty_tag(did, tps) {
        alt tag_kind(ccx, did) {
          tk_unit {
            // FIXME: For now we do this.
            s += [s_variant_tag_t(ccx.tcx)];
          }
          tk_enum { s += [s_variant_tag_t(ccx.tcx)]; }
          tk_complex {
            // Operands: the tag's table id, then a count and a
            // length-prefixed shape for each type argument.
            s += [shape_tag];

            let sub = [];

            // Assign the tag a table id on first sight; gen_tag_shapes
            // later walks tag_order to emit the per-tag records.
            let id;
            alt ccx.shape_cx.tag_id_to_index.find(did) {
              none {
                id = ccx.shape_cx.next_tag_id;
                ccx.shape_cx.tag_id_to_index.insert(did, id);
                ccx.shape_cx.tag_order += [did];
                ccx.shape_cx.next_tag_id += 1u16;
              }
              some(existing_id) { id = existing_id; }
            }
            add_u16(sub, id as u16);

            add_u16(sub, vec::len(tps) as u16);
            for tp: ty::t in tps {
                let subshape = shape_of(ccx, tp, ty_param_map);
                add_u16(sub, vec::len(subshape) as u16);
                sub += subshape;
            }

            s += sub;
          }
        }
      }
      ty::ty_box(mt) {
        s += [shape_box];
        add_substr(s, shape_of(ccx, mt.ty, ty_param_map));
      }
      ty::ty_uniq(mt) {
        s += [shape_uniq];
        add_substr(s, shape_of(ccx, mt.ty, ty_param_map));
      }
      ty::ty_vec(mt) {
        s += [shape_vec];
        // The POD flag lets the runtime skip per-element glue.
        add_bool(s, ty::type_is_pod(ccx.tcx, mt.ty));
        add_substr(s, shape_of(ccx, mt.ty, ty_param_map));
      }
      ty::ty_rec(fields) {
        // A record is a struct of its field shapes, in declaration order.
        s += [shape_struct];
        let sub = [];
        for f: field in fields {
            sub += shape_of(ccx, f.mt.ty, ty_param_map);
        }
        add_substr(s, sub);
      }
      ty::ty_tup(elts) {
        s += [shape_struct];
        let sub = [];
        for elt in elts {
            sub += shape_of(ccx, elt, ty_param_map);
        }
        add_substr(s, sub);
      }
      // NOTE(review): a native fn is encoded as a bare u32 word — presumably
      // just an opaque pointer-sized code slot; confirm against the runtime.
      ty::ty_native_fn(_, _) { s += [shape_u32]; }
      ty::ty_iface(_, _) { s += [shape_iface]; }
      ty::ty_res(did, raw_subt, tps) {
        // Operands: the interned resource id, a count and length-prefixed
        // shape per type argument, then the substituted content shape.
        let subt = ty::substitute_type_params(ccx.tcx, tps, raw_subt);
        let ri = {did: did, t: subt};
        let id = interner::intern(ccx.shape_cx.resources, ri);

        s += [shape_res];
        add_u16(s, id as u16);
        add_u16(s, vec::len(tps) as u16);
        for tp: ty::t in tps {
            add_substr(s, shape_of(ccx, tp, ty_param_map));
        }
        add_substr(s, shape_of(ccx, subt, ty_param_map));
      }
      ty::ty_var(n) {
        // Type variables must have been resolved before shape generation.
        fail "shape_of ty_var";
      }
      ty::ty_param(n, _) {
        // Find the type parameter in the parameter list.
        alt vec::position(n, ty_param_map) {
          some(i) { s += [shape_var, i as u8]; }
          none { fail "ty param not found in ty_param_map"; }
        }
      }
      // Function shapes are distinguished by their proto (where the
      // environment lives).
      ty::ty_fn({proto: ast::proto_box, _}) {
        s += [shape_box_fn];
      }
      ty::ty_fn({proto: ast::proto_uniq, _}) {
        s += [shape_uniq_fn];
      }
      ty::ty_fn({proto: ast::proto_block, _}) |
      ty::ty_fn({proto: ast::proto_any, _}) {
        s += [shape_stack_fn];
      }
      ty::ty_fn({proto: ast::proto_bare, _}) {
        s += [shape_bare_fn];
      }
      ty::ty_opaque_closure_ptr(_) {
        s += [shape_opaque_closure_ptr];
      }
    }
    ret s;
}
// FIXME: We might discover other variants as we traverse these. Handle this.
// Concatenates the shapes of one variant's argument types. The enclosing
// enum has `ty_param_count` type parameters, mapped identically to
// shape_var slots 0..ty_param_count.
fn shape_of_variant(ccx: @crate_ctxt, v: ty::variant_info,
                    ty_param_count: uint) -> [u8] {
    // Build the identity map [0, 1, ..., ty_param_count - 1].
    let ty_param_map = [];
    let i = 0u;
    while i < ty_param_count { ty_param_map += [i]; i += 1u; }

    let s = [];
    for t: ty::t in v.args { s += shape_of(ccx, t, ty_param_map); }
    ret s;
}
//fn variant_names(ccx: @crate_ctxt, tag_id: ast::def_id) -> [str] {
// assert ast::local_crate == tag_id.crate;
// alt ccx.tcx.items.get(tag_id.node) {
// ast_map::node_item(@{node: ast::item_tag(variants, _), _}) {
// vec::map(variants) {|variant| variant.node.name}
// }
// }
//}
// Builds the "tag_shapes" global: a byte table laid out as
//   header (per-tag u16 offsets into the info space)
//   info   (per-tag records: variant count, largest-variant offset,
//           static size/align, per-variant shape offsets)
//   data   (length-prefixed variant shapes and NUL-terminated names)
//   lv_table (largest-variant index lists)
fn gen_tag_shapes(ccx: @crate_ctxt) -> ValueRef {
    // Loop over all the enum variants and write their shapes into a data
    // buffer. As we do this, it's possible for us to discover new tags, so we
    // must do this first.
    let i = 0u;
    let data = [];
    let offsets = []; // start offset in `data` of each variant, across all tags
    while i < vec::len(ccx.shape_cx.tag_order) {
        let did = ccx.shape_cx.tag_order[i];
        let variants = ty::tag_variants(ccx.tcx, did);
        let item_tyt = ty::lookup_item_type(ccx.tcx, did);
        let ty_param_count = vec::len(*item_tyt.bounds);

        vec::iter(*variants) {|v|
            offsets += [vec::len(data) as u16];

            let variant_shape = shape_of_variant(ccx, v, ty_param_count);
            add_substr(data, variant_shape);

            // NUL-terminated variant name follows the shape.
            let zname = str::bytes(v.name) + [0u8];
            add_substr(data, zname);
        }

        i += 1u;
    }

    // Now calculate the sizes of the header space (which contains offsets to
    // info records for each enum) and the info space (which contains offsets
    // to each variant shape). As we do so, build up the header.

    let header = [];
    let info = [];
    let header_sz = 2u16 * ccx.shape_cx.next_tag_id; // one u16 per tag
    let data_sz = vec::len(data) as u16;

    let info_sz = 0u16;
    for did_: ast::def_id in ccx.shape_cx.tag_order {
        let did = did_; // Satisfy alias checker.
        let num_variants = vec::len(*ty::tag_variants(ccx.tcx, did)) as u16;
        add_u16(header, header_sz + info_sz);
        // Per-tag info record size: (num_variants + 2) u16s plus a u16 size
        // and a u8 align (the 3 bytes).
        info_sz += 2u16 * (num_variants + 2u16) + 3u16;
    }

    // Construct the info tables, which contain offsets to the shape of each
    // variant. Also construct the largest-variant table for each enum, which
    // contains the variants that the size-of operation needs to look at.

    let lv_table = [];
    i = 0u;
    for did_: ast::def_id in ccx.shape_cx.tag_order {
        let did = did_; // Satisfy alias checker.
        let variants = ty::tag_variants(ccx.tcx, did);
        add_u16(info, vec::len(*variants) as u16);

        // Construct the largest-variants table.
        add_u16(info,
                header_sz + info_sz + data_sz + (vec::len(lv_table) as u16));

        let lv = largest_variants(ccx, did);
        add_u16(lv_table, vec::len(lv) as u16);
        for v: uint in lv { add_u16(lv_table, v as u16); }

        // Determine whether the enum has dynamic size.
        let dynamic = false;
        for variant: ty::variant_info in *variants {
            for typ: ty::t in variant.args {
                if ty::type_has_dynamic_size(ccx.tcx, typ) { dynamic = true; }
            }
        }

        // If we can, write in the static size and alignment of the enum.
        // Otherwise, write a placeholder.
        let size_align;
        if dynamic {
            size_align = {size: 0u16, align: 0u8};
        } else { size_align = compute_static_tag_size(ccx, lv, did); }
        add_u16(info, size_align.size);
        info += [size_align.align];

        // Now write in the offset of each variant. `i` walks the global
        // `offsets` list built in the first pass, in the same order.
        for v: ty::variant_info in *variants {
            add_u16(info, header_sz + info_sz + offsets[i]);
            i += 1u;
        }
    }

    // Cross-check the precomputed section sizes against what was emitted.
    assert (i == vec::len(offsets));
    assert (header_sz == vec::len(header) as u16);
    assert (info_sz == vec::len(info) as u16);
    assert (data_sz == vec::len(data) as u16);

    // Concatenate the sections in layout order and emit the global.
    header += info;
    header += data;
    header += lv_table;

    ret mk_global(ccx, "tag_shapes", C_bytes(header), true);
}
// Builds the "resource_shapes" global: a constant struct of destructor
// pointers for every interned resource, indexed by the ids that shape_of
// handed out in its ty_res case.
fn gen_resource_shapes(ccx: @crate_ctxt) -> ValueRef {
    let dtors = [];
    let i = 0u;
    let len = interner::len(ccx.shape_cx.resources);
    while i < len {
        let ri = interner::get(ccx.shape_cx.resources, i);
        dtors += [trans_common::get_res_dtor(ccx, dummy_sp(), ri.did, ri.t)];
        i += 1u;
    }

    ret mk_global(ccx, "resource_shapes", C_struct(dtors), true);
}
// Emits the crate's shape tables: generates the tag and resource tables,
// gives the forward-declared "shapes" struct (from mk_ctxt) its real body,
// and installs the initializer with internal linkage. Must run after
// translation so every tag and resource has been interned.
fn gen_shape_tables(ccx: @crate_ctxt) {
    let lltagstable = gen_tag_shapes(ccx);
    let llresourcestable = gen_resource_shapes(ccx);
    trans_common::set_struct_body(ccx.shape_cx.llshapetablesty,
                                  [val_ty(lltagstable),
                                   val_ty(llresourcestable)]);

    let lltables =
        C_named_struct(ccx.shape_cx.llshapetablesty,
                       [lltagstable, llresourcestable]);
    lib::llvm::llvm::LLVMSetInitializer(ccx.shape_cx.llshapetables, lltables);
    lib::llvm::llvm::LLVMSetGlobalConstant(ccx.shape_cx.llshapetables, True);
    lib::llvm::llvm::LLVMSetLinkage(ccx.shape_cx.llshapetables,
                                    lib::llvm::LLVMInternalLinkage as
                                        lib::llvm::llvm::Linkage);
}
// ______________________________________________________________________
// compute sizeof / alignof
// Returns the size of `t` in bytes as a ValueRef: an LLVM constant when
// the size is static, otherwise code emitted into `cx` at runtime.
fn size_of(cx: @block_ctxt, t: ty::t) -> result {
    let ccx = bcx_ccx(cx);
    if check type_has_static_size(ccx, t) {
        let sp = cx.sp;
        rslt(cx, llsize_of(bcx_ccx(cx), trans::type_of(ccx, sp, t)))
    } else { dynamic_size_of(cx, t) }
}

// Returns the alignment of `t` in bytes; same static/dynamic split as
// size_of.
fn align_of(cx: @block_ctxt, t: ty::t) -> result {
    let ccx = bcx_ccx(cx);
    if check type_has_static_size(ccx, t) {
        let sp = cx.sp;
        rslt(cx, llalign_of(bcx_ccx(cx), trans::type_of(ccx, sp, t)))
    } else { dynamic_align_of(cx, t) }
}

// Returns the real (store) size of the given type for the current target.
fn llsize_of_real(cx: @crate_ctxt, t: TypeRef) -> uint {
    ret llvm::LLVMStoreSizeOfType(cx.td.lltd, t) as uint;
}

// Returns the real alignment of the given type for the current target.
// NOTE(review): this queries LLVM's *preferred* alignment, which can be
// larger than the ABI alignment — confirm that is intended where this
// feeds layout decisions.
fn llalign_of_real(cx: @crate_ctxt, t: TypeRef) -> uint {
    ret llvm::LLVMPreferredAlignmentOfType(cx.td.lltd, t) as uint;
}

// sizeof(t) as an LLVM constant expression, cast to the target int type.
fn llsize_of(cx: @crate_ctxt, t: TypeRef) -> ValueRef {
    ret llvm::LLVMConstIntCast(lib::llvm::llvm::LLVMSizeOf(t), cx.int_type,
                               False);
}

// alignof(t) as an LLVM constant expression, cast to the target int type.
fn llalign_of(cx: @crate_ctxt, t: TypeRef) -> ValueRef {
    ret llvm::LLVMConstIntCast(lib::llvm::llvm::LLVMAlignOf(t), cx.int_type,
                               False);
}
// Computes the size of the data part of a non-dynamically-sized enum:
// the max over all variants of the size of the variant's argument tuple.
// Results are memoized in cx.tag_sizes. The typestate constraint obliges
// the caller to have checked that `t` has static size.
fn static_size_of_tag(cx: @crate_ctxt, sp: span, t: ty::t)
    : type_has_static_size(cx, t) -> uint {
    if cx.tag_sizes.contains_key(t) { ret cx.tag_sizes.get(t); }
    alt ty::struct(cx.tcx, t) {
      ty::ty_tag(tid, subtys) {
        // Compute max(variant sizes).
        let max_size = 0u;
        let variants = ty::tag_variants(cx.tcx, tid);
        for variant: ty::variant_info in *variants {
            // A simplified tuple of the args stands in for the variant body.
            let tup_ty = simplify_type(cx, ty::mk_tup(cx.tcx, variant.args));
            // Perform any type parameter substitutions.
            tup_ty = ty::substitute_type_params(cx.tcx, subtys, tup_ty);
            // Here we possibly do a recursive call.
            // FIXME: Avoid this check. Since the parent has static
            // size, any field must as well. There should be a way to
            // express that with constrained types.
            check (type_has_static_size(cx, tup_ty));
            // NOTE(review): `type_of` is unqualified here, unlike the
            // `trans::type_of` calls elsewhere — presumably in scope via
            // an import not visible in this chunk; verify.
            let this_size = llsize_of_real(cx, type_of(cx, sp, tup_ty));
            if max_size < this_size { max_size = this_size; }
        }
        cx.tag_sizes.insert(t, max_size);
        ret max_size;
      }
      _ {
        cx.tcx.sess.span_fatal(sp, "non-enum passed to static_size_of_tag()");
      }
    }
}
// Emits code into `cx` that computes, at runtime, the size in bytes of a
// type whose size is not statically known (because it mentions type
// parameters).
fn dynamic_size_of(cx: @block_ctxt, t: ty::t) -> result {
    // Emits code computing the padded size of a sequence of elements laid
    // out like a C struct.
    fn align_elements(cx: @block_ctxt, elts: [ty::t]) -> result {
        //
        // C padding rules:
        //
        //
        //   - Pad after each element so that next element is aligned.
        //   - Pad after final structure member so that whole structure
        //     is aligned to max alignment of interior.
        //
        let off = C_int(bcx_ccx(cx), 0);
        let max_align = C_int(bcx_ccx(cx), 1);
        let bcx = cx;
        for e: ty::t in elts {
            // align_of/size_of may themselves emit code; thread the block
            // context through each call.
            let elt_align = align_of(bcx, e);
            bcx = elt_align.bcx;
            let elt_size = size_of(bcx, e);
            bcx = elt_size.bcx;
            let aligned_off = align_to(bcx, off, elt_align.val);
            off = Add(bcx, aligned_off, elt_size.val);
            max_align = umax(bcx, max_align, elt_align.val);
        }
        // Final padding to the whole-structure alignment.
        off = align_to(bcx, off, max_align);
        //off = alt mode {
        //  align_total. {
        //    align_to(bcx, off, max_align)
        //  }
        //  align_next(t) {
        //    let {bcx, val: align} = align_of(bcx, t);
        //    align_to(bcx, off, align)
        //  }
        //};
        ret rslt(bcx, off);
    }
    alt ty::struct(bcx_tcx(cx), t) {
      ty::ty_param(p, _) {
        // Read the size out of the type's runtime tydesc.
        let szptr = field_of_tydesc(cx, t, false, abi::tydesc_field_size);
        ret rslt(szptr.bcx, Load(szptr.bcx, szptr.val));
      }
      ty::ty_rec(flds) {
        let tys: [ty::t] = [];
        for f: ty::field in flds { tys += [f.mt.ty]; }
        ret align_elements(cx, tys);
      }
      ty::ty_tup(elts) {
        let tys = [];
        for tp in elts { tys += [tp]; }
        ret align_elements(cx, tys);
      }
      ty::ty_tag(tid, tps) {
        let bcx = cx;
        let ccx = bcx_ccx(bcx);
        // Compute max(variant sizes) at runtime, accumulating into a
        // stack slot.
        let max_size: ValueRef = trans::alloca(bcx, ccx.int_type);
        Store(bcx, C_int(ccx, 0), max_size);
        let variants = ty::tag_variants(bcx_tcx(bcx), tid);
        for variant: ty::variant_info in *variants {
            // Perform type substitution on the raw argument types.
            let raw_tys: [ty::t] = variant.args;
            let tys: [ty::t] = [];
            for raw_ty: ty::t in raw_tys {
                let t = ty::substitute_type_params(bcx_tcx(cx), tps, raw_ty);
                tys += [t];
            }
            let rslt = align_elements(bcx, tys);
            bcx = rslt.bcx;
            let this_size = rslt.val;
            let old_max_size = Load(bcx, max_size);
            Store(bcx, umax(bcx, this_size, old_max_size), max_size);
        }
        let max_size_val = Load(bcx, max_size);
        // Multi-variant enums also carry a discriminant word.
        let total_size =
            if vec::len(*variants) != 1u {
                Add(bcx, max_size_val, llsize_of(ccx, ccx.int_type))
            } else { max_size_val };
        ret rslt(bcx, total_size);
      }
    }
}
// Emits code into `cx` that computes, at runtime, the alignment of a
// dynamically-sized type.
fn dynamic_align_of(cx: @block_ctxt, t: ty::t) -> result {
    // FIXME: Typestate constraint that shows this alt is
    // exhaustive
    alt ty::struct(bcx_tcx(cx), t) {
      ty::ty_param(p, _) {
        // Read the alignment out of the type's runtime tydesc.
        let aptr = field_of_tydesc(cx, t, false, abi::tydesc_field_align);
        ret rslt(aptr.bcx, Load(aptr.bcx, aptr.val));
      }
      ty::ty_rec(flds) {
        // max over field alignments, starting from 1.
        let a = C_int(bcx_ccx(cx), 1);
        let bcx = cx;
        for f: ty::field in flds {
            let align = align_of(bcx, f.mt.ty);
            bcx = align.bcx;
            a = umax(bcx, a, align.val);
        }
        ret rslt(bcx, a);
      }
      ty::ty_tag(_, _) {
        ret rslt(cx, C_int(bcx_ccx(cx), 1)); // FIXME: stub
      }
      ty::ty_tup(elts) {
        // max over element alignments, as for records.
        let a = C_int(bcx_ccx(cx), 1);
        let bcx = cx;
        for e in elts {
            let align = align_of(bcx, e);
            bcx = align.bcx;
            a = umax(bcx, a, align.val);
        }
        ret rslt(bcx, a);
      }
    }
}
// Given a type and a field index into its corresponding type descriptor,
// returns an LLVM ValueRef of that field from the tydesc, generating the
// tydesc if necessary.
// Returns a pointer to field `field` of the tydesc for `t`, generating
// the tydesc if necessary. `escapes` is forwarded to get_tydesc.
fn field_of_tydesc(cx: @block_ctxt, t: ty::t, escapes: bool, field: int) ->
   result {
    let ti = none::<@tydesc_info>;
    let tydesc = trans::get_tydesc(cx, t, escapes, ti).result;
    ret rslt(tydesc.bcx,
             GEPi(tydesc.bcx, tydesc.val, [0, field]));
}
// Creates a simpler, size-equivalent type. The resulting type is guaranteed
// to have (a) the same size as the type that was passed in; (b) to be non-
// recursive. This is done by replacing all boxes in a type with boxed unit
// types.
// Creates a simpler, size-equivalent type by replacing every pointer-like
// subterm with a boxed unit; see the comment block above for the contract.
fn simplify_type(ccx: @crate_ctxt, typ: ty::t) -> ty::t {
    // Applied to every subterm of `typ` by fold_ty below.
    fn simplifier(ccx: @crate_ctxt, typ: ty::t) -> ty::t {
        alt ty::struct(ccx.tcx, typ) {
          // Pointer-sized: stand in a boxed nil so we never recurse
          // through the pointee.
          ty::ty_box(_) | ty::ty_iface(_, _) {
            ret ty::mk_imm_box(ccx.tcx, ty::mk_nil(ccx.tcx));
          }
          ty::ty_uniq(_) {
            ret ty::mk_imm_uniq(ccx.tcx, ty::mk_nil(ccx.tcx));
          }
          // A fn value is modeled as a pair of box words for sizing
          // purposes (presumably code ptr + environment — confirm).
          ty::ty_fn(_) {
            ret ty::mk_tup(ccx.tcx,
                           [ty::mk_imm_box(ccx.tcx, ty::mk_nil(ccx.tcx)),
                            ty::mk_imm_box(ccx.tcx, ty::mk_nil(ccx.tcx))]);
          }
          // A resource is an int paired with its (substituted) contents;
          // the int is presumably the drop flag — confirm.
          ty::ty_res(_, sub, tps) {
            let sub1 = ty::substitute_type_params(ccx.tcx, tps, sub);
            ret ty::mk_tup(ccx.tcx,
                           [ty::mk_int(ccx.tcx), simplify_type(ccx, sub1)]);
          }
          _ { ret typ; }
        }
    }
    ret ty::fold_ty(ccx.tcx, ty::fm_general(bind simplifier(ccx, _)), typ);
}