// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Code that is useful in various trans modules.
|
2013-01-07 16:16:52 -06:00
|
|
|
|
2011-07-14 19:08:22 -05:00
|
|
|
|
2012-12-23 16:41:37 -06:00
|
|
|
use driver::session;
|
|
|
|
use driver::session::Session;
|
2013-06-16 06:11:17 -05:00
|
|
|
use lib::llvm::{ValueRef, BasicBlockRef, BuilderRef};
|
2013-06-12 21:49:01 -05:00
|
|
|
use lib::llvm::{True, False, Bool};
|
2013-09-30 16:45:53 -05:00
|
|
|
use lib::llvm::llvm;
|
2012-12-23 16:41:37 -06:00
|
|
|
use lib;
|
2013-07-15 22:42:13 -05:00
|
|
|
use middle::lang_items::LangItem;
|
2012-12-23 16:41:37 -06:00
|
|
|
use middle::trans::base;
|
|
|
|
use middle::trans::build;
|
|
|
|
use middle::trans::datum;
|
|
|
|
use middle::trans::glue;
|
2013-05-04 13:29:32 -05:00
|
|
|
use middle::trans::write_guard;
|
2013-08-05 04:12:40 -05:00
|
|
|
use middle::trans::debuginfo;
|
2013-01-25 18:57:39 -06:00
|
|
|
use middle::ty::substs;
|
2012-12-23 16:41:37 -06:00
|
|
|
use middle::ty;
|
|
|
|
use middle::typeck;
|
2013-05-04 13:29:32 -05:00
|
|
|
use middle::borrowck::root_map_key;
|
2013-09-30 16:45:53 -05:00
|
|
|
use util::ppaux::Repr;
|
2012-12-23 16:41:37 -06:00
|
|
|
|
2013-06-16 05:52:44 -05:00
|
|
|
use middle::trans::type_::Type;
|
|
|
|
|
2013-08-03 19:13:14 -05:00
|
|
|
use std::c_str::ToCStr;
|
2013-06-28 17:32:26 -05:00
|
|
|
use std::cast::transmute;
|
|
|
|
use std::cast;
|
2013-09-30 16:45:53 -05:00
|
|
|
use std::hashmap::HashMap;
|
2013-08-15 01:40:28 -05:00
|
|
|
use std::libc::{c_uint, c_longlong, c_ulonglong, c_char};
|
2013-06-28 17:32:26 -05:00
|
|
|
use std::vec;
|
2013-09-30 16:45:53 -05:00
|
|
|
use syntax::ast::{Name, Ident};
|
2013-09-07 01:56:17 -05:00
|
|
|
use syntax::ast_map::{path, path_elt, path_pretty_name};
|
2013-08-31 11:13:04 -05:00
|
|
|
use syntax::codemap::Span;
|
2013-06-04 14:34:25 -05:00
|
|
|
use syntax::parse::token;
|
2012-12-23 16:41:37 -06:00
|
|
|
use syntax::{ast, ast_map};
|
2011-07-14 19:08:22 -05:00
|
|
|
|
2013-06-12 21:02:33 -05:00
|
|
|
pub use middle::trans::context::CrateContext;
|
|
|
|
|
make small (<= size_of::<int>()) tuples immediate
fn foo() -> (u32, u8, u8, u8, u8) {
(4, 5, 6, 7, 8)
}
Before:
; Function Attrs: nounwind uwtable
define void @_ZN3foo18hbb616262f874f8daf4v0.0E({ i32, i8, i8, i8, i8 }* noalias nocapture sret, { i64, %tydesc*, i8*, i8*, i8 }* nocapture readnone) #0 {
"function top level":
%2 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 0
store i32 4, i32* %2, align 4
%3 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 1
store i8 5, i8* %3, align 4
%4 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 2
store i8 6, i8* %4, align 1
%5 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 3
store i8 7, i8* %5, align 2
%6 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 4
store i8 8, i8* %6, align 1
ret void
}
After:
; Function Attrs: nounwind readnone uwtable
define { i32, i8, i8, i8, i8 } @_ZN3foo18hbb616262f874f8daf4v0.0E({ i64, %tydesc*, i8*, i8*, i8 }* nocapture readnone) #0 {
"function top level":
ret { i32, i8, i8, i8, i8 } { i32 4, i8 5, i8 6, i8 7, i8 8 }
}
2013-09-30 17:29:42 -05:00
|
|
|
fn type_is_newtype_immediate(ccx: &mut CrateContext, ty: ty::t) -> bool {
|
2013-09-30 16:45:53 -05:00
|
|
|
match ty::get(ty).sty {
|
|
|
|
ty::ty_struct(def_id, ref substs) => {
|
make small (<= size_of::<int>()) tuples immediate
fn foo() -> (u32, u8, u8, u8, u8) {
(4, 5, 6, 7, 8)
}
Before:
; Function Attrs: nounwind uwtable
define void @_ZN3foo18hbb616262f874f8daf4v0.0E({ i32, i8, i8, i8, i8 }* noalias nocapture sret, { i64, %tydesc*, i8*, i8*, i8 }* nocapture readnone) #0 {
"function top level":
%2 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 0
store i32 4, i32* %2, align 4
%3 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 1
store i8 5, i8* %3, align 4
%4 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 2
store i8 6, i8* %4, align 1
%5 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 3
store i8 7, i8* %5, align 2
%6 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 4
store i8 8, i8* %6, align 1
ret void
}
After:
; Function Attrs: nounwind readnone uwtable
define { i32, i8, i8, i8, i8 } @_ZN3foo18hbb616262f874f8daf4v0.0E({ i64, %tydesc*, i8*, i8*, i8 }* nocapture readnone) #0 {
"function top level":
ret { i32, i8, i8, i8, i8 } { i32 4, i8 5, i8 6, i8 7, i8 8 }
}
2013-09-30 17:29:42 -05:00
|
|
|
let fields = ty::struct_fields(ccx.tcx, def_id, substs);
|
2013-09-30 16:45:53 -05:00
|
|
|
fields.len() == 1 &&
|
|
|
|
fields[0].ident.name == token::special_idents::unnamed_field.name &&
|
make small (<= size_of::<int>()) tuples immediate
fn foo() -> (u32, u8, u8, u8, u8) {
(4, 5, 6, 7, 8)
}
Before:
; Function Attrs: nounwind uwtable
define void @_ZN3foo18hbb616262f874f8daf4v0.0E({ i32, i8, i8, i8, i8 }* noalias nocapture sret, { i64, %tydesc*, i8*, i8*, i8 }* nocapture readnone) #0 {
"function top level":
%2 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 0
store i32 4, i32* %2, align 4
%3 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 1
store i8 5, i8* %3, align 4
%4 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 2
store i8 6, i8* %4, align 1
%5 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 3
store i8 7, i8* %5, align 2
%6 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 4
store i8 8, i8* %6, align 1
ret void
}
After:
; Function Attrs: nounwind readnone uwtable
define { i32, i8, i8, i8, i8 } @_ZN3foo18hbb616262f874f8daf4v0.0E({ i64, %tydesc*, i8*, i8*, i8 }* nocapture readnone) #0 {
"function top level":
ret { i32, i8, i8, i8, i8 } { i32 4, i8 5, i8 6, i8 7, i8 8 }
}
2013-09-30 17:29:42 -05:00
|
|
|
type_is_immediate(ccx, fields[0].mt.ty)
|
2013-09-30 16:45:53 -05:00
|
|
|
}
|
|
|
|
_ => false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
make small (<= size_of::<int>()) tuples immediate
fn foo() -> (u32, u8, u8, u8, u8) {
(4, 5, 6, 7, 8)
}
Before:
; Function Attrs: nounwind uwtable
define void @_ZN3foo18hbb616262f874f8daf4v0.0E({ i32, i8, i8, i8, i8 }* noalias nocapture sret, { i64, %tydesc*, i8*, i8*, i8 }* nocapture readnone) #0 {
"function top level":
%2 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 0
store i32 4, i32* %2, align 4
%3 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 1
store i8 5, i8* %3, align 4
%4 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 2
store i8 6, i8* %4, align 1
%5 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 3
store i8 7, i8* %5, align 2
%6 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 4
store i8 8, i8* %6, align 1
ret void
}
After:
; Function Attrs: nounwind readnone uwtable
define { i32, i8, i8, i8, i8 } @_ZN3foo18hbb616262f874f8daf4v0.0E({ i64, %tydesc*, i8*, i8*, i8 }* nocapture readnone) #0 {
"function top level":
ret { i32, i8, i8, i8, i8 } { i32 4, i8 5, i8 6, i8 7, i8 8 }
}
2013-09-30 17:29:42 -05:00
|
|
|
pub fn type_is_immediate(ccx: &mut CrateContext, ty: ty::t) -> bool {
|
|
|
|
use middle::trans::machine::llsize_of_alloc;
|
|
|
|
use middle::trans::type_of::sizing_type_of;
|
|
|
|
let tcx = ccx.tcx;
|
|
|
|
let simple = ty::type_is_scalar(ty) || ty::type_is_boxed(ty) ||
|
2013-09-30 16:45:53 -05:00
|
|
|
ty::type_is_unique(ty) || ty::type_is_region_ptr(ty) ||
|
make small (<= size_of::<int>()) tuples immediate
fn foo() -> (u32, u8, u8, u8, u8) {
(4, 5, 6, 7, 8)
}
Before:
; Function Attrs: nounwind uwtable
define void @_ZN3foo18hbb616262f874f8daf4v0.0E({ i32, i8, i8, i8, i8 }* noalias nocapture sret, { i64, %tydesc*, i8*, i8*, i8 }* nocapture readnone) #0 {
"function top level":
%2 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 0
store i32 4, i32* %2, align 4
%3 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 1
store i8 5, i8* %3, align 4
%4 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 2
store i8 6, i8* %4, align 1
%5 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 3
store i8 7, i8* %5, align 2
%6 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 4
store i8 8, i8* %6, align 1
ret void
}
After:
; Function Attrs: nounwind readnone uwtable
define { i32, i8, i8, i8, i8 } @_ZN3foo18hbb616262f874f8daf4v0.0E({ i64, %tydesc*, i8*, i8*, i8 }* nocapture readnone) #0 {
"function top level":
ret { i32, i8, i8, i8, i8 } { i32 4, i8 5, i8 6, i8 7, i8 8 }
}
2013-09-30 17:29:42 -05:00
|
|
|
type_is_newtype_immediate(ccx, ty) ||
|
|
|
|
ty::type_is_simd(tcx, ty);
|
|
|
|
if simple {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
match ty::get(ty).sty {
|
2013-10-27 15:34:17 -05:00
|
|
|
ty::ty_bot => true,
|
2013-10-07 20:58:33 -05:00
|
|
|
ty::ty_struct(*) | ty::ty_enum(*) | ty::ty_tup(*) => {
|
make small (<= size_of::<int>()) tuples immediate
fn foo() -> (u32, u8, u8, u8, u8) {
(4, 5, 6, 7, 8)
}
Before:
; Function Attrs: nounwind uwtable
define void @_ZN3foo18hbb616262f874f8daf4v0.0E({ i32, i8, i8, i8, i8 }* noalias nocapture sret, { i64, %tydesc*, i8*, i8*, i8 }* nocapture readnone) #0 {
"function top level":
%2 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 0
store i32 4, i32* %2, align 4
%3 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 1
store i8 5, i8* %3, align 4
%4 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 2
store i8 6, i8* %4, align 1
%5 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 3
store i8 7, i8* %5, align 2
%6 = getelementptr inbounds { i32, i8, i8, i8, i8 }* %0, i64 0, i32 4
store i8 8, i8* %6, align 1
ret void
}
After:
; Function Attrs: nounwind readnone uwtable
define { i32, i8, i8, i8, i8 } @_ZN3foo18hbb616262f874f8daf4v0.0E({ i64, %tydesc*, i8*, i8*, i8 }* nocapture readnone) #0 {
"function top level":
ret { i32, i8, i8, i8, i8 } { i32 4, i8 5, i8 6, i8 7, i8 8 }
}
2013-09-30 17:29:42 -05:00
|
|
|
let llty = sizing_type_of(ccx, ty);
|
|
|
|
llsize_of_alloc(ccx, llty) <= llsize_of_alloc(ccx, ccx.int_type)
|
|
|
|
}
|
|
|
|
_ => false
|
|
|
|
}
|
2013-09-30 16:45:53 -05:00
|
|
|
}
|
|
|
|
|
2013-09-07 01:56:17 -05:00
|
|
|
pub fn gensym_name(name: &str) -> (Ident, path_elt) {
|
|
|
|
let name = token::gensym(name);
|
|
|
|
let ident = Ident::new(name);
|
|
|
|
(ident, path_pretty_name(ident, name as u64))
|
2011-07-21 19:27:34 -05:00
|
|
|
}
|
|
|
|
|
2013-02-04 16:02:01 -06:00
|
|
|
pub struct tydesc_info {
|
|
|
|
ty: ty::t,
|
|
|
|
tydesc: ValueRef,
|
|
|
|
size: ValueRef,
|
|
|
|
align: ValueRef,
|
2013-08-11 12:29:14 -05:00
|
|
|
borrow_offset: ValueRef,
|
2013-09-03 03:44:47 -05:00
|
|
|
name: ValueRef,
|
2013-02-04 16:02:01 -06:00
|
|
|
take_glue: Option<ValueRef>,
|
|
|
|
drop_glue: Option<ValueRef>,
|
|
|
|
free_glue: Option<ValueRef>,
|
|
|
|
visit_glue: Option<ValueRef>
|
|
|
|
}
|
2011-07-21 19:27:34 -05:00
|
|
|
|
|
|
|
/*
 * A note on nomenclature of linking: "extern", "foreign", and "upcall".
 *
 * An "extern" is an LLVM symbol we wind up emitting an undefined external
 * reference to. This means "we don't have the thing in this compilation unit,
 * please make sure you link it in at runtime". This could be a reference to
 * C code found in a C library, or rust code found in a rust crate.
 *
 * Most "externs" are implicitly declared (automatically) as a result of a
 * user declaring an extern _module_ dependency; this causes the rust driver
 * to locate an extern crate, scan its compilation metadata, and emit extern
 * declarations for any symbols used by the declaring crate.
 *
 * A "foreign" is an extern that references C (or other non-rust ABI) code.
 * There is no metadata to scan for extern references so in these cases either
 * a header-digester like bindgen, or manual function prototypes, have to
 * serve as declarators. So these are usually given explicitly as prototype
 * declarations, in rust code, with ABI attributes on them noting which ABI to
 * link via.
 *
 * An "upcall" is a foreign call generated by the compiler (not corresponding
 * to any user-written call in the code) into the runtime library, to perform
 * some helper task such as bringing a task to life, allocating memory, etc.
 *
 */
|
2012-03-22 15:44:20 -05:00
|
|
|
|
2013-02-04 16:02:01 -06:00
|
|
|
pub struct Stats {
|
|
|
|
n_static_tydescs: uint,
|
|
|
|
n_glues_created: uint,
|
|
|
|
n_null_glues: uint,
|
|
|
|
n_real_glues: uint,
|
|
|
|
n_fns: uint,
|
|
|
|
n_monos: uint,
|
|
|
|
n_inlines: uint,
|
|
|
|
n_closures: uint,
|
2013-06-28 13:15:34 -05:00
|
|
|
n_llvm_insns: uint,
|
|
|
|
llvm_insn_ctxt: ~[~str],
|
2013-06-13 02:19:50 -05:00
|
|
|
llvm_insns: HashMap<~str, uint>,
|
2013-06-28 13:15:34 -05:00
|
|
|
fn_stats: ~[(~str, uint, uint)] // (ident, time-in-ms, llvm-instructions)
|
2013-02-04 16:02:01 -06:00
|
|
|
}
|
2011-07-21 19:27:34 -05:00
|
|
|
|
2013-01-30 13:46:19 -06:00
|
|
|
pub struct BuilderRef_res {
|
2012-09-06 21:40:15 -05:00
|
|
|
B: BuilderRef,
|
2013-02-27 18:13:53 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
impl Drop for BuilderRef_res {
|
2013-09-16 20:18:07 -05:00
|
|
|
fn drop(&mut self) {
|
2013-01-10 23:23:07 -06:00
|
|
|
unsafe {
|
|
|
|
llvm::LLVMDisposeBuilder(self.B);
|
|
|
|
}
|
|
|
|
}
|
2012-06-22 13:53:25 -05:00
|
|
|
}
|
2011-08-24 09:30:20 -05:00
|
|
|
|
2013-01-30 13:46:19 -06:00
|
|
|
pub fn BuilderRef_res(B: BuilderRef) -> BuilderRef_res {
|
2012-09-05 17:58:43 -05:00
|
|
|
BuilderRef_res {
|
|
|
|
B: B
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-05-21 14:25:44 -05:00
|
|
|
// Map from external symbol name to the LLVM value declared for it.
pub type ExternMap = HashMap<~str, ValueRef>;
|
2013-02-04 17:30:32 -06:00
|
|
|
|
2011-07-21 19:27:34 -05:00
|
|
|
// Types used for llself.
|
2013-01-30 13:46:19 -06:00
|
|
|
pub struct ValSelfData {
|
2012-09-07 16:50:47 -05:00
|
|
|
v: ValueRef,
|
|
|
|
t: ty::t,
|
2013-06-27 18:40:27 -05:00
|
|
|
is_copy: bool,
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2011-07-21 19:27:34 -05:00
|
|
|
|
2012-10-08 14:39:30 -05:00
|
|
|
// Here `self_ty` is the real type of the self parameter to this method. It
|
|
|
|
// will only be set in the case of default methods.
|
2013-01-30 13:46:19 -06:00
|
|
|
pub struct param_substs {
|
2013-01-06 13:16:14 -06:00
|
|
|
tys: ~[ty::t],
|
2013-06-20 11:29:24 -05:00
|
|
|
self_ty: Option<ty::t>,
|
2013-06-27 19:43:34 -05:00
|
|
|
vtables: Option<typeck::vtable_res>,
|
2013-07-22 18:40:31 -05:00
|
|
|
self_vtables: Option<typeck::vtable_param_res>
|
2013-01-06 13:16:14 -06:00
|
|
|
}
|
2012-02-09 04:17:11 -06:00
|
|
|
|
2013-05-31 17:17:22 -05:00
|
|
|
impl param_substs {
|
|
|
|
pub fn validate(&self) {
|
2013-08-03 11:45:23 -05:00
|
|
|
for t in self.tys.iter() { assert!(!ty::type_needs_infer(*t)); }
|
|
|
|
for t in self.self_ty.iter() { assert!(!ty::type_needs_infer(*t)); }
|
2013-03-21 07:34:18 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-05-10 17:15:06 -05:00
|
|
|
fn param_substs_to_str(this: ¶m_substs, tcx: ty::ctxt) -> ~str {
|
2013-09-28 00:38:08 -05:00
|
|
|
format!("param_substs \\{tys:{}, vtables:{}\\}",
|
2013-05-10 17:15:06 -05:00
|
|
|
this.tys.repr(tcx),
|
2013-06-27 19:43:34 -05:00
|
|
|
this.vtables.repr(tcx))
|
2012-09-10 14:25:45 -05:00
|
|
|
}
|
|
|
|
|
Cleanup substitutions and treatment of generics around traits in a number of ways.
- In a TraitRef, use the self type consistently to refer to the Self type:
- trait ref in `impl Trait<A,B,C> for S` has a self type of `S`.
- trait ref in `A:Trait` has the self type `A`
- trait ref associated with a trait decl has self type `Self`
- trait ref associated with a supertype has self type `Self`
- trait ref in an object type `@Trait` has no self type
- Rewrite `each_bound_traits_and_supertraits` to perform
substitutions as it goes, and thus yield a series of trait refs
that are always in the same 'namespace' as the type parameter
bound given as input. Before, we left this to the caller, but
this doesn't work because the caller lacks adequare information
to perform the type substitutions correctly.
- For provided methods, substitute the generics involved in the provided
method correctly.
- Introduce TypeParameterDef, which tracks the bounds declared on a type
parameter and brings them together with the def_id and (in the future)
other information (maybe even the parameter's name!).
- Introduce Subst trait, which helps to cleanup a lot of the
repetitive code involved with doing type substitution.
- Introduce Repr trait, which makes debug printouts far more convenient.
Fixes #4183. Needed for #5656.
2013-04-09 00:54:49 -05:00
|
|
|
impl Repr for param_substs {
|
|
|
|
fn repr(&self, tcx: ty::ctxt) -> ~str {
|
|
|
|
param_substs_to_str(self, tcx)
|
|
|
|
}
|
2013-03-21 07:33:52 -05:00
|
|
|
}
|
|
|
|
|
2011-08-03 17:39:43 -05:00
|
|
|
// Function context. Every LLVM function we create will have one of
|
|
|
|
// these.
|
2013-07-17 05:12:08 -05:00
|
|
|
pub struct FunctionContext {
|
2011-07-27 07:19:39 -05:00
|
|
|
// The ValueRef returned from a call to llvm::LLVMAddFunction; the
|
2011-08-03 17:39:43 -05:00
|
|
|
// address of the first instruction in the sequence of
|
|
|
|
// instructions for this function that will go in the .text
|
|
|
|
// section of the executable we're generating.
|
2012-02-03 06:37:55 -06:00
|
|
|
llfn: ValueRef,
|
2011-07-27 07:19:39 -05:00
|
|
|
|
2013-04-18 17:53:29 -05:00
|
|
|
// The implicit environment argument that arrives in the function we're
|
|
|
|
// creating.
|
2012-02-03 06:37:55 -06:00
|
|
|
llenv: ValueRef,
|
2013-04-18 17:53:29 -05:00
|
|
|
|
|
|
|
// The place to store the return value. If the return type is immediate,
|
|
|
|
// this is an alloca in the function. Otherwise, it's the hidden first
|
|
|
|
// parameter to the function. After function construction, this should
|
|
|
|
// always be Some.
|
|
|
|
llretptr: Option<ValueRef>,
|
2011-07-27 07:19:39 -05:00
|
|
|
|
2013-07-17 05:12:08 -05:00
|
|
|
entry_bcx: Option<@mut Block>,
|
2013-07-21 09:19:34 -05:00
|
|
|
|
2012-02-03 06:37:55 -06:00
|
|
|
// These elements: "hoisted basic blocks" containing
|
2011-07-27 07:19:39 -05:00
|
|
|
// administrative activities that have to happen in only one place in
|
|
|
|
// the function, due to LLVM's quirks.
|
2013-07-21 09:19:34 -05:00
|
|
|
// A marker for the place where we want to insert the function's static
|
|
|
|
// allocas, so that LLVM will coalesce them into a single alloca call.
|
|
|
|
alloca_insert_pt: Option<ValueRef>,
|
2013-07-12 20:25:46 -05:00
|
|
|
llreturn: Option<BasicBlockRef>,
|
2012-01-13 03:58:31 -06:00
|
|
|
// The 'self' value currently in use in this function, if there
|
2011-08-03 17:39:43 -05:00
|
|
|
// is one.
|
2012-10-08 14:39:30 -05:00
|
|
|
//
|
|
|
|
// NB: This is the type of the self *variable*, not the self *type*. The
|
|
|
|
// self type is set only for default methods, while the self variable is
|
|
|
|
// set for all methods.
|
2013-02-21 17:30:24 -06:00
|
|
|
llself: Option<ValSelfData>,
|
2012-02-15 06:57:29 -06:00
|
|
|
// The a value alloca'd for calls to upcalls.rust_personality. Used when
|
|
|
|
// outputting the resume instruction.
|
2013-02-21 17:30:24 -06:00
|
|
|
personality: Option<ValueRef>,
|
2011-07-27 07:19:39 -05:00
|
|
|
|
2013-05-21 14:25:44 -05:00
|
|
|
// True if the caller expects this fn to use the out pointer to
|
|
|
|
// return. Either way, your code should write into llretptr, but if
|
|
|
|
// this value is false, llretptr will be a local alloca.
|
|
|
|
caller_expects_out_pointer: bool,
|
2013-04-18 17:53:29 -05:00
|
|
|
|
2011-07-27 07:19:39 -05:00
|
|
|
// Maps arguments to allocas created for them in llallocas.
|
2013-07-27 03:25:59 -05:00
|
|
|
llargs: @mut HashMap<ast::NodeId, ValueRef>,
|
2011-07-27 07:19:39 -05:00
|
|
|
// Maps the def_ids for local variables to the allocas created for
|
|
|
|
// them in llallocas.
|
2013-07-27 03:25:59 -05:00
|
|
|
lllocals: @mut HashMap<ast::NodeId, ValueRef>,
|
2012-02-03 06:37:55 -06:00
|
|
|
// Same as above, but for closure upvars
|
2013-07-27 03:25:59 -05:00
|
|
|
llupvars: @mut HashMap<ast::NodeId, ValueRef>,
|
2011-07-27 07:19:39 -05:00
|
|
|
|
2013-07-27 03:25:59 -05:00
|
|
|
// The NodeId of the function, or -1 if it doesn't correspond to
|
2011-08-03 17:39:43 -05:00
|
|
|
// a user-defined function.
|
2013-07-27 03:25:59 -05:00
|
|
|
id: ast::NodeId,
|
2012-02-03 06:37:55 -06:00
|
|
|
|
|
|
|
// If this function is being monomorphized, this contains the type
|
|
|
|
// substitutions used.
|
2013-02-18 14:36:30 -06:00
|
|
|
param_substs: Option<@param_substs>,
|
2012-02-03 06:37:55 -06:00
|
|
|
|
|
|
|
// The source span and nesting context where this function comes from, for
|
|
|
|
// error reporting and symbol generation.
|
2013-08-31 11:13:04 -05:00
|
|
|
span: Option<Span>,
|
2012-02-03 06:37:55 -06:00
|
|
|
path: path,
|
2011-08-02 17:13:08 -05:00
|
|
|
|
2012-02-03 06:37:55 -06:00
|
|
|
// This function's enclosing crate context.
|
2013-08-05 04:12:40 -05:00
|
|
|
ccx: @mut CrateContext,
|
|
|
|
|
|
|
|
// Used and maintained by the debuginfo module.
|
2013-08-23 11:45:02 -05:00
|
|
|
debug_context: debuginfo::FunctionDebugContext,
|
2013-01-06 13:16:14 -06:00
|
|
|
}
|
|
|
|
|
2013-07-17 05:12:08 -05:00
|
|
|
impl FunctionContext {
|
2013-05-27 20:33:57 -05:00
|
|
|
pub fn arg_pos(&self, arg: uint) -> uint {
|
2013-05-21 14:25:44 -05:00
|
|
|
if self.caller_expects_out_pointer {
|
2013-05-27 20:33:57 -05:00
|
|
|
arg + 2u
|
2013-05-21 14:25:44 -05:00
|
|
|
} else {
|
|
|
|
arg + 1u
|
2013-05-27 20:33:57 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn out_arg_pos(&self) -> uint {
|
2013-05-21 14:25:44 -05:00
|
|
|
assert!(self.caller_expects_out_pointer);
|
2013-05-27 20:33:57 -05:00
|
|
|
0u
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn env_arg_pos(&self) -> uint {
|
2013-05-21 14:25:44 -05:00
|
|
|
if self.caller_expects_out_pointer {
|
2013-05-27 20:33:57 -05:00
|
|
|
1u
|
|
|
|
} else {
|
|
|
|
0u
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-07-21 09:19:34 -05:00
|
|
|
pub fn cleanup(&mut self) {
|
|
|
|
unsafe {
|
2013-08-03 18:59:24 -05:00
|
|
|
llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt.unwrap());
|
2013-07-12 21:09:57 -05:00
|
|
|
}
|
2013-07-21 09:19:34 -05:00
|
|
|
// Remove the cycle between fcx and bcx, so memory can be freed
|
|
|
|
self.entry_bcx = None;
|
2013-07-12 21:09:57 -05:00
|
|
|
}
|
|
|
|
|
2013-07-12 20:25:46 -05:00
|
|
|
pub fn get_llreturn(&mut self) -> BasicBlockRef {
|
|
|
|
if self.llreturn.is_none() {
|
|
|
|
self.llreturn = Some(base::mk_return_basic_block(self.llfn));
|
|
|
|
}
|
|
|
|
|
2013-08-03 18:59:24 -05:00
|
|
|
self.llreturn.unwrap()
|
2013-07-12 20:25:46 -05:00
|
|
|
}
|
2013-05-27 20:33:57 -05:00
|
|
|
}
|
|
|
|
|
2013-06-13 02:19:50 -05:00
|
|
|
pub fn warn_not_to_commit(ccx: &mut CrateContext, msg: &str) {
|
|
|
|
if !ccx.do_not_commit_warning_issued {
|
|
|
|
ccx.do_not_commit_warning_issued = true;
|
2013-05-23 11:09:11 -05:00
|
|
|
ccx.sess.warn(msg.to_str() + " -- do not commit like this!");
|
2012-02-01 20:52:08 -06:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-06-26 15:50:43 -05:00
|
|
|
// Heap selectors. Indicate which heap something should go on.
|
2013-03-20 10:40:02 -05:00
|
|
|
#[deriving(Eq)]
|
2013-01-30 13:46:19 -06:00
|
|
|
pub enum heap {
|
2013-02-20 17:02:21 -06:00
|
|
|
heap_managed,
|
|
|
|
heap_managed_unique,
|
2012-06-26 15:50:43 -05:00
|
|
|
heap_exchange,
|
2013-06-30 02:22:18 -05:00
|
|
|
heap_exchange_closure
|
2012-06-26 15:50:43 -05:00
|
|
|
}
|
|
|
|
|
2013-07-02 14:47:32 -05:00
|
|
|
#[deriving(Clone, Eq)]
|
2013-01-30 13:46:19 -06:00
|
|
|
pub enum cleantype {
|
2012-03-23 19:52:20 -05:00
|
|
|
normal_exit_only,
|
|
|
|
normal_exit_and_unwind
|
|
|
|
}
|
|
|
|
|
2013-08-29 19:46:33 -05:00
|
|
|
// Cleanup functions
|
|
|
|
|
|
|
|
/// A cleanup function: a built-in destructor.
|
|
|
|
pub trait CleanupFunction {
|
|
|
|
fn clean(&self, block: @mut Block) -> @mut Block;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// A cleanup function that calls the "drop glue" (destructor function) on
|
|
|
|
/// a typed value.
|
|
|
|
pub struct TypeDroppingCleanupFunction {
|
|
|
|
val: ValueRef,
|
|
|
|
t: ty::t,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl CleanupFunction for TypeDroppingCleanupFunction {
|
|
|
|
fn clean(&self, block: @mut Block) -> @mut Block {
|
|
|
|
glue::drop_ty(block, self.val, self.t)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// A cleanup function that calls the "drop glue" (destructor function) on
|
|
|
|
/// an immediate typed value.
|
|
|
|
pub struct ImmediateTypeDroppingCleanupFunction {
|
|
|
|
val: ValueRef,
|
|
|
|
t: ty::t,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl CleanupFunction for ImmediateTypeDroppingCleanupFunction {
|
|
|
|
fn clean(&self, block: @mut Block) -> @mut Block {
|
|
|
|
glue::drop_ty_immediate(block, self.val, self.t)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// A cleanup function that releases a write guard, returning a value to
|
|
|
|
/// mutable status.
|
|
|
|
pub struct WriteGuardReleasingCleanupFunction {
|
|
|
|
root_key: root_map_key,
|
|
|
|
frozen_val_ref: ValueRef,
|
|
|
|
bits_val_ref: ValueRef,
|
|
|
|
filename_val: ValueRef,
|
|
|
|
line_val: ValueRef,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl CleanupFunction for WriteGuardReleasingCleanupFunction {
|
|
|
|
fn clean(&self, bcx: @mut Block) -> @mut Block {
|
|
|
|
write_guard::return_to_mut(bcx,
|
|
|
|
self.root_key,
|
|
|
|
self.frozen_val_ref,
|
|
|
|
self.bits_val_ref,
|
|
|
|
self.filename_val,
|
|
|
|
self.line_val)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// A cleanup function that frees some memory in the garbage-collected heap.
|
|
|
|
pub struct GCHeapFreeingCleanupFunction {
|
|
|
|
ptr: ValueRef,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl CleanupFunction for GCHeapFreeingCleanupFunction {
|
|
|
|
fn clean(&self, bcx: @mut Block) -> @mut Block {
|
|
|
|
glue::trans_free(bcx, self.ptr)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// A cleanup function that frees some memory in the exchange heap.
|
|
|
|
pub struct ExchangeHeapFreeingCleanupFunction {
|
|
|
|
ptr: ValueRef,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl CleanupFunction for ExchangeHeapFreeingCleanupFunction {
|
|
|
|
fn clean(&self, bcx: @mut Block) -> @mut Block {
|
|
|
|
glue::trans_exchange_free(bcx, self.ptr)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-01-30 13:46:19 -06:00
|
|
|
pub enum cleanup {
|
2013-08-29 19:46:33 -05:00
|
|
|
clean(@CleanupFunction, cleantype),
|
|
|
|
clean_temp(ValueRef, @CleanupFunction, cleantype),
|
2011-07-21 19:27:34 -05:00
|
|
|
}
|
|
|
|
|
2013-07-02 14:47:32 -05:00
|
|
|
// Can't use deriving(Clone) because of the managed closure.
|
|
|
|
impl Clone for cleanup {
|
|
|
|
fn clone(&self) -> cleanup {
|
|
|
|
match *self {
|
|
|
|
clean(f, ct) => clean(f, ct),
|
|
|
|
clean_temp(v, f, ct) => clean_temp(v, f, ct),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-02-15 06:57:29 -06:00
|
|
|
// Used to remember and reuse existing cleanup paths
|
|
|
|
// target: none means the path ends in an resume instruction
|
2013-07-02 14:47:32 -05:00
|
|
|
#[deriving(Clone)]
|
2013-02-19 01:40:42 -06:00
|
|
|
pub struct cleanup_path {
|
|
|
|
target: Option<BasicBlockRef>,
|
2013-06-16 07:51:50 -05:00
|
|
|
size: uint,
|
2013-02-19 01:40:42 -06:00
|
|
|
dest: BasicBlockRef
|
|
|
|
}
|
2012-02-15 06:57:29 -06:00
|
|
|
|
2013-07-17 05:12:08 -05:00
|
|
|
/// Invalidates cached cleanup state after the scope's cleanup list has
/// shrunk to `size` entries: drops the landing pad and any memoized
/// cleanup paths built against the longer list.
pub fn shrink_scope_clean(scope_info: &mut ScopeInfo, size: uint) {
    scope_info.landing_pad = None;
    // Keep only the cached paths that were built with at most `size`
    // cleanups; longer paths would run cleanups that no longer exist.
    scope_info.cleanup_paths = scope_info.cleanup_paths.iter()
            .take_while(|&cu| cu.size <= size).map(|&x|x).collect();
}
|
|
|
|
|
2013-07-17 05:12:08 -05:00
|
|
|
pub fn grow_scope_clean(scope_info: &mut ScopeInfo) {
|
2012-11-06 20:41:06 -06:00
|
|
|
scope_info.landing_pad = None;
|
2012-02-15 06:57:29 -06:00
|
|
|
}
|
|
|
|
|
2013-01-30 13:46:19 -06:00
|
|
|
pub fn cleanup_type(cx: ty::ctxt, ty: ty::t) -> cleantype {
|
2012-03-23 19:52:20 -05:00
|
|
|
if ty::type_needs_unwind_cleanup(cx, ty) {
|
|
|
|
normal_exit_and_unwind
|
|
|
|
} else {
|
|
|
|
normal_exit_only
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-07-17 05:12:08 -05:00
|
|
|
pub fn add_clean(bcx: @mut Block, val: ValueRef, t: ty::t) {
|
2013-08-29 19:46:33 -05:00
|
|
|
if !ty::type_needs_drop(bcx.tcx(), t) {
|
|
|
|
return
|
|
|
|
}
|
2013-06-16 05:52:44 -05:00
|
|
|
|
2013-10-21 15:08:31 -05:00
|
|
|
debug!("add_clean({}, {}, {})", bcx.to_str(), bcx.val_to_str(val), t.repr(bcx.tcx()));
|
2013-06-16 05:52:44 -05:00
|
|
|
|
2012-08-06 12:53:38 -05:00
|
|
|
let cleanup_type = cleanup_type(bcx.tcx(), t);
|
Implement scopes independent of LLVM basic blocks
Currently, scopes are tied to LLVM basic blocks. For each scope, there
are two new basic blocks, which means two extra jumps in the unoptimized
IR. These blocks aren't actually required, but only used to act as the
boundary for cleanups.
By keeping track of the current scope within a single basic block, we
can avoid those extra blocks and jumps, shrinking the pre-optimization
IR quite considerably. For example, the IR for trans_intrinsic goes
from ~22k lines to ~16k lines, almost 30% less.
The impact on the build times of optimized builds is rather small (about
1%), but unoptimized builds are about 11% faster. The testsuite for
unoptimized builds runs between 15% (CPU time) and 7.5% (wallclock time on
my i7) faster.
Also, in some situations this helps LLVM to generate better code by
inlining functions that it previously considered to be too large.
Likely because of the pointless blocks/jumps that were still present at
the time the inlining pass runs.
Refs #7462
2013-07-07 07:53:57 -05:00
|
|
|
do in_scope_cx(bcx, None) |scope_info| {
|
2013-08-29 19:46:33 -05:00
|
|
|
scope_info.cleanups.push(clean(@TypeDroppingCleanupFunction {
|
|
|
|
val: val,
|
|
|
|
t: t,
|
|
|
|
} as @CleanupFunction,
|
|
|
|
cleanup_type));
|
2013-06-16 07:51:50 -05:00
|
|
|
grow_scope_clean(scope_info);
|
2012-02-17 04:18:14 -06:00
|
|
|
}
|
2011-07-21 19:27:34 -05:00
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
|
2013-07-17 05:12:08 -05:00
|
|
|
pub fn add_clean_temp_immediate(cx: @mut Block, val: ValueRef, ty: ty::t) {
|
2012-08-01 19:30:05 -05:00
|
|
|
if !ty::type_needs_drop(cx.tcx(), ty) { return; }
|
2013-10-21 15:08:31 -05:00
|
|
|
debug!("add_clean_temp_immediate({}, {}, {})",
|
2013-06-14 22:16:03 -05:00
|
|
|
cx.to_str(), cx.val_to_str(val),
|
Cleanup substitutions and treatment of generics around traits in a number of ways.
- In a TraitRef, use the self type consistently to refer to the Self type:
- trait ref in `impl Trait<A,B,C> for S` has a self type of `S`.
- trait ref in `A:Trait` has the self type `A`
- trait ref associated with a trait decl has self type `Self`
- trait ref associated with a supertype has self type `Self`
- trait ref in an object type `@Trait` has no self type
- Rewrite `each_bound_traits_and_supertraits` to perform
substitutions as it goes, and thus yield a series of trait refs
that are always in the same 'namespace' as the type parameter
bound given as input. Before, we left this to the caller, but
this doesn't work because the caller lacks adequare information
to perform the type substitutions correctly.
- For provided methods, substitute the generics involved in the provided
method correctly.
- Introduce TypeParameterDef, which tracks the bounds declared on a type
parameter and brings them together with the def_id and (in the future)
other information (maybe even the parameter's name!).
- Introduce Subst trait, which helps to cleanup a lot of the
repetitive code involved with doing type substitution.
- Introduce Repr trait, which makes debug printouts far more convenient.
Fixes #4183. Needed for #5656.
2013-04-09 00:54:49 -05:00
|
|
|
ty.repr(cx.tcx()));
|
2012-03-23 19:52:20 -05:00
|
|
|
let cleanup_type = cleanup_type(cx.tcx(), ty);
|
Implement scopes independent of LLVM basic blocks
Currently, scopes are tied to LLVM basic blocks. For each scope, there
are two new basic blocks, which means two extra jumps in the unoptimized
IR. These blocks aren't actually required, but only used to act as the
boundary for cleanups.
By keeping track of the current scope within a single basic block, we
can avoid those extra blocks and jumps, shrinking the pre-optimization
IR quite considerably. For example, the IR for trans_intrinsic goes
from ~22k lines to ~16k lines, almost 30% less.
The impact on the build times of optimized builds is rather small (about
1%), but unoptimized builds are about 11% faster. The testsuite for
unoptimized builds runs between 15% (CPU time) and 7.5% (wallclock time on
my i7) faster.
Also, in some situations this helps LLVM to generate better code by
inlining functions that it previously considered to be too large.
Likely because of the pointless blocks/jumps that were still present at
the time the inlining pass runs.
Refs #7462
2013-07-07 07:53:57 -05:00
|
|
|
do in_scope_cx(cx, None) |scope_info| {
|
2013-08-29 19:46:33 -05:00
|
|
|
scope_info.cleanups.push(clean_temp(val,
|
|
|
|
@ImmediateTypeDroppingCleanupFunction {
|
|
|
|
val: val,
|
|
|
|
t: ty,
|
|
|
|
} as @CleanupFunction,
|
|
|
|
cleanup_type));
|
2013-06-16 07:51:50 -05:00
|
|
|
grow_scope_clean(scope_info);
|
2012-02-17 04:18:14 -06:00
|
|
|
}
|
2011-07-21 19:27:34 -05:00
|
|
|
}
|
Implement scopes independent of LLVM basic blocks
Currently, scopes are tied to LLVM basic blocks. For each scope, there
are two new basic blocks, which means two extra jumps in the unoptimized
IR. These blocks aren't actually required, but only used to act as the
boundary for cleanups.
By keeping track of the current scope within a single basic block, we
can avoid those extra blocks and jumps, shrinking the pre-optimization
IR quite considerably. For example, the IR for trans_intrinsic goes
from ~22k lines to ~16k lines, almost 30% less.
The impact on the build times of optimized builds is rather small (about
1%), but unoptimized builds are about 11% faster. The testsuite for
unoptimized builds runs between 15% (CPU time) and 7.5% (wallclock time on
my i7) faster.
Also, in some situations this helps LLVM to generate better code by
inlining functions that it previously considered to be too large.
Likely because of the pointless blocks/jumps that were still present at
the time the inlining pass runs.
Refs #7462
2013-07-07 07:53:57 -05:00
|
|
|
|
2013-07-17 05:12:08 -05:00
|
|
|
pub fn add_clean_temp_mem(bcx: @mut Block, val: ValueRef, t: ty::t) {
|
Implement scopes independent of LLVM basic blocks
Currently, scopes are tied to LLVM basic blocks. For each scope, there
are two new basic blocks, which means two extra jumps in the unoptimized
IR. These blocks aren't actually required, but only used to act as the
boundary for cleanups.
By keeping track of the current scope within a single basic block, we
can avoid those extra blocks and jumps, shrinking the pre-optimization
IR quite considerably. For example, the IR for trans_intrinsic goes
from ~22k lines to ~16k lines, almost 30% less.
The impact on the build times of optimized builds is rather small (about
1%), but unoptimized builds are about 11% faster. The testsuite for
unoptimized builds runs between 15% (CPU time) and 7.5% (wallclock time on
my i7) faster.
Also, in some situations this helps LLVM to generate better code by
inlining functions that it previously considered to be too large.
Likely because of the pointless blocks/jumps that were still present at
the time the inlining pass runs.
Refs #7462
2013-07-07 07:53:57 -05:00
|
|
|
add_clean_temp_mem_in_scope_(bcx, None, val, t);
|
|
|
|
}
|
|
|
|
|
2013-07-17 05:12:08 -05:00
|
|
|
pub fn add_clean_temp_mem_in_scope(bcx: @mut Block,
|
2013-07-27 03:25:59 -05:00
|
|
|
scope_id: ast::NodeId,
|
2013-07-17 05:12:08 -05:00
|
|
|
val: ValueRef,
|
|
|
|
t: ty::t) {
|
Implement scopes independent of LLVM basic blocks
Currently, scopes are tied to LLVM basic blocks. For each scope, there
are two new basic blocks, which means two extra jumps in the unoptimized
IR. These blocks aren't actually required, but only used to act as the
boundary for cleanups.
By keeping track of the current scope within a single basic block, we
can avoid those extra blocks and jumps, shrinking the pre-optimization
IR quite considerably. For example, the IR for trans_intrinsic goes
from ~22k lines to ~16k lines, almost 30% less.
The impact on the build times of optimized builds is rather small (about
1%), but unoptimized builds are about 11% faster. The testsuite for
unoptimized builds runs between 15% (CPU time) and 7.5% (wallclock time on
my i7) faster.
Also, in some situations this helps LLVM to generate better code by
inlining functions that it previously considered to be too large.
Likely because of the pointless blocks/jumps that were still present at
the time the inlining pass runs.
Refs #7462
2013-07-07 07:53:57 -05:00
|
|
|
add_clean_temp_mem_in_scope_(bcx, Some(scope_id), val, t);
|
|
|
|
}
|
|
|
|
|
2013-07-27 03:25:59 -05:00
|
|
|
pub fn add_clean_temp_mem_in_scope_(bcx: @mut Block, scope_id: Option<ast::NodeId>,
|
Implement scopes independent of LLVM basic blocks
Currently, scopes are tied to LLVM basic blocks. For each scope, there
are two new basic blocks, which means two extra jumps in the unoptimized
IR. These blocks aren't actually required, but only used to act as the
boundary for cleanups.
By keeping track of the current scope within a single basic block, we
can avoid those extra blocks and jumps, shrinking the pre-optimization
IR quite considerably. For example, the IR for trans_intrinsic goes
from ~22k lines to ~16k lines, almost 30% less.
The impact on the build times of optimized builds is rather small (about
1%), but unoptimized builds are about 11% faster. The testsuite for
unoptimized builds runs between 15% (CPU time) and 7.5% (wallclock time on
my i7) faster.
Also, in some situations this helps LLVM to generate better code by
inlining functions that it previously considered to be too large.
Likely because of the pointless blocks/jumps that were still present at
the time the inlining pass runs.
Refs #7462
2013-07-07 07:53:57 -05:00
|
|
|
val: ValueRef, t: ty::t) {
|
2012-08-06 12:53:38 -05:00
|
|
|
if !ty::type_needs_drop(bcx.tcx(), t) { return; }
|
2013-10-21 15:08:31 -05:00
|
|
|
debug!("add_clean_temp_mem({}, {}, {})",
|
2013-06-14 22:16:03 -05:00
|
|
|
bcx.to_str(), bcx.val_to_str(val),
|
Cleanup substitutions and treatment of generics around traits in a number of ways.
- In a TraitRef, use the self type consistently to refer to the Self type:
- trait ref in `impl Trait<A,B,C> for S` has a self type of `S`.
- trait ref in `A:Trait` has the self type `A`
- trait ref associated with a trait decl has self type `Self`
- trait ref associated with a supertype has self type `Self`
- trait ref in an object type `@Trait` has no self type
- Rewrite `each_bound_traits_and_supertraits` to perform
substitutions as it goes, and thus yield a series of trait refs
that are always in the same 'namespace' as the type parameter
bound given as input. Before, we left this to the caller, but
this doesn't work because the caller lacks adequare information
to perform the type substitutions correctly.
- For provided methods, substitute the generics involved in the provided
method correctly.
- Introduce TypeParameterDef, which tracks the bounds declared on a type
parameter and brings them together with the def_id and (in the future)
other information (maybe even the parameter's name!).
- Introduce Subst trait, which helps to cleanup a lot of the
repetitive code involved with doing type substitution.
- Introduce Repr trait, which makes debug printouts far more convenient.
Fixes #4183. Needed for #5656.
2013-04-09 00:54:49 -05:00
|
|
|
t.repr(bcx.tcx()));
|
2012-08-06 12:53:38 -05:00
|
|
|
let cleanup_type = cleanup_type(bcx.tcx(), t);
|
Implement scopes independent of LLVM basic blocks
Currently, scopes are tied to LLVM basic blocks. For each scope, there
are two new basic blocks, which means two extra jumps in the unoptimized
IR. These blocks aren't actually required, but only used to act as the
boundary for cleanups.
By keeping track of the current scope within a single basic block, we
can avoid those extra blocks and jumps, shrinking the pre-optimization
IR quite considerably. For example, the IR for trans_intrinsic goes
from ~22k lines to ~16k lines, almost 30% less.
The impact on the build times of optimized builds is rather small (about
1%), but unoptimized builds are about 11% faster. The testsuite for
unoptimized builds runs between 15% (CPU time) and 7.5% (wallclock time on
my i7) faster.
Also, in some situations this helps LLVM to generate better code by
inlining functions that it previously considered to be too large.
Likely because of the pointless blocks/jumps that were still present at
the time the inlining pass runs.
Refs #7462
2013-07-07 07:53:57 -05:00
|
|
|
do in_scope_cx(bcx, scope_id) |scope_info| {
|
2013-08-29 19:46:33 -05:00
|
|
|
scope_info.cleanups.push(clean_temp(val,
|
|
|
|
@TypeDroppingCleanupFunction {
|
|
|
|
val: val,
|
|
|
|
t: t,
|
|
|
|
} as @CleanupFunction,
|
|
|
|
cleanup_type));
|
2013-06-16 07:51:50 -05:00
|
|
|
grow_scope_clean(scope_info);
|
2013-01-11 23:01:42 -06:00
|
|
|
}
|
|
|
|
}
|
2013-07-17 05:12:08 -05:00
|
|
|
pub fn add_clean_return_to_mut(bcx: @mut Block,
|
2013-07-27 03:25:59 -05:00
|
|
|
scope_id: ast::NodeId,
|
2013-05-04 13:29:32 -05:00
|
|
|
root_key: root_map_key,
|
2013-05-01 12:48:00 -05:00
|
|
|
frozen_val_ref: ValueRef,
|
2013-05-01 20:46:34 -05:00
|
|
|
bits_val_ref: ValueRef,
|
|
|
|
filename_val: ValueRef,
|
|
|
|
line_val: ValueRef) {
|
2013-05-01 12:48:00 -05:00
|
|
|
//! When an `@mut` has been frozen, we have to
|
|
|
|
//! call the lang-item `return_to_mut` when the
|
|
|
|
//! freeze goes out of scope. We need to pass
|
|
|
|
//! in both the value which was frozen (`frozen_val`) and
|
|
|
|
//! the value (`bits_val_ref`) which was returned when the
|
|
|
|
//! box was frozen initially. Here, both `frozen_val_ref` and
|
|
|
|
//! `bits_val_ref` are in fact pointers to stack slots.
|
|
|
|
|
2013-10-21 15:08:31 -05:00
|
|
|
debug!("add_clean_return_to_mut({}, {}, {})",
|
2013-05-01 12:48:00 -05:00
|
|
|
bcx.to_str(),
|
2013-06-14 22:16:03 -05:00
|
|
|
bcx.val_to_str(frozen_val_ref),
|
|
|
|
bcx.val_to_str(bits_val_ref));
|
Implement scopes independent of LLVM basic blocks
Currently, scopes are tied to LLVM basic blocks. For each scope, there
are two new basic blocks, which means two extra jumps in the unoptimized
IR. These blocks aren't actually required, but only used to act as the
boundary for cleanups.
By keeping track of the current scope within a single basic block, we
can avoid those extra blocks and jumps, shrinking the pre-optimization
IR quite considerably. For example, the IR for trans_intrinsic goes
from ~22k lines to ~16k lines, almost 30% less.
The impact on the build times of optimized builds is rather small (about
1%), but unoptimized builds are about 11% faster. The testsuite for
unoptimized builds runs between 15% (CPU time) and 7.5% (wallclock time on
my i7) faster.
Also, in some situations this helps LLVM to generate better code by
inlining functions that it previously considered to be too large.
Likely because of the pointless blocks/jumps that were still present at
the time the inlining pass runs.
Refs #7462
2013-07-07 07:53:57 -05:00
|
|
|
do in_scope_cx(bcx, Some(scope_id)) |scope_info| {
|
2013-08-29 19:46:33 -05:00
|
|
|
scope_info.cleanups.push(clean_temp(
|
2013-05-01 12:48:00 -05:00
|
|
|
frozen_val_ref,
|
2013-08-29 19:46:33 -05:00
|
|
|
@WriteGuardReleasingCleanupFunction {
|
|
|
|
root_key: root_key,
|
|
|
|
frozen_val_ref: frozen_val_ref,
|
|
|
|
bits_val_ref: bits_val_ref,
|
|
|
|
filename_val: filename_val,
|
|
|
|
line_val: line_val,
|
|
|
|
} as @CleanupFunction,
|
2013-05-01 12:48:00 -05:00
|
|
|
normal_exit_only));
|
2013-06-16 07:51:50 -05:00
|
|
|
grow_scope_clean(scope_info);
|
2012-02-17 04:18:14 -06:00
|
|
|
}
|
2011-09-26 15:13:08 -05:00
|
|
|
}
|
2013-07-17 05:12:08 -05:00
|
|
|
pub fn add_clean_free(cx: @mut Block, ptr: ValueRef, heap: heap) {
|
2012-08-06 14:34:08 -05:00
|
|
|
let free_fn = match heap {
|
2013-08-29 19:46:33 -05:00
|
|
|
heap_managed | heap_managed_unique => {
|
|
|
|
@GCHeapFreeingCleanupFunction {
|
|
|
|
ptr: ptr,
|
|
|
|
} as @CleanupFunction
|
|
|
|
}
|
|
|
|
heap_exchange | heap_exchange_closure => {
|
|
|
|
@ExchangeHeapFreeingCleanupFunction {
|
|
|
|
ptr: ptr,
|
|
|
|
} as @CleanupFunction
|
|
|
|
}
|
2012-06-26 15:50:43 -05:00
|
|
|
};
|
Implement scopes independent of LLVM basic blocks
Currently, scopes are tied to LLVM basic blocks. For each scope, there
are two new basic blocks, which means two extra jumps in the unoptimized
IR. These blocks aren't actually required, but only used to act as the
boundary for cleanups.
By keeping track of the current scope within a single basic block, we
can avoid those extra blocks and jumps, shrinking the pre-optimization
IR quite considerably. For example, the IR for trans_intrinsic goes
from ~22k lines to ~16k lines, almost 30% less.
The impact on the build times of optimized builds is rather small (about
1%), but unoptimized builds are about 11% faster. The testsuite for
unoptimized builds runs between 15% (CPU time) and 7.5% (wallclock time on
my i7) faster.
Also, in some situations this helps LLVM to generate better code by
inlining functions that it previously considered to be too large.
Likely because of the pointless blocks/jumps that were still present at
the time the inlining pass runs.
Refs #7462
2013-07-07 07:53:57 -05:00
|
|
|
do in_scope_cx(cx, None) |scope_info| {
|
2013-08-29 19:46:33 -05:00
|
|
|
scope_info.cleanups.push(clean_temp(ptr,
|
|
|
|
free_fn,
|
|
|
|
normal_exit_and_unwind));
|
2013-06-16 07:51:50 -05:00
|
|
|
grow_scope_clean(scope_info);
|
2012-02-17 04:18:14 -06:00
|
|
|
}
|
2011-09-27 06:19:55 -05:00
|
|
|
}
|
2011-07-21 19:27:34 -05:00
|
|
|
|
|
|
|
// Note that this only works for temporaries. We should, at some point, move
|
|
|
|
// to a system where we can also cancel the cleanup on local variables, but
|
|
|
|
// this will be more involved. For now, we simply zero out the local, and the
|
|
|
|
// drop glue checks whether it is zero.
|
2013-07-17 05:12:08 -05:00
|
|
|
pub fn revoke_clean(cx: @mut Block, val: ValueRef) {
|
Implement scopes independent of LLVM basic blocks
Currently, scopes are tied to LLVM basic blocks. For each scope, there
are two new basic blocks, which means two extra jumps in the unoptimized
IR. These blocks aren't actually required, but only used to act as the
boundary for cleanups.
By keeping track of the current scope within a single basic block, we
can avoid those extra blocks and jumps, shrinking the pre-optimization
IR quite considerably. For example, the IR for trans_intrinsic goes
from ~22k lines to ~16k lines, almost 30% less.
The impact on the build times of optimized builds is rather small (about
1%), but unoptimized builds are about 11% faster. The testsuite for
unoptimized builds runs between 15% (CPU time) and 7.5% (wallclock time on
my i7) faster.
Also, in some situations this helps LLVM to generate better code by
inlining functions that it previously considered to be too large.
Likely because of the pointless blocks/jumps that were still present at
the time the inlining pass runs.
Refs #7462
2013-07-07 07:53:57 -05:00
|
|
|
do in_scope_cx(cx, None) |scope_info| {
|
2013-07-04 21:13:26 -05:00
|
|
|
let cleanup_pos = scope_info.cleanups.iter().position(
|
2012-09-28 00:20:47 -05:00
|
|
|
|cu| match *cu {
|
2012-08-28 17:54:45 -05:00
|
|
|
clean_temp(v, _, _) if v == val => true,
|
|
|
|
_ => false
|
|
|
|
});
|
2013-08-03 11:45:23 -05:00
|
|
|
for i in cleanup_pos.iter() {
|
2012-11-06 20:41:06 -06:00
|
|
|
scope_info.cleanups =
|
2013-06-27 04:48:50 -05:00
|
|
|
vec::append(scope_info.cleanups.slice(0u, *i).to_owned(),
|
|
|
|
scope_info.cleanups.slice(*i + 1u,
|
|
|
|
scope_info.cleanups.len()));
|
2013-06-16 07:51:50 -05:00
|
|
|
shrink_scope_clean(scope_info, *i);
|
2011-07-21 19:27:34 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-09-30 12:37:22 -05:00
|
|
|
pub fn block_cleanups(bcx: &mut Block) -> ~[cleanup] {
|
Implement scopes independent of LLVM basic blocks
Currently, scopes are tied to LLVM basic blocks. For each scope, there
are two new basic blocks, which means two extra jumps in the unoptimized
IR. These blocks aren't actually required, but only used to act as the
boundary for cleanups.
By keeping track of the current scope within a single basic block, we
can avoid those extra blocks and jumps, shrinking the pre-optimization
IR quite considerably. For example, the IR for trans_intrinsic goes
from ~22k lines to ~16k lines, almost 30% less.
The impact on the build times of optimized builds is rather small (about
1%), but unoptimized builds are about 11% faster. The testsuite for
unoptimized builds runs between 15% (CPU time) and 7.5% (wallclock time on
my i7) faster.
Also, in some situations this helps LLVM to generate better code by
inlining functions that it previously considered to be too large.
Likely because of the pointless blocks/jumps that were still present at
the time the inlining pass runs.
Refs #7462
2013-07-07 07:53:57 -05:00
|
|
|
match bcx.scope {
|
|
|
|
None => ~[],
|
2013-07-02 14:47:32 -05:00
|
|
|
Some(inf) => inf.cleanups.clone(),
|
2012-08-23 19:39:07 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-07-17 05:12:08 -05:00
|
|
|
pub struct ScopeInfo {
|
|
|
|
parent: Option<@mut ScopeInfo>,
|
|
|
|
loop_break: Option<@mut Block>,
|
2013-09-10 14:01:44 -05:00
|
|
|
loop_label: Option<Name>,
|
2012-02-17 04:18:14 -06:00
|
|
|
// A list of functions that must be run at when leaving this
|
|
|
|
// block, cleaning up any variables that were introduced in the
|
|
|
|
// block.
|
2013-02-21 13:57:20 -06:00
|
|
|
cleanups: ~[cleanup],
|
2012-02-17 04:18:14 -06:00
|
|
|
// Existing cleanup paths that may be reused, indexed by destination and
|
|
|
|
// cleared when the set of cleanups changes.
|
2013-02-21 13:57:20 -06:00
|
|
|
cleanup_paths: ~[cleanup_path],
|
2012-02-17 04:18:14 -06:00
|
|
|
// Unwinding landing pad. Also cleared when cleanups change.
|
2013-02-21 13:57:20 -06:00
|
|
|
landing_pad: Option<BasicBlockRef>,
|
Implement scopes independent of LLVM basic blocks
Currently, scopes are tied to LLVM basic blocks. For each scope, there
are two new basic blocks, which means two extra jumps in the unoptimized
IR. These blocks aren't actually required, but only used to act as the
boundary for cleanups.
By keeping track of the current scope within a single basic block, we
can avoid those extra blocks and jumps, shrinking the pre-optimization
IR quite considerably. For example, the IR for trans_intrinsic goes
from ~22k lines to ~16k lines, almost 30% less.
The impact on the build times of optimized builds is rather small (about
1%), but unoptimized builds are about 11% faster. The testsuite for
unoptimized builds runs between 15% (CPU time) and 7.5% (wallclock time on
my i7) faster.
Also, in some situations this helps LLVM to generate better code by
inlining functions that it previously considered to be too large.
Likely because of the pointless blocks/jumps that were still present at
the time the inlining pass runs.
Refs #7462
2013-07-07 07:53:57 -05:00
|
|
|
// info about the AST node this scope originated from, if any
|
|
|
|
node_info: Option<NodeInfo>,
|
2013-01-06 13:16:14 -06:00
|
|
|
}
|
2012-02-17 04:18:14 -06:00
|
|
|
|
2013-07-17 05:12:08 -05:00
|
|
|
impl ScopeInfo {
|
2013-05-31 17:17:22 -05:00
|
|
|
pub fn empty_cleanups(&mut self) -> bool {
|
2013-05-02 20:15:36 -05:00
|
|
|
self.cleanups.is_empty()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-01-30 13:46:19 -06:00
|
|
|
pub trait get_node_info {
|
2013-02-22 00:41:37 -06:00
|
|
|
fn info(&self) -> Option<NodeInfo>;
|
2012-07-11 17:00:40 -05:00
|
|
|
}
|
|
|
|
|
2013-09-01 20:45:37 -05:00
|
|
|
impl get_node_info for ast::Expr {
|
2013-02-22 00:41:37 -06:00
|
|
|
fn info(&self) -> Option<NodeInfo> {
|
2013-03-15 14:24:24 -05:00
|
|
|
Some(NodeInfo {id: self.id,
|
2013-06-01 17:31:56 -05:00
|
|
|
callee_id: self.get_callee_id(),
|
2013-03-15 14:24:24 -05:00
|
|
|
span: self.span})
|
2012-05-14 16:24:16 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-07-19 00:38:55 -05:00
|
|
|
impl get_node_info for ast::Block {
|
2013-02-22 00:41:37 -06:00
|
|
|
fn info(&self) -> Option<NodeInfo> {
|
2013-07-16 13:08:35 -05:00
|
|
|
Some(NodeInfo {id: self.id,
|
2013-03-15 14:24:24 -05:00
|
|
|
callee_id: None,
|
|
|
|
span: self.span})
|
2012-05-14 16:24:16 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-09-01 20:45:37 -05:00
|
|
|
impl get_node_info for Option<@ast::Expr> {
|
2013-02-22 00:41:37 -06:00
|
|
|
fn info(&self) -> Option<NodeInfo> {
|
2013-09-20 01:08:47 -05:00
|
|
|
self.as_ref().and_then(|s| s.info())
|
2012-05-14 16:24:16 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-02-19 01:40:42 -06:00
|
|
|
pub struct NodeInfo {
|
2013-07-27 03:25:59 -05:00
|
|
|
id: ast::NodeId,
|
|
|
|
callee_id: Option<ast::NodeId>,
|
2013-08-31 11:13:04 -05:00
|
|
|
span: Span
|
2013-02-19 01:40:42 -06:00
|
|
|
}
|
2012-05-14 16:24:16 -05:00
|
|
|
|
2011-07-21 19:27:34 -05:00
|
|
|
// Basic block context. We create a block context for each basic block
|
|
|
|
// (single-entry, single-exit sequence of instructions) we generate from Rust
|
|
|
|
// code. Each basic block we generate is attached to a function, typically
|
|
|
|
// with many basic blocks per function. All the basic blocks attached to a
|
|
|
|
// function are organized as a directed graph.
|
2013-07-17 05:12:08 -05:00
|
|
|
pub struct Block {
|
2011-07-27 07:19:39 -05:00
|
|
|
// The BasicBlockRef returned from a call to
|
2011-08-03 17:39:43 -05:00
|
|
|
// llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
|
|
|
|
// block to the function pointed to by llfn. We insert
|
|
|
|
// instructions into that block by way of this block context.
|
2011-09-02 17:34:58 -05:00
|
|
|
// The block pointing to this one in the function's digraph.
|
2012-09-06 21:40:15 -05:00
|
|
|
llbb: BasicBlockRef,
|
2013-02-21 13:57:20 -06:00
|
|
|
terminated: bool,
|
|
|
|
unreachable: bool,
|
2013-07-17 05:12:08 -05:00
|
|
|
parent: Option<@mut Block>,
|
Implement scopes independent of LLVM basic blocks
Currently, scopes are tied to LLVM basic blocks. For each scope, there
are two new basic blocks, which means two extra jumps in the unoptimized
IR. These blocks aren't actually required, but only used to act as the
boundary for cleanups.
By keeping track of the current scope within a single basic block, we
can avoid those extra blocks and jumps, shrinking the pre-optimization
IR quite considerably. For example, the IR for trans_intrinsic goes
from ~22k lines to ~16k lines, almost 30% less.
The impact on the build times of optimized builds is rather small (about
1%), but unoptimized builds are about 11% faster. The testsuite for
unoptimized builds runs between 15% (CPU time) and 7.5% (wallclock time on
my i7) faster.
Also, in some situations this helps LLVM to generate better code by
inlining functions that it previously considered to be too large.
Likely because of the pointless blocks/jumps that were still present at
the time the inlining pass runs.
Refs #7462
2013-07-07 07:53:57 -05:00
|
|
|
// The current scope within this basic block
|
2013-07-17 05:12:08 -05:00
|
|
|
scope: Option<@mut ScopeInfo>,
|
2012-07-23 18:00:19 -05:00
|
|
|
// Is this block part of a landing pad?
|
2012-09-06 21:40:15 -05:00
|
|
|
is_lpad: bool,
|
2012-05-14 16:24:16 -05:00
|
|
|
// info about the AST node this block originated from, if any
|
2013-02-19 01:40:42 -06:00
|
|
|
node_info: Option<NodeInfo>,
|
2011-09-02 17:34:58 -05:00
|
|
|
// The function context for the function to which this block is
|
|
|
|
// attached.
|
2013-07-17 05:12:08 -05:00
|
|
|
fcx: @mut FunctionContext
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Block {
|
|
|
|
|
|
|
|
pub fn new(llbb: BasicBlockRef,
|
|
|
|
parent: Option<@mut Block>,
|
|
|
|
is_lpad: bool,
|
|
|
|
node_info: Option<NodeInfo>,
|
|
|
|
fcx: @mut FunctionContext)
|
|
|
|
-> Block {
|
|
|
|
Block {
|
|
|
|
llbb: llbb,
|
|
|
|
terminated: false,
|
|
|
|
unreachable: false,
|
|
|
|
parent: parent,
|
|
|
|
scope: None,
|
|
|
|
is_lpad: is_lpad,
|
|
|
|
node_info: node_info,
|
|
|
|
fcx: fcx
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn ccx(&self) -> @mut CrateContext { self.fcx.ccx }
|
|
|
|
pub fn tcx(&self) -> ty::ctxt { self.fcx.ccx.tcx }
|
|
|
|
pub fn sess(&self) -> Session { self.fcx.ccx.sess }
|
2012-09-05 17:58:43 -05:00
|
|
|
|
2013-09-01 19:50:59 -05:00
|
|
|
pub fn ident(&self, ident: Ident) -> @str {
|
2013-07-17 05:12:08 -05:00
|
|
|
token::ident_to_str(&ident)
|
|
|
|
}
|
2012-09-05 17:58:43 -05:00
|
|
|
|
2013-07-27 03:25:59 -05:00
|
|
|
pub fn node_id_to_str(&self, id: ast::NodeId) -> ~str {
|
2013-07-17 05:12:08 -05:00
|
|
|
ast_map::node_id_to_str(self.tcx().items, id, self.sess().intr())
|
|
|
|
}
|
|
|
|
|
2013-09-30 12:37:17 -05:00
|
|
|
pub fn expr_to_str(&self, e: &ast::Expr) -> ~str {
|
2013-07-17 05:12:08 -05:00
|
|
|
e.repr(self.tcx())
|
|
|
|
}
|
|
|
|
|
2013-09-01 20:45:37 -05:00
|
|
|
pub fn expr_is_lval(&self, e: &ast::Expr) -> bool {
|
2013-07-17 05:12:08 -05:00
|
|
|
ty::expr_is_lval(self.tcx(), self.ccx().maps.method_map, e)
|
|
|
|
}
|
|
|
|
|
2013-09-01 20:45:37 -05:00
|
|
|
pub fn expr_kind(&self, e: &ast::Expr) -> ty::ExprKind {
|
2013-07-17 05:12:08 -05:00
|
|
|
ty::expr_kind(self.tcx(), self.ccx().maps.method_map, e)
|
|
|
|
}
|
|
|
|
|
2013-09-01 20:45:37 -05:00
|
|
|
pub fn def(&self, nid: ast::NodeId) -> ast::Def {
|
2013-07-17 05:12:08 -05:00
|
|
|
match self.tcx().def_map.find(&nid) {
|
|
|
|
Some(&v) => v,
|
|
|
|
None => {
|
2013-09-28 00:38:08 -05:00
|
|
|
self.tcx().sess.bug(format!(
|
|
|
|
"No def associated with node id {:?}", nid));
|
2013-07-17 05:12:08 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn val_to_str(&self, val: ValueRef) -> ~str {
|
|
|
|
self.ccx().tn.val_to_str(val)
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn llty_str(&self, ty: Type) -> ~str {
|
|
|
|
self.ccx().tn.type_to_str(ty)
|
2012-06-12 16:55:44 -05:00
|
|
|
}
|
|
|
|
|
2013-07-17 05:12:08 -05:00
|
|
|
pub fn ty_to_str(&self, t: ty::t) -> ~str {
|
|
|
|
t.repr(self.tcx())
|
|
|
|
}
|
2012-06-12 16:55:44 -05:00
|
|
|
|
2013-07-17 05:12:08 -05:00
|
|
|
pub fn to_str(&self) -> ~str {
|
|
|
|
unsafe {
|
|
|
|
match self.node_info {
|
2013-09-28 00:38:08 -05:00
|
|
|
Some(node_info) => format!("[block {}]", node_info.id),
|
|
|
|
None => format!("[block {}]", transmute::<&Block, *Block>(self)),
|
2013-07-17 05:12:08 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2012-06-12 16:55:44 -05:00
|
|
|
}
|
2011-07-21 19:27:34 -05:00
|
|
|
|
2013-01-30 13:46:19 -06:00
|
|
|
pub struct Result {
|
2013-07-17 05:12:08 -05:00
|
|
|
bcx: @mut Block,
|
2012-09-07 16:50:47 -05:00
|
|
|
val: ValueRef
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
|
|
|
|
2013-07-17 05:12:08 -05:00
|
|
|
pub fn rslt(bcx: @mut Block, val: ValueRef) -> Result {
|
2012-08-28 17:54:45 -05:00
|
|
|
Result {bcx: bcx, val: val}
|
|
|
|
}
|
2011-07-21 19:27:34 -05:00
|
|
|
|
2013-05-31 17:17:22 -05:00
|
|
|
impl Result {
|
2013-07-17 05:12:08 -05:00
|
|
|
pub fn unpack(&self, bcx: &mut @mut Block) -> ValueRef {
|
2012-08-28 17:54:45 -05:00
|
|
|
*bcx = self.bcx;
|
|
|
|
return self.val;
|
|
|
|
}
|
2011-07-21 19:27:34 -05:00
|
|
|
}
|
|
|
|
|
2013-06-16 05:52:44 -05:00
|
|
|
pub fn val_ty(v: ValueRef) -> Type {
|
2013-01-10 23:23:07 -06:00
|
|
|
unsafe {
|
2013-06-16 05:52:44 -05:00
|
|
|
Type::from_ref(llvm::LLVMTypeOf(v))
|
2013-01-10 23:23:07 -06:00
|
|
|
}
|
|
|
|
}
|
2011-07-21 19:27:34 -05:00
|
|
|
|
2013-07-27 03:25:59 -05:00
|
|
|
pub fn in_scope_cx(cx: @mut Block, scope_id: Option<ast::NodeId>, f: &fn(si: &mut ScopeInfo)) {
|
2012-03-15 08:47:03 -05:00
|
|
|
let mut cur = cx;
|
Implement scopes independent of LLVM basic blocks
Currently, scopes are tied to LLVM basic blocks. For each scope, there
are two new basic blocks, which means two extra jumps in the unoptimized
IR. These blocks aren't actually required, but only used to act as the
boundary for cleanups.
By keeping track of the current scope within a single basic block, we
can avoid those extra blocks and jumps, shrinking the pre-optimization
IR quite considerably. For example, the IR for trans_intrinsic goes
from ~22k lines to ~16k lines, almost 30% less.
The impact on the build times of optimized builds is rather small (about
1%), but unoptimized builds are about 11% faster. The testsuite for
unoptimized builds runs between 15% (CPU time) and 7.5% (wallclock time on
my i7) faster.
Also, in some situations this helps LLVM to generate better code by
inlining functions that it previously considered to be too large.
Likely because of the pointless blocks/jumps that were still present at
the time the inlining pass runs.
Refs #7462
2013-07-07 07:53:57 -05:00
|
|
|
let mut cur_scope = cur.scope;
|
2012-03-10 22:34:17 -06:00
|
|
|
loop {
|
Implement scopes independent of LLVM basic blocks
Currently, scopes are tied to LLVM basic blocks. For each scope, there
are two new basic blocks, which means two extra jumps in the unoptimized
IR. These blocks aren't actually required, but only used to act as the
boundary for cleanups.
By keeping track of the current scope within a single basic block, we
can avoid those extra blocks and jumps, shrinking the pre-optimization
IR quite considerably. For example, the IR for trans_intrinsic goes
from ~22k lines to ~16k lines, almost 30% less.
The impact on the build times of optimized builds is rather small (about
1%), but unoptimized builds are about 11% faster. The testsuite for
unoptimized builds runs between 15% (CPU time) and 7.5% (wallclock time on
my i7) faster.
Also, in some situations this helps LLVM to generate better code by
inlining functions that it previously considered to be too large.
Likely because of the pointless blocks/jumps that were still present at
the time the inlining pass runs.
Refs #7462
2013-07-07 07:53:57 -05:00
|
|
|
cur_scope = match cur_scope {
|
|
|
|
Some(inf) => match scope_id {
|
|
|
|
Some(wanted) => match inf.node_info {
|
|
|
|
Some(NodeInfo { id: actual, _ }) if wanted == actual => {
|
2013-10-21 15:08:31 -05:00
|
|
|
debug!("in_scope_cx: selected cur={} (cx={})",
|
Implement scopes independent of LLVM basic blocks
Currently, scopes are tied to LLVM basic blocks. For each scope, there
are two new basic blocks, which means two extra jumps in the unoptimized
IR. These blocks aren't actually required, but only used to act as the
boundary for cleanups.
By keeping track of the current scope within a single basic block, we
can avoid those extra blocks and jumps, shrinking the pre-optimization
IR quite considerably. For example, the IR for trans_intrinsic goes
from ~22k lines to ~16k lines, almost 30% less.
The impact on the build times of optimized builds is rather small (about
1%), but unoptimized builds are about 11% faster. The testsuite for
unoptimized builds runs between 15% (CPU time) and 7.5% (wallclock time on
my i7) faster.
Also, in some situations this helps LLVM to generate better code by
inlining functions that it previously considered to be too large.
Likely because of the pointless blocks/jumps that were still present at
the time the inlining pass runs.
Refs #7462
2013-07-07 07:53:57 -05:00
|
|
|
cur.to_str(), cx.to_str());
|
|
|
|
f(inf);
|
|
|
|
return;
|
|
|
|
},
|
|
|
|
_ => inf.parent,
|
|
|
|
},
|
|
|
|
None => {
|
2013-10-21 15:08:31 -05:00
|
|
|
debug!("in_scope_cx: selected cur={} (cx={})",
|
Implement scopes independent of LLVM basic blocks
Currently, scopes are tied to LLVM basic blocks. For each scope, there
are two new basic blocks, which means two extra jumps in the unoptimized
IR. These blocks aren't actually required, but only used to act as the
boundary for cleanups.
By keeping track of the current scope within a single basic block, we
can avoid those extra blocks and jumps, shrinking the pre-optimization
IR quite considerably. For example, the IR for trans_intrinsic goes
from ~22k lines to ~16k lines, almost 30% less.
The impact on the build times of optimized builds is rather small (about
1%), but unoptimized builds are about 11% faster. The testsuite for
unoptimized builds runs between 15% (CPU time) and 7.5% (wallclock time on
my i7) faster.
Also, in some situations this helps LLVM to generate better code by
inlining functions that it previously considered to be too large.
Likely because of the pointless blocks/jumps that were still present at
the time the inlining pass runs.
Refs #7462
2013-07-07 07:53:57 -05:00
|
|
|
cur.to_str(), cx.to_str());
|
|
|
|
f(inf);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
},
|
|
|
|
None => {
|
|
|
|
cur = block_parent(cur);
|
|
|
|
cur.scope
|
2013-02-21 13:57:20 -06:00
|
|
|
}
|
2012-02-17 04:18:14 -06:00
|
|
|
}
|
2011-07-21 19:27:34 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-07-17 05:12:08 -05:00
|
|
|
// Returns the parent of `cx`; it is a compiler bug to call this on the
// root block, which has no parent.
pub fn block_parent(cx: @mut Block) -> @mut Block {
    match cx.parent {
        Some(b) => b,
        None => cx.sess().bug(format!("block_parent called on root block {:?}", cx))
    }
}
|
|
|
|
|
2011-07-14 19:08:22 -05:00
|
|
|
|
2013-06-15 09:29:52 -05:00
|
|
|
// Let T be the content of a box @T. tuplify_box_ty(t) returns the
|
|
|
|
// representation of @T as a tuple (i.e., the ty::t version of what T_box()
|
|
|
|
// returns).
|
|
|
|
pub fn tuplify_box_ty(tcx: ty::ctxt, t: ty::t) -> ty::t {
|
|
|
|
let ptr = ty::mk_ptr(
|
|
|
|
tcx,
|
2013-09-01 20:45:37 -05:00
|
|
|
ty::mt {ty: ty::mk_i8(), mutbl: ast::MutImmutable}
|
2013-06-15 09:29:52 -05:00
|
|
|
);
|
|
|
|
return ty::mk_tup(tcx, ~[ty::mk_uint(), ty::mk_type(tcx),
|
|
|
|
ptr, ptr,
|
|
|
|
t]);
|
|
|
|
}
|
2011-07-14 19:08:22 -05:00
|
|
|
|
|
|
|
// LLVM constant constructors.
|
2013-06-15 22:45:48 -05:00
|
|
|
pub fn C_null(t: Type) -> ValueRef {
|
2013-01-10 23:23:07 -06:00
|
|
|
unsafe {
|
2013-06-15 22:45:48 -05:00
|
|
|
llvm::LLVMConstNull(t.to_ref())
|
2013-01-10 23:23:07 -06:00
|
|
|
}
|
|
|
|
}
|
2011-07-14 19:08:22 -05:00
|
|
|
|
2013-06-15 22:45:48 -05:00
|
|
|
pub fn C_undef(t: Type) -> ValueRef {
|
2013-02-18 16:16:21 -06:00
|
|
|
unsafe {
|
2013-06-15 22:45:48 -05:00
|
|
|
llvm::LLVMGetUndef(t.to_ref())
|
2013-02-18 16:16:21 -06:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-06-15 22:45:48 -05:00
|
|
|
pub fn C_integral(t: Type, u: u64, sign_extend: bool) -> ValueRef {
|
2013-01-10 23:23:07 -06:00
|
|
|
unsafe {
|
2013-06-15 22:45:48 -05:00
|
|
|
llvm::LLVMConstInt(t.to_ref(), u, sign_extend as Bool)
|
2013-01-10 23:23:07 -06:00
|
|
|
}
|
2011-07-14 19:08:22 -05:00
|
|
|
}
|
|
|
|
|
2013-06-15 22:45:48 -05:00
|
|
|
pub fn C_floating(s: &str, t: Type) -> ValueRef {
|
2013-01-10 23:23:07 -06:00
|
|
|
unsafe {
|
2013-08-14 21:21:59 -05:00
|
|
|
do s.with_c_str |buf| {
|
2013-06-15 22:45:48 -05:00
|
|
|
llvm::LLVMConstRealOfString(t.to_ref(), buf)
|
|
|
|
}
|
2013-01-10 23:23:07 -06:00
|
|
|
}
|
2011-07-14 19:08:22 -05:00
|
|
|
}
|
|
|
|
|
2013-01-30 13:46:19 -06:00
|
|
|
pub fn C_nil() -> ValueRef {
|
2013-10-12 22:19:22 -05:00
|
|
|
C_struct([], false)
|
2011-07-14 19:08:22 -05:00
|
|
|
}
|
|
|
|
|
2013-06-15 22:45:48 -05:00
|
|
|
pub fn C_bool(val: bool) -> ValueRef {
|
|
|
|
C_integral(Type::bool(), val as u64, false)
|
2011-07-14 19:08:22 -05:00
|
|
|
}
|
|
|
|
|
2013-06-15 22:45:48 -05:00
|
|
|
pub fn C_i1(val: bool) -> ValueRef {
|
|
|
|
C_integral(Type::i1(), val as u64, false)
|
2013-02-06 16:28:02 -06:00
|
|
|
}
|
|
|
|
|
2013-01-30 13:46:19 -06:00
|
|
|
pub fn C_i32(i: i32) -> ValueRef {
|
2013-06-15 22:45:48 -05:00
|
|
|
return C_integral(Type::i32(), i as u64, true);
|
2011-10-26 00:23:28 -05:00
|
|
|
}
|
|
|
|
|
2013-01-30 13:46:19 -06:00
|
|
|
pub fn C_i64(i: i64) -> ValueRef {
|
2013-06-15 22:45:48 -05:00
|
|
|
return C_integral(Type::i64(), i as u64, true);
|
2011-11-15 20:11:22 -06:00
|
|
|
}
|
|
|
|
|
2013-06-13 02:19:50 -05:00
|
|
|
pub fn C_int(cx: &CrateContext, i: int) -> ValueRef {
|
2013-06-15 22:45:48 -05:00
|
|
|
return C_integral(cx.int_type, i as u64, true);
|
2011-10-14 18:45:25 -05:00
|
|
|
}
|
2011-07-14 19:08:22 -05:00
|
|
|
|
2013-06-13 02:19:50 -05:00
|
|
|
pub fn C_uint(cx: &CrateContext, i: uint) -> ValueRef {
|
2013-06-15 22:45:48 -05:00
|
|
|
return C_integral(cx.int_type, i as u64, false);
|
2011-10-14 18:45:25 -05:00
|
|
|
}
|
2011-07-14 19:08:22 -05:00
|
|
|
|
2013-01-30 13:46:19 -06:00
|
|
|
pub fn C_u8(i: uint) -> ValueRef {
|
2013-06-15 22:45:48 -05:00
|
|
|
return C_integral(Type::i8(), i as u64, false);
|
2013-01-30 13:46:19 -06:00
|
|
|
}
|
2011-07-14 19:08:22 -05:00
|
|
|
|
|
|
|
|
|
|
|
// This is a 'c-like' raw string, which differs from
|
|
|
|
// our boxed-and-length-annotated strings.
|
2013-06-13 02:19:50 -05:00
|
|
|
pub fn C_cstr(cx: &mut CrateContext, s: @str) -> ValueRef {
|
2013-01-10 23:23:07 -06:00
|
|
|
unsafe {
|
2013-06-15 22:45:48 -05:00
|
|
|
match cx.const_cstr_cache.find_equiv(&s) {
|
2013-03-22 21:26:41 -05:00
|
|
|
Some(&llval) => return llval,
|
2013-02-16 12:36:09 -06:00
|
|
|
None => ()
|
2013-01-10 23:23:07 -06:00
|
|
|
}
|
2012-04-21 15:23:25 -05:00
|
|
|
|
2013-08-15 01:40:28 -05:00
|
|
|
let sc = do s.as_imm_buf |buf, buflen| {
|
|
|
|
llvm::LLVMConstStringInContext(cx.llcx, buf as *c_char, buflen as c_uint, False)
|
2013-06-15 22:45:48 -05:00
|
|
|
};
|
|
|
|
|
|
|
|
let gsym = token::gensym("str");
|
2013-09-28 00:38:08 -05:00
|
|
|
let g = do format!("str{}", gsym).with_c_str |buf| {
|
2013-06-15 22:45:48 -05:00
|
|
|
llvm::LLVMAddGlobal(cx.llmod, val_ty(sc).to_ref(), buf)
|
2013-01-10 23:23:07 -06:00
|
|
|
};
|
|
|
|
llvm::LLVMSetInitializer(g, sc);
|
|
|
|
llvm::LLVMSetGlobalConstant(g, True);
|
|
|
|
lib::llvm::SetLinkage(g, lib::llvm::InternalLinkage);
|
2012-04-21 15:23:25 -05:00
|
|
|
|
2013-01-10 23:23:07 -06:00
|
|
|
cx.const_cstr_cache.insert(s, g);
|
2012-04-21 15:23:25 -05:00
|
|
|
|
2013-01-10 23:23:07 -06:00
|
|
|
return g;
|
|
|
|
}
|
2011-07-14 19:08:22 -05:00
|
|
|
}
|
|
|
|
|
2013-01-05 01:06:25 -06:00
|
|
|
// NB: Do not use `do_spill_noroot` to make this into a constant string, or
|
|
|
|
// you will be kicked off fast isel. See issue #4352 for an example of this.
|
2013-06-13 02:19:50 -05:00
|
|
|
pub fn C_estr_slice(cx: &mut CrateContext, s: @str) -> ValueRef {
|
2013-01-10 23:23:07 -06:00
|
|
|
unsafe {
|
2013-02-10 18:33:16 -06:00
|
|
|
let len = s.len();
|
2013-06-15 22:45:48 -05:00
|
|
|
let cs = llvm::LLVMConstPointerCast(C_cstr(cx, s), Type::i8p().to_ref());
|
2013-10-12 22:19:22 -05:00
|
|
|
C_struct([cs, C_uint(cx, len)], false)
|
2013-01-10 23:23:07 -06:00
|
|
|
}
|
2012-06-08 15:26:06 -05:00
|
|
|
}
|
|
|
|
|
2013-10-14 10:24:17 -05:00
|
|
|
pub fn C_binary_slice(cx: &mut CrateContext, data: &[u8]) -> ValueRef {
|
|
|
|
unsafe {
|
|
|
|
let len = data.len();
|
|
|
|
let lldata = C_bytes(data);
|
|
|
|
|
|
|
|
let gsym = token::gensym("binary");
|
|
|
|
let g = do format!("binary{}", gsym).with_c_str |buf| {
|
|
|
|
llvm::LLVMAddGlobal(cx.llmod, val_ty(lldata).to_ref(), buf)
|
|
|
|
};
|
|
|
|
llvm::LLVMSetInitializer(g, lldata);
|
|
|
|
llvm::LLVMSetGlobalConstant(g, True);
|
|
|
|
lib::llvm::SetLinkage(g, lib::llvm::InternalLinkage);
|
|
|
|
|
|
|
|
let cs = llvm::LLVMConstPointerCast(g, Type::i8p().to_ref());
|
|
|
|
C_struct([cs, C_uint(cx, len)], false)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-01-30 13:46:19 -06:00
|
|
|
pub fn C_zero_byte_arr(size: uint) -> ValueRef {
|
2013-01-10 23:23:07 -06:00
|
|
|
unsafe {
|
|
|
|
let mut i = 0u;
|
|
|
|
let mut elts: ~[ValueRef] = ~[];
|
|
|
|
while i < size { elts.push(C_u8(0u)); i += 1u; }
|
2013-06-16 05:52:44 -05:00
|
|
|
return llvm::LLVMConstArray(Type::i8().to_ref(),
|
|
|
|
vec::raw::to_ptr(elts), elts.len() as c_uint);
|
2013-01-10 23:23:07 -06:00
|
|
|
}
|
2011-07-14 19:08:22 -05:00
|
|
|
}
|
|
|
|
|
2013-10-12 22:19:22 -05:00
|
|
|
pub fn C_struct(elts: &[ValueRef], packed: bool) -> ValueRef {
|
2013-01-10 23:23:07 -06:00
|
|
|
unsafe {
|
2013-07-03 01:34:17 -05:00
|
|
|
do elts.as_imm_buf |ptr, len| {
|
2013-10-12 22:19:22 -05:00
|
|
|
llvm::LLVMConstStructInContext(base::task_llcx(), ptr, len as c_uint, packed as Bool)
|
2013-01-23 18:25:47 -06:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-06-16 05:52:44 -05:00
|
|
|
pub fn C_named_struct(T: Type, elts: &[ValueRef]) -> ValueRef {
|
2013-01-10 23:23:07 -06:00
|
|
|
unsafe {
|
2013-07-03 01:34:17 -05:00
|
|
|
do elts.as_imm_buf |ptr, len| {
|
2013-06-16 05:52:44 -05:00
|
|
|
llvm::LLVMConstNamedStruct(T.to_ref(), ptr, len as c_uint)
|
2013-01-10 23:23:07 -06:00
|
|
|
}
|
2012-08-28 17:54:45 -05:00
|
|
|
}
|
2011-07-14 19:08:22 -05:00
|
|
|
}
|
|
|
|
|
2013-06-16 05:52:44 -05:00
|
|
|
pub fn C_array(ty: Type, elts: &[ValueRef]) -> ValueRef {
|
2013-01-10 23:23:07 -06:00
|
|
|
unsafe {
|
2013-06-16 05:52:44 -05:00
|
|
|
return llvm::LLVMConstArray(ty.to_ref(), vec::raw::to_ptr(elts), elts.len() as c_uint);
|
2013-01-10 23:23:07 -06:00
|
|
|
}
|
2011-07-27 17:14:59 -05:00
|
|
|
}
|
2011-08-04 12:46:10 -05:00
|
|
|
|
2013-03-05 19:36:39 -06:00
|
|
|
pub fn C_bytes(bytes: &[u8]) -> ValueRef {
|
2013-01-10 23:23:07 -06:00
|
|
|
unsafe {
|
2013-06-15 22:45:48 -05:00
|
|
|
let ptr = cast::transmute(vec::raw::to_ptr(bytes));
|
|
|
|
return llvm::LLVMConstStringInContext(base::task_llcx(), ptr, bytes.len() as c_uint, True);
|
2013-01-10 23:23:07 -06:00
|
|
|
}
|
2012-09-05 17:27:22 -05:00
|
|
|
}
|
|
|
|
|
2013-01-30 13:46:19 -06:00
|
|
|
pub fn get_param(fndecl: ValueRef, param: uint) -> ValueRef {
|
2013-01-10 23:23:07 -06:00
|
|
|
unsafe {
|
|
|
|
llvm::LLVMGetParam(fndecl, param as c_uint)
|
|
|
|
}
|
2012-03-21 09:42:20 -05:00
|
|
|
}
|
|
|
|
|
2013-06-13 02:19:50 -05:00
|
|
|
pub fn const_get_elt(cx: &CrateContext, v: ValueRef, us: &[c_uint])
|
2013-02-18 16:16:21 -06:00
|
|
|
-> ValueRef {
|
|
|
|
unsafe {
|
2013-07-03 01:34:17 -05:00
|
|
|
let r = do us.as_imm_buf |p, len| {
|
2013-02-18 16:16:21 -06:00
|
|
|
llvm::LLVMConstExtractValue(v, p, len as c_uint)
|
|
|
|
};
|
|
|
|
|
2013-10-21 15:08:31 -05:00
|
|
|
debug!("const_get_elt(v={}, us={:?}, r={})",
|
2013-06-14 22:16:03 -05:00
|
|
|
cx.tn.val_to_str(v), us, cx.tn.val_to_str(r));
|
2013-02-18 16:16:21 -06:00
|
|
|
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-07-25 03:51:45 -05:00
|
|
|
pub fn is_const(v: ValueRef) -> bool {
|
|
|
|
unsafe {
|
|
|
|
llvm::LLVMIsConstant(v) == True
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-02-18 16:16:21 -06:00
|
|
|
pub fn const_to_int(v: ValueRef) -> c_longlong {
|
|
|
|
unsafe {
|
|
|
|
llvm::LLVMConstIntGetSExtValue(v)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn const_to_uint(v: ValueRef) -> c_ulonglong {
|
|
|
|
unsafe {
|
|
|
|
llvm::LLVMConstIntGetZExtValue(v)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn is_undef(val: ValueRef) -> bool {
|
|
|
|
unsafe {
|
|
|
|
llvm::LLVMIsUndef(val) != False
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-03-31 17:55:30 -05:00
|
|
|
pub fn is_null(val: ValueRef) -> bool {
|
|
|
|
unsafe {
|
|
|
|
llvm::LLVMIsNull(val) != False
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-03-12 10:31:22 -05:00
|
|
|
// Used to identify cached monomorphized functions and vtables
|
2013-06-21 19:37:03 -05:00
|
|
|
#[deriving(Eq,IterBytes)]
|
2013-01-30 13:46:19 -06:00
|
|
|
pub enum mono_param_id {
|
2013-05-17 19:27:44 -05:00
|
|
|
mono_precise(ty::t, Option<@~[mono_id]>),
|
2012-03-12 10:31:22 -05:00
|
|
|
mono_any,
|
2012-09-20 14:29:15 -05:00
|
|
|
mono_repr(uint /* size */,
|
|
|
|
uint /* align */,
|
2013-04-06 21:52:35 -05:00
|
|
|
MonoDataClass,
|
2012-09-20 14:29:15 -05:00
|
|
|
datum::DatumMode),
|
2012-03-12 10:31:22 -05:00
|
|
|
}
|
2012-08-27 18:26:35 -05:00
|
|
|
|
2013-06-21 19:37:03 -05:00
|
|
|
#[deriving(Eq,IterBytes)]
|
2013-04-06 21:52:35 -05:00
|
|
|
pub enum MonoDataClass {
|
|
|
|
MonoBits, // Anything not treated differently from arbitrary integer data
|
|
|
|
MonoNonNull, // Non-null pointers (used for optional-pointer optimization)
|
|
|
|
// FIXME(#3547)---scalars and floats are
|
|
|
|
// treated differently in most ABIs. But we
|
|
|
|
// should be doing something more detailed
|
|
|
|
// here.
|
|
|
|
MonoFloat
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn mono_data_classify(t: ty::t) -> MonoDataClass {
|
|
|
|
match ty::get(t).sty {
|
|
|
|
ty::ty_float(_) => MonoFloat,
|
|
|
|
ty::ty_rptr(*) | ty::ty_uniq(*) |
|
|
|
|
ty::ty_box(*) | ty::ty_opaque_box(*) |
|
|
|
|
ty::ty_estr(ty::vstore_uniq) | ty::ty_evec(_, ty::vstore_uniq) |
|
|
|
|
ty::ty_estr(ty::vstore_box) | ty::ty_evec(_, ty::vstore_box) |
|
|
|
|
ty::ty_bare_fn(*) => MonoNonNull,
|
|
|
|
// Is that everything? Would closures or slices qualify?
|
|
|
|
_ => MonoBits
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-06-21 19:37:03 -05:00
|
|
|
#[deriving(Eq,IterBytes)]
|
2013-01-30 13:46:19 -06:00
|
|
|
pub struct mono_id_ {
|
2013-09-01 20:45:37 -05:00
|
|
|
def: ast::DefId,
|
2013-07-15 19:26:56 -05:00
|
|
|
params: ~[mono_param_id]
|
2013-01-06 13:16:14 -06:00
|
|
|
}
|
2012-08-27 18:26:35 -05:00
|
|
|
|
2013-01-30 13:46:19 -06:00
|
|
|
pub type mono_id = @mono_id_;
|
2012-08-27 18:26:35 -05:00
|
|
|
|
2013-07-17 05:12:08 -05:00
|
|
|
pub fn umax(cx: @mut Block, a: ValueRef, b: ValueRef) -> ValueRef {
|
2012-02-01 04:04:56 -06:00
|
|
|
let cond = build::ICmp(cx, lib::llvm::IntULT, a, b);
|
2012-08-01 19:30:05 -05:00
|
|
|
return build::Select(cx, cond, b, a);
|
2012-01-19 12:21:42 -06:00
|
|
|
}
|
|
|
|
|
2013-07-17 05:12:08 -05:00
|
|
|
pub fn umin(cx: @mut Block, a: ValueRef, b: ValueRef) -> ValueRef {
|
2012-02-01 04:04:56 -06:00
|
|
|
let cond = build::ICmp(cx, lib::llvm::IntULT, a, b);
|
2012-08-01 19:30:05 -05:00
|
|
|
return build::Select(cx, cond, a, b);
|
2012-01-19 12:21:42 -06:00
|
|
|
}
|
|
|
|
|
2013-07-17 05:12:08 -05:00
|
|
|
pub fn align_to(cx: @mut Block, off: ValueRef, align: ValueRef) -> ValueRef {
|
2012-02-21 07:20:18 -06:00
|
|
|
let mask = build::Sub(cx, align, C_int(cx.ccx(), 1));
|
2012-01-27 06:17:06 -06:00
|
|
|
let bumped = build::Add(cx, off, mask);
|
2012-08-01 19:30:05 -05:00
|
|
|
return build::And(cx, bumped, build::Not(cx, mask));
|
2012-01-19 12:21:42 -06:00
|
|
|
}
|
|
|
|
|
2013-03-10 10:02:16 -05:00
|
|
|
// Renders an AST-map path as a `::`-separated string.
pub fn path_str(sess: session::Session, p: &[path_elt]) -> ~str {
    let mut out = ~"";
    let mut need_sep = false;
    for elt in p.iter() {
        match *elt {
            ast_map::path_name(s) | ast_map::path_mod(s) |
            ast_map::path_pretty_name(s, _) => {
                if need_sep {
                    out.push_str("::")
                } else {
                    need_sep = true
                }
                out.push_str(sess.str_of(s));
            }
        }
    }
    out
}
|
|
|
|
|
2013-09-30 12:37:22 -05:00
|
|
|
// Applies the enclosing function's type-parameter substitutions to `t`.
// When no substitutions are in scope, `t` must already be fully concrete.
pub fn monomorphize_type(bcx: &mut Block, t: ty::t) -> ty::t {
    match bcx.fcx.param_substs {
        Some(substs) => {
            ty::subst_tps(bcx.tcx(), substs.tys, substs.self_ty, t)
        }
        _ => {
            // Nothing to substitute with, so there had better be
            // nothing left to substitute.
            assert!(!ty::type_has_params(t));
            assert!(!ty::type_has_self(t));
            t
        }
    }
}
|
|
|
|
|
2013-09-30 12:37:22 -05:00
|
|
|
pub fn node_id_type(bcx: &mut Block, id: ast::NodeId) -> ty::t {
|
2012-02-21 07:20:18 -06:00
|
|
|
let tcx = bcx.tcx();
|
2012-02-02 05:37:17 -06:00
|
|
|
let t = ty::node_id_to_type(tcx, id);
|
2012-09-11 23:25:01 -05:00
|
|
|
monomorphize_type(bcx, t)
|
2012-02-02 05:37:17 -06:00
|
|
|
}
|
2012-09-10 14:25:45 -05:00
|
|
|
|
2013-09-30 12:37:22 -05:00
|
|
|
pub fn expr_ty(bcx: &mut Block, ex: &ast::Expr) -> ty::t {
|
2012-02-02 05:37:17 -06:00
|
|
|
node_id_type(bcx, ex.id)
|
|
|
|
}
|
2012-09-10 14:25:45 -05:00
|
|
|
|
2013-09-30 12:37:22 -05:00
|
|
|
pub fn expr_ty_adjusted(bcx: &mut Block, ex: &ast::Expr) -> ty::t {
|
2013-02-27 18:28:37 -06:00
|
|
|
let tcx = bcx.tcx();
|
|
|
|
let t = ty::expr_ty_adjusted(tcx, ex);
|
|
|
|
monomorphize_type(bcx, t)
|
|
|
|
}
|
|
|
|
|
2013-09-30 12:37:22 -05:00
|
|
|
// The type parameters applied at node `id`, monomorphized against the
// enclosing function's substitutions. ICEs if any parameter still
// contains inference types.
pub fn node_id_type_params(bcx: &mut Block, id: ast::NodeId) -> ~[ty::t] {
    let tcx = bcx.tcx();
    let params = ty::node_id_to_type_params(tcx, id);

    if params.iter().any(|t| ty::type_needs_infer(*t)) {
        bcx.sess().bug(
            format!("Type parameters for node {} include inference types: {}",
                 id, params.map(|t| bcx.ty_to_str(*t)).connect(",")));
    }

    match bcx.fcx.param_substs {
        Some(substs) => {
            do params.iter().map |t| {
                ty::subst_tps(tcx, substs.tys, substs.self_ty, *t)
            }.collect()
        }
        _ => params
    }
}
|
2012-02-02 05:37:17 -06:00
|
|
|
|
2013-07-27 03:25:59 -05:00
|
|
|
pub fn node_vtables(bcx: @mut Block, id: ast::NodeId)
|
2013-01-30 13:46:19 -06:00
|
|
|
-> Option<typeck::vtable_res> {
|
2013-02-05 21:41:45 -06:00
|
|
|
let raw_vtables = bcx.ccx().maps.vtable_map.find(&id);
|
2013-09-20 01:08:47 -05:00
|
|
|
raw_vtables.map(|vts| resolve_vtables_in_fn_ctxt(bcx.fcx, *vts))
|
2012-09-10 14:25:45 -05:00
|
|
|
}
|
|
|
|
|
2013-08-13 15:22:58 -05:00
|
|
|
// Apply the typaram substitutions in the FunctionContext to some
|
|
|
|
// vtables. This should eliminate any vtable_params.
|
2013-07-17 05:12:08 -05:00
|
|
|
pub fn resolve_vtables_in_fn_ctxt(fcx: &FunctionContext, vts: typeck::vtable_res)
|
2013-01-30 13:46:19 -06:00
|
|
|
-> typeck::vtable_res {
|
2013-06-28 13:18:09 -05:00
|
|
|
resolve_vtables_under_param_substs(fcx.ccx.tcx,
|
|
|
|
fcx.param_substs,
|
|
|
|
vts)
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn resolve_vtables_under_param_substs(tcx: ty::ctxt,
|
|
|
|
param_substs: Option<@param_substs>,
|
|
|
|
vts: typeck::vtable_res)
|
|
|
|
-> typeck::vtable_res {
|
2013-08-09 22:09:47 -05:00
|
|
|
@vts.iter().map(|ds|
|
2013-07-22 18:40:31 -05:00
|
|
|
resolve_param_vtables_under_param_substs(tcx,
|
|
|
|
param_substs,
|
|
|
|
*ds))
|
|
|
|
.collect()
|
2012-09-10 14:25:45 -05:00
|
|
|
}
|
|
|
|
|
2013-07-22 18:40:31 -05:00
|
|
|
pub fn resolve_param_vtables_under_param_substs(
|
|
|
|
tcx: ty::ctxt,
|
|
|
|
param_substs: Option<@param_substs>,
|
|
|
|
ds: typeck::vtable_param_res)
|
|
|
|
-> typeck::vtable_param_res {
|
2013-08-09 22:09:47 -05:00
|
|
|
@ds.iter().map(
|
2013-07-22 18:40:31 -05:00
|
|
|
|d| resolve_vtable_under_param_substs(tcx,
|
|
|
|
param_substs,
|
|
|
|
d))
|
|
|
|
.collect()
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-06-28 13:18:09 -05:00
|
|
|
|
|
|
|
pub fn resolve_vtable_under_param_substs(tcx: ty::ctxt,
|
|
|
|
param_substs: Option<@param_substs>,
|
2013-07-02 14:47:32 -05:00
|
|
|
vt: &typeck::vtable_origin)
|
|
|
|
-> typeck::vtable_origin {
|
|
|
|
match *vt {
|
|
|
|
typeck::vtable_static(trait_id, ref tys, sub) => {
|
2013-06-28 13:18:09 -05:00
|
|
|
let tys = match param_substs {
|
2012-09-10 14:25:45 -05:00
|
|
|
Some(substs) => {
|
2013-08-09 22:09:47 -05:00
|
|
|
do tys.iter().map |t| {
|
2012-10-08 14:39:30 -05:00
|
|
|
ty::subst_tps(tcx, substs.tys, substs.self_ty, *t)
|
2013-06-29 00:05:50 -05:00
|
|
|
}.collect()
|
2012-09-10 14:25:45 -05:00
|
|
|
}
|
2013-07-02 14:47:32 -05:00
|
|
|
_ => tys.to_owned()
|
2012-09-10 14:25:45 -05:00
|
|
|
};
|
2013-06-28 13:18:09 -05:00
|
|
|
typeck::vtable_static(
|
|
|
|
trait_id, tys,
|
|
|
|
resolve_vtables_under_param_substs(tcx, param_substs, sub))
|
2012-09-10 14:25:45 -05:00
|
|
|
}
|
|
|
|
typeck::vtable_param(n_param, n_bound) => {
|
2013-06-28 13:18:09 -05:00
|
|
|
match param_substs {
|
2013-02-18 14:36:30 -06:00
|
|
|
Some(substs) => {
|
2012-09-10 14:25:45 -05:00
|
|
|
find_vtable(tcx, substs, n_param, n_bound)
|
|
|
|
}
|
|
|
|
_ => {
|
2013-09-28 00:38:08 -05:00
|
|
|
tcx.sess.bug(format!(
|
2013-08-13 15:22:58 -05:00
|
|
|
"resolve_vtable_under_param_substs: asked to lookup \
|
|
|
|
but no vtables in the fn_ctxt!"))
|
2012-09-10 14:25:45 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-07-02 14:47:32 -05:00
|
|
|
pub fn find_vtable(tcx: ty::ctxt,
|
|
|
|
ps: ¶m_substs,
|
2013-07-22 19:42:45 -05:00
|
|
|
n_param: typeck::param_index,
|
2013-07-02 14:47:32 -05:00
|
|
|
n_bound: uint)
|
|
|
|
-> typeck::vtable_origin {
|
2013-10-21 15:08:31 -05:00
|
|
|
debug!("find_vtable(n_param={:?}, n_bound={}, ps={})",
|
Cleanup substitutions and treatment of generics around traits in a number of ways.
- In a TraitRef, use the self type consistently to refer to the Self type:
- trait ref in `impl Trait<A,B,C> for S` has a self type of `S`.
- trait ref in `A:Trait` has the self type `A`
- trait ref associated with a trait decl has self type `Self`
- trait ref associated with a supertype has self type `Self`
- trait ref in an object type `@Trait` has no self type
- Rewrite `each_bound_traits_and_supertraits` to perform
substitutions as it goes, and thus yield a series of trait refs
that are always in the same 'namespace' as the type parameter
bound given as input. Before, we left this to the caller, but
this doesn't work because the caller lacks adequare information
to perform the type substitutions correctly.
- For provided methods, substitute the generics involved in the provided
method correctly.
- Introduce TypeParameterDef, which tracks the bounds declared on a type
parameter and brings them together with the def_id and (in the future)
other information (maybe even the parameter's name!).
- Introduce Subst trait, which helps to cleanup a lot of the
repetitive code involved with doing type substitution.
- Introduce Repr trait, which makes debug printouts far more convenient.
Fixes #4183. Needed for #5656.
2013-04-09 00:54:49 -05:00
|
|
|
n_param, n_bound, ps.repr(tcx));
|
2012-09-10 14:25:45 -05:00
|
|
|
|
2013-07-22 19:42:45 -05:00
|
|
|
let param_bounds = match n_param {
|
|
|
|
typeck::param_self => ps.self_vtables.expect("self vtables missing"),
|
|
|
|
typeck::param_numbered(n) => {
|
|
|
|
let tables = ps.vtables
|
|
|
|
.expect("vtables missing where they are needed");
|
|
|
|
tables[n]
|
|
|
|
}
|
|
|
|
};
|
2013-07-18 19:20:58 -05:00
|
|
|
param_bounds[n_bound].clone()
|
2012-09-10 14:25:45 -05:00
|
|
|
}
|
|
|
|
|
2013-04-17 11:15:37 -05:00
|
|
|
pub fn dummy_substs(tps: ~[ty::t]) -> ty::substs {
|
2013-01-25 18:57:39 -06:00
|
|
|
substs {
|
2013-07-24 15:52:57 -05:00
|
|
|
regions: ty::ErasedRegions,
|
2013-01-25 18:57:39 -06:00
|
|
|
self_ty: None,
|
|
|
|
tps: tps
|
|
|
|
}
|
2012-04-18 23:26:25 -05:00
|
|
|
}
|
|
|
|
|
2013-07-17 05:12:08 -05:00
|
|
|
pub fn filename_and_line_num_from_span(bcx: @mut Block,
|
2013-08-31 11:13:04 -05:00
|
|
|
span: Span) -> (ValueRef, ValueRef) {
|
2013-05-04 13:29:32 -05:00
|
|
|
let loc = bcx.sess().parse_sess.cm.lookup_char_pos(span.lo);
|
2013-06-12 12:02:55 -05:00
|
|
|
let filename_cstr = C_cstr(bcx.ccx(), loc.file.name);
|
2013-06-15 22:45:48 -05:00
|
|
|
let filename = build::PointerCast(bcx, filename_cstr, Type::i8p());
|
2013-05-04 13:29:32 -05:00
|
|
|
let line = C_int(bcx.ccx(), loc.line as int);
|
|
|
|
(filename, line)
|
|
|
|
}
|
|
|
|
|
2013-02-06 16:28:02 -06:00
|
|
|
// Casts a Rust bool value to an i1.
|
2013-07-17 05:12:08 -05:00
|
|
|
pub fn bool_to_i1(bcx: @mut Block, llval: ValueRef) -> ValueRef {
|
2013-02-06 16:28:02 -06:00
|
|
|
build::ICmp(bcx, lib::llvm::IntNE, llval, C_bool(false))
|
|
|
|
}
|
2013-07-15 22:42:13 -05:00
|
|
|
|
2013-08-31 11:13:04 -05:00
|
|
|
pub fn langcall(bcx: @mut Block, span: Option<Span>, msg: &str,
|
2013-09-01 20:45:37 -05:00
|
|
|
li: LangItem) -> ast::DefId {
|
2013-07-15 22:42:13 -05:00
|
|
|
match bcx.tcx().lang_items.require(li) {
|
|
|
|
Ok(id) => id,
|
|
|
|
Err(s) => {
|
2013-09-28 00:38:08 -05:00
|
|
|
let msg = format!("{} {}", msg, s);
|
2013-07-15 22:42:13 -05:00
|
|
|
match span {
|
|
|
|
Some(span) => { bcx.tcx().sess.span_fatal(span, msg); }
|
|
|
|
None => { bcx.tcx().sess.fatal(msg); }
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|