2015-02-28 15:53:12 -06:00
|
|
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
|
|
|
|
// file at the top-level directory of this distribution and at
|
|
|
|
// http://rust-lang.org/COPYRIGHT.
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
|
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
|
|
|
// option. This file may not be copied, modified, or distributed
|
|
|
|
// except according to those terms.
|
|
|
|
//! Set and unset common attributes on LLVM values.
|
|
|
|
|
2015-03-03 17:03:25 -06:00
|
|
|
use libc::{c_uint, c_ulonglong};
|
2015-02-28 15:53:12 -06:00
|
|
|
use llvm::{self, ValueRef, AttrHelper};
|
2015-06-30 04:18:03 -05:00
|
|
|
use middle::ty;
|
2015-06-28 00:04:15 -05:00
|
|
|
use middle::infer;
|
rustc: Update LLVM
This commit updates the LLVM submodule in use to the current HEAD of the LLVM
repository. This is primarily being done to start picking up unwinding support
for MSVC, which is currently unimplemented in the revision of LLVM we are using.
Along the way a few changes had to be made:
* As usual, lots of C++ debuginfo bindings in LLVM changed, so there were some
significant changes to our RustWrapper.cpp
* As usual, some pass management changed in LLVM, so clang was re-scrutinized to
ensure that we're doing the same thing as clang.
* Some optimization options are now passed directly into the
`PassManagerBuilder` instead of through CLI switches to LLVM.
* The `NoFramePointerElim` option was removed from LLVM, favoring instead the
`no-frame-pointer-elim` function attribute instead.
Additionally, LLVM has picked up some new optimizations which required fixing an
existing soundness hole in the IR we generate. It appears that the current LLVM
we use does not expose this hole. When an enum is moved, the previous slot in
memory is overwritten with a bit pattern corresponding to "dropped". When the
drop glue for this slot is run, however, the switch on the discriminant can
often start executing the `unreachable` block of the switch due to the
discriminant now being outside the normal range. This was patched over locally
for now by having the `unreachable` block just change to a `ret void`.
2015-05-14 14:10:43 -05:00
|
|
|
use session::config::NoDebugInfo;
|
2015-03-03 17:03:25 -06:00
|
|
|
use syntax::abi;
|
2015-02-28 15:53:12 -06:00
|
|
|
use syntax::ast;
|
2015-03-03 17:03:25 -06:00
|
|
|
pub use syntax::attr::InlineAttr;
|
|
|
|
use trans::base;
|
|
|
|
use trans::common;
|
2015-02-28 15:53:12 -06:00
|
|
|
use trans::context::CrateContext;
|
2015-03-03 17:03:25 -06:00
|
|
|
use trans::machine;
|
|
|
|
use trans::type_of;
|
2015-02-28 15:53:12 -06:00
|
|
|
|
|
|
|
/// Mark LLVM function to use provided inline heuristic.
|
|
|
|
#[inline]
|
|
|
|
pub fn inline(val: ValueRef, inline: InlineAttr) {
|
2015-03-03 17:03:25 -06:00
|
|
|
use self::InlineAttr::*;
|
2015-02-28 15:53:12 -06:00
|
|
|
match inline {
|
2015-04-29 17:00:20 -05:00
|
|
|
Hint => llvm::SetFunctionAttribute(val, llvm::Attribute::InlineHint),
|
|
|
|
Always => llvm::SetFunctionAttribute(val, llvm::Attribute::AlwaysInline),
|
|
|
|
Never => llvm::SetFunctionAttribute(val, llvm::Attribute::NoInline),
|
2015-03-03 17:03:25 -06:00
|
|
|
None => {
|
2015-04-29 17:00:20 -05:00
|
|
|
let attr = llvm::Attribute::InlineHint |
|
|
|
|
llvm::Attribute::AlwaysInline |
|
|
|
|
llvm::Attribute::NoInline;
|
2015-02-28 15:53:12 -06:00
|
|
|
unsafe {
|
|
|
|
llvm::LLVMRemoveFunctionAttr(val, attr.bits() as c_ulonglong)
|
|
|
|
}
|
|
|
|
},
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function.
|
|
|
|
#[inline]
|
|
|
|
pub fn emit_uwtable(val: ValueRef, emit: bool) {
|
|
|
|
if emit {
|
2015-04-29 17:00:20 -05:00
|
|
|
llvm::SetFunctionAttribute(val, llvm::Attribute::UWTable);
|
2015-02-28 15:53:12 -06:00
|
|
|
} else {
|
|
|
|
unsafe {
|
2015-04-28 18:36:22 -05:00
|
|
|
llvm::LLVMRemoveFunctionAttr(
|
|
|
|
val,
|
2015-04-29 17:00:20 -05:00
|
|
|
llvm::Attribute::UWTable.bits() as c_ulonglong,
|
2015-04-28 18:36:22 -05:00
|
|
|
);
|
2015-02-28 15:53:12 -06:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Tell LLVM whether the function can or cannot unwind.
|
|
|
|
#[inline]
|
|
|
|
#[allow(dead_code)] // possibly useful function
|
|
|
|
pub fn unwind(val: ValueRef, can_unwind: bool) {
|
|
|
|
if can_unwind {
|
|
|
|
unsafe {
|
2015-04-28 18:36:22 -05:00
|
|
|
llvm::LLVMRemoveFunctionAttr(
|
|
|
|
val,
|
2015-04-29 17:00:20 -05:00
|
|
|
llvm::Attribute::NoUnwind.bits() as c_ulonglong,
|
2015-04-28 18:36:22 -05:00
|
|
|
);
|
2015-02-28 15:53:12 -06:00
|
|
|
}
|
|
|
|
} else {
|
2015-04-29 17:00:20 -05:00
|
|
|
llvm::SetFunctionAttribute(val, llvm::Attribute::NoUnwind);
|
2015-02-28 15:53:12 -06:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Tell LLVM whether it should optimise function for size.
|
|
|
|
#[inline]
|
|
|
|
#[allow(dead_code)] // possibly useful function
|
|
|
|
pub fn set_optimize_for_size(val: ValueRef, optimize: bool) {
|
|
|
|
if optimize {
|
2015-04-29 17:00:20 -05:00
|
|
|
llvm::SetFunctionAttribute(val, llvm::Attribute::OptimizeForSize);
|
2015-02-28 15:53:12 -06:00
|
|
|
} else {
|
|
|
|
unsafe {
|
2015-04-28 18:36:22 -05:00
|
|
|
llvm::LLVMRemoveFunctionAttr(
|
|
|
|
val,
|
2015-04-29 17:00:20 -05:00
|
|
|
llvm::Attribute::OptimizeForSize.bits() as c_ulonglong,
|
2015-04-28 18:36:22 -05:00
|
|
|
);
|
2015-02-28 15:53:12 -06:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Composite function which sets LLVM attributes for function depending on its AST (#[attribute])
/// attributes.
///
/// Applies, in order:
/// 1. the inlining hint derived from `#[inline(...)]` on `attrs`,
/// 2. the `"no-frame-pointer-elim"` string attribute when debuginfo is on or
///    the target forbids frame-pointer elimination,
/// 3. `cold` / `noalias`-on-return for `#[cold]` / `#[allocator]` markers.
pub fn from_fn_attrs(ccx: &CrateContext, attrs: &[ast::Attribute], llfn: ValueRef) {
    use syntax::attr::*;
    inline(llfn, find_inline_attr(Some(ccx.sess().diagnostic()), attrs));

    // FIXME: #11906: Omitting frame pointers breaks retrieving the value of a
    // parameter.
    let no_fp_elim = (ccx.sess().opts.debuginfo != NoDebugInfo) ||
                     !ccx.sess().target.target.options.eliminate_frame_pointer;
    if no_fp_elim {
        unsafe {
            // LLVM expects NUL-terminated C strings; the embedded `\0` in the
            // literals below provides the terminator without an allocation.
            let attr = "no-frame-pointer-elim\0".as_ptr() as *const _;
            let val = "true\0".as_ptr() as *const _;
            llvm::LLVMAddFunctionAttrStringValue(llfn,
                                                 llvm::FunctionIndex as c_uint,
                                                 attr, val);
        }
    }

    for attr in attrs {
        if attr.check_name("cold") {
            // `#[cold]`: tell LLVM this function is rarely called so its call
            // sites are laid out off the hot path.
            unsafe {
                llvm::LLVMAddFunctionAttribute(llfn,
                                               llvm::FunctionIndex as c_uint,
                                               llvm::ColdAttribute as u64)
            }
        } else if attr.check_name("allocator") {
            // `#[allocator]`: freshly allocated memory never aliases anything
            // the caller can already see, so mark the return value `noalias`.
            llvm::Attribute::NoAlias.apply_llfn(llvm::ReturnIndex as c_uint, llfn);
        }
    }
}
|
2015-03-03 17:03:25 -06:00
|
|
|
|
|
|
|
/// Composite function which converts function type into LLVM attributes for the function.
///
/// Walks the (normalized) signature of a bare fn or closure and records
/// per-argument and per-return attributes (`sret`, `noalias`, `nocapture`,
/// `readonly`, `zext`, `dereferenceable`, `nonnull`) into an `AttrBuilder`.
/// LLVM attribute indices: 0 is the return value, arguments start at 1; an
/// out-pointer return and fat pointers shift the running index (see `idx`).
pub fn from_fn_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_type: ty::Ty<'tcx>)
                              -> llvm::AttrBuilder {
    use middle::ty::{BrAnon, ReLateBound};

    // Declared outside the match so the closure's normalized type lives long
    // enough for `fn_sig` to borrow from it.
    let function_type;
    let (fn_sig, abi, env_ty) = match fn_type.sty {
        ty::TyBareFn(_, ref f) => (&f.sig, f.abi, None),
        ty::TyClosure(closure_did, ref substs) => {
            let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables);
            function_type = infcx.closure_type(closure_did, substs);
            let self_type = base::self_type_for_closure(ccx, closure_did, fn_type);
            // Closures always use the rust-call ABI and take their environment
            // as an implicit leading argument.
            (&function_type.sig, abi::RustCall, Some(self_type))
        }
        _ => ccx.sess().bug("expected closure or function.")
    };

    let fn_sig = ccx.tcx().erase_late_bound_regions(fn_sig);

    let mut attrs = llvm::AttrBuilder::new();
    let ret_ty = fn_sig.output;

    // These have an odd calling convention, so we need to manually
    // unpack the input ty's
    let input_tys = match fn_type.sty {
        ty::TyClosure(..) => {
            assert!(abi == abi::RustCall);

            // rust-call closures: args are a single tuple; flatten it and
            // prepend the environment pointer.
            match fn_sig.inputs[0].sty {
                ty::TyTuple(ref inputs) => {
                    let mut full_inputs = vec![env_ty.expect("Missing closure environment")];
                    full_inputs.push_all(inputs);
                    full_inputs
                }
                _ => ccx.sess().bug("expected tuple'd inputs")
            }
        },
        ty::TyBareFn(..) if abi == abi::RustCall => {
            // rust-call bare fns: first input is `self`, second is the tuple
            // of the remaining args; flatten the tuple after `self`.
            let mut inputs = vec![fn_sig.inputs[0]];

            match fn_sig.inputs[1].sty {
                ty::TyTuple(ref t_in) => {
                    inputs.push_all(&t_in[..]);
                    inputs
                }
                _ => ccx.sess().bug("expected tuple'd inputs")
            }
        }
        _ => fn_sig.inputs.clone()
    };

    // Index 0 is the return value of the llvm func, so we start at 1
    let mut idx = 1;
    if let ty::FnConverging(ret_ty) = ret_ty {
        // A function pointer is called without the declaration
        // available, so we have to apply any attributes with ABI
        // implications directly to the call instruction. Right now,
        // the only attribute we need to worry about is `sret`.
        if type_of::return_uses_outptr(ccx, ret_ty) {
            let llret_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, ret_ty));

            // The outptr can be noalias and nocapture because it's entirely
            // invisible to the program. We also know it's nonnull as well
            // as how many bytes we can dereference
            attrs.arg(1, llvm::Attribute::StructRet)
                 .arg(1, llvm::Attribute::NoAlias)
                 .arg(1, llvm::Attribute::NoCapture)
                 .arg(1, llvm::DereferenceableAttribute(llret_sz));

            // Add one more since there's an outptr
            idx += 1;
        } else {
            // The `noalias` attribute on the return value is useful to a
            // function ptr caller.
            match ret_ty.sty {
                // `Box` pointer return values never alias because ownership
                // is transferred
                ty::TyBox(it) if common::type_is_sized(ccx.tcx(), it) => {
                    attrs.ret(llvm::Attribute::NoAlias);
                }
                _ => {}
            }

            // We can also mark the return value as `dereferenceable` in certain cases
            match ret_ty.sty {
                // These are not really pointers but pairs, (pointer, len)
                ty::TyRef(_, ty::TypeAndMut { ty: inner, .. })
                | ty::TyBox(inner) if common::type_is_sized(ccx.tcx(), inner) => {
                    let llret_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, inner));
                    attrs.ret(llvm::DereferenceableAttribute(llret_sz));
                }
                _ => {}
            }

            // `bool` is an `i1` in LLVM; zero-extend it at the ABI boundary.
            if let ty::TyBool = ret_ty.sty {
                attrs.ret(llvm::Attribute::ZExt);
            }
        }
    }

    for &t in input_tys.iter() {
        match t.sty {
            _ if type_of::arg_is_indirect(ccx, t) => {
                let llarg_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, t));

                // For non-immediate arguments the callee gets its own copy of
                // the value on the stack, so there are no aliases. It's also
                // program-invisible so can't possibly capture
                attrs.arg(idx, llvm::Attribute::NoAlias)
                     .arg(idx, llvm::Attribute::NoCapture)
                     .arg(idx, llvm::DereferenceableAttribute(llarg_sz));
            }

            // See the return-value case above: `bool` is `i1`, zext it.
            ty::TyBool => {
                attrs.arg(idx, llvm::Attribute::ZExt);
            }

            // `Box` pointer parameters never alias because ownership is transferred
            ty::TyBox(inner) => {
                attrs.arg(idx, llvm::Attribute::NoAlias);

                if common::type_is_sized(ccx.tcx(), inner) {
                    let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, inner));
                    attrs.arg(idx, llvm::DereferenceableAttribute(llsz));
                } else {
                    // Unsized payload: can't state a byte count, but the data
                    // pointer is still nonnull; for trait objects the vtable
                    // pointer (the second half of the fat pointer, at idx + 1)
                    // is nonnull too.
                    attrs.arg(idx, llvm::NonNullAttribute);
                    if inner.is_trait() {
                        attrs.arg(idx + 1, llvm::NonNullAttribute);
                    }
                }
            }

            ty::TyRef(b, mt) => {
                // `&mut` pointer parameters never alias other parameters, or mutable global data
                //
                // `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as
                // both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely
                // on memory dependencies rather than pointer equality
                let interior_unsafe = mt.ty.type_contents(ccx.tcx()).interior_unsafe();

                if mt.mutbl == ast::MutMutable || !interior_unsafe {
                    attrs.arg(idx, llvm::Attribute::NoAlias);
                }

                if mt.mutbl == ast::MutImmutable && !interior_unsafe {
                    attrs.arg(idx, llvm::Attribute::ReadOnly);
                }

                // & pointer parameters are also never null and for sized types we also know
                // exactly how many bytes we can dereference
                if common::type_is_sized(ccx.tcx(), mt.ty) {
                    let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, mt.ty));
                    attrs.arg(idx, llvm::DereferenceableAttribute(llsz));
                } else {
                    // Fat &-pointer: nonnull data pointer, and a nonnull
                    // vtable pointer at idx + 1 for trait objects.
                    attrs.arg(idx, llvm::NonNullAttribute);
                    if mt.ty.is_trait() {
                        attrs.arg(idx + 1, llvm::NonNullAttribute);
                    }
                }

                // When a reference in an argument has no named lifetime, it's
                // impossible for that reference to escape this function
                // (returned or stored beyond the call by a closure).
                if let ReLateBound(_, BrAnon(_)) = *b {
                    attrs.arg(idx, llvm::Attribute::NoCapture);
                }
            }

            _ => ()
        }

        // Fat pointers are passed as two LLVM arguments (data + extra), so
        // they consume two attribute slots.
        if common::type_is_fat_ptr(ccx.tcx(), t) {
            idx += 2;
        } else {
            idx += 1;
        }
    }

    attrs
}
|