rust/src/librustc_trans/trans/attributes.rs

// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Set and unset common attributes on LLVM values.

use libc::{c_uint, c_ulonglong};
use llvm::{self, ValueRef, AttrHelper};
use middle::ty;
use middle::infer;
use session::config::NoDebugInfo;
use syntax::abi;
use syntax::ast;
pub use syntax::attr::InlineAttr;
use trans::base;
use trans::common;
use trans::context::CrateContext;
use trans::machine;
use trans::type_of;

/// Mark the LLVM function to use the provided inline heuristic.
#[inline]
pub fn inline(val: ValueRef, inline: InlineAttr) {
    use self::InlineAttr::*;
    match inline {
        Hint => llvm::SetFunctionAttribute(val, llvm::Attribute::InlineHint),
        Always => llvm::SetFunctionAttribute(val, llvm::Attribute::AlwaysInline),
        Never => llvm::SetFunctionAttribute(val, llvm::Attribute::NoInline),
        None => {
            let attr = llvm::Attribute::InlineHint |
                       llvm::Attribute::AlwaysInline |
                       llvm::Attribute::NoInline;
            unsafe {
                llvm::LLVMRemoveFunctionAttr(val, attr.bits() as c_ulonglong)
            }
        },
    };
}
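
// Illustrative usage sketch, not part of the original source: how a caller that
// has just declared an LLVM function value might apply an inline hint. The
// function name and the `llfn` value are assumptions for illustration only.
#[allow(dead_code)]
fn example_apply_inline_hint(llfn: ValueRef) {
    // Clear any previously applied inline attributes, then ask LLVM to
    // consider inlining via `inlinehint`.
    inline(llfn, InlineAttr::None);
    inline(llfn, InlineAttr::Hint);
}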

/// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function.
#[inline]
pub fn emit_uwtable(val: ValueRef, emit: bool) {
    if emit {
        llvm::SetFunctionAttribute(val, llvm::Attribute::UWTable);
    } else {
        unsafe {
            llvm::LLVMRemoveFunctionAttr(
                val,
                llvm::Attribute::UWTable.bits() as c_ulonglong,
            );
        }
    }
}
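
// Illustrative usage sketch (an assumption, not part of the original source):
// callers typically request unwind tables whenever landing pads may be needed,
// e.g. `emit_uwtable(llfn, true)` right after declaring `llfn`, and clear the
// attribute again for functions known never to unwind.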

/// Tell LLVM whether the function can or cannot unwind.
#[inline]
#[allow(dead_code)] // possibly useful function
pub fn unwind(val: ValueRef, can_unwind: bool) {
    if can_unwind {
        unsafe {
            llvm::LLVMRemoveFunctionAttr(
                val,
                llvm::Attribute::NoUnwind.bits() as c_ulonglong,
            );
        }
    } else {
        llvm::SetFunctionAttribute(val, llvm::Attribute::NoUnwind);
    }
}
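
// Illustrative sketch, not part of the original source: marking a function as
// non-unwinding usually goes together with dropping its unwind table. `llfn`
// is an assumed, already-declared LLVM function value.
#[allow(dead_code)]
fn example_mark_nounwind(llfn: ValueRef) {
    // `nounwind` promises LLVM that no exceptions propagate out of the function...
    unwind(llfn, false);
    // ...so the unwind table is not needed either.
    emit_uwtable(llfn, false);
}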

/// Tell LLVM whether it should optimize the function for size.
#[inline]
#[allow(dead_code)] // possibly useful function
pub fn set_optimize_for_size(val: ValueRef, optimize: bool) {
    if optimize {
        llvm::SetFunctionAttribute(val, llvm::Attribute::OptimizeForSize);
    } else {
        unsafe {
            llvm::LLVMRemoveFunctionAttr(
                val,
                llvm::Attribute::OptimizeForSize.bits() as c_ulonglong,
            );
        }
    }
}
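
// Illustrative sketch (an assumption, not part of the original source): a
// size-sensitive caller could combine the helpers above, preferring size over
// speed while still letting LLVM inline where it sees fit.
#[allow(dead_code)]
fn example_prefer_size(llfn: ValueRef) {
    set_optimize_for_size(llfn, true);
    inline(llfn, InlineAttr::Hint);
}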

/// Composite function which sets LLVM attributes for the function depending on its
/// AST (`#[attribute]`) attributes.
pub fn from_fn_attrs(ccx: &CrateContext, attrs: &[ast::Attribute], llfn: ValueRef) {
    use syntax::attr::*;
    inline(llfn, find_inline_attr(Some(ccx.sess().diagnostic()), attrs));

    // FIXME: #11906: Omitting frame pointers breaks retrieving the value of a
    // parameter.
    let no_fp_elim = (ccx.sess().opts.debuginfo != NoDebugInfo) ||
                     !ccx.sess().target.target.options.eliminate_frame_pointer;
    if no_fp_elim {
        unsafe {
            let attr = "no-frame-pointer-elim\0".as_ptr() as *const _;
            let val = "true\0".as_ptr() as *const _;
            llvm::LLVMAddFunctionAttrStringValue(llfn,
                                                 llvm::FunctionIndex as c_uint,
                                                 attr, val);
        }
    }

    for attr in attrs {
        if attr.check_name("cold") {
            unsafe {
                llvm::LLVMAddFunctionAttribute(llfn,
                                               llvm::FunctionIndex as c_uint,
                                               llvm::ColdAttribute as u64)
            }
        } else if attr.check_name("allocator") {
            llvm::Attribute::NoAlias.apply_llfn(llvm::ReturnIndex as c_uint, llfn);
        }
    }
}
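
// Illustrative sketch (an assumption, not part of the original source): for an
// item such as
//
//     #[cold]
//     #[inline(never)]
//     fn abort_on_oom() { ... }
//
// `from_fn_attrs` would mark `llfn` as `noinline` (via `inline` above) and
// `cold`, and, when debuginfo is enabled or the target keeps frame pointers,
// also attach the "no-frame-pointer-elim"="true" string attribute.
// `abort_on_oom` is a hypothetical item used only for illustration.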

/// Composite function which converts the function type into LLVM attributes for the function.
pub fn from_fn_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_type: ty::Ty<'tcx>)
                              -> llvm::AttrBuilder {
    use middle::ty::{BrAnon, ReLateBound};

    let function_type;
    let (fn_sig, abi, env_ty) = match fn_type.sty {
        ty::TyBareFn(_, ref f) => (&f.sig, f.abi, None),
        ty::TyClosure(closure_did, ref substs) => {
            let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables);
            function_type = infcx.closure_type(closure_did, substs);
            let self_type = base::self_type_for_closure(ccx, closure_did, fn_type);
            (&function_type.sig, abi::RustCall, Some(self_type))
        }
        _ => ccx.sess().bug("expected closure or function.")
    };
    let fn_sig = ccx.tcx().erase_late_bound_regions(fn_sig);

    let mut attrs = llvm::AttrBuilder::new();
    let ret_ty = fn_sig.output;

    // These have an odd calling convention, so we need to manually
    // unpack the input ty's
    let input_tys = match fn_type.sty {
        ty::TyClosure(..) => {
            assert!(abi == abi::RustCall);

            match fn_sig.inputs[0].sty {
                ty::TyTuple(ref inputs) => {
                    let mut full_inputs = vec![env_ty.expect("Missing closure environment")];
                    full_inputs.push_all(inputs);
                    full_inputs
                }
                _ => ccx.sess().bug("expected tuple'd inputs")
            }
        },
        ty::TyBareFn(..) if abi == abi::RustCall => {
            let mut inputs = vec![fn_sig.inputs[0]];

            match fn_sig.inputs[1].sty {
                ty::TyTuple(ref t_in) => {
                    inputs.push_all(&t_in[..]);
                    inputs
                }
                _ => ccx.sess().bug("expected tuple'd inputs")
            }
        }
        _ => fn_sig.inputs.clone()
    };

    // Index 0 is the return value of the llvm func, so we start at 1
    let mut idx = 1;
    if let ty::FnConverging(ret_ty) = ret_ty {
        // A function pointer is called without the declaration
        // available, so we have to apply any attributes with ABI
        // implications directly to the call instruction. Right now,
        // the only attribute we need to worry about is `sret`.
        if type_of::return_uses_outptr(ccx, ret_ty) {
            let llret_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, ret_ty));

            // The outptr can be noalias and nocapture because it's entirely
            // invisible to the program. We also know it's nonnull as well
            // as how many bytes we can dereference
            attrs.arg(1, llvm::Attribute::StructRet)
                 .arg(1, llvm::Attribute::NoAlias)
                 .arg(1, llvm::Attribute::NoCapture)
                 .arg(1, llvm::DereferenceableAttribute(llret_sz));

            // Add one more since there's an outptr
            idx += 1;
        } else {
            // The `noalias` attribute on the return value is useful to a
            // function ptr caller.
            match ret_ty.sty {
                // `Box` pointer return values never alias because ownership
                // is transferred
                ty::TyBox(it) if common::type_is_sized(ccx.tcx(), it) => {
                    attrs.ret(llvm::Attribute::NoAlias);
                }
                _ => {}
            }

            // We can also mark the return value as `dereferenceable` in certain cases
            match ret_ty.sty {
                // These are not really pointers but pairs, (pointer, len)
                ty::TyRef(_, ty::TypeAndMut { ty: inner, .. })
                | ty::TyBox(inner) if common::type_is_sized(ccx.tcx(), inner) => {
                    let llret_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, inner));
                    attrs.ret(llvm::DereferenceableAttribute(llret_sz));
                }
                _ => {}
            }

            if let ty::TyBool = ret_ty.sty {
                attrs.ret(llvm::Attribute::ZExt);
            }
        }
    }

    for &t in input_tys.iter() {
        match t.sty {
            _ if type_of::arg_is_indirect(ccx, t) => {
                let llarg_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, t));

                // For non-immediate arguments the callee gets its own copy of
                // the value on the stack, so there are no aliases. It's also
                // program-invisible so can't possibly capture
                attrs.arg(idx, llvm::Attribute::NoAlias)
                     .arg(idx, llvm::Attribute::NoCapture)
                     .arg(idx, llvm::DereferenceableAttribute(llarg_sz));
            }

            ty::TyBool => {
                attrs.arg(idx, llvm::Attribute::ZExt);
            }

            // `Box` pointer parameters never alias because ownership is transferred
            ty::TyBox(inner) => {
                attrs.arg(idx, llvm::Attribute::NoAlias);

                if common::type_is_sized(ccx.tcx(), inner) {
                    let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, inner));
                    attrs.arg(idx, llvm::DereferenceableAttribute(llsz));
                } else {
                    attrs.arg(idx, llvm::NonNullAttribute);
                    if inner.is_trait() {
                        attrs.arg(idx + 1, llvm::NonNullAttribute);
                    }
                }
            }
            ty::TyRef(b, mt) => {
                // `&mut` pointer parameters never alias other parameters, or mutable global data
                //
                // `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as
                // both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely
                // on memory dependencies rather than pointer equality
                let interior_unsafe = mt.ty.type_contents(ccx.tcx()).interior_unsafe();

                if mt.mutbl == ast::MutMutable || !interior_unsafe {
                    attrs.arg(idx, llvm::Attribute::NoAlias);
                }

                if mt.mutbl == ast::MutImmutable && !interior_unsafe {
                    attrs.arg(idx, llvm::Attribute::ReadOnly);
                }

                // & pointer parameters are also never null and for sized types we also know
                // exactly how many bytes we can dereference
                if common::type_is_sized(ccx.tcx(), mt.ty) {
                    let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, mt.ty));
                    attrs.arg(idx, llvm::DereferenceableAttribute(llsz));
                } else {
                    attrs.arg(idx, llvm::NonNullAttribute);
                    if mt.ty.is_trait() {
                        attrs.arg(idx + 1, llvm::NonNullAttribute);
                    }
                }

                // When a reference in an argument has no named lifetime, it's
                // impossible for that reference to escape this function
                // (returned or stored beyond the call by a closure).
                if let ReLateBound(_, BrAnon(_)) = *b {
                    attrs.arg(idx, llvm::Attribute::NoCapture);
                }
            }
            _ => ()
        }

        if common::type_is_fat_ptr(ccx.tcx(), t) {
            idx += 2;
        } else {
            idx += 1;
        }
    }

    attrs
}
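
// Worked example (an assumption, not part of the original source): for a bare
// function type like `fn(&str, Box<SomeTrait>) -> bool`, `from_fn_type` would
// roughly produce:
//
//   * return value: `zeroext`, because the return type is `bool`;
//   * indices 1-2 (`&str`, a fat pointer): `noalias`, `readonly` and `nonnull`
//     on the data pointer (`str` is unsized, so no `dereferenceable` size),
//     plus `nocapture` if the anonymous-lifetime check above applies; `idx`
//     then advances by 2;
//   * indices 3-4 (`Box<SomeTrait>`, also fat): `noalias` and `nonnull` on the
//     data pointer and `nonnull` on the vtable pointer; `idx` advances by 2.
//
// `SomeTrait` is a hypothetical trait used only for illustration.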