Auto merge of - erikdesjardins:addattr, r=nikic

Add LLVM attributes in batches instead of individually

This should improve performance.

~r? `@ghost` (blocked on )~
bors 2022-02-27 09:23:24 +00:00
commit 2bd9656c80
11 changed files with 454 additions and 473 deletions
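To make the change concrete, here is a minimal, self-contained sketch of the batching pattern the diff below adopts: attribute values are created up front (as in the new `AttributeKind::create_attr`) and handed over in a single call per attribute place (as in the new `attributes::apply_to_llfn` / `apply_to_callsite` helpers), rather than one FFI call per attribute. The `AttributeKind`, `Attribute`, and `apply_to_llfn` items below are simplified stand-ins, not the real `rustc_codegen_llvm` types; the real code also collects into a `SmallVec` to avoid heap allocation in the common small cases.

```rust
// Illustrative sketch only: stand-in types mimicking the batched-attribute API.
#[derive(Clone, Copy, Debug)]
enum AttributeKind {
    NoReturn,
    NoUnwind,
    Cold,
}

// Stand-in for an LLVM attribute value created in the LLVM context.
#[derive(Clone, Copy, Debug)]
struct Attribute(AttributeKind);

impl AttributeKind {
    // Mirrors the new `create_attr`: build the attribute value eagerly...
    fn create_attr(self) -> Attribute {
        Attribute(self)
    }
}

// ...and cross the FFI boundary once with the whole batch (the new
// `apply_to_llfn`/`apply_to_callsite` shape), instead of once per attribute
// (the old `apply_llfn` shape).
fn apply_to_llfn(attrs: &[Attribute]) {
    if attrs.is_empty() {
        return; // the real helpers also skip the call when there is nothing to add
    }
    println!("one call adding {} attributes: {:?}", attrs.len(), attrs);
}

fn main() {
    let mut func_attrs = Vec::with_capacity(3);
    func_attrs.push(AttributeKind::NoReturn.create_attr());
    func_attrs.push(AttributeKind::NoUnwind.create_attr());
    func_attrs.push(AttributeKind::Cold.create_attr());
    apply_to_llfn(&func_attrs);
}
```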

@ -1,6 +1,7 @@
use crate::attributes;
use crate::builder::Builder;
use crate::context::CodegenCx;
-use crate::llvm::{self, AttributePlace};
+use crate::llvm::{self, Attribute, AttributePlace};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
@ -20,6 +21,7 @@ use rustc_target::abi::{self, HasDataLayout, Int};
pub use rustc_target::spec::abi::Abi;
use libc::c_uint;
use smallvec::SmallVec;
pub trait ArgAttributesExt {
fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value);
@ -38,57 +40,65 @@ fn should_use_mutable_noalias(cx: &CodegenCx<'_, '_>) -> bool {
cx.tcx.sess.opts.debugging_opts.mutable_noalias.unwrap_or(true)
}
-const ABI_AFFECTING_ATTRIBUTES: [(ArgAttribute, llvm::Attribute); 1] =
+const ABI_AFFECTING_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 1] =
-[(ArgAttribute::InReg, llvm::Attribute::InReg)];
+[(ArgAttribute::InReg, llvm::AttributeKind::InReg)];
-const OPTIMIZATION_ATTRIBUTES: [(ArgAttribute, llvm::Attribute); 5] = [
+const OPTIMIZATION_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 5] = [
-(ArgAttribute::NoAlias, llvm::Attribute::NoAlias),
+(ArgAttribute::NoAlias, llvm::AttributeKind::NoAlias),
-(ArgAttribute::NoCapture, llvm::Attribute::NoCapture),
+(ArgAttribute::NoCapture, llvm::AttributeKind::NoCapture),
-(ArgAttribute::NonNull, llvm::Attribute::NonNull),
+(ArgAttribute::NonNull, llvm::AttributeKind::NonNull),
-(ArgAttribute::ReadOnly, llvm::Attribute::ReadOnly),
+(ArgAttribute::ReadOnly, llvm::AttributeKind::ReadOnly),
-(ArgAttribute::NoUndef, llvm::Attribute::NoUndef),
+(ArgAttribute::NoUndef, llvm::AttributeKind::NoUndef),
];
fn get_attrs<'ll>(this: &ArgAttributes, cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 8]> {
let mut regular = this.regular;
let mut attrs = SmallVec::new();
// ABI-affecting attributes must always be applied
for (attr, llattr) in ABI_AFFECTING_ATTRIBUTES {
if regular.contains(attr) {
attrs.push(llattr.create_attr(cx.llcx));
}
}
if let Some(align) = this.pointee_align {
attrs.push(llvm::CreateAlignmentAttr(cx.llcx, align.bytes()));
}
match this.arg_ext {
ArgExtension::None => {}
ArgExtension::Zext => attrs.push(llvm::AttributeKind::ZExt.create_attr(cx.llcx)),
ArgExtension::Sext => attrs.push(llvm::AttributeKind::SExt.create_attr(cx.llcx)),
}
// Only apply remaining attributes when optimizing
if cx.sess().opts.optimize != config::OptLevel::No {
let deref = this.pointee_size.bytes();
if deref != 0 {
if regular.contains(ArgAttribute::NonNull) {
attrs.push(llvm::CreateDereferenceableAttr(cx.llcx, deref));
} else {
attrs.push(llvm::CreateDereferenceableOrNullAttr(cx.llcx, deref));
}
regular -= ArgAttribute::NonNull;
}
for (attr, llattr) in OPTIMIZATION_ATTRIBUTES {
if regular.contains(attr) {
attrs.push(llattr.create_attr(cx.llcx));
}
}
if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
attrs.push(llvm::AttributeKind::NoAlias.create_attr(cx.llcx));
}
}
attrs
}
impl ArgAttributesExt for ArgAttributes {
fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value) {
-let mut regular = self.regular;
+let attrs = get_attrs(self, cx);
-unsafe {
+attributes::apply_to_llfn(llfn, idx, &attrs);
// ABI-affecting attributes must always be applied
for (attr, llattr) in ABI_AFFECTING_ATTRIBUTES {
if regular.contains(attr) {
llattr.apply_llfn(idx, llfn);
}
}
if let Some(align) = self.pointee_align {
llvm::LLVMRustAddAlignmentAttr(llfn, idx.as_uint(), align.bytes() as u32);
}
match self.arg_ext {
ArgExtension::None => {}
ArgExtension::Zext => llvm::Attribute::ZExt.apply_llfn(idx, llfn),
ArgExtension::Sext => llvm::Attribute::SExt.apply_llfn(idx, llfn),
}
// Only apply remaining attributes when optimizing
if cx.sess().opts.optimize == config::OptLevel::No {
return;
}
let deref = self.pointee_size.bytes();
if deref != 0 {
if regular.contains(ArgAttribute::NonNull) {
llvm::LLVMRustAddDereferenceableAttr(llfn, idx.as_uint(), deref);
} else {
llvm::LLVMRustAddDereferenceableOrNullAttr(llfn, idx.as_uint(), deref);
}
regular -= ArgAttribute::NonNull;
}
for (attr, llattr) in OPTIMIZATION_ATTRIBUTES {
if regular.contains(attr) {
llattr.apply_llfn(idx, llfn);
}
}
if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
llvm::Attribute::NoAlias.apply_llfn(idx, llfn);
}
}
}
fn apply_attrs_to_callsite(
@ -97,52 +107,8 @@ impl ArgAttributesExt for ArgAttributes {
cx: &CodegenCx<'_, '_>,
callsite: &Value,
) {
-let mut regular = self.regular;
+let attrs = get_attrs(self, cx);
-unsafe {
+attributes::apply_to_callsite(callsite, idx, &attrs);
// ABI-affecting attributes must always be applied
for (attr, llattr) in ABI_AFFECTING_ATTRIBUTES {
if regular.contains(attr) {
llattr.apply_callsite(idx, callsite);
}
}
if let Some(align) = self.pointee_align {
llvm::LLVMRustAddAlignmentCallSiteAttr(
callsite,
idx.as_uint(),
align.bytes() as u32,
);
}
match self.arg_ext {
ArgExtension::None => {}
ArgExtension::Zext => llvm::Attribute::ZExt.apply_callsite(idx, callsite),
ArgExtension::Sext => llvm::Attribute::SExt.apply_callsite(idx, callsite),
}
// Only apply remaining attributes when optimizing
if cx.sess().opts.optimize == config::OptLevel::No {
return;
}
let deref = self.pointee_size.bytes();
if deref != 0 {
if regular.contains(ArgAttribute::NonNull) {
llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, idx.as_uint(), deref);
} else {
llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(
callsite,
idx.as_uint(),
deref,
);
}
regular -= ArgAttribute::NonNull;
}
for (attr, llattr) in OPTIMIZATION_ATTRIBUTES {
if regular.contains(attr) {
llattr.apply_callsite(idx, callsite);
}
}
if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
llvm::Attribute::NoAlias.apply_callsite(idx, callsite);
}
}
}
}
@ -444,15 +410,14 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
}
fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
-// FIXME(eddyb) can this also be applied to callsites?
+let mut func_attrs = SmallVec::<[_; 2]>::new();
if self.ret.layout.abi.is_uninhabited() {
-llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn);
+func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(cx.llcx));
}
// FIXME(eddyb, wesleywiser): apply this to callsites as well?
if !self.can_unwind {
-llvm::Attribute::NoUnwind.apply_llfn(llvm::AttributePlace::Function, llfn);
+func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(cx.llcx));
}
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &{ func_attrs });
let mut i = 0;
let mut apply = |attrs: &ArgAttributes| {
@ -467,13 +432,8 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
assert!(!on_stack);
let i = apply(attrs);
-unsafe {
-llvm::LLVMRustAddStructRetAttr(
-llfn,
-llvm::AttributePlace::Argument(i).as_uint(),
-self.ret.layout.llvm_type(cx),
-);
-}
+let sret = llvm::CreateStructRetAttr(cx.llcx, self.ret.layout.llvm_type(cx));
+attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
}
PassMode::Cast(cast) => {
cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
@ -488,13 +448,8 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
PassMode::Ignore => {}
PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
let i = apply(attrs);
-unsafe {
-llvm::LLVMRustAddByValAttr(
-llfn,
-llvm::AttributePlace::Argument(i).as_uint(),
-arg.layout.llvm_type(cx),
-);
-}
+let byval = llvm::CreateByValAttr(cx.llcx, arg.layout.llvm_type(cx));
+attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
}
PassMode::Direct(ref attrs)
| PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
@ -517,12 +472,14 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
}
fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value) {
let mut func_attrs = SmallVec::<[_; 2]>::new();
if self.ret.layout.abi.is_uninhabited() {
-llvm::Attribute::NoReturn.apply_callsite(llvm::AttributePlace::Function, callsite);
+func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(bx.cx.llcx));
}
if !self.can_unwind {
-llvm::Attribute::NoUnwind.apply_callsite(llvm::AttributePlace::Function, callsite);
+func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(bx.cx.llcx));
}
attributes::apply_to_callsite(callsite, llvm::AttributePlace::Function, &{ func_attrs });
let mut i = 0;
let mut apply = |cx: &CodegenCx<'_, '_>, attrs: &ArgAttributes| {
@ -537,13 +494,8 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
assert!(!on_stack);
let i = apply(bx.cx, attrs);
-unsafe {
+let sret = llvm::CreateStructRetAttr(bx.cx.llcx, self.ret.layout.llvm_type(bx));
-llvm::LLVMRustAddStructRetCallSiteAttr(
+attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
callsite,
llvm::AttributePlace::Argument(i).as_uint(),
self.ret.layout.llvm_type(bx),
);
}
}
PassMode::Cast(cast) => {
cast.attrs.apply_attrs_to_callsite(
@ -572,13 +524,12 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
PassMode::Ignore => {}
PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
let i = apply(bx.cx, attrs);
-unsafe {
+let byval = llvm::CreateByValAttr(bx.cx.llcx, arg.layout.llvm_type(bx));
-llvm::LLVMRustAddByValCallSiteAttr(
+attributes::apply_to_callsite(
callsite,
-llvm::AttributePlace::Argument(i).as_uint(),
+llvm::AttributePlace::Argument(i),
-arg.layout.llvm_type(bx),
+&[byval],
);
}
}
PassMode::Direct(ref attrs)
| PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
@ -610,10 +561,12 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
if self.conv == Conv::CCmseNonSecureCall {
// This will probably get ignored on all targets but those supporting the TrustZone-M
// extension (thumbv8m targets).
-llvm::AddCallSiteAttrString(
+let cmse_nonsecure_call =
llvm::CreateAttrString(bx.cx.llcx, cstr::cstr!("cmse_nonsecure_call"));
attributes::apply_to_callsite(
callsite,
llvm::AttributePlace::Function,
-cstr::cstr!("cmse_nonsecure_call"),
+&[cmse_nonsecure_call],
);
}
}

@ -64,7 +64,8 @@ pub(crate) unsafe fn codegen(
llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
}
if tcx.sess.must_emit_unwind_tables() {
-attributes::emit_uwtable(llfn);
+let uwtable = attributes::uwtable_attr(llcx);
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]);
}
let callee = kind.fn_name(method.name);
@ -105,20 +106,22 @@ pub(crate) unsafe fn codegen(
let name = "__rust_alloc_error_handler"; let name = "__rust_alloc_error_handler";
let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty); let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
// -> ! DIFlagNoReturn // -> ! DIFlagNoReturn
llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn); let no_return = llvm::AttributeKind::NoReturn.create_attr(llcx);
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[no_return]);
if tcx.sess.target.default_hidden_visibility {
llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
}
if tcx.sess.must_emit_unwind_tables() {
-attributes::emit_uwtable(llfn);
+let uwtable = attributes::uwtable_attr(llcx);
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]);
}
let kind = if has_alloc_error_handler { AllocatorKind::Global } else { AllocatorKind::Default };
let callee = kind.fn_name(sym::oom);
let callee = llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
// -> ! DIFlagNoReturn
-llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, callee);
+attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]);
llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());

@ -1,3 +1,4 @@
use crate::attributes;
use crate::builder::Builder;
use crate::common::Funclet;
use crate::context::CodegenCx;
@ -18,6 +19,7 @@ use rustc_target::abi::*;
use rustc_target::asm::*;
use libc::{c_char, c_uint};
use smallvec::SmallVec;
use tracing::debug;
impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
@ -273,19 +275,20 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
)
.unwrap_or_else(|| span_bug!(line_spans[0], "LLVM asm constraint validation failed"));
let mut attrs = SmallVec::<[_; 2]>::new();
if options.contains(InlineAsmOptions::PURE) {
if options.contains(InlineAsmOptions::NOMEM) {
-llvm::Attribute::ReadNone.apply_callsite(llvm::AttributePlace::Function, result);
+attrs.push(llvm::AttributeKind::ReadNone.create_attr(self.cx.llcx));
} else if options.contains(InlineAsmOptions::READONLY) {
-llvm::Attribute::ReadOnly.apply_callsite(llvm::AttributePlace::Function, result);
+attrs.push(llvm::AttributeKind::ReadOnly.create_attr(self.cx.llcx));
}
-llvm::Attribute::WillReturn.apply_callsite(llvm::AttributePlace::Function, result);
+attrs.push(llvm::AttributeKind::WillReturn.create_attr(self.cx.llcx));
} else if options.contains(InlineAsmOptions::NOMEM) {
-llvm::Attribute::InaccessibleMemOnly
-.apply_callsite(llvm::AttributePlace::Function, result);
+attrs.push(llvm::AttributeKind::InaccessibleMemOnly.create_attr(self.cx.llcx));
} else {
// LLVM doesn't have an attribute to represent ReadOnly + SideEffect
}
attributes::apply_to_callsite(result, llvm::AttributePlace::Function, &{ attrs });
// Write results to outputs
for (idx, op) in operands.iter().enumerate() {

@ -7,53 +7,75 @@ use rustc_codegen_ssa::traits::*;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_hir::def_id::DefId;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::ty::layout::HasTyCtxt;
use rustc_middle::ty::{self, TyCtxt};
use rustc_session::config::OptLevel;
use rustc_session::Session;
use rustc_target::spec::abi::Abi;
use rustc_target::spec::{FramePointer, SanitizerSet, StackProbeType, StackProtector};
use smallvec::SmallVec;
use crate::attributes;
use crate::llvm::AttributePlace::Function;
-use crate::llvm::{self, Attribute};
+use crate::llvm::{self, Attribute, AttributeKind, AttributePlace};
use crate::llvm_util;
pub use rustc_attr::{InlineAttr, InstructionSetAttr, OptimizeAttr};
use crate::context::CodegenCx;
use crate::value::Value;
-/// Mark LLVM function to use provided inline heuristic.
-#[inline]
-fn inline<'ll>(cx: &CodegenCx<'ll, '_>, val: &'ll Value, inline: InlineAttr) {
-use self::InlineAttr::*;
+pub fn apply_to_llfn(llfn: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
+if !attrs.is_empty() {
+llvm::AddFunctionAttributes(llfn, idx, attrs);
+}
match inline {
Hint => Attribute::InlineHint.apply_llfn(Function, val),
Always => Attribute::AlwaysInline.apply_llfn(Function, val),
Never => {
if cx.tcx().sess.target.arch != "amdgpu" {
Attribute::NoInline.apply_llfn(Function, val);
}
}
None => {}
};
}
-/// Apply LLVM sanitize attributes.
+pub fn remove_from_llfn(llfn: &Value, idx: AttributePlace, attrs: &[AttributeKind]) {
if !attrs.is_empty() {
llvm::RemoveFunctionAttributes(llfn, idx, attrs);
}
}
pub fn apply_to_callsite(callsite: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
if !attrs.is_empty() {
llvm::AddCallSiteAttributes(callsite, idx, attrs);
}
}
/// Get LLVM attribute for the provided inline heuristic.
#[inline]
-pub fn sanitize<'ll>(cx: &CodegenCx<'ll, '_>, no_sanitize: SanitizerSet, llfn: &'ll Value) {
+fn inline_attr<'ll>(cx: &CodegenCx<'ll, '_>, inline: InlineAttr) -> Option<&'ll Attribute> {
match inline {
InlineAttr::Hint => Some(AttributeKind::InlineHint.create_attr(cx.llcx)),
InlineAttr::Always => Some(AttributeKind::AlwaysInline.create_attr(cx.llcx)),
InlineAttr::Never => {
if cx.sess().target.arch != "amdgpu" {
Some(AttributeKind::NoInline.create_attr(cx.llcx))
} else {
None
}
}
InlineAttr::None => None,
}
}
/// Get LLVM sanitize attributes.
#[inline]
pub fn sanitize_attrs<'ll>(
cx: &CodegenCx<'ll, '_>,
no_sanitize: SanitizerSet,
) -> SmallVec<[&'ll Attribute; 4]> {
let mut attrs = SmallVec::new();
let enabled = cx.tcx.sess.opts.debugging_opts.sanitizer - no_sanitize;
if enabled.contains(SanitizerSet::ADDRESS) {
-llvm::Attribute::SanitizeAddress.apply_llfn(Function, llfn);
+attrs.push(llvm::AttributeKind::SanitizeAddress.create_attr(cx.llcx));
}
if enabled.contains(SanitizerSet::MEMORY) {
-llvm::Attribute::SanitizeMemory.apply_llfn(Function, llfn);
+attrs.push(llvm::AttributeKind::SanitizeMemory.create_attr(cx.llcx));
}
if enabled.contains(SanitizerSet::THREAD) {
-llvm::Attribute::SanitizeThread.apply_llfn(Function, llfn);
+attrs.push(llvm::AttributeKind::SanitizeThread.create_attr(cx.llcx));
}
if enabled.contains(SanitizerSet::HWADDRESS) {
-llvm::Attribute::SanitizeHWAddress.apply_llfn(Function, llfn);
+attrs.push(llvm::AttributeKind::SanitizeHWAddress.create_attr(cx.llcx));
}
if enabled.contains(SanitizerSet::MEMTAG) {
// Check to make sure the mte target feature is actually enabled.
@ -66,26 +88,21 @@ pub fn sanitize<'ll>(cx: &CodegenCx<'ll, '_>, no_sanitize: SanitizerSet, llfn: &
sess.err("`-Zsanitizer=memtag` requires `-Ctarget-feature=+mte`");
}
-llvm::Attribute::SanitizeMemTag.apply_llfn(Function, llfn);
+attrs.push(llvm::AttributeKind::SanitizeMemTag.create_attr(cx.llcx));
}
attrs
}
/// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function.
#[inline]
-pub fn emit_uwtable(val: &Value) {
+pub fn uwtable_attr(llcx: &llvm::Context) -> &Attribute {
// NOTE: We should determine if we even need async unwind tables, as they
// take have more overhead and if we can use sync unwind tables we
// probably should.
-llvm::EmitUWTableAttr(val, true);
+llvm::CreateUWTableAttr(llcx, true)
}
-/// Tell LLVM if this function should be 'naked', i.e., skip the epilogue and prologue.
+pub fn frame_pointer_type_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
#[inline]
fn naked(val: &Value, is_naked: bool) {
Attribute::Naked.toggle_llfn(Function, val, is_naked);
}
pub fn set_frame_pointer_type<'ll>(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
let mut fp = cx.sess().target.frame_pointer;
// "mcount" function relies on stack pointer.
// See <https://sourceware.org/binutils/docs/gprof/Implementation.html>.
@ -96,19 +113,14 @@ pub fn set_frame_pointer_type<'ll>(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
let attr_value = match fp {
FramePointer::Always => cstr!("all"),
FramePointer::NonLeaf => cstr!("non-leaf"),
-FramePointer::MayOmit => return,
+FramePointer::MayOmit => return None,
};
-llvm::AddFunctionAttrStringValue(
+Some(llvm::CreateAttrStringValue(cx.llcx, cstr!("frame-pointer"), attr_value))
llfn,
llvm::AttributePlace::Function,
cstr!("frame-pointer"),
attr_value,
);
}
/// Tell LLVM what instrument function to insert.
#[inline]
-fn set_instrument_function<'ll>(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
+fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
if cx.sess().instrument_mcount() {
// Similar to `clang -pg` behavior. Handled by the
// `post-inline-ee-instrument` LLVM pass.
@ -117,16 +129,17 @@ fn set_instrument_function<'ll>(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
// See test/CodeGen/mcount.c in clang.
let mcount_name = CString::new(cx.sess().target.mcount.as_str().as_bytes()).unwrap();
-llvm::AddFunctionAttrStringValue(
+Some(llvm::CreateAttrStringValue(
-llfn,
+cx.llcx,
-llvm::AttributePlace::Function,
cstr!("instrument-function-entry-inlined"),
&mcount_name,
-);
+))
} else {
None
}
}
-fn set_probestack<'ll>(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
+fn probestack_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
// Currently stack probes seem somewhat incompatible with the address
// sanitizer and thread sanitizer. With asan we're already protected from
// stack overflow anyway so we don't really need stack probes regardless.
@ -137,107 +150,105 @@ fn set_probestack<'ll>(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
.sanitizer
.intersects(SanitizerSet::ADDRESS | SanitizerSet::THREAD)
{
-return;
+return None;
}
// probestack doesn't play nice either with `-C profile-generate`.
if cx.sess().opts.cg.profile_generate.enabled() {
-return;
+return None;
}
// probestack doesn't play nice either with gcov profiling.
if cx.sess().opts.debugging_opts.profile {
-return;
+return None;
}
let attr_value = match cx.sess().target.stack_probes {
-StackProbeType::None => None,
+StackProbeType::None => return None,
// Request LLVM to generate the probes inline. If the given LLVM version does not support
// this, no probe is generated at all (even if the attribute is specified).
-StackProbeType::Inline => Some(cstr!("inline-asm")),
+StackProbeType::Inline => cstr!("inline-asm"),
// Flag our internal `__rust_probestack` function as the stack probe symbol.
// This is defined in the `compiler-builtins` crate for each architecture.
-StackProbeType::Call => Some(cstr!("__rust_probestack")),
+StackProbeType::Call => cstr!("__rust_probestack"),
// Pick from the two above based on the LLVM version.
StackProbeType::InlineOrCall { min_llvm_version_for_inline } => {
if llvm_util::get_version() < min_llvm_version_for_inline {
-Some(cstr!("__rust_probestack"))
+cstr!("__rust_probestack")
} else {
-Some(cstr!("inline-asm"))
+cstr!("inline-asm")
}
}
};
-if let Some(attr_value) = attr_value {
+Some(llvm::CreateAttrStringValue(cx.llcx, cstr!("probe-stack"), attr_value))
llvm::AddFunctionAttrStringValue(
llfn,
llvm::AttributePlace::Function,
cstr!("probe-stack"),
attr_value,
);
}
}
-fn set_stackprotector<'ll>(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
+fn stackprotector_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
let sspattr = match cx.sess().stack_protector() {
-StackProtector::None => return,
+StackProtector::None => return None,
-StackProtector::All => Attribute::StackProtectReq,
+StackProtector::All => AttributeKind::StackProtectReq,
-StackProtector::Strong => Attribute::StackProtectStrong,
+StackProtector::Strong => AttributeKind::StackProtectStrong,
-StackProtector::Basic => Attribute::StackProtect,
+StackProtector::Basic => AttributeKind::StackProtect,
};
-sspattr.apply_llfn(Function, llfn)
+Some(sspattr.create_attr(cx.llcx))
}
-pub fn apply_target_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
+pub fn target_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Attribute {
let target_cpu = SmallCStr::new(llvm_util::target_cpu(cx.tcx.sess));
-llvm::AddFunctionAttrStringValue(
+llvm::CreateAttrStringValue(cx.llcx, cstr!("target-cpu"), target_cpu.as_c_str())
llfn,
llvm::AttributePlace::Function,
cstr!("target-cpu"),
target_cpu.as_c_str(),
);
}
-pub fn apply_tune_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
+pub fn tune_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
-if let Some(tune) = llvm_util::tune_cpu(cx.tcx.sess) {
+llvm_util::tune_cpu(cx.tcx.sess).map(|tune| {
let tune_cpu = SmallCStr::new(tune);
-llvm::AddFunctionAttrStringValue(
+llvm::CreateAttrStringValue(cx.llcx, cstr!("tune-cpu"), tune_cpu.as_c_str())
-llfn,
+})
llvm::AttributePlace::Function,
cstr!("tune-cpu"),
tune_cpu.as_c_str(),
);
}
}
-/// Sets the `NonLazyBind` LLVM attribute on a given function,
+/// Get the `NonLazyBind` LLVM attribute,
-/// assuming the codegen options allow skipping the PLT.
+/// if the codegen options allow skipping the PLT.
-pub fn non_lazy_bind<'ll>(sess: &Session, llfn: &'ll Value) {
+pub fn non_lazy_bind_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
// Don't generate calls through PLT if it's not necessary
-if !sess.needs_plt() {
+if !cx.sess().needs_plt() {
-Attribute::NonLazyBind.apply_llfn(Function, llfn);
+Some(AttributeKind::NonLazyBind.create_attr(cx.llcx))
} else {
None
}
}
-pub(crate) fn default_optimisation_attrs<'ll>(sess: &Session, llfn: &'ll Value) {
-match sess.opts.optimize {
+/// Returns attributes to remove and to add, respectively,
+/// to set the default optimizations attrs on a function.
#[inline]
pub(crate) fn default_optimisation_attrs<'ll>(
cx: &CodegenCx<'ll, '_>,
) -> (
// Attributes to remove
SmallVec<[AttributeKind; 3]>,
// Attributes to add
SmallVec<[&'ll Attribute; 2]>,
) {
let mut to_remove = SmallVec::new();
let mut to_add = SmallVec::new();
match cx.sess().opts.optimize {
OptLevel::Size => {
-llvm::Attribute::MinSize.unapply_llfn(Function, llfn);
+to_remove.push(llvm::AttributeKind::MinSize);
-llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn);
+to_add.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
-llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
+to_remove.push(llvm::AttributeKind::OptimizeNone);
}
OptLevel::SizeMin => {
-llvm::Attribute::MinSize.apply_llfn(Function, llfn);
+to_add.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
-llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn);
+to_add.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
-llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
+to_remove.push(llvm::AttributeKind::OptimizeNone);
}
OptLevel::No => {
-llvm::Attribute::MinSize.unapply_llfn(Function, llfn);
+to_remove.push(llvm::AttributeKind::MinSize);
-llvm::Attribute::OptimizeForSize.unapply_llfn(Function, llfn);
+to_remove.push(llvm::AttributeKind::OptimizeForSize);
-llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
+to_remove.push(llvm::AttributeKind::OptimizeNone);
}
_ => {}
}
(to_remove, to_add)
}
/// Composite function which sets LLVM attributes for function depending on its AST (`#[attribute]`)
@ -249,30 +260,35 @@ pub fn from_fn_attrs<'ll, 'tcx>(
) {
let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id());
let mut to_remove = SmallVec::<[_; 4]>::new();
let mut to_add = SmallVec::<[_; 16]>::new();
match codegen_fn_attrs.optimize {
OptimizeAttr::None => {
-default_optimisation_attrs(cx.tcx.sess, llfn);
+let (to_remove_opt, to_add_opt) = default_optimisation_attrs(cx);
to_remove.extend(to_remove_opt);
to_add.extend(to_add_opt);
}
OptimizeAttr::Speed => {
-llvm::Attribute::MinSize.unapply_llfn(Function, llfn);
+to_remove.push(llvm::AttributeKind::MinSize);
-llvm::Attribute::OptimizeForSize.unapply_llfn(Function, llfn);
+to_remove.push(llvm::AttributeKind::OptimizeForSize);
-llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
+to_remove.push(llvm::AttributeKind::OptimizeNone);
}
OptimizeAttr::Size => {
-llvm::Attribute::MinSize.apply_llfn(Function, llfn);
+to_add.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
-llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn);
+to_add.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
-llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
+to_remove.push(llvm::AttributeKind::OptimizeNone);
}
}
-let inline_attr = if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
+let inline = if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
InlineAttr::Never
} else if codegen_fn_attrs.inline == InlineAttr::None && instance.def.requires_inline(cx.tcx) {
InlineAttr::Hint
} else {
codegen_fn_attrs.inline
};
-inline(cx, llfn, inline_attr);
+to_add.extend(inline_attr(cx, inline));
// The `uwtable` attribute according to LLVM is:
//
@ -291,52 +307,54 @@ pub fn from_fn_attrs<'ll, 'tcx>(
// You can also find more info on why Windows always requires uwtables here:
// https://bugzilla.mozilla.org/show_bug.cgi?id=1302078
if cx.sess().must_emit_unwind_tables() {
-attributes::emit_uwtable(llfn);
+to_add.push(uwtable_attr(cx.llcx));
}
if cx.sess().opts.debugging_opts.profile_sample_use.is_some() {
-llvm::AddFunctionAttrString(llfn, Function, cstr!("use-sample-profile"));
+to_add.push(llvm::CreateAttrString(cx.llcx, cstr!("use-sample-profile")));
}
// FIXME: none of these three functions interact with source level attributes.
-set_frame_pointer_type(cx, llfn);
+to_add.extend(frame_pointer_type_attr(cx));
-set_instrument_function(cx, llfn);
+to_add.extend(instrument_function_attr(cx));
-set_probestack(cx, llfn);
+to_add.extend(probestack_attr(cx));
-set_stackprotector(cx, llfn);
+to_add.extend(stackprotector_attr(cx));
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
-Attribute::Cold.apply_llfn(Function, llfn);
+to_add.push(AttributeKind::Cold.create_attr(cx.llcx));
}
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_RETURNS_TWICE) {
-Attribute::ReturnsTwice.apply_llfn(Function, llfn);
+to_add.push(AttributeKind::ReturnsTwice.create_attr(cx.llcx));
}
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_PURE) {
-Attribute::ReadOnly.apply_llfn(Function, llfn);
+to_add.push(AttributeKind::ReadOnly.create_attr(cx.llcx));
}
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_CONST) {
-Attribute::ReadNone.apply_llfn(Function, llfn);
+to_add.push(AttributeKind::ReadNone.create_attr(cx.llcx));
}
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
-naked(llfn, true);
+to_add.push(AttributeKind::Naked.create_attr(cx.llcx));
}
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
-Attribute::NoAlias.apply_llfn(llvm::AttributePlace::ReturnValue, llfn);
+// apply to return place instead of function (unlike all other attributes applied in this function)
let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
}
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::CMSE_NONSECURE_ENTRY) {
-llvm::AddFunctionAttrString(llfn, Function, cstr!("cmse_nonsecure_entry"));
+to_add.push(llvm::CreateAttrString(cx.llcx, cstr!("cmse_nonsecure_entry")));
}
if let Some(align) = codegen_fn_attrs.alignment {
llvm::set_alignment(llfn, align as usize);
}
-sanitize(cx, codegen_fn_attrs.no_sanitize, llfn);
+to_add.extend(sanitize_attrs(cx, codegen_fn_attrs.no_sanitize));
// Always annotate functions with the target-cpu they are compiled for.
// Without this, ThinLTO won't inline Rust functions into Clang generated
// functions (because Clang annotates functions this way too).
-apply_target_cpu_attr(cx, llfn);
+to_add.push(target_cpu_attr(cx));
// tune-cpu is only conveyed through the attribute for our purpose.
// The target doesn't care; the subtarget reads our attribute.
-apply_tune_cpu_attr(cx, llfn);
+to_add.extend(tune_cpu_attr(cx));
let function_features =
codegen_fn_attrs.target_features.iter().map(|f| f.as_str()).collect::<Vec<&str>>();
@ -379,22 +397,12 @@ pub fn from_fn_attrs<'ll, 'tcx>(
// If this function is an import from the environment but the wasm
// import has a specific module/name, apply them here.
if let Some(module) = wasm_import_module(cx.tcx, instance.def_id()) {
-llvm::AddFunctionAttrStringValue(
+to_add.push(llvm::CreateAttrStringValue(cx.llcx, cstr!("wasm-import-module"), &module));
llfn,
llvm::AttributePlace::Function,
cstr!("wasm-import-module"),
&module,
);
let name =
codegen_fn_attrs.link_name.unwrap_or_else(|| cx.tcx.item_name(instance.def_id()));
let name = CString::new(name.as_str()).unwrap();
-llvm::AddFunctionAttrStringValue(
+to_add.push(llvm::CreateAttrStringValue(cx.llcx, cstr!("wasm-import-name"), &name));
llfn,
llvm::AttributePlace::Function,
cstr!("wasm-import-name"),
&name,
);
}
// The `"wasm"` abi on wasm targets automatically enables the
@ -414,13 +422,11 @@ pub fn from_fn_attrs<'ll, 'tcx>(
global_features.extend(function_features.into_iter());
let features = global_features.join(",");
let val = CString::new(features).unwrap();
-llvm::AddFunctionAttrStringValue(
+to_add.push(llvm::CreateAttrStringValue(cx.llcx, cstr!("target-features"), &val));
llfn,
llvm::AttributePlace::Function,
cstr!("target-features"),
&val,
);
}
attributes::remove_from_llfn(llfn, Function, &to_remove);
attributes::apply_to_llfn(llfn, Function, &to_add);
}
fn wasm_import_module(tcx: TyCtxt<'_>, id: DefId) -> Option<CString> {

@ -95,7 +95,8 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen
// If this codegen unit contains the main function, also create the
// wrapper here
if let Some(entry) = maybe_create_entry_wrapper::<Builder<'_, '_, '_>>(&cx) {
-attributes::sanitize(&cx, SanitizerSet::empty(), entry);
+let attrs = attributes::sanitize_attrs(&cx, SanitizerSet::empty());
attributes::apply_to_llfn(entry, llvm::AttributePlace::Function, &attrs);
}
// Run replace-all-uses-with for statics that need it

@ -1,3 +1,4 @@
use crate::attributes;
use crate::common::Funclet;
use crate::context::CodegenCx;
use crate::llvm::{self, BasicBlock, False};
@ -22,6 +23,7 @@ use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::Span;
use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange};
use rustc_target::spec::{HasTargetSpec, Target};
use smallvec::SmallVec;
use std::borrow::Cow;
use std::ffi::CStr;
use std::iter;
@ -1174,14 +1176,18 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
fn apply_attrs_to_cleanup_callsite(&mut self, llret: &'ll Value) {
let mut attrs = SmallVec::<[_; 2]>::new();
// Cleanup is always the cold path.
-llvm::Attribute::Cold.apply_callsite(llvm::AttributePlace::Function, llret);
+attrs.push(llvm::AttributeKind::Cold.create_attr(self.llcx));
// In LLVM versions with deferred inlining (currently, system LLVM < 14),
// inlining drop glue can lead to exponential size blowup, see #41696 and #92110.
if !llvm_util::is_rust_llvm() && llvm_util::get_version() < (14, 0, 0) {
-llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
+attrs.push(llvm::AttributeKind::NoInline.create_attr(self.llcx));
}
attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &attrs);
}
}

@ -520,7 +520,8 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
} else {
let fty = self.type_variadic_func(&[], self.type_i32());
let llfn = self.declare_cfn(name, llvm::UnnamedAddr::Global, fty);
-attributes::apply_target_cpu_attr(self, llfn);
+let target_cpu = attributes::target_cpu_attr(self);
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[target_cpu]);
llfn
}
}
@ -550,12 +551,16 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
}
fn set_frame_pointer_type(&self, llfn: &'ll Value) {
-attributes::set_frame_pointer_type(self, llfn)
+if let Some(attr) = attributes::frame_pointer_type_attr(self) {
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[attr]);
}
}
fn apply_target_cpu_attr(&self, llfn: &'ll Value) {
-attributes::apply_target_cpu_attr(self, llfn);
+let mut attrs = SmallVec::<[_; 2]>::new();
-attributes::apply_tune_cpu_attr(self, llfn);
+attrs.push(attributes::target_cpu_attr(self));
attrs.extend(attributes::tune_cpu_attr(self));
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &attrs);
}
fn create_used_variable(&self) {

@ -18,8 +18,8 @@ use crate::llvm;
use crate::llvm::AttributePlace::Function;
use crate::type_::Type;
use crate::value::Value;
use rustc_codegen_ssa::traits::*;
use rustc_middle::ty::Ty;
use smallvec::SmallVec;
use tracing::debug;
/// Declare a function.
@ -41,12 +41,21 @@ fn declare_raw_fn<'ll>(
llvm::SetFunctionCallConv(llfn, callconv);
llvm::SetUnnamedAddress(llfn, unnamed);
let mut attrs_to_remove = SmallVec::<[_; 4]>::new();
let mut attrs_to_add = SmallVec::<[_; 4]>::new();
if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.disable_redzone) {
-llvm::Attribute::NoRedZone.apply_llfn(Function, llfn);
+attrs_to_add.push(llvm::AttributeKind::NoRedZone.create_attr(cx.llcx));
}
-attributes::default_optimisation_attrs(cx.tcx.sess, llfn);
-attributes::non_lazy_bind(cx.sess(), llfn);
+let (to_remove, to_add) = attributes::default_optimisation_attrs(cx);
+attrs_to_remove.extend(to_remove);
attrs_to_add.extend(to_add);
attrs_to_add.extend(attributes::non_lazy_bind_attr(cx));
attributes::remove_from_llfn(llfn, Function, &attrs_to_remove);
attributes::apply_to_llfn(llfn, Function, &attrs_to_add);
llfn
}

@ -155,7 +155,7 @@ pub enum DLLStorageClass {
/// though it is not ABI compatible (since it's a C++ enum)
#[repr(C)]
#[derive(Copy, Clone, Debug)]
-pub enum Attribute {
+pub enum AttributeKind {
AlwaysInline = 0,
ByVal = 1,
Cold = 2,
@ -643,6 +643,9 @@ extern "C" {
extern "C" { extern "C" {
pub type ConstantInt; pub type ConstantInt;
} }
extern "C" {
pub type Attribute;
}
extern "C" { extern "C" {
pub type Metadata; pub type Metadata;
} }
@ -1169,6 +1172,21 @@ extern "C" {
) -> Option<&Value>; ) -> Option<&Value>;
pub fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool); pub fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool);
// Operations on attributes
pub fn LLVMRustCreateAttrNoValue(C: &Context, attr: AttributeKind) -> &Attribute;
pub fn LLVMRustCreateAttrString(C: &Context, Name: *const c_char) -> &Attribute;
pub fn LLVMRustCreateAttrStringValue(
C: &Context,
Name: *const c_char,
Value: *const c_char,
) -> &Attribute;
pub fn LLVMRustCreateAlignmentAttr(C: &Context, bytes: u64) -> &Attribute;
pub fn LLVMRustCreateDereferenceableAttr(C: &Context, bytes: u64) -> &Attribute;
pub fn LLVMRustCreateDereferenceableOrNullAttr(C: &Context, bytes: u64) -> &Attribute;
pub fn LLVMRustCreateByValAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute;
pub fn LLVMRustCreateStructRetAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute;
pub fn LLVMRustCreateUWTableAttr(C: &Context, async_: bool) -> &Attribute;
// Operations on functions
pub fn LLVMRustGetOrInsertFunction<'a>(
M: &'a Module,
@ -1177,20 +1195,18 @@ extern "C" {
FunctionTy: &'a Type,
) -> &'a Value;
pub fn LLVMSetFunctionCallConv(Fn: &Value, CC: c_uint);
-pub fn LLVMRustAddAlignmentAttr(Fn: &Value, index: c_uint, bytes: u32);
-pub fn LLVMRustAddDereferenceableAttr(Fn: &Value, index: c_uint, bytes: u64);
-pub fn LLVMRustAddDereferenceableOrNullAttr(Fn: &Value, index: c_uint, bytes: u64);
-pub fn LLVMRustAddByValAttr(Fn: &Value, index: c_uint, ty: &Type);
-pub fn LLVMRustAddStructRetAttr(Fn: &Value, index: c_uint, ty: &Type);
-pub fn LLVMRustAddFunctionAttribute(Fn: &Value, index: c_uint, attr: Attribute);
-pub fn LLVMRustEmitUWTableAttr(Fn: &Value, async_: bool);
-pub fn LLVMRustAddFunctionAttrStringValue(
-Fn: &Value,
-index: c_uint,
-Name: *const c_char,
-Value: *const c_char,
-);
-pub fn LLVMRustRemoveFunctionAttributes(Fn: &Value, index: c_uint, attr: Attribute);
+pub fn LLVMRustAddFunctionAttributes<'a>(
+Fn: &'a Value,
+index: c_uint,
+Attrs: *const &'a Attribute,
+AttrsLen: size_t,
+);
+pub fn LLVMRustRemoveFunctionAttributes(
+Fn: &Value,
+index: c_uint,
+Attrs: *const AttributeKind,
+AttrsLen: size_t,
+);
// Operations on parameters
pub fn LLVMIsAArgument(Val: &Value) -> Option<&Value>;
@ -1211,13 +1227,12 @@ extern "C" {
// Operations on call sites
pub fn LLVMSetInstructionCallConv(Instr: &Value, CC: c_uint);
-pub fn LLVMRustAddCallSiteAttribute(Instr: &Value, index: c_uint, attr: Attribute);
-pub fn LLVMRustAddCallSiteAttrString(Instr: &Value, index: c_uint, Name: *const c_char);
-pub fn LLVMRustAddAlignmentCallSiteAttr(Instr: &Value, index: c_uint, bytes: u32);
-pub fn LLVMRustAddDereferenceableCallSiteAttr(Instr: &Value, index: c_uint, bytes: u64);
-pub fn LLVMRustAddDereferenceableOrNullCallSiteAttr(Instr: &Value, index: c_uint, bytes: u64);
-pub fn LLVMRustAddByValCallSiteAttr(Instr: &Value, index: c_uint, ty: &Type);
-pub fn LLVMRustAddStructRetCallSiteAttr(Instr: &Value, index: c_uint, ty: &Type);
+pub fn LLVMRustAddCallSiteAttributes<'a>(
+Instr: &'a Value,
+index: c_uint,
+Attrs: *const &'a Attribute,
+AttrsLen: size_t,
+);
// Operations on load/store instructions (only)
pub fn LLVMSetVolatile(MemoryAccessInst: &Value, volatile: Bool);

@ -31,24 +31,58 @@ impl LLVMRustResult {
}
}
-pub fn EmitUWTableAttr(llfn: &Value, async_: bool) {
-unsafe { LLVMRustEmitUWTableAttr(llfn, async_) }
-}
-pub fn AddFunctionAttrStringValue(llfn: &Value, idx: AttributePlace, attr: &CStr, value: &CStr) {
+pub fn AddFunctionAttributes<'ll>(llfn: &'ll Value, idx: AttributePlace, attrs: &[&'ll Attribute]) {
unsafe {
-LLVMRustAddFunctionAttrStringValue(llfn, idx.as_uint(), attr.as_ptr(), value.as_ptr())
+LLVMRustAddFunctionAttributes(llfn, idx.as_uint(), attrs.as_ptr(), attrs.len());
}
}
-pub fn AddFunctionAttrString(llfn: &Value, idx: AttributePlace, attr: &CStr) {
+pub fn RemoveFunctionAttributes(llfn: &Value, idx: AttributePlace, attrs: &[AttributeKind]) {
unsafe {
-LLVMRustAddFunctionAttrStringValue(llfn, idx.as_uint(), attr.as_ptr(), std::ptr::null())
+LLVMRustRemoveFunctionAttributes(llfn, idx.as_uint(), attrs.as_ptr(), attrs.len());
}
}
-pub fn AddCallSiteAttrString(callsite: &Value, idx: AttributePlace, attr: &CStr) {
-unsafe { LLVMRustAddCallSiteAttrString(callsite, idx.as_uint(), attr.as_ptr()) }
+pub fn AddCallSiteAttributes<'ll>(
+callsite: &'ll Value,
idx: AttributePlace,
attrs: &[&'ll Attribute],
) {
unsafe {
LLVMRustAddCallSiteAttributes(callsite, idx.as_uint(), attrs.as_ptr(), attrs.len());
}
}
pub fn CreateAttrStringValue<'ll>(llcx: &'ll Context, attr: &CStr, value: &CStr) -> &'ll Attribute {
unsafe { LLVMRustCreateAttrStringValue(llcx, attr.as_ptr(), value.as_ptr()) }
}
pub fn CreateAttrString<'ll>(llcx: &'ll Context, attr: &CStr) -> &'ll Attribute {
unsafe { LLVMRustCreateAttrStringValue(llcx, attr.as_ptr(), std::ptr::null()) }
}
pub fn CreateAlignmentAttr(llcx: &Context, bytes: u64) -> &Attribute {
unsafe { LLVMRustCreateAlignmentAttr(llcx, bytes) }
}
pub fn CreateDereferenceableAttr(llcx: &Context, bytes: u64) -> &Attribute {
unsafe { LLVMRustCreateDereferenceableAttr(llcx, bytes) }
}
pub fn CreateDereferenceableOrNullAttr(llcx: &Context, bytes: u64) -> &Attribute {
unsafe { LLVMRustCreateDereferenceableOrNullAttr(llcx, bytes) }
}
pub fn CreateByValAttr<'ll>(llcx: &'ll Context, ty: &'ll Type) -> &'ll Attribute {
unsafe { LLVMRustCreateByValAttr(llcx, ty) }
}
pub fn CreateStructRetAttr<'ll>(llcx: &'ll Context, ty: &'ll Type) -> &'ll Attribute {
unsafe { LLVMRustCreateStructRetAttr(llcx, ty) }
}
pub fn CreateUWTableAttr(llcx: &Context, async_: bool) -> &Attribute {
unsafe { LLVMRustCreateUWTableAttr(llcx, async_) }
}
#[derive(Copy, Clone)]
@ -132,25 +166,10 @@ pub fn set_thread_local_mode(global: &Value, mode: ThreadLocalMode) {
}
}
-impl Attribute {
-pub fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) {
-unsafe { LLVMRustAddFunctionAttribute(llfn, idx.as_uint(), *self) }
-}
+impl AttributeKind {
+/// Create an LLVM Attribute with no associated value.
+pub fn create_attr(self, llcx: &Context) -> &Attribute {
+unsafe { LLVMRustCreateAttrNoValue(llcx, self) }
pub fn apply_callsite(&self, idx: AttributePlace, callsite: &Value) {
unsafe { LLVMRustAddCallSiteAttribute(callsite, idx.as_uint(), *self) }
}
pub fn unapply_llfn(&self, idx: AttributePlace, llfn: &Value) {
unsafe { LLVMRustRemoveFunctionAttributes(llfn, idx.as_uint(), *self) }
}
pub fn toggle_llfn(&self, idx: AttributePlace, llfn: &Value, set: bool) {
if set {
self.apply_llfn(idx, llfn);
} else {
self.unapply_llfn(idx, llfn);
}
}
}

@ -232,142 +232,103 @@ static Attribute::AttrKind fromRust(LLVMRustAttribute Kind) {
report_fatal_error("bad AttributeKind");
}
-template<typename T> static inline void AddAttribute(T *t, unsigned Index, Attribute Attr) {
-#if LLVM_VERSION_LT(14, 0)
-t->addAttribute(Index, Attr);
+template<typename T> static inline void AddAttributes(T *t, unsigned Index,
+LLVMAttributeRef *Attrs, size_t AttrsLen) {
+AttributeList PAL = t->getAttributes();
#else
t->addAttributeAtIndex(Index, Attr);
#endif
}
extern "C" void LLVMRustAddCallSiteAttribute(LLVMValueRef Instr, unsigned Index,
LLVMRustAttribute RustAttr) {
CallBase *Call = unwrap<CallBase>(Instr);
Attribute Attr = Attribute::get(Call->getContext(), fromRust(RustAttr));
AddAttribute(Call, Index, Attr);
}
extern "C" void LLVMRustAddCallSiteAttrString(LLVMValueRef Instr, unsigned Index,
const char *Name) {
CallBase *Call = unwrap<CallBase>(Instr);
Attribute Attr = Attribute::get(Call->getContext(), Name);
AddAttribute(Call, Index, Attr);
}
extern "C" void LLVMRustAddAlignmentCallSiteAttr(LLVMValueRef Instr,
unsigned Index,
uint32_t Bytes) {
CallBase *Call = unwrap<CallBase>(Instr);
Attribute Attr = Attribute::getWithAlignment(Call->getContext(), Align(Bytes));
AddAttribute(Call, Index, Attr);
}
extern "C" void LLVMRustAddDereferenceableCallSiteAttr(LLVMValueRef Instr,
unsigned Index,
uint64_t Bytes) {
CallBase *Call = unwrap<CallBase>(Instr);
Attribute Attr = Attribute::getWithDereferenceableBytes(Call->getContext(), Bytes);
AddAttribute(Call, Index, Attr);
}
extern "C" void LLVMRustAddDereferenceableOrNullCallSiteAttr(LLVMValueRef Instr,
unsigned Index,
uint64_t Bytes) {
CallBase *Call = unwrap<CallBase>(Instr);
Attribute Attr = Attribute::getWithDereferenceableOrNullBytes(Call->getContext(), Bytes);
AddAttribute(Call, Index, Attr);
}
extern "C" void LLVMRustAddByValCallSiteAttr(LLVMValueRef Instr, unsigned Index,
LLVMTypeRef Ty) {
CallBase *Call = unwrap<CallBase>(Instr);
Attribute Attr = Attribute::getWithByValType(Call->getContext(), unwrap(Ty));
AddAttribute(Call, Index, Attr);
}
extern "C" void LLVMRustAddStructRetCallSiteAttr(LLVMValueRef Instr, unsigned Index,
LLVMTypeRef Ty) {
CallBase *Call = unwrap<CallBase>(Instr);
Attribute Attr = Attribute::getWithStructRetType(Call->getContext(), unwrap(Ty));
AddAttribute(Call, Index, Attr);
}
extern "C" void LLVMRustAddFunctionAttribute(LLVMValueRef Fn, unsigned Index,
LLVMRustAttribute RustAttr) {
Function *A = unwrap<Function>(Fn);
Attribute Attr = Attribute::get(A->getContext(), fromRust(RustAttr));
AddAttribute(A, Index, Attr);
}
extern "C" void LLVMRustAddAlignmentAttr(LLVMValueRef Fn,
unsigned Index,
uint32_t Bytes) {
Function *A = unwrap<Function>(Fn);
AddAttribute(A, Index, Attribute::getWithAlignment(
A->getContext(), llvm::Align(Bytes)));
}
extern "C" void LLVMRustAddDereferenceableAttr(LLVMValueRef Fn, unsigned Index,
uint64_t Bytes) {
Function *A = unwrap<Function>(Fn);
AddAttribute(A, Index, Attribute::getWithDereferenceableBytes(A->getContext(),
Bytes));
}
extern "C" void LLVMRustAddDereferenceableOrNullAttr(LLVMValueRef Fn,
unsigned Index,
uint64_t Bytes) {
Function *A = unwrap<Function>(Fn);
AddAttribute(A, Index, Attribute::getWithDereferenceableOrNullBytes(
A->getContext(), Bytes));
}
extern "C" void LLVMRustAddByValAttr(LLVMValueRef Fn, unsigned Index,
LLVMTypeRef Ty) {
Function *F = unwrap<Function>(Fn);
Attribute Attr = Attribute::getWithByValType(F->getContext(), unwrap(Ty));
AddAttribute(F, Index, Attr);
}
extern "C" void LLVMRustAddStructRetAttr(LLVMValueRef Fn, unsigned Index,
LLVMTypeRef Ty) {
Function *F = unwrap<Function>(Fn);
Attribute Attr = Attribute::getWithStructRetType(F->getContext(), unwrap(Ty));
AddAttribute(F, Index, Attr);
}
extern "C" void LLVMRustEmitUWTableAttr(LLVMValueRef Fn, bool Async) {
Function *F = unwrap<Function>(Fn);
#if LLVM_VERSION_LT(15, 0)
Attribute Attr = Attribute::get(F->getContext(), Attribute::UWTable);
#else
Attribute Attr = Attribute::getWithUWTableKind(
F->getContext(), Async ? UWTableKind::Async : UWTableKind::Sync);
#endif
AddAttribute(F, AttributeList::AttrIndex::FunctionIndex, Attr);
}
extern "C" void LLVMRustAddFunctionAttrStringValue(LLVMValueRef Fn,
unsigned Index,
const char *Name,
const char *Value) {
Function *F = unwrap<Function>(Fn);
AddAttribute(F, Index, Attribute::get(
F->getContext(), StringRef(Name), StringRef(Value)));
}
extern "C" void LLVMRustRemoveFunctionAttributes(LLVMValueRef Fn,
unsigned Index,
LLVMRustAttribute RustAttr) {
Function *F = unwrap<Function>(Fn);
AttributeList PAL = F->getAttributes();
AttributeList PALNew;
#if LLVM_VERSION_LT(14, 0)
-PALNew = PAL.removeAttribute(F->getContext(), Index, fromRust(RustAttr));
+AttrBuilder B;
for (LLVMAttributeRef Attr : makeArrayRef(Attrs, AttrsLen))
B.addAttribute(unwrap(Attr));
PALNew = PAL.addAttributes(t->getContext(), Index, B);
#else
-PALNew = PAL.removeAttributeAtIndex(F->getContext(), Index, fromRust(RustAttr));
+AttrBuilder B(t->getContext());
for (LLVMAttributeRef Attr : makeArrayRef(Attrs, AttrsLen))
B.addAttribute(unwrap(Attr));
PALNew = PAL.addAttributesAtIndex(t->getContext(), Index, B);
#endif
t->setAttributes(PALNew);
}
template<typename T> static inline void RemoveAttributes(T *t, unsigned Index,
LLVMRustAttribute *RustAttrs,
size_t RustAttrsLen) {
AttributeList PAL = t->getAttributes();
AttributeList PALNew;
#if LLVM_VERSION_LT(14, 0)
AttrBuilder B;
for (LLVMRustAttribute RustAttr : makeArrayRef(RustAttrs, RustAttrsLen))
B.addAttribute(fromRust(RustAttr));
PALNew = PAL.removeAttributes(t->getContext(), Index, B);
#else
AttributeMask Mask;
for (LLVMRustAttribute RustAttr : makeArrayRef(RustAttrs, RustAttrsLen))
Mask.addAttribute(fromRust(RustAttr));
PALNew = PAL.removeAttributesAtIndex(t->getContext(), Index, Mask);
#endif
t->setAttributes(PALNew);
}
extern "C" void LLVMRustAddFunctionAttributes(LLVMValueRef Fn, unsigned Index,
LLVMAttributeRef *Attrs, size_t AttrsLen) {
Function *F = unwrap<Function>(Fn);
AddAttributes(F, Index, Attrs, AttrsLen);
}
extern "C" void LLVMRustRemoveFunctionAttributes(LLVMValueRef Fn, unsigned Index,
LLVMRustAttribute *RustAttrs,
size_t RustAttrsLen) {
Function *F = unwrap<Function>(Fn);
RemoveAttributes(F, Index, RustAttrs, RustAttrsLen);
}
extern "C" void LLVMRustAddCallSiteAttributes(LLVMValueRef Instr, unsigned Index,
LLVMAttributeRef *Attrs, size_t AttrsLen) {
CallBase *Call = unwrap<CallBase>(Instr);
AddAttributes(Call, Index, Attrs, AttrsLen);
}
extern "C" LLVMAttributeRef LLVMRustCreateAttrNoValue(LLVMContextRef C,
LLVMRustAttribute RustAttr) {
return wrap(Attribute::get(*unwrap(C), fromRust(RustAttr)));
}
extern "C" LLVMAttributeRef LLVMRustCreateAttrStringValue(LLVMContextRef C,
const char *Name,
const char *Value) {
return wrap(Attribute::get(*unwrap(C), StringRef(Name), StringRef(Value)));
}
extern "C" LLVMAttributeRef LLVMRustCreateAlignmentAttr(LLVMContextRef C,
uint64_t Bytes) {
return wrap(Attribute::getWithAlignment(*unwrap(C), llvm::Align(Bytes)));
}
extern "C" LLVMAttributeRef LLVMRustCreateDereferenceableAttr(LLVMContextRef C,
uint64_t Bytes) {
return wrap(Attribute::getWithDereferenceableBytes(*unwrap(C), Bytes));
}
extern "C" LLVMAttributeRef LLVMRustCreateDereferenceableOrNullAttr(LLVMContextRef C,
uint64_t Bytes) {
return wrap(Attribute::getWithDereferenceableOrNullBytes(*unwrap(C), Bytes));
}
extern "C" LLVMAttributeRef LLVMRustCreateByValAttr(LLVMContextRef C, LLVMTypeRef Ty) {
return wrap(Attribute::getWithByValType(*unwrap(C), unwrap(Ty)));
}
extern "C" LLVMAttributeRef LLVMRustCreateStructRetAttr(LLVMContextRef C, LLVMTypeRef Ty) {
return wrap(Attribute::getWithStructRetType(*unwrap(C), unwrap(Ty)));
}
extern "C" LLVMAttributeRef LLVMRustCreateUWTableAttr(LLVMContextRef C, bool Async) {
#if LLVM_VERSION_LT(15, 0)
return wrap(Attribute::get(*unwrap(C), Attribute::UWTable));
#else
return wrap(Attribute::getWithUWTableKind(
*unwrap(C), Async ? UWTableKind::Async : UWTableKind::Sync));
#endif
-F->setAttributes(PALNew);
}
// Enable a fast-math flag