// rust/src/librustc_trans/context.rs

// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use common;
use llvm;
use llvm::{ContextRef, ModuleRef, ValueRef};
use rustc::dep_graph::DepGraphSafe;
use rustc::hir;
use rustc::hir::def_id::DefId;
use debuginfo;
use callee;
use base;
use declare;
use monomorphize::Instance;
use monomorphize::partitioning::CodegenUnit;
use type_::Type;
use type_of::PointeeInfo;
use rustc_data_structures::base_n;
use rustc::mir::mono::Stats;
use rustc::session::config::{self, NoDebugInfo};
use rustc::session::Session;
use rustc::ty::layout::{LayoutError, LayoutOf, Size, TyLayout};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::util::nodemap::FxHashMap;
use std::ffi::{CStr, CString};
use std::cell::{Cell, RefCell};
use std::ptr;
use std::iter;
use std::str;
use std::sync::Arc;
use syntax::symbol::InternedString;
use abi::Abi;
/// There is one `CodegenCx` per compilation unit. Each one has its own LLVM
/// `ContextRef` so that several compilation units may be optimized in parallel.
/// All other LLVM data structures in the `CodegenCx` are tied to that `ContextRef`.
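///
/// A rough usage sketch (illustrative only; the driver code that owns the
/// codegen-unit loop lives elsewhere, and `llmod_id` here is a hypothetical
/// module name):
///
/// ```ignore
/// for cgu in codegen_units {
///     // `cgu: Arc<CodegenUnit>` gets its own context, module and stats
///     let cx = CodegenCx::new(tcx, cgu, &llmod_id);
///     // ... translate the CGU's items into `cx.llmod`, then optimize that
///     // module independently of every other codegen unit ...
/// }
/// ```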
pub struct CodegenCx<'a, 'tcx: 'a> {
pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
pub check_overflow: bool,
pub use_dll_storage_attrs: bool,
pub tls_model: llvm::ThreadLocalMode,
pub llmod: ModuleRef,
pub llcx: ContextRef,
pub stats: RefCell<Stats>,
pub codegen_unit: Arc<CodegenUnit<'tcx>>,
/// Cache instances of monomorphic and polymorphic items
pub instances: RefCell<FxHashMap<Instance<'tcx>, ValueRef>>,
/// Cache generated vtables
pub vtables: RefCell<FxHashMap<(Ty<'tcx>,
Option<ty::PolyExistentialTraitRef<'tcx>>), ValueRef>>,
/// Cache of constant strings.
pub const_cstr_cache: RefCell<FxHashMap<InternedString, ValueRef>>,
/// Reverse-direction for const ptrs cast from globals.
/// Key is a ValueRef holding a *T,
/// Val is a ValueRef holding a *[T].
///
/// Needed because LLVM loses pointer->pointee association
/// when we ptrcast, and we have to ptrcast during translation
/// of a [T] const because we form a slice, a (*T,usize) pair, not
/// a pointer to an LLVM array type. Similar for trait objects.
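///
/// A hedged sketch of the shapes involved (not literal code from this
/// module): a constant `&[u8]` is handed to LLVM as a pair
///
/// ```ignore
/// struct Slice { data: *const u8, len: usize }
/// ```
///
/// where `data` is a ptrcast of a `[u8; N]`-typed global; this map lets us
/// recover that original array-typed global from the casted pointer.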
pub const_unsized: RefCell<FxHashMap<ValueRef, ValueRef>>,
/// Cache of emitted const globals (value -> global)
pub const_globals: RefCell<FxHashMap<ValueRef, ValueRef>>,
/// Mapping from the LLVM values of static definitions to their `DefId`s.
pub statics: RefCell<FxHashMap<ValueRef, DefId>>,
/// List of globals for static variables which need to be passed to the
/// LLVM function ReplaceAllUsesWith (RAUW) when translation is complete.
/// (We have to make sure we don't invalidate any ValueRefs referring
/// to constants.)
pub statics_to_rauw: RefCell<Vec<(ValueRef, ValueRef)>>,
/// Statics that will be placed in the llvm.used variable
/// See http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable for details
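///
/// For example (hedged sketch), a static annotated with `#[used]`:
///
/// ```ignore
/// #[used]
/// static KEEP: u8 = 0; // its LLVM global is recorded here and emitted
///                      // into `llvm.used`, so it survives optimization
/// ```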
pub used_statics: RefCell<Vec<ValueRef>>,
pub lltypes: RefCell<FxHashMap<(Ty<'tcx>, Option<usize>), Type>>,
pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, Type>>,
pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
pub isize_ty: Type,
pub dbg_cx: Option<debuginfo::CrateDebugContext<'tcx>>,
eh_personality: Cell<Option<ValueRef>>,
eh_unwind_resume: Cell<Option<ValueRef>>,
pub rust_try_fn: Cell<Option<ValueRef>>,
intrinsics: RefCell<FxHashMap<&'static str, ValueRef>>,
/// A counter that is used for generating local symbol names
local_gen_sym_counter: Cell<usize>,
}
impl<'a, 'tcx> DepGraphSafe for CodegenCx<'a, 'tcx> {
}
pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode {
let reloc_model_arg = match sess.opts.cg.relocation_model {
Some(ref s) => &s[..],
None => &sess.target.target.options.relocation_model[..],
};
match ::back::write::RELOC_MODEL_ARGS.iter().find(
|&&arg| arg.0 == reloc_model_arg) {
Some(x) => x.1,
_ => {
sess.err(&format!("{:?} is not a valid relocation mode",
reloc_model_arg));
sess.abort_if_errors();
bug!();
}
}
}
fn get_tls_model(sess: &Session) -> llvm::ThreadLocalMode {
let tls_model_arg = match sess.opts.debugging_opts.tls_model {
Some(ref s) => &s[..],
None => &sess.target.target.options.tls_model[..],
};
match ::back::write::TLS_MODEL_ARGS.iter().find(
|&&arg| arg.0 == tls_model_arg) {
Some(x) => x.1,
_ => {
sess.err(&format!("{:?} is not a valid TLS model",
tls_model_arg));
sess.abort_if_errors();
bug!();
}
}
}
fn is_any_library(sess: &Session) -> bool {
sess.crate_types.borrow().iter().any(|ty| {
*ty != config::CrateTypeExecutable
})
}
pub fn is_pie_binary(sess: &Session) -> bool {
!is_any_library(sess) && get_reloc_model(sess) == llvm::RelocMode::PIC
}
pub unsafe fn create_context_and_module(sess: &Session, mod_name: &str) -> (ContextRef, ModuleRef) {
let llcx = llvm::LLVMRustContextCreate(sess.fewer_names());
let mod_name = CString::new(mod_name).unwrap();
let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx);
// Ensure that the hardcoded data-layout values remain the defaults.
if sess.target.target.options.is_builtin {
let tm = ::back::write::create_target_machine(sess);
llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
llvm::LLVMRustDisposeTargetMachine(tm);
let data_layout = llvm::LLVMGetDataLayout(llmod);
let data_layout = str::from_utf8(CStr::from_ptr(data_layout).to_bytes())
.ok().expect("got a non-UTF8 data-layout from LLVM");
// Unfortunately LLVM target specs change over time, and right now we
// don't have proper support for working with any `data_layout` other
// than the one in the rust-lang/rust repo. If this compiler is
// configured against a custom LLVM, we may end up with a differing data
// layout, even though ideally we would update our own to match it.
//
// As an interim hack, if CFG_LLVM_ROOT is not an empty string then we
// disable this check entirely as we may be configured with something
// that has a different target layout.
//
// Unsure if this will actually cause breakage when rustc is configured
// as such.
//
// FIXME(#34960)
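// For reference, a data-layout string looks roughly like the following
// (hedged example, quoted from memory of the x86_64 Linux target spec of
// this era):
//
//     e-m:e-i64:64-f80:128-n8:16:32:64-S128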
let cfg_llvm_root = option_env!("CFG_LLVM_ROOT").unwrap_or("");
let custom_llvm_used = cfg_llvm_root.trim() != "";
if !custom_llvm_used && sess.target.target.data_layout != data_layout {
bug!("data-layout for builtin `{}` target, `{}`, \
differs from LLVM default, `{}`",
sess.target.target.llvm_target,
sess.target.target.data_layout,
data_layout);
}
}
let data_layout = CString::new(&sess.target.target.data_layout[..]).unwrap();
llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());
let llvm_target = sess.target.target.llvm_target.as_bytes();
let llvm_target = CString::new(llvm_target).unwrap();
llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr());
if is_pie_binary(sess) {
llvm::LLVMRustSetModulePIELevel(llmod);
}
(llcx, llmod)
}
impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>,
codegen_unit: Arc<CodegenUnit<'tcx>>,
llmod_id: &str)
-> CodegenCx<'a, 'tcx> {
// An interesting part of Windows which MSVC forces our hand on (and
// apparently MinGW didn't) is the usage of `dllimport` and `dllexport`
// attributes in LLVM IR as well as native dependencies (in C these
// correspond to `__declspec(dllimport)`).
//
// Whenever a dynamic library is built by MSVC it must have its public
// interface specified by functions tagged with `dllexport` or otherwise
// they're not available to be linked against. This poses a few problems
// for the compiler, some of which are somewhat fundamental, but we use
// the `use_dll_storage_attrs` variable below to attach the `dllexport`
// attribute to all LLVM functions that are exported (e.g. they're
// already tagged with external linkage). This is suboptimal for a few
// reasons:
//
// * If an object file will never be included in a dynamic library,
// there's no need to attach the dllexport attribute. Most object
// files in Rust are not destined to become part of a dll as binaries
// are statically linked by default.
// * If the compiler is emitting both an rlib and a dylib, the same
// source object file is currently used but with MSVC this may be less
// feasible. The compiler may be able to get around this, but it may
// involve some invasive changes to deal with this.
//
// The flipside of this situation is that whenever you link to a dll and
// you import a function from it, the import should be tagged with
// `dllimport`. At this time, however, the compiler does not emit
// `dllimport` for any declarations other than constants (where it is
// required), which is again suboptimal for even more reasons!
//
// * Calling a function imported from another dll without using
// `dllimport` causes the linker/compiler to have extra overhead (one
// `jmp` instruction on x86) when calling the function.
// * The same object file may be used in different circumstances, so a
// function may be imported from a dll if the object is linked into a
// dll, but it may be just linked against if linked into an rlib.
// * The compiler has no knowledge about whether native functions should
// be tagged dllimport or not.
//
// For now the compiler takes the perf hit (I do not have any numbers to
// this effect) by marking very little as `dllimport` and praying the
// linker will take care of everything. Fixing this problem will likely
// require adding a few attributes to Rust itself (feature gated at the
// start) and then strongly recommending static linkage on MSVC!
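// A hedged illustration of the two cases described above, in Rust terms:
//
//     pub extern "C" fn provided_by_us() {}   // exported: tagged `dllexport`
//                                             // when use_dll_storage_attrs is set
//     extern "C" { fn from_some_dll(); }      // imported: ideally `dllimport`,
//                                             // currently left untagged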
let use_dll_storage_attrs = tcx.sess.target.target.options.is_like_msvc;
let check_overflow = tcx.sess.overflow_checks();
let tls_model = get_tls_model(&tcx.sess);
unsafe {
let (llcx, llmod) = create_context_and_module(&tcx.sess,
&llmod_id[..]);
let dbg_cx = if tcx.sess.opts.debuginfo != NoDebugInfo {
let dctx = debuginfo::CrateDebugContext::new(llmod);
debuginfo::metadata::compile_unit_metadata(tcx,
codegen_unit.name(),
&dctx);
Some(dctx)
} else {
None
};
let mut cx = CodegenCx {
tcx,
check_overflow,
use_dll_storage_attrs,
tls_model,
llmod,
llcx,
stats: RefCell::new(Stats::default()),
codegen_unit,
instances: RefCell::new(FxHashMap()),
vtables: RefCell::new(FxHashMap()),
const_cstr_cache: RefCell::new(FxHashMap()),
const_unsized: RefCell::new(FxHashMap()),
const_globals: RefCell::new(FxHashMap()),
statics: RefCell::new(FxHashMap()),
statics_to_rauw: RefCell::new(Vec::new()),
used_statics: RefCell::new(Vec::new()),
lltypes: RefCell::new(FxHashMap()),
scalar_lltypes: RefCell::new(FxHashMap()),
pointee_infos: RefCell::new(FxHashMap()),
isize_ty: Type::from_ref(ptr::null_mut()),
dbg_cx,
eh_personality: Cell::new(None),
eh_unwind_resume: Cell::new(None),
rust_try_fn: Cell::new(None),
intrinsics: RefCell::new(FxHashMap()),
local_gen_sym_counter: Cell::new(0),
};
cx.isize_ty = Type::isize(&cx);
cx
}
}
pub fn into_stats(self) -> Stats {
self.stats.into_inner()
}
}
impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
pub fn sess<'a>(&'a self) -> &'a Session {
&self.tcx.sess
}
pub fn get_intrinsic(&self, key: &str) -> ValueRef {
if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
return v;
}
match declare_intrinsic(self, key) {
Some(v) => return v,
None => bug!("unknown intrinsic '{}'", key)
}
}
/// Generate a new symbol name with the given prefix. This symbol name must
/// only be used for definitions with `internal` or `private` linkage.
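///
/// Illustrative usage (hedged; the exact suffix format is an
/// implementation detail of the base-62 encoding used below):
///
/// ```ignore
/// let a = cx.generate_local_symbol_name("str"); // e.g. "str.0"
/// let b = cx.generate_local_symbol_name("str"); // e.g. "str.1"
/// ```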
pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
let idx = self.local_gen_sym_counter.get();
self.local_gen_sym_counter.set(idx + 1);
// Include a '.' character, so there can be no accidental conflicts with
// user-defined names.
let mut name = String::with_capacity(prefix.len() + 6);
name.push_str(prefix);
name.push_str(".");
base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
name
}
pub fn eh_personality(&self) -> ValueRef {
// The exception handling personality function.
//
// If our compilation unit has the `eh_personality` lang item somewhere
// within it, then we just need to translate that. Otherwise, we're
// building an rlib which will depend on some upstream implementation of
// this function, so we just codegen a generic reference to it. We don't
// specify any of the types for the function; we just make it a symbol
// that LLVM can later use.
//
// Note that MSVC is a little special here in that we don't use the
// `eh_personality` lang item at all. Currently LLVM has support for
// both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
// *name of the personality function* to decide what kind of unwind side
// tables/landing pads to emit. It looks like Dwarf is used by default,
// injecting a dependency on the `_Unwind_Resume` symbol for resuming
// an "exception", but for MSVC we want to force SEH. This means that we
// can't actually have the personality function be our standard
// `rust_eh_personality` function, but rather we wired it up to the
// CRT's custom personality function, which forces LLVM to consider
// landing pads as "landing pads for SEH".
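// Hedged sketch of what the fallback declaration below amounts to on the
// two paths just described (not literal code from this module):
//
//     extern "C" { fn rust_eh_personality(); }   // non-MSVC: opaque upstream symbol
//     extern "C" { fn __CxxFrameHandler3(); }    // MSVC: the CRT's C++ personality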
if let Some(llpersonality) = self.eh_personality.get() {
return llpersonality
}
let tcx = self.tcx;
let llfn = match tcx.lang_items().eh_personality() {
Some(def_id) if !base::wants_msvc_seh(self.sess()) => {
callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]))
}
_ => {
let name = if base::wants_msvc_seh(self.sess()) {
"__CxxFrameHandler3"
} else {
"rust_eh_personality"
};
let fty = Type::variadic_func(&[], &Type::i32(self));
declare::declare_cfn(self, name, fty)
}
};
self.eh_personality.set(Some(llfn));
llfn
}
// Returns a ValueRef of the "eh_unwind_resume" lang item if one is defined,
// otherwise declares it as an external function.
pub fn eh_unwind_resume(&self) -> ValueRef {
use attributes;
let unwresume = &self.eh_unwind_resume;
if let Some(llfn) = unwresume.get() {
return llfn;
}
let tcx = self.tcx;
assert!(self.sess().target.target.options.custom_unwind_resume);
if let Some(def_id) = tcx.lang_items().eh_unwind_resume() {
let llfn = callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]));
unwresume.set(Some(llfn));
return llfn;
}
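// The fallback declaration below has, in Rust terms, (hedged) the shape
// `unsafe extern "C" fn(*mut u8) -> !`.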
let ty = tcx.mk_fn_ptr(ty::Binder(tcx.mk_fn_sig(
iter::once(tcx.mk_mut_ptr(tcx.types.u8)),
tcx.types.never,
false,
hir::Unsafety::Unsafe,
Abi::C
)));
let llfn = declare::declare_fn(self, "rust_eh_unwind_resume", ty);
attributes::unwind(llfn, true);
unwresume.set(Some(llfn));
llfn
}
pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
common::type_needs_drop(self.tcx, ty)
}
pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
common::type_is_sized(self.tcx, ty)
}
pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
common::type_is_freeze(self.tcx, ty)
}
pub fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool {
use syntax_pos::DUMMY_SP;
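// Hedged examples of what this distinguishes: unsized tails such as
// `[u8]`, `str` and `dyn Trait` carry metadata (a length or a vtable), so
// pointers to them are fat and this returns true; sized types and
// `extern type` tails have no metadata, so it returns false.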
if ty.is_sized(self.tcx.at(DUMMY_SP), ty::ParamEnv::reveal_all()) {
return false;
}
let tail = self.tcx.struct_tail(ty);
match tail.sty {
ty::TyForeign(..) => false,
ty::TyStr | ty::TySlice(..) | ty::TyDynamic(..) => true,
_ => bug!("unexpected unsized tail: {:?}", tail.sty),
}
}
}
impl<'a, 'tcx> ty::layout::HasDataLayout for &'a CodegenCx<'a, 'tcx> {
fn data_layout(&self) -> &ty::layout::TargetDataLayout {
&self.tcx.data_layout
}
}
impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a CodegenCx<'a, 'tcx> {
fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
self.tcx
}
}
impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a CodegenCx<'a, 'tcx> {
type TyLayout = TyLayout<'tcx>;
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
self.tcx.layout_of(ty::ParamEnv::reveal_all().and(ty))
.unwrap_or_else(|e| match e {
LayoutError::SizeOverflow(_) => self.sess().fatal(&e.to_string()),
_ => bug!("failed to get layout for `{}`: {}", ty, e)
})
}
}
/// Declares the LLVM intrinsic named by `key`, caching it in `cx.intrinsics`;
/// returns `None` if the name is not recognized.
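///
/// Hedged usage sketch, going through the public `get_intrinsic` wrapper:
///
/// ```ignore
/// let llfn = cx.get_intrinsic("llvm.trap"); // declared lazily on first use
/// ```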
fn declare_intrinsic(cx: &CodegenCx, key: &str) -> Option<ValueRef> {
macro_rules! ifn {
($name:expr, fn() -> $ret:expr) => (
if key == $name {
let f = declare::declare_cfn(cx, $name, Type::func(&[], &$ret));
llvm::SetUnnamedAddr(f, false);
cx.intrinsics.borrow_mut().insert($name, f.clone());
return Some(f);
}
);
($name:expr, fn(...) -> $ret:expr) => (
if key == $name {
let f = declare::declare_cfn(cx, $name, Type::variadic_func(&[], &$ret));
llvm::SetUnnamedAddr(f, false);
cx.intrinsics.borrow_mut().insert($name, f.clone());
return Some(f);
}
);
($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
if key == $name {
let f = declare::declare_cfn(cx, $name, Type::func(&[$($arg),*], &$ret));
llvm::SetUnnamedAddr(f, false);
cx.intrinsics.borrow_mut().insert($name, f.clone());
return Some(f);
}
);
}
macro_rules! mk_struct {
($($field_ty:expr),*) => (Type::struct_(cx, &[$($field_ty),*], false))
}
let i8p = Type::i8p(cx);
let void = Type::void(cx);
let i1 = Type::i1(cx);
let t_i8 = Type::i8(cx);
let t_i16 = Type::i16(cx);
let t_i32 = Type::i32(cx);
let t_i64 = Type::i64(cx);
let t_i128 = Type::i128(cx);
let t_f32 = Type::f32(cx);
let t_f64 = Type::f64(cx);
ifn!("llvm.memcpy.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void);
ifn!("llvm.memcpy.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void);
ifn!("llvm.memcpy.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void);
ifn!("llvm.memmove.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void);
ifn!("llvm.memmove.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void);
ifn!("llvm.memmove.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void);
ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void);
ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void);
ifn!("llvm.memset.p0i8.i64", fn(i8p, t_i8, t_i64, t_i32, i1) -> void);
ifn!("llvm.trap", fn() -> void);
ifn!("llvm.debugtrap", fn() -> void);
ifn!("llvm.frameaddress", fn(t_i32) -> i8p);
ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);
ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32);
ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64);
ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32);
ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64);
ifn!("llvm.sin.f32", fn(t_f32) -> t_f32);
ifn!("llvm.sin.f64", fn(t_f64) -> t_f64);
ifn!("llvm.cos.f32", fn(t_f32) -> t_f32);
ifn!("llvm.cos.f64", fn(t_f64) -> t_f64);
ifn!("llvm.exp.f32", fn(t_f32) -> t_f32);
ifn!("llvm.exp.f64", fn(t_f64) -> t_f64);
ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32);
ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64);
ifn!("llvm.log.f32", fn(t_f32) -> t_f32);
ifn!("llvm.log.f64", fn(t_f64) -> t_f64);
ifn!("llvm.log10.f32", fn(t_f32) -> t_f32);
ifn!("llvm.log10.f64", fn(t_f64) -> t_f64);
ifn!("llvm.log2.f32", fn(t_f32) -> t_f32);
ifn!("llvm.log2.f64", fn(t_f64) -> t_f64);
ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32);
ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64);
ifn!("llvm.floor.f32", fn(t_f32) -> t_f32);
ifn!("llvm.floor.f64", fn(t_f64) -> t_f64);
ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32);
ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64);
ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32);
ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64);
ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32);
ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64);
ifn!("llvm.round.f32", fn(t_f32) -> t_f32);
ifn!("llvm.round.f64", fn(t_f64) -> t_f64);
ifn!("llvm.rint.f32", fn(t_f32) -> t_f32);
ifn!("llvm.rint.f64", fn(t_f64) -> t_f64);
ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32);
ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64);
ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8);
ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16);
ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32);
ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64);
ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128);
ifn!("llvm.ctlz.i8", fn(t_i8 , i1) -> t_i8);
ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16);
ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32);
ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64);
ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128);
ifn!("llvm.cttz.i8", fn(t_i8 , i1) -> t_i8);
ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16);
ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32);
ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64);
ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128);
ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16);
ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32);
ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64);
ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128);
ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8);
ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16);
ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32);
ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64);
ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128);
ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1});
ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1});
ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1});
ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1});
ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1});
ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1});
ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1});
ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void);
ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void);
ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
ifn!("llvm.localescape", fn(...) -> void);
ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p);
ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p);
ifn!("llvm.assume", fn(i1) -> void);
ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void);
if cx.sess().opts.debuginfo != NoDebugInfo {
ifn!("llvm.dbg.declare", fn(Type::metadata(cx), Type::metadata(cx)) -> void);
ifn!("llvm.dbg.value", fn(Type::metadata(cx), t_i64, Type::metadata(cx)) -> void);
}
return None;
}