// rust/compiler/rustc_codegen_llvm/src/common.rs

//! Code that is useful in various codegen modules.

use crate::consts::{self, const_alloc_to_llvm};
pub use crate::context::CodegenCx;
use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;

use rustc_ast::Mutability;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_middle::bug;
use rustc_middle::mir::interpret::{Allocation, GlobalAlloc, Scalar};
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::ScalarInt;
use rustc_span::symbol::Symbol;
use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer, Size};

use libc::{c_char, c_uint};

use tracing::debug;

/*
* A note on nomenclature of linking: "extern", "foreign", and "upcall".
*
* An "extern" is an LLVM symbol we wind up emitting an undefined external
* reference to. This means "we don't have the thing in this compilation unit,
* please make sure you link it in at runtime". This could be a reference to
* C code found in a C library, or rust code found in a rust crate.
*
* Most "externs" are implicitly declared (automatically) as a result of a
* user declaring an extern _module_ dependency; this causes the rust driver
* to locate an extern crate, scan its compilation metadata, and emit extern
* declarations for any symbols used by the declaring crate.
*
* A "foreign" is an extern that references C (or other non-rust ABI) code.
* There is no metadata to scan for extern references so in these cases either
* a header-digester like bindgen, or manual function prototypes, have to
* serve as declarators. So these are usually given explicitly as prototype
* declarations, in rust code, with ABI attributes on them noting which ABI to
* link via.
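 *
 * For example (illustrative only, not code from this module), such a foreign
 * prototype might be declared in Rust roughly as:
 *
 *     extern "C" {
 *         fn strlen(s: *const c_char) -> usize;
 *     }
 *
 * and codegen then emits an undefined external reference to `strlen` for the
 * linker to resolve.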
*
* An "upcall" is a foreign call generated by the compiler (not corresponding
* to any user-written call in the code) into the runtime library, to perform
* some helper task such as bringing a task to life, allocating memory, etc.
*
*/
/// A structure representing an active landing pad for the duration of a basic
/// block.
///
/// Each `Block` may contain an instance of this, indicating whether the block
/// is part of a landing pad or not. This is used to make decisions about whether
/// to emit `invoke` instructions (e.g., in a landing pad we don't continue to
/// use `invoke`) and also about various function call metadata.
///
/// For GNU exceptions (`landingpad` + `resume` instructions) this structure is
/// just a bunch of `None` instances (not too interesting), but for MSVC
/// exceptions (`cleanuppad` + `cleanupret` instructions) this contains data.
/// When inside of a landing pad, each function call in LLVM IR needs to be
/// annotated with which landing pad it's a part of. This is accomplished via
/// the `OperandBundleDef` value created for MSVC landing pads.
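///
/// Illustrative only (not IR emitted verbatim by this module): a call inside an
/// MSVC funclet carries a `"funclet"` operand bundle in the LLVM IR, roughly:
///
/// ```text
/// %pad = cleanuppad within none []
/// call void @helper() [ "funclet"(token %pad) ]
/// ```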
pub struct Funclet<'ll> {
    cleanuppad: &'ll Value,
    operand: OperandBundleDef<'ll>,
}

impl Funclet<'ll> {
    pub fn new(cleanuppad: &'ll Value) -> Self {
        Funclet { cleanuppad, operand: OperandBundleDef::new("funclet", &[cleanuppad]) }
    }

    pub fn cleanuppad(&self) -> &'ll Value {
        self.cleanuppad
    }

    pub fn bundle(&self) -> &OperandBundleDef<'ll> {
        &self.operand
    }
}

impl BackendTypes for CodegenCx<'ll, 'tcx> {
    type Value = &'ll Value;
    // FIXME(eddyb) replace this with a `Function` "subclass" of `Value`.
    type Function = &'ll Value;

    type BasicBlock = &'ll BasicBlock;
    type Type = &'ll Type;
    type Funclet = Funclet<'ll>;

    type DIScope = &'ll llvm::debuginfo::DIScope;
    type DILocation = &'ll llvm::debuginfo::DILocation;
    type DIVariable = &'ll llvm::debuginfo::DIVariable;
}

impl CodegenCx<'ll, 'tcx> {
    pub fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value {
        unsafe { llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint) }
    }

    pub fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value {
        unsafe { llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint) }
    }

    pub fn const_bytes(&self, bytes: &[u8]) -> &'ll Value {
        bytes_in_context(self.llcx, bytes)
    }

    fn const_cstr(&self, s: Symbol, null_terminated: bool) -> &'ll Value {
        unsafe {
            // Reuse a previously interned global for the same symbol, if any.
            if let Some(&llval) = self.const_cstr_cache.borrow().get(&s) {
                return llval;
            }

            let s_str = s.as_str();
            let sc = llvm::LLVMConstStringInContext(
                self.llcx,
                s_str.as_ptr() as *const c_char,
                s_str.len() as c_uint,
                !null_terminated as Bool,
            );
            // Back the string constant with an internal global so it has an address.
            let sym = self.generate_local_symbol_name("str");
            let g = self.define_global(&sym, self.val_ty(sc)).unwrap_or_else(|| {
                bug!("symbol `{}` is already defined", sym);
            });
            llvm::LLVMSetInitializer(g, sc);
            llvm::LLVMSetGlobalConstant(g, True);
            llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage);

            self.const_cstr_cache.borrow_mut().insert(s, g);
            g
        }
    }

    pub fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value {
        unsafe {
            assert_eq!(idx as c_uint as u64, idx);
            let us = &[idx as c_uint];
            let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);

            debug!("const_get_elt(v={:?}, idx={}, r={:?})", v, idx, r);

            r
        }
    }
}

impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
    fn const_null(&self, t: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMConstNull(t) }
    }

    fn const_undef(&self, t: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMGetUndef(t) }
    }

    fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value {
        unsafe { llvm::LLVMConstInt(t, i as u64, True) }
    }

    fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value {
        unsafe { llvm::LLVMConstInt(t, i, False) }
    }

    fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value {
        unsafe {
            // Split the 128-bit value into two 64-bit words, least-significant first.
            let words = [u as u64, (u >> 64) as u64];
            llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr())
        }
    }

    fn const_bool(&self, val: bool) -> &'ll Value {
        self.const_uint(self.type_i1(), val as u64)
    }

    fn const_i32(&self, i: i32) -> &'ll Value {
        self.const_int(self.type_i32(), i as i64)
    }

    fn const_u32(&self, i: u32) -> &'ll Value {
        self.const_uint(self.type_i32(), i as u64)
    }

    fn const_u64(&self, i: u64) -> &'ll Value {
        self.const_uint(self.type_i64(), i)
    }

    fn const_usize(&self, i: u64) -> &'ll Value {
        let bit_size = self.data_layout().pointer_size.bits();
        if bit_size < 64 {
            // make sure it doesn't overflow
            assert!(i < (1 << bit_size));
        }

        self.const_uint(self.isize_ty, i)
    }

    fn const_u8(&self, i: u8) -> &'ll Value {
        self.const_uint(self.type_i8(), i as u64)
    }

    fn const_real(&self, t: &'ll Type, val: f64) -> &'ll Value {
        unsafe { llvm::LLVMConstReal(t, val) }
    }

    fn const_str(&self, s: Symbol) -> (&'ll Value, &'ll Value) {
        let len = s.as_str().len();
        let cs = consts::ptrcast(
            self.const_cstr(s, false),
            self.type_ptr_to(self.layout_of(self.tcx.types.str_).llvm_type(self)),
        );
        (cs, self.const_usize(len as u64))
    }

    fn const_struct(&self, elts: &[&'ll Value], packed: bool) -> &'ll Value {
        struct_in_context(self.llcx, elts, packed)
    }

    fn const_to_opt_uint(&self, v: &'ll Value) -> Option<u64> {
        try_as_const_integral(v).map(|v| unsafe { llvm::LLVMConstIntGetZExtValue(v) })
    }

    fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option<u128> {
        try_as_const_integral(v).and_then(|v| unsafe {
            let (mut lo, mut hi) = (0u64, 0u64);
            let success = llvm::LLVMRustConstInt128Get(v, sign_ext, &mut hi, &mut lo);
            success.then_some(hi_lo_to_u128(lo, hi))
        })
    }

    fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value {
        let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
        match cv {
            Scalar::Int(ScalarInt::ZST) => {
                assert_eq!(0, layout.value.size(self).bytes());
                self.const_undef(self.type_ix(0))
            }
            Scalar::Int(int) => {
                let data = int.assert_bits(layout.value.size(self));
                let llval = self.const_uint_big(self.type_ix(bitsize), data);
                if layout.value == Pointer {
                    unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
                } else {
                    self.const_bitcast(llval, llty)
                }
            }
            Scalar::Ptr(ptr, _size) => {
                let (alloc_id, offset) = ptr.into_parts();
                // Resolve the allocation the pointer refers to and get its base address.
                let (base_addr, base_addr_space) = match self.tcx.global_alloc(alloc_id) {
                    GlobalAlloc::Memory(alloc) => {
                        let init = const_alloc_to_llvm(self, alloc);
                        let value = match alloc.mutability {
                            Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
                            _ => self.static_addr_of(init, alloc.align, None),
                        };
                        if !self.sess().fewer_names() {
                            llvm::set_value_name(value, format!("{:?}", alloc_id).as_bytes());
                        }
                        (value, AddressSpace::DATA)
                    }
                    GlobalAlloc::Function(fn_instance) => (
                        self.get_fn_addr(fn_instance.polymorphize(self.tcx)),
                        self.data_layout().instruction_address_space,
                    ),
                    GlobalAlloc::Static(def_id) => {
                        assert!(self.tcx.is_static(def_id));
                        assert!(!self.tcx.is_thread_local_static(def_id));
                        (self.get_static(def_id), AddressSpace::DATA)
                    }
                };
                // Apply the pointer's offset to the base address with a constant GEP.
                let llval = unsafe {
                    llvm::LLVMRustConstInBoundsGEP2(
                        self.type_i8(),
                        self.const_bitcast(base_addr, self.type_i8p_ext(base_addr_space)),
                        &self.const_usize(offset.bytes()),
                        1,
                    )
                };
                if layout.value != Pointer {
                    unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
                } else {
                    self.const_bitcast(llval, llty)
                }
            }
        }
    }

    fn const_data_from_alloc(&self, alloc: &Allocation) -> Self::Value {
        const_alloc_to_llvm(self, alloc)
    }

    fn from_const_alloc(
        &self,
        layout: TyAndLayout<'tcx>,
        alloc: &Allocation,
        offset: Size,
    ) -> PlaceRef<'tcx, &'ll Value> {
        assert_eq!(alloc.align, layout.align.abi);
        let llty = self.type_ptr_to(layout.llvm_type(self));
        let llval = if layout.size == Size::ZERO {
            // Zero-sized allocations have no backing memory; an aligned dangling
            // pointer (the alignment value itself) is sufficient.
            let llval = self.const_usize(alloc.align.bytes());
            unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
        } else {
            let init = const_alloc_to_llvm(self, alloc);
            let base_addr = self.static_addr_of(init, alloc.align, None);

            let llval = unsafe {
                llvm::LLVMRustConstInBoundsGEP2(
                    self.type_i8(),
                    self.const_bitcast(base_addr, self.type_i8p()),
                    &self.const_usize(offset.bytes()),
                    1,
                )
            };
            self.const_bitcast(llval, llty)
        };
        PlaceRef::new_sized(llval, layout)
    }

    fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
        consts::ptrcast(val, ty)
    }
}

/// Get the [LLVM type][Type] of a [`Value`].
pub fn val_ty(v: &Value) -> &Type {
    unsafe { llvm::LLVMTypeOf(v) }
}

pub fn bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
    unsafe {
        let ptr = bytes.as_ptr() as *const c_char;
        llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True)
    }
}

pub fn struct_in_context(llcx: &'a llvm::Context, elts: &[&'a Value], packed: bool) -> &'a Value {
    unsafe {
        llvm::LLVMConstStructInContext(llcx, elts.as_ptr(), elts.len() as c_uint, packed as Bool)
    }
}

#[inline]
fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 {
    ((hi as u128) << 64) | (lo as u128)
}

fn try_as_const_integral(v: &Value) -> Option<&ConstantInt> {
    unsafe { llvm::LLVMIsAConstantInt(v) }
}