// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_upper_case_globals)]

use llvm;
use llvm::{SequentiallyConsistent, Acquire, Release, AtomicXchg, ValueRef, TypeKind};
use middle::subst;
use middle::subst::FnSpace;
use trans::base::*;
use trans::build::*;
use trans::callee;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::datum::*;
use trans::expr;
use trans::glue;
use trans::type_of::*;
use trans::type_of;
use trans::machine;
use trans::machine::llsize_of;
use trans::type_::Type;
use middle::ty::{mod, Ty};
use syntax::abi::RustIntrinsic;
use syntax::ast;
use syntax::parse::token;
use util::ppaux::{Repr, ty_to_string};

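/// Maps a Rust intrinsic name (e.g. `"sqrtf32"`) to the LLVM intrinsic that
/// implements it, for the "simple" intrinsics that lower to a single LLVM
/// call with no extra translation logic; returns `None` for every other name.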
pub fn get_simple_intrinsic(ccx: &CrateContext, item: &ast::ForeignItem) -> Option<ValueRef> {
    let name = match token::get_ident(item.ident).get() {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "ctpop8" => "llvm.ctpop.i8",
        "ctpop16" => "llvm.ctpop.i16",
        "ctpop32" => "llvm.ctpop.i32",
        "ctpop64" => "llvm.ctpop.i64",
        "bswap16" => "llvm.bswap.i16",
        "bswap32" => "llvm.bswap.i32",
        "bswap64" => "llvm.bswap.i64",
        "assume" => "llvm.assume",
        _ => return None
    };
    Some(ccx.get_intrinsic(&name))
}
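
// A hedged usage sketch (mirroring the lookup in `trans_intrinsic_call`
// below): callers try the simple table first and fall back to bespoke
// handling on a miss, e.g.
//
//     match get_simple_intrinsic(ccx, &*foreign_item) {
//         Some(llfn) => Call(bcx, llfn, llargs.as_slice(), None),
//         None => { /* intrinsic needs special translation */ }
//     }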

/// Performs late verification that intrinsics are used correctly. At present,
/// the only intrinsic that needs such verification is `transmute`.
pub fn check_intrinsics(ccx: &CrateContext) {
    let mut last_failing_id = None;
    for transmute_restriction in ccx.tcx().transmute_restrictions.borrow().iter() {
        // Sometimes, a single call to transmute will push multiple
        // type pairs to test in order to exhaustively test the
        // possibility around a type parameter. If one of those fails,
        // there is no sense reporting errors on the others.
        if last_failing_id == Some(transmute_restriction.id) {
            continue;
        }

        debug!("transmute_restriction: {}", transmute_restriction.repr(ccx.tcx()));

        assert!(!ty::type_has_params(transmute_restriction.substituted_from));
        assert!(!ty::type_has_params(transmute_restriction.substituted_to));

        let llfromtype = type_of::sizing_type_of(ccx,
                                                 transmute_restriction.substituted_from);
        let lltotype = type_of::sizing_type_of(ccx,
                                               transmute_restriction.substituted_to);
        let from_type_size = machine::llbitsize_of_real(ccx, llfromtype);
        let to_type_size = machine::llbitsize_of_real(ccx, lltotype);
        if from_type_size != to_type_size {
            last_failing_id = Some(transmute_restriction.id);

            if transmute_restriction.original_from != transmute_restriction.substituted_from {
                ccx.sess().span_err(
                    transmute_restriction.span,
                    format!("transmute called on types with potentially different sizes: \
                             {} (could be {} bit{}) to {} (could be {} bit{})",
                            ty_to_string(ccx.tcx(), transmute_restriction.original_from),
                            from_type_size as uint,
                            if from_type_size == 1 {""} else {"s"},
                            ty_to_string(ccx.tcx(), transmute_restriction.original_to),
                            to_type_size as uint,
                            if to_type_size == 1 {""} else {"s"}).as_slice());
            } else {
                ccx.sess().span_err(
                    transmute_restriction.span,
                    format!("transmute called on types with different sizes: \
                             {} ({} bit{}) to {} ({} bit{})",
                            ty_to_string(ccx.tcx(), transmute_restriction.original_from),
                            from_type_size as uint,
                            if from_type_size == 1 {""} else {"s"},
                            ty_to_string(ccx.tcx(), transmute_restriction.original_to),
                            to_type_size as uint,
                            if to_type_size == 1 {""} else {"s"}).as_slice());
            }
        }
    }
    ccx.sess().abort_if_errors();
}
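
// A hedged illustration of what the check above rejects: user code such as
//
//     let x: u32 = unsafe { mem::transmute(0u64) };
//
// reaches this pass with mismatched bit widths and reports
// "transmute called on types with different sizes: u64 (64 bits) to u32 (32 bits)".

/// Translates a call to a Rust intrinsic, expanding it in place into LLVM IR
/// (or a call to an LLVM intrinsic) rather than emitting an ordinary function
/// call.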
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                            node: ast::NodeId,
                                            callee_ty: Ty<'tcx>,
                                            cleanup_scope: cleanup::CustomScopeIndex,
                                            args: callee::CallArgs<'a, 'tcx>,
                                            dest: expr::Dest,
                                            substs: subst::Substs<'tcx>,
                                            call_info: NodeInfo)
                                            -> Result<'blk, 'tcx> {
    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let ret_ty = match callee_ty.sty {
        ty::ty_bare_fn(_, ref f) => f.sig.0.output,
        _ => panic!("expected bare_fn in trans_intrinsic_call")
    };
    let foreign_item = tcx.map.expect_foreign_item(node);
    let name = token::get_ident(foreign_item.ident);

    // For `transmute` we can just trans the input expr directly into dest
    if name.get() == "transmute" {
        let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
                                           *substs.types.get(FnSpace, 1));
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);

                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                let nonpointer_nonaggregate = |&: llkind: TypeKind| -> bool {
                    use llvm::TypeKind::*;
                    match llkind {
                        Half | Float | Double | X86_FP80 | FP128 |
                        PPC_FP128 | Integer | Vector | X86_MMX => true,
                        _ => false
                    }
                };

                // An approximation to which types can be directly cast via
                // LLVM's bitcast. This doesn't cover pointer -> pointer casts,
                // but does, importantly, cover SIMD types.
                let in_kind = llintype.kind();
                let ret_kind = llret_ty.kind();
                let bitcast_compatible =
                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
                    };
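
                // An illustrative case, taken from the change that introduced
                // this fast path: for SIMD types,
                //
                //     let tmp = mem::transmute::<u64x2, u32x4>(u);
                //
                // both sides are LLVM vector types, so `bitcast_compatible`
                // holds and the transmute becomes a single
                // `bitcast <2 x i64> ... to <4 x i32>` rather than a round
                // trip through memory.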

                let dest = if bitcast_compatible {
                    // if we're here, the type is scalar-like (a primitive, a
                    // SIMD type or a pointer), and so can be handled as a
                    // by-value ValueRef and can also be directly bitcast to the
                    // target type. Doing this special case makes conversions
                    // like `u32x4` -> `u64x2` much nicer for LLVM and so more
                    // efficient (these are done efficiently implicitly in C
                    // with the `__m128i` type and so this means Rust doesn't
                    // lose out there).
                    let expr = &*arg_exprs[0];
                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
                    let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
                    let val = if datum.kind.is_by_ref() {
                        load_ty(bcx, datum.val, datum.ty)
                    } else {
                        datum.val
                    };

                    let cast_val = BitCast(bcx, val, llret_ty);

                    match dest {
                        expr::SaveIn(d) => {
                            // this often occurs in a sequence like `Store(val,
                            // d); val2 = Load(d)`, so it disappears easily.
                            Store(bcx, cast_val, d);
                        }
                        expr::Ignore => {}
                    }
                    dest
                } else {
                    // The types are too complicated to do with a by-value
                    // bitcast, so pointer cast instead. We need to cast the
                    // dest so the types work out.
                    let dest = match dest {
                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                        expr::Ignore => expr::Ignore
                    };
                    bcx = expr::trans_into(bcx, &*arg_exprs[0], dest);
                    dest
                };

                fcx.pop_custom_cleanup_scope(cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };
            }

            _ => {
                ccx.sess().bug("expected expr as argument for transmute");
            }
        }
    }

    // Push the arguments.
    let mut llargs = Vec::new();
    bcx = callee::trans_args(bcx,
                             args,
                             callee_ty,
                             &mut llargs,
                             cleanup::CustomScope(cleanup_scope),
                             false,
                             RustIntrinsic);

    fcx.pop_custom_cleanup_scope(cleanup_scope);

    // These are the only intrinsic functions that diverge.
    if name.get() == "abort" {
        let llfn = ccx.get_intrinsic(&("llvm.trap"));
        Call(bcx, llfn, &[], None);
        Unreachable(bcx);
        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
    } else if name.get() == "unreachable" {
        Unreachable(bcx);
        return Result::new(bcx, C_nil(ccx));
    }

    let ret_ty = match ret_ty {
        ty::FnConverging(ret_ty) => ret_ty,
        ty::FnDiverging => unreachable!()
    };

    let llret_ty = type_of::type_of(ccx, ret_ty);

    // Get the location to store the result. If the user does
    // not care about the result, just make a stack slot.
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                alloc_ty(bcx, ret_ty, "intrinsic_result")
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };

    let simple = get_simple_intrinsic(ccx, &*foreign_item);
    let llval = match (simple, name.get()) {
        (Some(llfn), _) => {
            Call(bcx, llfn, llargs.as_slice(), None)
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, &[], None)
        }
        (_, "size_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_real(ccx, lltp_ty))
        }
        (_, "min_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_uint(ccx, type_of::align_of(ccx, tp_ty))
        }
        (_, "pref_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
        }
        (_, "move_val_init") => {
            // Create a datum reflecting the value being moved.
            // Use `appropriate_mode` so that the datum is by ref
            // if the value is non-immediate. Note that, with
            // intrinsics, there are no argument cleanups to
            // concern ourselves with, so we can use an rvalue datum.
            let tp_ty = *substs.types.get(FnSpace, 0);
            let mode = appropriate_rvalue_mode(ccx, tp_ty);
            let src = Datum {
                val: llargs[1],
                ty: tp_ty,
                kind: Rvalue::new(mode)
            };
            bcx = src.store_to(bcx, llargs[0]);
            C_nil(ccx)
        }
        (_, "get_tydesc") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let static_ti = get_tydesc(ccx, tp_ty);

            // FIXME (#3730): ideally this shouldn't need a cast,
            // but there's a circularity between translating rust types to llvm
            // types and having a tydesc type available. So I can't directly access
            // the llvm type of intrinsic::TyDesc struct.
            PointerCast(bcx, static_ti.tydesc, llret_ty)
        }
        (_, "type_id") => {
            let hash = ty::hash_crate_independent(
                ccx.tcx(),
                *substs.types.get(FnSpace, 0),
                &ccx.link_meta().crate_hash);
            // NB: This needs to be kept in lockstep with the TypeId struct in
            // the intrinsic module
            C_named_struct(llret_ty, &[C_u64(ccx, hash)])
        }
        (_, "init") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            if return_type_is_void(ccx, tp_ty) {
                C_nil(ccx)
            } else {
                C_null(lltp_ty)
            }
        }
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_bool(ccx, type_needs_drop(ccx.tcx(), tp_ty))
        }
        (_, "owns_managed") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_bool(ccx, ty::type_contents(ccx.tcx(), tp_ty).owns_managed())
        }
        (_, "offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            InBoundsGEP(bcx, ptr, &[offset])
        }
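
        // A sketch of the lowering above: `offset::<u32>(p, 3)` becomes an
        // in-bounds GEP on `p`, i.e. an advance of `3 * size_of::<u32>()`
        // bytes with LLVM's in-bounds aliasing rules.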

        (_, "copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx, false, false, *substs.types.get(FnSpace, 0),
                           llargs[0], llargs[1], llargs[2])
        }
        (_, "copy_memory") => {
            copy_intrinsic(bcx, true, false, *substs.types.get(FnSpace, 0),
                           llargs[0], llargs[1], llargs[2])
        }
        (_, "set_memory") => {
            memset_intrinsic(bcx, false, *substs.types.get(FnSpace, 0),
                             llargs[0], llargs[1], llargs[2])
        }

        (_, "volatile_copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx, false, true, *substs.types.get(FnSpace, 0),
                           llargs[0], llargs[1], llargs[2])
        }
        (_, "volatile_copy_memory") => {
            copy_intrinsic(bcx, true, true, *substs.types.get(FnSpace, 0),
                           llargs[0], llargs[1], llargs[2])
        }
        (_, "volatile_set_memory") => {
            memset_intrinsic(bcx, true, *substs.types.get(FnSpace, 0),
                             llargs[0], llargs[1], llargs[2])
        }
        (_, "volatile_load") => {
            VolatileLoad(bcx, llargs[0])
        },
        (_, "volatile_store") => {
            VolatileStore(bcx, llargs[1], llargs[0]);
            C_nil(ccx)
        },

        (_, "ctlz8") => count_zeros_intrinsic(bcx, "llvm.ctlz.i8", llargs[0]),
        (_, "ctlz16") => count_zeros_intrinsic(bcx, "llvm.ctlz.i16", llargs[0]),
        (_, "ctlz32") => count_zeros_intrinsic(bcx, "llvm.ctlz.i32", llargs[0]),
        (_, "ctlz64") => count_zeros_intrinsic(bcx, "llvm.ctlz.i64", llargs[0]),
        (_, "cttz8") => count_zeros_intrinsic(bcx, "llvm.cttz.i8", llargs[0]),
        (_, "cttz16") => count_zeros_intrinsic(bcx, "llvm.cttz.i16", llargs[0]),
        (_, "cttz32") => count_zeros_intrinsic(bcx, "llvm.cttz.i32", llargs[0]),
        (_, "cttz64") => count_zeros_intrinsic(bcx, "llvm.cttz.i64", llargs[0]),

        (_, "i8_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i16_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i32_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i64_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1]),

        (_, "u8_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u16_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u32_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u64_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1]),

        (_, "i8_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i16_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i32_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i64_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1]),

        (_, "u8_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u16_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u32_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u64_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1]),

        (_, "i8_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i16_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i32_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "i64_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1]),

        (_, "u8_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u16_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u32_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1]),
        (_, "u64_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1]),

        (_, "return_address") => {
            if !fcx.caller_expects_out_pointer {
                tcx.sess.span_err(call_info.span,
                                  "invalid use of `return_address` intrinsic: function \
                                   does not use out pointer");
                C_null(Type::i8p(ccx))
            } else {
                PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
            }
        }

        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
        (_, name) if name.starts_with("atomic_") => {
            let split: Vec<&str> = name.split('_').collect();
            assert!(split.len() >= 2, "Atomic intrinsic not in correct format");

            let order = if split.len() == 2 {
                llvm::SequentiallyConsistent
            } else {
                match split[2] {
                    "relaxed" => llvm::Monotonic,
                    "acq" => llvm::Acquire,
                    "rel" => llvm::Release,
                    "acqrel" => llvm::AcquireRelease,
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                }
            };

            match split[1] {
                "cxchg" => {
                    // See include/llvm/IR/Instructions.h for LLVM's
                    // implementation of this; I assume it's good enough
                    // for us to use for now.
                    let strongest_failure_ordering = match order {
                        llvm::NotAtomic | llvm::Unordered =>
                            ccx.sess().fatal("cmpxchg must be atomic"),

                        llvm::Monotonic | llvm::Release =>
                            llvm::Monotonic,

                        llvm::Acquire | llvm::AcquireRelease =>
                            llvm::Acquire,

                        llvm::SequentiallyConsistent =>
                            llvm::SequentiallyConsistent
                    };

                    let res = AtomicCmpXchg(bcx, llargs[0], llargs[1],
                                            llargs[2], order,
                                            strongest_failure_ordering);
                    if unsafe { llvm::LLVMVersionMinor() >= 5 } {
                        ExtractValue(bcx, res, 0)
                    } else {
                        res
                    }
                }

                "load" => {
                    AtomicLoad(bcx, llargs[0], order)
                }
                "store" => {
                    AtomicStore(bcx, llargs[1], llargs[0], order);
                    C_nil(ccx)
                }

                "fence" => {
                    AtomicFence(bcx, order);
                    C_nil(ccx)
                }

                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg" => llvm::AtomicXchg,
                        "xadd" => llvm::AtomicAdd,
                        "xsub" => llvm::AtomicSub,
                        "and"  => llvm::AtomicAnd,
                        "nand" => llvm::AtomicNand,
                        "or"   => llvm::AtomicOr,
                        "xor"  => llvm::AtomicXor,
                        "max"  => llvm::AtomicMax,
                        "min"  => llvm::AtomicMin,
                        "umax" => llvm::AtomicUMax,
                        "umin" => llvm::AtomicUMin,
                        _ => ccx.sess().fatal("unknown atomic operation")
                    };

                    AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
                }
            }
        }

        (_, _) => ccx.sess().span_bug(foreign_item.span, "unknown intrinsic")
    };

    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        store_ty(bcx, llval, llresult, ret_ty);
    }

    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty, Some(call_info));
        }
        expr::SaveIn(_) => {}
    }

    Result::new(bcx, llresult)
}

fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              allow_overlap: bool, volatile: bool, tp_ty: Ty<'tcx>,
                              dst: ValueRef, src: ValueRef, count: ValueRef) -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
    let name = if allow_overlap {
        if int_size == 32 {
            "llvm.memmove.p0i8.p0i8.i32"
        } else {
            "llvm.memmove.p0i8.p0i8.i64"
        }
    } else {
        if int_size == 32 {
            "llvm.memcpy.p0i8.p0i8.i32"
        } else {
            "llvm.memcpy.p0i8.p0i8.i64"
        }
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx, llfn, &[dst_ptr, src_ptr, Mul(bcx, size, count), align,
                      C_bool(ccx, volatile)], None)
}
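
// A worked instance of the selection above (for a 64-bit target):
// `copy_memory::<T>(dst, src, count)` allows overlap, so it lowers to
// `llvm.memmove.p0i8.p0i8.i64`, while `copy_nonoverlapping_memory` (with
// `allow_overlap == false`) selects `llvm.memcpy.p0i8.p0i8.i64`; both copy
// `size_of::<T>() * count` bytes at the alignment of `T`.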

fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, volatile: bool, tp_ty: Ty<'tcx>,
                                dst: ValueRef, val: ValueRef, count: ValueRef) -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let name = if machine::llbitsize_of_real(ccx, ccx.int_type()) == 32 {
        "llvm.memset.p0i8.i32"
    } else {
        "llvm.memset.p0i8.i64"
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx, llfn, &[dst_ptr, val, Mul(bcx, size, count), align,
                      C_bool(ccx, volatile)], None)
}

fn count_zeros_intrinsic(bcx: Block, name: &'static str, val: ValueRef) -> ValueRef {
    let y = C_bool(bcx.ccx(), false);
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, &[val, y], None)
}
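
// A note on the constant `false` passed above: `llvm.ctlz.*` and `llvm.cttz.*`
// take a trailing `i1 is_zero_undef` flag. Passing `false` keeps the result
// defined for a zero input (it yields the operand's bit width, e.g.
// `ctlz32(0) == 32`) at the cost of slightly more conservative codegen.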

fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, name: &'static str,
                                       t: Ty<'tcx>, a: ValueRef, b: ValueRef) -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);

    // Convert `i1` to a `bool`, and write it to the out parameter
    let val = Call(bcx, llfn, &[a, b], None);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    let ret = C_undef(type_of::type_of(bcx.ccx(), t));
    let ret = InsertValue(bcx, ret, result, 0);
    let ret = InsertValue(bcx, ret, overflow, 1);

    ret
}
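
// A hedged sketch of how the pair built above surfaces in Rust: each
// `*_with_overflow` intrinsic returns `(result, overflowed)`, e.g.
//
//     let (sum, overflowed) = unsafe { u8_add_with_overflow(255u8, 1u8) };
//     // sum == 0, overflowed == true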