rust/src/librustc_codegen_llvm/intrinsic.rs

use crate::abi::{Abi, FnAbi, LlvmType, PassMode};
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm;
use crate::llvm_util;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::va_arg::emit_va_arg;
use crate::value::Value;
use rustc::ty::layout::{self, FnAbiExt, HasTyCtxt, LayoutOf, Primitive};
use rustc::ty::{self, Ty};
use rustc::{bug, span_bug};
use rustc_ast::ast;
use rustc_codegen_ssa::base::{compare_simd_types, to_immediate, wants_msvc_seh};
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc_codegen_ssa::glue;
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::MemFlags;
use rustc_hir as hir;
use rustc_target::abi::HasDataLayout;
use rustc_codegen_ssa::common::span_invalid_monomorphization_error;
use rustc_codegen_ssa::traits::*;
use rustc_span::Span;
use std::cmp::Ordering;
use std::{i128, iter, u128};
fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> {
let llvm_name = match name {
"sqrtf32" => "llvm.sqrt.f32",
"sqrtf64" => "llvm.sqrt.f64",
"powif32" => "llvm.powi.f32",
"powif64" => "llvm.powi.f64",
"sinf32" => "llvm.sin.f32",
"sinf64" => "llvm.sin.f64",
"cosf32" => "llvm.cos.f32",
"cosf64" => "llvm.cos.f64",
"powf32" => "llvm.pow.f32",
"powf64" => "llvm.pow.f64",
"expf32" => "llvm.exp.f32",
"expf64" => "llvm.exp.f64",
"exp2f32" => "llvm.exp2.f32",
"exp2f64" => "llvm.exp2.f64",
"logf32" => "llvm.log.f32",
"logf64" => "llvm.log.f64",
"log10f32" => "llvm.log10.f32",
"log10f64" => "llvm.log10.f64",
"log2f32" => "llvm.log2.f32",
"log2f64" => "llvm.log2.f64",
"fmaf32" => "llvm.fma.f32",
"fmaf64" => "llvm.fma.f64",
"fabsf32" => "llvm.fabs.f32",
"fabsf64" => "llvm.fabs.f64",
"minnumf32" => "llvm.minnum.f32",
"minnumf64" => "llvm.minnum.f64",
"maxnumf32" => "llvm.maxnum.f32",
"maxnumf64" => "llvm.maxnum.f64",
"copysignf32" => "llvm.copysign.f32",
"copysignf64" => "llvm.copysign.f64",
"floorf32" => "llvm.floor.f32",
"floorf64" => "llvm.floor.f64",
"ceilf32" => "llvm.ceil.f32",
"ceilf64" => "llvm.ceil.f64",
"truncf32" => "llvm.trunc.f32",
"truncf64" => "llvm.trunc.f64",
"rintf32" => "llvm.rint.f32",
"rintf64" => "llvm.rint.f64",
"nearbyintf32" => "llvm.nearbyint.f32",
"nearbyintf64" => "llvm.nearbyint.f64",
"roundf32" => "llvm.round.f32",
"roundf64" => "llvm.round.f64",
"assume" => "llvm.assume",
"abort" => "llvm.trap",
_ => return None,
};
Some(cx.get_intrinsic(&llvm_name))
}
impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
fn codegen_intrinsic_call(
&mut self,
instance: ty::Instance<'tcx>,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
args: &[OperandRef<'tcx, &'ll Value>],
llresult: &'ll Value,
span: Span,
) {
let tcx = self.tcx;
let callee_ty = instance.monomorphic_ty(tcx);
let (def_id, substs) = match callee_ty.kind {
ty::FnDef(def_id, substs) => (def_id, substs),
_ => bug!("expected fn item type, found {}", callee_ty),
};
let sig = callee_ty.fn_sig(tcx);
let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
let arg_tys = sig.inputs();
let ret_ty = sig.output();
let name = &*tcx.item_name(def_id).as_str();
let llret_ty = self.layout_of(ret_ty).llvm_type(self);
let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
let simple = get_simple_intrinsic(self, name);
let llval = match name {
_ if simple.is_some() => self.call(
simple.unwrap(),
&args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
None,
),
"unreachable" => {
return;
}
"likely" => {
let expect = self.get_intrinsic(&("llvm.expect.i1"));
self.call(expect, &[args[0].immediate(), self.const_bool(true)], None)
}
"unlikely" => {
let expect = self.get_intrinsic(&("llvm.expect.i1"));
self.call(expect, &[args[0].immediate(), self.const_bool(false)], None)
}
"try" => {
try_intrinsic(
self,
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
llresult,
);
return;
}
"breakpoint" => {
let llfn = self.get_intrinsic(&("llvm.debugtrap"));
self.call(llfn, &[], None)
}
"va_start" => self.va_start(args[0].immediate()),
"va_end" => self.va_end(args[0].immediate()),
"va_copy" => {
let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)
}
"va_arg" => {
match fn_abi.ret.layout.abi {
layout::Abi::Scalar(ref scalar) => {
match scalar.value {
Primitive::Int(..) => {
if self.cx().size_of(ret_ty).bytes() < 4 {
// `va_arg` should not be called on an integer type
// less than 4 bytes in length. If it is, promote
// the integer to an `i32` and truncate the result
// back to the smaller type.
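// For example, a `u8` return type is fetched as an `i32` by
// `emit_va_arg` below and then truncated back to `i8`, mirroring
// C's default argument promotions for variadic calls.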
let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
self.trunc(promoted_result, llret_ty)
} else {
emit_va_arg(self, args[0], ret_ty)
}
}
Primitive::F64 | Primitive::Pointer => {
emit_va_arg(self, args[0], ret_ty)
}
// `va_arg` should never be used with the return type f32.
Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
}
}
_ => bug!("the va_arg intrinsic does not work with non-scalar types"),
}
}
"size_of_val" => {
let tp_ty = substs.type_at(0);
if let OperandValue::Pair(_, meta) = args[0].val {
let (llsize, _) = glue::size_and_align_of_dst(self, tp_ty, Some(meta));
llsize
} else {
self.const_usize(self.size_of(tp_ty).bytes())
}
}
"min_align_of_val" => {
let tp_ty = substs.type_at(0);
if let OperandValue::Pair(_, meta) = args[0].val {
let (_, llalign) = glue::size_and_align_of_dst(self, tp_ty, Some(meta));
llalign
} else {
self.const_usize(self.align_of(tp_ty).bytes())
}
}
"size_of" | "pref_align_of" | "min_align_of" | "needs_drop" | "type_id"
| "type_name" => {
let ty_name = self
.tcx
.const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
.unwrap();
OperandRef::from_const(self, ty_name, ret_ty).immediate_or_packed_pair(self)
}
// Effectively a no-op
"forget" => {
return;
}
"offset" => {
let ptr = args[0].immediate();
let offset = args[1].immediate();
self.inbounds_gep(ptr, &[offset])
}
"arith_offset" => {
let ptr = args[0].immediate();
let offset = args[1].immediate();
self.gep(ptr, &[offset])
}
"copy_nonoverlapping" => {
copy_intrinsic(
self,
false,
false,
substs.type_at(0),
args[1].immediate(),
args[0].immediate(),
args[2].immediate(),
);
return;
}
"copy" => {
copy_intrinsic(
self,
true,
false,
substs.type_at(0),
args[1].immediate(),
args[0].immediate(),
args[2].immediate(),
);
return;
}
"write_bytes" => {
memset_intrinsic(
self,
false,
substs.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
);
return;
}
"volatile_copy_nonoverlapping_memory" => {
copy_intrinsic(
self,
false,
true,
substs.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
);
return;
}
"volatile_copy_memory" => {
copy_intrinsic(
self,
true,
true,
substs.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
);
return;
}
"volatile_set_memory" => {
memset_intrinsic(
self,
true,
substs.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
);
return;
}
"volatile_load" | "unaligned_volatile_load" => {
let tp_ty = substs.type_at(0);
let mut ptr = args[0].immediate();
if let PassMode::Cast(ty) = fn_abi.ret.mode {
ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self)));
}
let load = self.volatile_load(ptr);
let align = if name == "unaligned_volatile_load" {
1
} else {
self.align_of(tp_ty).bytes() as u32
};
unsafe {
llvm::LLVMSetAlignment(load, align);
}
to_immediate(self, load, self.layout_of(tp_ty))
}
"volatile_store" => {
let dst = args[0].deref(self.cx());
args[1].val.volatile_store(self, dst);
return;
}
"unaligned_volatile_store" => {
let dst = args[0].deref(self.cx());
args[1].val.unaligned_volatile_store(self, dst);
return;
}
"prefetch_read_data"
| "prefetch_write_data"
| "prefetch_read_instruction"
| "prefetch_write_instruction" => {
let expect = self.get_intrinsic(&("llvm.prefetch"));
let (rw, cache_type) = match name {
"prefetch_read_data" => (0, 1),
"prefetch_write_data" => (1, 1),
"prefetch_read_instruction" => (0, 0),
"prefetch_write_instruction" => (1, 0),
_ => bug!(),
};
self.call(
expect,
&[
args[0].immediate(),
self.const_i32(rw),
args[1].immediate(),
self.const_i32(cache_type),
],
None,
)
}
"ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap"
| "bitreverse" | "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow"
| "wrapping_add" | "wrapping_sub" | "wrapping_mul" | "unchecked_div"
| "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "unchecked_add"
| "unchecked_sub" | "unchecked_mul" | "exact_div" | "rotate_left" | "rotate_right"
| "saturating_add" | "saturating_sub" => {
let ty = arg_tys[0];
match int_type_width_signed(ty, self) {
Some((width, signed)) => match name {
"ctlz" | "cttz" => {
let y = self.const_bool(false);
let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width));
self.call(llfn, &[args[0].immediate(), y], None)
}
"ctlz_nonzero" | "cttz_nonzero" => {
let y = self.const_bool(true);
let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
let llfn = self.get_intrinsic(llvm_name);
self.call(llfn, &[args[0].immediate(), y], None)
}
"ctpop" => self.call(
self.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
&[args[0].immediate()],
None,
),
"bswap" => {
if width == 8 {
args[0].immediate() // byte-swapping a u8/i8 is just a no-op
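// (A `u16` such as 0x1234, by contrast, goes through
// `llvm.bswap.i16` below and becomes 0x3412.)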
} else {
self.call(
self.get_intrinsic(&format!("llvm.bswap.i{}", width)),
&[args[0].immediate()],
None,
)
}
}
"bitreverse" => self.call(
self.get_intrinsic(&format!("llvm.bitreverse.i{}", width)),
&[args[0].immediate()],
None,
),
"add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
let intrinsic = format!(
"llvm.{}{}.with.overflow.i{}",
if signed { 's' } else { 'u' },
&name[..3],
width
);
let llfn = self.get_intrinsic(&intrinsic);
// Convert `i1` to a `bool`, and write it to the out parameter
let pair =
self.call(llfn, &[args[0].immediate(), args[1].immediate()], None);
let val = self.extract_value(pair, 0);
let overflow = self.extract_value(pair, 1);
let overflow = self.zext(overflow, self.type_bool());
let dest = result.project_field(self, 0);
self.store(val, dest.llval, dest.align);
let dest = result.project_field(self, 1);
self.store(overflow, dest.llval, dest.align);
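// At the source level this is the pair returned by the
// `overflowing_*` methods, e.g. `200u8.overflowing_add(100)`
// stores `(44, true)` into the result place.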
return;
}
"wrapping_add" => self.add(args[0].immediate(), args[1].immediate()),
"wrapping_sub" => self.sub(args[0].immediate(), args[1].immediate()),
"wrapping_mul" => self.mul(args[0].immediate(), args[1].immediate()),
"exact_div" => {
if signed {
self.exactsdiv(args[0].immediate(), args[1].immediate())
} else {
self.exactudiv(args[0].immediate(), args[1].immediate())
}
}
"unchecked_div" => {
if signed {
self.sdiv(args[0].immediate(), args[1].immediate())
} else {
self.udiv(args[0].immediate(), args[1].immediate())
}
}
"unchecked_rem" => {
if signed {
self.srem(args[0].immediate(), args[1].immediate())
} else {
self.urem(args[0].immediate(), args[1].immediate())
}
}
"unchecked_shl" => self.shl(args[0].immediate(), args[1].immediate()),
"unchecked_shr" => {
if signed {
self.ashr(args[0].immediate(), args[1].immediate())
} else {
self.lshr(args[0].immediate(), args[1].immediate())
}
}
"unchecked_add" => {
if signed {
self.unchecked_sadd(args[0].immediate(), args[1].immediate())
} else {
self.unchecked_uadd(args[0].immediate(), args[1].immediate())
}
}
"unchecked_sub" => {
if signed {
self.unchecked_ssub(args[0].immediate(), args[1].immediate())
} else {
self.unchecked_usub(args[0].immediate(), args[1].immediate())
}
}
"unchecked_mul" => {
if signed {
self.unchecked_smul(args[0].immediate(), args[1].immediate())
} else {
self.unchecked_umul(args[0].immediate(), args[1].immediate())
}
}
"rotate_left" | "rotate_right" => {
let is_left = name == "rotate_left";
let val = args[0].immediate();
let raw_shift = args[1].immediate();
// rotate = funnel shift with first two args the same
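// e.g. `fshl(x, x, n)` concatenates `x` with itself, shifts the 2N-bit
// value left by `n % N`, and keeps the high N bits, which is exactly
// `x.rotate_left(n)`; `fshr` likewise yields `rotate_right`.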
let llvm_name =
&format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);
let llfn = self.get_intrinsic(llvm_name);
self.call(llfn, &[val, val, raw_shift], None)
}
"saturating_add" | "saturating_sub" => {
let is_add = name == "saturating_add";
let lhs = args[0].immediate();
let rhs = args[1].immediate();
if llvm_util::get_major_version() >= 8 {
let llvm_name = &format!(
"llvm.{}{}.sat.i{}",
if signed { 's' } else { 'u' },
if is_add { "add" } else { "sub" },
width
);
let llfn = self.get_intrinsic(llvm_name);
self.call(llfn, &[lhs, rhs], None)
} else {
let llvm_name = &format!(
"llvm.{}{}.with.overflow.i{}",
if signed { 's' } else { 'u' },
if is_add { "add" } else { "sub" },
width
);
let llfn = self.get_intrinsic(llvm_name);
let pair = self.call(llfn, &[lhs, rhs], None);
let val = self.extract_value(pair, 0);
let overflow = self.extract_value(pair, 1);
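// Pre-LLVM-8 fallback: do the checked operation, then on overflow
// clamp to the type's limit. For signed types the sign of the wrapped
// value tells us which bound we hit: `100i8 + 100` wraps to -56, and
// the negative result selects `i8::MAX` (127) below.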
let llty = self.type_ix(width);
let limit = if signed {
let limit_lo = self
.const_uint_big(llty, (i128::MIN >> (128 - width)) as u128);
let limit_hi = self
.const_uint_big(llty, (i128::MAX >> (128 - width)) as u128);
let neg = self.icmp(
IntPredicate::IntSLT,
val,
self.const_uint(llty, 0),
);
self.select(neg, limit_hi, limit_lo)
} else if is_add {
self.const_uint_big(llty, u128::MAX >> (128 - width))
} else {
self.const_uint(llty, 0)
};
self.select(overflow, limit, val)
}
}
_ => bug!(),
},
None => {
span_invalid_monomorphization_error(
tcx.sess,
span,
&format!(
"invalid monomorphization of `{}` intrinsic: \
expected basic integer type, found `{}`",
name, ty
),
);
return;
}
}
}
"fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
match float_type_width(arg_tys[0]) {
Some(_width) => match name {
"fadd_fast" => self.fadd_fast(args[0].immediate(), args[1].immediate()),
"fsub_fast" => self.fsub_fast(args[0].immediate(), args[1].immediate()),
"fmul_fast" => self.fmul_fast(args[0].immediate(), args[1].immediate()),
"fdiv_fast" => self.fdiv_fast(args[0].immediate(), args[1].immediate()),
"frem_fast" => self.frem_fast(args[0].immediate(), args[1].immediate()),
_ => bug!(),
},
None => {
span_invalid_monomorphization_error(
tcx.sess,
span,
&format!(
"invalid monomorphization of `{}` intrinsic: \
expected basic float type, found `{}`",
name, arg_tys[0]
),
);
return;
}
}
}
"float_to_int_approx_unchecked" => {
if float_type_width(arg_tys[0]).is_none() {
span_invalid_monomorphization_error(
tcx.sess,
span,
&format!(
"invalid monomorphization of `float_to_int_approx_unchecked` \
intrinsic: expected basic float type, \
found `{}`",
arg_tys[0]
),
);
return;
}
match int_type_width_signed(ret_ty, self.cx) {
Some((width, signed)) => {
if signed {
self.fptosi(args[0].immediate(), self.cx.type_ix(width))
} else {
self.fptoui(args[0].immediate(), self.cx.type_ix(width))
}
}
None => {
span_invalid_monomorphization_error(
tcx.sess,
span,
&format!(
"invalid monomorphization of `float_to_int_approx_unchecked` \
intrinsic: expected basic integer type, \
found `{}`",
ret_ty
),
);
return;
}
}
}
"discriminant_value" => args[0].deref(self.cx()).codegen_get_discr(self, ret_ty),
name if name.starts_with("simd_") => {
match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
Ok(llval) => llval,
Err(()) => return,
}
}
// This requires that atomic intrinsics follow a specific naming pattern:
// "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
name if name.starts_with("atomic_") => {
use rustc_codegen_ssa::common::AtomicOrdering::*;
use rustc_codegen_ssa::common::{AtomicRmwBinOp, SynchronizationScope};
let split: Vec<&str> = name.split('_').collect();
let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
let (order, failorder) = match split.len() {
2 => (SequentiallyConsistent, SequentiallyConsistent),
3 => match split[2] {
"unordered" => (Unordered, Unordered),
"relaxed" => (Monotonic, Monotonic),
"acq" => (Acquire, Acquire),
"rel" => (Release, Monotonic),
"acqrel" => (AcquireRelease, Acquire),
"failrelaxed" if is_cxchg => (SequentiallyConsistent, Monotonic),
"failacq" if is_cxchg => (SequentiallyConsistent, Acquire),
_ => self.sess().fatal("unknown ordering in atomic intrinsic"),
},
4 => match (split[2], split[3]) {
("acq", "failrelaxed") if is_cxchg => (Acquire, Monotonic),
("acqrel", "failrelaxed") if is_cxchg => (AcquireRelease, Monotonic),
_ => self.sess().fatal("unknown ordering in atomic intrinsic"),
},
_ => self.sess().fatal("Atomic intrinsic not in correct format"),
};
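// For example, "atomic_cxchg_acq_failrelaxed" splits into
// ["atomic", "cxchg", "acq", "failrelaxed"] and maps to
// (Acquire, Monotonic): acquire on success, relaxed on failure.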
let invalid_monomorphization = |ty| {
span_invalid_monomorphization_error(
tcx.sess,
span,
&format!(
"invalid monomorphization of `{}` intrinsic: \
expected basic integer type, found `{}`",
name, ty
),
);
};
match split[1] {
"cxchg" | "cxchgweak" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, self).is_some() {
let weak = split[1] == "cxchgweak";
let pair = self.atomic_cmpxchg(
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
order,
failorder,
weak,
);
let val = self.extract_value(pair, 0);
let success = self.extract_value(pair, 1);
let success = self.zext(success, self.type_bool());
let dest = result.project_field(self, 0);
self.store(val, dest.llval, dest.align);
let dest = result.project_field(self, 1);
self.store(success, dest.llval, dest.align);
return;
} else {
return invalid_monomorphization(ty);
}
}
"load" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, self).is_some() {
let size = self.size_of(ty);
self.atomic_load(args[0].immediate(), order, size)
} else {
return invalid_monomorphization(ty);
}
}
"store" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, self).is_some() {
let size = self.size_of(ty);
self.atomic_store(
args[1].immediate(),
args[0].immediate(),
order,
size,
);
return;
} else {
return invalid_monomorphization(ty);
}
}
"fence" => {
self.atomic_fence(order, SynchronizationScope::CrossThread);
return;
}
"singlethreadfence" => {
self.atomic_fence(order, SynchronizationScope::SingleThread);
return;
}
// These are all AtomicRMW ops
op => {
let atom_op = match op {
"xchg" => AtomicRmwBinOp::AtomicXchg,
"xadd" => AtomicRmwBinOp::AtomicAdd,
"xsub" => AtomicRmwBinOp::AtomicSub,
"and" => AtomicRmwBinOp::AtomicAnd,
"nand" => AtomicRmwBinOp::AtomicNand,
"or" => AtomicRmwBinOp::AtomicOr,
"xor" => AtomicRmwBinOp::AtomicXor,
"max" => AtomicRmwBinOp::AtomicMax,
"min" => AtomicRmwBinOp::AtomicMin,
"umax" => AtomicRmwBinOp::AtomicUMax,
"umin" => AtomicRmwBinOp::AtomicUMin,
_ => self.sess().fatal("unknown atomic operation"),
};
let ty = substs.type_at(0);
if int_type_width_signed(ty, self).is_some() {
self.atomic_rmw(
atom_op,
args[0].immediate(),
args[1].immediate(),
order,
)
} else {
return invalid_monomorphization(ty);
}
}
}
}
"nontemporal_store" => {
let dst = args[0].deref(self.cx());
args[1].val.nontemporal_store(self, dst);
return;
}
"ptr_offset_from" => {
let ty = substs.type_at(0);
let pointee_size = self.size_of(ty);
// This is the same sequence that Clang emits for pointer subtraction.
// It can be neither `nsw` nor `nuw` because the input is treated as
// unsigned but then the output is treated as signed, so neither works.
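// For example, two `*const u32` pointers 12 bytes apart give `d = 12`,
// and dividing by the pointee size of 4 yields a distance of 3 elements.
// `exactsdiv` is justified because the caller must guarantee that the
// byte difference is an exact multiple of the element size.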
let a = args[0].immediate();
let b = args[1].immediate();
let a = self.ptrtoint(a, self.type_isize());
let b = self.ptrtoint(b, self.type_isize());
let d = self.sub(a, b);
let pointee_size = self.const_usize(pointee_size.bytes());
// this is where the signed magic happens (notice the `s` in `exactsdiv`)
self.exactsdiv(d, pointee_size)
}
_ => bug!("unknown intrinsic '{}'", name),
};
if !fn_abi.ret.is_ignore() {
if let PassMode::Cast(ty) = fn_abi.ret.mode {
let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
let ptr = self.pointercast(result.llval, ptr_llty);
self.store(llval, ptr, result.align);
} else {
OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
.val
.store(self, result);
}
}
}
fn abort(&mut self) {
let fnname = self.get_intrinsic(&("llvm.trap"));
self.call(fnname, &[], None);
}
fn assume(&mut self, val: Self::Value) {
let assume_intrinsic = self.get_intrinsic("llvm.assume");
self.call(assume_intrinsic, &[val], None);
}
fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
let expect = self.get_intrinsic(&"llvm.expect.i1");
self.call(expect, &[cond, self.const_bool(expected)], None)
}
fn sideeffect(&mut self) {
if self.tcx.sess.opts.debugging_opts.insert_sideeffect {
let fnname = self.get_intrinsic(&("llvm.sideeffect"));
self.call(fnname, &[], None);
}
}
fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
let intrinsic = self.cx().get_intrinsic("llvm.va_start");
self.call(intrinsic, &[va_list], None)
}
fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
let intrinsic = self.cx().get_intrinsic("llvm.va_end");
self.call(intrinsic, &[va_list], None)
}
}
fn copy_intrinsic(
bx: &mut Builder<'a, 'll, 'tcx>,
allow_overlap: bool,
volatile: bool,
ty: Ty<'tcx>,
dst: &'ll Value,
src: &'ll Value,
count: &'ll Value,
) {
let (size, align) = bx.size_and_align_of(ty);
let size = bx.mul(bx.const_usize(size.bytes()), count);
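// `count` is in elements, so the byte count is `size_of::<T>() * count`;
// copying 10 `u32`s, for instance, becomes a 40-byte memcpy (or memmove
// when overlap is allowed) at 4-byte alignment.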
let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
if allow_overlap {
bx.memmove(dst, align, src, align, size, flags);
} else {
bx.memcpy(dst, align, src, align, size, flags);
}
}
fn memset_intrinsic(
bx: &mut Builder<'a, 'll, 'tcx>,
volatile: bool,
ty: Ty<'tcx>,
dst: &'ll Value,
val: &'ll Value,
count: &'ll Value,
) {
let (size, align) = bx.size_and_align_of(ty);
let size = bx.mul(bx.const_usize(size.bytes()), count);
let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
bx.memset(dst, val, size, align, flags);
}
fn try_intrinsic(
bx: &mut Builder<'a, 'll, 'tcx>,
try_func: &'ll Value,
data: &'ll Value,
catch_func: &'ll Value,
dest: &'ll Value,
) {
if bx.sess().no_landing_pads() {
bx.call(try_func, &[data], None);
// Return 0 unconditionally from the intrinsic call;
// we can never unwind.
let ret_align = bx.tcx().data_layout.i32_align.abi;
bx.store(bx.const_i32(0), dest, ret_align);
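// (The result uses the same convention as the unwinding paths below:
// 0 means the closure returned normally, 1 means a panic was caught.)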
} else if wants_msvc_seh(bx.sess()) {
codegen_msvc_try(bx, try_func, data, catch_func, dest);
} else {
codegen_gnu_try(bx, try_func, data, catch_func, dest);
}
}
// MSVC's definition of the `rust_try` function.
//
// This implementation uses LLVM's new exception handling instructions,
// which support SEH on MSVC targets. Although these instructions are
// meant to work for all targets, as of this writing LLVM does not
// recommend using them elsewhere, as the older instructions are still
// better optimized.
fn codegen_msvc_try(
bx: &mut Builder<'a, 'll, 'tcx>,
try_func: &'ll Value,
data: &'ll Value,
catch_func: &'ll Value,
dest: &'ll Value,
) {
let llfn = get_rust_try_fn(bx, &mut |mut bx| {
bx.set_personality_fn(bx.eh_personality());
bx.sideeffect();
let mut normal = bx.build_sibling_block("normal");
let mut catchswitch = bx.build_sibling_block("catchswitch");
let mut catchpad = bx.build_sibling_block("catchpad");
let mut caught = bx.build_sibling_block("caught");
let try_func = llvm::get_param(bx.llfn(), 0);
let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2);
// We're generating an IR snippet that looks like:
//
// declare i32 @rust_try(%try_func, %data, %catch_func) {
// %slot = alloca u8*
// invoke %try_func(%data) to label %normal unwind label %catchswitch
//
// normal:
// ret i32 0
//
// catchswitch:
// %cs = catchswitch within none [%catchpad] unwind to caller
//
// catchpad:
// %tok = catchpad within %cs [%type_descriptor, 0, %slot]
// %ptr = load %slot
// call %catch_func(%data, %ptr)
// catchret from %tok to label %caught
//
// caught:
// ret i32 1
// }
//
// This structure follows the basic usage of throw/try/catch in LLVM.
// For example, compile this C++ snippet to see what LLVM generates:
//
// #include <stdint.h>
//
// struct rust_panic {
// rust_panic(const rust_panic&);
// ~rust_panic();
//
// uint64_t x[2];
// };
//
// int __rust_try(
// void (*try_func)(void*),
// void *data,
// void (*catch_func)(void*, void*) noexcept
// ) {
// try {
// try_func(data);
// return 0;
// } catch(rust_panic& a) {
// catch_func(data, &a);
// return 1;
// }
// }
//
// More information can be found in libstd's seh.rs implementation.
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let slot = bx.alloca(bx.type_i8p(), ptr_align);
bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None);
normal.ret(bx.const_i32(0));
let cs = catchswitch.catch_switch(None, None, 1);
catchswitch.add_handler(cs, catchpad.llbb());
// We can't use the TypeDescriptor defined in libpanic_unwind because it
// might be in another DLL and the SEH encoding only supports specifying
// a TypeDescriptor from the current module.
//
// However this isn't an issue since the MSVC runtime uses string
// comparison on the type name to match TypeDescriptors rather than
// pointer equality.
//
// So instead we generate a new TypeDescriptor in each module that uses
// `try` and let the linker merge duplicate definitions in the same
// module.
//
// When modifying, make sure that the type_name string exactly matches
// the one used in src/libpanic_unwind/seh.rs.
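//
// As a rough illustration only (the exact IR depends on the target), the
// globals built below correspond to LLVM IR along these lines:
//
//     @__rust_panic_type_info = linkonce_odr constant { i8**, i8*, [11 x i8] }
//         { i8** @??_7type_info@@6B@, i8* null, [11 x i8] c"rust_panic\00" }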
let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
let type_name = bx.const_bytes(b"rust_panic\0");
let type_info =
bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
unsafe {
llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
llvm::SetUniqueComdat(bx.llmod, tydesc);
llvm::LLVMSetInitializer(tydesc, type_info);
}
// The flag value of 8 indicates that we are catching the exception by
// reference instead of by value. We can't use catch by value because
// that requires copying the exception object, which we don't support
// since our exception object effectively contains a Box.
//
// Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
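//
// Illustrative only: with the operands passed below, the catchpad comes out
// roughly as
//
//     %funclet = catchpad within %cs [%tydesc, i32 8, i8** %slot]
//
// i.e. "catch a `rust_panic` exception by reference into %slot".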
let flags = bx.const_i32(8);
let funclet = catchpad.catch_pad(cs, &[tydesc, flags, slot]);
let ptr = catchpad.load(slot, ptr_align);
catchpad.call(catch_func, &[data, ptr], Some(&funclet));
catchpad.catch_ret(&funclet, caught.llbb());
caught.ret(bx.const_i32(1));
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bx.call(llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);
}
// Definition of the standard `try` function for Rust using the GNU-like model
// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
// instructions).
//
// This codegen is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
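//
// As a sketch (the exact pointer types are an internal detail), the generated
// `__rust_try` shim behaves like:
//
//     unsafe fn __rust_try(
//         try_fn: unsafe fn(*mut i8),
//         data: *mut i8,
//         catch_fn: unsafe fn(*mut i8, *mut i8),
//     ) -> i32; // 0 = returned normally, 1 = a panic was caught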
fn codegen_gnu_try(
bx: &mut Builder<'a, 'll, 'tcx>,
try_func: &'ll Value,
data: &'ll Value,
catch_func: &'ll Value,
dest: &'ll Value,
) {
let llfn = get_rust_try_fn(bx, &mut |mut bx| {
// Codegens the shims described above:
//
// bx:
// invoke %try_func(%data) normal %normal unwind %catch
//
// normal:
// ret 0
//
// catch:
// (%ptr, _) = landingpad
// call %catch_func(%data, %ptr)
// ret 1
bx.sideeffect();
let mut then = bx.build_sibling_block("then");
let mut catch = bx.build_sibling_block("catch");
let try_func = llvm::get_param(bx.llfn(), 0);
let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2);
bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
then.ret(bx.const_i32(0));
// Type indicator for the exception being thrown.
//
// The first value in this tuple is a pointer to the exception object
// being thrown. The second value is a "selector" indicating which of
// the landing pad clauses the exception's type had been matched to.
// rust_try ignores the selector.
let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
let tydesc = match bx.tcx().lang_items().eh_catch_typeinfo() {
Some(tydesc) => {
let tydesc = bx.get_static(tydesc);
bx.bitcast(tydesc, bx.type_i8p())
}
None => bx.const_null(bx.type_i8p()),
};
catch.add_clause(vals, tydesc);
let ptr = catch.extract_value(vals, 0);
catch.call(catch_func, &[data, ptr], None);
catch.ret(bx.const_i32(1));
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bx.call(llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);
}
// Helper function that hands a fresh entry block to a closure so it can codegen a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
name: &str,
inputs: Vec<Ty<'tcx>>,
output: Ty<'tcx>,
codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> &'ll Value {
let rust_fn_sig = ty::Binder::bind(cx.tcx.mk_fn_sig(
inputs.into_iter(),
output,
false,
hir::Unsafety::Unsafe,
Abi::Rust,
));
let fn_abi = FnAbi::of_fn_ptr(cx, rust_fn_sig, &[]);
let llfn = cx.declare_fn(name, &fn_abi);
cx.set_frame_pointer_elimination(llfn);
cx.apply_target_cpu_attr(llfn);
// FIXME(eddyb) find a nicer way to do this.
unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
let bx = Builder::new_block(cx, llfn, "entry-block");
codegen(bx);
llfn
}
// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> &'ll Value {
if let Some(llfn) = cx.rust_try_fn.get() {
return llfn;
}
// Define the type up front for the signature of the rust_try function.
let tcx = cx.tcx;
let i8p = tcx.mk_mut_ptr(tcx.types.i8);
let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
iter::once(i8p),
tcx.mk_unit(),
false,
hir::Unsafety::Unsafe,
Abi::Rust,
)));
let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
[i8p, i8p].iter().cloned(),
tcx.mk_unit(),
false,
hir::Unsafety::Unsafe,
Abi::Rust,
)));
let output = tcx.types.i32;
let rust_try = gen_fn(cx, "__rust_try", vec![try_fn_ty, i8p, catch_fn_ty], output, codegen);
cx.rust_try_fn.set(Some(rust_try));
rust_try
}
fn generic_simd_intrinsic(
bx: &mut Builder<'a, 'll, 'tcx>,
name: &str,
callee_ty: Ty<'tcx>,
args: &[OperandRef<'tcx, &'ll Value>],
ret_ty: Ty<'tcx>,
llret_ty: &'ll Type,
span: Span,
) -> Result<&'ll Value, ()> {
// macros for error handling:
macro_rules! emit_error {
($msg: tt) => {
emit_error!($msg, )
};
($msg: tt, $($fmt: tt)*) => {
span_invalid_monomorphization_error(
bx.sess(), span,
&format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
name, $($fmt)*));
}
}
macro_rules! return_error {
($($fmt: tt)*) => {
{
emit_error!($($fmt)*);
return Err(());
}
}
}
macro_rules! require {
($cond: expr, $($fmt: tt)*) => {
if !$cond {
return_error!($($fmt)*);
}
};
}
macro_rules! require_simd {
($ty: expr, $position: expr) => {
require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
};
}
let tcx = bx.tcx();
let sig = tcx
.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &callee_ty.fn_sig(tcx));
let arg_tys = sig.inputs();
if name == "simd_select_bitmask" {
let in_ty = arg_tys[0];
let m_len = match in_ty.kind {
// Note that this `.unwrap()` crashes for isize/usize; that's somewhat
// intentional, as there's currently no use case for those widths here.
ty::Int(i) => i.bit_width().unwrap() as u64,
ty::Uint(i) => i.bit_width().unwrap() as u64,
_ => return_error!("`{}` is not an integral type", in_ty),
};
require_simd!(arg_tys[1], "argument");
let v_len = arg_tys[1].simd_size(tcx);
require!(
m_len == v_len,
"mismatched lengths: mask length `{}` != other vector length `{}`",
m_len,
v_len
);
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, m_len);
let m_i1s = bx.bitcast(args[0].immediate(), i1xn);
return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
}
// every intrinsic below takes a SIMD vector as its first argument
require_simd!(arg_tys[0], "input");
let in_ty = arg_tys[0];
let in_elem = arg_tys[0].simd_type(tcx);
let in_len = arg_tys[0].simd_size(tcx);
let comparison = match name {
"simd_eq" => Some(hir::BinOpKind::Eq),
"simd_ne" => Some(hir::BinOpKind::Ne),
"simd_lt" => Some(hir::BinOpKind::Lt),
"simd_le" => Some(hir::BinOpKind::Le),
"simd_gt" => Some(hir::BinOpKind::Gt),
"simd_ge" => Some(hir::BinOpKind::Ge),
_ => None,
};
if let Some(cmp_op) = comparison {
require_simd!(ret_ty, "return");
let out_len = ret_ty.simd_size(tcx);
require!(
in_len == out_len,
"expected return type with length {} (same as input type `{}`), \
found `{}` with length {}",
in_len,
in_ty,
ret_ty,
out_len
);
require!(
bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
"expected return type with integer elements, found `{}` with non-integer `{}`",
ret_ty,
ret_ty.simd_type(tcx)
);
return Ok(compare_simd_types(
bx,
args[0].immediate(),
args[1].immediate(),
in_elem,
llret_ty,
cmp_op,
));
}
if name.starts_with("simd_shuffle") {
let n: u64 = name["simd_shuffle".len()..].parse().unwrap_or_else(|_| {
span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
});
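// For orientation (illustrative, not checked at this point): `simd_shuffle4(a, b, [i0, i1, i2, i3])`
// picks 4 lanes out of the `2 * in_len` lanes of `a` concatenated with `b`;
// each index must be a constant smaller than that limit, which is verified below.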
require_simd!(ret_ty, "return");
let out_len = ret_ty.simd_size(tcx);
require!(
out_len == n,
"expected return type of length {}, found `{}` with length {}",
n,
ret_ty,
out_len
);
require!(
in_elem == ret_ty.simd_type(tcx),
"expected return element type `{}` (element of input `{}`), \
found `{}` with element type `{}`",
in_elem,
in_ty,
ret_ty,
ret_ty.simd_type(tcx)
);
let total_len = u128::from(in_len) * 2;
let vector = args[2].immediate();
let indices: Option<Vec<_>> = (0..n)
.map(|i| {
let arg_idx = i;
let val = bx.const_get_elt(vector, i as u64);
match bx.const_to_opt_u128(val, true) {
None => {
emit_error!("shuffle index #{} is not a constant", arg_idx);
None
}
Some(idx) if idx >= total_len => {
emit_error!(
"shuffle index #{} is out of bounds (limit {})",
arg_idx,
total_len
);
None
}
Some(idx) => Some(bx.const_i32(idx as i32)),
}
})
.collect();
let indices = match indices {
Some(i) => i,
None => return Ok(bx.const_null(llret_ty)),
};
return Ok(bx.shuffle_vector(
args[0].immediate(),
args[1].immediate(),
bx.const_vector(&indices),
));
}
if name == "simd_insert" {
require!(
in_elem == arg_tys[2],
"expected inserted type `{}` (element of input `{}`), found `{}`",
in_elem,
in_ty,
arg_tys[2]
);
return Ok(bx.insert_element(
args[0].immediate(),
args[2].immediate(),
args[1].immediate(),
));
}
if name == "simd_extract" {
require!(
ret_ty == in_elem,
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem,
in_ty,
ret_ty
);
return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()));
}
if name == "simd_select" {
let m_elem_ty = in_elem;
let m_len = in_len;
require_simd!(arg_tys[1], "argument");
let v_len = arg_tys[1].simd_size(tcx);
require!(
m_len == v_len,
"mismatched lengths: mask length `{}` != other vector length `{}`",
m_len,
v_len
);
match m_elem_ty.kind {
ty::Int(_) => {}
_ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
}
// truncate the mask to a vector of i1s
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, m_len as u64);
let m_i1s = bx.trunc(args[0].immediate(), i1xn);
return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
}
if name == "simd_bitmask" {
// The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
// vector mask and returns an unsigned integer containing the most
// significant bit (MSB) of each lane.
// If the vector has fewer than 8 lanes, a u8 is returned with zeroed
// trailing bits.
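// A small worked example (illustrative only, assuming all-ones "true" lanes
// and lane 0 mapping to the least significant bit):
//
//     // let mask = i32x4(-1, 0, -1, 0); // hypothetical 4-lane mask type
//     // simd_bitmask(mask) == 0b0000_0101u8 // lanes 0 and 2 set, zero-padded to u8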
let expected_int_bits = in_len.max(8);
match ret_ty.kind {
ty::Uint(i) if i.bit_width() == Some(expected_int_bits as usize) => (),
_ => return_error!("bitmask `{}`, expected `u{}`", ret_ty, expected_int_bits),
}
// Integer vector <i{in_bitwidth} x in_len>:
let (i_xn, in_elem_bitwidth) = match in_elem.kind {
ty::Int(i) => (
args[0].immediate(),
i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits() as _),
),
ty::Uint(i) => (
args[0].immediate(),
i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits() as _),
),
_ => return_error!(
"vector argument `{}`'s element type `{}`, expected integer element type",
in_ty,
in_elem
),
};
// Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
let shift_indices =
vec![
bx.cx.const_int(bx.type_ix(in_elem_bitwidth as _), (in_elem_bitwidth - 1) as _);
in_len as _
];
let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
// Truncate vector to an <i1 x N>
let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len as _));
// Bitcast <i1 x N> to iN:
let i_ = bx.bitcast(i1xn, bx.type_ix(in_len as _));
// Zero-extend iN to the bitmask type:
return Ok(bx.zext(i_, bx.type_ix(expected_int_bits as _)));
}
fn simd_simple_float_intrinsic(
name: &str,
in_elem: &::rustc::ty::TyS<'_>,
in_ty: &::rustc::ty::TyS<'_>,
in_len: u64,
bx: &mut Builder<'a, 'll, 'tcx>,
span: Span,
args: &[OperandRef<'tcx, &'ll Value>],
) -> Result<&'ll Value, ()> {
macro_rules! emit_error {
($msg: tt) => {
emit_error!($msg, )
};
($msg: tt, $($fmt: tt)*) => {
span_invalid_monomorphization_error(
bx.sess(), span,
&format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
name, $($fmt)*));
}
}
macro_rules! return_error {
($($fmt: tt)*) => {
{
emit_error!($($fmt)*);
return Err(());
}
}
}
let ety = match in_elem.kind {
ty::Float(f) if f.bit_width() == 32 => {
if in_len < 2 || in_len > 16 {
return_error!(
"unsupported floating-point vector `{}` with length `{}` \
out-of-range [2, 16]",
in_ty,
in_len
);
}
"f32"
}
ty::Float(f) if f.bit_width() == 64 => {
if in_len < 2 || in_len > 8 {
return_error!(
"unsupported floating-point vector `{}` with length `{}` \
out-of-range [2, 8]",
in_ty,
in_len
);
}
"f64"
}
ty::Float(f) => {
return_error!(
"unsupported element type `{}` of floating-point vector `{}`",
f.name_str(),
in_ty
);
}
_ => {
return_error!("`{}` is not a floating-point type", in_ty);
}
};
let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
let intrinsic = bx.get_intrinsic(&llvm_name);
let c =
bx.call(intrinsic, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
unsafe { llvm::LLVMRustSetHasUnsafeAlgebra(c) };
Ok(c)
}
match name {
"simd_fsqrt" => {
return simd_simple_float_intrinsic("sqrt", in_elem, in_ty, in_len, bx, span, args);
}
"simd_fsin" => {
return simd_simple_float_intrinsic("sin", in_elem, in_ty, in_len, bx, span, args);
}
"simd_fcos" => {
return simd_simple_float_intrinsic("cos", in_elem, in_ty, in_len, bx, span, args);
}
"simd_fabs" => {
return simd_simple_float_intrinsic("fabs", in_elem, in_ty, in_len, bx, span, args);
}
"simd_floor" => {
return simd_simple_float_intrinsic("floor", in_elem, in_ty, in_len, bx, span, args);
}
"simd_ceil" => {
return simd_simple_float_intrinsic("ceil", in_elem, in_ty, in_len, bx, span, args);
}
"simd_fexp" => {
return simd_simple_float_intrinsic("exp", in_elem, in_ty, in_len, bx, span, args);
}
"simd_fexp2" => {
return simd_simple_float_intrinsic("exp2", in_elem, in_ty, in_len, bx, span, args);
}
"simd_flog10" => {
return simd_simple_float_intrinsic("log10", in_elem, in_ty, in_len, bx, span, args);
}
"simd_flog2" => {
return simd_simple_float_intrinsic("log2", in_elem, in_ty, in_len, bx, span, args);
}
"simd_flog" => {
return simd_simple_float_intrinsic("log", in_elem, in_ty, in_len, bx, span, args);
}
"simd_fpowi" => {
return simd_simple_float_intrinsic("powi", in_elem, in_ty, in_len, bx, span, args);
}
"simd_fpow" => {
return simd_simple_float_intrinsic("pow", in_elem, in_ty, in_len, bx, span, args);
}
"simd_fma" => {
return simd_simple_float_intrinsic("fma", in_elem, in_ty, in_len, bx, span, args);
}
_ => { /* fallthrough */ }
}
// FIXME: use:
// https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
// https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
fn llvm_vector_str(elem_ty: Ty<'_>, vec_len: u64, no_pointers: usize) -> String {
let p0s: String = "p0".repeat(no_pointers);
match elem_ty.kind {
ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
_ => unreachable!(),
}
}
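// For example (derived from the format string above): `llvm_vector_str` on a
// 4-lane i32 vector yields "v4i32", and with one level of pointer indirection
// it yields "v4p0i32", matching LLVM's intrinsic name mangling.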
fn llvm_vector_ty(
cx: &CodegenCx<'ll, '_>,
elem_ty: Ty<'_>,
vec_len: u64,
mut no_pointers: usize,
) -> &'ll Type {
// FIXME: use cx.layout_of(ty).llvm_type() ?
let mut elem_ty = match elem_ty.kind {
ty::Int(v) => cx.type_int_from_ty(v),
ty::Uint(v) => cx.type_uint_from_ty(v),
ty::Float(v) => cx.type_float_from_ty(v),
_ => unreachable!(),
};
while no_pointers > 0 {
elem_ty = cx.type_ptr_to(elem_ty);
no_pointers -= 1;
}
cx.type_vector(elem_ty, vec_len)
}
if name == "simd_gather" {
// simd_gather(values: <N x T>, pointers: <N x *_ T>,
// mask: <N x i{M}>) -> <N x T>
// * N: number of elements in the input vectors
// * T: type of the element to load
// * M: any integer width is supported, will be truncated to i1
// All types must be simd vector types
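//
// As an illustration: gathering an `f32x4` through a vector of `*const f32`
// pointers lowers to a call of `llvm.masked.gather.v4f32.v4p0f32(pointers,
// alignment, mask, pass-through values)`, built further down.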
require_simd!(in_ty, "first");
require_simd!(arg_tys[1], "second");
require_simd!(arg_tys[2], "third");
require_simd!(ret_ty, "return");
// Of the same length:
require!(
in_len == arg_tys[1].simd_size(tcx),
"expected {} argument with length {} (same as input type `{}`), \
found `{}` with length {}",
"second",
in_len,
in_ty,
arg_tys[1],
arg_tys[1].simd_size(tcx)
);
require!(
in_len == arg_tys[2].simd_size(tcx),
"expected {} argument with length {} (same as input type `{}`), \
found `{}` with length {}",
"third",
in_len,
in_ty,
arg_tys[2],
arg_tys[2].simd_size(tcx)
);
// The return type must match the first argument type
require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty);
// Counts the levels of pointer indirection in a type
fn ptr_count(t: Ty<'_>) -> usize {
match t.kind {
ty::RawPtr(p) => 1 + ptr_count(p.ty),
_ => 0,
}
}
// Non-ptr type
fn non_ptr(t: Ty<'_>) -> Ty<'_> {
match t.kind {
ty::RawPtr(p) => non_ptr(p.ty),
_ => t,
}
}
// The second argument must be a simd vector with an element type that's a pointer
// to the element type of the first argument
let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).kind {
ty::RawPtr(p) if p.ty == in_elem => {
(ptr_count(arg_tys[1].simd_type(tcx)), non_ptr(arg_tys[1].simd_type(tcx)))
}
_ => {
require!(
false,
"expected element type `{}` of second argument `{}` \
to be a pointer to the element type `{}` of the first \
argument `{}`, found `{}` != `*_ {}`",
arg_tys[1].simd_type(tcx),
arg_tys[1],
in_elem,
in_ty,
arg_tys[1].simd_type(tcx),
in_elem
);
unreachable!();
}
};
assert!(pointer_count > 0);
assert_eq!(pointer_count - 1, ptr_count(arg_tys[0].simd_type(tcx)));
assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));
// The element type of the third argument must be a signed integer type of any width:
match arg_tys[2].simd_type(tcx).kind {
ty::Int(_) => (),
_ => {
require!(
false,
"expected element type `{}` of third argument `{}` \
to be a signed integer type",
arg_tys[2].simd_type(tcx),
arg_tys[2]
);
}
}
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.type_i32();
let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, in_len);
(bx.trunc(args[2].immediate(), i1xn), i1xn)
};
// Type of the vector of pointers:
let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
// Type of the vector of elements:
let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
let llvm_intrinsic =
format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
let f = bx.declare_cfn(
&llvm_intrinsic,
bx.type_func(
&[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
llvm_elem_vec_ty,
),
);
llvm::SetUnnamedAddr(f, false);
let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
return Ok(v);
}
if name == "simd_scatter" {
// simd_scatter(values: <N x T>, pointers: <N x *mut T>,
// mask: <N x i{M}>) -> ()
// * N: number of elements in the input vectors
// * T: type of the element to load
// * M: any integer width is supported, will be truncated to i1
// All types must be simd vector types
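//
// As an illustration: scattering an `f32x4` through `*mut f32` pointers lowers
// to `llvm.masked.scatter.v4f32.v4p0f32(values, pointers, alignment, mask)`.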
require_simd!(in_ty, "first");
require_simd!(arg_tys[1], "second");
require_simd!(arg_tys[2], "third");
// Of the same length:
require!(
in_len == arg_tys[1].simd_size(tcx),
"expected {} argument with length {} (same as input type `{}`), \
found `{}` with length {}",
"second",
in_len,
in_ty,
arg_tys[1],
arg_tys[1].simd_size(tcx)
);
require!(
in_len == arg_tys[2].simd_size(tcx),
"expected {} argument with length {} (same as input type `{}`), \
found `{}` with length {}",
"third",
in_len,
in_ty,
arg_tys[2],
arg_tys[2].simd_size(tcx)
);
// Counts the levels of pointer indirection in a type
fn ptr_count(t: Ty<'_>) -> usize {
match t.kind {
ty::RawPtr(p) => 1 + ptr_count(p.ty),
_ => 0,
}
}
// Non-ptr type
fn non_ptr(t: Ty<'_>) -> Ty<'_> {
match t.kind {
ty::RawPtr(p) => non_ptr(p.ty),
_ => t,
}
}
// The second argument must be a simd vector with an element type that's a pointer
// to the element type of the first argument
let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).kind {
ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => {
(ptr_count(arg_tys[1].simd_type(tcx)), non_ptr(arg_tys[1].simd_type(tcx)))
}
_ => {
require!(
false,
"expected element type `{}` of second argument `{}` \
to be a pointer to the element type `{}` of the first \
argument `{}`, found `{}` != `*mut {}`",
arg_tys[1].simd_type(tcx),
arg_tys[1],
in_elem,
in_ty,
arg_tys[1].simd_type(tcx),
in_elem
);
unreachable!();
}
};
assert!(pointer_count > 0);
assert_eq!(pointer_count - 1, ptr_count(arg_tys[0].simd_type(tcx)));
assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));
// The element type of the third argument must be a signed integer type of any width:
match arg_tys[2].simd_type(tcx).kind {
ty::Int(_) => (),
_ => {
require!(
false,
"expected element type `{}` of third argument `{}` \
to be a signed integer type",
arg_tys[2].simd_type(tcx),
arg_tys[2]
);
}
}
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.type_i32();
let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, in_len);
(bx.trunc(args[2].immediate(), i1xn), i1xn)
};
let ret_t = bx.type_void();
// Type of the vector of pointers:
let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
// Type of the vector of elements:
let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
let llvm_intrinsic =
format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
let f = bx.declare_cfn(
&llvm_intrinsic,
bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t),
);
llvm::SetUnnamedAddr(f, false);
let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
return Ok(v);
}
macro_rules! arith_red {
($name:tt : $integer_reduce:ident, $float_reduce:ident, $ordered:expr) => {
if name == $name {
require!(
ret_ty == in_elem,
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem,
in_ty,
ret_ty
);
return match in_elem.kind {
ty::Int(_) | ty::Uint(_) => {
let r = bx.$integer_reduce(args[0].immediate());
if $ordered {
// if overflow occurs, the result is the
// mathematical result modulo 2^n:
if name.contains("mul") {
Ok(bx.mul(args[1].immediate(), r))
} else {
Ok(bx.add(args[1].immediate(), r))
}
} else {
Ok(bx.$integer_reduce(args[0].immediate()))
}
}
ty::Float(f) => {
let acc = if $ordered {
// ordered arithmetic reductions take an accumulator
args[1].immediate()
} else {
// unordered arithmetic reductions use the identity accumulator
let identity_acc = if $name.contains("mul") { 1.0 } else { 0.0 };
match f.bit_width() {
32 => bx.const_real(bx.type_f32(), identity_acc),
64 => bx.const_real(bx.type_f64(), identity_acc),
v => return_error!(
r#"
unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
$name,
in_ty,
in_elem,
v,
ret_ty
),
}
};
Ok(bx.$float_reduce(acc, args[0].immediate()))
}
_ => return_error!(
"unsupported {} from `{}` with element `{}` to `{}`",
$name,
in_ty,
in_elem,
ret_ty
),
};
}
};
}
arith_red!("simd_reduce_add_ordered": vector_reduce_add, vector_reduce_fadd, true);
arith_red!("simd_reduce_mul_ordered": vector_reduce_mul, vector_reduce_fmul, true);
arith_red!("simd_reduce_add_unordered": vector_reduce_add, vector_reduce_fadd_fast, false);
arith_red!("simd_reduce_mul_unordered": vector_reduce_mul, vector_reduce_fmul_fast, false);
macro_rules! minmax_red {
($name:tt: $int_red:ident, $float_red:ident) => {
if name == $name {
require!(
ret_ty == in_elem,
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem,
in_ty,
ret_ty
);
return match in_elem.kind {
ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
_ => return_error!(
"unsupported {} from `{}` with element `{}` to `{}`",
$name,
in_ty,
in_elem,
ret_ty
),
};
}
};
}
minmax_red!("simd_reduce_min": vector_reduce_min, vector_reduce_fmin);
minmax_red!("simd_reduce_max": vector_reduce_max, vector_reduce_fmax);
minmax_red!("simd_reduce_min_nanless": vector_reduce_min, vector_reduce_fmin_fast);
minmax_red!("simd_reduce_max_nanless": vector_reduce_max, vector_reduce_fmax_fast);
macro_rules! bitwise_red {
($name:tt : $red:ident, $boolean:expr) => {
if name == $name {
let input = if !$boolean {
require!(
ret_ty == in_elem,
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem,
in_ty,
ret_ty
);
args[0].immediate()
} else {
match in_elem.kind {
ty::Int(_) | ty::Uint(_) => {}
_ => return_error!(
"unsupported {} from `{}` with element `{}` to `{}`",
$name,
in_ty,
in_elem,
ret_ty
),
}
// boolean reductions operate on vectors of i1s:
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, in_len as u64);
bx.trunc(args[0].immediate(), i1xn)
};
return match in_elem.kind {
ty::Int(_) | ty::Uint(_) => {
let r = bx.$red(input);
Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
}
_ => return_error!(
"unsupported {} from `{}` with element `{}` to `{}`",
$name,
in_ty,
in_elem,
ret_ty
),
};
}
};
}
bitwise_red!("simd_reduce_and": vector_reduce_and, false);
bitwise_red!("simd_reduce_or": vector_reduce_or, false);
bitwise_red!("simd_reduce_xor": vector_reduce_xor, false);
bitwise_red!("simd_reduce_all": vector_reduce_and, true);
bitwise_red!("simd_reduce_any": vector_reduce_or, true);
if name == "simd_cast" {
require_simd!(ret_ty, "return");
let out_len = ret_ty.simd_size(tcx);
require!(
in_len == out_len,
"expected return type with length {} (same as input type `{}`), \
found `{}` with length {}",
in_len,
in_ty,
ret_ty,
out_len
);
// casting cares about nominal type, not just structural type
let out_elem = ret_ty.simd_type(tcx);
if in_elem == out_elem {
return Ok(args[0].immediate());
}
enum Style {
Float,
Int(/* is signed? */ bool),
Unsupported,
}
let (in_style, in_width) = match in_elem.kind {
// vectors of pointer-sized integers should've been
// disallowed before here, so this unwrap is safe.
ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
ty::Float(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0),
};
let (out_style, out_width) = match out_elem.kind {
ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
ty::Float(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0),
};
match (in_style, out_style) {
(Style::Int(in_is_signed), Style::Int(_)) => {
return Ok(match in_width.cmp(&out_width) {
Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
Ordering::Equal => args[0].immediate(),
Ordering::Less => {
if in_is_signed {
bx.sext(args[0].immediate(), llret_ty)
} else {
bx.zext(args[0].immediate(), llret_ty)
}
}
});
}
(Style::Int(in_is_signed), Style::Float) => {
return Ok(if in_is_signed {
bx.sitofp(args[0].immediate(), llret_ty)
} else {
bx.uitofp(args[0].immediate(), llret_ty)
});
}
(Style::Float, Style::Int(out_is_signed)) => {
return Ok(if out_is_signed {
bx.fptosi(args[0].immediate(), llret_ty)
} else {
bx.fptoui(args[0].immediate(), llret_ty)
});
}
(Style::Float, Style::Float) => {
return Ok(match in_width.cmp(&out_width) {
Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
Ordering::Equal => args[0].immediate(),
Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
});
}
_ => { /* Unsupported. Fallthrough. */ }
}
require!(
false,
"unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
in_ty,
in_elem,
ret_ty,
out_elem
);
}
macro_rules! arith {
($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
$(if name == stringify!($name) {
match in_elem.kind {
$($(ty::$p(_))|* => {
return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
})*
_ => {},
}
require!(false,
"unsupported operation on `{}` with element `{}`",
in_ty,
in_elem)
})*
}
}
arith! {
simd_add: Uint, Int => add, Float => fadd;
simd_sub: Uint, Int => sub, Float => fsub;
simd_mul: Uint, Int => mul, Float => fmul;
simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
simd_rem: Uint => urem, Int => srem, Float => frem;
simd_shl: Uint, Int => shl;
simd_shr: Uint => lshr, Int => ashr;
simd_and: Uint, Int => and;
simd_or: Uint, Int => or;
simd_xor: Uint, Int => xor;
simd_fmax: Float => maxnum;
simd_fmin: Float => minnum;
}
if name == "simd_saturating_add" || name == "simd_saturating_sub" {
let lhs = args[0].immediate();
let rhs = args[1].immediate();
let is_add = name == "simd_saturating_add";
let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
let (signed, elem_width, elem_ty) = match in_elem.kind {
ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
_ => {
return_error!(
"expected element type `{}` of vector type `{}` \
to be a signed or unsigned integer type",
arg_tys[0].simd_type(tcx),
arg_tys[0]
);
}
};
let llvm_intrinsic = &format!(
"llvm.{}{}.sat.v{}i{}",
if signed { 's' } else { 'u' },
if is_add { "add" } else { "sub" },
in_len,
elem_width
);
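// For example: a signed `simd_saturating_add` on a 4 x i32 vector becomes a
// call to `llvm.sadd.sat.v4i32`.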
let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
let f = bx.declare_cfn(&llvm_intrinsic, bx.type_func(&[vec_ty, vec_ty], vec_ty));
llvm::SetUnnamedAddr(f, false);
let v = bx.call(f, &[lhs, rhs], None);
return Ok(v);
}
span_bug!(span, "unknown SIMD intrinsic");
}
// Returns the width of an int Ty and whether it is signed.
// Returns None if the type is not an integer.
// FIXME: there are multiple functions like this; investigate reusing some of the
// already existing helpers.
fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> {
match ty.kind {
ty::Int(t) => Some((
match t {
ast::IntTy::Isize => cx.tcx.sess.target.ptr_width as u64,
ast::IntTy::I8 => 8,
ast::IntTy::I16 => 16,
ast::IntTy::I32 => 32,
ast::IntTy::I64 => 64,
ast::IntTy::I128 => 128,
},
true,
)),
ty::Uint(t) => Some((
match t {
ast::UintTy::Usize => cx.tcx.sess.target.ptr_width as u64,
ast::UintTy::U8 => 8,
ast::UintTy::U16 => 16,
ast::UintTy::U32 => 32,
ast::UintTy::U64 => 64,
ast::UintTy::U128 => 128,
},
false,
)),
_ => None,
}
}
// Returns the width of a float Ty
// Returns None if the type is not a float
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
match ty.kind {
ty::Float(t) => Some(t.bit_width() as u64),
_ => None,
}
}