use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::FunctionCx;
use crate::common::{span_invalid_monomorphization_error, IntPredicate};
use crate::glue;
use crate::meth;
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::{sym, Span};
use rustc_target::abi::{
    call::{FnAbi, PassMode},
    WrappingRange,
};

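/// Lowers the `copy` and `volatile_copy_*` intrinsics to a `memmove` (if the
/// ranges may overlap) or `memcpy` of `count` elements of type `ty`.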
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}

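/// Lowers the `write_bytes` and `volatile_set_memory` intrinsics to a
/// `memset` of `count` elements of type `ty` with the byte `val`.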
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
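    /// Lowers a call to an intrinsic that has a backend-agnostic lowering;
    /// anything that needs backend-specific handling is forwarded to
    /// `Bx::codegen_intrinsic_call` in the fallback arm at the end of the
    /// `match` below.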
    pub fn codegen_intrinsic_call(
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, Bx::Value>],
        llresult: Bx::Value,
        span: Span,
    ) {
        let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());

        let ty::FnDef(def_id, substs) = *callee_ty.kind() else {
            bug!("expected fn item type, found {}", callee_ty);
        };

        let sig = callee_ty.fn_sig(bx.tcx());
        let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = bx.tcx().item_name(def_id);
        let name_str = name.as_str();

        let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

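        // Each arm either evaluates to the intrinsic's scalar result, which is
        // stored into the return place after the match, or writes its result
        // itself (or has none) and returns early.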
        let llval = match name {
            sym::assume => {
                bx.assume(args[0].immediate());
                return;
            }
            sym::abort => {
                bx.abort();
                return;
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
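            // For unsized values the size/alignment is only known at runtime
            // and is computed from the pointer metadata carried in the operand
            // pair; for sized types it is a compile-time constant.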
            sym::size_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llsize
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).size.bytes())
                }
            }
            sym::min_align_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llalign
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes())
                }
            }
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
                let value = meth::VirtualIndex::from_index(idx).get_usize(bx, vtable);
                if name == sym::vtable_align {
                    // Alignment is always nonzero.
                    bx.range_metadata(value, WrappingRange { start: 1, end: !0 });
                }
                value
            }
            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let value = bx
                    .tcx()
                    .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
                    .unwrap();
                OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
            }
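            // `offset` requires the result to stay in bounds of the same
            // allocation (hence `inbounds_gep`), while `arith_offset` has no
            // such requirement and lowers to a plain `gep`.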
            sym::offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.inbounds_gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::arith_offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
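            // Note the argument order: `copy(src, dst, count)` passes the
            // source first, so `args[1]` is the destination and `args[0]` the
            // source in the call below.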
            sym::copy => {
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    substs.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return;
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return;
            }
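            // Integer arithmetic. The `*_with_overflow` intrinsics write a
            // `(result, overflowed)` pair directly to the return place; the
            // `unchecked_*` and `exact_div` intrinsics map to builder ops
            // whose preconditions the caller must have upheld.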
            sym::add_with_overflow
            | sym::sub_with_overflow
            | sym::mul_with_overflow
            | sym::unchecked_div
            | sym::unchecked_rem
            | sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::exact_div => {
                let ty = arg_tys[0];
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => match name {
                        sym::add_with_overflow
                        | sym::sub_with_overflow
                        | sym::mul_with_overflow => {
                            let op = match name {
                                sym::add_with_overflow => OverflowOp::Add,
                                sym::sub_with_overflow => OverflowOp::Sub,
                                sym::mul_with_overflow => OverflowOp::Mul,
                                _ => bug!(),
                            };
                            let (val, overflow) =
                                bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate());
                            // Convert `i1` to a `bool`, and write it to the out parameter.
                            let val = bx.from_immediate(val);
                            let overflow = bx.from_immediate(overflow);

                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(overflow, dest.llval, dest.align);

                            return;
                        }
                        sym::exact_div => {
                            if signed {
                                bx.exactsdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.exactudiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_div => {
                            if signed {
                                bx.sdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.udiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_rem => {
                            if signed {
                                bx.srem(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.urem(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_shl => bx.shl(args[0].immediate(), args[1].immediate()),
                        sym::unchecked_shr => {
                            if signed {
                                bx.ashr(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.lshr(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_add => {
                            if signed {
                                bx.unchecked_sadd(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_uadd(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_sub => {
                            if signed {
                                bx.unchecked_ssub(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_usub(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_mul => {
                            if signed {
                                bx.unchecked_smul(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_umul(args[0].immediate(), args[1].immediate())
                            }
                        }
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic integer type, found `{}`",
                                name, ty
                            ),
                        );
                        return;
                    }
                }
            }
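            // Fast-math float ops: these lower to the builder's fast-math
            // variants, which let the backend assume the usual fast-math
            // preconditions (finite, non-NaN operands).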
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(arg_tys[0]) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic float type, found `{}`",
                                name, arg_tys[0]
                            ),
                        );
                        return;
                    }
                }
            }
            sym::float_to_int_unchecked => {
                if float_type_width(arg_tys[0]).is_none() {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `float_to_int_unchecked` \
                             intrinsic: expected basic float type, \
                             found `{}`",
                            arg_tys[0]
                        ),
                    );
                    return;
                }
                let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `float_to_int_unchecked` \
                             intrinsic: expected basic integer type, \
                             found `{}`",
                            ret_ty
                        ),
                    );
                    return;
                };
                if signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                }
            }
            sym::discriminant_value => {
                if ret_ty.is_integral() {
                    args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
                } else {
                    span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
                }
            }
            sym::const_allocate => {
                // Returns a null pointer at runtime.
                bx.const_null(bx.type_i8p())
            }

            sym::const_deallocate => {
                // A no-op at runtime.
                return;
            }

            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>[_<ordering>]"
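            // (e.g. `atomic_load_seqcst` or `atomic_cxchg_acquire_relaxed`)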
            name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
                use crate::common::AtomicOrdering::*;
                use crate::common::{AtomicRmwBinOp, SynchronizationScope};

                let Some((instruction, ordering)) = atomic.split_once('_') else {
                    bx.sess().fatal("Atomic intrinsic missing memory ordering");
                };
                let parse_ordering = |bx: &Bx, s| match s {
                    "unordered" => Unordered,
                    "relaxed" => Relaxed,
                    "acquire" => Acquire,
                    "release" => Release,
                    "acqrel" => AcquireRelease,
                    "seqcst" => SequentiallyConsistent,
                    _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
                };

                let invalid_monomorphization = |ty| {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `{}` intrinsic: \
                             expected basic integer type, found `{}`",
                            name, ty
                        ),
                    );
                };

                match instruction {
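                    // Compare-exchange takes two orderings (success and
                    // failure), so its suffix is `<success>_<failure>` and is
                    // split once more below.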
"cxchg" | "cxchgweak" => {
|
2022-06-22 13:02:23 +02:00
|
|
|
|
let Some((success, failure)) = ordering.split_once('_') else {
|
|
|
|
|
bx.sess().fatal("Atomic compare-exchange intrinsic missing failure memory ordering");
|
|
|
|
|
};
|
2020-09-15 23:35:31 +02:00
|
|
|
|
let ty = substs.type_at(0);
|
2020-10-06 10:03:52 +00:00
|
|
|
|
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
|
2022-06-22 13:02:23 +02:00
|
|
|
|
let weak = instruction == "cxchgweak";
|
2020-11-28 18:12:45 +00:00
|
|
|
|
let mut dst = args[0].immediate();
|
|
|
|
|
let mut cmp = args[1].immediate();
|
|
|
|
|
let mut src = args[2].immediate();
|
|
|
|
|
if ty.is_unsafe_ptr() {
|
|
|
|
|
// Some platforms do not support atomic operations on pointers,
|
|
|
|
|
// so we cast to integer first.
|
|
|
|
|
let ptr_llty = bx.type_ptr_to(bx.type_isize());
|
|
|
|
|
dst = bx.pointercast(dst, ptr_llty);
|
|
|
|
|
cmp = bx.ptrtoint(cmp, bx.type_isize());
|
|
|
|
|
src = bx.ptrtoint(src, bx.type_isize());
|
|
|
|
|
}
|
2022-06-22 13:02:23 +02:00
|
|
|
|
let pair = bx.atomic_cmpxchg(dst, cmp, src, parse_ordering(bx, success), parse_ordering(bx, failure), weak);
|
2020-09-15 23:35:31 +02:00
|
|
|
|
let val = bx.extract_value(pair, 0);
|
|
|
|
|
let success = bx.extract_value(pair, 1);
|
|
|
|
|
let val = bx.from_immediate(val);
|
|
|
|
|
let success = bx.from_immediate(success);
|
|
|
|
|
|
|
|
|
|
let dest = result.project_field(bx, 0);
|
|
|
|
|
bx.store(val, dest.llval, dest.align);
|
|
|
|
|
let dest = result.project_field(bx, 1);
|
|
|
|
|
bx.store(success, dest.llval, dest.align);
|
|
|
|
|
return;
|
|
|
|
|
} else {
|
|
|
|
|
return invalid_monomorphization(ty);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
"load" => {
|
|
|
|
|
let ty = substs.type_at(0);
|
2020-10-06 10:03:52 +00:00
|
|
|
|
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
|
2020-11-28 18:12:45 +00:00
|
|
|
|
let layout = bx.layout_of(ty);
|
|
|
|
|
let size = layout.size;
|
|
|
|
|
let mut source = args[0].immediate();
|
|
|
|
|
if ty.is_unsafe_ptr() {
|
|
|
|
|
// Some platforms do not support atomic operations on pointers,
|
|
|
|
|
// so we cast to integer first...
|
2021-07-04 17:49:51 +02:00
|
|
|
|
let llty = bx.type_isize();
|
|
|
|
|
let ptr_llty = bx.type_ptr_to(llty);
|
2020-11-28 18:12:45 +00:00
|
|
|
|
source = bx.pointercast(source, ptr_llty);
|
2022-06-22 13:02:23 +02:00
|
|
|
|
let result = bx.atomic_load(llty, source, parse_ordering(bx, ordering), size);
|
2020-11-28 18:12:45 +00:00
|
|
|
|
// ... and then cast the result back to a pointer
|
|
|
|
|
bx.inttoptr(result, bx.backend_type(layout))
|
|
|
|
|
} else {
|
2022-06-22 13:02:23 +02:00
|
|
|
|
bx.atomic_load(bx.backend_type(layout), source, parse_ordering(bx, ordering), size)
|
2020-11-28 18:12:45 +00:00
|
|
|
|
}
|
2020-09-15 23:35:31 +02:00
|
|
|
|
} else {
|
|
|
|
|
return invalid_monomorphization(ty);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
"store" => {
|
|
|
|
|
let ty = substs.type_at(0);
|
2020-10-06 10:03:52 +00:00
|
|
|
|
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
|
2020-09-15 23:35:31 +02:00
|
|
|
|
let size = bx.layout_of(ty).size;
|
2020-11-28 18:12:45 +00:00
|
|
|
|
let mut val = args[1].immediate();
|
|
|
|
|
let mut ptr = args[0].immediate();
|
|
|
|
|
if ty.is_unsafe_ptr() {
|
|
|
|
|
// Some platforms do not support atomic operations on pointers,
|
|
|
|
|
// so we cast to integer first.
|
|
|
|
|
let ptr_llty = bx.type_ptr_to(bx.type_isize());
|
|
|
|
|
ptr = bx.pointercast(ptr, ptr_llty);
|
|
|
|
|
val = bx.ptrtoint(val, bx.type_isize());
|
|
|
|
|
}
|
2022-06-22 13:02:23 +02:00
|
|
|
|
bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
|
2020-09-15 23:35:31 +02:00
|
|
|
|
return;
|
|
|
|
|
} else {
|
|
|
|
|
return invalid_monomorphization(ty);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
"fence" => {
|
2022-06-22 13:02:23 +02:00
|
|
|
|
bx.atomic_fence(parse_ordering(bx, ordering), SynchronizationScope::CrossThread);
|
2020-09-15 23:35:31 +02:00
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
"singlethreadfence" => {
|
2022-06-22 13:02:23 +02:00
|
|
|
|
bx.atomic_fence(parse_ordering(bx, ordering), SynchronizationScope::SingleThread);
|
2020-09-15 23:35:31 +02:00
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
                    // These are all AtomicRMW ops.
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => bx.sess().fatal("unknown atomic operation"),
                        };

                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let mut ptr = args[0].immediate();
                            let mut val = args[1].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                ptr = bx.pointercast(ptr, ptr_llty);
                                val = bx.ptrtoint(val, bx.type_isize());
                            }
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }
                }
            }
            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return;
            }
            sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                if name == sym::ptr_guaranteed_eq {
                    bx.icmp(IntPredicate::IntEQ, a, b)
                } else {
                    bx.icmp(IntPredicate::IntNE, a, b)
                }
            }
            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = substs.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
                    // This is the same sequence that Clang emits for pointer subtraction.
                    // It can be neither `nsw` nor `nuw` because the input is treated as
                    // unsigned but then the output is treated as signed, so neither works.
                    let d = bx.sub(a, b);
                    // This is where the signed magic happens (notice the `s` in `exactsdiv`).
                    bx.exactsdiv(d, pointee_size)
                } else {
                    // The `_unsigned` version knows the relative ordering of the pointers,
                    // so it can use `sub nuw` and `udiv exact` instead of dealing in signed.
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }
            _ => {
                // Need to use backend-specific things in the implementation.
                bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
                return;
            }
        };
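
        // Store the scalar result computed above into the return place, going
        // through a pointer cast if the ABI requires the return value to be
        // cast to a different type.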
        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast(ty) = fn_abi.ret.mode {
                let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(&ty));
                let ptr = bx.pointercast(result.llval, ptr_llty);
                bx.store(llval, ptr, result.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                    .val
                    .store(bx, result);
            }
        }
    }
}

// Returns the width of an integer type, and whether it is signed.
// Returns `None` if the type is not an integer.
// FIXME: there are multiple versions of this function; investigate reusing one
// of the existing ones.
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}

// Returns the width of a float type.
// Returns `None` if the type is not a float.
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}