use crate::prelude::*;
use super::*;
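
/// Lowers a call to one of the `simd_*` platform intrinsics to per-lane
/// Cranelift instructions, writing the result into `ret`.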
pub(super) fn codegen_simd_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    span: Span,
) {
    let def_id = instance.def_id();
    let substs = instance.substs;

    let intrinsic = fx.tcx.item_name(def_id).as_str();
    let intrinsic = &intrinsic[..];
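
    // `intrinsic_match!` (defined in the parent intrinsics module) dispatches
    // on the intrinsic name. In the argument patterns, `c` binds an argument
    // as a `CValue`, `o` keeps the raw `mir::Operand`, and `v` loads an
    // argument as a scalar `Value`. The `_ =>` arm is the fallback.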
    intrinsic_match! {
        fx, intrinsic, substs, args,
        _ => {
            fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
        };
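
        // Casts each lane of `a` to the return type's lane type, preserving
        // signedness on the integer side.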
        simd_cast, (c a) {
            simd_for_each_lane(fx, a, ret, |fx, lane_layout, ret_lane_layout, lane| {
                let ret_lane_ty = fx.clif_type(ret_lane_layout.ty).unwrap();

                let from_signed = type_sign(lane_layout.ty);
                let to_signed = type_sign(ret_lane_layout.ty);

                let ret_lane = clif_int_or_float_cast(fx, lane, from_signed, ret_lane_ty, to_signed);
                CValue::by_val(ret_lane, ret_lane_layout)
            });
        };
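
        // Lane-wise comparisons. Where signed and unsigned integer lanes need
        // different Cranelift condition codes, `simd_cmp!` takes both
        // alternatives separated by `|` and picks one based on the lane type.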
        simd_eq, (c x, c y) {
            simd_cmp!(fx, Equal(x, y) -> ret);
        };
        simd_ne, (c x, c y) {
            simd_cmp!(fx, NotEqual(x, y) -> ret);
        };
        simd_lt, (c x, c y) {
            simd_cmp!(fx, UnsignedLessThan|SignedLessThan(x, y) -> ret);
        };
        simd_le, (c x, c y) {
            simd_cmp!(fx, UnsignedLessThanOrEqual|SignedLessThanOrEqual(x, y) -> ret);
        };
        simd_gt, (c x, c y) {
            simd_cmp!(fx, UnsignedGreaterThan|SignedGreaterThan(x, y) -> ret);
        };
        simd_ge, (c x, c y) {
            simd_cmp!(fx, UnsignedGreaterThanOrEqual|SignedGreaterThanOrEqual(x, y) -> ret);
        };

        // simd_shuffleN<T, U>(x: T, y: T, idx: [u32; N]) -> U
        _ if intrinsic.starts_with("simd_shuffle"), (c x, c y, o idx) {
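            // The lane count is encoded in the intrinsic name, e.g.
            // `simd_shuffle32` shuffles into 32 lanes.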
            let n: u32 = intrinsic["simd_shuffle".len()..].parse().unwrap();

            assert_eq!(x.layout(), y.layout());
            let layout = x.layout();

            let (lane_type, lane_count) = lane_type_and_count(fx.tcx, layout);
            let (ret_lane_type, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());

            assert_eq!(lane_type, ret_lane_type);
            assert_eq!(n, ret_lane_count);

            let total_len = lane_count * 2;
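
            // The `idx` operand must be a compile-time constant `[u32; N]`.
            // Read its bytes out of the const allocation and decode each lane
            // index using the target's endianness.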
            let indexes = {
                use rustc::mir::interpret::*;
                let idx_const = crate::constant::mir_operand_get_const_val(fx, idx).expect("simd_shuffle* idx not const");

                let idx_bytes = match idx_const.val {
                    ty::ConstKind::Value(ConstValue::ByRef { alloc, offset }) => {
                        let ptr = Pointer::new(AllocId(0 /* dummy */), offset);
                        let size = Size::from_bytes(4 * u64::from(ret_lane_count) /* size_of([u32; ret_lane_count]) */);
                        alloc.get_bytes(fx, ptr, size).unwrap()
                    }
                    _ => unreachable!("{:?}", idx_const),
                };

                (0..ret_lane_count).map(|i| {
                    let i = usize::try_from(i).unwrap();
                    let idx = rustc::mir::interpret::read_target_uint(
                        fx.tcx.data_layout.endian,
                        &idx_bytes[4 * i..4 * i + 4],
                    ).expect("read_target_uint");
                    u32::try_from(idx).expect("try_from u32")
                }).collect::<Vec<u32>>()
            };

            for &idx in &indexes {
                assert!(idx < total_len, "idx {} out of range 0..{}", idx, total_len);
            }

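            // Indices below `lane_count` select a lane of `x`; the rest select
            // a lane of `y`, offset by `lane_count`.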
            for (out_idx, in_idx) in indexes.into_iter().enumerate() {
                let in_lane = if in_idx < lane_count {
                    x.value_field(fx, mir::Field::new(in_idx.try_into().unwrap()))
                } else {
                    y.value_field(fx, mir::Field::new((in_idx - lane_count).try_into().unwrap()))
                };
                let out_lane = ret.place_field(fx, mir::Field::new(out_idx));
                out_lane.write_cvalue(fx, in_lane);
            }
        };

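        // `simd_insert` should return `base` with the lane at `idx` replaced
        // by `val`. Only the index validation is done so far; the insertion
        // itself still traps (see the FIXME below).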
        simd_insert, (c base, o idx, v _val) {
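            // The lane index is declared `#[rustc_arg_required_const]`, so it
            // has to be known at compile time; otherwise emit a trap.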
            let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
                idx_const
            } else {
                fx.tcx.sess.span_warn(
                    fx.mir.span,
                    "`#[rustc_arg_required_const(..)]` is not yet supported. Calling this function will panic.",
                );
                crate::trap::trap_unimplemented(fx, "`#[rustc_arg_required_const(..)]` is not yet supported.");
                return;
            };

            let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32 */)).expect(&format!("kind not scalar: {:?}", idx_const));
            let (_lane_type, lane_count) = lane_type_and_count(fx.tcx, base.layout());
            if idx >= lane_count.into() {
                fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count));
            }

            // FIXME implement this
            fx.tcx.sess.span_warn(
                fx.mir.span,
                "`simd_insert` is not yet implemented. Calling this function will panic.",
            );
            let val = crate::trap::trap_unimplemented_ret_value(fx, ret.layout(), "`simd_insert` is not yet implemented");
            ret.write_cvalue(fx, val);
        };

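        // `simd_extract` returns the lane of `v` selected by the constant
        // index `idx`.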
        simd_extract, (c v, o idx) {
            let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
                idx_const
            } else {
                fx.tcx.sess.span_warn(
                    fx.mir.span,
                    "`#[rustc_arg_required_const(..)]` is not yet supported. Calling this function will panic.",
                );
                let val = crate::trap::trap_unimplemented_ret_value(fx, ret.layout(), "`#[rustc_arg_required_const(..)]` is not yet supported.");
                ret.write_cvalue(fx, val);
                return;
            };

            let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32 */)).expect(&format!("kind not scalar: {:?}", idx_const));
            let (_lane_type, lane_count) = lane_type_and_count(fx.tcx, v.layout());
            if idx >= lane_count.into() {
                fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));
            }

            let ret_lane = v.value_field(fx, mir::Field::new(idx.try_into().unwrap()));
            ret.write_cvalue(fx, ret_lane);
        };

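        // Lane-wise arithmetic. The `|`-separated alternatives in
        // `simd_int_flt_binop!` / `simd_int_binop!` cover the integer vs.
        // float (and unsigned vs. signed) instruction choices per lane type.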
        simd_add, (c x, c y) {
            simd_int_flt_binop!(fx, iadd|fadd(x, y) -> ret);
        };
        simd_sub, (c x, c y) {
            simd_int_flt_binop!(fx, isub|fsub(x, y) -> ret);
        };
        simd_mul, (c x, c y) {
            simd_int_flt_binop!(fx, imul|fmul(x, y) -> ret);
        };
        simd_div, (c x, c y) {
            simd_int_flt_binop!(fx, udiv|sdiv|fdiv(x, y) -> ret);
        };
        simd_shl, (c x, c y) {
            simd_int_binop!(fx, ishl(x, y) -> ret);
        };
        simd_shr, (c x, c y) {
            simd_int_binop!(fx, ushr|sshr(x, y) -> ret);
        };
        simd_and, (c x, c y) {
            simd_int_binop!(fx, band(x, y) -> ret);
        };
        simd_or, (c x, c y) {
            simd_int_binop!(fx, bor(x, y) -> ret);
        };
        simd_xor, (c x, c y) {
            simd_int_binop!(fx, bxor(x, y) -> ret);
        };

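        // Per-lane fused multiply-add. This lowering uses an unfused
        // `fmul` + `fadd`, so results may differ from a true fma in the last
        // rounding step.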
        simd_fma, (c a, c b, c c) {
            assert_eq!(a.layout(), b.layout());
            assert_eq!(a.layout(), c.layout());
            let layout = a.layout();

            let (_lane_layout, lane_count) = lane_type_and_count(fx.tcx, layout);
            let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
            assert_eq!(lane_count, ret_lane_count);

            for lane in 0..lane_count {
                let lane = mir::Field::new(lane.try_into().unwrap());
                let a_lane = a.value_field(fx, lane).load_scalar(fx);
                let b_lane = b.value_field(fx, lane).load_scalar(fx);
                let c_lane = c.value_field(fx, lane).load_scalar(fx);

                let mul_lane = fx.bcx.ins().fmul(a_lane, b_lane);
                let res_lane = CValue::by_val(fx.bcx.ins().fadd(mul_lane, c_lane), ret_lane_layout);

                ret.place_field(fx, lane).write_cvalue(fx, res_lane);
            }
        };

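        // Lane-wise float min/max, mapped directly onto Cranelift's
        // `fmin`/`fmax` instructions.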
        simd_fmin, (c x, c y) {
            simd_flt_binop!(fx, fmin(x, y) -> ret);
        };
        simd_fmax, (c x, c y) {
            simd_flt_binop!(fx, fmax(x, y) -> ret);
        };
    }
}