Support repr(simd) on ADTs containing a single array field
This is the cg_clif half of rust PR 78863
parent 356360836e
commit c42be7975f
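For context, the kind of type this commit adds codegen support for looks roughly like the sketch below. This is not code from the commit; it assumes a nightly compiler with the repr_simd feature, and the type name F32x4 is made up for illustration:

    #![feature(repr_simd)]

    // A #[repr(simd)] ADT whose single field is an array -- the shape that
    // rust PR 78863 allows and that the new value_lane/place_lane helpers
    // in this commit lower lane by lane.
    #[repr(simd)]
    #[derive(Copy, Clone)]
    pub struct F32x4([f32; 4]);

    fn main() {
        let _v = F32x4([1.0, 2.0, 3.0, 4.0]);
    }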
@@ -175,12 +175,11 @@ fn simd_for_each_lane<'tcx>(
     assert_eq!(lane_count, ret_lane_count);
 
     for lane_idx in 0..lane_count {
-        let lane_idx = mir::Field::new(lane_idx.try_into().unwrap());
-        let lane = val.value_field(fx, lane_idx).load_scalar(fx);
+        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
 
         let res_lane = f(fx, lane_layout, ret_lane_layout, lane);
 
-        ret.place_field(fx, lane_idx).write_cvalue(fx, res_lane);
+        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
     }
 }
 
@@ -206,14 +205,13 @@ fn simd_pair_for_each_lane<'tcx>(
     let ret_lane_layout = fx.layout_of(ret_lane_ty);
     assert_eq!(lane_count, ret_lane_count);
 
-    for lane in 0..lane_count {
-        let lane = mir::Field::new(lane.try_into().unwrap());
-        let x_lane = x.value_field(fx, lane).load_scalar(fx);
-        let y_lane = y.value_field(fx, lane).load_scalar(fx);
+    for lane_idx in 0..lane_count {
+        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
+        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);
 
         let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);
 
-        ret.place_field(fx, lane).write_cvalue(fx, res_lane);
+        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
     }
 }
 
@@ -227,10 +225,9 @@ fn simd_reduce<'tcx>(
     let lane_layout = fx.layout_of(lane_ty);
     assert_eq!(lane_layout, ret.layout());
 
-    let mut res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
+    let mut res_val = val.value_lane(fx, 0).load_scalar(fx);
     for lane_idx in 1..lane_count {
-        let lane =
-            val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
+        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
         res_val = f(fx, lane_layout, res_val, lane);
     }
     let res = CValue::by_val(res_val, lane_layout);
@@ -246,11 +243,10 @@ fn simd_reduce_bool<'tcx>(
     let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
     assert!(ret.layout().ty.is_bool());
 
-    let res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
+    let res_val = val.value_lane(fx, 0).load_scalar(fx);
     let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
     for lane_idx in 1..lane_count {
-        let lane =
-            val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
+        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
         let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
         res_val = f(fx, res_val, lane);
     }
@@ -108,11 +108,11 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
 
             for (out_idx, in_idx) in indexes.into_iter().enumerate() {
                 let in_lane = if u64::from(in_idx) < lane_count {
-                    x.value_field(fx, mir::Field::new(in_idx.into()))
+                    x.value_lane(fx, in_idx.into())
                 } else {
-                    y.value_field(fx, mir::Field::new(usize::from(in_idx) - usize::try_from(lane_count).unwrap()))
+                    y.value_lane(fx, u64::from(in_idx) - lane_count)
                 };
-                let out_lane = ret.place_field(fx, mir::Field::new(out_idx));
+                let out_lane = ret.place_lane(fx, u64::try_from(out_idx).unwrap());
                 out_lane.write_cvalue(fx, in_lane);
             }
         };
@@ -163,7 +163,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));
             }
 
-            let ret_lane = v.value_field(fx, mir::Field::new(idx.try_into().unwrap()));
+            let ret_lane = v.value_lane(fx, idx.try_into().unwrap());
             ret.write_cvalue(fx, ret_lane);
         };
 
@@ -216,15 +216,14 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
             let ret_lane_layout = fx.layout_of(ret_lane_ty);
 
             for lane in 0..lane_count {
-                let lane = mir::Field::new(lane.try_into().unwrap());
-                let a_lane = a.value_field(fx, lane).load_scalar(fx);
-                let b_lane = b.value_field(fx, lane).load_scalar(fx);
-                let c_lane = c.value_field(fx, lane).load_scalar(fx);
+                let a_lane = a.value_lane(fx, lane).load_scalar(fx);
+                let b_lane = b.value_lane(fx, lane).load_scalar(fx);
+                let c_lane = c.value_lane(fx, lane).load_scalar(fx);
 
                 let mul_lane = fx.bcx.ins().fmul(a_lane, b_lane);
                 let res_lane = CValue::by_val(fx.bcx.ins().fadd(mul_lane, c_lane), ret_lane_layout);
 
-                ret.place_field(fx, lane).write_cvalue(fx, res_lane);
+                ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
             }
         };
 
@@ -206,6 +206,38 @@ pub(crate) fn value_field(
         }
     }
 
+    /// Like [`CValue::value_field`] except handling ADTs containing a single array field in a way
+    /// such that you can access individual lanes.
+    pub(crate) fn value_lane(
+        self,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
+        lane_idx: u64,
+    ) -> CValue<'tcx> {
+        let layout = self.1;
+        assert!(layout.ty.is_simd());
+        let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+        let lane_layout = fx.layout_of(lane_ty);
+        assert!(lane_idx < lane_count);
+        match self.0 {
+            CValueInner::ByVal(val) => match layout.abi {
+                Abi::Vector { element: _, count: _ } => {
+                    assert!(lane_count <= u8::MAX.into(), "SIMD type with more than 255 lanes???");
+                    let lane_idx = u8::try_from(lane_idx).unwrap();
+                    let lane = fx.bcx.ins().extractlane(val, lane_idx);
+                    CValue::by_val(lane, lane_layout)
+                }
+                _ => unreachable!("value_lane for ByVal with abi {:?}", layout.abi),
+            },
+            CValueInner::ByValPair(_, _) => unreachable!(),
+            CValueInner::ByRef(ptr, None) => {
+                let field_offset = lane_layout.size * lane_idx;
+                let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
+                CValue::by_ref(field_ptr, lane_layout)
+            }
+            CValueInner::ByRef(_, Some(_)) => unreachable!(),
+        }
+    }
+
     pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
         crate::unsize::coerce_unsized_into(fx, self, dest);
     }
@@ -610,6 +642,38 @@ pub(crate) fn place_field(
         }
     }
 
+    /// Like [`CPlace::place_field`] except handling ADTs containing a single array field in a way
+    /// such that you can access individual lanes.
+    pub(crate) fn place_lane(
+        self,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
+        lane_idx: u64,
+    ) -> CPlace<'tcx> {
+        let layout = self.layout();
+        assert!(layout.ty.is_simd());
+        let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+        let lane_layout = fx.layout_of(lane_ty);
+        assert!(lane_idx < lane_count);
+
+        match self.inner {
+            CPlaceInner::Var(local, var) => {
+                assert!(matches!(layout.abi, Abi::Vector { .. }));
+                CPlace {
+                    inner: CPlaceInner::VarLane(local, var, lane_idx.try_into().unwrap()),
+                    layout: lane_layout,
+                }
+            }
+            CPlaceInner::VarPair(_, _, _) => unreachable!(),
+            CPlaceInner::VarLane(_, _, _) => unreachable!(),
+            CPlaceInner::Addr(ptr, None) => {
+                let field_offset = lane_layout.size * lane_idx;
+                let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
+                CPlace::for_ptr(field_ptr, lane_layout)
+            }
+            CPlaceInner::Addr(_, Some(_)) => unreachable!(),
+        }
+    }
+
     pub(crate) fn place_index(
         self,
         fx: &mut FunctionCx<'_, '_, 'tcx>,
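The intended use of the two new helpers is the per-lane loop already visible in the simd_for_each_lane hunk above. As a compact usage sketch (my own summary, assuming a FunctionCx fx plus a SIMD-typed CValue val and CPlace ret with lane_count taken from simd_size_and_type, as in that function):

    // Copy `val` into `ret` one lane at a time: read lane i as a CValue,
    // then write it to the matching lane of the destination place.
    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx);
        ret.place_lane(fx, lane_idx).write_cvalue(fx, lane);
    }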