Sync from rust 06e02d5b259c1e88cbf0c74366d9e0a4c7cfd6d9
commit fc1b10c72c
@@ -126,7 +126,8 @@ pub(crate) fn codegen_const_value<'tcx>(
                }
            }
            Scalar::Ptr(ptr, _size) => {
-                let (alloc_id, offset) = ptr.into_parts(); // we know the `offset` is relative
+                let (prov, offset) = ptr.into_parts(); // we know the `offset` is relative
+                let alloc_id = prov.alloc_id();
                let base_addr = match fx.tcx.global_alloc(alloc_id) {
                    GlobalAlloc::Memory(alloc) => {
                        let data_id = data_id_for_alloc_id(
@@ -374,7 +375,8 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut Constant
        let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()).to_vec();
        data.define(bytes.into_boxed_slice());

-        for &(offset, alloc_id) in alloc.provenance().ptrs().iter() {
+        for &(offset, prov) in alloc.provenance().ptrs().iter() {
+            let alloc_id = prov.alloc_id();
            let addend = {
                let endianness = tcx.data_layout.endian;
                let offset = offset.bytes() as usize;
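The two hunks above adapt to pointer provenance being a wrapper around the allocation id rather than the id itself, so the id is now unwrapped with an explicit alloc_id() call. Below is a minimal stand-alone sketch of that unwrapping pattern, using hypothetical stand-in types rather than the compiler's real pointer and provenance types.

// Hypothetical stand-ins; only the shape of the pattern (provenance wraps
// an allocation id, callers unwrap it) is taken from the hunks above.
#[derive(Clone, Copy, Debug, PartialEq)]
struct AllocId(u64);

#[derive(Clone, Copy)]
struct Prov(AllocId);

impl Prov {
    fn alloc_id(self) -> AllocId {
        self.0
    }
}

struct Ptr {
    prov: Prov,
    offset: u64,
}

impl Ptr {
    // Mirrors `ptr.into_parts()`: split into provenance and relative offset.
    fn into_parts(self) -> (Prov, u64) {
        (self.prov, self.offset)
    }
}

fn main() {
    let ptr = Ptr { prov: Prov(AllocId(3)), offset: 8 };
    let (prov, offset) = ptr.into_parts(); // we know the `offset` is relative
    let alloc_id = prov.alloc_id(); // the extra unwrapping step introduced above
    assert_eq!(alloc_id, AllocId(3));
    assert_eq!(offset, 8);
}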
@@ -1,5 +1,6 @@
 //! Codegen `extern "platform-intrinsic"` intrinsics.

+use cranelift_codegen::ir::immediates::Offset32;
 use rustc_middle::ty::GenericArgsRef;
 use rustc_span::Symbol;
 use rustc_target::abi::Endian;
@@ -1008,8 +1009,57 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
            }
        }

+        sym::simd_masked_load => {
+            intrinsic_args!(fx, args => (mask, ptr, val); intrinsic);
+
+            let (val_lane_count, val_lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+            let (mask_lane_count, _mask_lane_ty) = mask.layout().ty.simd_size_and_type(fx.tcx);
+            let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+            assert_eq!(val_lane_count, mask_lane_count);
+            assert_eq!(val_lane_count, ret_lane_count);
+
+            let lane_clif_ty = fx.clif_type(val_lane_ty).unwrap();
+            let ret_lane_layout = fx.layout_of(ret_lane_ty);
+            let ptr_val = ptr.load_scalar(fx);
+
+            for lane_idx in 0..ret_lane_count {
+                let val_lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+                let mask_lane = mask.value_lane(fx, lane_idx).load_scalar(fx);
+
+                let if_enabled = fx.bcx.create_block();
+                let if_disabled = fx.bcx.create_block();
+                let next = fx.bcx.create_block();
+                let res_lane = fx.bcx.append_block_param(next, lane_clif_ty);
+
+                fx.bcx.ins().brif(mask_lane, if_enabled, &[], if_disabled, &[]);
+                fx.bcx.seal_block(if_enabled);
+                fx.bcx.seal_block(if_disabled);
+
+                fx.bcx.switch_to_block(if_enabled);
+                let offset = lane_idx as i32 * lane_clif_ty.bytes() as i32;
+                let res = fx.bcx.ins().load(
+                    lane_clif_ty,
+                    MemFlags::trusted(),
+                    ptr_val,
+                    Offset32::new(offset),
+                );
+                fx.bcx.ins().jump(next, &[res]);
+
+                fx.bcx.switch_to_block(if_disabled);
+                fx.bcx.ins().jump(next, &[val_lane]);
+
+                fx.bcx.seal_block(next);
+                fx.bcx.switch_to_block(next);
+
+                fx.bcx.ins().nop();
+
+                ret.place_lane(fx, lane_idx)
+                    .write_cvalue(fx, CValue::by_val(res_lane, ret_lane_layout));
+            }
+        }
+
         sym::simd_scatter => {
-            intrinsic_args!(fx, args => (val, ptr, mask); intrinsic);
+            intrinsic_args!(fx, args => (mask, ptr, val); intrinsic);

            let (val_lane_count, _val_lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
            let (ptr_lane_count, _ptr_lane_ty) = ptr.layout().ty.simd_size_and_type(fx.tcx);
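For reference, here is a scalar sketch of the per-lane behaviour that the `simd_masked_load` lowering above implements: enabled lanes are loaded from a single base pointer at consecutive lane offsets, while disabled lanes keep the passed-in fallback value. Lane count, element type, and the integer mask representation are illustrative assumptions, not the intrinsic's actual signature.

const LANES: usize = 4;

// For each lane: if the mask lane is non-zero (the `brif` above), read the
// element at `ptr + lane_idx`; otherwise keep the lane of `fallback`
// (the `if_disabled` branch simply forwards `val_lane`).
unsafe fn masked_load(mask: [u8; LANES], ptr: *const i32, fallback: [i32; LANES]) -> [i32; LANES] {
    let mut out = fallback;
    for lane_idx in 0..LANES {
        if mask[lane_idx] != 0 {
            out[lane_idx] = unsafe { *ptr.add(lane_idx) };
        }
    }
    out
}

fn main() {
    let data = [10, 20, 30, 40];
    let fallback = [-1; LANES];
    let loaded = unsafe { masked_load([1, 0, 1, 0], data.as_ptr(), fallback) };
    assert_eq!(loaded, [10, -1, 30, -1]);
}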