diff --git a/src/tools/miri/src/shims/intrinsics/simd.rs b/src/tools/miri/src/shims/intrinsics/simd.rs
index 114c66253f7..4acb2201ade 100644
--- a/src/tools/miri/src/shims/intrinsics/simd.rs
+++ b/src/tools/miri/src/shims/intrinsics/simd.rs
@@ -563,7 +563,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
                 let (op, op_len) = this.operand_to_simd(op)?;
                 let bitmask_len = op_len.max(8);
 
-                assert!(dest.layout.ty.is_integral());
+                // Returns either an unsigned integer or array of `u8`.
+                assert!(
+                    dest.layout.ty.is_integral()
+                        || matches!(dest.layout.ty.kind(), ty::Array(elemty, _) if elemty == &this.tcx.types.u8)
+                );
                 assert!(bitmask_len <= 64);
                 assert_eq!(bitmask_len, dest.layout.size.bits());
                 let op_len = u32::try_from(op_len).unwrap();
@@ -577,7 +581,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
                             .unwrap();
                     }
                 }
-                this.write_int(res, dest)?;
+                // We have to force the place type to be an int so that we can write `res` into it.
+                let mut dest = this.force_allocation(dest)?;
+                dest.layout = this.machine.layouts.uint(dest.layout.size).unwrap();
+                this.write_int(res, &dest.into())?;
             }
 
             name => throw_unsup_format!("unimplemented intrinsic: `simd_{name}`"),
diff --git a/src/tools/miri/tests/pass/portable-simd.rs b/src/tools/miri/tests/pass/portable-simd.rs
index 173ac654b03..ee67a65a4f9 100644
--- a/src/tools/miri/tests/pass/portable-simd.rs
+++ b/src/tools/miri/tests/pass/portable-simd.rs
@@ -2,6 +2,10 @@
 #![feature(portable_simd, platform_intrinsics)]
 use std::simd::*;
 
+extern "platform-intrinsic" {
+    pub(crate) fn simd_bitmask<T, U>(x: T) -> U;
+}
+
 fn simd_ops_f32() {
     let a = f32x4::splat(10.0);
     let b = f32x4::from_array([1.0, 2.0, 3.0, -4.0]);
@@ -208,11 +212,40 @@ fn simd_mask() {
     assert_eq!(bitmask, 0b1010001101001001);
     assert_eq!(Mask::<i64, 16>::from_bitmask(bitmask), mask);
 
+    // Also directly call intrinsic, to test both kinds of return types.
+    unsafe {
+        let bitmask1: u16 = simd_bitmask(mask.to_int());
+        let bitmask2: [u8; 2] = simd_bitmask(mask.to_int());
+        if cfg!(target_endian = "little") {
+            assert_eq!(bitmask1, 0b1010001101001001);
+            assert_eq!(bitmask2, [0b01001001, 0b10100011]);
+        } else {
+            // All the bitstrings are reversed compared to above, but the array elements are in the
+            // same order.
+            assert_eq!(bitmask1, 0b1001001011000101);
+            assert_eq!(bitmask2, [0b10010010, 0b11000101]);
+        }
+    }
+
+    // Mask less than 8 bits long, which is a special case (padding with 0s).
     let values = [false, false, false, true];
     let mask = Mask::<i64, 4>::from_array(values);
     let bitmask = mask.to_bitmask();
     assert_eq!(bitmask, 0b1000);
     assert_eq!(Mask::<i64, 4>::from_bitmask(bitmask), mask);
+
+    // Also directly call intrinsic, to test both kinds of return types.
+    unsafe {
+        let bitmask1: u8 = simd_bitmask(mask.to_int());
+        let bitmask2: [u8; 1] = simd_bitmask(mask.to_int());
+        if cfg!(target_endian = "little") {
+            assert_eq!(bitmask1, 0b1000);
+            assert_eq!(bitmask2, [0b1000]);
+        } else {
+            assert_eq!(bitmask1, 0b0001);
+            assert_eq!(bitmask2, [0b0001]);
+        }
+    }
 }
 
 fn simd_cast() {
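// ---------------------------------------------------------------------------
// Standalone sketch, not part of the patch above: the bit layout the new test
// asserts. Lane `i` of the mask maps to bit `i` of the bitmask on little-endian
// targets and to bit `len - 1 - i` on big-endian targets; for the values
// asserted above, the `[u8; N]` return form equals that same integer stored in
// native byte order, which is why the array elements land in the same positions
// on both endiannesses. The helper name `bitmask_bits` is made up for
// illustration and is not a Miri API.
fn bitmask_bits(lanes: &[bool], big_endian: bool) -> u64 {
    let len = lanes.len() as u32;
    let mut res = 0u64;
    for (i, &lane) in lanes.iter().enumerate() {
        if lane {
            // Pick the bit position for this lane depending on endianness.
            let bit = if big_endian { len - 1 - i as u32 } else { i as u32 };
            res |= 1u64 << bit;
        }
    }
    res
}

fn main() {
    // A 16-lane mask whose bitmask matches the one asserted in the test above.
    let lanes = [
        true, false, false, true, false, false, true, false, true, true, false, false, false,
        true, false, true,
    ];
    assert_eq!(bitmask_bits(&lanes, false), 0b1010001101001001);
    assert_eq!(bitmask_bits(&lanes, true), 0b1001001011000101);
    // The `[u8; 2]` return form: the same bits, stored in native byte order.
    assert_eq!((bitmask_bits(&lanes, false) as u16).to_le_bytes(), [0b01001001, 0b10100011]);
    assert_eq!((bitmask_bits(&lanes, true) as u16).to_be_bytes(), [0b10010010, 0b11000101]);
}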