diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 4a86ec3f57a..046ff34e3d0 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -499,13 +499,14 @@ pub fn mplace_to_simd(
         &self,
         mplace: &MPlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, u64)> {
-        // Basically we just transmute this place into an array following simd_size_and_type.
-        // (Transmuting is okay since this is an in-memory place. We also double-check the size
-        // stays the same.)
+        // Basically we want to transmute this place into an array following simd_size_and_type.
         let (len, e_ty) = mplace.layout.ty.simd_size_and_type(*self.tcx);
-        let array = Ty::new_array(self.tcx.tcx, e_ty, len);
-        let layout = self.layout_of(array)?;
-        let mplace = mplace.transmute(layout, self)?;
+        // Some SIMD types have padding, so `len` many `e_ty` does not cover the entire place.
+        // Therefore we cannot transmute, and instead we project at offset 0, which side-steps
+        // the size check.
+        let array_layout = self.layout_of(Ty::new_array(self.tcx.tcx, e_ty, len))?;
+        assert!(array_layout.size <= mplace.layout.size);
+        let mplace = mplace.offset(Size::ZERO, array_layout, self)?;
         Ok((mplace, len))
     }
 
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 0e594914c3a..09e1a59dfa1 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -81,6 +81,8 @@ fn offset<M: Machine<'tcx, Provenance = Prov>>(
         ecx: &InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, Self> {
         assert!(layout.is_sized());
+        // We sometimes do pointer arithmetic with this function, disregarding the source type.
+        // So we don't check the sizes here.
         self.offset_with_meta(offset, OffsetMode::Inbounds, MemPlaceMeta::None, layout, ecx)
     }
 
diff --git a/src/tools/miri/tests/pass/intrinsics/portable-simd.rs b/src/tools/miri/tests/pass/intrinsics/portable-simd.rs
index 248a57d6850..5250a1138b7 100644
--- a/src/tools/miri/tests/pass/intrinsics/portable-simd.rs
+++ b/src/tools/miri/tests/pass/intrinsics/portable-simd.rs
@@ -1,5 +1,5 @@
 //@compile-flags: -Zmiri-strict-provenance
-#![feature(portable_simd, adt_const_params, core_intrinsics)]
+#![feature(portable_simd, adt_const_params, core_intrinsics, repr_simd)]
 #![allow(incomplete_features, internal_features)]
 use std::intrinsics::simd as intrinsics;
 use std::ptr;
@@ -581,11 +581,32 @@ fn simd_masked_loadstore() {
     assert_eq!(buf, [2, 3, 4]);
 }
 
+fn simd_ops_non_pow2() {
+    // Just a little smoke test for operations on non-power-of-two vectors.
+    #[repr(simd, packed)]
+    #[derive(Copy, Clone)]
+    pub struct SimdPacked<T, const N: usize>([T; N]);
+    #[repr(simd)]
+    #[derive(Copy, Clone)]
+    pub struct SimdPadded<T, const N: usize>([T; N]);
+
+    let x = SimdPacked([1u32; 3]);
+    let y = SimdPacked([2u32; 3]);
+    let z = unsafe { intrinsics::simd_add(x, y) };
+    assert_eq!({ z.0 }, [3u32; 3]);
+
+    let x = SimdPadded([1u32; 3]);
+    let y = SimdPadded([2u32; 3]);
+    let z = unsafe { intrinsics::simd_add(x, y) };
+    assert_eq!(z.0, [3u32; 3]);
+}
+
 fn main() {
     simd_mask();
     simd_ops_f32();
     simd_ops_f64();
     simd_ops_i32();
+    simd_ops_non_pow2();
     simd_cast();
     simd_swizzle();
     simd_gather_scatter();
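
A minimal standalone sketch (not part of the patch) of the layout fact the first hunk relies on: for a non-power-of-two lane count, a #[repr(simd)] vector can be larger than the plain array of its elements, so `len` many `e_ty` does not cover the whole place and a size-checked transmute would fail. The type name `U32x3` and the concrete sizes are illustrative assumptions; only the array-size <= vector-size relation, mirroring the new assert in `mplace_to_simd`, is asserted.

// Nightly-only sketch; `U32x3` is a made-up example type, not taken from the patch.
#![feature(repr_simd)]
#![allow(internal_features)]

#[repr(simd)]
#[derive(Copy, Clone)]
struct U32x3([u32; 3]);

fn main() {
    let array_size = std::mem::size_of::<[u32; 3]>(); // 12 bytes
    let vector_size = std::mem::size_of::<U32x3>(); // rounded up for alignment, e.g. 16 bytes
    println!("array: {array_size} bytes, SIMD vector: {vector_size} bytes");
    // The invariant the patch asserts: the element array never outgrows the vector,
    // so projecting the vector place at offset 0 as `[e_ty; len]` stays in bounds.
    assert!(array_size <= vector_size);
}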