Add the missing inttoptr when we ptrtoint in ptr atomics

This commit is contained in:
Ben Kimock 2024-03-22 18:07:43 -04:00
parent b3df0d7e5e
commit 6b794f6c80
2 changed files with 46 additions and 4 deletions

View File

@@ -1136,12 +1136,12 @@ fn atomic_rmw(
         order: rustc_codegen_ssa::common::AtomicOrdering,
     ) -> &'ll Value {
         // The only RMW operation that LLVM supports on pointers is compare-exchange.
-        if self.val_ty(src) == self.type_ptr()
-            && op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg
-        {
+        let requires_cast_to_int = self.val_ty(src) == self.type_ptr()
+            && op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg;
+        if requires_cast_to_int {
             src = self.ptrtoint(src, self.type_isize());
         }
-        unsafe {
+        let mut res = unsafe {
             llvm::LLVMBuildAtomicRMW(
                 self.llbuilder,
                 AtomicRmwBinOp::from_generic(op),
@@ -1150,7 +1150,11 @@ fn atomic_rmw(
                 AtomicOrdering::from_generic(order),
                 llvm::False, // SingleThreaded
             )
-        }
+        };
+        if requires_cast_to_int {
+            res = self.inttoptr(res, self.type_ptr());
+        }
+        res
     }

     fn atomic_fence(

View File

@@ -0,0 +1,38 @@
// LLVM does not support some atomic RMW operations on pointers, so inside codegen we lower those
// to integer atomics, surrounded by casts to and from integer type.
// This test ensures that we do the round-trip correctly for AtomicPtr::fetch_byte_add, and also
// ensures that we do not have such a round-trip for AtomicPtr::swap, because LLVM supports pointer
// arguments to `atomicrmw xchg`.
//@ compile-flags: -O -Cno-prepopulate-passes
#![crate_type = "lib"]
#![feature(strict_provenance)]
#![feature(strict_provenance_atomic_ptr)]
use std::sync::atomic::AtomicPtr;
use std::sync::atomic::Ordering::Relaxed;
use std::ptr::without_provenance_mut;
// Portability hack so that we can say [[USIZE]] instead of i64/i32/i16 for usize.
// This function exists only so FileCheck can capture the target's pointer-width
// integer type into the [[USIZE]] substitution variable, which the CHECK lines
// of the atomic tests below reuse; the function body is deliberately empty.
// CHECK: @helper([[USIZE:i[0-9]+]] noundef %_1)
#[no_mangle]
pub fn helper(_: usize) {}
// `fetch_byte_add` is an RMW add on a pointer, which LLVM's `atomicrmw` does not
// support directly, so codegen must lower it to an integer atomic wrapped in
// casts: ptrtoint the operand, do an integer `atomicrmw add`, then inttoptr the
// result back. The CHECK-NEXT adjacency pins the exact three-instruction shape.
// CHECK-LABEL: @atomicptr_fetch_byte_add
#[no_mangle]
pub fn atomicptr_fetch_byte_add(a: &AtomicPtr<u8>, v: usize) -> *mut u8 {
// CHECK: %[[INTPTR:.*]] = ptrtoint ptr %{{.*}} to [[USIZE]]
// CHECK-NEXT: %[[RET:.*]] = atomicrmw add ptr %{{.*}}, [[USIZE]] %[[INTPTR]]
// CHECK-NEXT: inttoptr [[USIZE]] %[[RET]] to ptr
a.fetch_byte_add(v, Relaxed)
}
// LLVM supports pointer operands for `atomicrmw xchg`, so a swap must NOT get
// the integer round-trip: the CHECK-NOT lines assert that no ptrtoint/inttoptr
// pair surrounds the pointer-typed atomicrmw.
// CHECK-LABEL: @atomicptr_swap
#[no_mangle]
pub fn atomicptr_swap(a: &AtomicPtr<u8>, ptr: *mut u8) -> *mut u8 {
// CHECK-NOT: ptrtoint
// CHECK: atomicrmw xchg ptr %{{.*}}, ptr %{{.*}} monotonic
// CHECK-NOT: inttoptr
a.swap(ptr, Relaxed)
}