diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 63e59ea13fc..7f87d06d6ef 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -1136,12 +1136,12 @@ fn atomic_rmw(
         order: rustc_codegen_ssa::common::AtomicOrdering,
     ) -> &'ll Value {
         // The only RMW operation that LLVM supports on pointers is compare-exchange.
-        if self.val_ty(src) == self.type_ptr()
-            && op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg
-        {
+        let requires_cast_to_int = self.val_ty(src) == self.type_ptr()
+            && op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg;
+        if requires_cast_to_int {
             src = self.ptrtoint(src, self.type_isize());
         }
-        unsafe {
+        let mut res = unsafe {
             llvm::LLVMBuildAtomicRMW(
                 self.llbuilder,
                 AtomicRmwBinOp::from_generic(op),
@@ -1150,7 +1150,11 @@ fn atomic_rmw(
                 AtomicOrdering::from_generic(order),
                 llvm::False, // SingleThreaded
             )
+        };
+        if requires_cast_to_int {
+            res = self.inttoptr(res, self.type_ptr());
         }
+        res
     }
 
     fn atomic_fence(
diff --git a/tests/codegen/atomicptr.rs b/tests/codegen/atomicptr.rs
new file mode 100644
index 00000000000..cbbd5615512
--- /dev/null
+++ b/tests/codegen/atomicptr.rs
@@ -0,0 +1,38 @@
+// LLVM does not support some atomic RMW operations on pointers, so inside codegen we lower those
+// to integer atomics, surrounded by casts to and from integer type.
+// This test ensures that we do the round-trip correctly for AtomicPtr::fetch_byte_add, and also
+// ensures that we do not have such a round-trip for AtomicPtr::swap, because LLVM supports pointer
+// arguments to `atomicrmw xchg`.
+
+//@ compile-flags: -O -Cno-prepopulate-passes
+#![crate_type = "lib"]
+
+#![feature(strict_provenance)]
+#![feature(strict_provenance_atomic_ptr)]
+
+use std::sync::atomic::AtomicPtr;
+use std::sync::atomic::Ordering::Relaxed;
+use std::ptr::without_provenance_mut;
+
+// Portability hack so that we can say [[USIZE]] instead of i64/i32/i16 for usize.
+// CHECK: @helper([[USIZE:i[0-9]+]] noundef %_1)
+#[no_mangle]
+pub fn helper(_: usize) {}
+
+// CHECK-LABEL: @atomicptr_fetch_byte_add
+#[no_mangle]
+pub fn atomicptr_fetch_byte_add(a: &AtomicPtr<u8>, v: usize) -> *mut u8 {
+    // CHECK: %[[INTPTR:.*]] = ptrtoint ptr %{{.*}} to [[USIZE]]
+    // CHECK-NEXT: %[[RET:.*]] = atomicrmw add ptr %{{.*}}, [[USIZE]] %[[INTPTR]]
+    // CHECK-NEXT: inttoptr [[USIZE]] %[[RET]] to ptr
+    a.fetch_byte_add(v, Relaxed)
+}
+
+// CHECK-LABEL: @atomicptr_swap
+#[no_mangle]
+pub fn atomicptr_swap(a: &AtomicPtr<u8>, ptr: *mut u8) -> *mut u8 {
+    // CHECK-NOT: ptrtoint
+    // CHECK: atomicrmw xchg ptr %{{.*}}, ptr %{{.*}} monotonic
+    // CHECK-NOT: inttoptr
+    a.swap(ptr, Relaxed)
+}
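
For reference, a minimal nightly-only usage sketch of the two AtomicPtr operations the new test exercises. The buffer and assertions below are illustrative only and not part of the patch; the strict_provenance_atomic_ptr gate is the same one enabled in the test above.

    #![feature(strict_provenance_atomic_ptr)]

    use std::sync::atomic::{AtomicPtr, Ordering::Relaxed};

    fn main() {
        let mut buf = [0u8; 8];
        let a = AtomicPtr::new(buf.as_mut_ptr());

        // `fetch_byte_add` is the operation that codegen lowers through
        // ptrtoint/inttoptr: it atomically advances the stored pointer by
        // the given number of bytes and returns the previous pointer.
        let old = a.fetch_byte_add(4, Relaxed);
        assert_eq!(old, buf.as_mut_ptr());

        // `swap` maps directly to `atomicrmw xchg ptr`, so no integer
        // round-trip is emitted; it returns the previously stored pointer.
        let prev = a.swap(buf.as_mut_ptr(), Relaxed);
        assert_eq!(prev, buf.as_mut_ptr().wrapping_add(4));
    }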