Sink ptrtoint for RMW ops on pointers to cg_llvm
commit aa6cfb2669
parent 42825768b1
@@ -1132,9 +1132,15 @@ fn atomic_rmw(
         &mut self,
         op: rustc_codegen_ssa::common::AtomicRmwBinOp,
         dst: &'ll Value,
-        src: &'ll Value,
+        mut src: &'ll Value,
         order: rustc_codegen_ssa::common::AtomicOrdering,
     ) -> &'ll Value {
+        // The only RMW operation that LLVM supports on pointers is compare-exchange.
+        if self.val_ty(src) == self.type_ptr()
+            && op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg
+        {
+            src = self.ptrtoint(src, self.type_isize());
+        }
         unsafe {
             llvm::LLVMBuildAtomicRMW(
                 self.llbuilder,
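For orientation, here is a minimal standalone sketch of the condition the LLVM builder now applies before emitting the RMW: every pointer-typed source operand gets cast to isize, except for a plain exchange. This is not the rustc code; the helper name needs_ptrtoint and the trimmed-down enum are hypothetical.

// Hypothetical mirror of the check sunk into cg_llvm's atomic_rmw:
// only AtomicXchg may keep a pointer-typed operand.
#[derive(Copy, Clone, PartialEq)]
enum AtomicRmwBinOp {
    AtomicXchg,
    AtomicAdd,
    // ...remaining ops elided
}

// True when the pointer operand must go through ptrtoint before the RMW is built.
fn needs_ptrtoint(src_is_pointer: bool, op: AtomicRmwBinOp) -> bool {
    src_is_pointer && op != AtomicRmwBinOp::AtomicXchg
}

fn main() {
    assert!(!needs_ptrtoint(true, AtomicRmwBinOp::AtomicXchg)); // xchg keeps the pointer
    assert!(needs_ptrtoint(true, AtomicRmwBinOp::AtomicAdd));   // arithmetic ops run on isize bits
    assert!(!needs_ptrtoint(false, AtomicRmwBinOp::AtomicAdd)); // integer operands are untouched
}

That `!=` comparison is what the PartialEq derive in the next hunk enables.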
@@ -42,7 +42,7 @@ pub enum RealPredicate {
     RealPredicateTrue,
 }

-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, PartialEq)]
 pub enum AtomicRmwBinOp {
     AtomicXchg,
     AtomicAdd,
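The new derive appears to exist solely to support that `!=` check in the builder. As a design note, a matches! pattern would have worked without it; a small hypothetical sketch, not part of the commit:

#[derive(Copy, Clone)] // no PartialEq needed for this variant of the check
enum AtomicRmwBinOp {
    AtomicXchg,
    AtomicAdd,
}

fn keeps_pointer_operand(op: AtomicRmwBinOp) -> bool {
    // Equivalent to `op == AtomicRmwBinOp::AtomicXchg`, via pattern matching instead.
    matches!(op, AtomicRmwBinOp::AtomicXchg)
}

fn main() {
    assert!(keeps_pointer_operand(AtomicRmwBinOp::AtomicXchg));
    assert!(!keeps_pointer_operand(AtomicRmwBinOp::AtomicAdd));
}

Deriving PartialEq keeps the condition in the builder shorter, which is presumably why the commit goes that way.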
@@ -350,14 +350,8 @@ pub fn codegen_intrinsic_call(
                 if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                     let weak = instruction == "cxchgweak";
                     let dst = args[0].immediate();
-                    let mut cmp = args[1].immediate();
-                    let mut src = args[2].immediate();
-                    if ty.is_unsafe_ptr() {
-                        // Some platforms do not support atomic operations on pointers,
-                        // so we cast to integer first.
-                        cmp = bx.ptrtoint(cmp, bx.type_isize());
-                        src = bx.ptrtoint(src, bx.type_isize());
-                    }
+                    let cmp = args[1].immediate();
+                    let src = args[2].immediate();
                     let (val, success) = bx.atomic_cmpxchg(
                         dst,
                         cmp,
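The pointer special-casing disappears from the generic compare-exchange path entirely: per the comment added in the builder hunk above, compare-exchange is the operation LLVM can perform directly on pointer operands, so cg_ssa now hands the pointers straight to atomic_cmpxchg. At the surface level, this is the kind of Rust that should end up in the cxchg path; a hedged illustration using std's AtomicPtr, not part of the commit:

use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let mut value = 42_i32;
    let slot = AtomicPtr::new(ptr::null_mut());

    // A pointer-typed compare-exchange: the generic layer no longer
    // round-trips the operands through isize before the backend sees them.
    let prev = slot.compare_exchange(
        ptr::null_mut(),
        &mut value,
        Ordering::AcqRel,
        Ordering::Acquire,
    );
    assert_eq!(prev, Ok(ptr::null_mut()));
    assert_eq!(unsafe { *slot.load(Ordering::Acquire) }, 42);
}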
@@ -385,26 +379,12 @@ pub fn codegen_intrinsic_call(
                     let layout = bx.layout_of(ty);
                     let size = layout.size;
                     let source = args[0].immediate();
-                    if ty.is_unsafe_ptr() {
-                        // Some platforms do not support atomic operations on pointers,
-                        // so we cast to integer first...
-                        let llty = bx.type_isize();
-                        let result = bx.atomic_load(
-                            llty,
-                            source,
-                            parse_ordering(bx, ordering),
-                            size,
-                        );
-                        // ... and then cast the result back to a pointer
-                        bx.inttoptr(result, bx.backend_type(layout))
-                    } else {
-                        bx.atomic_load(
-                            bx.backend_type(layout),
-                            source,
-                            parse_ordering(bx, ordering),
-                            size,
-                        )
-                    }
+                    bx.atomic_load(
+                        bx.backend_type(layout),
+                        source,
+                        parse_ordering(bx, ordering),
+                        size,
+                    )
                 } else {
                     invalid_monomorphization(ty);
                     return Ok(());
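The atomic load path loses its isize round-trip as well: cg_ssa now always asks for a load at the layout's backend type, so a pointer load stays pointer-typed instead of being loaded as an integer and converted back with inttoptr. A minimal user-level illustration (assuming, as I understand it, that AtomicPtr::load bottoms out in this intrinsic path):

use std::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let mut value = 7_i32;
    let slot = AtomicPtr::new(&mut value);

    // An atomic load of a pointer; after this change the generic layer requests a
    // pointer-typed load rather than an isize load followed by inttoptr.
    let p = slot.load(Ordering::SeqCst);
    assert_eq!(unsafe { *p }, 7);
}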
@@ -415,13 +395,8 @@ pub fn codegen_intrinsic_call(
                 let ty = fn_args.type_at(0);
                 if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                     let size = bx.layout_of(ty).size;
-                    let mut val = args[1].immediate();
+                    let val = args[1].immediate();
                     let ptr = args[0].immediate();
-                    if ty.is_unsafe_ptr() {
-                        // Some platforms do not support atomic operations on pointers,
-                        // so we cast to integer first.
-                        val = bx.ptrtoint(val, bx.type_isize());
-                    }
                     bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
                 } else {
                     invalid_monomorphization(ty);
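Same story for atomic stores: the value is passed to atomic_store as-is, and any pointer-to-integer conversion is now the backend's business. A small hedged example of a pointer store through std, again not part of the commit:

use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let mut value = 1_i32;
    let slot = AtomicPtr::new(ptr::null_mut());

    // An atomic store of a pointer value, no longer pre-cast to isize in cg_ssa.
    slot.store(&mut value, Ordering::Release);
    assert_eq!(unsafe { *slot.load(Ordering::Acquire) }, 1);
}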
@@ -465,12 +440,7 @@ pub fn codegen_intrinsic_call(
                 let ty = fn_args.type_at(0);
                 if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                     let ptr = args[0].immediate();
-                    let mut val = args[1].immediate();
-                    if ty.is_unsafe_ptr() {
-                        // Some platforms do not support atomic operations on pointers,
-                        // so we cast to integer first.
-                        val = bx.ptrtoint(val, bx.type_isize());
-                    }
+                    let val = args[1].immediate();
                     bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                 } else {
                     invalid_monomorphization(ty);
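Finally, the generic RMW path simply forwards the pointer to atomic_rmw and lets the builder decide, as in the first hunk, whether a ptrtoint is needed; exchange is the one RMW op that keeps its pointer operand. A hedged illustration via AtomicPtr::swap, which should reach this path as an AtomicXchg:

use std::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let mut first = 1_i32;
    let mut second = 2_i32;
    let slot = AtomicPtr::new(&mut first as *mut i32);

    // A pointer swap is an AtomicXchg RMW: with this commit it is the one RMW
    // op the LLVM builder still performs directly on pointer-typed operands.
    let prev = slot.swap(&mut second, Ordering::AcqRel);
    assert_eq!(unsafe { *prev }, 1);
    assert_eq!(unsafe { *slot.load(Ordering::Acquire) }, 2);
}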