move atomic intrinsics to their own file

commit 52a6ac96b0 (parent 5a4d71faad)
Author: Ralf Jung
Date: 2022-07-18 07:55:11 -04:00
4 changed files with 473 additions and 448 deletions
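For orientation, the intrinsics being moved are the ones that back `std::sync::atomic`. A small illustrative example (not part of the commit) of user-level operations and the intrinsic names they lower to under the `atomic_<op>_<ordering>` naming scheme used throughout this diff:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let x = AtomicUsize::new(40);
    // `fetch_add` with SeqCst lowers to the `atomic_xadd_seqcst` intrinsic.
    let old = x.fetch_add(2, Ordering::SeqCst);
    assert_eq!(old, 40);
    // `load` with Acquire lowers to `atomic_load_acquire`.
    assert_eq!(x.load(Ordering::Acquire), 42);
}
```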

rust-version

@@ -1 +1 @@
db41351753df840773ca628d8daa040e95d00eef
880416180b0a9ee1141c07d4d17667edb77daebd

src/lib.rs

@@ -62,6 +62,7 @@ pub use rustc_const_eval::interpret::{self, AllocMap, PlaceTy};
pub use crate::shims::dlsym::{Dlsym, EvalContextExt as _};
pub use crate::shims::env::{EnvVars, EvalContextExt as _};
pub use crate::shims::foreign_items::EvalContextExt as _;
pub use crate::shims::intrinsics::atomic::EvalContextExt as _;
pub use crate::shims::intrinsics::EvalContextExt as _;
pub use crate::shims::os_str::EvalContextExt as _;
pub use crate::shims::panic::{CatchUnwindData, EvalContextExt as _};
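The added re-export (`pub use crate::shims::intrinsics::atomic::EvalContextExt as _;`) brings the new extension trait's methods into scope; the `as _` form imports the trait anonymously so the many `EvalContextExt` traits listed here don't collide by name. A minimal sketch of the idiom, with hypothetical names:

```rust
mod speak {
    pub trait Speak {
        fn speak(&self) -> &'static str {
            "hello"
        }
    }
    impl Speak for str {}
}

// Bring the trait's methods into scope without binding the name `Speak`,
// leaving the identifier free for other traits with the same name.
use speak::Speak as _;

fn main() {
    // `speak` resolves because the trait is in scope, even though the
    // identifier `Speak` itself is not imported.
    println!("{}", "world".speak());
}
```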

src/shims/intrinsics/atomic.rs

@@ -0,0 +1,437 @@
use rustc_middle::{mir, mir::BinOp};
use rustc_target::abi::Align;
use crate::*;
use helpers::check_arg_count;
pub enum AtomicOp {
MirOp(mir::BinOp, bool),
Max,
Min,
}
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Calls the atomic intrinsic `intrinsic`; the `atomic_` prefix has already been removed.
fn emulate_atomic_intrinsic(
&mut self,
intrinsic_name: &str,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
match intrinsic_name {
// Atomic operations
"load_seqcst" => this.atomic_load(args, dest, AtomicReadOrd::SeqCst)?,
"load_relaxed" => this.atomic_load(args, dest, AtomicReadOrd::Relaxed)?,
"load_acquire" => this.atomic_load(args, dest, AtomicReadOrd::Acquire)?,
"store_seqcst" => this.atomic_store(args, AtomicWriteOrd::SeqCst)?,
"store_relaxed" => this.atomic_store(args, AtomicWriteOrd::Relaxed)?,
"store_release" => this.atomic_store(args, AtomicWriteOrd::Release)?,
"fence_acquire" => this.atomic_fence(args, AtomicFenceOrd::Acquire)?,
"fence_release" => this.atomic_fence(args, AtomicFenceOrd::Release)?,
"fence_acqrel" => this.atomic_fence(args, AtomicFenceOrd::AcqRel)?,
"fence_seqcst" => this.atomic_fence(args, AtomicFenceOrd::SeqCst)?,
"singlethreadfence_acquire" => this.compiler_fence(args, AtomicFenceOrd::Acquire)?,
"singlethreadfence_release" => this.compiler_fence(args, AtomicFenceOrd::Release)?,
"singlethreadfence_acqrel" => this.compiler_fence(args, AtomicFenceOrd::AcqRel)?,
"singlethreadfence_seqcst" => this.compiler_fence(args, AtomicFenceOrd::SeqCst)?,
"xchg_seqcst" => this.atomic_exchange(args, dest, AtomicRwOrd::SeqCst)?,
"xchg_acquire" => this.atomic_exchange(args, dest, AtomicRwOrd::Acquire)?,
"xchg_release" => this.atomic_exchange(args, dest, AtomicRwOrd::Release)?,
"xchg_acqrel" => this.atomic_exchange(args, dest, AtomicRwOrd::AcqRel)?,
"xchg_relaxed" => this.atomic_exchange(args, dest, AtomicRwOrd::Relaxed)?,
#[rustfmt::skip]
"cxchg_seqcst_seqcst" =>
this.atomic_compare_exchange(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::SeqCst)?,
#[rustfmt::skip]
"cxchg_acquire_acquire" =>
this.atomic_compare_exchange(args, dest, AtomicRwOrd::Acquire, AtomicReadOrd::Acquire)?,
#[rustfmt::skip]
"cxchg_release_relaxed" =>
this.atomic_compare_exchange(args, dest, AtomicRwOrd::Release, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"cxchg_acqrel_acquire" =>
this.atomic_compare_exchange(args, dest, AtomicRwOrd::AcqRel, AtomicReadOrd::Acquire)?,
#[rustfmt::skip]
"cxchg_relaxed_relaxed" =>
this.atomic_compare_exchange(args, dest, AtomicRwOrd::Relaxed, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"cxchg_acquire_relaxed" =>
this.atomic_compare_exchange(args, dest, AtomicRwOrd::Acquire, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"cxchg_acqrel_relaxed" =>
this.atomic_compare_exchange(args, dest, AtomicRwOrd::AcqRel, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"cxchg_seqcst_relaxed" =>
this.atomic_compare_exchange(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"cxchg_seqcst_acquire" =>
this.atomic_compare_exchange(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::Acquire)?,
#[rustfmt::skip]
"cxchgweak_seqcst_seqcst" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::SeqCst)?,
#[rustfmt::skip]
"cxchgweak_acquire_acquire" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::Acquire, AtomicReadOrd::Acquire)?,
#[rustfmt::skip]
"cxchgweak_release_relaxed" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::Release, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"cxchgweak_acqrel_acquire" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::AcqRel, AtomicReadOrd::Acquire)?,
#[rustfmt::skip]
"cxchgweak_relaxed_relaxed" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::Relaxed, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"cxchgweak_acquire_relaxed" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::Acquire, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"cxchgweak_acqrel_relaxed" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::AcqRel, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"cxchgweak_seqcst_relaxed" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"cxchgweak_seqcst_acquire" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::Acquire)?,
#[rustfmt::skip]
"or_seqcst" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::SeqCst)?,
#[rustfmt::skip]
"or_acquire" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::Acquire)?,
#[rustfmt::skip]
"or_release" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::Release)?,
#[rustfmt::skip]
"or_acqrel" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::AcqRel)?,
#[rustfmt::skip]
"or_relaxed" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::Relaxed)?,
#[rustfmt::skip]
"xor_seqcst" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::SeqCst)?,
#[rustfmt::skip]
"xor_acquire" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::Acquire)?,
#[rustfmt::skip]
"xor_release" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::Release)?,
#[rustfmt::skip]
"xor_acqrel" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::AcqRel)?,
#[rustfmt::skip]
"xor_relaxed" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::Relaxed)?,
#[rustfmt::skip]
"and_seqcst" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::SeqCst)?,
#[rustfmt::skip]
"and_acquire" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::Acquire)?,
#[rustfmt::skip]
"and_release" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::Release)?,
#[rustfmt::skip]
"and_acqrel" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::AcqRel)?,
#[rustfmt::skip]
"and_relaxed" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::Relaxed)?,
#[rustfmt::skip]
"nand_seqcst" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::SeqCst)?,
#[rustfmt::skip]
"nand_acquire" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::Acquire)?,
#[rustfmt::skip]
"nand_release" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::Release)?,
#[rustfmt::skip]
"nand_acqrel" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::AcqRel)?,
#[rustfmt::skip]
"nand_relaxed" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::Relaxed)?,
#[rustfmt::skip]
"xadd_seqcst" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::SeqCst)?,
#[rustfmt::skip]
"xadd_acquire" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::Acquire)?,
#[rustfmt::skip]
"xadd_release" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::Release)?,
#[rustfmt::skip]
"xadd_acqrel" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::AcqRel)?,
#[rustfmt::skip]
"xadd_relaxed" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::Relaxed)?,
#[rustfmt::skip]
"xsub_seqcst" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::SeqCst)?,
#[rustfmt::skip]
"xsub_acquire" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::Acquire)?,
#[rustfmt::skip]
"xsub_release" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::Release)?,
#[rustfmt::skip]
"xsub_acqrel" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::AcqRel)?,
#[rustfmt::skip]
"xsub_relaxed" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::Relaxed)?,
"min_seqcst" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::SeqCst)?,
"min_acquire" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Acquire)?,
"min_release" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Release)?,
"min_acqrel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::AcqRel)?,
"min_relaxed" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Relaxed)?,
"max_seqcst" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::SeqCst)?,
"max_acquire" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Acquire)?,
"max_release" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Release)?,
"max_acqrel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::AcqRel)?,
"max_relaxed" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Relaxed)?,
"umin_seqcst" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::SeqCst)?,
"umin_acquire" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Acquire)?,
"umin_release" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Release)?,
"umin_acqrel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::AcqRel)?,
"umin_relaxed" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Relaxed)?,
"umax_seqcst" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::SeqCst)?,
"umax_acquire" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Acquire)?,
"umax_release" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Release)?,
"umax_acqrel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::AcqRel)?,
"umax_relaxed" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Relaxed)?,
name => throw_unsup_format!("unimplemented intrinsic: `atomic_{name}`"),
}
Ok(())
}
fn atomic_load(
&mut self,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
atomic: AtomicReadOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let [place] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
// make sure it fits into a scalar; otherwise it cannot be atomic
let val = this.read_scalar_atomic(&place, atomic)?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
CheckInAllocMsg::MemoryAccessTest,
)?;
// Perform regular access.
this.write_scalar(val, dest)?;
Ok(())
}
fn atomic_store(
&mut self,
args: &[OpTy<'tcx, Tag>],
atomic: AtomicWriteOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let [place, val] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
let val = this.read_scalar(val)?; // make sure it fits into a scalar; otherwise it cannot be atomic
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
CheckInAllocMsg::MemoryAccessTest,
)?;
// Perform atomic store
this.write_scalar_atomic(val, &place, atomic)?;
Ok(())
}
fn compiler_fence(
&mut self,
args: &[OpTy<'tcx, Tag>],
atomic: AtomicFenceOrd,
) -> InterpResult<'tcx> {
let [] = check_arg_count(args)?;
let _ = atomic;
//FIXME: compiler fences are currently ignored
Ok(())
}
fn atomic_fence(
&mut self,
args: &[OpTy<'tcx, Tag>],
atomic: AtomicFenceOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let [] = check_arg_count(args)?;
this.validate_atomic_fence(atomic)?;
Ok(())
}
fn atomic_op(
&mut self,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
atomic_op: AtomicOp,
atomic: AtomicRwOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let [place, rhs] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
if !place.layout.ty.is_integral() && !place.layout.ty.is_unsafe_ptr() {
span_bug!(
this.cur_span(),
"atomic arithmetic operations only work on integer and raw pointer types",
);
}
let rhs = this.read_immediate(rhs)?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
CheckInAllocMsg::MemoryAccessTest,
)?;
match atomic_op {
AtomicOp::Min => {
let old = this.atomic_min_max_scalar(&place, rhs, true, atomic)?;
this.write_immediate(*old, dest)?; // old value is returned
Ok(())
}
AtomicOp::Max => {
let old = this.atomic_min_max_scalar(&place, rhs, false, atomic)?;
this.write_immediate(*old, dest)?; // old value is returned
Ok(())
}
AtomicOp::MirOp(op, neg) => {
let old = this.atomic_op_immediate(&place, &rhs, op, neg, atomic)?;
this.write_immediate(*old, dest)?; // old value is returned
Ok(())
}
}
}
fn atomic_exchange(
&mut self,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
atomic: AtomicRwOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let [place, new] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
let new = this.read_scalar(new)?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
CheckInAllocMsg::MemoryAccessTest,
)?;
let old = this.atomic_exchange_scalar(&place, new, atomic)?;
this.write_scalar(old, dest)?; // old value is returned
Ok(())
}
fn atomic_compare_exchange_impl(
&mut self,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
success: AtomicRwOrd,
fail: AtomicReadOrd,
can_fail_spuriously: bool,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let [place, expect_old, new] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
let new = this.read_scalar(new)?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
CheckInAllocMsg::MemoryAccessTest,
)?;
let old = this.atomic_compare_exchange_scalar(
&place,
&expect_old,
new,
success,
fail,
can_fail_spuriously,
)?;
// Return old value.
this.write_immediate(old, dest)?;
Ok(())
}
fn atomic_compare_exchange(
&mut self,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
success: AtomicRwOrd,
fail: AtomicReadOrd,
) -> InterpResult<'tcx> {
self.atomic_compare_exchange_impl(args, dest, success, fail, false)
}
fn atomic_compare_exchange_weak(
&mut self,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
success: AtomicRwOrd,
fail: AtomicReadOrd,
) -> InterpResult<'tcx> {
self.atomic_compare_exchange_impl(args, dest, success, fail, true)
}
}
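The big match above is mechanical: each intrinsic name encodes an operation plus one ordering suffix (two for the `cxchg*` family, success ordering then failure ordering). A standalone sketch of that naming scheme, using simplified stand-in types rather than Miri's:

```rust
#[derive(Debug, PartialEq)]
enum AtomicOrd {
    SeqCst,
    Acquire,
    Release,
    AcqRel,
    Relaxed,
}

/// Peel one ordering suffix off a name like "xchg_acqrel"; calling this
/// twice handles the two-suffix compare-exchange names.
fn split_ordering(name: &str) -> Option<(&str, AtomicOrd)> {
    let (rest, ord) = name.rsplit_once('_')?;
    let ord = match ord {
        "seqcst" => AtomicOrd::SeqCst,
        "acquire" => AtomicOrd::Acquire,
        "release" => AtomicOrd::Release,
        "acqrel" => AtomicOrd::AcqRel,
        "relaxed" => AtomicOrd::Relaxed,
        _ => return None,
    };
    Some((rest, ord))
}

fn main() {
    assert_eq!(split_ordering("xchg_acqrel"), Some(("xchg", AtomicOrd::AcqRel)));
    // "cxchg_seqcst_relaxed": the failure ordering comes off first.
    let (rest, fail) = split_ordering("cxchg_seqcst_relaxed").unwrap();
    assert_eq!(fail, AtomicOrd::Relaxed);
    assert_eq!(split_ordering(rest), Some(("cxchg", AtomicOrd::SeqCst)));
}
```

Miri's actual code matches the full strings rather than parsing them; the sketch only illustrates the convention. Relatedly, the `bool` in `AtomicOp::MirOp(BinOp::BitAnd, true)` asks for the result to be negated, which is how `nand` (`!(old & rhs)`) is encoded.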

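The alignment check repeated in each helper above enforces that an atomic access is aligned to its size, which can exceed the natural alignment of the wrapped integer on 32-bit targets. A quick illustration (on targets where `AtomicU64` exists):

```rust
use std::mem::{align_of, size_of};
use std::sync::atomic::AtomicU64;

fn main() {
    // On a 32-bit target, align_of::<u64>() may be 4, but AtomicU64 is
    // always 8-aligned because atomic accesses require size == alignment.
    println!("u64:       size {} align {}", size_of::<u64>(), align_of::<u64>());
    println!("AtomicU64: size {} align {}", size_of::<AtomicU64>(), align_of::<AtomicU64>());
    assert_eq!(size_of::<AtomicU64>(), align_of::<AtomicU64>());
}
```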
src/shims/intrinsics/mod.rs

@@ -1,21 +1,17 @@
pub mod atomic;
use std::iter;
use log::trace;
use rustc_apfloat::{Float, Round};
use rustc_middle::ty::layout::{HasParamEnv, IntegerExt, LayoutOf};
use rustc_middle::{mir, mir::BinOp, ty, ty::FloatTy};
use rustc_target::abi::{Align, Endian, HasDataLayout, Integer, Size};
use rustc_middle::{mir, ty, ty::FloatTy};
use rustc_target::abi::{Endian, HasDataLayout, Integer, Size};
use crate::*;
use helpers::check_arg_count;
pub enum AtomicOp {
MirOp(mir::BinOp, bool),
Max,
Min,
}
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
fn call_intrinsic(
@@ -28,11 +24,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
// See if the core engine can handle this intrinsic.
if this.emulate_intrinsic(instance, args, dest, ret)? {
return Ok(());
}
// All supported intrinsics have a return place.
// All remaining supported intrinsics have a return place.
let intrinsic_name = this.tcx.item_name(instance.def_id());
let intrinsic_name = intrinsic_name.as_str();
let ret = match ret {
@@ -40,7 +37,33 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
Some(p) => p,
};
// Then handle terminating intrinsics.
// Some intrinsics are special and need the "ret".
match intrinsic_name {
"try" => return this.handle_try(args, dest, ret),
_ => {}
}
// The rest jumps to `ret` immediately.
this.emulate_intrinsic_by_name(intrinsic_name, args, dest)?;
trace!("{:?}", this.dump_place(**dest));
this.go_to_block(ret);
Ok(())
}
/// Emulates a Miri-supported intrinsic (not supported by the core engine).
fn emulate_intrinsic_by_name(
&mut self,
intrinsic_name: &str,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
if let Some(name) = intrinsic_name.strip_prefix("atomic_") {
return this.emulate_atomic_intrinsic(name, args, dest);
}
match intrinsic_name {
// Miri overwriting CTFE intrinsics.
"ptr_guaranteed_eq" => {
@@ -78,7 +101,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
"write_bytes" | "volatile_set_memory" => {
let [ptr, val_byte, count] = check_arg_count(args)?;
let ty = instance.substs.type_at(0);
let ty = ptr.layout.ty.builtin_deref(true).unwrap().ty;
let ty_layout = this.layout_of(ty)?;
let val_byte = this.read_scalar(val_byte)?.to_u8()?;
let ptr = this.read_pointer(ptr)?;
@@ -859,230 +882,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.write_int(res, dest)?;
}
// Atomic operations
"atomic_load_seqcst" => this.atomic_load(args, dest, AtomicReadOrd::SeqCst)?,
"atomic_load_relaxed" => this.atomic_load(args, dest, AtomicReadOrd::Relaxed)?,
"atomic_load_acquire" => this.atomic_load(args, dest, AtomicReadOrd::Acquire)?,
"atomic_store_seqcst" => this.atomic_store(args, AtomicWriteOrd::SeqCst)?,
"atomic_store_relaxed" => this.atomic_store(args, AtomicWriteOrd::Relaxed)?,
"atomic_store_release" => this.atomic_store(args, AtomicWriteOrd::Release)?,
"atomic_fence_acquire" => this.atomic_fence(args, AtomicFenceOrd::Acquire)?,
"atomic_fence_release" => this.atomic_fence(args, AtomicFenceOrd::Release)?,
"atomic_fence_acqrel" => this.atomic_fence(args, AtomicFenceOrd::AcqRel)?,
"atomic_fence_seqcst" => this.atomic_fence(args, AtomicFenceOrd::SeqCst)?,
"atomic_singlethreadfence_acquire" =>
this.compiler_fence(args, AtomicFenceOrd::Acquire)?,
"atomic_singlethreadfence_release" =>
this.compiler_fence(args, AtomicFenceOrd::Release)?,
"atomic_singlethreadfence_acqrel" =>
this.compiler_fence(args, AtomicFenceOrd::AcqRel)?,
"atomic_singlethreadfence_seqcst" =>
this.compiler_fence(args, AtomicFenceOrd::SeqCst)?,
"atomic_xchg_seqcst" => this.atomic_exchange(args, dest, AtomicRwOrd::SeqCst)?,
"atomic_xchg_acquire" => this.atomic_exchange(args, dest, AtomicRwOrd::Acquire)?,
"atomic_xchg_release" => this.atomic_exchange(args, dest, AtomicRwOrd::Release)?,
"atomic_xchg_acqrel" => this.atomic_exchange(args, dest, AtomicRwOrd::AcqRel)?,
"atomic_xchg_relaxed" => this.atomic_exchange(args, dest, AtomicRwOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchg_seqcst_seqcst" =>
this.atomic_compare_exchange(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::SeqCst)?,
#[rustfmt::skip]
"atomic_cxchg_acquire_acquire" =>
this.atomic_compare_exchange(args, dest, AtomicRwOrd::Acquire, AtomicReadOrd::Acquire)?,
#[rustfmt::skip]
"atomic_cxchg_release_relaxed" =>
this.atomic_compare_exchange(args, dest, AtomicRwOrd::Release, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchg_acqrel_acquire" =>
this.atomic_compare_exchange(args, dest, AtomicRwOrd::AcqRel, AtomicReadOrd::Acquire)?,
#[rustfmt::skip]
"atomic_cxchg_relaxed_relaxed" =>
this.atomic_compare_exchange(args, dest, AtomicRwOrd::Relaxed, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchg_acquire_relaxed" =>
this.atomic_compare_exchange(args, dest, AtomicRwOrd::Acquire, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchg_acqrel_relaxed" =>
this.atomic_compare_exchange(args, dest, AtomicRwOrd::AcqRel, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchg_seqcst_relaxed" =>
this.atomic_compare_exchange(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchg_seqcst_acquire" =>
this.atomic_compare_exchange(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::Acquire)?,
#[rustfmt::skip]
"atomic_cxchgweak_seqcst_seqcst" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::SeqCst)?,
#[rustfmt::skip]
"atomic_cxchgweak_acquire_acquire" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::Acquire, AtomicReadOrd::Acquire)?,
#[rustfmt::skip]
"atomic_cxchgweak_release_relaxed" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::Release, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchgweak_acqrel_acquire" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::AcqRel, AtomicReadOrd::Acquire)?,
#[rustfmt::skip]
"atomic_cxchgweak_relaxed_relaxed" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::Relaxed, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchgweak_acquire_relaxed" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::Acquire, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchgweak_acqrel_relaxed" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::AcqRel, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchgweak_seqcst_relaxed" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchgweak_seqcst_acquire" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::Acquire)?,
#[rustfmt::skip]
"atomic_or_seqcst" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::SeqCst)?,
#[rustfmt::skip]
"atomic_or_acquire" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::Acquire)?,
#[rustfmt::skip]
"atomic_or_release" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::Release)?,
#[rustfmt::skip]
"atomic_or_acqrel" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::AcqRel)?,
#[rustfmt::skip]
"atomic_or_relaxed" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_xor_seqcst" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::SeqCst)?,
#[rustfmt::skip]
"atomic_xor_acquire" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::Acquire)?,
#[rustfmt::skip]
"atomic_xor_release" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::Release)?,
#[rustfmt::skip]
"atomic_xor_acqrel" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::AcqRel)?,
#[rustfmt::skip]
"atomic_xor_relaxed" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_and_seqcst" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::SeqCst)?,
#[rustfmt::skip]
"atomic_and_acquire" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::Acquire)?,
#[rustfmt::skip]
"atomic_and_release" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::Release)?,
#[rustfmt::skip]
"atomic_and_acqrel" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::AcqRel)?,
#[rustfmt::skip]
"atomic_and_relaxed" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_nand_seqcst" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::SeqCst)?,
#[rustfmt::skip]
"atomic_nand_acquire" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::Acquire)?,
#[rustfmt::skip]
"atomic_nand_release" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::Release)?,
#[rustfmt::skip]
"atomic_nand_acqrel" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::AcqRel)?,
#[rustfmt::skip]
"atomic_nand_relaxed" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_xadd_seqcst" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::SeqCst)?,
#[rustfmt::skip]
"atomic_xadd_acquire" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::Acquire)?,
#[rustfmt::skip]
"atomic_xadd_release" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::Release)?,
#[rustfmt::skip]
"atomic_xadd_acqrel" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::AcqRel)?,
#[rustfmt::skip]
"atomic_xadd_relaxed" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_xsub_seqcst" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::SeqCst)?,
#[rustfmt::skip]
"atomic_xsub_acquire" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::Acquire)?,
#[rustfmt::skip]
"atomic_xsub_release" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::Release)?,
#[rustfmt::skip]
"atomic_xsub_acqrel" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::AcqRel)?,
#[rustfmt::skip]
"atomic_xsub_relaxed" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::Relaxed)?,
"atomic_min_seqcst" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::SeqCst)?,
"atomic_min_acquire" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Acquire)?,
"atomic_min_release" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Release)?,
"atomic_min_acqrel" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::AcqRel)?,
"atomic_min_relaxed" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Relaxed)?,
"atomic_max_seqcst" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::SeqCst)?,
"atomic_max_acquire" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Acquire)?,
"atomic_max_release" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Release)?,
"atomic_max_acqrel" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::AcqRel)?,
"atomic_max_relaxed" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Relaxed)?,
"atomic_umin_seqcst" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::SeqCst)?,
"atomic_umin_acquire" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Acquire)?,
"atomic_umin_release" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Release)?,
"atomic_umin_acqrel" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::AcqRel)?,
"atomic_umin_relaxed" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Relaxed)?,
"atomic_umax_seqcst" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::SeqCst)?,
"atomic_umax_acquire" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Acquire)?,
"atomic_umax_release" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Release)?,
"atomic_umax_acqrel" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::AcqRel)?,
"atomic_umax_relaxed" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Relaxed)?,
// Other
"exact_div" => {
let [num, denom] = check_arg_count(args)?;
this.exact_div(&this.read_immediate(num)?, &this.read_immediate(denom)?, dest)?;
}
"try" => return this.handle_try(args, dest, ret),
"breakpoint" => {
let [] = check_arg_count(args)?;
// normally this would raise a SIGTRAP, which aborts if no debugger is connected
@@ -1092,227 +897,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
name => throw_unsup_format!("unimplemented intrinsic: `{name}`"),
}
trace!("{:?}", this.dump_place(**dest));
this.go_to_block(ret);
Ok(())
}
fn atomic_load(
&mut self,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
atomic: AtomicReadOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let [place] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
// make sure it fits into a scalar; otherwise it cannot be atomic
let val = this.read_scalar_atomic(&place, atomic)?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
CheckInAllocMsg::MemoryAccessTest,
)?;
// Perform regular access.
this.write_scalar(val, dest)?;
Ok(())
}
fn atomic_store(
&mut self,
args: &[OpTy<'tcx, Tag>],
atomic: AtomicWriteOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let [place, val] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
let val = this.read_scalar(val)?; // make sure it fits into a scalar; otherwise it cannot be atomic
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
CheckInAllocMsg::MemoryAccessTest,
)?;
// Perform atomic store
this.write_scalar_atomic(val, &place, atomic)?;
Ok(())
}
fn compiler_fence(
&mut self,
args: &[OpTy<'tcx, Tag>],
atomic: AtomicFenceOrd,
) -> InterpResult<'tcx> {
let [] = check_arg_count(args)?;
let _ = atomic;
//FIXME: compiler fences are currently ignored
Ok(())
}
fn atomic_fence(
&mut self,
args: &[OpTy<'tcx, Tag>],
atomic: AtomicFenceOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let [] = check_arg_count(args)?;
this.validate_atomic_fence(atomic)?;
Ok(())
}
fn atomic_op(
&mut self,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
atomic_op: AtomicOp,
atomic: AtomicRwOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let [place, rhs] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
if !place.layout.ty.is_integral() && !place.layout.ty.is_unsafe_ptr() {
span_bug!(
this.cur_span(),
"atomic arithmetic operations only work on integer and raw pointer types",
);
}
let rhs = this.read_immediate(rhs)?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
CheckInAllocMsg::MemoryAccessTest,
)?;
match atomic_op {
AtomicOp::Min => {
let old = this.atomic_min_max_scalar(&place, rhs, true, atomic)?;
this.write_immediate(*old, dest)?; // old value is returned
Ok(())
}
AtomicOp::Max => {
let old = this.atomic_min_max_scalar(&place, rhs, false, atomic)?;
this.write_immediate(*old, dest)?; // old value is returned
Ok(())
}
AtomicOp::MirOp(op, neg) => {
let old = this.atomic_op_immediate(&place, &rhs, op, neg, atomic)?;
this.write_immediate(*old, dest)?; // old value is returned
Ok(())
}
}
}
fn atomic_exchange(
&mut self,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
atomic: AtomicRwOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let [place, new] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
let new = this.read_scalar(new)?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
CheckInAllocMsg::MemoryAccessTest,
)?;
let old = this.atomic_exchange_scalar(&place, new, atomic)?;
this.write_scalar(old, dest)?; // old value is returned
Ok(())
}
fn atomic_compare_exchange_impl(
&mut self,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
success: AtomicRwOrd,
fail: AtomicReadOrd,
can_fail_spuriously: bool,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let [place, expect_old, new] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
let new = this.read_scalar(new)?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
CheckInAllocMsg::MemoryAccessTest,
)?;
let old = this.atomic_compare_exchange_scalar(
&place,
&expect_old,
new,
success,
fail,
can_fail_spuriously,
)?;
// Return old value.
this.write_immediate(old, dest)?;
Ok(())
}
fn atomic_compare_exchange(
&mut self,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
success: AtomicRwOrd,
fail: AtomicReadOrd,
) -> InterpResult<'tcx> {
self.atomic_compare_exchange_impl(args, dest, success, fail, false)
}
fn atomic_compare_exchange_weak(
&mut self,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
success: AtomicRwOrd,
fail: AtomicReadOrd,
) -> InterpResult<'tcx> {
self.atomic_compare_exchange_impl(args, dest, success, fail, true)
}
fn float_to_int_unchecked<F>(
&self,
f: F,