move atomic access alignment check to helper function and inside atomic access lib
commit d630671a33
parent df3c141762
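
The rule this commit centralizes is the natural-alignment requirement for atomics: an atomic access must be aligned to its own size, even when the wrapped integer type's ABI alignment is smaller (e.g. u64 is often only 4-aligned on 32-bit x86, yet an 8-byte atomic access still needs an 8-byte-aligned address). A quick plain-Rust illustration of that guarantee (illustrative only, not part of this commit):

use std::mem::{align_of, size_of};
use std::sync::atomic::AtomicU64;

fn main() {
    // Plain u64 may have ABI alignment 4 on some 32-bit targets...
    println!("align_of::<u64>()       = {}", align_of::<u64>());
    // ...but atomic integer types are documented to be aligned to their size,
    // which is exactly what the new atomic_access_check helper enforces for
    // every atomic access Miri interprets.
    println!("align_of::<AtomicU64>() = {}", align_of::<AtomicU64>());
    assert_eq!(align_of::<AtomicU64>(), size_of::<AtomicU64>());
}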
@@ -49,7 +49,7 @@
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_index::vec::{Idx, IndexVec};
 use rustc_middle::{mir, ty::layout::TyAndLayout};
-use rustc_target::abi::Size;
+use rustc_target::abi::{Align, Size};
 
 use crate::*;
 
@@ -463,6 +463,22 @@ fn write_scalar_at_offset_atomic(
         this.write_scalar_atomic(value.into(), &value_place, atomic)
     }
 
+    /// Checks that an atomic access is legal at the given place.
+    fn atomic_access_check(&self, place: &MPlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
+        let this = self.eval_context_ref();
+        // Check alignment requirements. Atomics must always be aligned to their size,
+        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
+        // be 8-aligned).
+        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
+        this.check_ptr_access_align(
+            place.ptr,
+            place.layout.size,
+            align,
+            CheckInAllocMsg::MemoryAccessTest,
+        )?;
+        Ok(())
+    }
+
     /// Perform an atomic read operation at the memory location.
     fn read_scalar_atomic(
         &self,
@@ -470,6 +486,7 @@ fn read_scalar_atomic(
         atomic: AtomicReadOrd,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Provenance>> {
         let this = self.eval_context_ref();
+        this.atomic_access_check(place)?;
         // This will read from the last store in the modification order of this location. In case
         // weak memory emulation is enabled, this may not be the store we will pick to actually read from and return.
         // This is fine with StackedBorrow and race checks because they don't concern metadata on
@@ -490,6 +507,8 @@ fn write_scalar_atomic(
         atomic: AtomicWriteOrd,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
+        this.atomic_access_check(dest)?;
+
         this.validate_overlapping_atomic(dest)?;
         this.allow_data_races_mut(move |this| this.write_scalar(val, &dest.into()))?;
         this.validate_atomic_store(dest, atomic)?;
@@ -511,6 +530,7 @@ fn atomic_op_immediate(
         atomic: AtomicRwOrd,
     ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
         let this = self.eval_context_mut();
+        this.atomic_access_check(place)?;
 
         this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
@@ -540,6 +560,7 @@ fn atomic_exchange_scalar(
         atomic: AtomicRwOrd,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Provenance>> {
         let this = self.eval_context_mut();
+        this.atomic_access_check(place)?;
 
         this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_scalar(&place.into()))?;
@@ -561,6 +582,7 @@ fn atomic_min_max_scalar(
         atomic: AtomicRwOrd,
     ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
         let this = self.eval_context_mut();
+        this.atomic_access_check(place)?;
 
         this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
@@ -604,6 +626,7 @@ fn atomic_compare_exchange_scalar(
     ) -> InterpResult<'tcx, Immediate<Provenance>> {
         use rand::Rng as _;
         let this = self.eval_context_mut();
+        this.atomic_access_check(place)?;
 
         this.validate_overlapping_atomic(place)?;
         // Failure ordering cannot be stronger than success ordering, therefore first attempt
@@ -1016,6 +1039,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriEvalContext<'mir, 'tcx>) -> R) -> R {
         let this = self.eval_context_ref();
         if let Some(data_race) = &this.machine.data_race {
+            assert!(!data_race.ongoing_action_data_race_free.get(), "cannot nest allow_data_races");
             data_race.ongoing_action_data_race_free.set(true);
         }
         let result = op(this);
@@ -1035,6 +1059,7 @@ fn allow_data_races_mut(
     ) -> R {
         let this = self.eval_context_mut();
         if let Some(data_race) = &this.machine.data_race {
+            assert!(!data_race.ongoing_action_data_race_free.get(), "cannot nest allow_data_races");
             data_race.ongoing_action_data_race_free.set(true);
         }
         let result = op(this);
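
The hunks below are from the atomic intrinsic shims. Because the alignment check now runs inside the atomic access functions themselves, each shim drops its hand-rolled copy of the check. A minimal sketch of the resulting shape, using stand-in types rather than Miri's real interfaces (assumed simplification, not the actual code):

// Sketch only: stand-in types, assuming one shared entry-point check.
struct Place {
    addr: u64,
    size: u64,
}

struct Machine;

impl Machine {
    /// Analogous to the new atomic_access_check: atomics need natural (size) alignment.
    fn atomic_access_check(&self, place: &Place) -> Result<(), String> {
        if place.addr % place.size != 0 {
            return Err(format!(
                "atomic access at {:#x} requires alignment {}",
                place.addr, place.size
            ));
        }
        Ok(())
    }

    /// Library-level atomic read: validated once, at the entry point.
    fn read_scalar_atomic(&self, place: &Place) -> Result<u64, String> {
        self.atomic_access_check(place)?;
        Ok(0) // ... the actual read would happen here ...
    }

    /// Intrinsic shim: just forwards, no duplicated alignment logic.
    fn atomic_load_shim(&self, place: &Place) -> Result<u64, String> {
        self.read_scalar_atomic(place)
    }
}

fn main() {
    let m = Machine;
    assert!(m.atomic_load_shim(&Place { addr: 0x1008, size: 8 }).is_ok());
    assert!(m.atomic_load_shim(&Place { addr: 0x1004, size: 8 }).is_err());
}

The upshot of this layering is that the check cannot be forgotten by a new caller: anything going through the atomic access layer is validated exactly once.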
@@ -1,5 +1,4 @@
 use rustc_middle::{mir, mir::BinOp, ty};
-use rustc_target::abi::Align;
 
 use crate::*;
 use helpers::check_arg_count;
@@ -130,20 +129,9 @@ fn atomic_load(
         let [place] = check_arg_count(args)?;
         let place = this.deref_operand(place)?;
 
-        // make sure it fits into a scalar; otherwise it cannot be atomic
+        // Perform atomic load.
         let val = this.read_scalar_atomic(&place, atomic)?;
-
-        // Check alignment requirements. Atomics must always be aligned to their size,
-        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
-        // be 8-aligned).
-        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.check_ptr_access_align(
-            place.ptr,
-            place.layout.size,
-            align,
-            CheckInAllocMsg::MemoryAccessTest,
-        )?;
-        // Perform regular access.
+        // Perform regular store.
         this.write_scalar(val, dest)?;
         Ok(())
     }
@@ -157,19 +145,9 @@ fn atomic_store(
 
         let [place, val] = check_arg_count(args)?;
         let place = this.deref_operand(place)?;
-        let val = this.read_scalar(val)?; // make sure it fits into a scalar; otherwise it cannot be atomic
-
-        // Check alignment requirements. Atomics must always be aligned to their size,
-        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
-        // be 8-aligned).
-        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.check_ptr_access_align(
-            place.ptr,
-            place.layout.size,
-            align,
-            CheckInAllocMsg::MemoryAccessTest,
-        )?;
 
+        // Perform regular load.
+        let val = this.read_scalar(val)?;
         // Perform atomic store
         this.write_scalar_atomic(val, &place, atomic)?;
         Ok(())
@@ -220,17 +198,6 @@ fn atomic_op(
             span_bug!(this.cur_span(), "atomic arithmetic operation type mismatch");
         }
 
-        // Check alignment requirements. Atomics must always be aligned to their size,
-        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
-        // be 8-aligned).
-        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.check_ptr_access_align(
-            place.ptr,
-            place.layout.size,
-            align,
-            CheckInAllocMsg::MemoryAccessTest,
-        )?;
-
         match atomic_op {
             AtomicOp::Min => {
                 let old = this.atomic_min_max_scalar(&place, rhs, true, atomic)?;
@@ -262,17 +229,6 @@ fn atomic_exchange(
         let place = this.deref_operand(place)?;
         let new = this.read_scalar(new)?;
 
-        // Check alignment requirements. Atomics must always be aligned to their size,
-        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
-        // be 8-aligned).
-        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.check_ptr_access_align(
-            place.ptr,
-            place.layout.size,
-            align,
-            CheckInAllocMsg::MemoryAccessTest,
-        )?;
-
         let old = this.atomic_exchange_scalar(&place, new, atomic)?;
         this.write_scalar(old, dest)?; // old value is returned
         Ok(())
@@ -293,17 +249,6 @@ fn atomic_compare_exchange_impl(
         let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
         let new = this.read_scalar(new)?;
 
-        // Check alignment requirements. Atomics must always be aligned to their size,
-        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
-        // be 8-aligned).
-        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-        this.check_ptr_access_align(
-            place.ptr,
-            place.layout.size,
-            align,
-            CheckInAllocMsg::MemoryAccessTest,
-        )?;
-
         let old = this.atomic_compare_exchange_scalar(
             &place,
             &expect_old,