diff --git a/src/concurrency/data_race.rs b/src/concurrency/data_race.rs index 35baf97b727..f6f0ce528ed 100644 --- a/src/concurrency/data_race.rs +++ b/src/concurrency/data_race.rs @@ -455,11 +455,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { fn allow_data_races_ref(&self, op: impl FnOnce(&MiriEvalContext<'mir, 'tcx>) -> R) -> R { let this = self.eval_context_ref(); if let Some(data_race) = &this.machine.data_race { - data_race.ongoing_atomic_access.set(true); + data_race.ongoing_action_data_race_free.set(true); } let result = op(this); if let Some(data_race) = &this.machine.data_race { - data_race.ongoing_atomic_access.set(false); + data_race.ongoing_action_data_race_free.set(false); } result } @@ -474,11 +474,11 @@ fn allow_data_races_mut( ) -> R { let this = self.eval_context_mut(); if let Some(data_race) = &this.machine.data_race { - data_race.ongoing_atomic_access.set(true); + data_race.ongoing_action_data_race_free.set(true); } let result = op(this); if let Some(data_race) = &this.machine.data_race { - data_race.ongoing_atomic_access.set(false); + data_race.ongoing_action_data_race_free.set(false); } result } @@ -1151,8 +1151,9 @@ pub struct GlobalState { multi_threaded: Cell, /// A flag to mark we are currently performing - /// an atomic access to supress data race detection - ongoing_atomic_access: Cell, + /// a data race free action (such as atomic access) + /// to suppress the race detector + ongoing_action_data_race_free: Cell, /// Mapping of a vector index to a known set of thread /// clocks, this is not directly mapping from a thread id @@ -1205,7 +1206,7 @@ impl GlobalState { pub fn new() -> Self { let mut global_state = GlobalState { multi_threaded: Cell::new(false), - ongoing_atomic_access: Cell::new(false), + ongoing_action_data_race_free: Cell::new(false), vector_clocks: RefCell::new(IndexVec::new()), vector_info: RefCell::new(IndexVec::new()), thread_info: RefCell::new(IndexVec::new()), @@ -1232,14 +1233,14 @@ 
pub fn new() -> Self { } // We perform data race detection when there are more than 1 active thread - // and we are not currently in the middle of an atomic acces where data race - // is impossible + // and we have not temporarily disabled race detection to perform something + // data race free fn race_detecting(&self) -> bool { - self.multi_threaded.get() && !self.ongoing_atomic_access.get() + self.multi_threaded.get() && !self.ongoing_action_data_race_free.get() } - pub fn ongoing_atomic_access(&self) -> bool { - self.ongoing_atomic_access.get() + pub fn ongoing_action_data_race_free(&self) -> bool { + self.ongoing_action_data_race_free.get() } // Try to find vector index values that can potentially be re-used diff --git a/src/concurrency/weak_memory.rs b/src/concurrency/weak_memory.rs index 237a13ea864..dc32a3ddca4 100644 --- a/src/concurrency/weak_memory.rs +++ b/src/concurrency/weak_memory.rs @@ -139,7 +139,7 @@ fn is_overlapping(&self, range: AllocRange) -> bool { /// after all the prior atomic accesses so the location no longer needs to exhibit /// any weak memory behaviours until further atomic accesses. pub fn memory_accessed<'tcx>(&self, range: AllocRange, global: &GlobalState) { - if !global.ongoing_atomic_access() { + if !global.ongoing_action_data_race_free() { let mut buffers = self.store_buffers.borrow_mut(); let access_type = buffers.access_type(range); match access_type { @@ -420,7 +420,9 @@ fn validate_overlapping_atomic(&self, place: &MPlaceTy<'tcx, Tag>) -> InterpResu && !alloc_clocks .race_free_with_atomic(range, this.machine.data_race.as_ref().unwrap()) { - throw_ub_format!("racy imperfectly overlapping atomic access is not possible in the C++20 memory model"); + throw_ub_format!( + "racy imperfectly overlapping atomic access is not possible in the C++20 memory model" + ); } } Ok(())