Give flag temp disabling race detector a better name

This commit is contained in:
Andy Wang 2022-05-29 21:10:36 +01:00
parent 8215702d5a
commit c731071640
No known key found for this signature in database
GPG Key ID: 181B49F9F38F3374
2 changed files with 17 additions and 14 deletions

View File

@ -455,11 +455,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriEvalContext<'mir, 'tcx>) -> R) -> R { fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriEvalContext<'mir, 'tcx>) -> R) -> R {
let this = self.eval_context_ref(); let this = self.eval_context_ref();
if let Some(data_race) = &this.machine.data_race { if let Some(data_race) = &this.machine.data_race {
data_race.ongoing_atomic_access.set(true); data_race.ongoing_action_data_race_free.set(true);
} }
let result = op(this); let result = op(this);
if let Some(data_race) = &this.machine.data_race { if let Some(data_race) = &this.machine.data_race {
data_race.ongoing_atomic_access.set(false); data_race.ongoing_action_data_race_free.set(false);
} }
result result
} }
@ -474,11 +474,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
) -> R { ) -> R {
let this = self.eval_context_mut(); let this = self.eval_context_mut();
if let Some(data_race) = &this.machine.data_race { if let Some(data_race) = &this.machine.data_race {
data_race.ongoing_atomic_access.set(true); data_race.ongoing_action_data_race_free.set(true);
} }
let result = op(this); let result = op(this);
if let Some(data_race) = &this.machine.data_race { if let Some(data_race) = &this.machine.data_race {
data_race.ongoing_atomic_access.set(false); data_race.ongoing_action_data_race_free.set(false);
} }
result result
} }
@ -1151,8 +1151,9 @@ pub struct GlobalState {
multi_threaded: Cell<bool>, multi_threaded: Cell<bool>,
/// A flag to mark we are currently performing /// A flag to mark we are currently performing
/// an atomic access to suppress data race detection /// a data race free action (such as atomic access)
ongoing_atomic_access: Cell<bool>, /// to suppress the race detector
ongoing_action_data_race_free: Cell<bool>,
/// Mapping of a vector index to a known set of thread /// Mapping of a vector index to a known set of thread
/// clocks, this is not directly mapping from a thread id /// clocks, this is not directly mapping from a thread id
@ -1205,7 +1206,7 @@ impl GlobalState {
pub fn new() -> Self { pub fn new() -> Self {
let mut global_state = GlobalState { let mut global_state = GlobalState {
multi_threaded: Cell::new(false), multi_threaded: Cell::new(false),
ongoing_atomic_access: Cell::new(false), ongoing_action_data_race_free: Cell::new(false),
vector_clocks: RefCell::new(IndexVec::new()), vector_clocks: RefCell::new(IndexVec::new()),
vector_info: RefCell::new(IndexVec::new()), vector_info: RefCell::new(IndexVec::new()),
thread_info: RefCell::new(IndexVec::new()), thread_info: RefCell::new(IndexVec::new()),
@ -1232,14 +1233,14 @@ impl GlobalState {
} }
// We perform data race detection when there is more than one active thread // We perform data race detection when there is more than one active thread
// and we are not currently in the middle of an atomic access where data race // and we have not temporarily disabled race detection to perform something
// is impossible // data race free
fn race_detecting(&self) -> bool { fn race_detecting(&self) -> bool {
self.multi_threaded.get() && !self.ongoing_atomic_access.get() self.multi_threaded.get() && !self.ongoing_action_data_race_free.get()
} }
pub fn ongoing_atomic_access(&self) -> bool { pub fn ongoing_action_data_race_free(&self) -> bool {
self.ongoing_atomic_access.get() self.ongoing_action_data_race_free.get()
} }
// Try to find vector index values that can potentially be re-used // Try to find vector index values that can potentially be re-used

View File

@ -139,7 +139,7 @@ impl StoreBufferAlloc {
/// after all the prior atomic accesses so the location no longer needs to exhibit /// after all the prior atomic accesses so the location no longer needs to exhibit
/// any weak memory behaviours until further atomic accesses. /// any weak memory behaviours until further atomic accesses.
pub fn memory_accessed<'tcx>(&self, range: AllocRange, global: &GlobalState) { pub fn memory_accessed<'tcx>(&self, range: AllocRange, global: &GlobalState) {
if !global.ongoing_atomic_access() { if !global.ongoing_action_data_race_free() {
let mut buffers = self.store_buffers.borrow_mut(); let mut buffers = self.store_buffers.borrow_mut();
let access_type = buffers.access_type(range); let access_type = buffers.access_type(range);
match access_type { match access_type {
@ -420,7 +420,9 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
&& !alloc_clocks && !alloc_clocks
.race_free_with_atomic(range, this.machine.data_race.as_ref().unwrap()) .race_free_with_atomic(range, this.machine.data_race.as_ref().unwrap())
{ {
throw_ub_format!("racy imperfectly overlapping atomic access is not possible in the C++20 memory model"); throw_ub_format!(
"racy imperfectly overlapping atomic access is not possible in the C++20 memory model"
);
} }
} }
Ok(()) Ok(())