use early return for race_detecting() logic

Ralf Jung 2024-09-15 13:25:26 +02:00
parent a888905226
commit 339f68bd6c


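The change is the same in every hunk below: a function body wrapped in `if global.race_detecting() { ... }` (sometimes with a trailing `else { Ok(()) }`) is rewritten to bail out early when race detection is off, which removes one level of nesting from the real work. A minimal standalone sketch of the pattern, not part of the diff, with a hypothetical `enabled` flag standing in for `race_detecting()`:

// Before: the entire body is nested inside the condition.
fn scan_nested(enabled: bool, data: &[u8]) -> Result<(), String> {
    if enabled {
        for byte in data {
            if *byte == 0xFF {
                return Err("found sentinel byte".to_string());
            }
        }
        Ok(())
    } else {
        Ok(())
    }
}

// After: an early return handles the "detector disabled" case up front,
// and the real work sits one indentation level shallower.
fn scan_early_return(enabled: bool, data: &[u8]) -> Result<(), String> {
    if !enabled {
        return Ok(());
    }
    for byte in data {
        if *byte == 0xFF {
            return Err("found sentinel byte".to_string());
        }
    }
    Ok(())
}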
@@ -1048,32 +1048,31 @@ pub fn read<'tcx>(
     ) -> InterpResult<'tcx> {
         let current_span = machine.current_span();
         let global = machine.data_race.as_ref().unwrap();
-        if global.race_detecting() {
-            let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            let mut alloc_ranges = self.alloc_ranges.borrow_mut();
-            for (mem_clocks_range, mem_clocks) in
-                alloc_ranges.iter_mut(access_range.start, access_range.size)
-            {
-                if let Err(DataRace) =
-                    mem_clocks.read_race_detect(&mut thread_clocks, index, read_type, current_span)
-                {
-                    drop(thread_clocks);
-                    // Report data-race.
-                    return Self::report_data_race(
-                        global,
-                        &machine.threads,
-                        mem_clocks,
-                        AccessType::NaRead(read_type),
-                        access_range.size,
-                        interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
-                        ty,
-                    );
-                }
-            }
-            Ok(())
-        } else {
-            Ok(())
+        if !global.race_detecting() {
+            return Ok(());
         }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        let mut alloc_ranges = self.alloc_ranges.borrow_mut();
+        for (mem_clocks_range, mem_clocks) in
+            alloc_ranges.iter_mut(access_range.start, access_range.size)
+        {
+            if let Err(DataRace) =
+                mem_clocks.read_race_detect(&mut thread_clocks, index, read_type, current_span)
+            {
+                drop(thread_clocks);
+                // Report data-race.
+                return Self::report_data_race(
+                    global,
+                    &machine.threads,
+                    mem_clocks,
+                    AccessType::NaRead(read_type),
+                    access_range.size,
+                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
+                    ty,
+                );
+            }
+        }
+        Ok(())
     }
 
     /// Detect data-races for an unsynchronized write operation. It will not perform
@@ -1091,34 +1090,30 @@ pub fn write<'tcx>(
     ) -> InterpResult<'tcx> {
         let current_span = machine.current_span();
         let global = machine.data_race.as_mut().unwrap();
-        if global.race_detecting() {
-            let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            for (mem_clocks_range, mem_clocks) in
-                self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
-            {
-                if let Err(DataRace) = mem_clocks.write_race_detect(
-                    &mut thread_clocks,
-                    index,
-                    write_type,
-                    current_span,
-                ) {
-                    drop(thread_clocks);
-                    // Report data-race
-                    return Self::report_data_race(
-                        global,
-                        &machine.threads,
-                        mem_clocks,
-                        AccessType::NaWrite(write_type),
-                        access_range.size,
-                        interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
-                        ty,
-                    );
-                }
-            }
-            Ok(())
-        } else {
-            Ok(())
+        if !global.race_detecting() {
+            return Ok(());
         }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        for (mem_clocks_range, mem_clocks) in
+            self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
+        {
+            if let Err(DataRace) =
+                mem_clocks.write_race_detect(&mut thread_clocks, index, write_type, current_span)
+            {
+                drop(thread_clocks);
+                // Report data-race
+                return Self::report_data_race(
+                    global,
+                    &machine.threads,
+                    mem_clocks,
+                    AccessType::NaWrite(write_type),
+                    access_range.size,
+                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
+                    ty,
+                );
+            }
+        }
+        Ok(())
     }
 }
@@ -1149,48 +1144,50 @@ impl FrameState {
     pub fn local_write(&self, local: mir::Local, storage_live: bool, machine: &MiriMachine<'_>) {
         let current_span = machine.current_span();
         let global = machine.data_race.as_ref().unwrap();
-        if global.race_detecting() {
-            let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            // This should do the same things as `MemoryCellClocks::write_race_detect`.
-            if !current_span.is_dummy() {
-                thread_clocks.clock.index_mut(index).span = current_span;
-            }
-            let mut clocks = self.local_clocks.borrow_mut();
-            if storage_live {
-                let new_clocks = LocalClocks {
-                    write: thread_clocks.clock[index],
-                    write_type: NaWriteType::Allocate,
-                    read: VTimestamp::ZERO,
-                };
-                // There might already be an entry in the map for this, if the local was previously
-                // live already.
-                clocks.insert(local, new_clocks);
-            } else {
-                // This can fail to exist if `race_detecting` was false when the allocation
-                // occurred, in which case we can backdate this to the beginning of time.
-                let clocks = clocks.entry(local).or_insert_with(Default::default);
-                clocks.write = thread_clocks.clock[index];
-                clocks.write_type = NaWriteType::Write;
-            }
+        if !global.race_detecting() {
+            return;
+        }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        // This should do the same things as `MemoryCellClocks::write_race_detect`.
+        if !current_span.is_dummy() {
+            thread_clocks.clock.index_mut(index).span = current_span;
+        }
+        let mut clocks = self.local_clocks.borrow_mut();
+        if storage_live {
+            let new_clocks = LocalClocks {
+                write: thread_clocks.clock[index],
+                write_type: NaWriteType::Allocate,
+                read: VTimestamp::ZERO,
+            };
+            // There might already be an entry in the map for this, if the local was previously
+            // live already.
+            clocks.insert(local, new_clocks);
+        } else {
+            // This can fail to exist if `race_detecting` was false when the allocation
+            // occurred, in which case we can backdate this to the beginning of time.
+            let clocks = clocks.entry(local).or_insert_with(Default::default);
+            clocks.write = thread_clocks.clock[index];
+            clocks.write_type = NaWriteType::Write;
         }
     }
 
     pub fn local_read(&self, local: mir::Local, machine: &MiriMachine<'_>) {
         let current_span = machine.current_span();
         let global = machine.data_race.as_ref().unwrap();
-        if global.race_detecting() {
-            let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            // This should do the same things as `MemoryCellClocks::read_race_detect`.
-            if !current_span.is_dummy() {
-                thread_clocks.clock.index_mut(index).span = current_span;
-            }
-            thread_clocks.clock.index_mut(index).set_read_type(NaReadType::Read);
-            // This can fail to exist if `race_detecting` was false when the allocation
-            // occurred, in which case we can backdate this to the beginning of time.
-            let mut clocks = self.local_clocks.borrow_mut();
-            let clocks = clocks.entry(local).or_insert_with(Default::default);
-            clocks.read = thread_clocks.clock[index];
+        if !global.race_detecting() {
+            return;
         }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        // This should do the same things as `MemoryCellClocks::read_race_detect`.
+        if !current_span.is_dummy() {
+            thread_clocks.clock.index_mut(index).span = current_span;
+        }
+        thread_clocks.clock.index_mut(index).set_read_type(NaReadType::Read);
+        // This can fail to exist if `race_detecting` was false when the allocation
+        // occurred, in which case we can backdate this to the beginning of time.
+        let mut clocks = self.local_clocks.borrow_mut();
+        let clocks = clocks.entry(local).or_insert_with(Default::default);
+        clocks.read = thread_clocks.clock[index];
     }
 
     pub fn local_moved_to_memory(
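The form of the early return follows the return type: `read` and `write` return `InterpResult<'tcx>`, so the guard is `return Ok(());`, while the `FrameState` methods return `()`, so a bare `return;` is enough. A tiny sketch of the two forms, illustrative names only:

// Sketch only: the two guard forms used in this commit.
fn fallible(enabled: bool) -> Result<(), String> {
    if !enabled {
        return Ok(()); // functions returning a Result exit with Ok(())
    }
    // ... fallible work ...
    Ok(())
}

fn infallible(enabled: bool) {
    if !enabled {
        return; // functions returning () exit with a bare return
    }
    // ... work ...
}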
@@ -1200,21 +1197,22 @@ pub fn local_moved_to_memory(
         machine: &MiriMachine<'_>,
     ) {
         let global = machine.data_race.as_ref().unwrap();
-        if global.race_detecting() {
-            let (index, _thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            // Get the time the last write actually happened. This can fail to exist if
-            // `race_detecting` was false when the write occurred, in that case we can backdate this
-            // to the beginning of time.
-            let local_clocks = self.local_clocks.borrow_mut().remove(&local).unwrap_or_default();
-            for (_mem_clocks_range, mem_clocks) in alloc.alloc_ranges.get_mut().iter_mut_all() {
-                // The initialization write for this already happened, just at the wrong timestamp.
-                // Check that the thread index matches what we expect.
-                assert_eq!(mem_clocks.write.0, index);
-                // Convert the local's clocks into memory clocks.
-                mem_clocks.write = (index, local_clocks.write);
-                mem_clocks.write_type = local_clocks.write_type;
-                mem_clocks.read = VClock::new_with_index(index, local_clocks.read);
-            }
+        if !global.race_detecting() {
+            return;
+        }
+        let (index, _thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        // Get the time the last write actually happened. This can fail to exist if
+        // `race_detecting` was false when the write occurred, in that case we can backdate this
+        // to the beginning of time.
+        let local_clocks = self.local_clocks.borrow_mut().remove(&local).unwrap_or_default();
+        for (_mem_clocks_range, mem_clocks) in alloc.alloc_ranges.get_mut().iter_mut_all() {
+            // The initialization write for this already happened, just at the wrong timestamp.
+            // Check that the thread index matches what we expect.
+            assert_eq!(mem_clocks.write.0, index);
+            // Convert the local's clocks into memory clocks.
+            mem_clocks.write = (index, local_clocks.write);
+            mem_clocks.write_type = local_clocks.write_type;
+            mem_clocks.read = VClock::new_with_index(index, local_clocks.read);
         }
     }
 }
@@ -1403,69 +1401,67 @@ fn validate_atomic_op<A: Debug + Copy>(
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_ref();
         assert!(access.is_atomic());
-        if let Some(data_race) = &this.machine.data_race {
-            if data_race.race_detecting() {
-                let size = place.layout.size;
-                let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
-                // Load and log the atomic operation.
-                // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
-                let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
-                trace!(
-                    "Atomic op({}) with ordering {:?} on {:?} (size={})",
-                    access.description(None, None),
-                    &atomic,
-                    place.ptr(),
-                    size.bytes()
-                );
-                let current_span = this.machine.current_span();
-                // Perform the atomic operation.
-                data_race.maybe_perform_sync_operation(
-                    &this.machine.threads,
-                    current_span,
-                    |index, mut thread_clocks| {
-                        for (mem_clocks_range, mem_clocks) in
-                            alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
-                        {
-                            if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic)
-                            {
-                                mem::drop(thread_clocks);
-                                return VClockAlloc::report_data_race(
-                                    data_race,
-                                    &this.machine.threads,
-                                    mem_clocks,
-                                    access,
-                                    place.layout.size,
-                                    interpret::Pointer::new(
-                                        alloc_id,
-                                        Size::from_bytes(mem_clocks_range.start),
-                                    ),
-                                    None,
-                                )
-                                .map(|_| true);
-                            }
-                        }
-                        // This conservatively assumes all operations have release semantics
-                        Ok(true)
-                    },
-                )?;
-                // Log changes to atomic memory.
-                if tracing::enabled!(tracing::Level::TRACE) {
-                    for (_offset, mem_clocks) in
-                        alloc_meta.alloc_ranges.borrow().iter(base_offset, size)
-                    {
-                        trace!(
-                            "Updated atomic memory({:?}, size={}) to {:#?}",
-                            place.ptr(),
-                            size.bytes(),
-                            mem_clocks.atomic_ops
-                        );
-                    }
-                }
-            }
-        }
+        let Some(data_race) = &this.machine.data_race else { return Ok(()) };
+        if !data_race.race_detecting() {
+            return Ok(());
+        }
+        let size = place.layout.size;
+        let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
+        // Load and log the atomic operation.
+        // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
+        let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
+        trace!(
+            "Atomic op({}) with ordering {:?} on {:?} (size={})",
+            access.description(None, None),
+            &atomic,
+            place.ptr(),
+            size.bytes()
+        );
+        let current_span = this.machine.current_span();
+        // Perform the atomic operation.
+        data_race.maybe_perform_sync_operation(
+            &this.machine.threads,
+            current_span,
+            |index, mut thread_clocks| {
+                for (mem_clocks_range, mem_clocks) in
+                    alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
+                {
+                    if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic) {
+                        mem::drop(thread_clocks);
+                        return VClockAlloc::report_data_race(
+                            data_race,
+                            &this.machine.threads,
+                            mem_clocks,
+                            access,
+                            place.layout.size,
+                            interpret::Pointer::new(
+                                alloc_id,
+                                Size::from_bytes(mem_clocks_range.start),
+                            ),
+                            None,
+                        )
+                        .map(|_| true);
+                    }
+                }
+                // This conservatively assumes all operations have release semantics
+                Ok(true)
+            },
+        )?;
+        // Log changes to atomic memory.
+        if tracing::enabled!(tracing::Level::TRACE) {
+            for (_offset, mem_clocks) in alloc_meta.alloc_ranges.borrow().iter(base_offset, size) {
+                trace!(
+                    "Updated atomic memory({:?}, size={}) to {:#?}",
+                    place.ptr(),
+                    size.bytes(),
+                    mem_clocks.atomic_ops
+                );
+            }
+        }
         Ok(())
     }
 }
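The last hunk also flattens the outer `if let Some(data_race) = ...` nesting with `let ... else`, so both the "no data-race machinery" case and the "race detection disabled" case exit up front. A minimal sketch of that combination, with made-up types:

// Sketch only: `let ... else` plus a guard clause, as in validate_atomic_op.
struct Detector {
    enabled: bool,
}

fn validate(detector: Option<&Detector>) -> Result<(), String> {
    // If there is no detector at all, there is nothing to validate.
    let Some(detector) = detector else { return Ok(()) };
    // If the detector is present but switched off, also return early.
    if !detector.enabled {
        return Ok(());
    }
    // ... the actual validation runs here, at top-level indentation ...
    Ok(())
}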