Rework to work with machine hook.

This commit is contained in:
JCTyblaidd 2020-12-11 19:32:25 +00:00
parent 81c4eb7d74
commit e73579632b
2 changed files with 49 additions and 23 deletions

View File

@ -76,7 +76,7 @@
use crate::{
ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MiriEvalContext, MiriEvalContextExt,
OpTy, Pointer, RangeMap, ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp,
VectorIdx,
VectorIdx, MemoryKind, MiriMemoryKind
};
pub type AllocExtra = VClockAlloc;
@ -674,6 +674,21 @@ fn validate_atomic_fence(&mut self, atomic: AtomicFenceOp) -> InterpResult<'tcx>
Ok(())
}
}
/// Reset the data-race vector clocks for the `size` bytes starting at `ptr`
/// back to the zero timestamp (see `VClockAlloc::reset_clocks`).
///
/// This is a no-op unless the data-race detector is enabled and the program
/// has been observed to be multi-threaded.
fn reset_vector_clocks(
    &mut self,
    ptr: Pointer<Tag>,
    size: Size
) -> InterpResult<'tcx> {
    let this = self.eval_context_mut();
    // Only do work when the detector is active and multi-threaded execution
    // has actually been seen; otherwise the clocks are irrelevant.
    let multi_threaded = match &this.memory.extra.data_race {
        Some(data_race) => data_race.multi_threaded.get(),
        None => false,
    };
    if multi_threaded {
        let alloc = this.memory.get_raw_mut(ptr.alloc_id)?;
        // The per-allocation race metadata must exist when the global
        // detector is enabled, hence the unwrap.
        alloc.extra.data_race.as_mut().unwrap().reset_clocks(ptr.offset, size);
    }
    Ok(())
}
}
/// Vector clock metadata for a logical memory allocation.
@ -688,7 +703,18 @@ pub struct VClockAlloc {
impl VClockAlloc {
/// Create a new data-race detector for newly allocated memory.
pub fn new_allocation(global: &MemoryExtra, len: Size, track_alloc: bool) -> VClockAlloc {
pub fn new_allocation(global: &MemoryExtra, len: Size, kind: MemoryKind<MiriMemoryKind>) -> VClockAlloc {
let track_alloc = match kind {
// User allocated and stack memory should track allocation.
MemoryKind::Machine(
MiriMemoryKind::Rust | MiriMemoryKind::C | MiriMemoryKind::WinHeap
) | MemoryKind::Stack => true,
// Other global memory should trace races but be allocated at the 0 timestamp.
MemoryKind::Machine(
MiriMemoryKind::Global | MiriMemoryKind::Machine | MiriMemoryKind::Env |
MiriMemoryKind::ExternStatic | MiriMemoryKind::Tls
) | MemoryKind::CallerLocation | MemoryKind::Vtable => false
};
let (alloc_timestamp, alloc_index) = if track_alloc {
let (alloc_index, clocks) = global.current_thread_state();
let alloc_timestamp = clocks.clock[alloc_index];
@ -704,6 +730,14 @@ pub fn new_allocation(global: &MemoryExtra, len: Size, track_alloc: bool) -> VCl
}
}
/// Reset the clocks of every memory cell in the `len` bytes starting at
/// `offset`: each affected range is overwritten with
/// `MemoryCellClocks::new(0, VectorIdx::MAX_INDEX)`, i.e. the zero timestamp.
fn reset_clocks(&mut self, offset: Size, len: Size) {
    let mut ranges = self.alloc_ranges.borrow_mut();
    ranges
        .iter_mut(offset, len)
        .for_each(|(_, cell_clocks)| {
            // Replace this portion of the range with a fresh clock state.
            *cell_clocks = MemoryCellClocks::new(0, VectorIdx::MAX_INDEX);
        });
}
// Find an index, if one exists where the value
// in `l` is greater than the value in `r`.
fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {

View File

@ -478,27 +478,7 @@ fn init_allocation_extra<'b>(
(None, Tag::Untagged)
};
let race_alloc = if let Some(data_race) = &memory_extra.data_race {
match kind {
// V-Table generation is lazy and so racy, so do not track races.
// Also, V-Tables are read-only, so no data races can occur.
// Must be disabled since V-Tables are initialized via interpreter
// writes on demand and can incorrectly cause the data-race detector
// to trigger.
MemoryKind::Vtable => None,
// User allocated and stack memory should track allocation.
MemoryKind::Machine(
MiriMemoryKind::Rust | MiriMemoryKind::C | MiriMemoryKind::WinHeap
) | MemoryKind::Stack => Some(
data_race::AllocExtra::new_allocation(&data_race, alloc.size, true)
),
// Other global memory should trace races but be allocated at the 0 timestamp.
MemoryKind::Machine(
MiriMemoryKind::Global | MiriMemoryKind::Machine | MiriMemoryKind::Env |
MiriMemoryKind::ExternStatic | MiriMemoryKind::Tls
) | MemoryKind::CallerLocation => Some(
data_race::AllocExtra::new_allocation(&data_race, alloc.size, false)
)
}
Some(data_race::AllocExtra::new_allocation(&data_race, alloc.size, kind))
} else {
None
};
@ -530,6 +510,18 @@ fn before_deallocation(
Ok(())
}
/// Machine hook called after a static memory region has been initialized by
/// the interpreter. Resets the data-race vector clocks for the `size` bytes
/// at `ptr` — presumably so that on-demand interpreter writes into statics
/// are not reported as data races (NOTE(review): confirm against callers).
fn after_static_mem_initialized(
    ecx: &mut InterpCx<'mir, 'tcx, Self>,
    ptr: Pointer<Self::PointerTag>,
    size: Size,
) -> InterpResult<'tcx> {
    // Cheap early-out when the data-race detector is disabled;
    // `reset_vector_clocks` would also handle this case itself.
    if ecx.memory.extra.data_race.is_none() {
        return Ok(());
    }
    ecx.reset_vector_clocks(ptr, size)
}
#[inline(always)]
fn tag_global_base_pointer(memory_extra: &MemoryExtra, id: AllocId) -> Self::PointerTag {
if let Some(stacked_borrows) = &memory_extra.stacked_borrows {