Run rustfmt on vector_clock.rs and data_race.rs

This commit is contained in:
parent a3b7839bbd
commit 0b0264fc82

431 src/data_race.rs
@@ -11,7 +11,7 @@
 //! a data race occurs between two memory accesses if they are on different threads, at least one operation
 //! is non-atomic, at least one operation is a write and neither access happens-before the other. Read the link
 //! for full definition.
 //!
 //! This re-uses vector indexes for threads that are known to be unable to report data-races, this is valid
 //! because it only re-uses vector indexes once all currently-active (not-terminated) threads have an internal
 //! vector clock that happens-after the join operation of the candidate thread. Threads that have not been joined
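The module comment above states the classic vector-clock criterion for a data race. As a rough, self-contained sketch of that check (illustrative only, not part of this commit; `VClock` and the helpers below are simplified stand-ins for the types defined in vector_clock.rs):

```rust
type VectorIdx = usize;
type VTimestamp = u64;

/// Simplified stand-in for the vector clock defined in vector_clock.rs.
#[derive(Default, Clone, Debug)]
struct VClock(Vec<VTimestamp>);

impl VClock {
    /// Timestamp this clock has observed for a given thread index (0 if never observed).
    fn get(&self, idx: VectorIdx) -> VTimestamp {
        self.0.get(idx).copied().unwrap_or(0)
    }
}

/// A write at (write_index, write_timestamp) happens-before the thread owning `clock`
/// exactly when that thread has already observed the writer's timestamp.
fn write_happens_before(clock: &VClock, write_index: VectorIdx, write_timestamp: VTimestamp) -> bool {
    write_timestamp <= clock.get(write_index)
}

/// A non-atomic access races with the last write unless that write happens-before it.
fn is_data_race(clock: &VClock, last_write_index: VectorIdx, last_write_timestamp: VTimestamp) -> bool {
    !write_happens_before(clock, last_write_index, last_write_timestamp)
}
```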
@@ -43,21 +43,21 @@
 //! read, write and deallocate functions and should be cleaned up in the future.

 use std::{
-    fmt::Debug, rc::Rc,
-    cell::{Cell, RefCell, Ref, RefMut}, mem
+    cell::{Cell, Ref, RefCell, RefMut},
+    fmt::Debug,
+    mem,
+    rc::Rc,
 };

+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_index::vec::{Idx, IndexVec};
-use rustc_target::abi::Size;
 use rustc_middle::{mir, ty::layout::TyAndLayout};
-use rustc_data_structures::fx::{FxHashSet, FxHashMap};
+use rustc_target::abi::Size;

 use crate::{
-    MiriEvalContext, MiriEvalContextExt,
-    ThreadId, Tag, RangeMap,
-    InterpResult, Pointer, ScalarMaybeUninit,
-    MPlaceTy, OpTy, MemPlaceMeta, ImmTy, Immediate,
-    VClock, VSmallClockMap, VectorIdx, VTimestamp
+    ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MiriEvalContext, MiriEvalContextExt,
+    OpTy, Pointer, RangeMap, ScalarMaybeUninit, Tag, ThreadId, VClock, VSmallClockMap, VTimestamp,
+    VectorIdx,
 };

 pub type AllocExtra = VClockAlloc;
@@ -89,7 +89,6 @@ pub enum AtomicWriteOp {
     SeqCst,
 }

-
 /// Valid atomic fence operations, subset of atomic::Ordering.
 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
 pub enum AtomicFenceOp {
@@ -99,14 +98,11 @@ pub enum AtomicFenceOp {
     SeqCst,
 }

-
-
 /// The current set of vector clocks describing the state
 /// of a thread, contains the happens-before clock and
 /// additional metadata to model atomic fence operations.
 #[derive(Clone, Default, Debug)]
 struct ThreadClockSet {
-
     /// The increasing clock representing timestamps
     /// that happen-before this thread.
     clock: VClock,
@@ -120,9 +116,7 @@ struct ThreadClockSet {
     fence_release: VClock,
 }

-
 impl ThreadClockSet {
-
     /// Apply the effects of a release fence to this
     /// set of thread vector clocks.
     #[inline]
@@ -152,7 +146,6 @@ impl ThreadClockSet {
     }
 }

-
 /// Error returned by finding a data race
 /// should be elaborated upon.
 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
@@ -164,7 +157,6 @@ pub struct DataRace;
 /// exists on the memory cell.
 #[derive(Clone, PartialEq, Eq, Default, Debug)]
 struct AtomicMemoryCellClocks {
-
     /// The clock-vector of the timestamp of the last atomic
     /// read operation performed by each thread.
     /// This detects potential data-races between atomic read
@@ -179,7 +171,7 @@ struct AtomicMemoryCellClocks {

     /// Synchronization vector for acquire-release semantics
     /// contains the vector of timestamps that will
     /// happen-before a thread if an acquire-load is
     /// performed on the data.
     sync_vector: VClock,

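The `sync_vector` field documented above carries release/acquire synchronization from a store to a later load. A minimal sketch of that protocol, assuming simplified stand-in types rather than the ones in this file:

```rust
/// Simplified stand-in clock; `join` takes the per-thread maximum of two clocks.
#[derive(Default, Clone, Debug)]
struct Clock(Vec<u64>);

impl Clock {
    fn join(&mut self, other: &Clock) {
        if self.0.len() < other.0.len() {
            self.0.resize(other.0.len(), 0);
        }
        for (lhs, &rhs) in self.0.iter_mut().zip(other.0.iter()) {
            *lhs = (*lhs).max(rhs);
        }
    }
}

/// A release store publishes the writer's happens-before clock into the
/// location's synchronization vector.
fn release_store(sync_vector: &mut Clock, writer_clock: &Clock) {
    sync_vector.join(writer_clock);
}

/// An acquire load joins that vector into the reader's clock, so everything
/// ordered before the store now happens-before the reader.
fn acquire_load(reader_clock: &mut Clock, sync_vector: &Clock) {
    reader_clock.join(sync_vector);
}
```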
@@ -195,7 +187,6 @@ struct AtomicMemoryCellClocks {
 /// for data-race detection.
 #[derive(Clone, PartialEq, Eq, Debug)]
 struct MemoryCellClocks {
-
     /// The vector-clock timestamp of the last write
     /// corresponding to the writing threads timestamp.
     write: VTimestamp,
@@ -215,7 +206,6 @@ struct MemoryCellClocks {
     atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
 }

-
 /// Create a default memory cell clocks instance
 /// for uninitialized memory.
 impl Default for MemoryCellClocks {
@@ -224,20 +214,18 @@ impl Default for MemoryCellClocks {
             read: VClock::default(),
             write: 0,
             write_index: VectorIdx::MAX_INDEX,
-            atomic_ops: None
+            atomic_ops: None,
         }
     }
 }

-
 impl MemoryCellClocks {
-
     /// Load the internal atomic memory cells if they exist.
     #[inline]
     fn atomic(&self) -> Option<&AtomicMemoryCellClocks> {
         match &self.atomic_ops {
             Some(op) => Some(&*op),
-            None => None
+            None => None,
         }
     }

@@ -251,7 +239,11 @@ impl MemoryCellClocks {
     /// Update memory cell data-race tracking for atomic
     /// load acquire semantics, is a no-op if this memory was
     /// not used previously as atomic memory.
-    fn load_acquire(&mut self, clocks: &mut ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
+    fn load_acquire(
+        &mut self,
+        clocks: &mut ThreadClockSet,
+        index: VectorIdx,
+    ) -> Result<(), DataRace> {
         self.atomic_read_detect(clocks, index)?;
         if let Some(atomic) = self.atomic() {
             clocks.clock.join(&atomic.sync_vector);
@@ -262,7 +254,11 @@ impl MemoryCellClocks {
     /// Update memory cell data-race tracking for atomic
     /// load relaxed semantics, is a no-op if this memory was
     /// not used previously as atomic memory.
-    fn load_relaxed(&mut self, clocks: &mut ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
+    fn load_relaxed(
+        &mut self,
+        clocks: &mut ThreadClockSet,
+        index: VectorIdx,
+    ) -> Result<(), DataRace> {
         self.atomic_read_detect(clocks, index)?;
         if let Some(atomic) = self.atomic() {
             clocks.fence_acquire.join(&atomic.sync_vector);
@@ -270,7 +266,6 @@ impl MemoryCellClocks {
         Ok(())
     }

-
     /// Update the memory cell data-race tracking for atomic
     /// store release semantics.
     fn store_release(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
@@ -313,10 +308,14 @@ impl MemoryCellClocks {
         atomic.sync_vector.join(&clocks.fence_release);
         Ok(())
     }

     /// Detect data-races with an atomic read, caused by a non-atomic write that does
     /// not happen-before the atomic-read.
-    fn atomic_read_detect(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
+    fn atomic_read_detect(
+        &mut self,
+        clocks: &ThreadClockSet,
+        index: VectorIdx,
+    ) -> Result<(), DataRace> {
         log::trace!("Atomic read with vectors: {:#?} :: {:#?}", self, clocks);
         if self.write <= clocks.clock[self.write_index] {
             let atomic = self.atomic_mut();
@@ -329,7 +328,11 @@ impl MemoryCellClocks {

     /// Detect data-races with an atomic write, either with a non-atomic read or with
     /// a non-atomic write.
-    fn atomic_write_detect(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
+    fn atomic_write_detect(
+        &mut self,
+        clocks: &ThreadClockSet,
+        index: VectorIdx,
+    ) -> Result<(), DataRace> {
         log::trace!("Atomic write with vectors: {:#?} :: {:#?}", self, clocks);
         if self.write <= clocks.clock[self.write_index] && self.read <= clocks.clock {
             let atomic = self.atomic_mut();
@@ -342,7 +345,11 @@ impl MemoryCellClocks {

     /// Detect races for non-atomic read operations at the current memory cell
     /// returns true if a data-race is detected.
-    fn read_race_detect(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
+    fn read_race_detect(
+        &mut self,
+        clocks: &ThreadClockSet,
+        index: VectorIdx,
+    ) -> Result<(), DataRace> {
         log::trace!("Unsynchronized read with vectors: {:#?} :: {:#?}", self, clocks);
         if self.write <= clocks.clock[self.write_index] {
             let race_free = if let Some(atomic) = self.atomic() {
@@ -363,7 +370,11 @@ impl MemoryCellClocks {

     /// Detect races for non-atomic write operations at the current memory cell
     /// returns true if a data-race is detected.
-    fn write_race_detect(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
+    fn write_race_detect(
+        &mut self,
+        clocks: &ThreadClockSet,
+        index: VectorIdx,
+    ) -> Result<(), DataRace> {
         log::trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, clocks);
         if self.write <= clocks.clock[self.write_index] && self.read <= clocks.clock {
             let race_free = if let Some(atomic) = self.atomic() {
@@ -385,18 +396,16 @@ impl MemoryCellClocks {
     }
 }

-
 /// Evaluation context extensions.
 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {}
 pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
-
     /// Atomic variant of read_scalar_at_offset.
     fn read_scalar_at_offset_atomic(
         &self,
         op: OpTy<'tcx, Tag>,
         offset: u64,
         layout: TyAndLayout<'tcx>,
-        atomic: AtomicReadOp
+        atomic: AtomicReadOp,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
         let this = self.eval_context_ref();
         let op_place = this.deref_operand(op)?;
@@ -415,7 +424,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         offset: u64,
         value: impl Into<ScalarMaybeUninit<Tag>>,
         layout: TyAndLayout<'tcx>,
-        atomic: AtomicWriteOp
+        atomic: AtomicWriteOp,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         let op_place = this.deref_operand(op)?;
@@ -429,46 +438,45 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {

     /// Perform an atomic read operation at the memory location.
     fn read_scalar_atomic(
-        &self, place: MPlaceTy<'tcx, Tag>, atomic: AtomicReadOp
+        &self,
+        place: MPlaceTy<'tcx, Tag>,
+        atomic: AtomicReadOp,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
         let this = self.eval_context_ref();
-        let scalar = this.allow_data_races_ref(move |this| {
-            this.read_scalar(place.into())
-        })?;
+        let scalar = this.allow_data_races_ref(move |this| this.read_scalar(place.into()))?;
         self.validate_atomic_load(place, atomic)?;
         Ok(scalar)
     }

     /// Perform an atomic write operation at the memory location.
     fn write_scalar_atomic(
-        &mut self, val: ScalarMaybeUninit<Tag>, dest: MPlaceTy<'tcx, Tag>,
-        atomic: AtomicWriteOp
+        &mut self,
+        val: ScalarMaybeUninit<Tag>,
+        dest: MPlaceTy<'tcx, Tag>,
+        atomic: AtomicWriteOp,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        this.allow_data_races_mut(move |this| {
-            this.write_scalar(val, dest.into())
-        })?;
+        this.allow_data_races_mut(move |this| this.write_scalar(val, dest.into()))?;
         self.validate_atomic_store(dest, atomic)
     }

     /// Perform a atomic operation on a memory location.
     fn atomic_op_immediate(
         &mut self,
-        place: MPlaceTy<'tcx, Tag>, rhs: ImmTy<'tcx, Tag>,
-        op: mir::BinOp, neg: bool, atomic: AtomicRwOp
+        place: MPlaceTy<'tcx, Tag>,
+        rhs: ImmTy<'tcx, Tag>,
+        op: mir::BinOp,
+        neg: bool,
+        atomic: AtomicRwOp,
     ) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
         let this = self.eval_context_mut();

-        let old = this.allow_data_races_mut(|this| {
-            this.read_immediate(place.into())
-        })?;
+        let old = this.allow_data_races_mut(|this| this.read_immediate(place.into()))?;

         // Atomics wrap around on overflow.
         let val = this.binary_op(op, old, rhs)?;
         let val = if neg { this.unary_op(mir::UnOp::Not, val)? } else { val };
-        this.allow_data_races_mut(|this| {
-            this.write_immediate(*val, place.into())
-        })?;
+        this.allow_data_races_mut(|this| this.write_immediate(*val, place.into()))?;

         this.validate_atomic_rmw(place, atomic)?;
         Ok(old)
@@ -478,17 +486,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     /// scalar value, the old value is returned.
     fn atomic_exchange_scalar(
         &mut self,
-        place: MPlaceTy<'tcx, Tag>, new: ScalarMaybeUninit<Tag>,
-        atomic: AtomicRwOp
+        place: MPlaceTy<'tcx, Tag>,
+        new: ScalarMaybeUninit<Tag>,
+        atomic: AtomicRwOp,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
         let this = self.eval_context_mut();

-        let old = this.allow_data_races_mut(|this| {
-            this.read_scalar(place.into())
-        })?;
-        this.allow_data_races_mut(|this| {
-            this.write_scalar(new, place.into())
-        })?;
+        let old = this.allow_data_races_mut(|this| this.read_scalar(place.into()))?;
+        this.allow_data_races_mut(|this| this.write_scalar(new, place.into()))?;
         this.validate_atomic_rmw(place, atomic)?;
         Ok(old)
     }
@@ -497,9 +502,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     /// on success an atomic RMW operation is performed and on failure
     /// only an atomic read occurs.
     fn atomic_compare_exchange_scalar(
-        &mut self, place: MPlaceTy<'tcx, Tag>,
-        expect_old: ImmTy<'tcx, Tag>, new: ScalarMaybeUninit<Tag>,
-        success: AtomicRwOp, fail: AtomicReadOp
+        &mut self,
+        place: MPlaceTy<'tcx, Tag>,
+        expect_old: ImmTy<'tcx, Tag>,
+        new: ScalarMaybeUninit<Tag>,
+        success: AtomicRwOp,
+        fail: AtomicReadOp,
     ) -> InterpResult<'tcx, Immediate<Tag>> {
         let this = self.eval_context_mut();

@@ -507,9 +515,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         // to read with the failure ordering and if successfull then try again with the success
         // read ordering and write in the success case.
         // Read as immediate for the sake of `binary_op()`
-        let old = this.allow_data_races_mut(|this| {
-            this.read_immediate(place.into())
-        })?;
+        let old = this.allow_data_races_mut(|this| this.read_immediate(place.into()))?;

         // `binary_op` will bail if either of them is not a scalar.
         let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
@@ -519,9 +525,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         // if successful, perform a full rw-atomic validation
         // otherwise treat this as an atomic load with the fail ordering.
         if eq.to_bool()? {
-            this.allow_data_races_mut(|this| {
-                this.write_scalar(new, place.into())
-            })?;
+            this.allow_data_races_mut(|this| this.write_scalar(new, place.into()))?;
             this.validate_atomic_rmw(place, success)?;
         } else {
             this.validate_atomic_load(place, fail)?;
@@ -530,68 +534,74 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         // Return the old value.
         Ok(res)
     }

-
     /// Update the data-race detector for an atomic read occuring at the
     /// associated memory-place and on the current thread.
     fn validate_atomic_load(
-        &self, place: MPlaceTy<'tcx, Tag>, atomic: AtomicReadOp
+        &self,
+        place: MPlaceTy<'tcx, Tag>,
+        atomic: AtomicReadOp,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_ref();
         this.validate_atomic_op(
-            place, atomic, "Atomic Load",
+            place,
+            atomic,
+            "Atomic Load",
             move |memory, clocks, index, atomic| {
                 if atomic == AtomicReadOp::Relaxed {
                     memory.load_relaxed(&mut *clocks, index)
                 } else {
                     memory.load_acquire(&mut *clocks, index)
                 }
-            }
+            },
         )
     }

     /// Update the data-race detector for an atomic write occuring at the
     /// associated memory-place and on the current thread.
     fn validate_atomic_store(
-        &mut self, place: MPlaceTy<'tcx, Tag>, atomic: AtomicWriteOp
+        &mut self,
+        place: MPlaceTy<'tcx, Tag>,
+        atomic: AtomicWriteOp,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_ref();
         this.validate_atomic_op(
-            place, atomic, "Atomic Store",
+            place,
+            atomic,
+            "Atomic Store",
             move |memory, clocks, index, atomic| {
                 if atomic == AtomicWriteOp::Relaxed {
                     memory.store_relaxed(clocks, index)
                 } else {
                     memory.store_release(clocks, index)
                 }
-            }
+            },
         )
     }

     /// Update the data-race detector for an atomic read-modify-write occuring
     /// at the associated memory place and on the current thread.
     fn validate_atomic_rmw(
-        &mut self, place: MPlaceTy<'tcx, Tag>, atomic: AtomicRwOp
+        &mut self,
+        place: MPlaceTy<'tcx, Tag>,
+        atomic: AtomicRwOp,
     ) -> InterpResult<'tcx> {
         use AtomicRwOp::*;
         let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
         let release = matches!(atomic, Release | AcqRel | SeqCst);
         let this = self.eval_context_ref();
-        this.validate_atomic_op(
-            place, atomic, "Atomic RMW",
-            move |memory, clocks, index, _| {
-                if acquire {
-                    memory.load_acquire(clocks, index)?;
-                } else {
-                    memory.load_relaxed(clocks, index)?;
-                }
-                if release {
-                    memory.rmw_release(clocks, index)
-                } else {
-                    memory.rmw_relaxed(clocks, index)
-                }
-            }
-        )
+        this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
+            if acquire {
+                memory.load_acquire(clocks, index)?;
+            } else {
+                memory.load_relaxed(clocks, index)?;
+            }
+            if release {
+                memory.rmw_release(clocks, index)
+            } else {
+                memory.rmw_relaxed(clocks, index)
+            }
+        })
     }

     /// Update the data-race detector for an atomic fence on the current thread.
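For reference, the ordering-to-synchronization mapping used by `validate_atomic_rmw` in the hunk above can be written as a small standalone helper (illustrative only; `RwOrdering` is a hypothetical stand-in for the `AtomicRwOp` enum in this file):

```rust
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum RwOrdering {
    Relaxed,
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}

/// Returns (performs an acquire load, performs a release store) for a read-modify-write.
fn rmw_synchronization(ordering: RwOrdering) -> (bool, bool) {
    use RwOrdering::*;
    let acquire = matches!(ordering, Acquire | AcqRel | SeqCst);
    let release = matches!(ordering, Release | AcqRel | SeqCst);
    (acquire, release)
}
```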
@@ -620,12 +630,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     }
 }

-
-
 /// Vector clock metadata for a logical memory allocation.
 #[derive(Debug, Clone)]
 pub struct VClockAlloc {
-
     /// Range of Vector clocks, this gives each byte a potentially
     /// unqiue set of vector clocks, but merges identical information
     /// together for improved efficiency.
@@ -635,16 +642,12 @@ pub struct VClockAlloc {
     global: MemoryExtra,
 }

-
 impl VClockAlloc {
-
     /// Create a new data-race allocation detector.
     pub fn new_allocation(global: &MemoryExtra, len: Size) -> VClockAlloc {
         VClockAlloc {
             global: Rc::clone(global),
-            alloc_ranges: RefCell::new(
-                RangeMap::new(len, MemoryCellClocks::default())
-            )
+            alloc_ranges: RefCell::new(RangeMap::new(len, MemoryCellClocks::default())),
         }
     }

@@ -653,27 +656,29 @@ impl VClockAlloc {
     fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
         let l_slice = l.as_slice();
         let r_slice = r.as_slice();
-        l_slice.iter().zip(r_slice.iter())
+        l_slice
+            .iter()
+            .zip(r_slice.iter())
             .enumerate()
-            .find_map(|(idx, (&l, &r))| {
-                if l > r { Some(idx) } else { None }
-            }).or_else(|| {
+            .find_map(|(idx, (&l, &r))| if l > r { Some(idx) } else { None })
+            .or_else(|| {
                 if l_slice.len() > r_slice.len() {

                     // By invariant, if l_slice is longer
                     // then one element must be larger.
                     // This just validates that this is true
                     // and reports earlier elements first.
                     let l_remainder_slice = &l_slice[r_slice.len()..];
-                    let idx = l_remainder_slice.iter().enumerate()
-                        .find_map(|(idx, &r)| {
-                            if r == 0 { None } else { Some(idx) }
-                        }).expect("Invalid VClock Invariant");
+                    let idx = l_remainder_slice
+                        .iter()
+                        .enumerate()
+                        .find_map(|(idx, &r)| if r == 0 { None } else { Some(idx) })
+                        .expect("Invalid VClock Invariant");
                     Some(idx)
                 } else {
                     None
                 }
-            }).map(|idx| VectorIdx::new(idx))
+            })
+            .map(|idx| VectorIdx::new(idx))
     }

     /// Report a data-race found in the program.
@@ -684,39 +689,42 @@ impl VClockAlloc {
     #[cold]
     #[inline(never)]
     fn report_data_race<'tcx>(
-        global: &MemoryExtra, range: &MemoryCellClocks,
-        action: &str, is_atomic: bool,
-        pointer: Pointer<Tag>, len: Size
+        global: &MemoryExtra,
+        range: &MemoryCellClocks,
+        action: &str,
+        is_atomic: bool,
+        pointer: Pointer<Tag>,
+        len: Size,
     ) -> InterpResult<'tcx> {
         let (current_index, current_clocks) = global.current_thread_state();
         let write_clock;
-        let (
-            other_action, other_thread, other_clock
-        ) = if range.write > current_clocks.clock[range.write_index] {
+        let (other_action, other_thread, other_clock) = if range.write
+            > current_clocks.clock[range.write_index]
+        {

             // Convert the write action into the vector clock it
             // represents for diagnostic purposes.
             write_clock = VClock::new_with_index(range.write_index, range.write);
             ("WRITE", range.write_index, &write_clock)
-        } else if let Some(idx) = Self::find_gt_index(
-            &range.read, &current_clocks.clock
-        ){
+        } else if let Some(idx) = Self::find_gt_index(&range.read, &current_clocks.clock) {
             ("READ", idx, &range.read)
         } else if !is_atomic {
             if let Some(atomic) = range.atomic() {
-                if let Some(idx) = Self::find_gt_index(
-                    &atomic.write_vector, &current_clocks.clock
-                ) {
+                if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &current_clocks.clock)
+                {
                     ("ATOMIC_STORE", idx, &atomic.write_vector)
-                } else if let Some(idx) = Self::find_gt_index(
-                    &atomic.read_vector, &current_clocks.clock
-                ) {
+                } else if let Some(idx) =
+                    Self::find_gt_index(&atomic.read_vector, &current_clocks.clock)
+                {
                     ("ATOMIC_LOAD", idx, &atomic.read_vector)
                 } else {
-                    unreachable!("Failed to report data-race for non-atomic operation: no race found")
+                    unreachable!(
+                        "Failed to report data-race for non-atomic operation: no race found"
+                    )
                 }
             } else {
-                unreachable!("Failed to report data-race for non-atomic operation: no atomic component")
+                unreachable!(
+                    "Failed to report data-race for non-atomic operation: no atomic component"
+                )
             }
         } else {
             unreachable!("Failed to report data-race for atomic operation")
@@ -725,15 +733,19 @@ impl VClockAlloc {
         // Load elaborated thread information about the racing thread actions.
         let current_thread_info = global.print_thread_metadata(current_index);
         let other_thread_info = global.print_thread_metadata(other_thread);

         // Throw the data-race detection.
         throw_ub_format!(
             "Data race detected between {} on {} and {} on {}, memory({:?},offset={},size={})\
             \n\t\t -current vector clock = {:?}\
             \n\t\t -conflicting timestamp = {:?}",
-            action, current_thread_info,
-            other_action, other_thread_info,
-            pointer.alloc_id, pointer.offset.bytes(), len.bytes(),
+            action,
+            current_thread_info,
+            other_action,
+            other_thread_info,
+            pointer.alloc_id,
+            pointer.offset.bytes(),
+            len.bytes(),
             current_clocks.clock,
             other_clock
         )
@@ -748,12 +760,16 @@ impl VClockAlloc {
         if self.global.multi_threaded.get() {
             let (index, clocks) = self.global.current_thread_state();
             let mut alloc_ranges = self.alloc_ranges.borrow_mut();
-            for (_,range) in alloc_ranges.iter_mut(pointer.offset, len) {
+            for (_, range) in alloc_ranges.iter_mut(pointer.offset, len) {
                 if let Err(DataRace) = range.read_race_detect(&*clocks, index) {
-
                     // Report data-race.
                     return Self::report_data_race(
-                        &self.global,range, "READ", false, pointer, len
+                        &self.global,
+                        range,
+                        "READ",
+                        false,
+                        pointer,
+                        len,
                     );
                 }
             }
@@ -763,17 +779,25 @@ impl VClockAlloc {
         }
     }

-
     // Shared code for detecting data-races on unique access to a section of memory
-    fn unique_access<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size, action: &str) -> InterpResult<'tcx> {
+    fn unique_access<'tcx>(
+        &mut self,
+        pointer: Pointer<Tag>,
+        len: Size,
+        action: &str,
+    ) -> InterpResult<'tcx> {
         if self.global.multi_threaded.get() {
             let (index, clocks) = self.global.current_thread_state();
-            for (_,range) in self.alloc_ranges.get_mut().iter_mut(pointer.offset, len) {
+            for (_, range) in self.alloc_ranges.get_mut().iter_mut(pointer.offset, len) {
                 if let Err(DataRace) = range.write_race_detect(&*clocks, index) {
-
                     // Report data-race
                     return Self::report_data_race(
-                        &self.global, range, action, false, pointer, len
+                        &self.global,
+                        range,
+                        action,
+                        false,
+                        pointer,
+                        len,
                     );
                 }
             }
@@ -802,7 +826,6 @@ impl VClockAlloc {

 impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {}
 trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
-
     // Temporarily allow data-races to occur, this should only be
     // used if either one of the appropiate `validate_atomic` functions
     // will be called to treat a memory access as atomic or if the memory
@@ -827,7 +850,10 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     /// so should only be used for atomic operations or internal state that the program cannot
     /// access.
     #[inline]
-    fn allow_data_races_mut<R>(&mut self, op: impl FnOnce(&mut MiriEvalContext<'mir, 'tcx>) -> R) -> R {
+    fn allow_data_races_mut<R>(
+        &mut self,
+        op: impl FnOnce(&mut MiriEvalContext<'mir, 'tcx>) -> R,
+    ) -> R {
         let this = self.eval_context_mut();
         let old = if let Some(data_race) = &this.memory.extra.data_race {
             data_race.multi_threaded.replace(false)
@@ -848,34 +874,49 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     /// FIXME: is this valid, or should get_raw_mut be used for
     /// atomic-stores/atomic-rmw?
     fn validate_atomic_op<A: Debug + Copy>(
-        &self, place: MPlaceTy<'tcx, Tag>,
-        atomic: A, description: &str,
+        &self,
+        place: MPlaceTy<'tcx, Tag>,
+        atomic: A,
+        description: &str,
         mut op: impl FnMut(
-            &mut MemoryCellClocks, &mut ThreadClockSet, VectorIdx, A
-        ) -> Result<(), DataRace>
+            &mut MemoryCellClocks,
+            &mut ThreadClockSet,
+            VectorIdx,
+            A,
+        ) -> Result<(), DataRace>,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_ref();
         if let Some(data_race) = &this.memory.extra.data_race {
             if data_race.multi_threaded.get() {
-
                 // Load and log the atomic operation.
                 let place_ptr = place.ptr.assert_ptr();
                 let size = place.layout.size;
-                let alloc_meta = &this.memory.get_raw(place_ptr.alloc_id)?.extra.data_race.as_ref().unwrap();
+                let alloc_meta =
+                    &this.memory.get_raw(place_ptr.alloc_id)?.extra.data_race.as_ref().unwrap();
                 log::trace!(
                     "Atomic op({}) with ordering {:?} on memory({:?}, offset={}, size={})",
-                    description, &atomic, place_ptr.alloc_id, place_ptr.offset.bytes(), size.bytes()
+                    description,
+                    &atomic,
+                    place_ptr.alloc_id,
+                    place_ptr.offset.bytes(),
+                    size.bytes()
                 );

                 // Perform the atomic operation.
                 let data_race = &alloc_meta.global;
                 data_race.maybe_perform_sync_operation(|index, mut clocks| {
-                    for (_,range) in alloc_meta.alloc_ranges.borrow_mut().iter_mut(place_ptr.offset, size) {
+                    for (_, range) in
+                        alloc_meta.alloc_ranges.borrow_mut().iter_mut(place_ptr.offset, size)
+                    {
                         if let Err(DataRace) = op(range, &mut *clocks, index, atomic) {
                             mem::drop(clocks);
                             return VClockAlloc::report_data_race(
-                                &alloc_meta.global, range, description, true,
-                                place_ptr, size
+                                &alloc_meta.global,
+                                range,
+                                description,
+                                true,
+                                place_ptr,
+                                size,
                             );
                         }
                     }
@@ -884,10 +925,13 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {

                 // Log changes to atomic memory.
                 if log::log_enabled!(log::Level::Trace) {
-                    for (_,range) in alloc_meta.alloc_ranges.borrow().iter(place_ptr.offset, size) {
+                    for (_, range) in alloc_meta.alloc_ranges.borrow().iter(place_ptr.offset, size)
+                    {
                         log::trace!(
                             "Updated atomic memory({:?}, offset={}, size={}) to {:#?}",
-                            place.ptr.assert_ptr().alloc_id, place_ptr.offset.bytes(), size.bytes(),
+                            place.ptr.assert_ptr().alloc_id,
+                            place_ptr.offset.bytes(),
+                            size.bytes(),
                             range.atomic_ops
                         );
                     }
@@ -896,14 +940,11 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         }
         Ok(())
     }
-
 }

-
 /// Extra metadata associated with a thread.
 #[derive(Debug, Clone, Default)]
 struct ThreadExtraState {
-
     /// The current vector index in use by the
     /// thread currently, this is set to None
     /// after the vector index has been re-used
@@ -915,7 +956,7 @@ struct ThreadExtraState {
     /// diagnostics when reporting detected data
     /// races.
     thread_name: Option<Box<str>>,

     /// Thread termination vector clock, this
     /// is set on thread termination and is used
     /// for joining on threads since the vector_index
@@ -928,7 +969,6 @@ struct ThreadExtraState {
 /// with each of the threads.
 #[derive(Debug, Clone)]
 pub struct GlobalState {
-
     /// Set to true once the first additional
     /// thread has launched, due to the dependency
     /// between before and after a thread launch.
@@ -966,7 +1006,7 @@ pub struct GlobalState {
     /// if the number of active threads reduces to 1 and then
     /// a join operation occures with the remaining main thread
     /// then multi-threaded execution may be disabled.
     active_thread_count: Cell<usize>,

     /// This contains threads that have terminated, but not yet joined
     /// and so cannot become re-use candidates until a join operation
@@ -977,7 +1017,6 @@ pub struct GlobalState {
 }

 impl GlobalState {
-
     /// Create a new global state, setup with just thread-id=0
     /// advanced to timestamp = 1.
     pub fn new() -> Self {
@@ -989,7 +1028,7 @@ impl GlobalState {
             current_index: Cell::new(VectorIdx::new(0)),
             active_thread_count: Cell::new(1),
             reuse_candidates: RefCell::new(FxHashSet::default()),
-            terminated_threads: RefCell::new(FxHashMap::default())
+            terminated_threads: RefCell::new(FxHashMap::default()),
         };

         // Setup the main-thread since it is not explicitly created:
@@ -997,17 +1036,15 @@ impl GlobalState {
         // the main-thread a name of "main".
         let index = global_state.vector_clocks.borrow_mut().push(ThreadClockSet::default());
         global_state.vector_info.borrow_mut().push(ThreadId::new(0));
-        global_state.thread_info.borrow_mut().push(
-            ThreadExtraState {
-                vector_index: Some(index),
-                thread_name: Some("main".to_string().into_boxed_str()),
-                termination_vector_clock: None
-            }
-        );
+        global_state.thread_info.borrow_mut().push(ThreadExtraState {
+            vector_index: Some(index),
+            thread_name: Some("main".to_string().into_boxed_str()),
+            termination_vector_clock: None,
+        });

         global_state
     }

     // Try to find vector index values that can potentially be re-used
     // by a new thread instead of a new vector index being created.
     fn find_vector_index_reuse_candidate(&self) -> Option<VectorIdx> {
@@ -1015,10 +1052,9 @@ impl GlobalState {
         let vector_clocks = self.vector_clocks.borrow();
         let vector_info = self.vector_info.borrow();
         let terminated_threads = self.terminated_threads.borrow();
         for &candidate in reuse.iter() {
             let target_timestamp = vector_clocks[candidate].clock[candidate];
             if vector_clocks.iter_enumerated().all(|(clock_idx, clock)| {
-
                 // The thread happens before the clock, and hence cannot report
                 // a data-race with this the candidate index.
                 let no_data_race = clock.clock[candidate] >= target_timestamp;
@@ -1026,20 +1062,19 @@ impl GlobalState {
                 // The vector represents a thread that has terminated and hence cannot
                 // report a data-race with the candidate index.
                 let thread_id = vector_info[clock_idx];
-                let vector_terminated = reuse.contains(&clock_idx)
-                    || terminated_threads.contains_key(&thread_id);
+                let vector_terminated =
+                    reuse.contains(&clock_idx) || terminated_threads.contains_key(&thread_id);

                 // The vector index cannot report a race with the candidate index
                 // and hence allows the candidate index to be re-used.
                 no_data_race || vector_terminated
             }) {
-
                 // All vector clocks for each vector index are equal to
                 // the target timestamp, and the thread is known to have
                 // terminated, therefore this vector clock index cannot
                 // report any more data-races.
                 assert!(reuse.remove(&candidate));
-                return Some(candidate)
+                return Some(candidate);
             }
         }
         None
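The reuse test in the hunk above can be summarized as: a candidate vector index may be recycled once every other vector clock has reached the candidate's final timestamp, or belongs to a thread that has itself terminated. A hedged sketch of that condition, with plain slices standing in for the real clock types:

```rust
/// True if `candidate` can be recycled. `clocks[i][j]` is the timestamp thread `i`
/// has observed for thread `j`; `terminated[i]` marks threads that have ended.
fn can_reuse(candidate: usize, final_ts: u64, clocks: &[Vec<u64>], terminated: &[bool]) -> bool {
    clocks.iter().enumerate().all(|(idx, clock)| {
        // A clock that has never observed `candidate` defaults to timestamp 0.
        let observed = clock.get(candidate).copied().unwrap_or(0);
        observed >= final_ts || terminated[idx]
    })
}
```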
@@ -1065,10 +1100,7 @@ impl GlobalState {

         // Assign a vector index for the thread, attempting to re-use an old
         // vector index that can no longer report any data-races if possible.
-        let created_index = if let Some(
-            reuse_index
-        ) = self.find_vector_index_reuse_candidate() {
-
+        let created_index = if let Some(reuse_index) = self.find_vector_index_reuse_candidate() {
             // Now re-configure the re-use candidate, increment the clock
             // for the new sync use of the vector.
             let mut vector_clocks = self.vector_clocks.borrow_mut();
@@ -1086,7 +1118,6 @@ impl GlobalState {

             reuse_index
         } else {
-
             // No vector re-use candidates available, instead create
             // a new vector index.
             let mut vector_info = self.vector_info.borrow_mut();
@@ -1125,13 +1156,16 @@ impl GlobalState {
         let thread_info = self.thread_info.borrow();

         // Load the vector clock of the current thread.
-        let current_index = thread_info[current_thread].vector_index
+        let current_index = thread_info[current_thread]
+            .vector_index
             .expect("Performed thread join on thread with no assigned vector");
         let current = &mut clocks_vec[current_index];

         // Load the associated vector clock for the terminated thread.
-        let join_clock = thread_info[join_thread].termination_vector_clock
-            .as_ref().expect("Joined with thread but thread has not terminated");
+        let join_clock = thread_info[join_thread]
+            .termination_vector_clock
+            .as_ref()
+            .expect("Joined with thread but thread has not terminated");

         // Pre increment clocks before atomic operation.
         current.increment_clock(current_index);
@@ -1147,13 +1181,12 @@ impl GlobalState {
         // then test for potentially disabling multi-threaded execution.
         let active_threads = self.active_thread_count.get();
         if active_threads == 1 {
-
             // May potentially be able to disable multi-threaded execution.
             let current_clock = &clocks_vec[current_index];
-            if clocks_vec.iter_enumerated().all(|(idx, clocks)| {
-                clocks.clock[idx] <= current_clock.clock[idx]
-            }) {
+            if clocks_vec
+                .iter_enumerated()
+                .all(|(idx, clocks)| clocks.clock[idx] <= current_clock.clock[idx])
+            {
                 // The all thread termations happen-before the current clock
                 // therefore no data-races can be reported until a new thread
                 // is created, so disable multi-threaded execution.
@@ -1180,7 +1213,7 @@ impl GlobalState {
     #[inline]
     pub fn thread_terminated(&self) {
         let current_index = self.current_index();

         // Increment the clock to a unique termination timestamp.
         let mut vector_clocks = self.vector_clocks.borrow_mut();
         let current_clocks = &mut vector_clocks[current_index];
@ -1201,7 +1234,7 @@ impl GlobalState {
|
|||||||
// occurs.
|
// occurs.
|
||||||
let mut termination = self.terminated_threads.borrow_mut();
|
let mut termination = self.terminated_threads.borrow_mut();
|
||||||
termination.insert(current_thread, current_index);
|
termination.insert(current_thread, current_index);
|
||||||
|
|
||||||
// Reduce the number of active threads, now that a thread has
|
// Reduce the number of active threads, now that a thread has
|
||||||
// terminated.
|
// terminated.
|
||||||
let mut active_threads = self.active_thread_count.get();
|
let mut active_threads = self.active_thread_count.get();
|
||||||
@ -1215,7 +1248,8 @@ impl GlobalState {
|
|||||||
#[inline]
|
#[inline]
|
||||||
pub fn thread_set_active(&self, thread: ThreadId) {
|
pub fn thread_set_active(&self, thread: ThreadId) {
|
||||||
let thread_info = self.thread_info.borrow();
|
let thread_info = self.thread_info.borrow();
|
||||||
let vector_idx = thread_info[thread].vector_index
|
let vector_idx = thread_info[thread]
|
||||||
|
.vector_index
|
||||||
.expect("Setting thread active with no assigned vector");
|
.expect("Setting thread active with no assigned vector");
|
||||||
self.current_index.set(vector_idx);
|
self.current_index.set(vector_idx);
|
||||||
}
|
}
|
||||||
@ -1231,7 +1265,6 @@ impl GlobalState {
|
|||||||
thread_info[thread].thread_name = Some(name);
|
thread_info[thread].thread_name = Some(name);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/// Attempt to perform a synchronized operation, this
|
/// Attempt to perform a synchronized operation, this
|
||||||
/// will perform no operation if multi-threading is
|
/// will perform no operation if multi-threading is
|
||||||
/// not currently enabled.
|
/// not currently enabled.
|
||||||
@ -1240,7 +1273,8 @@ impl GlobalState {
|
|||||||
/// detection between any happens-before edges the
|
/// detection between any happens-before edges the
|
||||||
/// operation may create.
|
/// operation may create.
|
||||||
fn maybe_perform_sync_operation<'tcx>(
|
fn maybe_perform_sync_operation<'tcx>(
|
||||||
&self, op: impl FnOnce(VectorIdx, RefMut<'_,ThreadClockSet>) -> InterpResult<'tcx>,
|
&self,
|
||||||
|
op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx>,
|
||||||
) -> InterpResult<'tcx> {
|
) -> InterpResult<'tcx> {
|
||||||
if self.multi_threaded.get() {
|
if self.multi_threaded.get() {
|
||||||
let (index, mut clocks) = self.current_thread_state_mut();
|
let (index, mut clocks) = self.current_thread_state_mut();
|
||||||
@ -1251,7 +1285,6 @@ impl GlobalState {
|
|||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/// Internal utility to identify a thread stored internally
|
/// Internal utility to identify a thread stored internally
|
||||||
/// returns the id and the name for better diagnostics.
|
/// returns the id and the name for better diagnostics.
|
||||||
@ -1266,7 +1299,6 @@ impl GlobalState {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/// Acquire a lock, express that the previous call of
|
/// Acquire a lock, express that the previous call of
|
||||||
/// `validate_lock_release` must happen before this.
|
/// `validate_lock_release` must happen before this.
|
||||||
pub fn validate_lock_acquire(&self, lock: &VClock, thread: ThreadId) {
|
pub fn validate_lock_acquire(&self, lock: &VClock, thread: ThreadId) {
|
||||||
@ -1300,7 +1332,8 @@ impl GlobalState {
|
|||||||
/// used by the thread.
|
/// used by the thread.
|
||||||
#[inline]
|
#[inline]
|
||||||
fn load_thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
|
fn load_thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
|
||||||
let index = self.thread_info.borrow()[thread].vector_index
|
let index = self.thread_info.borrow()[thread]
|
||||||
|
.vector_index
|
||||||
.expect("Loading thread state for thread with no assigned vector");
|
.expect("Loading thread state for thread with no assigned vector");
|
||||||
let ref_vector = self.vector_clocks.borrow_mut();
|
let ref_vector = self.vector_clocks.borrow_mut();
|
||||||
let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
|
let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
|
||||||
|
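Aside (not part of the commit): the recurring change in the data_race.rs hunks above is rustfmt splitting over-long field accesses and method chains so that each step of the chain sits on its own line. A minimal, hypothetical stand-alone illustration of the same before/after shape (the type and function names here are invented for the example, not Miri's):

// Hypothetical stand-in for the `thread_info[thread].vector_index.expect(...)`
// pattern reformatted throughout data_race.rs.
struct ThreadInfo {
    vector_index: Option<usize>,
}

fn vector_index_of(info: &ThreadInfo) -> usize {
    // Before rustfmt the chain was wrapped by hand, e.g.:
    //     let index = info.vector_index
    //         .expect("...");
    // After rustfmt, an over-long chain puts every step on its own line:
    info.vector_index
        .expect("Loading thread state for thread with no assigned vector")
}

fn main() {
    let info = ThreadInfo { vector_index: Some(3) };
    assert_eq!(vector_index_of(&info), 3);
}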
src/vector_clock.rs
@@ -1,10 +1,13 @@
-use std::{
-    fmt::{self, Debug}, cmp::Ordering, ops::Index,
-    convert::TryFrom, mem
-};
-use smallvec::SmallVec;
-use rustc_index::vec::Idx;
use rustc_data_structures::fx::FxHashMap;
+use rustc_index::vec::Idx;
+use smallvec::SmallVec;
+use std::{
+    cmp::Ordering,
+    convert::TryFrom,
+    fmt::{self, Debug},
+    mem,
+    ops::Index,
+};

/// A vector clock index, this is associated with a thread id
/// but in some cases one vector index may be shared with
@@ -13,18 +16,15 @@ use rustc_data_structures::fx::FxHashMap;
pub struct VectorIdx(u32);

impl VectorIdx {
-
    #[inline(always)]
    pub fn to_u32(self) -> u32 {
        self.0
    }

    pub const MAX_INDEX: VectorIdx = VectorIdx(u32::MAX);
-
}

impl Idx for VectorIdx {
-
    #[inline]
    fn new(idx: usize) -> Self {
        VectorIdx(u32::try_from(idx).unwrap())
@@ -34,16 +34,13 @@ impl Idx for VectorIdx {
    fn index(self) -> usize {
        usize::try_from(self.0).unwrap()
    }
-
}

impl From<u32> for VectorIdx {
-
    #[inline]
    fn from(id: u32) -> Self {
        Self(id)
    }
-
}

/// A sparse mapping of vector index values to vector clocks, this
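Aside (not part of the commit): the VectorIdx hunks above only drop blank lines, but for orientation the type is a u32 newtype whose conversions to and from usize go through checked casts. A rough self-contained sketch of that pattern (my own type names, without rustc's Idx trait):

use std::convert::TryFrom;

/// u32-backed index newtype, mirroring the shape of `VectorIdx`.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct SmallIdx(u32);

impl SmallIdx {
    fn new(idx: usize) -> Self {
        // Panics if the index does not fit in 32 bits, like `Idx::new` above.
        SmallIdx(u32::try_from(idx).unwrap())
    }

    fn index(self) -> usize {
        usize::try_from(self.0).unwrap()
    }
}

fn main() {
    let idx = SmallIdx::new(42);
    assert_eq!(idx.index(), 42);
    assert_eq!(SmallIdx(7), SmallIdx::new(7));
}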
@@ -52,7 +49,7 @@ impl From<u32> for VectorIdx {
/// This is used to store the set of currently active release
/// sequences at a given memory location, since RMW operations
/// allow for multiple release sequences to be active at once
/// and to be collapsed back to one active release sequence
/// once a non RMW atomic store operation occurs.
/// An all zero vector is considered to be equal to no
/// element stored internally since it will never be
@@ -63,7 +60,6 @@ pub struct VSmallClockMap(VSmallClockMapInner);

#[derive(Clone)]
enum VSmallClockMapInner {
-
    /// Zero or 1 vector elements, common
    /// case for the sparse set.
    /// The all zero vector clock is treated
@@ -71,18 +67,15 @@ enum VSmallClockMapInner {
    Small(VectorIdx, VClock),

    /// Hash-map of vector clocks.
-    Large(FxHashMap<VectorIdx, VClock>)
+    Large(FxHashMap<VectorIdx, VClock>),
}

impl VSmallClockMap {
-
    /// Remove all clock vectors from the map, setting them
    /// to the zero vector.
    pub fn clear(&mut self) {
        match &mut self.0 {
-            VSmallClockMapInner::Small(_, clock) => {
-                clock.set_zero_vector()
-            }
+            VSmallClockMapInner::Small(_, clock) => clock.set_zero_vector(),
            VSmallClockMapInner::Large(hash_map) => {
                hash_map.clear();
            }
@@ -95,12 +88,11 @@ impl VSmallClockMap {
        match &mut self.0 {
            VSmallClockMapInner::Small(small_idx, clock) => {
                if index != *small_idx {
-
                    // The zero-vector is considered to equal
                    // the empty element.
                    clock.set_zero_vector()
                }
-            },
+            }
            VSmallClockMapInner::Large(hash_map) => {
                let value = hash_map.remove(&index).unwrap_or_default();
                self.0 = VSmallClockMapInner::Small(index, value);
@@ -114,23 +106,20 @@ impl VSmallClockMap {
        match &mut self.0 {
            VSmallClockMapInner::Small(small_idx, small_clock) => {
                if small_clock.is_zero_vector() {
-
                    *small_idx = index;
                    small_clock.clone_from(clock);
                } else if !clock.is_zero_vector() {
-
                    // Convert to using the hash-map representation.
                    let mut hash_map = FxHashMap::default();
                    hash_map.insert(*small_idx, mem::take(small_clock));
                    hash_map.insert(index, clock.clone());
                    self.0 = VSmallClockMapInner::Large(hash_map);
                }
-            },
-            VSmallClockMapInner::Large(hash_map) => {
+            }
+            VSmallClockMapInner::Large(hash_map) =>
                if !clock.is_zero_vector() {
                    hash_map.insert(index, clock.clone());
-                }
-            }
+                },
        }
    }

@@ -144,51 +133,39 @@ impl VSmallClockMap {
                } else {
                    None
                }
-            },
-            VSmallClockMapInner::Large(hash_map) => {
-                hash_map.get(&index)
-            }
+            }
+            VSmallClockMapInner::Large(hash_map) => hash_map.get(&index),
        }
    }
}

impl Default for VSmallClockMap {
-
    #[inline]
    fn default() -> Self {
-        VSmallClockMap(
-            VSmallClockMapInner::Small(VectorIdx::new(0), VClock::default())
-        )
+        VSmallClockMap(VSmallClockMapInner::Small(VectorIdx::new(0), VClock::default()))
    }
-
}

impl Debug for VSmallClockMap {
-
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Print the contents of the small vector clock set as the map
        // of vector index to vector clock that they represent.
        let mut map = f.debug_map();
        match &self.0 {
-            VSmallClockMapInner::Small(small_idx, small_clock) => {
+            VSmallClockMapInner::Small(small_idx, small_clock) =>
                if !small_clock.is_zero_vector() {
                    map.entry(&small_idx, &small_clock);
-                }
-            },
-            VSmallClockMapInner::Large(hash_map) => {
+                },
+            VSmallClockMapInner::Large(hash_map) =>
                for (idx, elem) in hash_map.iter() {
                    map.entry(idx, elem);
-                }
-            }
+                },
        }
        map.finish()
    }
-
}

impl PartialEq for VSmallClockMap {
-
    fn eq(&self, other: &Self) -> bool {
        use VSmallClockMapInner::*;
        match (&self.0, &other.0) {
@@ -201,9 +178,7 @@ impl PartialEq for VSmallClockMap {
                    i1 == i2 && c1 == c2
                }
            }
-            (Small(idx, clock), Large(hash_map)) |
-            (Large(hash_map), Small(idx, clock)) => {
-
+            (Small(idx, clock), Large(hash_map)) | (Large(hash_map), Small(idx, clock)) => {
                if hash_map.len() == 0 {
                    // Equal to the empty hash-map
                    clock.is_zero_vector()
@@ -215,18 +190,13 @@ impl PartialEq for VSmallClockMap {
                    false
                }
            }
-            (Large(map1), Large(map2)) => {
-                map1 == map2
-            }
+            (Large(map1), Large(map2)) => map1 == map2,
        }
    }
-
}

impl Eq for VSmallClockMap {}

-
-
/// The size of the vector-clock to store inline
/// clock vectors larger than this will be stored on the heap
const SMALL_VECTOR: usize = 4;
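Aside (not part of the commit): the VSmallClockMap hunks above reformat a sparse map that keeps a single inline (index, clock) entry in the common case and falls back to a hash map once a second non-zero clock is stored. A simplified stand-alone sketch of that representation switch, using plain Vec<u64> clocks and std's HashMap instead of Miri's VClock and FxHashMap:

use std::collections::HashMap;

/// Simplified stand-in for a vector clock: all-zero means "empty".
type Clock = Vec<u64>;

fn is_zero(c: &Clock) -> bool {
    c.iter().all(|&t| t == 0)
}

/// Sparse map keyed by a vector index: one inline entry in the common
/// case, promoted to a hash map once a second non-zero clock is stored.
enum SmallClockMap {
    Small(u32, Clock),
    Large(HashMap<u32, Clock>),
}

impl SmallClockMap {
    fn new() -> Self {
        SmallClockMap::Small(0, Clock::new())
    }

    fn insert(&mut self, index: u32, clock: &Clock) {
        match self {
            SmallClockMap::Small(small_idx, small_clock) => {
                if is_zero(small_clock) {
                    // Inline slot is free: reuse it.
                    *small_idx = index;
                    *small_clock = clock.clone();
                } else if !is_zero(clock) {
                    // Second distinct non-zero clock: switch representation.
                    let mut map = HashMap::new();
                    map.insert(*small_idx, std::mem::take(small_clock));
                    map.insert(index, clock.clone());
                    *self = SmallClockMap::Large(map);
                }
            }
            SmallClockMap::Large(map) => {
                if !is_zero(clock) {
                    map.insert(index, clock.clone());
                }
            }
        }
    }

    fn get(&self, index: u32) -> Option<&Clock> {
        match self {
            SmallClockMap::Small(small_idx, small_clock) => {
                if *small_idx == index && !is_zero(small_clock) {
                    Some(small_clock)
                } else {
                    None
                }
            }
            SmallClockMap::Large(map) => map.get(&index),
        }
    }
}

fn main() {
    let v1: Clock = vec![3, 0, 1];
    let v2: Clock = vec![4, 2, 3];
    let mut m = SmallClockMap::new();
    m.insert(0, &v1);
    assert_eq!(m.get(0), Some(&v1));
    m.insert(5, &v2); // second non-zero clock promotes Small -> Large
    assert_eq!(m.get(5), Some(&v2));
    assert_eq!(m.get(0), Some(&v1));
    println!("sparse clock map behaves as expected");
}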
@@ -249,7 +219,6 @@ pub type VTimestamp = u32;
pub struct VClock(SmallVec<[VTimestamp; SMALL_VECTOR]>);

impl VClock {
-
    /// Create a new vector-clock containing all zeros except
    /// for a value at the given index
    pub fn new_with_index(index: VectorIdx, timestamp: VTimestamp) -> VClock {
@@ -316,11 +285,9 @@ impl VClock {
    pub fn is_zero_vector(&self) -> bool {
        self.0.is_empty()
    }
-
}

impl Clone for VClock {
-
    fn clone(&self) -> Self {
        VClock(self.0.clone())
    }
@@ -334,13 +301,10 @@ impl Clone for VClock {
        self.0.clear();
        self.0.extend_from_slice(source_slice);
    }
-
}

impl PartialOrd for VClock {
-
    fn partial_cmp(&self, other: &VClock) -> Option<Ordering> {
-
        // Load the values as slices
        let lhs_slice = self.as_slice();
        let rhs_slice = other.as_slice();
@@ -356,17 +320,19 @@ impl PartialOrd for VClock {
        let mut iter = lhs_slice.iter().zip(rhs_slice.iter());
        let mut order = match iter.next() {
            Some((lhs, rhs)) => lhs.cmp(rhs),
-            None => Ordering::Equal
+            None => Ordering::Equal,
        };
        for (l, r) in iter {
            match order {
                Ordering::Equal => order = l.cmp(r),
-                Ordering::Less => if l > r {
-                    return None
-                },
-                Ordering::Greater => if l < r {
-                    return None
-                }
+                Ordering::Less =>
+                    if l > r {
+                        return None;
+                    },
+                Ordering::Greater =>
+                    if l < r {
+                        return None;
+                    },
            }
        }

@@ -383,14 +349,14 @@ impl PartialOrd for VClock {
            // so the only valid values are Ordering::Less or None.
            Ordering::Less => match order {
                Ordering::Less | Ordering::Equal => Some(Ordering::Less),
-                Ordering::Greater => None
-            }
+                Ordering::Greater => None,
+            },
            // Left has at least 1 element > than the implicit 0,
            // so the only valid values are Ordering::Greater or None.
            Ordering::Greater => match order {
                Ordering::Greater | Ordering::Equal => Some(Ordering::Greater),
-                Ordering::Less => None
-            }
+                Ordering::Less => None,
+            },
        }
    }

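Aside (not part of the commit): the partial_cmp hunks reformat the element-wise vector clock comparison, in which entries past the end of the shorter clock count as the implicit timestamp 0 and incomparable clocks (concurrent events) yield None. A small stand-alone sketch of that rule over plain slices, matching the behaviour exercised by the tests further down:

use std::cmp::Ordering;

/// Compare two vector clocks element-wise; entries past the end of the
/// shorter clock are treated as the implicit timestamp 0.
fn vclock_cmp(lhs: &[u32], rhs: &[u32]) -> Option<Ordering> {
    let n = lhs.len().max(rhs.len());
    let mut order = Ordering::Equal;
    for i in 0..n {
        let l = lhs.get(i).copied().unwrap_or(0);
        let r = rhs.get(i).copied().unwrap_or(0);
        match (order, l.cmp(&r)) {
            (_, Ordering::Equal) => {}
            (Ordering::Equal, o) => order = o,
            (Ordering::Less, Ordering::Greater) | (Ordering::Greater, Ordering::Less) => {
                // One component is larger on each side: the clocks are
                // incomparable, i.e. the events are concurrent.
                return None;
            }
            _ => {}
        }
    }
    Some(order)
}

fn main() {
    assert_eq!(vclock_cmp(&[1], &[1, 2]), Some(Ordering::Less));
    assert_eq!(vclock_cmp(&[2], &[1, 2]), None); // concurrent
    assert_eq!(vclock_cmp(&[0, 1, 2], &[0, 1, 2, 0, 0]), Some(Ordering::Equal));
}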
@@ -415,13 +381,13 @@ impl PartialOrd for VClock {
            let mut equal = l_len == r_len;
            for (&l, &r) in lhs_slice.iter().zip(rhs_slice.iter()) {
                if l > r {
-                    return false
+                    return false;
                } else if l < r {
                    equal = false;
                }
            }
            !equal
        } else {
            false
        }
    }
@@ -469,7 +435,7 @@ impl PartialOrd for VClock {
            let mut equal = l_len == r_len;
            for (&l, &r) in lhs_slice.iter().zip(rhs_slice.iter()) {
                if l < r {
-                    return false
+                    return false;
                } else if l > r {
                    equal = false;
                }
@@ -501,28 +467,24 @@ impl PartialOrd for VClock {
            false
        }
    }
-
}

impl Index<VectorIdx> for VClock {
-
    type Output = VTimestamp;

    #[inline]
    fn index(&self, index: VectorIdx) -> &VTimestamp {
        self.as_slice().get(index.to_u32() as usize).unwrap_or(&0)
    }
-
}


/// Test vector clock ordering operations
/// data-race detection is tested in the external
/// test suite
#[cfg(test)]
mod tests {
-
-    use super::{VClock, VTimestamp, VectorIdx, VSmallClockMap};
+    use super::{VClock, VSmallClockMap, VTimestamp, VectorIdx};
    use std::cmp::Ordering;

    #[test]
@@ -546,19 +508,43 @@ mod tests {
        assert_order(&[1], &[1], Some(Ordering::Equal));
        assert_order(&[1], &[2], Some(Ordering::Less));
        assert_order(&[2], &[1], Some(Ordering::Greater));
-        assert_order(&[1], &[1,2], Some(Ordering::Less));
-        assert_order(&[2], &[1,2], None);
+        assert_order(&[1], &[1, 2], Some(Ordering::Less));
+        assert_order(&[2], &[1, 2], None);

        // Misc tests
        assert_order(&[400], &[0, 1], None);

        // Large test
-        assert_order(&[0,1,2,3,4,5,6,7,8,9,10], &[0,1,2,3,4,5,6,7,8,9,10,0,0,0], Some(Ordering::Equal));
-        assert_order(&[0,1,2,3,4,5,6,7,8,9,10], &[0,1,2,3,4,5,6,7,8,9,10,0,1,0], Some(Ordering::Less));
-        assert_order(&[0,1,2,3,4,5,6,7,8,9,11], &[0,1,2,3,4,5,6,7,8,9,10,0,0,0], Some(Ordering::Greater));
-        assert_order(&[0,1,2,3,4,5,6,7,8,9,11], &[0,1,2,3,4,5,6,7,8,9,10,0,1,0], None);
-        assert_order(&[0,1,2,3,4,5,6,7,8,9,9 ], &[0,1,2,3,4,5,6,7,8,9,10,0,0,0], Some(Ordering::Less));
-        assert_order(&[0,1,2,3,4,5,6,7,8,9,9 ], &[0,1,2,3,4,5,6,7,8,9,10,0,1,0], Some(Ordering::Less));
+        assert_order(
+            &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+            &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 0, 0],
+            Some(Ordering::Equal),
+        );
+        assert_order(
+            &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+            &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 0],
+            Some(Ordering::Less),
+        );
+        assert_order(
+            &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11],
+            &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 0, 0],
+            Some(Ordering::Greater),
+        );
+        assert_order(
+            &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11],
+            &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 0],
+            None,
+        );
+        assert_order(
+            &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9],
+            &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 0, 0],
+            Some(Ordering::Less),
+        );
+        assert_order(
+            &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9],
+            &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 0],
+            Some(Ordering::Less),
+        );
    }

    fn from_slice(mut slice: &[VTimestamp]) -> VClock {
@@ -574,51 +560,81 @@ mod tests {

        //Test partial_cmp
        let compare = l.partial_cmp(&r);
-        assert_eq!(compare, o, "Invalid comparison\n l: {:?}\n r: {:?}",l,r);
+        assert_eq!(compare, o, "Invalid comparison\n l: {:?}\n r: {:?}", l, r);
        let alt_compare = r.partial_cmp(&l);
-        assert_eq!(alt_compare, o.map(Ordering::reverse), "Invalid alt comparison\n l: {:?}\n r: {:?}",l,r);
+        assert_eq!(
+            alt_compare,
+            o.map(Ordering::reverse),
+            "Invalid alt comparison\n l: {:?}\n r: {:?}",
+            l,
+            r
+        );

        //Test operators with faster implementations
        assert_eq!(
-            matches!(compare,Some(Ordering::Less)), l < r,
-            "Invalid (<):\n l: {:?}\n r: {:?}",l,r
+            matches!(compare, Some(Ordering::Less)),
+            l < r,
+            "Invalid (<):\n l: {:?}\n r: {:?}",
+            l,
+            r
        );
        assert_eq!(
-            matches!(compare,Some(Ordering::Less) | Some(Ordering::Equal)), l <= r,
-            "Invalid (<=):\n l: {:?}\n r: {:?}",l,r
+            matches!(compare, Some(Ordering::Less) | Some(Ordering::Equal)),
+            l <= r,
+            "Invalid (<=):\n l: {:?}\n r: {:?}",
+            l,
+            r
        );
        assert_eq!(
-            matches!(compare,Some(Ordering::Greater)), l > r,
-            "Invalid (>):\n l: {:?}\n r: {:?}",l,r
+            matches!(compare, Some(Ordering::Greater)),
+            l > r,
+            "Invalid (>):\n l: {:?}\n r: {:?}",
+            l,
+            r
        );
        assert_eq!(
-            matches!(compare,Some(Ordering::Greater) | Some(Ordering::Equal)), l >= r,
-            "Invalid (>=):\n l: {:?}\n r: {:?}",l,r
+            matches!(compare, Some(Ordering::Greater) | Some(Ordering::Equal)),
+            l >= r,
+            "Invalid (>=):\n l: {:?}\n r: {:?}",
+            l,
+            r
        );
        assert_eq!(
-            matches!(alt_compare,Some(Ordering::Less)), r < l,
-            "Invalid alt (<):\n l: {:?}\n r: {:?}",l,r
+            matches!(alt_compare, Some(Ordering::Less)),
+            r < l,
+            "Invalid alt (<):\n l: {:?}\n r: {:?}",
+            l,
+            r
        );
        assert_eq!(
-            matches!(alt_compare,Some(Ordering::Less) | Some(Ordering::Equal)), r <= l,
-            "Invalid alt (<=):\n l: {:?}\n r: {:?}",l,r
+            matches!(alt_compare, Some(Ordering::Less) | Some(Ordering::Equal)),
+            r <= l,
+            "Invalid alt (<=):\n l: {:?}\n r: {:?}",
+            l,
+            r
        );
        assert_eq!(
-            matches!(alt_compare,Some(Ordering::Greater)), r > l,
-            "Invalid alt (>):\n l: {:?}\n r: {:?}",l,r
+            matches!(alt_compare, Some(Ordering::Greater)),
+            r > l,
+            "Invalid alt (>):\n l: {:?}\n r: {:?}",
+            l,
+            r
        );
        assert_eq!(
-            matches!(alt_compare,Some(Ordering::Greater) | Some(Ordering::Equal)), r >= l,
-            "Invalid alt (>=):\n l: {:?}\n r: {:?}",l,r
+            matches!(alt_compare, Some(Ordering::Greater) | Some(Ordering::Equal)),
+            r >= l,
+            "Invalid alt (>=):\n l: {:?}\n r: {:?}",
+            l,
+            r
        );
    }

    #[test]
    pub fn test_vclock_set() {
        let mut map = VSmallClockMap::default();
-        let v1 = from_slice(&[3,0,1]);
-        let v2 = from_slice(&[4,2,3]);
-        let v3 = from_slice(&[4,8,3]);
+        let v1 = from_slice(&[3, 0, 1]);
+        let v2 = from_slice(&[4, 2, 3]);
+        let v3 = from_slice(&[4, 8, 3]);
        map.insert(VectorIdx(0), &v1);
        assert_eq!(map.get(VectorIdx(0)), Some(&v1));
        map.insert(VectorIdx(5), &v2);
@@ -641,5 +657,4 @@ mod tests {
        assert_eq!(map.get(VectorIdx(5)), None);
        assert_eq!(map.get(VectorIdx(53)), Some(&v3));
    }
-
}