Apply review changes: incrementing the clocks twice was an unnecessary hold-over from earlier versions, so it has been fixed.

This commit is contained in:
JCTyBlaidd 2020-11-27 19:26:06 +00:00
parent 3268f56a97
commit 55fc552d99
25 changed files with 95 additions and 40 deletions

View File

@ -37,6 +37,24 @@
//! to an acquire load and a release store given the global sequentially consistent order
//! of the schedule.
//!
//! The timestamps used in the data-race detector assign each sequence of non-atomic operations
//! followed by a single atomic or concurrent operation a single timestamp.
//! Write, Read, Write, ThreadJoin will be represented by a single timestamp value on a thread.
//! This is because extra increment operations between the operations in the sequence are not
//! required for accurate reporting of data-race values.
//!
//! If the timestamp was not incremented after the atomic operation, then data-races would not be detected:
//! Example - this should report a data-race but does not:
//! t1: (x,0), atomic[release A], t1=(x+1, 0 ), write(var B)
//! t2: (0,y), atomic[acquire A], t2=(x+1, y+1), write(var B)
//!
//! The timestamp is not incremented before an atomic operation, since the result is indistinguishable
//! from the value not being incremented.
//! t: (x, 0), atomic[release _], (x + 1, 0) || (0, y), atomic[acquire _], (x, _)
//! vs t: (x, 0), atomic[release _], (x + 1, 0) || (0, y), atomic[acquire _], (x+1, _)
//! Both result in the sequence on thread x up to and including the atomic release as happening
//! before the acquire.
//!
//! FIXME:
//! currently we have our own local copy of the currently active thread index and names, this is due
//! in part to the inability to access the current location of threads.active_thread inside the AllocExtra
@ -499,7 +517,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
}
/// Perform an atomic compare and exchange at a given memory location
/// on success an atomic RMW operation is performed and on failure
/// On success an atomic RMW operation is performed and on failure
/// only an atomic read occurs.
fn atomic_compare_exchange_scalar(
&mut self,
@ -1136,9 +1154,6 @@ impl GlobalState {
// Now load the two clocks and configure the initial state.
let (current, created) = vector_clocks.pick2_mut(current_index, created_index);
// Advance the current thread before the synchronized operation.
current.increment_clock(current_index);
// Join the created with current, since the current threads
// previous actions happen-before the created thread.
created.join_with(current);
@ -1167,14 +1182,12 @@ impl GlobalState {
.as_ref()
.expect("Joined with thread but thread has not terminated");
// Pre increment clocks before atomic operation.
current.increment_clock(current_index);
// The join thread happens-before the current thread
// so update the current vector clock.
current.clock.join(join_clock);
// Post increment clocks after atomic operation.
// Increment clocks after atomic operation.
current.increment_clock(current_index);
// Check the number of active threads, if the value is 1
@ -1277,8 +1290,7 @@ impl GlobalState {
op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx>,
) -> InterpResult<'tcx> {
if self.multi_threaded.get() {
let (index, mut clocks) = self.current_thread_state_mut();
clocks.increment_clock(index);
let (index, clocks) = self.current_thread_state_mut();
op(index, clocks)?;
let (_, mut clocks) = self.current_thread_state_mut();
clocks.increment_clock(index);
@ -1303,16 +1315,18 @@ impl GlobalState {
/// `validate_lock_release` must happen before this.
pub fn validate_lock_acquire(&self, lock: &VClock, thread: ThreadId) {
let (index, mut clocks) = self.load_thread_state_mut(thread);
clocks.increment_clock(index);
clocks.clock.join(&lock);
clocks.increment_clock(index);
}
/// Release a lock handle, express that this happens-before
/// any subsequent calls to `validate_lock_acquire`.
/// For normal locks this should be equivalent to `validate_lock_release_shared`
since an acquire operation should have occurred before, however
/// for futex & cond-var operations this is not the case and this
/// operation must be used.
pub fn validate_lock_release(&self, lock: &mut VClock, thread: ThreadId) {
let (index, mut clocks) = self.load_thread_state_mut(thread);
clocks.increment_clock(index);
lock.clone_from(&clocks.clock);
clocks.increment_clock(index);
}
@ -1321,9 +1335,11 @@ impl GlobalState {
/// any subsequent calls to `validate_lock_acquire` as well
/// as any previous calls to this function after any
/// `validate_lock_release` calls.
/// For normal locks this should be equivalent to `validate_lock_release`;
/// this function only exists for joining over the set of concurrent readers
/// in a read-write lock and should not be used for anything else.
pub fn validate_lock_release_shared(&self, lock: &mut VClock, thread: ThreadId) {
let (index, mut clocks) = self.load_thread_state_mut(thread);
clocks.increment_clock(index);
lock.join(&clocks.clock);
clocks.increment_clock(index);
}

View File

@ -62,9 +62,11 @@ fn mutex_get_kind<'mir, 'tcx: 'mir>(
mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
//FIXME: this has been made atomic to fix data-race reporting inside the internal
// mutex implementation, it may not need to be atomic.
ecx.read_scalar_at_offset_atomic(
mutex_op, offset, ecx.machine.layouts.i32,
AtomicReadOp::Acquire
AtomicReadOp::Relaxed
)
}
@ -74,9 +76,11 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>(
kind: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
//FIXME: this has been made atomic to fix data-race reporting inside the internal
// mutex implementation, it may not need to be atomic.
ecx.write_scalar_at_offset_atomic(
mutex_op, offset, kind, ecx.machine.layouts.i32,
AtomicWriteOp::Release
mutex_op, offset, kind, ecx.machine.layouts.i32,
AtomicWriteOp::Relaxed
)
}
@ -84,8 +88,11 @@ fn mutex_get_id<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
//FIXME: this has been made atomic to fix data-race reporting inside the internal
// mutex implementation, it may not need to be atomic.
ecx.read_scalar_at_offset_atomic(
mutex_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Acquire
mutex_op, 4, ecx.machine.layouts.u32,
AtomicReadOp::Relaxed
)
}
@ -94,9 +101,11 @@ fn mutex_set_id<'mir, 'tcx: 'mir>(
mutex_op: OpTy<'tcx, Tag>,
id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
//FIXME: this has been made atomic to fix data-race reporting inside the internal
// mutex implementation, it may not need to be atomic.
ecx.write_scalar_at_offset_atomic(
mutex_op, 4, id, ecx.machine.layouts.u32,
AtomicWriteOp::Release
AtomicWriteOp::Relaxed
)
}
@ -126,10 +135,12 @@ fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
fn rwlock_get_id<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
rwlock_op: OpTy<'tcx, Tag>,
//FIXME: this has been made atomic to fix data-race reporting inside the internal
// rw-lock implementation, it may not need to be atomic.
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
ecx.read_scalar_at_offset_atomic(
rwlock_op, 4, ecx.machine.layouts.u32,
AtomicReadOp::Acquire
AtomicReadOp::Relaxed
)
}
@ -138,9 +149,11 @@ fn rwlock_set_id<'mir, 'tcx: 'mir>(
rwlock_op: OpTy<'tcx, Tag>,
id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
//FIXME: this has been made atomic to fix data-race reporting inside the internal
// rw-lock implementation, it may not need to be atomic.
ecx.write_scalar_at_offset_atomic(
rwlock_op, 4, id, ecx.machine.layouts.u32,
AtomicWriteOp::Release
AtomicWriteOp::Relaxed
)
}
@ -194,9 +207,11 @@ fn cond_get_id<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
cond_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
//FIXME: this has been made atomic to fix data-race reporting inside the internal
// cond-var implementation, it may not need to be atomic.
ecx.read_scalar_at_offset_atomic(
cond_op, 4, ecx.machine.layouts.u32,
AtomicReadOp::Acquire
AtomicReadOp::Relaxed
)
}
@ -205,9 +220,11 @@ fn cond_set_id<'mir, 'tcx: 'mir>(
cond_op: OpTy<'tcx, Tag>,
id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
//FIXME: this has been made atomic to fix data-race reporting inside the internal
// cond-var implementation, it may not need to be atomic.
ecx.write_scalar_at_offset_atomic(
cond_op, 4, id, ecx.machine.layouts.u32,
AtomicWriteOp::Release
AtomicWriteOp::Relaxed
)
}

View File

@ -15,7 +15,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let this = self.eval_context_mut();
this.tcx.sess.warn(
"thread support is experimental, no weak memory effects are currently emulated.",
"thread support is experimental and incomplete: weak memory effects are not emulated."
);
// Create the new thread

View File

@ -29,9 +29,9 @@ fn main() {
sleep(Duration::from_millis(100));
// Spawn and immediately join a thread
// to execute the join code-path
// and ensure that data-race detection
// remains enabled
// to execute the join code-path
// and ensure that data-race detection
// remains enabled nevertheless.
spawn(|| ()).join().unwrap();
let join2 = unsafe {

View File

@ -29,9 +29,9 @@ fn main() {
sleep(Duration::from_millis(100));
// Spawn and immediately join a thread
// to execute the join code-path
// and ensure that data-race detection
// remains enabled
// to execute the join code-path
// and ensure that data-race detection
// remains enabled nevertheless.
spawn(|| ()).join().unwrap();

View File

@ -9,7 +9,7 @@ unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
pub fn main() {
// Enable and the join with multiple threads
// Enable and then join with multiple threads.
let t1 = spawn(|| ());
let t2 = spawn(|| ());
let t3 = spawn(|| ());
@ -19,7 +19,7 @@ pub fn main() {
t3.join().unwrap();
t4.join().unwrap();
// Perform write-write data race detection
// Perform write-write data race detection.
let mut a = 0u32;
let b = &mut a as *mut u32;
let c = EvilSend(b);

View File

@ -16,6 +16,13 @@ pub fn main() {
let b = &mut a as *mut u32;
let c = EvilSend(b);
// Note: this is scheduler-dependent
// the operations need to occur in
// order:
// 1. store release : 1
// 2. load acquire : 1
// 3. store relaxed : 2
// 4. load acquire : 2
unsafe {
let j1 = spawn(move || {
*c.0 = 1;

View File

@ -18,6 +18,14 @@ pub fn main() {
let b = &mut a as *mut u32;
let c = EvilSend(b);
// Note: this is scheduler-dependent
// the operations need to occur in
// order, the sleep operations currently
// force the desired ordering:
// 1. store release : 1
// 2. store relaxed : 2
// 3. store relaxed : 3
// 4. load acquire : 3
unsafe {
let j1 = spawn(move || {
*c.0 = 1;

View File

@ -16,6 +16,13 @@ pub fn main() {
let b = &mut a as *mut u32;
let c = EvilSend(b);
// Note: this is scheduler-dependent
// the operations need to occur in
// order:
// 1. store release : 1
// 2. RMW relaxed : 1 -> 2
// 3. store relaxed : 3
// 4. load acquire : 3
unsafe {
let j1 = spawn(move || {
*c.0 = 1;

View File

@ -1,2 +1,2 @@
warning: thread support is experimental, no weak memory effects are currently emulated.
warning: thread support is experimental and incomplete: weak memory effects are not emulated.

View File

@ -19,7 +19,7 @@ pub fn main() {
});
let j2 = spawn(move || {
*c.0 = 64; //~ ERROR Data race
*c.0 = 64; //~ ERROR Data race (but not detected as the detector is disabled)
});
j1.join().unwrap();

View File

@ -1,2 +1,2 @@
warning: thread support is experimental, no weak memory effects are currently emulated.
warning: thread support is experimental and incomplete: weak memory effects are not emulated.

View File

@ -1,2 +1,2 @@
warning: thread support is experimental, no weak memory effects are currently emulated.
warning: thread support is experimental and incomplete: weak memory effects are not emulated.

View File

@ -1,4 +1,4 @@
warning: thread support is experimental, no weak memory effects are currently emulated.
warning: thread support is experimental and incomplete: weak memory effects are not emulated.
thread '<unnamed>' panicked at 'Hello!', $DIR/simple.rs:54:9
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace

View File

@ -1,2 +1,2 @@
warning: thread support is experimental, no weak memory effects are currently emulated.
warning: thread support is experimental and incomplete: weak memory effects are not emulated.

View File

@ -1,2 +1,2 @@
warning: thread support is experimental, no weak memory effects are currently emulated.
warning: thread support is experimental and incomplete: weak memory effects are not emulated.

View File

@ -1,2 +1,2 @@
warning: thread support is experimental, no weak memory effects are currently emulated.
warning: thread support is experimental and incomplete: weak memory effects are not emulated.

View File

@ -1,2 +1,2 @@
warning: thread support is experimental, no weak memory effects are currently emulated.
warning: thread support is experimental and incomplete: weak memory effects are not emulated.

View File

@ -1,4 +1,4 @@
warning: thread support is experimental, no weak memory effects are currently emulated.
warning: thread support is experimental and incomplete: weak memory effects are not emulated.
Thread 1 starting, will block on mutex
Thread 1 reported it has started