diff --git a/src/tools/miri/src/alloc_addresses/mod.rs b/src/tools/miri/src/alloc_addresses/mod.rs
index 612649c90c6..d59a2cee94e 100644
--- a/src/tools/miri/src/alloc_addresses/mod.rs
+++ b/src/tools/miri/src/alloc_addresses/mod.rs
@@ -13,7 +13,7 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_span::Span;
 use rustc_target::abi::{Align, HasDataLayout, Size};
 
-use crate::*;
+use crate::{concurrency::VClock, *};
 
 use self::reuse_pool::ReusePool;
 
@@ -172,7 +172,7 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
                     ecx.get_active_thread(),
                 ) {
                     if let Some(data_race) = &ecx.machine.data_race {
-                        data_race.validate_lock_acquire(&clock, ecx.get_active_thread());
+                        data_race.acquire_clock(&clock, ecx.get_active_thread());
                     }
                     reuse_addr
                 } else {
@@ -369,15 +369,13 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
             // Also remember this address for future reuse.
             let thread = self.threads.get_active_thread_id();
             global_state.reuse.add_addr(rng, addr, size, align, kind, thread, || {
-                let mut clock = concurrency::VClock::default();
                 if let Some(data_race) = &self.data_race {
-                    data_race.validate_lock_release(
-                        &mut clock,
-                        thread,
-                        self.threads.active_thread_ref().current_span(),
-                    );
+                    data_race
+                        .release_clock(thread, self.threads.active_thread_ref().current_span())
+                        .clone()
+                } else {
+                    VClock::default()
                 }
-                clock
             })
         }
     }
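The reuse-pool hunk above shows the new API pattern in miniature: retiring an address stores a snapshot of the releasing thread's clock (`release_clock(..).clone()`), and reusing that address joins the snapshot into the acquiring thread's clock (`acquire_clock`). A minimal standalone sketch of that release/acquire discipline, using an invented toy `VClock` rather than Miri's actual type:

```rust
use std::collections::HashMap;

// Toy vector clock: thread index -> timestamp. Not Miri's implementation.
#[derive(Clone, Default, Debug)]
struct VClock(HashMap<usize, u64>);

impl VClock {
    /// Advance this thread's own component (the "release" tick).
    fn tick(&mut self, thread: usize) {
        *self.0.entry(thread).or_insert(0) += 1;
    }

    /// Pointwise maximum: afterwards this clock has seen everything
    /// the other clock has seen (the "acquire" step).
    fn join(&mut self, other: &VClock) {
        for (&t, &ts) in &other.0 {
            let e = self.0.entry(t).or_insert(0);
            *e = (*e).max(ts);
        }
    }
}

fn main() {
    // Thread 0 retires an address: tick, then snapshot the clock
    // (what `release_clock(..).clone()` stores in the reuse pool).
    let mut thread0 = VClock::default();
    thread0.tick(0);
    let stored = thread0.clone();

    // Thread 1 later reuses the address: joining the stored snapshot
    // (what `acquire_clock` does) establishes happens-before with the
    // release point, so old accesses cannot race with the new ones.
    let mut thread1 = VClock::default();
    thread1.join(&stored);
    assert_eq!(thread1.0.get(&0), Some(&1));
}
```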
diff --git a/src/tools/miri/src/concurrency/data_race.rs b/src/tools/miri/src/concurrency/data_race.rs
index 95049b91cba..6c34716b592 100644
--- a/src/tools/miri/src/concurrency/data_race.rs
+++ b/src/tools/miri/src/concurrency/data_race.rs
@@ -1701,49 +1701,34 @@ impl GlobalState {
         format!("thread `{thread_name}`")
     }
 
-    /// Acquire a lock, express that the previous call of
-    /// `validate_lock_release` must happen before this.
+    /// Acquire the given clock into the given thread, establishing synchronization with
+    /// the moment when that clock snapshot was taken via `release_clock`.
     /// As this is an acquire operation, the thread timestamp is not
    /// incremented.
-    pub fn validate_lock_acquire(&self, lock: &VClock, thread: ThreadId) {
-        let (_, mut clocks) = self.load_thread_state_mut(thread);
+    pub fn acquire_clock(&self, lock: &VClock, thread: ThreadId) {
+        let (_, mut clocks) = self.thread_state_mut(thread);
         clocks.clock.join(lock);
     }
 
-    /// Release a lock handle, express that this happens-before
-    /// any subsequent calls to `validate_lock_acquire`.
-    /// For normal locks this should be equivalent to `validate_lock_release_shared`
-    /// since an acquire operation should have occurred before, however
-    /// for futex & condvar operations this is not the case and this
-    /// operation must be used.
-    pub fn validate_lock_release(&self, lock: &mut VClock, thread: ThreadId, current_span: Span) {
-        let (index, mut clocks) = self.load_thread_state_mut(thread);
-        lock.clone_from(&clocks.clock);
-        clocks.increment_clock(index, current_span);
-    }
-
-    /// Release a lock handle, express that this happens-before
-    /// any subsequent calls to `validate_lock_acquire` as well
-    /// as any previous calls to this function after any
-    /// `validate_lock_release` calls.
-    /// For normal locks this should be equivalent to `validate_lock_release`.
-    /// This function only exists for joining over the set of concurrent readers
-    /// in a read-write lock and should not be used for anything else.
-    pub fn validate_lock_release_shared(
-        &self,
-        lock: &mut VClock,
-        thread: ThreadId,
-        current_span: Span,
-    ) {
-        let (index, mut clocks) = self.load_thread_state_mut(thread);
-        lock.join(&clocks.clock);
+    /// Returns the `release` clock of the given thread.
+    /// Other threads can acquire this clock in the future to establish synchronization
+    /// with this program point.
+    pub fn release_clock(&self, thread: ThreadId, current_span: Span) -> Ref<'_, VClock> {
+        // We increment the clock each time this happens, to ensure no two releases
+        // can be confused with each other.
+        let (index, mut clocks) = self.thread_state_mut(thread);
         clocks.increment_clock(index, current_span);
+        drop(clocks);
+        // To return a read-only view, we need to release the RefCell
+        // and borrow it again.
+        let (_index, clocks) = self.thread_state(thread);
+        Ref::map(clocks, |c| &c.clock)
     }
 
     /// Load the vector index used by the given thread as well as the set of vector clocks
     /// used by the thread.
     #[inline]
-    fn load_thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
+    fn thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
         let index = self.thread_info.borrow()[thread]
             .vector_index
             .expect("Loading thread state for thread with no assigned vector");
@@ -1752,6 +1737,18 @@ impl GlobalState {
         (index, clocks)
     }
 
+    /// Load the vector index used by the given thread as well as the set of vector clocks
+    /// used by the thread.
+    #[inline]
+    fn thread_state(&self, thread: ThreadId) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
+        let index = self.thread_info.borrow()[thread]
+            .vector_index
+            .expect("Loading thread state for thread with no assigned vector");
+        let ref_vector = self.vector_clocks.borrow();
+        let clocks = Ref::map(ref_vector, |vec| &vec[index]);
+        (index, clocks)
+    }
+
     /// Load the current vector clock in use and the current set of thread clocks
     /// in use for the vector.
     #[inline]
@@ -1759,10 +1756,7 @@
         &self,
         thread_mgr: &ThreadManager<'_, '_>,
     ) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
-        let index = self.current_index(thread_mgr);
-        let ref_vector = self.vector_clocks.borrow();
-        let clocks = Ref::map(ref_vector, |vec| &vec[index]);
-        (index, clocks)
+        self.thread_state(thread_mgr.get_active_thread_id())
     }
 
     /// Load the current vector clock in use and the current set of thread clocks
@@ -1772,10 +1766,7 @@
         &self,
         thread_mgr: &ThreadManager<'_, '_>,
     ) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
-        let index = self.current_index(thread_mgr);
-        let ref_vector = self.vector_clocks.borrow_mut();
-        let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
-        (index, clocks)
+        self.thread_state_mut(thread_mgr.get_active_thread_id())
     }
 
     /// Return the current thread, should be the same
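`release_clock` now hands callers a read-only view instead of filling in a caller-supplied `&mut VClock`: it bumps the thread's clock under a `RefMut`, drops that borrow, and re-borrows the same `RefCell` immutably via `Ref::map`. A self-contained sketch of that drop-and-reborrow dance (the `Global`/`State` names here are invented, not Miri's):

```rust
use std::cell::{Ref, RefCell, RefMut};

struct State {
    counter: u64,
}

struct Global {
    state: RefCell<State>,
}

impl Global {
    // Mirrors release_clock's borrow discipline: mutate under a RefMut,
    // release it, then return a read-only Ref into the same cell.
    fn bump_and_view(&self) -> Ref<'_, u64> {
        let mut st: RefMut<'_, State> = self.state.borrow_mut();
        st.counter += 1;
        // The mutable borrow must end before we can borrow again.
        drop(st);
        Ref::map(self.state.borrow(), |s| &s.counter)
    }
}

fn main() {
    let g = Global { state: RefCell::new(State { counter: 0 }) };
    let view = g.bump_and_view();
    assert_eq!(*view, 1);
    // The Ref keeps the cell borrowed; callers copy what they need and
    // then drop it (Miri's call sites clone the VClock out of the Ref).
    let copy = *view;
    drop(view);
    assert_eq!(copy, 1);
}
```

Returning `Ref<'_, VClock>` lets each call site decide between `.clone()` (the reuse pool) and `.clone_from(...)` (the sync primitives), and it retires the easy-to-misuse out-parameter of the old `validate_lock_release`.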
diff --git a/src/tools/miri/src/concurrency/init_once.rs b/src/tools/miri/src/concurrency/init_once.rs
index 9dbea08f3ef..a01b59c9165 100644
--- a/src/tools/miri/src/concurrency/init_once.rs
+++ b/src/tools/miri/src/concurrency/init_once.rs
@@ -41,7 +41,7 @@ pub enum InitOnceStatus {
 pub(super) struct InitOnce<'mir, 'tcx> {
     status: InitOnceStatus,
     waiters: VecDeque<InitOnceWaiter<'mir, 'tcx>>,
-    data_race: VClock,
+    clock: VClock,
 }
 
 impl<'mir, 'tcx> VisitProvenance for InitOnce<'mir, 'tcx> {
@@ -61,10 +61,8 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let current_thread = this.get_active_thread();
 
         if let Some(data_race) = &this.machine.data_race {
-            data_race.validate_lock_acquire(
-                &this.machine.threads.sync.init_onces[id].data_race,
-                current_thread,
-            );
+            data_race
+                .acquire_clock(&this.machine.threads.sync.init_onces[id].clock, current_thread);
         }
     }
 
@@ -176,7 +174,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 
         // Each complete happens-before the end of the wait
         if let Some(data_race) = &this.machine.data_race {
-            data_race.validate_lock_release(&mut init_once.data_race, current_thread, current_span);
+            init_once.clock.clone_from(&data_race.release_clock(current_thread, current_span));
         }
 
         // Wake up everyone.
@@ -202,7 +200,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 
         // Each complete happens-before the end of the wait
         if let Some(data_race) = &this.machine.data_race {
-            data_race.validate_lock_release(&mut init_once.data_race, current_thread, current_span);
+            init_once.clock.clone_from(&data_race.release_clock(current_thread, current_span));
         }
 
         // Wake up one waiting thread, so they can go ahead and try to init this.
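The init-once call sites copy the returned clock with `clone_from`, which overwrites the destination in place and can reuse its existing buffer; the temporary `Ref` borrow must end before the detector is borrowed again. A minimal sketch of that pattern, with `Vec<u64>` standing in for `VClock`:

```rust
use std::cell::{Ref, RefCell};

fn main() {
    // Stand-ins: the RefCell plays the role of the detector's interior
    // state, the Vec plays the role of a VClock.
    let shared = RefCell::new(vec![1u64, 2, 3]);
    let mut snapshot: Vec<u64> = Vec::new();

    // `clone_from` overwrites `snapshot` in place, reusing its allocation
    // where possible -- the same reason the new call sites prefer
    // `init_once.clock.clone_from(&...)` over `= ...clone()`.
    let view: Ref<'_, Vec<u64>> = shared.borrow();
    snapshot.clone_from(&*view);
    drop(view); // end the borrow before the RefCell is touched again

    assert_eq!(snapshot, vec![1, 2, 3]);
}
```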
diff --git a/src/tools/miri/src/concurrency/sync.rs b/src/tools/miri/src/concurrency/sync.rs
index 0a428715690..d3cef8bf5f3 100644
--- a/src/tools/miri/src/concurrency/sync.rs
+++ b/src/tools/miri/src/concurrency/sync.rs
@@ -69,12 +69,8 @@ struct Mutex {
     lock_count: usize,
     /// The queue of threads waiting for this mutex.
     queue: VecDeque<ThreadId>,
-    /// Data race handle. This tracks the happens-before
-    /// relationship between each mutex access. It is
-    /// released to during unlock and acquired from during
-    /// locking, and therefore stores the clock of the last
-    /// thread to release this mutex.
-    data_race: VClock,
+    /// Mutex clock. This tracks the moment of the last unlock.
+    clock: VClock,
 }
 
 declare_id!(RwLockId);
@@ -91,7 +87,7 @@ struct RwLock {
     writer_queue: VecDeque<ThreadId>,
     /// The queue of reader threads waiting for this lock.
     reader_queue: VecDeque<ThreadId>,
-    /// Data race handle for writers. Tracks the happens-before
+    /// Data race clock for writers. Tracks the happens-before
     /// ordering between each write access to a rwlock and is updated
     /// after a sequence of concurrent readers to track the happens-
     /// before ordering between the set of previous readers and
     /// the next writer.
@@ -99,8 +95,8 @@ struct RwLock {
     /// Contains the clock of the last thread to release a writer
     /// lock or the joined clock of the set of last threads to release
     /// shared reader locks.
-    data_race: VClock,
-    /// Data race handle for readers. This is temporary storage
+    clock_unlocked: VClock,
+    /// Data race clock for readers. This is temporary storage
     /// for the combined happens-before ordering for between all
     /// concurrent readers and the next writer, and the value
     /// is stored to the main data_race variable once all
@@ -110,7 +106,7 @@ struct RwLock {
     /// add happens-before orderings between shared reader
     /// locks.
     /// This is only relevant when there is an active reader.
-    data_race_reader: VClock,
+    clock_current_readers: VClock,
 }
 
 declare_id!(CondvarId);
@@ -132,8 +128,8 @@ struct Condvar {
     /// between a cond-var signal and a cond-var
     /// wait during a non-spurious signal event.
     /// Contains the clock of the last thread to
-    /// perform a futex-signal.
-    data_race: VClock,
+    /// perform a condvar-signal.
+    clock: VClock,
 }
 
 /// The futex state.
@@ -145,7 +141,7 @@ struct Futex {
     /// during a non-spurious wake event.
     /// Contains the clock of the last thread to
     /// perform a futex-wake.
-    data_race: VClock,
+    clock: VClock,
 }
 
 /// A thread waiting on a futex.
@@ -346,7 +342,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         }
         mutex.lock_count = mutex.lock_count.checked_add(1).unwrap();
         if let Some(data_race) = &this.machine.data_race {
-            data_race.validate_lock_acquire(&mutex.data_race, thread);
+            data_race.acquire_clock(&mutex.clock, thread);
         }
     }
 
@@ -373,11 +369,7 @@
                 // The mutex is completely unlocked. Try transferring ownership
                 // to another thread.
                 if let Some(data_race) = &this.machine.data_race {
-                    data_race.validate_lock_release(
-                        &mut mutex.data_race,
-                        current_owner,
-                        current_span,
-                    );
+                    mutex.clock.clone_from(&data_race.release_clock(current_owner, current_span));
                 }
                 this.mutex_dequeue_and_lock(id);
             }
@@ -448,7 +440,7 @@
         let count = rwlock.readers.entry(reader).or_insert(0);
         *count = count.checked_add(1).expect("the reader counter overflowed");
         if let Some(data_race) = &this.machine.data_race {
-            data_race.validate_lock_acquire(&rwlock.data_race, reader);
+            data_race.acquire_clock(&rwlock.clock_unlocked, reader);
         }
     }
 
@@ -474,20 +466,16 @@
         }
         if let Some(data_race) = &this.machine.data_race {
             // Add this to the shared-release clock of all concurrent readers.
-            data_race.validate_lock_release_shared(
-                &mut rwlock.data_race_reader,
-                reader,
-                current_span,
-            );
+            rwlock.clock_current_readers.join(&data_race.release_clock(reader, current_span));
         }
 
         // The thread was a reader. If the lock is not held any more, give it to a writer.
         if this.rwlock_is_locked(id).not() {
             // All the readers are finished, so set the writer data-race handle to the value
-            // of the union of all reader data race handles, since the set of readers
-            // happen-before the writers
+            // of the union of all reader data race handles, since the set of readers
+            // happen-before the writers
             let rwlock = &mut this.machine.threads.sync.rwlocks[id];
-            rwlock.data_race.clone_from(&rwlock.data_race_reader);
+            rwlock.clock_unlocked.clone_from(&rwlock.clock_current_readers);
             this.rwlock_dequeue_and_lock_writer(id);
         }
         true
@@ -511,7 +499,7 @@
         let rwlock = &mut this.machine.threads.sync.rwlocks[id];
         rwlock.writer = Some(writer);
         if let Some(data_race) = &this.machine.data_race {
-            data_race.validate_lock_acquire(&rwlock.data_race, writer);
+            data_race.acquire_clock(&rwlock.clock_unlocked, writer);
         }
     }
 
@@ -530,11 +518,9 @@
         trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, expected_writer);
         // Release memory to next lock holder.
         if let Some(data_race) = &this.machine.data_race {
-            data_race.validate_lock_release(
-                &mut rwlock.data_race,
-                current_writer,
-                current_span,
-            );
+            rwlock
+                .clock_unlocked
+                .clone_from(&*data_race.release_clock(current_writer, current_span));
         }
         // The thread was a writer.
         //
@@ -611,11 +597,11 @@
 
         // Each condvar signal happens-before the end of the condvar wake
         if let Some(data_race) = data_race {
-            data_race.validate_lock_release(&mut condvar.data_race, current_thread, current_span);
+            condvar.clock.clone_from(&*data_race.release_clock(current_thread, current_span));
         }
         condvar.waiters.pop_front().map(|waiter| {
             if let Some(data_race) = data_race {
-                data_race.validate_lock_acquire(&condvar.data_race, waiter.thread);
+                data_race.acquire_clock(&condvar.clock, waiter.thread);
             }
             (waiter.thread, waiter.lock)
         })
@@ -645,14 +631,14 @@
 
         // Each futex-wake happens-before the end of the futex wait
         if let Some(data_race) = data_race {
-            data_race.validate_lock_release(&mut futex.data_race, current_thread, current_span);
+            futex.clock.clone_from(&*data_race.release_clock(current_thread, current_span));
         }
 
         // Wake up the first thread in the queue that matches any of the bits in the bitset.
         futex.waiters.iter().position(|w| w.bitset & bitset != 0).map(|i| {
             let waiter = futex.waiters.remove(i).unwrap();
             if let Some(data_race) = data_race {
-                data_race.validate_lock_acquire(&futex.data_race, waiter.thread);
+                data_race.acquire_clock(&futex.clock, waiter.thread);
             }
             waiter.thread
        })
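The rwlock hunks preserve one subtle asymmetry: a writer unlock overwrites `clock_unlocked` (`clone_from`), but a reader unlock joins into `clock_current_readers`, because the next writer must happen-after every concurrent reader, not just the last one to unlock. A toy illustration of why the join is required (invented minimal clock, not Miri's types):

```rust
use std::collections::HashMap;

// Toy clock: thread index -> timestamp.
type Clock = HashMap<usize, u64>;

/// Pointwise maximum, the same operation as `VClock::join`.
fn join(into: &mut Clock, other: &Clock) {
    for (&t, &ts) in other {
        let e = into.entry(t).or_insert(0);
        *e = (*e).max(ts);
    }
}

fn main() {
    // Two concurrent readers release with different clocks.
    let reader_a = Clock::from([(0, 5)]);
    let reader_b = Clock::from([(1, 7)]);

    // rwlock_reader_unlock: each reader joins into clock_current_readers.
    let mut clock_current_readers = Clock::new();
    join(&mut clock_current_readers, &reader_a);
    join(&mut clock_current_readers, &reader_b);

    // Once the last reader is gone, the combined clock becomes
    // clock_unlocked, so the next writer acquires *both* histories.
    // An overwrite instead of a join would have dropped reader_a's entry.
    let clock_unlocked = clock_current_readers.clone();
    assert_eq!(clock_unlocked.get(&0), Some(&5));
    assert_eq!(clock_unlocked.get(&1), Some(&7));
}
```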