Destroy store buffers on non-racy non-atomic accesses

Andy Wang 2022-05-24 22:03:04 +01:00
parent 2321b15342
commit 226ed41cca
4 changed files with 53 additions and 10 deletions


@@ -125,6 +125,14 @@ pub fn insert_at_pos(&mut self, pos: Position, range: AllocRange, data: T) {
             debug_assert!(range.end() <= self.v[pos + 1].range.start);
         }
     }
+
+    pub fn remove_pos_range(&mut self, pos_range: Range<Position>) {
+        self.v.drain(pos_range);
+    }
+
+    pub fn remove_from_pos(&mut self, pos: Position) {
+        self.v.remove(pos);
+    }
 }
 
 impl<T> Index<Position> for AllocationMap<T> {


@@ -1200,6 +1200,10 @@ fn race_detecting(&self) -> bool {
         self.multi_threaded.get() && !self.ongoing_atomic_access.get()
     }
 
+    pub fn ongoing_atomic_access(&self) -> bool {
+        self.ongoing_atomic_access.get()
+    }
+
     // Try to find vector index values that can potentially be re-used
     // by a new thread instead of a new vector index being created.
     fn find_vector_index_reuse_candidate(&self) -> Option<VectorIdx> {


@@ -11,10 +11,10 @@
 //! This implementation is not fully correct under the revised C++20 model and may generate behaviours C++20
 //! disallows.
 //!
-//! Rust follows the full C++20 memory model (except for the Consume ordering). It is therefore
-//! possible for this implementation to generate behaviours never observable when the same program is compiled and
-//! run natively. Unfortunately, no literature exists at the time of writing which proposes an implementable and C++20-compatible
-//! relaxed memory model that supports all atomic operation existing in Rust. The closest one is
+//! Rust follows the C++20 memory model (except for the Consume ordering and some operations not performable through C++'s
+//! std::atomic<T> API). It is therefore possible for this implementation to generate behaviours never observable when the
+//! same program is compiled and run natively. Unfortunately, no literature exists at the time of writing which proposes
+//! an implementable and C++20-compatible relaxed memory model that supports all atomic operation existing in Rust. The closest one is
 //! A Promising Semantics for Relaxed-Memory Concurrency by Jeehoon Kang et al. (https://www.cs.tau.ac.il/~orilahav/papers/popl17.pdf)
 //! However, this model lacks SC accesses and is therefore unusable by Miri (SC accesses are everywhere in library code).
 //!
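
For illustration (this example is not part of the diff): one operation Rust permits but C++'s std::atomic&lt;T&gt; cannot express is a plain, non-atomic access to an atomic object's storage, for example through AtomicU32::get_mut or a raw pointer; std::atomic&lt;T&gt; owns its value and offers no such escape hatch (C++20's std::atomic_ref only narrows the gap). A minimal sketch:

use std::sync::atomic::{AtomicU32, Ordering};

fn main() {
    let mut a = AtomicU32::new(0);

    // Atomic access, expressible through std::atomic<T> in C++.
    a.store(1, Ordering::Relaxed);

    // Plain, non-atomic access to the same location: `get_mut` hands out a
    // `&mut u32` to the atomic's storage, something std::atomic<T> cannot do.
    *a.get_mut() = 2;

    assert_eq!(a.load(Ordering::Relaxed), 2);
}

Both accesses target the same allocation range under Miri, which is why the weak-memory machinery has to cope with mixed atomic and plain accesses in the first place.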
@@ -117,6 +117,26 @@ pub fn new_allocation() -> Self {
         Self { store_buffers: RefCell::new(AllocationMap::new()) }
     }
 
+    /// When a non-atomic access happens on a location that has been atomically accessed
+    /// before without data race, we can determine that the non-atomic access fully happens
+    /// before all the prior atomic accesses so the location no longer needs to exhibit
+    /// any weak memory behaviours until further atomic accesses.
+    pub fn destroy_atomicity<'tcx>(&self, range: AllocRange) {
+        let mut buffers = self.store_buffers.borrow_mut();
+        let access_type = buffers.access_type(range);
+        match access_type {
+            AccessType::PerfectlyOverlapping(pos) => {
+                buffers.remove_from_pos(pos);
+            }
+            AccessType::ImperfectlyOverlapping(pos_range) => {
+                buffers.remove_pos_range(pos_range);
+            }
+            AccessType::Empty(_) => {
+                // Do nothing
+            }
+        }
+    }
+
     /// Gets a store buffer associated with an atomic object in this allocation
     /// Or creates one with the specified initial value
     fn get_or_create_store_buffer<'tcx>(
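
To make the intent of destroy_atomicity concrete, here is a small illustrative program (not part of this commit) exercising the semantics described in the doc comment above: two relaxed stores populate the location's store buffer, a later non-racing plain write is ordered after both, and from that point on loads must not see the stale buffered values.

use std::sync::atomic::{AtomicU32, Ordering};
use std::thread;

fn main() {
    // Leaked so the reference is 'static and can be sent to the spawned thread.
    let a: &'static AtomicU32 = Box::leak(Box::new(AtomicU32::new(0)));

    // Two relaxed stores: the weak-memory emulation keeps both in a store buffer.
    a.store(1, Ordering::Relaxed);
    a.store(2, Ordering::Relaxed);

    let t = thread::spawn(move || {
        // Everything before `spawn` happens-before this thread, so this plain
        // write does not race with the stores above; it happens after them,
        // and the store buffer for this location is destroyed.
        unsafe { a.as_ptr().write(3) };

        // With the buffer gone, the load below can only observe 3; reading a
        // stale buffered value here would violate C++20 coherence.
        assert_eq!(a.load(Ordering::Relaxed), 3);
    });

    t.join().unwrap();
}

Before this change the plain write left the buffer intact, so the final load could still be answered from the stale buffer; destroying it makes the load return 3 as coherence requires.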


@@ -738,10 +738,17 @@ fn memory_read(
                 range,
                 machine.stacked_borrows.as_ref().unwrap(),
                 machine.current_span(),
-            )
-        } else {
-            Ok(())
+            )?;
         }
+        if let Some(weak_memory) = &alloc_extra.weak_memory {
+            if !machine.data_race.as_ref().unwrap().ongoing_atomic_access() {
+                // This is a non-atomic access. And if we are accessing a previously atomically
+                // accessed location without racing with them, then the location no longer needs
+                // to exhibit weak-memory behaviours until a fresh atomic access happens
+                weak_memory.destroy_atomicity(range);
+            }
+        }
+        Ok(())
     }
 
     #[inline(always)]
@@ -762,10 +769,14 @@ fn memory_written(
                 range,
                 machine.stacked_borrows.as_ref().unwrap(),
                 machine.current_span(),
-            )
-        } else {
-            Ok(())
+            )?;
         }
+        if let Some(weak_memory) = &alloc_extra.weak_memory {
+            if !machine.data_race.as_ref().unwrap().ongoing_atomic_access() {
+                weak_memory.destroy_atomicity(range);
+            }
+        }
+        Ok(())
     }
 
     #[inline(always)]
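
The !ongoing_atomic_access() guard in both hooks matters because atomic operations in the interpreter are ultimately serviced through these same plain read/write hooks; without the flag, every atomic access would destroy the very store buffer it is supposed to use. The sketch below uses hypothetical names (not Miri's actual API) to show the flag pattern the guard relies on:

use std::cell::Cell;

// Hypothetical flag holder; Miri keeps an equivalent flag in its data-race
// global state (see the `ongoing_atomic_access` getter added above).
struct AccessState {
    ongoing_atomic_access: Cell<bool>,
}

impl AccessState {
    fn ongoing_atomic_access(&self) -> bool {
        self.ongoing_atomic_access.get()
    }

    // Wrap the raw loads/stores that implement an atomic operation so the
    // memory hooks can tell them apart from genuinely non-atomic accesses.
    fn with_ongoing_atomic_access<R>(&self, f: impl FnOnce() -> R) -> R {
        self.ongoing_atomic_access.set(true);
        let result = f();
        self.ongoing_atomic_access.set(false);
        result
    }
}

fn main() {
    let state = AccessState { ongoing_atomic_access: Cell::new(false) };

    // A plain access observed by the hook: destroy_atomicity would run.
    assert!(!state.ongoing_atomic_access());

    // The memory traffic of an atomic operation: the hook sees the flag set
    // and leaves the store buffer alone.
    state.with_ongoing_atomic_access(|| assert!(state.ongoing_atomic_access()));
}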