From 11ca975cd83351aae3b113ceb754dde4a304a6b5 Mon Sep 17 00:00:00 2001 From: Andy Wang Date: Sat, 7 May 2022 01:07:16 +0100 Subject: [PATCH] Move type definitions together and clarify fetch_store on empty buffer --- src/weak_memory.rs | 59 +++++++++++++++++++++++++--------------------- 1 file changed, 32 insertions(+), 27 deletions(-) diff --git a/src/weak_memory.rs b/src/weak_memory.rs index 34c669239d5..0d892a5b38d 100644 --- a/src/weak_memory.rs +++ b/src/weak_memory.rs @@ -40,6 +40,8 @@ use crate::{ pub type AllocExtra = StoreBufferAlloc; +const STORE_BUFFER_LIMIT: usize = 128; + #[derive(Debug, Clone)] pub struct StoreBufferAlloc { /// Store buffer of each atomic object in this allocation @@ -47,6 +49,31 @@ pub struct StoreBufferAlloc { store_buffer: RefCell>, } +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct StoreBuffer { + // Stores to this location in modification order + buffer: VecDeque, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct StoreElement { + /// The identifier of the vector index, corresponding to a thread + /// that performed the store. + store_index: VectorIdx, + + /// Whether this store is SC. 
+ is_seqcst: bool, + + /// The timestamp of the storing thread when it performed the store + timestamp: VTimestamp, + /// The value of this store + val: ScalarMaybeUninit, + + /// Timestamps of the first load from this store element by each thread + /// Behind a RefCell so that the load operation can take &self + loads: RefCell>, + impl StoreBufferAlloc { pub fn new_allocation() -> Self { Self { store_buffer: RefCell::new(AllocationMap::new()) } } @@ -105,13 +132,6 @@ impl StoreBufferAlloc { } } -const STORE_BUFFER_LIMIT: usize = 128; -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct StoreBuffer { - // Stores to this location in modification order - buffer: VecDeque, -} - impl Default for StoreBuffer { fn default() -> Self { let mut buffer = VecDeque::new(); @@ -175,7 +195,11 @@ impl<'mir, 'tcx: 'mir> StoreBuffer { /// Selects a valid store element in the buffer. /// The buffer does not contain the value used to initialise the atomic object - /// so a fresh atomic object has an empty store buffer until an explicit store. + /// so a fresh atomic object has an empty store buffer and this function + /// will return `None`. In this case, the caller should ensure that the non-buffered + /// value from `MiriEvalContext::read_scalar()` is observed by the program, which is + /// the initial value of the atomic object. `MiriEvalContext::read_scalar()` always + /// returns the latest value in modification order, so it is always correct for any thread to observe it. fn fetch_store( &self, is_seqcst: bool, @@ -294,25 +318,6 @@ impl<'mir, 'tcx: 'mir> StoreBuffer { } } -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct StoreElement { - /// The identifier of the vector index, corresponding to a thread - /// that performed the store. - store_index: VectorIdx, - - /// Whether this store is SC. 
- is_seqcst: bool, - - /// The timestamp of the storing thread when it performed the store - timestamp: VTimestamp, - /// The value of this store - val: ScalarMaybeUninit, - - /// Timestamp of first loads from this store element by each thread - /// Behind a RefCell to keep load op take &self - loads: RefCell>, -} - impl StoreElement { /// ATOMIC LOAD IMPL in the paper /// Unlike the operational semantics in the paper, we don't need to keep track