Use a new AllocationMap to store store buffers in the same allocation

Andy Wang 2022-05-06 23:46:29 +01:00
parent ecdab5ff35
commit 53f4887659
No known key found for this signature in database
GPG Key ID: 181B49F9F38F3374
5 changed files with 342 additions and 39 deletions

src/allocation_map.rs (new file, 272 additions)

@@ -0,0 +1,272 @@
//! Implements a map from allocation ranges to data.
//! This is somewhat similar to RangeMap, but the ranges
//! and data are discrete and non-splittable. An allocation in the
//! map will always have the same range until explicitly removed
use rustc_target::abi::Size;
use std::ops::{Index, IndexMut, Range};
use rustc_const_eval::interpret::AllocRange;
#[derive(Clone, Debug)]
struct Elem<T> {
/// The range covered by this element; never empty.
range: AllocRange,
/// The data stored for this element.
data: T,
}
/// Index of an allocation within the map
type Position = usize;
#[derive(Clone, Debug)]
pub struct AllocationMap<T> {
v: Vec<Elem<T>>,
}
#[derive(Clone, Debug, PartialEq)]
pub enum AccessType {
/// The access perfectly overlaps (same offset and range) with the existing allocation
PerfectlyOverlapping(Position),
/// The access does not touch any existing allocation
Empty(Position),
/// The access imperfectly overlaps with one or more existing allocations
ImperfectlyOverlapping(Range<Position>),
}
impl<T> AllocationMap<T> {
pub fn new() -> Self {
Self { v: Vec::new() }
}
/// Finds the position of the allocation containing the given offset. If the offset is not
/// in an existing allocation, then returns `Err` containing the position
/// at which such an allocation should be inserted.
fn find_offset(&self, offset: Size) -> Result<Position, Position> {
// We do a binary search.
let mut left = 0usize; // inclusive
let mut right = self.v.len(); // exclusive
loop {
if left == right {
// No element contains the given offset, but this
// index is where such an element should be inserted.
return Err(left);
}
let candidate = left.checked_add(right).unwrap() / 2;
let elem = &self.v[candidate];
if offset < elem.range.start {
// We are too far right (offset is further left).
debug_assert!(candidate < right); // we are making progress
right = candidate;
} else if offset >= elem.range.end() {
// We are too far left (offset is further right).
debug_assert!(candidate >= left); // we are making progress
left = candidate + 1;
} else {
// This is it!
return Ok(candidate);
}
}
}
/// Determines whether a given access on `range` overlaps with
/// an existing allocation
pub fn access_type(&self, range: AllocRange) -> AccessType {
match self.find_offset(range.start) {
Ok(index) => {
// The start of the range belongs to an existing object; now check how the rest of the access overlaps
let elem = &self.v[index];
// FIXME: derive Eq for AllocRange in rustc
if elem.range.start == range.start && elem.range.size == range.size {
// Happy case: perfectly overlapping access
AccessType::PerfectlyOverlapping(index)
} else {
// FIXME: add a last() method to AllocRange that returns the last inclusive offset (end() is exclusive)
let end_index = match self.find_offset(range.end() - Size::from_bytes(1)) {
// If the end lands in an existing object, add one to get the exclusive index
Ok(inclusive) => inclusive + 1,
Err(exclusive) => exclusive,
};
AccessType::ImperfectlyOverlapping(index..end_index)
}
}
Err(index) => {
// Start of the range doesn't belong to an existing object
match self.find_offset(range.end() - Size::from_bytes(1)) {
// Neither does the end
Err(end_index) =>
if index == end_index {
// There's nothing between the start and the end, so the accessed range is empty
AccessType::Empty(index)
} else {
// Otherwise the access entirely covers at least one existing object
AccessType::ImperfectlyOverlapping(index..end_index)
},
// Otherwise at least part of it overlaps with something else
Ok(end_index) => AccessType::ImperfectlyOverlapping(index..end_index + 1),
}
}
}
}
/// Inserts an object and its occupied range at the given position
pub fn insert(&mut self, index: Position, range: AllocRange, data: T) {
self.v.insert(index, Elem { range, data });
// If we aren't the first element, then our start must not be less than the previous element's end
if index > 0 {
debug_assert!(self.v[index - 1].range.end() <= range.start);
}
// If we aren't the last element, then our end must not exceed the next element's start
if index < self.v.len() - 1 {
debug_assert!(range.end() <= self.v[index + 1].range.start);
}
}
/// Removes an object at the given position
pub fn remove(&mut self, index: Position) -> T {
self.v.remove(index).data
}
}
impl<T> Index<Position> for AllocationMap<T> {
type Output = T;
fn index(&self, index: usize) -> &Self::Output {
&self.v[index].data
}
}
impl<T> IndexMut<Position> for AllocationMap<T> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.v[index].data
}
}
#[cfg(test)]
mod tests {
use rustc_const_eval::interpret::alloc_range;
use super::*;
#[test]
fn empty_map() {
// FIXME: make Size::from_bytes const
let four = Size::from_bytes(4);
let map = AllocationMap::<()>::new();
// Correctly tells where we should insert the first element (at index 0)
assert_eq!(map.find_offset(Size::from_bytes(3)), Err(0));
// Correctly tells the access type along with the supposed index
assert_eq!(map.access_type(alloc_range(Size::ZERO, four)), AccessType::Empty(0));
}
#[test]
#[should_panic]
fn no_overlapping_inserts() {
let four = Size::from_bytes(4);
let mut map = AllocationMap::<&str>::new();
// |_|_|_|_|#|#|#|#|_|_|_|_|...
// 0 1 2 3 4 5 6 7 8 9 a b c d
map.insert(0, alloc_range(four, four), "#");
// |_|_|_|_|#|#|#|#|_|_|_|_|...
// 0 ^ ^ ^ ^ 5 6 7 8 9 a b c d
map.insert(0, alloc_range(Size::from_bytes(1), four), "@");
}
#[test]
fn boundaries() {
let four = Size::from_bytes(4);
let mut map = AllocationMap::<&str>::new();
// |#|#|#|#|_|_|...
// 0 1 2 3 4 5
map.insert(0, alloc_range(Size::ZERO, four), "#");
// |#|#|#|#|_|_|...
// 0 1 2 3 ^ 5
assert_eq!(map.find_offset(four), Err(1));
// |#|#|#|#|_|_|_|_|_|...
// 0 1 2 3 ^ ^ ^ ^ 8
assert_eq!(map.access_type(alloc_range(four, four)), AccessType::Empty(1));
let eight = Size::from_bytes(8);
// |#|#|#|#|_|_|_|_|@|@|@|@|_|_|...
// 0 1 2 3 4 5 6 7 8 9 a b c d
map.insert(1, alloc_range(eight, four), "@");
// |#|#|#|#|_|_|_|_|@|@|@|@|_|_|...
// 0 1 2 3 4 5 6 ^ 8 9 a b c d
assert_eq!(map.find_offset(Size::from_bytes(7)), Err(1));
// |#|#|#|#|_|_|_|_|@|@|@|@|_|_|...
// 0 1 2 3 ^ ^ ^ ^ 8 9 a b c d
assert_eq!(map.access_type(alloc_range(four, four)), AccessType::Empty(1));
}
#[test]
fn perfectly_overlapping() {
let four = Size::from_bytes(4);
let mut map = AllocationMap::<&str>::new();
// |#|#|#|#|_|_|...
// 0 1 2 3 4 5
map.insert(0, alloc_range(Size::ZERO, four), "#");
// |#|#|#|#|_|_|...
// ^ ^ ^ ^ 4 5
assert_eq!(map.find_offset(Size::ZERO), Ok(0));
assert_eq!(
map.access_type(alloc_range(Size::ZERO, four)),
AccessType::PerfectlyOverlapping(0)
);
// |#|#|#|#|@|@|@|@|_|...
// 0 1 2 3 4 5 6 7 8
map.insert(1, alloc_range(four, four), "@");
// |#|#|#|#|@|@|@|@|_|...
// 0 1 2 3 ^ ^ ^ ^ 8
assert_eq!(map.find_offset(four), Ok(1));
assert_eq!(map.access_type(alloc_range(four, four)), AccessType::PerfectlyOverlapping(1));
}
#[test]
fn straddling() {
let four = Size::from_bytes(4);
let mut map = AllocationMap::<&str>::new();
// |_|_|_|_|#|#|#|#|_|_|_|_|...
// 0 1 2 3 4 5 6 7 8 9 a b c d
map.insert(0, alloc_range(four, four), "#");
// |_|_|_|_|#|#|#|#|_|_|_|_|...
// 0 1 ^ ^ ^ ^ 6 7 8 9 a b c d
assert_eq!(
map.access_type(alloc_range(Size::from_bytes(2), four)),
AccessType::ImperfectlyOverlapping(0..1)
);
// |_|_|_|_|#|#|#|#|_|_|_|_|...
// 0 1 2 3 4 5 ^ ^ ^ ^ a b c d
assert_eq!(
map.access_type(alloc_range(Size::from_bytes(6), four)),
AccessType::ImperfectlyOverlapping(0..1)
);
// |_|_|_|_|#|#|#|#|_|_|_|_|...
// 0 1 ^ ^ ^ ^ ^ ^ ^ ^ a b c d
assert_eq!(
map.access_type(alloc_range(Size::from_bytes(2), Size::from_bytes(8))),
AccessType::ImperfectlyOverlapping(0..1)
);
// |_|_|_|_|#|#|#|#|_|_|@|@|_|_|...
// 0 1 2 3 4 5 6 7 8 9 a b c d
map.insert(1, alloc_range(Size::from_bytes(10), Size::from_bytes(2)), "@");
// |_|_|_|_|#|#|#|#|_|_|@|@|_|_|...
// 0 1 2 3 4 5 ^ ^ ^ ^ ^ ^ ^ ^
assert_eq!(
map.access_type(alloc_range(Size::from_bytes(6), Size::from_bytes(8))),
AccessType::ImperfectlyOverlapping(0..2)
);
}
}
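For reference, a minimal usage sketch of the API above (not part of this commit): it shows how a position returned by access_type pairs with the Index/IndexMut impls, assuming the same rustc dev-dependencies the unit tests use and a hypothetical demo function placed alongside this module.

// Illustrative only: insert two disjoint entries, then look one up by position.
use rustc_const_eval::interpret::alloc_range;
use rustc_target::abi::Size;

fn demo(map: &mut AllocationMap<&str>) {
    let four = Size::from_bytes(4);
    // Two disjoint 4-byte objects at offsets 0 and 8.
    map.insert(0, alloc_range(Size::ZERO, four), "first");
    map.insert(1, alloc_range(Size::from_bytes(8), four), "second");
    // An exact re-access of offset 8 reports the position of the second entry...
    let pos = match map.access_type(alloc_range(Size::from_bytes(8), four)) {
        AccessType::PerfectlyOverlapping(pos) => pos,
        _ => unreachable!("the range matches an existing entry exactly"),
    };
    // ...and its data can be read or replaced through that position.
    assert_eq!(map[pos], "second");
    map[pos] = "updated";
}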


@@ -519,7 +519,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
global.sc_read();
}
let mut rng = this.machine.rng.borrow_mut();
let buffer = alloc_buffers.get_store_buffer(alloc_range(base_offset, place.layout.size));
let buffer =
alloc_buffers.get_store_buffer(alloc_range(base_offset, place.layout.size));
let loaded = buffer.buffered_read(
global,
atomic == AtomicReadOp::SeqCst,
@@ -555,12 +556,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
if atomic == AtomicWriteOp::SeqCst {
global.sc_write();
}
let mut buffer = alloc_buffers.get_store_buffer_mut(alloc_range(base_offset, dest.layout.size));
buffer.buffered_write(
val,
global,
atomic == AtomicWriteOp::SeqCst,
)?;
let buffer =
alloc_buffers.get_store_buffer_mut(alloc_range(base_offset, dest.layout.size));
buffer.buffered_write(val, global, atomic == AtomicWriteOp::SeqCst)?;
}
Ok(())
@@ -624,17 +622,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
let lt = this.binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar()?.to_bool()?;
let new_val = if min {
if lt {
&old
} else {
&rhs
}
if lt { &old } else { &rhs }
} else {
if lt {
&rhs
} else {
&old
}
if lt { &rhs } else { &old }
};
this.allow_data_races_mut(|this| this.write_immediate(**new_val, &(*place).into()))?;
@@ -736,7 +726,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
global.sc_write();
}
let range = alloc_range(base_offset, place.layout.size);
let mut buffer = alloc_buffers.get_store_buffer_mut(range);
let buffer = alloc_buffers.get_store_buffer_mut(range);
buffer.read_from_last_store(global);
buffer.buffered_write(new_val, global, atomic == AtomicRwOp::SeqCst)?;
}


@@ -31,6 +31,7 @@ extern crate rustc_session;
extern crate rustc_span;
extern crate rustc_target;
mod allocation_map;
mod data_race;
mod diagnostics;
mod eval;


@@ -636,13 +636,17 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
let buffer_alloc = if ecx.machine.weak_memory {
// FIXME: if this is an atomic object, we want to supply its initial value
// while allocating the store buffer here.
Some(weak_memory::AllocExtra::new_allocation(alloc.size()))
Some(weak_memory::AllocExtra::new_allocation())
} else {
None
};
let alloc: Allocation<Tag, Self::AllocExtra> = alloc.convert_tag_add_extra(
&ecx.tcx,
AllocExtra { stacked_borrows: stacks, data_race: race_alloc, weak_memory: buffer_alloc },
AllocExtra {
stacked_borrows: stacks,
data_race: race_alloc,
weak_memory: buffer_alloc,
},
|ptr| Evaluator::tag_alloc_base_pointer(ecx, ptr),
);
Cow::Owned(alloc)


@@ -25,48 +25,84 @@
// and here.
use std::{
cell::{Ref, RefCell, RefMut},
cell::{Ref, RefCell},
collections::VecDeque,
};
use rustc_const_eval::interpret::{AllocRange, InterpResult, ScalarMaybeUninit};
use rustc_data_structures::fx::FxHashMap;
use rustc_target::abi::Size;
use crate::{
allocation_map::{AccessType, AllocationMap},
data_race::{GlobalState, ThreadClockSet},
RangeMap, Tag, VClock, VTimestamp, VectorIdx,
Tag, VClock, VTimestamp, VectorIdx,
};
pub type AllocExtra = StoreBufferAlloc;
#[derive(Debug, Clone)]
pub struct StoreBufferAlloc {
/// Store buffer of each atomic object in this allocation
// Load may modify a StoreBuffer to record the loading thread's
// timestamp so we need interior mutability here.
store_buffer: RefCell<RangeMap<StoreBuffer>>,
// Behind a RefCell because we need to allocate/remove on read access
store_buffer: RefCell<AllocationMap<StoreBuffer>>,
}
impl StoreBufferAlloc {
pub fn new_allocation(len: Size) -> Self {
Self { store_buffer: RefCell::new(RangeMap::new(len, StoreBuffer::default())) }
pub fn new_allocation() -> Self {
Self { store_buffer: RefCell::new(AllocationMap::new()) }
}
/// Gets a store buffer associated with an atomic object in this allocation
pub fn get_store_buffer(&self, range: AllocRange) -> Ref<'_, StoreBuffer> {
Ref::map(self.store_buffer.borrow(), |range_map| {
let (.., store_buffer) = range_map.iter(range.start, range.size).next().unwrap();
store_buffer
})
let access_type = self.store_buffer.borrow().access_type(range);
let index = match access_type {
AccessType::PerfectlyOverlapping(index) => index,
AccessType::Empty(index) => {
// First atomic access on this range, allocate a new StoreBuffer
let mut buffer = self.store_buffer.borrow_mut();
buffer.insert(index, range, StoreBuffer::default());
index
}
AccessType::ImperfectlyOverlapping(index_range) => {
// Accesses that imperfectly overlap with existing atomic objects
// do not have well-defined behaviour. But we don't throw UB here
// because we have (or will have) checked that all bytes in the current
// access are non-racy.
// The behaviour here is that we delete all the existing objects this
// access touches, and allocate a new and empty one for the exact range.
// A read on an empty buffer returns None, which means the program will
// observe the latest value in modification order at every byte.
let mut buffer = self.store_buffer.borrow_mut();
for index in index_range.clone() {
buffer.remove(index);
}
buffer.insert(index_range.start, range, StoreBuffer::default());
index_range.start
}
};
Ref::map(self.store_buffer.borrow(), |buffer| &buffer[index])
}
pub fn get_store_buffer_mut(&self, range: AllocRange) -> RefMut<'_, StoreBuffer> {
RefMut::map(self.store_buffer.borrow_mut(), |range_map| {
let (.., store_buffer) = range_map.iter_mut(range.start, range.size).next().unwrap();
store_buffer
})
/// Gets a mutable store buffer associated with an atomic object in this allocation
pub fn get_store_buffer_mut(&mut self, range: AllocRange) -> &mut StoreBuffer {
let buffer = self.store_buffer.get_mut();
let access_type = buffer.access_type(range);
let index = match access_type {
AccessType::PerfectlyOverlapping(index) => index,
AccessType::Empty(index) => {
buffer.insert(index, range, StoreBuffer::default());
index
}
AccessType::ImperfectlyOverlapping(index_range) => {
for index in index_range.clone() {
buffer.remove(index);
}
buffer.insert(index_range.start, range, StoreBuffer::default());
index_range.start
}
};
&mut buffer[index]
}
}
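The interior mutability above deserves a note: get_store_buffer takes &self but may have to insert a fresh StoreBuffer, so the map sits behind a RefCell, the mutable borrow is finished first, and only then is a shared Ref projected down to the chosen buffer with Ref::map. A std-only miniature of the same pattern, with a hypothetical LazyMap type that is not Miri code:

use std::cell::{Ref, RefCell};
use std::collections::BTreeMap;

// Hypothetical stand-in for StoreBufferAlloc, for illustration only.
struct LazyMap {
    entries: RefCell<BTreeMap<u64, String>>,
}

impl LazyMap {
    // Takes &self, yet may need to insert: a logically read-only access can
    // still allocate the entry on demand because the map is inside a RefCell.
    fn get(&self, key: u64) -> Ref<'_, String> {
        if !self.entries.borrow().contains_key(&key) {
            // The mutable borrow ends before the shared borrow below is taken.
            self.entries.borrow_mut().insert(key, String::from("default"));
        }
        // Project the whole-map borrow down to this one entry, mirroring
        // Ref::map(self.store_buffer.borrow(), |buffer| &buffer[index]) above.
        Ref::map(self.entries.borrow(), |m| &m[&key])
    }
}

fn main() {
    let map = LazyMap { entries: RefCell::new(BTreeMap::new()) };
    assert_eq!(*map.get(7), "default"); // created lazily on first read
}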
const STORE_BUFFER_LIMIT: usize = 128;