Auto merge of #2302 - RalfJung:datarace, r=RalfJung

avoid copying thread manager state in data race detector

While working on https://github.com/rust-lang/miri/pull/2047 I did not realize that there was some redundant state here that we can now remove from the data race detector.

This also removes the vector clocks from data race error messages, since they don't really help diagnose the problem.
bors 2022-07-02 00:14:06 +00:00
commit d118a949f7
61 changed files with 431 additions and 419 deletions
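
To illustrate the shape of this refactor, here is a minimal, self-contained Rust sketch. The types below (`ThreadManager`, `GlobalStateOld`, `GlobalStateNew`, `ThreadId`, `VectorIdx`) are simplified stand-ins, not the real Miri definitions: the point is only that the detector used to mirror the active thread's vector index locally and keep it in sync through hooks such as `thread_set_active`, and now instead takes a reference to the thread manager and derives the index on demand.

```rust
// Minimal sketch, not the actual Miri API: the detector used to cache the
// active vector index and relied on hooks to keep it fresh; it now derives
// the index from the thread manager whenever it is needed.
use std::cell::Cell;
use std::collections::HashMap;

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
struct ThreadId(u32);

#[derive(Copy, Clone, Debug)]
struct VectorIdx(u32);

struct ThreadManager {
    active_thread: ThreadId,
}

impl ThreadManager {
    fn get_active_thread_id(&self) -> ThreadId {
        self.active_thread
    }
}

// Before: a redundant copy of the active index, updated on every switch.
struct GlobalStateOld {
    current_index: Cell<VectorIdx>,
}

impl GlobalStateOld {
    // Must be called whenever the scheduler switches threads,
    // otherwise the detector silently uses a stale index.
    fn thread_set_active(&self, index: VectorIdx) {
        self.current_index.set(index);
    }

    fn current_index(&self) -> VectorIdx {
        self.current_index.get()
    }
}

// After: no local copy; ask the thread manager on demand.
struct GlobalStateNew {
    vector_index_of: HashMap<ThreadId, VectorIdx>,
}

impl GlobalStateNew {
    fn current_index(&self, thread_mgr: &ThreadManager) -> VectorIdx {
        self.vector_index_of[&thread_mgr.get_active_thread_id()]
    }
}

fn main() {
    let mgr = ThreadManager { active_thread: ThreadId(0) };

    let old = GlobalStateOld { current_index: Cell::new(VectorIdx(0)) };
    old.thread_set_active(VectorIdx(0)); // easy to forget on some code path
    assert_eq!(old.current_index().0, 0);

    let new = GlobalStateNew {
        vector_index_of: HashMap::from([(ThreadId(0), VectorIdx(0))]),
    };
    assert_eq!(new.current_index(&mgr).0, 0);
}
```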

@ -39,11 +39,6 @@
//! so some atomic operations that only perform acquires do not increment the timestamp. Due to shared
//! code some atomic operations may increment the timestamp when not necessary but this has no effect
//! on the data-race detection code.
//!
//! FIXME:
//! currently we have our own local copy of the currently active thread index and names, this is due
//! in part to the inability to access the current location of threads.active_thread inside the AllocExtra
//! read, write and deallocate functions and should be cleaned up in the future.
use std::{
cell::{Cell, Ref, RefCell, RefMut},
@ -62,9 +57,9 @@ use super::weak_memory::EvalContextExt as _;
pub type AllocExtra = VClockAlloc;
/// Valid atomic read-write operations, alias of atomic::Ordering (not non-exhaustive).
/// Valid atomic read-write orderings, alias of atomic::Ordering (not non-exhaustive).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicRwOp {
pub enum AtomicRwOrd {
Relaxed,
Acquire,
Release,
@ -72,25 +67,25 @@ pub enum AtomicRwOp {
SeqCst,
}
/// Valid atomic read operations, subset of atomic::Ordering.
/// Valid atomic read orderings, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicReadOp {
pub enum AtomicReadOrd {
Relaxed,
Acquire,
SeqCst,
}
/// Valid atomic write operations, subset of atomic::Ordering.
/// Valid atomic write orderings, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicWriteOp {
pub enum AtomicWriteOrd {
Relaxed,
Release,
SeqCst,
}
/// Valid atomic fence operations, subset of atomic::Ordering.
/// Valid atomic fence orderings, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicFenceOp {
pub enum AtomicFenceOrd {
Acquire,
Release,
AcqRel,
@ -486,7 +481,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
op: &OpTy<'tcx, Tag>,
offset: u64,
layout: TyAndLayout<'tcx>,
atomic: AtomicReadOp,
atomic: AtomicReadOrd,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let this = self.eval_context_ref();
let value_place = this.deref_operand_and_offset(op, offset, layout)?;
@ -500,7 +495,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
offset: u64,
value: impl Into<ScalarMaybeUninit<Tag>>,
layout: TyAndLayout<'tcx>,
atomic: AtomicWriteOp,
atomic: AtomicWriteOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let value_place = this.deref_operand_and_offset(op, offset, layout)?;
@ -511,7 +506,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
fn read_scalar_atomic(
&self,
place: &MPlaceTy<'tcx, Tag>,
atomic: AtomicReadOp,
atomic: AtomicReadOrd,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let this = self.eval_context_ref();
// This will read from the last store in the modification order of this location. In case
@ -531,7 +526,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
&mut self,
val: ScalarMaybeUninit<Tag>,
dest: &MPlaceTy<'tcx, Tag>,
atomic: AtomicWriteOp,
atomic: AtomicWriteOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
this.validate_overlapping_atomic(dest)?;
@ -552,7 +547,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
rhs: &ImmTy<'tcx, Tag>,
op: mir::BinOp,
neg: bool,
atomic: AtomicRwOp,
atomic: AtomicRwOrd,
) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
let this = self.eval_context_mut();
@ -581,7 +576,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
&mut self,
place: &MPlaceTy<'tcx, Tag>,
new: ScalarMaybeUninit<Tag>,
atomic: AtomicRwOp,
atomic: AtomicRwOrd,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let this = self.eval_context_mut();
@ -602,7 +597,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
place: &MPlaceTy<'tcx, Tag>,
rhs: ImmTy<'tcx, Tag>,
min: bool,
atomic: AtomicRwOp,
atomic: AtomicRwOrd,
) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
let this = self.eval_context_mut();
@ -642,8 +637,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
place: &MPlaceTy<'tcx, Tag>,
expect_old: &ImmTy<'tcx, Tag>,
new: ScalarMaybeUninit<Tag>,
success: AtomicRwOp,
fail: AtomicReadOp,
success: AtomicRwOrd,
fail: AtomicReadOrd,
can_fail_spuriously: bool,
) -> InterpResult<'tcx, Immediate<Tag>> {
use rand::Rng as _;
@ -696,7 +691,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
fn validate_atomic_load(
&self,
place: &MPlaceTy<'tcx, Tag>,
atomic: AtomicReadOp,
atomic: AtomicReadOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_ref();
this.validate_overlapping_atomic(place)?;
@ -705,7 +700,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
atomic,
"Atomic Load",
move |memory, clocks, index, atomic| {
if atomic == AtomicReadOp::Relaxed {
if atomic == AtomicReadOrd::Relaxed {
memory.load_relaxed(&mut *clocks, index)
} else {
memory.load_acquire(&mut *clocks, index)
@ -719,7 +714,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
fn validate_atomic_store(
&mut self,
place: &MPlaceTy<'tcx, Tag>,
atomic: AtomicWriteOp,
atomic: AtomicWriteOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
this.validate_overlapping_atomic(place)?;
@ -728,7 +723,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
atomic,
"Atomic Store",
move |memory, clocks, index, atomic| {
if atomic == AtomicWriteOp::Relaxed {
if atomic == AtomicWriteOrd::Relaxed {
memory.store_relaxed(clocks, index)
} else {
memory.store_release(clocks, index)
@ -742,9 +737,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
fn validate_atomic_rmw(
&mut self,
place: &MPlaceTy<'tcx, Tag>,
atomic: AtomicRwOp,
atomic: AtomicRwOrd,
) -> InterpResult<'tcx> {
use AtomicRwOp::*;
use AtomicRwOrd::*;
let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
let release = matches!(atomic, Release | AcqRel | SeqCst);
let this = self.eval_context_mut();
@ -764,31 +759,31 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
}
/// Update the data-race detector for an atomic fence on the current thread.
fn validate_atomic_fence(&mut self, atomic: AtomicFenceOp) -> InterpResult<'tcx> {
fn validate_atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
if let Some(data_race) = &mut this.machine.data_race {
data_race.maybe_perform_sync_operation(|index, mut clocks| {
data_race.maybe_perform_sync_operation(&this.machine.threads, |index, mut clocks| {
log::trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);
// Apply data-race detection for the current fences
// this treats AcqRel and SeqCst as the same as an acquire
// and release fence applied in the same timestamp.
if atomic != AtomicFenceOp::Release {
if atomic != AtomicFenceOrd::Release {
// Either Acquire | AcqRel | SeqCst
clocks.apply_acquire_fence();
}
if atomic != AtomicFenceOp::Acquire {
if atomic != AtomicFenceOrd::Acquire {
// Either Release | AcqRel | SeqCst
clocks.apply_release_fence();
}
if atomic == AtomicFenceOp::SeqCst {
if atomic == AtomicFenceOrd::SeqCst {
data_race.last_sc_fence.borrow_mut().set_at_index(&clocks.clock, index);
clocks.fence_seqcst.join(&data_race.last_sc_fence.borrow());
clocks.write_seqcst.join(&data_race.last_sc_write.borrow());
}
// Increment timestamp in case of release semantics.
Ok(atomic != AtomicFenceOp::Acquire)
Ok(atomic != AtomicFenceOrd::Acquire)
})
} else {
Ok(())
@ -807,6 +802,7 @@ impl VClockAlloc {
/// Create a new data-race detector for newly allocated memory.
pub fn new_allocation(
global: &GlobalState,
thread_mgr: &ThreadManager<'_, '_>,
len: Size,
kind: MemoryKind<MiriMemoryKind>,
) -> VClockAlloc {
@ -816,7 +812,7 @@ impl VClockAlloc {
MiriMemoryKind::Rust | MiriMemoryKind::C | MiriMemoryKind::WinHeap,
)
| MemoryKind::Stack => {
let (alloc_index, clocks) = global.current_thread_state();
let (alloc_index, clocks) = global.current_thread_state(thread_mgr);
let alloc_timestamp = clocks.clock[alloc_index];
(alloc_timestamp, alloc_index)
}
@ -878,14 +874,15 @@ impl VClockAlloc {
#[inline(never)]
fn report_data_race<'tcx>(
global: &GlobalState,
thread_mgr: &ThreadManager<'_, '_>,
range: &MemoryCellClocks,
action: &str,
is_atomic: bool,
ptr_dbg: Pointer<AllocId>,
) -> InterpResult<'tcx> {
let (current_index, current_clocks) = global.current_thread_state();
let (current_index, current_clocks) = global.current_thread_state(thread_mgr);
let write_clock;
let (other_action, other_thread, other_clock) = if range.write
let (other_action, other_thread, _other_clock) = if range.write
> current_clocks.clock[range.write_index]
{
// Convert the write action into the vector clock it
@ -918,27 +915,30 @@ impl VClockAlloc {
};
// Load elaborated thread information about the racing thread actions.
let current_thread_info = global.print_thread_metadata(current_index);
let other_thread_info = global.print_thread_metadata(other_thread);
let current_thread_info = global.print_thread_metadata(thread_mgr, current_index);
let other_thread_info = global.print_thread_metadata(thread_mgr, other_thread);
// Throw the data-race detection.
throw_ub_format!(
"Data race detected between {} on {} and {} on {} at {:?} (current vector clock = {:?}, conflicting timestamp = {:?})",
"Data race detected between {} on {} and {} on {} at {:?}",
action,
current_thread_info,
other_action,
other_thread_info,
ptr_dbg,
current_clocks.clock,
other_clock
)
}
/// Detect racing atomic read and writes (not data races)
/// on every byte of the current access range
pub(super) fn race_free_with_atomic(&self, range: AllocRange, global: &GlobalState) -> bool {
pub(super) fn race_free_with_atomic(
&self,
range: AllocRange,
global: &GlobalState,
thread_mgr: &ThreadManager<'_, '_>,
) -> bool {
if global.race_detecting() {
let (_, clocks) = global.current_thread_state();
let (_, clocks) = global.current_thread_state(thread_mgr);
let alloc_ranges = self.alloc_ranges.borrow();
for (_, range) in alloc_ranges.iter(range.start, range.size) {
if !range.race_free_with_atomic(&clocks) {
@ -959,15 +959,17 @@ impl VClockAlloc {
alloc_id: AllocId,
range: AllocRange,
global: &GlobalState,
thread_mgr: &ThreadManager<'_, '_>,
) -> InterpResult<'tcx> {
if global.race_detecting() {
let (index, clocks) = global.current_thread_state();
let (index, clocks) = global.current_thread_state(thread_mgr);
let mut alloc_ranges = self.alloc_ranges.borrow_mut();
for (offset, range) in alloc_ranges.iter_mut(range.start, range.size) {
if let Err(DataRace) = range.read_race_detect(&clocks, index) {
// Report data-race.
return Self::report_data_race(
global,
thread_mgr,
range,
"Read",
false,
@ -988,14 +990,16 @@ impl VClockAlloc {
range: AllocRange,
write_type: WriteType,
global: &mut GlobalState,
thread_mgr: &ThreadManager<'_, '_>,
) -> InterpResult<'tcx> {
if global.race_detecting() {
let (index, clocks) = global.current_thread_state();
let (index, clocks) = global.current_thread_state(thread_mgr);
for (offset, range) in self.alloc_ranges.get_mut().iter_mut(range.start, range.size) {
if let Err(DataRace) = range.write_race_detect(&clocks, index, write_type) {
// Report data-race
return Self::report_data_race(
global,
thread_mgr,
range,
write_type.get_descriptor(),
false,
@ -1018,8 +1022,9 @@ impl VClockAlloc {
alloc_id: AllocId,
range: AllocRange,
global: &mut GlobalState,
thread_mgr: &ThreadManager<'_, '_>,
) -> InterpResult<'tcx> {
self.unique_access(alloc_id, range, WriteType::Write, global)
self.unique_access(alloc_id, range, WriteType::Write, global, thread_mgr)
}
/// Detect data-races for an unsynchronized deallocate operation, will not perform
@ -1031,8 +1036,9 @@ impl VClockAlloc {
alloc_id: AllocId,
range: AllocRange,
global: &mut GlobalState,
thread_mgr: &ThreadManager<'_, '_>,
) -> InterpResult<'tcx> {
self.unique_access(alloc_id, range, WriteType::Deallocate, global)
self.unique_access(alloc_id, range, WriteType::Deallocate, global, thread_mgr)
}
}
@ -1068,26 +1074,30 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
);
// Perform the atomic operation.
data_race.maybe_perform_sync_operation(|index, mut clocks| {
for (offset, range) in
alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
{
if let Err(DataRace) = op(range, &mut clocks, index, atomic) {
mem::drop(clocks);
return VClockAlloc::report_data_race(
data_race,
range,
description,
true,
Pointer::new(alloc_id, offset),
)
.map(|_| true);
data_race.maybe_perform_sync_operation(
&this.machine.threads,
|index, mut clocks| {
for (offset, range) in
alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
{
if let Err(DataRace) = op(range, &mut clocks, index, atomic) {
mem::drop(clocks);
return VClockAlloc::report_data_race(
data_race,
&this.machine.threads,
range,
description,
true,
Pointer::new(alloc_id, offset),
)
.map(|_| true);
}
}
}
// This conservatively assumes all operations have release semantics
Ok(true)
})?;
// This conservatively assumes all operations have release semantics
Ok(true)
},
)?;
// Log changes to atomic memory.
if log::log_enabled!(log::Level::Trace) {
@ -1117,11 +1127,6 @@ struct ThreadExtraState {
/// read during data-race reporting.
vector_index: Option<VectorIdx>,
/// The name of the thread, updated for better
/// diagnostics when reporting detected data
/// races.
thread_name: Option<Box<str>>,
/// Thread termination vector clock, this
/// is set on thread termination and is used
/// for joining on threads since the vector_index
@ -1161,9 +1166,6 @@ pub struct GlobalState {
/// The mapping of a given thread to associated thread metadata.
thread_info: RefCell<IndexVec<ThreadId, ThreadExtraState>>,
/// The current vector index being executed.
current_index: Cell<VectorIdx>,
/// Potential vector indices that could be re-used on thread creation
/// values are inserted here on after the thread has terminated and
/// been joined with, and hence may potentially become free
@ -1173,12 +1175,6 @@ pub struct GlobalState {
/// active vector-clocks catch up with the threads timestamp.
reuse_candidates: RefCell<FxHashSet<VectorIdx>>,
/// Counts the number of threads that are currently active
/// if the number of active threads reduces to 1 and then
/// a join operation occurs with the remaining main thread
/// then multi-threaded execution may be disabled.
active_thread_count: Cell<usize>,
/// This contains threads that have terminated, but not yet joined
/// and so cannot become re-use candidates until a join operation
/// occurs.
@ -1203,8 +1199,6 @@ impl GlobalState {
vector_clocks: RefCell::new(IndexVec::new()),
vector_info: RefCell::new(IndexVec::new()),
thread_info: RefCell::new(IndexVec::new()),
current_index: Cell::new(VectorIdx::new(0)),
active_thread_count: Cell::new(1),
reuse_candidates: RefCell::new(FxHashSet::default()),
terminated_threads: RefCell::new(FxHashMap::default()),
last_sc_fence: RefCell::new(VClock::default()),
@ -1212,15 +1206,13 @@ impl GlobalState {
};
// Setup the main-thread since it is not explicitly created:
// uses vector index and thread-id 0, also the rust runtime gives
// the main-thread a name of "main".
// uses vector index and thread-id 0.
let index = global_state.vector_clocks.get_mut().push(ThreadClockSet::default());
global_state.vector_info.get_mut().push(ThreadId::new(0));
global_state.thread_info.get_mut().push(ThreadExtraState {
vector_index: Some(index),
thread_name: Some("main".to_string().into_boxed_str()),
termination_vector_clock: None,
});
global_state
.thread_info
.get_mut()
.push(ThreadExtraState { vector_index: Some(index), termination_vector_clock: None });
global_state
}
@ -1274,14 +1266,10 @@ impl GlobalState {
// Hook for thread creation, enabled multi-threaded execution and marks
// the current thread timestamp as happening-before the current thread.
#[inline]
pub fn thread_created(&mut self, thread: ThreadId) {
let current_index = self.current_index();
pub fn thread_created(&mut self, thread_mgr: &ThreadManager<'_, '_>, thread: ThreadId) {
let current_index = self.current_index(thread_mgr);
// Increment the number of active threads.
let active_threads = self.active_thread_count.get();
self.active_thread_count.set(active_threads + 1);
// Enable multi-threaded execution, there are now two threads
// Enable multi-threaded execution, there are now at least two threads
// so data-races are now possible.
self.multi_threaded.set(true);
@ -1339,21 +1327,27 @@ impl GlobalState {
created.increment_clock(created_index);
}
/// Hook on a thread join to update the implicit happens-before relation
/// between the joined thread and the current thread.
/// Hook on a thread join to update the implicit happens-before relation between the joined
/// thread (the joinee, the thread that someone waited on) and the current thread (the joiner,
/// the thread who was waiting).
#[inline]
pub fn thread_joined(&mut self, current_thread: ThreadId, join_thread: ThreadId) {
pub fn thread_joined(
&mut self,
thread_mgr: &ThreadManager<'_, '_>,
joiner: ThreadId,
joinee: ThreadId,
) {
let clocks_vec = self.vector_clocks.get_mut();
let thread_info = self.thread_info.get_mut();
// Load the vector clock of the current thread.
let current_index = thread_info[current_thread]
let current_index = thread_info[joiner]
.vector_index
.expect("Performed thread join on thread with no assigned vector");
let current = &mut clocks_vec[current_index];
// Load the associated vector clock for the terminated thread.
let join_clock = thread_info[join_thread]
let join_clock = thread_info[joinee]
.termination_vector_clock
.as_ref()
.expect("Joined with thread but thread has not terminated");
@ -1363,10 +1357,9 @@ impl GlobalState {
// Is not a release operation so the clock is not incremented.
current.clock.join(join_clock);
// Check the number of active threads, if the value is 1
// Check the number of live threads, if the value is 1
// then test for potentially disabling multi-threaded execution.
let active_threads = self.active_thread_count.get();
if active_threads == 1 {
if thread_mgr.get_live_thread_count() == 1 {
// May potentially be able to disable multi-threaded execution.
let current_clock = &clocks_vec[current_index];
if clocks_vec
@ -1383,7 +1376,7 @@ impl GlobalState {
// If the thread is marked as terminated but not joined
// then move the thread to the re-use set.
let termination = self.terminated_threads.get_mut();
if let Some(index) = termination.remove(&join_thread) {
if let Some(index) = termination.remove(&joinee) {
let reuse = self.reuse_candidates.get_mut();
reuse.insert(index);
}
@ -1397,8 +1390,8 @@ impl GlobalState {
/// This should be called strictly before any calls to
/// `thread_joined`.
#[inline]
pub fn thread_terminated(&mut self) {
let current_index = self.current_index();
pub fn thread_terminated(&mut self, thread_mgr: &ThreadManager<'_, '_>) {
let current_index = self.current_index(thread_mgr);
// Increment the clock to a unique termination timestamp.
let vector_clocks = self.vector_clocks.get_mut();
@ -1420,35 +1413,6 @@ impl GlobalState {
// occurs.
let termination = self.terminated_threads.get_mut();
termination.insert(current_thread, current_index);
// Reduce the number of active threads, now that a thread has
// terminated.
let mut active_threads = self.active_thread_count.get();
active_threads -= 1;
self.active_thread_count.set(active_threads);
}
/// Hook for updating the local tracker of the currently
/// enabled thread, should always be updated whenever
/// `active_thread` in thread.rs is updated.
#[inline]
pub fn thread_set_active(&self, thread: ThreadId) {
let thread_info = self.thread_info.borrow();
let vector_idx = thread_info[thread]
.vector_index
.expect("Setting thread active with no assigned vector");
self.current_index.set(vector_idx);
}
/// Hook for updating the local tracker of the threads name
/// this should always mirror the local value in thread.rs
/// the thread name is used for improved diagnostics
/// during a data-race.
#[inline]
pub fn thread_set_name(&mut self, thread: ThreadId, name: String) {
let name = name.into_boxed_str();
let thread_info = self.thread_info.get_mut();
thread_info[thread].thread_name = Some(name);
}
/// Attempt to perform a synchronized operation, this
@ -1460,12 +1424,13 @@ impl GlobalState {
/// operation may create.
fn maybe_perform_sync_operation<'tcx>(
&self,
thread_mgr: &ThreadManager<'_, '_>,
op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx, bool>,
) -> InterpResult<'tcx> {
if self.multi_threaded.get() {
let (index, clocks) = self.current_thread_state_mut();
let (index, clocks) = self.current_thread_state_mut(thread_mgr);
if op(index, clocks)? {
let (_, mut clocks) = self.current_thread_state_mut();
let (_, mut clocks) = self.current_thread_state_mut(thread_mgr);
clocks.increment_clock(index);
}
}
@ -1474,15 +1439,14 @@ impl GlobalState {
/// Internal utility to identify a thread stored internally
/// returns the id and the name for better diagnostics.
fn print_thread_metadata(&self, vector: VectorIdx) -> String {
fn print_thread_metadata(
&self,
thread_mgr: &ThreadManager<'_, '_>,
vector: VectorIdx,
) -> String {
let thread = self.vector_info.borrow()[vector];
let thread_name = &self.thread_info.borrow()[thread].thread_name;
if let Some(name) = thread_name {
let name: &str = name;
format!("Thread(id = {:?}, name = {:?})", thread.to_u32(), name)
} else {
format!("Thread(id = {:?})", thread.to_u32())
}
let thread_name = thread_mgr.get_thread_name(thread);
format!("thread `{}`", String::from_utf8_lossy(thread_name))
}
/// Acquire a lock, express that the previous call of
@ -1534,8 +1498,11 @@ impl GlobalState {
/// Load the current vector clock in use and the current set of thread clocks
/// in use for the vector.
#[inline]
pub(super) fn current_thread_state(&self) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
let index = self.current_index();
pub(super) fn current_thread_state(
&self,
thread_mgr: &ThreadManager<'_, '_>,
) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
let index = self.current_index(thread_mgr);
let ref_vector = self.vector_clocks.borrow();
let clocks = Ref::map(ref_vector, |vec| &vec[index]);
(index, clocks)
@ -1544,8 +1511,11 @@ impl GlobalState {
/// Load the current vector clock in use and the current set of thread clocks
/// in use for the vector mutably for modification.
#[inline]
pub(super) fn current_thread_state_mut(&self) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
let index = self.current_index();
pub(super) fn current_thread_state_mut(
&self,
thread_mgr: &ThreadManager<'_, '_>,
) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
let index = self.current_index(thread_mgr);
let ref_vector = self.vector_clocks.borrow_mut();
let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
(index, clocks)
@ -1554,19 +1524,22 @@ impl GlobalState {
/// Return the current thread, should be the same
/// as the data-race active thread.
#[inline]
fn current_index(&self) -> VectorIdx {
self.current_index.get()
fn current_index(&self, thread_mgr: &ThreadManager<'_, '_>) -> VectorIdx {
let active_thread_id = thread_mgr.get_active_thread_id();
self.thread_info.borrow()[active_thread_id]
.vector_index
.expect("active thread has no assigned vector")
}
// SC ATOMIC STORE rule in the paper.
pub(super) fn sc_write(&self) {
let (index, clocks) = self.current_thread_state();
pub(super) fn sc_write(&self, thread_mgr: &ThreadManager<'_, '_>) {
let (index, clocks) = self.current_thread_state(thread_mgr);
self.last_sc_write.borrow_mut().set_at_index(&clocks.clock, index);
}
// SC ATOMIC READ rule in the paper.
pub(super) fn sc_read(&self) {
let (.., mut clocks) = self.current_thread_state_mut();
pub(super) fn sc_read(&self, thread_mgr: &ThreadManager<'_, '_>) {
let (.., mut clocks) = self.current_thread_state_mut(thread_mgr);
clocks.read_seqcst.join(&self.last_sc_fence.borrow());
}
}

@ -9,7 +9,7 @@
//! Note that this implementation does not take into account of C++20's memory model revision to SC accesses
//! and fences introduced by P0668 (<https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0668r5.html>).
//! This implementation is not fully correct under the revised C++20 model and may generate behaviours C++20
//! disallows.
//! disallows (<https://github.com/rust-lang/miri/issues/2301>).
//!
//! Rust follows the C++20 memory model (except for the Consume ordering and some operations not performable through C++'s
//! std::atomic<T> API). It is therefore possible for this implementation to generate behaviours never observable when the
@ -82,10 +82,12 @@ use rustc_const_eval::interpret::{
};
use rustc_data_structures::fx::FxHashMap;
use crate::{AtomicReadOp, AtomicRwOp, AtomicWriteOp, Tag, VClock, VTimestamp, VectorIdx};
use crate::{
AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, Tag, ThreadManager, VClock, VTimestamp, VectorIdx,
};
use super::{
data_race::{GlobalState, ThreadClockSet},
data_race::{GlobalState as DataRaceState, ThreadClockSet},
range_object_map::{AccessType, RangeObjectMap},
};
@ -149,7 +151,7 @@ impl StoreBufferAlloc {
/// before without data race, we can determine that the non-atomic access fully happens
/// after all the prior atomic accesses so the location no longer needs to exhibit
/// any weak memory behaviours until further atomic accesses.
pub fn memory_accessed(&self, range: AllocRange, global: &GlobalState) {
pub fn memory_accessed(&self, range: AllocRange, global: &DataRaceState) {
if !global.ongoing_action_data_race_free() {
let mut buffers = self.store_buffers.borrow_mut();
let access_type = buffers.access_type(range);
@ -236,17 +238,18 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
}
/// Reads from the last store in modification order
fn read_from_last_store(&self, global: &GlobalState) {
fn read_from_last_store(&self, global: &DataRaceState, thread_mgr: &ThreadManager<'_, '_>) {
let store_elem = self.buffer.back();
if let Some(store_elem) = store_elem {
let (index, clocks) = global.current_thread_state();
let (index, clocks) = global.current_thread_state(thread_mgr);
store_elem.load_impl(index, &clocks);
}
}
fn buffered_read(
&self,
global: &GlobalState,
global: &DataRaceState,
thread_mgr: &ThreadManager<'_, '_>,
is_seqcst: bool,
rng: &mut (impl rand::Rng + ?Sized),
validate: impl FnOnce() -> InterpResult<'tcx>,
@ -257,7 +260,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
let store_elem = {
// The `clocks` we got here must be dropped before calling validate_atomic_load
// as the race detector will update it
let (.., clocks) = global.current_thread_state();
let (.., clocks) = global.current_thread_state(thread_mgr);
// Load from a valid entry in the store buffer
self.fetch_store(is_seqcst, &clocks, &mut *rng)
};
@ -268,7 +271,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
// requires access to ThreadClockSet.clock, which is updated by the race detector
validate()?;
let (index, clocks) = global.current_thread_state();
let (index, clocks) = global.current_thread_state(thread_mgr);
let loaded = store_elem.load_impl(index, &clocks);
Ok(loaded)
}
@ -276,10 +279,11 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
fn buffered_write(
&mut self,
val: ScalarMaybeUninit<Tag>,
global: &GlobalState,
global: &DataRaceState,
thread_mgr: &ThreadManager<'_, '_>,
is_seqcst: bool,
) -> InterpResult<'tcx> {
let (index, clocks) = global.current_thread_state();
let (index, clocks) = global.current_thread_state(thread_mgr);
self.store_impl(val, index, &clocks.clock, is_seqcst);
Ok(())
@ -428,8 +432,11 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
{
let range = alloc_range(base_offset, place.layout.size);
if alloc_buffers.is_overlapping(range)
&& !alloc_clocks
.race_free_with_atomic(range, this.machine.data_race.as_ref().unwrap())
&& !alloc_clocks.race_free_with_atomic(
range,
this.machine.data_race.as_ref().unwrap(),
&this.machine.threads,
)
{
throw_unsup_format!(
"racy imperfectly overlapping atomic access is not possible in the C++20 memory model, and not supported by Miri's weak memory emulation"
@ -443,24 +450,24 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
&mut self,
new_val: ScalarMaybeUninit<Tag>,
place: &MPlaceTy<'tcx, Tag>,
atomic: AtomicRwOp,
atomic: AtomicRwOrd,
init: ScalarMaybeUninit<Tag>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
if let (
crate::AllocExtra { weak_memory: Some(alloc_buffers), .. },
crate::Evaluator { data_race: Some(global), .. },
crate::Evaluator { data_race: Some(global), threads, .. },
) = this.get_alloc_extra_mut(alloc_id)?
{
if atomic == AtomicRwOp::SeqCst {
global.sc_read();
global.sc_write();
if atomic == AtomicRwOrd::SeqCst {
global.sc_read(threads);
global.sc_write(threads);
}
let range = alloc_range(base_offset, place.layout.size);
let buffer = alloc_buffers.get_or_create_store_buffer_mut(range, init)?;
buffer.read_from_last_store(global);
buffer.buffered_write(new_val, global, atomic == AtomicRwOp::SeqCst)?;
buffer.read_from_last_store(global, threads);
buffer.buffered_write(new_val, global, threads, atomic == AtomicRwOrd::SeqCst)?;
}
Ok(())
}
@ -468,7 +475,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
fn buffered_atomic_read(
&self,
place: &MPlaceTy<'tcx, Tag>,
atomic: AtomicReadOp,
atomic: AtomicReadOrd,
latest_in_mo: ScalarMaybeUninit<Tag>,
validate: impl FnOnce() -> InterpResult<'tcx>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
@ -476,8 +483,8 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
if let Some(global) = &this.machine.data_race {
let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() {
if atomic == AtomicReadOp::SeqCst {
global.sc_read();
if atomic == AtomicReadOrd::SeqCst {
global.sc_read(&this.machine.threads);
}
let mut rng = this.machine.rng.borrow_mut();
let buffer = alloc_buffers.get_or_create_store_buffer(
@ -486,7 +493,8 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
)?;
let loaded = buffer.buffered_read(
global,
atomic == AtomicReadOp::SeqCst,
&this.machine.threads,
atomic == AtomicReadOrd::SeqCst,
&mut *rng,
validate,
)?;
@ -504,18 +512,18 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
&mut self,
val: ScalarMaybeUninit<Tag>,
dest: &MPlaceTy<'tcx, Tag>,
atomic: AtomicWriteOp,
atomic: AtomicWriteOrd,
init: ScalarMaybeUninit<Tag>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(dest.ptr)?;
if let (
crate::AllocExtra { weak_memory: Some(alloc_buffers), .. },
crate::Evaluator { data_race: Some(global), .. },
crate::Evaluator { data_race: Some(global), threads, .. },
) = this.get_alloc_extra_mut(alloc_id)?
{
if atomic == AtomicWriteOp::SeqCst {
global.sc_write();
if atomic == AtomicWriteOrd::SeqCst {
global.sc_write(threads);
}
// UGLY HACK: in write_scalar_atomic() we don't know the value before our write,
@ -535,7 +543,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
buffer.buffer.pop_front();
}
buffer.buffered_write(val, global, atomic == AtomicWriteOp::SeqCst)?;
buffer.buffered_write(val, global, threads, atomic == AtomicWriteOrd::SeqCst)?;
}
// Caller should've written to dest with the vanilla scalar write, we do nothing here
@ -548,21 +556,21 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
fn perform_read_on_buffered_latest(
&self,
place: &MPlaceTy<'tcx, Tag>,
atomic: AtomicReadOp,
atomic: AtomicReadOrd,
init: ScalarMaybeUninit<Tag>,
) -> InterpResult<'tcx> {
let this = self.eval_context_ref();
if let Some(global) = &this.machine.data_race {
if atomic == AtomicReadOp::SeqCst {
global.sc_read();
if atomic == AtomicReadOrd::SeqCst {
global.sc_read(&this.machine.threads);
}
let size = place.layout.size;
let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() {
let buffer = alloc_buffers
.get_or_create_store_buffer(alloc_range(base_offset, size), init)?;
buffer.read_from_last_store(global);
buffer.read_from_last_store(global, &this.machine.threads);
}
}
Ok(())

@ -70,7 +70,7 @@ pub use crate::shims::tls::{EvalContextExt as _, TlsData};
pub use crate::shims::EvalContextExt as _;
pub use crate::concurrency::data_race::{
AtomicFenceOp, AtomicReadOp, AtomicRwOp, AtomicWriteOp,
AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd,
EvalContextExt as DataRaceEvalContextExt,
};
pub use crate::diagnostics::{

@ -647,7 +647,12 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
None
};
let race_alloc = if let Some(data_race) = &ecx.machine.data_race {
Some(data_race::AllocExtra::new_allocation(data_race, alloc.size(), kind))
Some(data_race::AllocExtra::new_allocation(
data_race,
&ecx.machine.threads,
alloc.size(),
kind,
))
} else {
None
};
@ -756,7 +761,12 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
range: AllocRange,
) -> InterpResult<'tcx> {
if let Some(data_race) = &alloc_extra.data_race {
data_race.read(alloc_id, range, machine.data_race.as_ref().unwrap())?;
data_race.read(
alloc_id,
range,
machine.data_race.as_ref().unwrap(),
&machine.threads,
)?;
}
if let Some(stacked_borrows) = &alloc_extra.stacked_borrows {
stacked_borrows.borrow_mut().memory_read(
@ -782,7 +792,12 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
range: AllocRange,
) -> InterpResult<'tcx> {
if let Some(data_race) = &mut alloc_extra.data_race {
data_race.write(alloc_id, range, machine.data_race.as_mut().unwrap())?;
data_race.write(
alloc_id,
range,
machine.data_race.as_mut().unwrap(),
&machine.threads,
)?;
}
if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
stacked_borrows.get_mut().memory_written(
@ -811,7 +826,12 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
register_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
}
if let Some(data_race) = &mut alloc_extra.data_race {
data_race.deallocate(alloc_id, range, machine.data_race.as_mut().unwrap())?;
data_race.deallocate(
alloc_id,
range,
machine.data_race.as_mut().unwrap(),
&machine.threads,
)?;
}
if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
stacked_borrows.get_mut().memory_deallocated(

@ -864,216 +864,220 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
// Atomic operations
"atomic_load_seqcst" => this.atomic_load(args, dest, AtomicReadOp::SeqCst)?,
"atomic_load_relaxed" => this.atomic_load(args, dest, AtomicReadOp::Relaxed)?,
"atomic_load_acquire" => this.atomic_load(args, dest, AtomicReadOp::Acquire)?,
"atomic_load_seqcst" => this.atomic_load(args, dest, AtomicReadOrd::SeqCst)?,
"atomic_load_relaxed" => this.atomic_load(args, dest, AtomicReadOrd::Relaxed)?,
"atomic_load_acquire" => this.atomic_load(args, dest, AtomicReadOrd::Acquire)?,
"atomic_store_seqcst" => this.atomic_store(args, AtomicWriteOp::SeqCst)?,
"atomic_store_relaxed" => this.atomic_store(args, AtomicWriteOp::Relaxed)?,
"atomic_store_release" => this.atomic_store(args, AtomicWriteOp::Release)?,
"atomic_store_seqcst" => this.atomic_store(args, AtomicWriteOrd::SeqCst)?,
"atomic_store_relaxed" => this.atomic_store(args, AtomicWriteOrd::Relaxed)?,
"atomic_store_release" => this.atomic_store(args, AtomicWriteOrd::Release)?,
"atomic_fence_acquire" => this.atomic_fence(args, AtomicFenceOp::Acquire)?,
"atomic_fence_release" => this.atomic_fence(args, AtomicFenceOp::Release)?,
"atomic_fence_acqrel" => this.atomic_fence(args, AtomicFenceOp::AcqRel)?,
"atomic_fence_seqcst" => this.atomic_fence(args, AtomicFenceOp::SeqCst)?,
"atomic_fence_acquire" => this.atomic_fence(args, AtomicFenceOrd::Acquire)?,
"atomic_fence_release" => this.atomic_fence(args, AtomicFenceOrd::Release)?,
"atomic_fence_acqrel" => this.atomic_fence(args, AtomicFenceOrd::AcqRel)?,
"atomic_fence_seqcst" => this.atomic_fence(args, AtomicFenceOrd::SeqCst)?,
"atomic_singlethreadfence_acquire" =>
this.compiler_fence(args, AtomicFenceOp::Acquire)?,
this.compiler_fence(args, AtomicFenceOrd::Acquire)?,
"atomic_singlethreadfence_release" =>
this.compiler_fence(args, AtomicFenceOp::Release)?,
this.compiler_fence(args, AtomicFenceOrd::Release)?,
"atomic_singlethreadfence_acqrel" =>
this.compiler_fence(args, AtomicFenceOp::AcqRel)?,
this.compiler_fence(args, AtomicFenceOrd::AcqRel)?,
"atomic_singlethreadfence_seqcst" =>
this.compiler_fence(args, AtomicFenceOp::SeqCst)?,
this.compiler_fence(args, AtomicFenceOrd::SeqCst)?,
"atomic_xchg_seqcst" => this.atomic_exchange(args, dest, AtomicRwOp::SeqCst)?,
"atomic_xchg_acquire" => this.atomic_exchange(args, dest, AtomicRwOp::Acquire)?,
"atomic_xchg_release" => this.atomic_exchange(args, dest, AtomicRwOp::Release)?,
"atomic_xchg_acqrel" => this.atomic_exchange(args, dest, AtomicRwOp::AcqRel)?,
"atomic_xchg_relaxed" => this.atomic_exchange(args, dest, AtomicRwOp::Relaxed)?,
"atomic_xchg_seqcst" => this.atomic_exchange(args, dest, AtomicRwOrd::SeqCst)?,
"atomic_xchg_acquire" => this.atomic_exchange(args, dest, AtomicRwOrd::Acquire)?,
"atomic_xchg_release" => this.atomic_exchange(args, dest, AtomicRwOrd::Release)?,
"atomic_xchg_acqrel" => this.atomic_exchange(args, dest, AtomicRwOrd::AcqRel)?,
"atomic_xchg_relaxed" => this.atomic_exchange(args, dest, AtomicRwOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchg_seqcst_seqcst" =>
this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst)?,
this.atomic_compare_exchange(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::SeqCst)?,
#[rustfmt::skip]
"atomic_cxchg_acquire_acquire" =>
this.atomic_compare_exchange(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire)?,
this.atomic_compare_exchange(args, dest, AtomicRwOrd::Acquire, AtomicReadOrd::Acquire)?,
#[rustfmt::skip]
"atomic_cxchg_release_relaxed" =>
this.atomic_compare_exchange(args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed)?,
this.atomic_compare_exchange(args, dest, AtomicRwOrd::Release, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchg_acqrel_acquire" =>
this.atomic_compare_exchange(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire)?,
this.atomic_compare_exchange(args, dest, AtomicRwOrd::AcqRel, AtomicReadOrd::Acquire)?,
#[rustfmt::skip]
"atomic_cxchg_relaxed_relaxed" =>
this.atomic_compare_exchange(args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed)?,
this.atomic_compare_exchange(args, dest, AtomicRwOrd::Relaxed, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchg_acquire_relaxed" =>
this.atomic_compare_exchange(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed)?,
this.atomic_compare_exchange(args, dest, AtomicRwOrd::Acquire, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchg_acqrel_relaxed" =>
this.atomic_compare_exchange(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed)?,
this.atomic_compare_exchange(args, dest, AtomicRwOrd::AcqRel, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchg_seqcst_relaxed" =>
this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed)?,
this.atomic_compare_exchange(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchg_seqcst_acquire" =>
this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire)?,
this.atomic_compare_exchange(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::Acquire)?,
#[rustfmt::skip]
"atomic_cxchgweak_seqcst_seqcst" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst)?,
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::SeqCst)?,
#[rustfmt::skip]
"atomic_cxchgweak_acquire_acquire" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire)?,
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::Acquire, AtomicReadOrd::Acquire)?,
#[rustfmt::skip]
"atomic_cxchgweak_release_relaxed" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed)?,
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::Release, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchgweak_acqrel_acquire" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire)?,
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::AcqRel, AtomicReadOrd::Acquire)?,
#[rustfmt::skip]
"atomic_cxchgweak_relaxed_relaxed" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed)?,
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::Relaxed, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchgweak_acquire_relaxed" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed)?,
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::Acquire, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchgweak_acqrel_relaxed" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed)?,
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::AcqRel, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchgweak_seqcst_relaxed" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed)?,
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_cxchgweak_seqcst_acquire" =>
this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire)?,
this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::Acquire)?,
#[rustfmt::skip]
"atomic_or_seqcst" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::SeqCst)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::SeqCst)?,
#[rustfmt::skip]
"atomic_or_acquire" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Acquire)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::Acquire)?,
#[rustfmt::skip]
"atomic_or_release" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Release)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::Release)?,
#[rustfmt::skip]
"atomic_or_acqrel" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::AcqRel)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::AcqRel)?,
#[rustfmt::skip]
"atomic_or_relaxed" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Relaxed)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_xor_seqcst" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::SeqCst)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::SeqCst)?,
#[rustfmt::skip]
"atomic_xor_acquire" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Acquire)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::Acquire)?,
#[rustfmt::skip]
"atomic_xor_release" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Release)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::Release)?,
#[rustfmt::skip]
"atomic_xor_acqrel" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::AcqRel)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::AcqRel)?,
#[rustfmt::skip]
"atomic_xor_relaxed" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Relaxed)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_and_seqcst" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::SeqCst)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::SeqCst)?,
#[rustfmt::skip]
"atomic_and_acquire" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Acquire)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::Acquire)?,
#[rustfmt::skip]
"atomic_and_release" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Release)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::Release)?,
#[rustfmt::skip]
"atomic_and_acqrel" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::AcqRel)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::AcqRel)?,
#[rustfmt::skip]
"atomic_and_relaxed" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Relaxed)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_nand_seqcst" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::SeqCst)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::SeqCst)?,
#[rustfmt::skip]
"atomic_nand_acquire" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Acquire)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::Acquire)?,
#[rustfmt::skip]
"atomic_nand_release" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Release)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::Release)?,
#[rustfmt::skip]
"atomic_nand_acqrel" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::AcqRel)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::AcqRel)?,
#[rustfmt::skip]
"atomic_nand_relaxed" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Relaxed)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_xadd_seqcst" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::SeqCst)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::SeqCst)?,
#[rustfmt::skip]
"atomic_xadd_acquire" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Acquire)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::Acquire)?,
#[rustfmt::skip]
"atomic_xadd_release" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Release)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::Release)?,
#[rustfmt::skip]
"atomic_xadd_acqrel" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::AcqRel)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::AcqRel)?,
#[rustfmt::skip]
"atomic_xadd_relaxed" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Relaxed)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::Relaxed)?,
#[rustfmt::skip]
"atomic_xsub_seqcst" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::SeqCst)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::SeqCst)?,
#[rustfmt::skip]
"atomic_xsub_acquire" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Acquire)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::Acquire)?,
#[rustfmt::skip]
"atomic_xsub_release" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Release)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::Release)?,
#[rustfmt::skip]
"atomic_xsub_acqrel" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::AcqRel)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::AcqRel)?,
#[rustfmt::skip]
"atomic_xsub_relaxed" =>
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Relaxed)?,
"atomic_min_seqcst" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::SeqCst)?,
this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::Relaxed)?,
"atomic_min_seqcst" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::SeqCst)?,
"atomic_min_acquire" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Acquire)?,
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Acquire)?,
"atomic_min_release" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Release)?,
"atomic_min_acqrel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::AcqRel)?,
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Release)?,
"atomic_min_acqrel" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::AcqRel)?,
"atomic_min_relaxed" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Relaxed)?,
"atomic_max_seqcst" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::SeqCst)?,
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Relaxed)?,
"atomic_max_seqcst" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::SeqCst)?,
"atomic_max_acquire" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Acquire)?,
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Acquire)?,
"atomic_max_release" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Release)?,
"atomic_max_acqrel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::AcqRel)?,
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Release)?,
"atomic_max_acqrel" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::AcqRel)?,
"atomic_max_relaxed" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Relaxed)?,
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Relaxed)?,
"atomic_umin_seqcst" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::SeqCst)?,
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::SeqCst)?,
"atomic_umin_acquire" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Acquire)?,
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Acquire)?,
"atomic_umin_release" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Release)?,
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Release)?,
"atomic_umin_acqrel" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::AcqRel)?,
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::AcqRel)?,
"atomic_umin_relaxed" =>
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Relaxed)?,
this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Relaxed)?,
"atomic_umax_seqcst" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::SeqCst)?,
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::SeqCst)?,
"atomic_umax_acquire" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Acquire)?,
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Acquire)?,
"atomic_umax_release" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Release)?,
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Release)?,
"atomic_umax_acqrel" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::AcqRel)?,
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::AcqRel)?,
"atomic_umax_relaxed" =>
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Relaxed)?,
this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Relaxed)?,
// Other
"exact_div" => {
@ -1101,7 +1105,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
&mut self,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
atomic: AtomicReadOp,
atomic: AtomicReadOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
@ -1129,7 +1133,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
fn atomic_store(
&mut self,
args: &[OpTy<'tcx, Tag>],
atomic: AtomicWriteOp,
atomic: AtomicWriteOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
@ -1156,7 +1160,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
fn compiler_fence(
&mut self,
args: &[OpTy<'tcx, Tag>],
atomic: AtomicFenceOp,
atomic: AtomicFenceOrd,
) -> InterpResult<'tcx> {
let [] = check_arg_count(args)?;
let _ = atomic;
@ -1167,7 +1171,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
fn atomic_fence(
&mut self,
args: &[OpTy<'tcx, Tag>],
atomic: AtomicFenceOp,
atomic: AtomicFenceOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let [] = check_arg_count(args)?;
@ -1180,7 +1184,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
atomic_op: AtomicOp,
atomic: AtomicRwOp,
atomic: AtomicRwOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
@ -1226,7 +1230,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
&mut self,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
atomic: AtomicRwOp,
atomic: AtomicRwOrd,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
@ -1254,8 +1258,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
&mut self,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
success: AtomicRwOp,
fail: AtomicReadOp,
success: AtomicRwOrd,
fail: AtomicReadOrd,
can_fail_spuriously: bool,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
@ -1294,8 +1298,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
&mut self,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
success: AtomicRwOp,
fail: AtomicReadOp,
success: AtomicRwOrd,
fail: AtomicReadOrd,
) -> InterpResult<'tcx> {
self.atomic_compare_exchange_impl(args, dest, success, fail, false)
}
@ -1304,8 +1308,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
&mut self,
args: &[OpTy<'tcx, Tag>],
dest: &PlaceTy<'tcx, Tag>,
success: AtomicRwOp,
fail: AtomicReadOp,
success: AtomicRwOrd,
fail: AtomicReadOrd,
) -> InterpResult<'tcx> {
self.atomic_compare_exchange_impl(args, dest, success, fail, true)
}

@ -169,7 +169,7 @@ pub fn futex<'tcx>(
//
// Thankfully, preemptions cannot happen inside a Miri shim, so we do not need to
// do anything special to guarantee fence-load-comparison atomicity.
this.atomic_fence(&[], AtomicFenceOp::SeqCst)?;
this.atomic_fence(&[], AtomicFenceOrd::SeqCst)?;
// Read an `i32` through the pointer, regardless of any wrapper types.
// It's not uncommon for `addr` to be passed as another type than `*mut i32`, such as `*const AtomicI32`.
let futex_val = this
@ -177,7 +177,7 @@ pub fn futex<'tcx>(
&addr.into(),
0,
this.machine.layouts.i32,
AtomicReadOp::Relaxed,
AtomicReadOrd::Relaxed,
)?
.to_i32()?;
if val == futex_val {
@ -240,7 +240,7 @@ pub fn futex<'tcx>(
// Together with the SeqCst fence in futex_wait, this makes sure that futex_wait
// will see the latest value on addr which could be changed by our caller
// before doing the syscall.
this.atomic_fence(&[], AtomicFenceOp::SeqCst)?;
this.atomic_fence(&[], AtomicFenceOrd::SeqCst)?;
let mut n = 0;
for _ in 0..val {
if let Some(thread) = this.futex_wake(addr_usize, bitset) {

@ -68,7 +68,7 @@ fn mutex_get_kind<'mir, 'tcx: 'mir>(
mutex_op,
offset,
ecx.machine.layouts.i32,
AtomicReadOp::Relaxed,
AtomicReadOrd::Relaxed,
)
}
@ -83,7 +83,7 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>(
offset,
kind,
layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.i32),
AtomicWriteOp::Relaxed,
AtomicWriteOrd::Relaxed,
)
}
@ -91,7 +91,7 @@ fn mutex_get_id<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
mutex_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
ecx.read_scalar_at_offset_atomic(mutex_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Relaxed)
ecx.read_scalar_at_offset_atomic(mutex_op, 4, ecx.machine.layouts.u32, AtomicReadOrd::Relaxed)
}
fn mutex_set_id<'mir, 'tcx: 'mir>(
@ -104,7 +104,7 @@ fn mutex_set_id<'mir, 'tcx: 'mir>(
4,
id,
layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32),
AtomicWriteOp::Relaxed,
AtomicWriteOrd::Relaxed,
)
}
@ -120,8 +120,8 @@ fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar().into(),
AtomicRwOp::Relaxed,
AtomicReadOp::Relaxed,
AtomicRwOrd::Relaxed,
AtomicReadOrd::Relaxed,
false,
)?
.to_scalar_pair()
@ -147,7 +147,7 @@ fn rwlock_get_id<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
rwlock_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
ecx.read_scalar_at_offset_atomic(rwlock_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Relaxed)
ecx.read_scalar_at_offset_atomic(rwlock_op, 4, ecx.machine.layouts.u32, AtomicReadOrd::Relaxed)
}
fn rwlock_set_id<'mir, 'tcx: 'mir>(
@ -160,7 +160,7 @@ fn rwlock_set_id<'mir, 'tcx: 'mir>(
4,
id,
layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32),
AtomicWriteOp::Relaxed,
AtomicWriteOrd::Relaxed,
)
}
@ -176,8 +176,8 @@ fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar().into(),
AtomicRwOp::Relaxed,
AtomicReadOp::Relaxed,
AtomicRwOrd::Relaxed,
AtomicReadOrd::Relaxed,
false,
)?
.to_scalar_pair()
@ -231,7 +231,7 @@ fn cond_get_id<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
cond_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
ecx.read_scalar_at_offset_atomic(cond_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Relaxed)
ecx.read_scalar_at_offset_atomic(cond_op, 4, ecx.machine.layouts.u32, AtomicReadOrd::Relaxed)
}
fn cond_set_id<'mir, 'tcx: 'mir>(
@ -244,7 +244,7 @@ fn cond_set_id<'mir, 'tcx: 'mir>(
4,
id,
layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32),
AtomicWriteOp::Relaxed,
AtomicWriteOrd::Relaxed,
)
}
@ -260,8 +260,8 @@ fn cond_get_or_create_id<'mir, 'tcx: 'mir>(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar().into(),
AtomicRwOp::Relaxed,
AtomicReadOp::Relaxed,
AtomicRwOrd::Relaxed,
AtomicReadOrd::Relaxed,
false,
)?
.to_scalar_pair()

View File

@ -15,8 +15,8 @@ fn srwlock_get_or_create_id<'mir, 'tcx: 'mir>(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar().into(),
AtomicRwOp::Relaxed,
AtomicReadOp::Relaxed,
AtomicRwOrd::Relaxed,
AtomicReadOrd::Relaxed,
false,
)?
.to_scalar_pair()

View File

@ -170,6 +170,14 @@ impl<'mir, 'tcx> Default for Thread<'mir, 'tcx> {
}
}
impl<'mir, 'tcx> Thread<'mir, 'tcx> {
fn new(name: &str) -> Self {
let mut thread = Thread::default();
thread.thread_name = Some(Vec::from(name.as_bytes()));
thread
}
}
/// A specific moment in time.
#[derive(Debug)]
pub enum Time {
@ -230,7 +238,7 @@ impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> {
fn default() -> Self {
let mut threads = IndexVec::new();
// Create the main thread and add it to the list of threads.
let mut main_thread = Thread::default();
let mut main_thread = Thread::new("main");
// The main thread can *not* be joined on.
main_thread.join_status = ThreadJoinStatus::Detached;
threads.push(main_thread);
@ -289,15 +297,21 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
}
/// Get the id of the currently active thread.
fn get_active_thread_id(&self) -> ThreadId {
pub fn get_active_thread_id(&self) -> ThreadId {
self.active_thread
}
/// Get the total number of threads that were ever spawned by this program.
fn get_total_thread_count(&self) -> usize {
pub fn get_total_thread_count(&self) -> usize {
self.threads.len()
}
/// Get the number of threads that are currently live, i.e., not yet terminated.
/// (They might be blocked.)
pub fn get_live_thread_count(&self) -> usize {
self.threads.iter().filter(|t| !matches!(t.state, ThreadState::Terminated)).count()
}
/// Has the given thread terminated?
fn has_terminated(&self, thread_id: ThreadId) -> bool {
self.threads[thread_id].state == ThreadState::Terminated
@ -366,22 +380,27 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
} else {
// The thread has already terminated - mark join happens-before
if let Some(data_race) = data_race {
data_race.thread_joined(self.active_thread, joined_thread_id);
data_race.thread_joined(self, self.active_thread, joined_thread_id);
}
}
Ok(())
}
/// Set the name of the active thread.
fn set_thread_name(&mut self, new_thread_name: Vec<u8>) {
fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) {
self.active_thread_mut().thread_name = Some(new_thread_name);
}
/// Get the name of the active thread.
fn get_thread_name(&self) -> &[u8] {
pub fn get_active_thread_name(&self) -> &[u8] {
self.active_thread_ref().thread_name()
}
/// Get the name of the given thread.
pub fn get_thread_name(&self, thread: ThreadId) -> &[u8] {
self.threads[thread].thread_name()
}
/// Put the thread into the blocked state.
fn block_thread(&mut self, thread: ThreadId) {
let state = &mut self.threads[thread].state;
@ -460,21 +479,25 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
false
});
}
// Set the thread into a terminated state in the data-race detector
// Set the thread into a terminated state in the data-race detector.
if let Some(ref mut data_race) = data_race {
data_race.thread_terminated();
data_race.thread_terminated(self);
}
// Check if we need to unblock any threads.
let mut joined_threads = vec![]; // store which threads joined, we'll need it
for (i, thread) in self.threads.iter_enumerated_mut() {
if thread.state == ThreadState::BlockedOnJoin(self.active_thread) {
// The thread has terminated, mark happens-before edge to joining thread
if let Some(ref mut data_race) = data_race {
data_race.thread_joined(i, self.active_thread);
if data_race.is_some() {
joined_threads.push(i);
}
trace!("unblocking {:?} because {:?} terminated", i, self.active_thread);
thread.state = ThreadState::Enabled;
}
}
for &i in &joined_threads {
data_race.as_mut().unwrap().thread_joined(self, i, self.active_thread);
}
free_tls_statics
}
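
The two-pass loop above exists because `thread_joined` now takes the whole `ThreadManager`, which cannot be passed while `self.threads` is still mutably borrowed by the iteration. A minimal stand-alone sketch of that borrow-splitting pattern, using hypothetical simplified types:

struct Detector;
struct Manager { items: Vec<u32> }

impl Detector {
    fn joined(&mut self, mgr: &Manager, idx: usize) {
        // A real detector would read per-thread state out of `mgr` here.
        let _ = (mgr.items.len(), idx);
    }
}

impl Manager {
    fn terminate(&mut self, detector: &mut Option<Detector>) {
        let mut joined = Vec::new();
        for (i, item) in self.items.iter_mut().enumerate() {
            if *item == 0 {
                *item = 1; // mutate while iterating
                if detector.is_some() {
                    joined.push(i); // remember the index, call the detector later
                }
            }
        }
        // The mutable borrow of `self.items` has ended, so `&*self` can be passed along.
        for &i in &joined {
            detector.as_mut().unwrap().joined(self, i);
        }
    }
}

fn main() {
    let mut mgr = Manager { items: vec![0, 2, 0] };
    let mut det = Some(Detector);
    mgr.terminate(&mut det);
}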
@ -484,10 +507,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
/// used in stateless model checkers such as Loom: run the active thread as
/// long as we can and switch only when we have to (the active thread was
/// blocked, terminated, or has explicitly asked to be preempted).
fn schedule(
&mut self,
data_race: &Option<data_race::GlobalState>,
) -> InterpResult<'tcx, SchedulingAction> {
fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
// Check whether the thread has **just** terminated (`check_terminated`
// checks whether the thread has popped all its stack and if yes, sets
// the thread state to terminated).
@ -535,9 +555,6 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
debug_assert_ne!(self.active_thread, id);
if thread.state == ThreadState::Enabled {
self.active_thread = id;
if let Some(data_race) = data_race {
data_race.thread_set_active(self.active_thread);
}
break;
}
}
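
The scheduling policy described in the doc comment above, namely keep running the active thread and switch only when forced to, can be illustrated with a tiny stand-alone sketch (hypothetical types, not the actual `ThreadManager` logic):

#[derive(PartialEq)]
enum State { Enabled, Blocked, Terminated }

// Return the thread to run next under the "switch only when we have to" policy.
fn pick_next(active: usize, states: &[State]) -> Option<usize> {
    if states[active] == State::Enabled {
        // The active thread can still run, so keep it scheduled.
        return Some(active);
    }
    // Otherwise pick the first enabled thread, if any remain.
    states.iter().position(|s| *s == State::Enabled)
}

fn main() {
    let states = [State::Blocked, State::Enabled, State::Terminated];
    assert_eq!(pick_next(0, &states), Some(1));
}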
@ -598,7 +615,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let this = self.eval_context_mut();
let id = this.machine.threads.create_thread();
if let Some(data_race) = &mut this.machine.data_race {
data_race.thread_created(id);
data_race.thread_created(&this.machine.threads, id);
}
id
}
@ -619,9 +636,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
fn set_active_thread(&mut self, thread_id: ThreadId) -> ThreadId {
let this = self.eval_context_mut();
if let Some(data_race) = &this.machine.data_race {
data_race.thread_set_active(thread_id);
}
this.machine.threads.set_active_thread_id(thread_id)
}
@ -682,12 +696,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) {
let this = self.eval_context_mut();
if let Some(data_race) = &mut this.machine.data_race {
if let Ok(string) = String::from_utf8(new_thread_name.clone()) {
data_race.thread_set_name(this.machine.threads.active_thread, string);
}
}
this.machine.threads.set_thread_name(new_thread_name);
this.machine.threads.set_active_thread_name(new_thread_name);
}
#[inline]
@ -696,7 +705,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
'mir: 'c,
{
let this = self.eval_context_ref();
this.machine.threads.get_thread_name()
this.machine.threads.get_active_thread_name()
}
#[inline]
@ -776,8 +785,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
let this = self.eval_context_mut();
let data_race = &this.machine.data_race;
this.machine.threads.schedule(data_race)
this.machine.threads.schedule()
}
/// Handles thread termination of the active thread: wakes up threads joining on this one,

View File

@ -94,8 +94,6 @@ regexes! {
"([0-9]+: ) +0x[0-9a-f]+ - (.*)" => "$1$2",
// erase long hexadecimals
r"0x[0-9a-fA-F]+[0-9a-fA-F]{2,2}" => "$$HEX",
// erase clocks
r"VClock\(\[[^\]]+\]\)" => "VClock",
// erase specific alignments
"alignment [0-9]+" => "alignment ALIGN",
// erase thread caller ids

View File

@ -38,7 +38,7 @@ pub fn main() {
let pointer = &*ptr.0;
// Note: could also error due to reading uninitialized memory, but the data-race detector triggers first.
*pointer.load(Ordering::Relaxed) //~ ERROR Data race detected between Read on Thread(id = 2) and Allocate on Thread(id = 1)
*pointer.load(Ordering::Relaxed) //~ ERROR Data race detected between Read on thread `<unnamed>` and Allocate on thread `<unnamed>`
});
j1.join().unwrap();

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Read on Thread(id = 2) and Allocate on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Read on thread `<unnamed>` and Allocate on thread `<unnamed>` at ALLOC
--> $DIR/alloc_read_race.rs:LL:CC
|
LL | *pointer.load(Ordering::Relaxed)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Read on Thread(id = 2) and Allocate on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Read on thread `<unnamed>` and Allocate on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -36,7 +36,7 @@ pub fn main() {
let j2 = spawn(move || {
let pointer = &*ptr.0;
*pointer.load(Ordering::Relaxed) = 2; //~ ERROR Data race detected between Write on Thread(id = 2) and Allocate on Thread(id = 1)
*pointer.load(Ordering::Relaxed) = 2; //~ ERROR Data race detected between Write on thread `<unnamed>` and Allocate on thread `<unnamed>`
});
j1.join().unwrap();

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Write on Thread(id = 2) and Allocate on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Write on thread `<unnamed>` and Allocate on thread `<unnamed>` at ALLOC
--> $DIR/alloc_write_race.rs:LL:CC
|
LL | *pointer.load(Ordering::Relaxed) = 2;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Write on Thread(id = 2) and Allocate on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Write on thread `<unnamed>` and Allocate on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -22,7 +22,7 @@ pub fn main() {
let j2 = spawn(move || {
//Equivalent to: (&*c.0).load(Ordering::SeqCst)
intrinsics::atomic_load_seqcst(c.0 as *mut usize) //~ ERROR Data race detected between Atomic Load on Thread(id = 2) and Write on Thread(id = 1)
intrinsics::atomic_load_seqcst(c.0 as *mut usize) //~ ERROR Data race detected between Atomic Load on thread `<unnamed>` and Write on thread `<unnamed>`
});
j1.join().unwrap();

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Atomic Load on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Atomic Load on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
--> $DIR/atomic_read_na_write_race1.rs:LL:CC
|
LL | intrinsics::atomic_load_seqcst(c.0 as *mut usize)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Atomic Load on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Atomic Load on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -22,7 +22,7 @@ pub fn main() {
let j2 = spawn(move || {
let atomic_ref = &mut *c.0;
*atomic_ref.get_mut() = 32; //~ ERROR Data race detected between Write on Thread(id = 2) and Atomic Load on Thread(id = 1)
*atomic_ref.get_mut() = 32; //~ ERROR Data race detected between Write on thread `<unnamed>` and Atomic Load on thread `<unnamed>`
});
j1.join().unwrap();

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Write on Thread(id = 2) and Atomic Load on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Write on thread `<unnamed>` and Atomic Load on thread `<unnamed>` at ALLOC
--> $DIR/atomic_read_na_write_race2.rs:LL:CC
|
LL | *atomic_ref.get_mut() = 32;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Write on Thread(id = 2) and Atomic Load on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Write on thread `<unnamed>` and Atomic Load on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -22,7 +22,7 @@ pub fn main() {
let j2 = spawn(move || {
let atomic_ref = &mut *c.0;
*atomic_ref.get_mut() //~ ERROR Data race detected between Read on Thread(id = 2) and Atomic Store on Thread(id = 1)
*atomic_ref.get_mut() //~ ERROR Data race detected between Read on thread `<unnamed>` and Atomic Store on thread `<unnamed>`
});
j1.join().unwrap();

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Read on Thread(id = 2) and Atomic Store on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Read on thread `<unnamed>` and Atomic Store on thread `<unnamed>` at ALLOC
--> $DIR/atomic_write_na_read_race1.rs:LL:CC
|
LL | *atomic_ref.get_mut()
| ^^^^^^^^^^^^^^^^^^^^^ Data race detected between Read on Thread(id = 2) and Atomic Store on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^^^^^^^^^^^^^^^^^^ Data race detected between Read on thread `<unnamed>` and Atomic Store on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -22,7 +22,7 @@ pub fn main() {
let j2 = spawn(move || {
//Equivalent to: (&*c.0).store(32, Ordering::SeqCst)
atomic_store(c.0 as *mut usize, 32); //~ ERROR Data race detected between Atomic Store on Thread(id = 2) and Read on Thread(id = 1)
atomic_store(c.0 as *mut usize, 32); //~ ERROR Data race detected between Atomic Store on thread `<unnamed>` and Read on thread `<unnamed>`
});
j1.join().unwrap();

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Atomic Store on Thread(id = 2) and Read on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Atomic Store on thread `<unnamed>` and Read on thread `<unnamed>` at ALLOC
--> $DIR/atomic_write_na_read_race2.rs:LL:CC
|
LL | atomic_store(c.0 as *mut usize, 32);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Atomic Store on Thread(id = 2) and Read on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Atomic Store on thread `<unnamed>` and Read on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -22,7 +22,7 @@ pub fn main() {
let j2 = spawn(move || {
//Equivalent to: (&*c.0).store(64, Ordering::SeqCst)
atomic_store(c.0 as *mut usize, 64); //~ ERROR Data race detected between Atomic Store on Thread(id = 2) and Write on Thread(id = 1)
atomic_store(c.0 as *mut usize, 64); //~ ERROR Data race detected between Atomic Store on thread `<unnamed>` and Write on thread `<unnamed>`
});
j1.join().unwrap();

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Atomic Store on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Atomic Store on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
--> $DIR/atomic_write_na_write_race1.rs:LL:CC
|
LL | atomic_store(c.0 as *mut usize, 64);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Atomic Store on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Atomic Store on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -22,7 +22,7 @@ pub fn main() {
let j2 = spawn(move || {
let atomic_ref = &mut *c.0;
*atomic_ref.get_mut() = 32; //~ ERROR Data race detected between Write on Thread(id = 2) and Atomic Store on Thread(id = 1)
*atomic_ref.get_mut() = 32; //~ ERROR Data race detected between Write on thread `<unnamed>` and Atomic Store on thread `<unnamed>`
});
j1.join().unwrap();

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Write on Thread(id = 2) and Atomic Store on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Write on thread `<unnamed>` and Atomic Store on thread `<unnamed>` at ALLOC
--> $DIR/atomic_write_na_write_race2.rs:LL:CC
|
LL | *atomic_ref.get_mut() = 32;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Write on Thread(id = 2) and Atomic Store on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Write on thread `<unnamed>` and Atomic Store on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -34,7 +34,7 @@ fn main() {
let join2 = unsafe {
spawn(move || {
*c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 3) and Write on Thread(id = 1)
*c.0 = 64; //~ ERROR Data race detected between Write on thread `<unnamed>` and Write on thread `<unnamed>`
})
};

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Write on Thread(id = 3) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Write on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
--> $DIR/dangling_thread_async_race.rs:LL:CC
|
LL | *c.0 = 64;
| ^^^^^^^^^ Data race detected between Write on Thread(id = 3) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^^^^^^ Data race detected between Write on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -33,6 +33,6 @@ fn main() {
spawn(|| ()).join().unwrap();
unsafe {
*c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 0, name = "main") and Write on Thread(id = 1)
*c.0 = 64; //~ ERROR Data race detected between Write on thread `main` and Write on thread `<unnamed>`
}
}

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Write on Thread(id = 0, name = "main") and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Write on thread `main` and Write on thread `<unnamed>` at ALLOC
--> $DIR/dangling_thread_race.rs:LL:CC
|
LL | *c.0 = 64;
| ^^^^^^^^^ Data race detected between Write on Thread(id = 0, name = "main") and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^^^^^^ Data race detected between Write on thread `main` and Write on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -24,7 +24,7 @@ pub fn main() {
let j2 = spawn(move || {
__rust_dealloc(
//~^ ERROR Data race detected between Deallocate on Thread(id = 2) and Read on Thread(id = 1)
//~^ ERROR Data race detected between Deallocate on thread `<unnamed>` and Read on thread `<unnamed>`
ptr.0 as *mut _,
std::mem::size_of::<usize>(),
std::mem::align_of::<usize>(),

View File

@ -1,4 +1,4 @@
error: Undefined Behavior: Data race detected between Deallocate on Thread(id = 2) and Read on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Deallocate on thread `<unnamed>` and Read on thread `<unnamed>` at ALLOC
--> $DIR/dealloc_read_race1.rs:LL:CC
|
LL | / __rust_dealloc(
@ -7,7 +7,7 @@ LL | | ptr.0 as *mut _,
LL | | std::mem::size_of::<usize>(),
LL | | std::mem::align_of::<usize>(),
LL | | );
| |_____________^ Data race detected between Deallocate on Thread(id = 2) and Read on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| |_____________^ Data race detected between Deallocate on thread `<unnamed>` and Read on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -27,7 +27,7 @@ pub fn main() {
});
let j2 = spawn(move || {
// Also an error of the form: Data race detected between Read on Thread(id = 2) and Deallocate on Thread(id = 1)
// Also an error of the form: Data race detected between Read on thread `<unnamed>` and Deallocate on thread `<unnamed>`
// but the invalid allocation is detected first.
*ptr.0 //~ ERROR dereferenced after this allocation got freed
});

View File

@ -36,7 +36,7 @@ pub fn main() {
sleep(Duration::from_millis(200));
// Now `stack_var` gets deallocated.
} //~ ERROR Data race detected between Deallocate on Thread(id = 1) and Read on Thread(id = 2)
} //~ ERROR Data race detected between Deallocate on thread `<unnamed>` and Read on thread `<unnamed>`
});
let j2 = spawn(move || {

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Deallocate on Thread(id = 1) and Read on Thread(id = 2) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Deallocate on thread `<unnamed>` and Read on thread `<unnamed>` at ALLOC
--> $DIR/dealloc_read_race_stack.rs:LL:CC
|
LL | }
| ^ Data race detected between Deallocate on Thread(id = 1) and Read on Thread(id = 2) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^ Data race detected between Deallocate on thread `<unnamed>` and Read on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -23,7 +23,7 @@ pub fn main() {
let j2 = spawn(move || {
__rust_dealloc(
//~^ ERROR Data race detected between Deallocate on Thread(id = 2) and Write on Thread(id = 1)
//~^ ERROR Data race detected between Deallocate on thread `<unnamed>` and Write on thread `<unnamed>`
ptr.0 as *mut _,
std::mem::size_of::<usize>(),
std::mem::align_of::<usize>(),

View File

@ -1,4 +1,4 @@
error: Undefined Behavior: Data race detected between Deallocate on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Deallocate on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
--> $DIR/dealloc_write_race1.rs:LL:CC
|
LL | / __rust_dealloc(
@ -7,7 +7,7 @@ LL | | ptr.0 as *mut _,
LL | | std::mem::size_of::<usize>(),
LL | | std::mem::align_of::<usize>(),
LL | | );
| |_____________^ Data race detected between Deallocate on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| |_____________^ Data race detected between Deallocate on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -26,7 +26,7 @@ pub fn main() {
});
let j2 = spawn(move || {
// Also an error of the form: Data race detected between Write on Thread(id = 2) and Deallocate on Thread(id = 1)
// Also an error of the form: Data race detected between Write on thread `<unnamed>` and Deallocate on thread `<unnamed>`
// but the invalid allocation is detected first.
*ptr.0 = 2; //~ ERROR dereferenced after this allocation got freed
});

View File

@ -36,7 +36,7 @@ pub fn main() {
sleep(Duration::from_millis(200));
// Now `stack_var` gets deallocated.
} //~ ERROR Data race detected between Deallocate on Thread(id = 1) and Write on Thread(id = 2)
} //~ ERROR Data race detected between Deallocate on thread `<unnamed>` and Write on thread `<unnamed>`
});
let j2 = spawn(move || {

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Deallocate on Thread(id = 1) and Write on Thread(id = 2) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Deallocate on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
--> $DIR/dealloc_write_race_stack.rs:LL:CC
|
LL | }
| ^ Data race detected between Deallocate on Thread(id = 1) and Write on Thread(id = 2) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^ Data race detected between Deallocate on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -29,7 +29,7 @@ pub fn main() {
});
let j2 = spawn(move || {
*c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 6) and Write on Thread(id = 5)
*c.0 = 64; //~ ERROR Data race detected between Write on thread `<unnamed>` and Write on thread `<unnamed>`
});
j1.join().unwrap();

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Write on Thread(id = 6) and Write on Thread(id = 5) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Write on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
--> $DIR/enable_after_join_to_main.rs:LL:CC
|
LL | *c.0 = 64;
| ^^^^^^^^^ Data race detected between Write on Thread(id = 6) and Write on Thread(id = 5) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^^^^^^ Data race detected between Write on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -21,5 +21,5 @@ fn main() {
// The fence is useless, since it did not happen-after the `store` in the other thread.
// Hence this is a data race.
// Also see https://github.com/rust-lang/miri/issues/2192.
unsafe { V = 2 } //~ERROR Data race detected
unsafe { V = 2 } //~ERROR Data race detected between Write on thread `main` and Write on thread `<unnamed>`
}

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Write on Thread(id = 0, name = "main") and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Write on thread `main` and Write on thread `<unnamed>` at ALLOC
--> $DIR/fence_after_load.rs:LL:CC
|
LL | unsafe { V = 2 }
| ^^^^^ Data race detected between Write on Thread(id = 0, name = "main") and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^^ Data race detected between Write on thread `main` and Write on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -18,7 +18,7 @@ pub fn main() {
});
let j2 = spawn(move || {
*c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 2) and Read on Thread(id = 1)
*c.0 = 64; //~ ERROR Data race detected between Write on thread `<unnamed>` and Read on thread `<unnamed>`
});
j1.join().unwrap();

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Write on Thread(id = 2) and Read on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Write on thread `<unnamed>` and Read on thread `<unnamed>` at ALLOC
--> $DIR/read_write_race.rs:LL:CC
|
LL | *c.0 = 64;
| ^^^^^^^^^ Data race detected between Write on Thread(id = 2) and Read on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^^^^^^ Data race detected between Write on thread `<unnamed>` and Read on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -43,7 +43,7 @@ pub fn main() {
sleep(Duration::from_millis(200));
stack_var //~ ERROR Data race detected between Read on Thread(id = 1) and Write on Thread(id = 2)
stack_var //~ ERROR Data race detected between Read on thread `<unnamed>` and Write on thread `<unnamed>`
});
let j2 = spawn(move || {

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Read on Thread(id = 1) and Write on Thread(id = 2) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Read on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
--> $DIR/read_write_race_stack.rs:LL:CC
|
LL | stack_var
| ^^^^^^^^^ Data race detected between Read on Thread(id = 1) and Write on Thread(id = 2) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^^^^^^ Data race detected between Read on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -38,7 +38,7 @@ pub fn main() {
let j3 = spawn(move || {
if SYNC.load(Ordering::Acquire) == 2 {
*c.0 //~ ERROR Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1)
*c.0 //~ ERROR Data race detected between Read on thread `<unnamed>` and Write on thread `<unnamed>`
} else {
0
}

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Read on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
--> $DIR/relax_acquire_race.rs:LL:CC
|
LL | *c.0
| ^^^^ Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^ Data race detected between Read on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -42,7 +42,7 @@ pub fn main() {
let j3 = spawn(move || {
sleep(Duration::from_millis(500));
if SYNC.load(Ordering::Acquire) == 3 {
*c.0 //~ ERROR Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1)
*c.0 //~ ERROR Data race detected between Read on thread `<unnamed>` and Write on thread `<unnamed>`
} else {
0
}

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Read on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
--> $DIR/release_seq_race.rs:LL:CC
|
LL | *c.0
| ^^^^ Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^ Data race detected between Read on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -38,7 +38,7 @@ pub fn main() {
let j2 = spawn(move || {
if SYNC.load(Ordering::Acquire) == 2 {
*c.0 //~ ERROR Data race detected between Read on Thread(id = 2) and Write on Thread(id = 1)
*c.0 //~ ERROR Data race detected between Read on thread `<unnamed>` and Write on thread `<unnamed>`
} else {
0
}

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Read on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Read on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
--> $DIR/release_seq_race_same_thread.rs:LL:CC
|
LL | *c.0
| ^^^^ Data race detected between Read on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^ Data race detected between Read on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -39,7 +39,7 @@ pub fn main() {
let j3 = spawn(move || {
if SYNC.load(Ordering::Acquire) == 3 {
*c.0 //~ ERROR Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1)
*c.0 //~ ERROR Data race detected between Read on thread `<unnamed>` and Write on thread `<unnamed>`
} else {
0
}

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Read on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
--> $DIR/rmw_race.rs:LL:CC
|
LL | *c.0
| ^^^^ Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^ Data race detected between Read on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -18,7 +18,7 @@ pub fn main() {
});
let j2 = spawn(move || {
*c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 2) and Write on Thread(id = 1)
*c.0 = 64; //~ ERROR Data race detected between Write on thread `<unnamed>` and Write on thread `<unnamed>`
});
j1.join().unwrap();

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Write on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Write on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
--> $DIR/write_write_race.rs:LL:CC
|
LL | *c.0 = 64;
| ^^^^^^^^^ Data race detected between Write on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^^^^^^ Data race detected between Write on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -40,7 +40,7 @@ pub fn main() {
sleep(Duration::from_millis(200));
stack_var = 1usize; //~ ERROR Data race detected between Write on Thread(id = 1) and Write on Thread(id = 2)
stack_var = 1usize; //~ ERROR Data race detected between Write on thread `<unnamed>` and Write on thread `<unnamed>`
// read to silence errors
stack_var

View File

@ -1,8 +1,8 @@
error: Undefined Behavior: Data race detected between Write on Thread(id = 1) and Write on Thread(id = 2) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
error: Undefined Behavior: Data race detected between Write on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
--> $DIR/write_write_race_stack.rs:LL:CC
|
LL | stack_var = 1usize;
| ^^^^^^^^^^^^^^^^^^ Data race detected between Write on Thread(id = 1) and Write on Thread(id = 2) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock)
| ^^^^^^^^^^^^^^^^^^ Data race detected between Write on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information

View File

@ -218,7 +218,8 @@ fn test_prctl_thread_name() {
libc::prctl(libc::PR_GET_NAME, buf.as_mut_ptr(), 0 as c_long, 0 as c_long, 0 as c_long),
0,
);
assert_eq!(b"<unnamed>\0", &buf);
// The Rust runtime might set the thread name, so we allow two options here.
assert!(&buf[..10] == b"<unnamed>\0" || &buf[..5] == b"main\0");
let thread_name = CString::new("hello").expect("CString::new failed");
assert_eq!(
libc::prctl(