Auto merge of #1644 - JCTyblaidd:detect_race_with_alloc, r=RalfJung
More tests, fix issue 1643, and detect races with allocation. Fixes #1643 by disabling race detection for V-table memory, adds race detection between reads/writes and memory allocation, and adds more tests. ~~There is one unusual result in dealloc_read_race_stack_drop.rs: the stack variable is read by thread 0 and thread 2, so a race is reported with thread 0. Any ideas what causes the read on thread 0?~~ Fixed; this was a bug in correctly reporting the index a read race occurred in.
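For orientation, here is the shape of race this change starts reporting, condensed from the `alloc_read_race` test added below (a sketch, not part of the diff; as the test comments note, whether the race is observed depends on the scheduler): the allocation is published through a `Relaxed` store, so it does not happen-before the other thread's read through the pointer, and Miri can now report the conflict as `Read` vs `Allocate`.

```rust
use std::ptr::null_mut;
use std::sync::atomic::{AtomicPtr, Ordering};
use std::thread::spawn;

// Condensed sketch of tests/compile-fail/data_race/alloc_read_race.rs.
static PTR: AtomicPtr<usize> = AtomicPtr::new(null_mut());

fn main() {
    let alloc = spawn(|| {
        // Allocate and publish with Relaxed: no release sequence, so the
        // allocation is not synchronized with the reader below.
        // (The Box is intentionally leaked to keep the sketch short.)
        PTR.store(Box::into_raw(Box::new(0usize)), Ordering::Relaxed);
    });
    let read = spawn(|| unsafe {
        let p = PTR.load(Ordering::Relaxed);
        if !p.is_null() {
            let _val = *p; // with this PR, Miri can flag: Read vs Allocate
        }
    });
    alloc.join().unwrap();
    read.join().unwrap();
}
```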
This commit is contained in: commit 85e56131a1
@@ -1 +1 @@
-a2e29d67c26bdf8f278c98ee02d6cc77a279ed2e
+12813159a985d87a98578e05cc39200e4e8c2102
src/data_race.rs | 131
@@ -9,6 +9,9 @@
 //! Relaxed stores now unconditionally block all currently active release sequences and so per-thread tracking of release
 //! sequences is not needed.
 //!
+//! The implementation also models races with memory allocation and deallocation via treating allocation and
+//! deallocation as a type of write internally for detecting data-races.
+//!
 //! This does not explore weak memory orders and so can still miss data-races
 //! but should not report false-positives
 //!
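The module comment above says allocation and deallocation are folded into the existing write handling. A minimal standalone sketch of that idea (hypothetical types, not Miri's actual data structures): the usual vector-clock check of a read against the last write applies unchanged, and the recorded write kind only changes how a detected race is described.

```rust
// Standalone sketch, not Miri's real types: the per-cell clock records what
// kind of "write" last touched the byte, and Allocate is just one such kind.
#[allow(dead_code)]
#[derive(Copy, Clone, Debug)]
enum WriteType {
    Allocate,
    Write,
    Deallocate,
}

/// One timestamp per thread (vector clock).
type VClock = Vec<u64>;

struct CellClocks {
    write: u64,          // timestamp of the last write-like operation
    write_index: usize,  // vector index of the thread that performed it
    write_type: WriteType,
}

/// The read is race-free only if the reader's clock already covers the last
/// write-like operation, i.e. it happens-after the allocation/write/dealloc.
fn read_race(cell: &CellClocks, reader: &VClock) -> Option<WriteType> {
    if cell.write <= reader[cell.write_index] { None } else { Some(cell.write_type) }
}

fn main() {
    // Thread 1 allocated at timestamp 5; thread 2's clock has only seen up to 3.
    let cell = CellClocks { write: 5, write_index: 1, write_type: WriteType::Allocate };
    let thread2 = vec![0, 3, 7];
    if let Some(kind) = read_race(&cell, &thread2) {
        println!("data race: Read vs {:?}", kind); // prints "Read vs Allocate"
    }
}
```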
@@ -73,7 +76,7 @@
 use crate::{
     ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MiriEvalContext, MiriEvalContextExt,
     OpTy, Pointer, RangeMap, ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp,
-    VectorIdx,
+    VectorIdx, MemoryKind, MiriMemoryKind
 };

 pub type AllocExtra = VClockAlloc;
@@ -192,6 +195,34 @@ struct AtomicMemoryCellClocks {
     sync_vector: VClock,
 }

+/// Type of write operation: allocating memory
+/// non-atomic writes and deallocating memory
+/// are all treated as writes for the purpose
+/// of the data-race detector.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum WriteType {
+    /// Allocate memory.
+    Allocate,
+
+    /// Standard unsynchronized write.
+    Write,
+
+    /// Deallocate memory.
+    /// Note that when memory is deallocated first, later non-atomic accesses
+    /// will be reported as use-after-free, not as data races.
+    /// (Same for `Allocate` above.)
+    Deallocate,
+}
+impl WriteType {
+    fn get_descriptor(self) -> &'static str {
+        match self {
+            WriteType::Allocate => "Allocate",
+            WriteType::Write => "Write",
+            WriteType::Deallocate => "Deallocate",
+        }
+    }
+}
+
 /// Memory Cell vector clock metadata
 /// for data-race detection.
 #[derive(Clone, PartialEq, Eq, Debug)]
@@ -204,6 +235,11 @@ struct MemoryCellClocks {
     /// that performed the last write operation.
     write_index: VectorIdx,

+    /// The type of operation that the write index represents,
+    /// either newly allocated memory, a non-atomic write or
+    /// a deallocation of memory.
+    write_type: WriteType,
+
     /// The vector-clock of the timestamp of the last read operation
     /// performed by a thread since the last write operation occurred.
     /// It is reset to zero on each write operation.
@@ -215,20 +251,18 @@ struct MemoryCellClocks {
     atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
 }

-/// Create a default memory cell clocks instance
-/// for uninitialized memory.
-impl Default for MemoryCellClocks {
-    fn default() -> Self {
+impl MemoryCellClocks {
+    /// Create a new set of clocks representing memory allocated
+    /// at a given vector timestamp and index.
+    fn new(alloc: VTimestamp, alloc_index: VectorIdx) -> Self {
         MemoryCellClocks {
             read: VClock::default(),
-            write: 0,
-            write_index: VectorIdx::MAX_INDEX,
+            write: alloc,
+            write_index: alloc_index,
+            write_type: WriteType::Allocate,
             atomic_ops: None,
         }
     }
-}
-
-impl MemoryCellClocks {

     /// Load the internal atomic memory cells if they exist.
     #[inline]
@@ -382,6 +416,7 @@ fn write_race_detect(
         &mut self,
         clocks: &ThreadClockSet,
         index: VectorIdx,
+        write_type: WriteType,
     ) -> Result<(), DataRace> {
         log::trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, clocks);
         if self.write <= clocks.clock[self.write_index] && self.read <= clocks.clock {
@@ -393,6 +428,7 @@ fn write_race_detect(
         if race_free {
             self.write = clocks.clock[index];
             self.write_index = index;
+            self.write_type = write_type;
             self.read.set_zero_vector();
             Ok(())
         } else {
@@ -638,6 +674,21 @@ fn validate_atomic_fence(&mut self, atomic: AtomicFenceOp) -> InterpResult<'tcx>
             Ok(())
         }
     }
+
+    fn reset_vector_clocks(
+        &mut self,
+        ptr: Pointer<Tag>,
+        size: Size
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_mut();
+        if let Some(data_race) = &mut this.memory.extra.data_race {
+            if data_race.multi_threaded.get() {
+                let alloc_meta = this.memory.get_raw_mut(ptr.alloc_id)?.extra.data_race.as_mut().unwrap();
+                alloc_meta.reset_clocks(ptr.offset, size);
+            }
+        }
+        Ok(())
+    }
 }

 /// Vector clock metadata for a logical memory allocation.
@@ -646,22 +697,50 @@ pub struct VClockAlloc {
     /// Assigning each byte a MemoryCellClocks.
     alloc_ranges: RefCell<RangeMap<MemoryCellClocks>>,

-    // Pointer to global state.
+    /// Pointer to global state.
     global: MemoryExtra,
 }

 impl VClockAlloc {
-    /// Create a new data-race allocation detector.
-    pub fn new_allocation(global: &MemoryExtra, len: Size) -> VClockAlloc {
+    /// Create a new data-race detector for newly allocated memory.
+    pub fn new_allocation(global: &MemoryExtra, len: Size, kind: MemoryKind<MiriMemoryKind>) -> VClockAlloc {
+        let (alloc_timestamp, alloc_index) = match kind {
+            // User allocated and stack memory should track allocation.
+            MemoryKind::Machine(
+                MiriMemoryKind::Rust | MiriMemoryKind::C | MiriMemoryKind::WinHeap
+            ) | MemoryKind::Stack => {
+                let (alloc_index, clocks) = global.current_thread_state();
+                let alloc_timestamp = clocks.clock[alloc_index];
+                (alloc_timestamp, alloc_index)
+            }
+            // Other global memory should trace races but be allocated at the 0 timestamp.
+            MemoryKind::Machine(
+                MiriMemoryKind::Global | MiriMemoryKind::Machine | MiriMemoryKind::Env |
+                MiriMemoryKind::ExternStatic | MiriMemoryKind::Tls
+            ) | MemoryKind::CallerLocation | MemoryKind::Vtable => {
+                (0, VectorIdx::MAX_INDEX)
+            }
+        };
         VClockAlloc {
             global: Rc::clone(global),
-            alloc_ranges: RefCell::new(RangeMap::new(len, MemoryCellClocks::default())),
+            alloc_ranges: RefCell::new(RangeMap::new(
+                len, MemoryCellClocks::new(alloc_timestamp, alloc_index)
+            )),
         }
     }

+    fn reset_clocks(&mut self, offset: Size, len: Size) {
+        let mut alloc_ranges = self.alloc_ranges.borrow_mut();
+        for (_, range) in alloc_ranges.iter_mut(offset, len) {
+            // Reset the portion of the range
+            *range = MemoryCellClocks::new(0, VectorIdx::MAX_INDEX);
+        }
+    }
+
     // Find an index, if one exists where the value
     // in `l` is greater than the value in `r`.
     fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
         log::trace!("Find index where not {:?} <= {:?}", l, r);
         let l_slice = l.as_slice();
         let r_slice = r.as_slice();
         l_slice
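The `kind` match above decides whether an allocation can itself race: heap and stack allocations are stamped with the allocating thread's current timestamp, while globals, vtables, caller locations, and similar machine-managed memory get timestamp 0, which every thread's clock trivially covers (this is what removes the V-table false positive from #1643). A rough standalone sketch of that intent, with hypothetical names rather than Miri's types:

```rust
// Hypothetical sketch of the classification intent; not Miri's API.
#[allow(dead_code)]
#[derive(Debug, PartialEq)]
enum Kind { RustHeap, CHeap, WinHeap, Stack, Global, Vtable, CallerLocation, Tls, Env, Machine }

/// Returns the (timestamp, thread index) to stamp a new allocation with;
/// `current` is the allocating thread's (clock value, vector index).
fn alloc_stamp(kind: &Kind, current: (u64, usize)) -> (u64, usize) {
    match kind {
        // Program-visible allocations can race with later unsynchronized accesses.
        Kind::RustHeap | Kind::CHeap | Kind::WinHeap | Kind::Stack => current,
        // Machine-managed memory: timestamp 0 is covered by every clock, so the
        // "allocation write" can never be reported as racing.
        _ => (0, usize::MAX),
    }
}

fn main() {
    assert_eq!(alloc_stamp(&Kind::RustHeap, (5, 1)), (5, 1));
    assert_eq!(alloc_stamp(&Kind::Vtable, (5, 1)), (0, usize::MAX));
}
```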
@@ -681,7 +760,7 @@ fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
             .enumerate()
             .find_map(|(idx, &r)| if r == 0 { None } else { Some(idx) })
             .expect("Invalid VClock Invariant");
-        Some(idx)
+        Some(idx + r_slice.len())
     } else {
         None
     }
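This is the index-reporting fix mentioned in the PR description: when the racing entry lies in the part of `l` that extends past `r`, its position within that tail has to be shifted by `r`'s length to be a valid index into `l`. A small standalone sketch of the corrected logic (a hypothetical plain-slice version, not Miri's `VClock`):

```rust
// Sketch: find an index where the left clock exceeds the right clock.
// Indices found in the part of `l` that extends past `r` must be offset by
// `r.len()`, which is the bug the commit message refers to.
fn find_gt_index(l: &[u64], r: &[u64]) -> Option<usize> {
    // Compare the overlapping prefix element-wise.
    if let Some(idx) = l.iter().zip(r.iter()).position(|(l, r)| l > r) {
        return Some(idx);
    }
    // Any non-zero entry in the tail of `l` (beyond r.len()) exceeds the
    // implicit zero in `r`; its position must be shifted by r.len().
    l[r.len().min(l.len())..]
        .iter()
        .position(|&v| v != 0)
        .map(|idx| idx + r.len())
}

fn main() {
    // Thread 3 (vector index 3) performed a read the current clock doesn't cover.
    let read_clock = [0, 0, 0, 7];
    let current = [5, 2, 9];
    assert_eq!(find_gt_index(&read_clock, &current), Some(3)); // not Some(0)
}
```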
@@ -712,18 +791,18 @@ fn report_data_race<'tcx>(
             // Convert the write action into the vector clock it
             // represents for diagnostic purposes.
             write_clock = VClock::new_with_index(range.write_index, range.write);
-            ("WRITE", range.write_index, &write_clock)
+            (range.write_type.get_descriptor(), range.write_index, &write_clock)
         } else if let Some(idx) = Self::find_gt_index(&range.read, &current_clocks.clock) {
-            ("READ", idx, &range.read)
+            ("Read", idx, &range.read)
         } else if !is_atomic {
             if let Some(atomic) = range.atomic() {
                 if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &current_clocks.clock)
                 {
-                    ("ATOMIC_STORE", idx, &atomic.write_vector)
+                    ("Atomic Store", idx, &atomic.write_vector)
                 } else if let Some(idx) =
                     Self::find_gt_index(&atomic.read_vector, &current_clocks.clock)
                 {
-                    ("ATOMIC_LOAD", idx, &atomic.read_vector)
+                    ("Atomic Load", idx, &atomic.read_vector)
                 } else {
                     unreachable!(
                         "Failed to report data-race for non-atomic operation: no race found"
@@ -774,7 +853,7 @@ pub fn read<'tcx>(&self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx>
                 return Self::report_data_race(
                     &self.global,
                     range,
-                    "READ",
+                    "Read",
                     false,
                     pointer,
                     len,
@@ -792,17 +871,17 @@ fn unique_access<'tcx>(
         &mut self,
         pointer: Pointer<Tag>,
         len: Size,
-        action: &str,
+        write_type: WriteType,
     ) -> InterpResult<'tcx> {
         if self.global.multi_threaded.get() {
             let (index, clocks) = self.global.current_thread_state();
             for (_, range) in self.alloc_ranges.get_mut().iter_mut(pointer.offset, len) {
-                if let Err(DataRace) = range.write_race_detect(&*clocks, index) {
+                if let Err(DataRace) = range.write_race_detect(&*clocks, index, write_type) {
                     // Report data-race
                     return Self::report_data_race(
                         &self.global,
                         range,
-                        action,
+                        write_type.get_descriptor(),
                         false,
                         pointer,
                         len,
@@ -820,7 +899,7 @@ fn unique_access<'tcx>(
     /// being created or if it is temporarily disabled during a racy read or write
     /// operation
     pub fn write<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
-        self.unique_access(pointer, len, "Write")
+        self.unique_access(pointer, len, WriteType::Write)
     }

     /// Detect data-races for an unsynchronized deallocate operation, will not perform
@@ -828,7 +907,7 @@ pub fn write<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<
     /// being created or if it is temporarily disabled during a racy read or write
     /// operation
     pub fn deallocate<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
-        self.unique_access(pointer, len, "Deallocate")
+        self.unique_access(pointer, len, WriteType::Deallocate)
     }
 }
||||
@ -1134,6 +1213,8 @@ pub fn thread_created(&self, thread: ThreadId) {
|
||||
vector_info.push(thread)
|
||||
};
|
||||
|
||||
log::trace!("Creating thread = {:?} with vector index = {:?}", thread, created_index);
|
||||
|
||||
// Mark the chosen vector index as in use by the thread.
|
||||
thread_info[thread].vector_index = Some(created_index);
|
||||
|
||||
|
@@ -478,7 +478,7 @@ fn init_allocation_extra<'b>(
             (None, Tag::Untagged)
         };
         let race_alloc = if let Some(data_race) = &memory_extra.data_race {
-            Some(data_race::AllocExtra::new_allocation(&data_race, alloc.size))
+            Some(data_race::AllocExtra::new_allocation(&data_race, alloc.size, kind))
         } else {
             None
         };
@@ -510,6 +510,18 @@ fn before_deallocation(
         Ok(())
     }

+    fn after_static_mem_initialized(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        ptr: Pointer<Self::PointerTag>,
+        size: Size,
+    ) -> InterpResult<'tcx> {
+        if ecx.memory.extra.data_race.is_some() {
+            ecx.reset_vector_clocks(ptr, size)?;
+        }
+        Ok(())
+    }
+
     #[inline(always)]
     fn tag_global_base_pointer(memory_extra: &MemoryExtra, id: AllocId) -> Self::PointerTag {
         if let Some(stacked_borrows) = &memory_extra.stacked_borrows {
tests/compile-fail/data_race/alloc_read_race.rs | 48 (new file)
@@ -0,0 +1,48 @@
// ignore-windows: Concurrency on Windows is not supported yet.

use std::thread::spawn;
use std::ptr::null_mut;
use std::sync::atomic::{Ordering, AtomicPtr};
use std::mem::MaybeUninit;

#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);

unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}

pub fn main() {
    // Shared atomic pointer
    let pointer = AtomicPtr::new(null_mut::<MaybeUninit<usize>>());
    let ptr = EvilSend(&pointer as *const AtomicPtr<MaybeUninit<usize>>);

    // Note: this is scheduler-dependent
    // the operations need to occur in
    // order, otherwise the allocation is
    // not visible to the other-thread to
    // detect the race:
    //  1. alloc
    //  2. write
    unsafe {
        let j1 = spawn(move || {
            // Concurrent allocate the memory.
            // Uses relaxed semantics to not generate
            // a release sequence.
            let pointer = &*ptr.0;
            pointer.store(Box::into_raw(Box::new(MaybeUninit::uninit())), Ordering::Relaxed);
        });

        let j2 = spawn(move || {
            let pointer = &*ptr.0;

            // Note: could also error due to reading uninitialized memory, but the data-race detector triggers first.
            *pointer.load(Ordering::Relaxed) //~ ERROR Data race detected between Read on Thread(id = 2) and Allocate on Thread(id = 1)
        });

        j1.join().unwrap();
        j2.join().unwrap();

        // Clean up memory, will never be executed
        drop(Box::from_raw(pointer.load(Ordering::Relaxed)));
    }
}
tests/compile-fail/data_race/alloc_write_race.rs | 50 (new file)
@@ -0,0 +1,50 @@
// ignore-windows: Concurrency on Windows is not supported yet.

use std::thread::spawn;
use std::ptr::null_mut;
use std::sync::atomic::{Ordering, AtomicPtr};

#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);

unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}

extern "C" {
    fn malloc(size: usize) -> *mut u8;
    fn free(ptr: *mut u8);
}

pub fn main() {
    // Shared atomic pointer
    let pointer = AtomicPtr::new(null_mut::<usize>());
    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);

    // Note: this is scheduler-dependent
    // the operations need to occur in
    // order, otherwise the allocation is
    // not visible to the other-thread to
    // detect the race:
    //  1. alloc
    //  2. write
    unsafe {
        let j1 = spawn(move || {
            // Concurrent allocate the memory.
            // Uses relaxed semantics to not generate
            // a release sequence.
            let pointer = &*ptr.0;
            pointer.store(malloc(std::mem::size_of::<usize>()) as *mut usize, Ordering::Relaxed);
        });

        let j2 = spawn(move || {
            let pointer = &*ptr.0;
            *pointer.load(Ordering::Relaxed) = 2; //~ ERROR Data race detected between Write on Thread(id = 2) and Allocate on Thread(id = 1)
        });

        j1.join().unwrap();
        j2.join().unwrap();

        // Clean up memory, will never be executed
        free(pointer.load(Ordering::Relaxed) as *mut _);
    }
}
@@ -22,7 +22,7 @@ pub fn main() {

     let j2 = spawn(move || {
         //Equivalent to: (&*c.0).load(Ordering::SeqCst)
-        atomic_load(c.0 as *mut usize) //~ ERROR Data race
+        atomic_load(c.0 as *mut usize) //~ ERROR Data race detected between Atomic Load on Thread(id = 2) and Write on Thread(id = 1)
     });

     j1.join().unwrap();
@@ -22,7 +22,7 @@ pub fn main() {

     let j2 = spawn(move || {
         let atomic_ref = &mut *c.0;
-        *atomic_ref.get_mut() = 32; //~ ERROR Data race
+        *atomic_ref.get_mut() = 32; //~ ERROR Data race detected between Write on Thread(id = 2) and Atomic Load on Thread(id = 1)
     });

     j1.join().unwrap();
@@ -22,7 +22,7 @@ pub fn main() {

     let j2 = spawn(move || {
         let atomic_ref = &mut *c.0;
-        *atomic_ref.get_mut() //~ ERROR Data race
+        *atomic_ref.get_mut() //~ ERROR Data race detected between Read on Thread(id = 2) and Atomic Store on Thread(id = 1)
     });

     j1.join().unwrap();
@@ -22,7 +22,7 @@ pub fn main() {

     let j2 = spawn(move || {
         //Equivalent to: (&*c.0).store(32, Ordering::SeqCst)
-        atomic_store(c.0 as *mut usize, 32); //~ ERROR Data race
+        atomic_store(c.0 as *mut usize, 32); //~ ERROR Data race detected between Atomic Store on Thread(id = 2) and Read on Thread(id = 1)
     });

     j1.join().unwrap();
@@ -22,7 +22,7 @@ pub fn main() {

     let j2 = spawn(move || {
         //Equivalent to: (&*c.0).store(64, Ordering::SeqCst)
-        atomic_store(c.0 as *mut usize, 64); //~ ERROR Data race
+        atomic_store(c.0 as *mut usize, 64); //~ ERROR Data race detected between Atomic Store on Thread(id = 2) and Write on Thread(id = 1)
     });

     j1.join().unwrap();
@@ -22,7 +22,7 @@ pub fn main() {

     let j2 = spawn(move || {
         let atomic_ref = &mut *c.0;
-        *atomic_ref.get_mut() = 32; //~ ERROR Data race
+        *atomic_ref.get_mut() = 32; //~ ERROR Data race detected between Write on Thread(id = 2) and Atomic Store on Thread(id = 1)
     });

     j1.join().unwrap();
@@ -24,9 +24,9 @@ fn main() {
         })
     };

-    // Detatch the thread and sleep until it terminates
+    // Detach the thread and sleep until it terminates
     mem::drop(join);
-    sleep(Duration::from_millis(100));
+    sleep(Duration::from_millis(200));

     // Spawn and immediately join a thread
     // to execute the join code-path
@@ -36,7 +36,7 @@ fn main() {

     let join2 = unsafe {
         spawn(move || {
-            *c.0 = 64; //~ ERROR Data race
+            *c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 3) and Write on Thread(id = 1)
         })
     };
@@ -24,9 +24,9 @@ fn main() {
         })
     };

-    // Detatch the thread and sleep until it terminates
+    // Detach the thread and sleep until it terminates
     mem::drop(join);
-    sleep(Duration::from_millis(100));
+    sleep(Duration::from_millis(200));

     // Spawn and immediately join a thread
     // to execute the join code-path
@@ -36,6 +36,6 @@ fn main() {

     unsafe {
-        *c.0 = 64; //~ ERROR Data race
+        *c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 0, name = "main") and Write on Thread(id = 1)
     }
 }
tests/compile-fail/data_race/dealloc_read_race1.rs | 32 (new file)
@@ -0,0 +1,32 @@
// ignore-windows: Concurrency on Windows is not supported yet.

use std::thread::spawn;

#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);

unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}

extern "Rust" {
    fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
}

pub fn main() {
    // Shared atomic pointer
    let pointer: *mut usize = Box::into_raw(Box::new(0usize));
    let ptr = EvilSend(pointer);

    unsafe {
        let j1 = spawn(move || {
            *ptr.0
        });

        let j2 = spawn(move || {
            __rust_dealloc(ptr.0 as *mut _, std::mem::size_of::<usize>(), std::mem::align_of::<usize>()); //~ ERROR Data race detected between Deallocate on Thread(id = 2) and Read on Thread(id = 1)
        });

        j1.join().unwrap();
        j2.join().unwrap();
    }
}
tests/compile-fail/data_race/dealloc_read_race2.rs | 34 (new file)
@@ -0,0 +1,34 @@
// ignore-windows: Concurrency on Windows is not supported yet.

use std::thread::spawn;

#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);

unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}

extern "Rust" {
    fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
}

pub fn main() {
    // Shared atomic pointer
    let pointer: *mut usize = Box::into_raw(Box::new(0usize));
    let ptr = EvilSend(pointer);

    unsafe {
        let j1 = spawn(move || {
            __rust_dealloc(ptr.0 as *mut _, std::mem::size_of::<usize>(), std::mem::align_of::<usize>())
        });

        let j2 = spawn(move || {
            // Also an error of the form: Data race detected between Read on Thread(id = 2) and Deallocate on Thread(id = 1)
            // but the invalid allocation is detected first.
            *ptr.0 //~ ERROR dereferenced after this allocation got freed
        });

        j1.join().unwrap();
        j2.join().unwrap();
    }
}
tests/compile-fail/data_race/dealloc_read_race_stack.rs | 52 (new file)
@@ -0,0 +1,52 @@
// ignore-windows: Concurrency on Windows is not supported yet.
// compile-flags: -Zmiri-disable-isolation

use std::thread::{spawn, sleep};
use std::ptr::null_mut;
use std::sync::atomic::{Ordering, AtomicPtr};
use std::time::Duration;

#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);

unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}

pub fn main() {
    // Shared atomic pointer
    let pointer = AtomicPtr::new(null_mut::<usize>());
    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);

    // Note: this is scheduler-dependent
    // the operations need to occur in
    // order, otherwise the allocation is
    // not visible to the other-thread to
    // detect the race:
    //  1. stack-allocate
    //  2. read
    //  3. stack-deallocate
    unsafe {
        let j1 = spawn(move || {
            // Concurrent allocate the memory.
            // Uses relaxed semantics to not generate
            // a release sequence.
            let pointer = &*ptr.0;
            {
                let mut stack_var = 0usize;

                pointer.store(&mut stack_var as *mut _, Ordering::Release);

                sleep(Duration::from_millis(200));

            } //~ ERROR Data race detected between Deallocate on Thread(id = 1) and Read on Thread(id = 2)
        });

        let j2 = spawn(move || {
            let pointer = &*ptr.0;
            *pointer.load(Ordering::Acquire)
        });

        j1.join().unwrap();
        j2.join().unwrap();
    }
}
tests/compile-fail/data_race/dealloc_read_race_stack_drop.rs | 52 (new file)
@@ -0,0 +1,52 @@
// ignore-windows: Concurrency on Windows is not supported yet.
// compile-flags: -Zmiri-disable-isolation

use std::thread::{spawn, sleep};
use std::ptr::null_mut;
use std::sync::atomic::{Ordering, AtomicPtr};
use std::time::Duration;

#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);

unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}

pub fn main() {
    // Shared atomic pointer
    let pointer = AtomicPtr::new(null_mut::<usize>());
    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);

    // Note: this is scheduler-dependent
    // the operations need to occur in
    // order, otherwise the allocation is
    // not visible to the other-thread to
    // detect the race:
    //  1. stack-allocate
    //  2. read
    //  3. stack-deallocate
    unsafe {
        let j1 = spawn(move || {
            // Concurrent allocate the memory.
            // Uses relaxed semantics to not generate
            // a release sequence.
            let pointer = &*ptr.0;

            let mut stack_var = 0usize;

            pointer.store(&mut stack_var as *mut _, Ordering::Release);

            sleep(Duration::from_millis(200));

            drop(stack_var);
        }); //~ ERROR Data race detected between Deallocate on Thread(id = 1) and Read on Thread(id = 2)

        let j2 = spawn(move || {
            let pointer = &*ptr.0;
            *pointer.load(Ordering::Acquire)
        });

        j1.join().unwrap();
        j2.join().unwrap();
    }
}
tests/compile-fail/data_race/dealloc_write_race1.rs | 31 (new file)
@@ -0,0 +1,31 @@
// ignore-windows: Concurrency on Windows is not supported yet.

use std::thread::spawn;

#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);

unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}

extern "Rust" {
    fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
}
pub fn main() {
    // Shared atomic pointer
    let pointer: *mut usize = Box::into_raw(Box::new(0usize));
    let ptr = EvilSend(pointer);

    unsafe {
        let j1 = spawn(move || {
            *ptr.0 = 2;
        });

        let j2 = spawn(move || {
            __rust_dealloc(ptr.0 as *mut _, std::mem::size_of::<usize>(), std::mem::align_of::<usize>()); //~ ERROR Data race detected between Deallocate on Thread(id = 2) and Write on Thread(id = 1)
        });

        j1.join().unwrap();
        j2.join().unwrap();
    }
}
tests/compile-fail/data_race/dealloc_write_race2.rs | 33 (new file)
@@ -0,0 +1,33 @@
// ignore-windows: Concurrency on Windows is not supported yet.

use std::thread::spawn;

#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);

unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}

extern "Rust" {
    fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
}
pub fn main() {
    // Shared atomic pointer
    let pointer: *mut usize = Box::into_raw(Box::new(0usize));
    let ptr = EvilSend(pointer);

    unsafe {
        let j1 = spawn(move || {
            __rust_dealloc(ptr.0 as *mut _, std::mem::size_of::<usize>(), std::mem::align_of::<usize>());
        });

        let j2 = spawn(move || {
            // Also an error of the form: Data race detected between Write on Thread(id = 2) and Deallocate on Thread(id = 1)
            // but the invalid allocation is detected first.
            *ptr.0 = 2; //~ ERROR dereferenced after this allocation got freed
        });

        j1.join().unwrap();
        j2.join().unwrap();
    }
}
tests/compile-fail/data_race/dealloc_write_race_stack.rs | 52 (new file)
@@ -0,0 +1,52 @@
// ignore-windows: Concurrency on Windows is not supported yet.
// compile-flags: -Zmiri-disable-isolation

use std::thread::{spawn, sleep};
use std::ptr::null_mut;
use std::sync::atomic::{Ordering, AtomicPtr};
use std::time::Duration;

#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);

unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}

pub fn main() {
    // Shared atomic pointer
    let pointer = AtomicPtr::new(null_mut::<usize>());
    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);

    // Note: this is scheduler-dependent
    // the operations need to occur in
    // order, otherwise the allocation is
    // not visible to the other-thread to
    // detect the race:
    //  1. stack-allocate
    //  2. read
    //  3. stack-deallocate
    unsafe {
        let j1 = spawn(move || {
            // Concurrent allocate the memory.
            // Uses relaxed semantics to not generate
            // a release sequence.
            let pointer = &*ptr.0;
            {
                let mut stack_var = 0usize;

                pointer.store(&mut stack_var as *mut _, Ordering::Release);

                sleep(Duration::from_millis(200));

            } //~ ERROR Data race detected between Deallocate on Thread(id = 1) and Write on Thread(id = 2)
        });

        let j2 = spawn(move || {
            let pointer = &*ptr.0;
            *pointer.load(Ordering::Acquire) = 3;
        });

        j1.join().unwrap();
        j2.join().unwrap();
    }
}
@@ -0,0 +1,53 @@
// ignore-windows: Concurrency on Windows is not supported yet.
// compile-flags: -Zmiri-disable-isolation

use std::thread::{spawn, sleep};
use std::ptr::null_mut;
use std::sync::atomic::{Ordering, AtomicPtr};
use std::time::Duration;

#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);

unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}

pub fn main() {
    // Shared atomic pointer
    let pointer = AtomicPtr::new(null_mut::<usize>());
    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);

    // Note: this is scheduler-dependent
    // the operations need to occur in
    // order, otherwise the allocation is
    // not visible to the other-thread to
    // detect the race:
    //  1. stack-allocate
    //  2. read
    //  3. stack-deallocate
    unsafe {
        let j1 = spawn(move || {
            // Concurrent allocate the memory.
            // Uses relaxed semantics to not generate
            // a release sequence.
            let pointer = &*ptr.0;

            let mut stack_var = 0usize;

            pointer.store(&mut stack_var as *mut _, Ordering::Release);

            sleep(Duration::from_millis(200));

            // Note: Implicit read for drop(_) races with write, would detect race with deallocate after.
            drop(stack_var); //~ ERROR Data race detected between Read on Thread(id = 1) and Write on Thread(id = 2)
        });

        let j2 = spawn(move || {
            let pointer = &*ptr.0;
            *pointer.load(Ordering::Acquire) = 3;
        });

        j1.join().unwrap();
        j2.join().unwrap();
    }
}
@@ -29,7 +29,7 @@ pub fn main() {
     });

     let j2 = spawn(move || {
-        *c.0 = 64; //~ ERROR Data race
+        *c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 6) and Write on Thread(id = 5)
     });

     j1.join().unwrap();
@@ -18,7 +18,7 @@ pub fn main() {
     });

     let j2 = spawn(move || {
-        *c.0 = 64; //~ ERROR Data race
+        *c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 2) and Read on Thread(id = 1)
     });

     j1.join().unwrap();
tests/compile-fail/data_race/read_write_race_stack.rs | 57 (new file)
@@ -0,0 +1,57 @@
// ignore-windows: Concurrency on Windows is not supported yet.
// compile-flags: -Zmiri-disable-isolation -Zmir-opt-level=0

// Note: mir-opt-level set to 0 to prevent the read of stack_var in thread 1
// from being optimized away and preventing the detection of the data-race.

use std::thread::{spawn, sleep};
use std::ptr::null_mut;
use std::sync::atomic::{Ordering, AtomicPtr};
use std::time::Duration;

#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);

unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}

pub fn main() {
    // Shared atomic pointer
    let pointer = AtomicPtr::new(null_mut::<usize>());
    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);

    // Note: this is scheduler-dependent
    // the operations need to occur in
    // order, otherwise the allocation is
    // not visible to the other-thread to
    // detect the race:
    //  1. stack-allocate
    //  2. atomic_store
    //  3. atomic_load
    //  4. write-value
    //  5. read-value
    unsafe {
        let j1 = spawn(move || {
            // Concurrent allocate the memory.
            // Uses relaxed semantics to not generate
            // a release sequence.
            let pointer = &*ptr.0;

            let mut stack_var = 0usize;

            pointer.store(&mut stack_var as *mut _, Ordering::Release);

            sleep(Duration::from_millis(200));

            stack_var //~ ERROR Data race detected between Read on Thread(id = 1) and Write on Thread(id = 2)
        });

        let j2 = spawn(move || {
            let pointer = &*ptr.0;
            *pointer.load(Ordering::Acquire) = 3;
        });

        j1.join().unwrap();
        j2.join().unwrap();
    }
}
@@ -37,7 +37,7 @@ pub fn main() {

     let j3 = spawn(move || {
         if SYNC.load(Ordering::Acquire) == 2 {
-            *c.0 //~ ERROR Data race
+            *c.0 //~ ERROR Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1)
         } else {
             0
         }
@@ -30,7 +30,7 @@ pub fn main() {
     let j1 = spawn(move || {
         *c.0 = 1;
         SYNC.store(1, Ordering::Release);
-        sleep(Duration::from_millis(100));
+        sleep(Duration::from_millis(200));
         SYNC.store(3, Ordering::Relaxed);
     });

@@ -40,9 +40,9 @@ pub fn main() {
     });

     let j3 = spawn(move || {
-        sleep(Duration::from_millis(1000));
+        sleep(Duration::from_millis(500));
         if SYNC.load(Ordering::Acquire) == 3 {
-            *c.0 //~ ERROR Data race
+            *c.0 //~ ERROR Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1)
         } else {
             0
         }
@@ -38,7 +38,7 @@ pub fn main() {

     let j2 = spawn(move || {
         if SYNC.load(Ordering::Acquire) == 2 {
-            *c.0 //~ ERROR Data race
+            *c.0 //~ ERROR Data race detected between Read on Thread(id = 2) and Write on Thread(id = 1)
         } else {
             0
         }
@@ -38,7 +38,7 @@ pub fn main() {

     let j3 = spawn(move || {
         if SYNC.load(Ordering::Acquire) == 3 {
-            *c.0 //~ ERROR Data race
+            *c.0 //~ ERROR Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1)
         } else {
             0
         }
@@ -18,7 +18,7 @@ pub fn main() {
     });

     let j2 = spawn(move || {
-        *c.0 = 64; //~ ERROR Data race
+        *c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 2) and Write on Thread(id = 1)
     });

     j1.join().unwrap();
tests/compile-fail/data_race/write_write_race_stack.rs | 57 (new file)
@@ -0,0 +1,57 @@
// ignore-windows: Concurrency on Windows is not supported yet.
// compile-flags: -Zmiri-disable-isolation

use std::thread::{spawn, sleep};
use std::ptr::null_mut;
use std::sync::atomic::{Ordering, AtomicPtr};
use std::time::Duration;

#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);

unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}

pub fn main() {
    // Shared atomic pointer
    let pointer = AtomicPtr::new(null_mut::<usize>());
    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);

    // Note: this is scheduler-dependent
    // the operations need to occur in
    // order, otherwise the allocation is
    // not visible to the other-thread to
    // detect the race:
    //  1. stack-allocate
    //  2. atomic_store
    //  3. atomic_load
    //  4. write-value
    //  5. write-value
    unsafe {
        let j1 = spawn(move || {
            // Concurrent allocate the memory.
            // Uses relaxed semantics to not generate
            // a release sequence.
            let pointer = &*ptr.0;

            let mut stack_var = 0usize;

            pointer.store(&mut stack_var as *mut _, Ordering::Release);

            sleep(Duration::from_millis(200));

            stack_var = 1usize; //~ ERROR Data race detected between Write on Thread(id = 1) and Write on Thread(id = 2)

            // read to silence errors
            stack_var
        });

        let j2 = spawn(move || {
            let pointer = &*ptr.0;
            *pointer.load(Ordering::Acquire) = 3;
        });

        j1.join().unwrap();
        j2.join().unwrap();
    }
}
tests/run-pass/concurrency/concurrent_caller_location.rs | 19 (new file)
@@ -0,0 +1,19 @@
// ignore-windows: Concurrency on Windows is not supported yet.

use std::thread::spawn;
use std::panic::Location;

fn initialize() {
    let _ignore = initialize_inner();
}

fn initialize_inner() -> &'static Location<'static> {
    Location::caller()
}

fn main() {
    let j1 = spawn(initialize);
    let j2 = spawn(initialize);
    j1.join().unwrap();
    j2.join().unwrap();
}
@@ -0,0 +1,2 @@
warning: thread support is experimental and incomplete: weak memory effects are not emulated.
tests/run-pass/concurrency/issue1643.rs | 16 (new file)
@@ -0,0 +1,16 @@
// ignore-windows: Concurrency on Windows is not supported yet.

use std::thread::spawn;

fn initialize() {
    initialize_inner(&mut || false)
}

fn initialize_inner(_init: &mut dyn FnMut() -> bool) {}

fn main() {
    let j1 = spawn(initialize);
    let j2 = spawn(initialize);
    j1.join().unwrap();
    j2.join().unwrap();
}
tests/run-pass/concurrency/issue1643.stderr | 2 (new file)
@@ -0,0 +1,2 @@
warning: thread support is experimental and incomplete: weak memory effects are not emulated.