Auto merge of #2248 - RalfJung:arc-drop-fix, r=RalfJung

do not protect SRW items

This is the Miri version of https://github.com/rust-lang/rust/pull/98017.
bors 2022-06-22 21:39:41 +00:00
commit 655eed35b7
8 changed files with 102 additions and 89 deletions
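In user-facing terms, the change accepts code like the following sketch (hypothetical names; it mirrors the new `unsafe_cell_deallocate` test added below): a callee receives a shared reference to interior-mutable data and deallocates the allocation through a raw pointer while the call is still active. Previously, the SharedReadWrite item pushed for such an argument carried a protector, so Miri rejected this with "deallocating while item is protected".

use std::cell::UnsafeCell;

// Sketch of the affected pattern: `f` receives a SharedReadWrite-tagged
// reference; with this change that item carries no protector, so the
// deallocation below is no longer reported as an error.
fn f(x: &UnsafeCell<i32>) {
    let b: Box<i32> = unsafe { Box::from_raw(x.get()) };
    drop(b);
}

fn main() {
    // Hand a heap allocation to `f` as `&UnsafeCell<i32>`.
    let raw = Box::into_raw(Box::new(0i32)) as *mut UnsafeCell<i32>;
    f(unsafe { &*raw });
}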


@@ -821,6 +821,13 @@ fn reborrow(
                 } else {
                     Permission::SharedReadWrite
                 };
+                let protector = if frozen {
+                    protector
+                } else {
+                    // We do not protect inside UnsafeCell.
+                    // This fixes https://github.com/rust-lang/rust/issues/55005.
+                    None
+                };
                 let item = Item { perm, tag: new_tag, protector };
                 let mut global = this.machine.stacked_borrows.as_ref().unwrap().borrow_mut();
                 stacked_borrows.for_each(range, |offset, stack, history| {


@@ -1,17 +0,0 @@
-// error-pattern: deallocating while item is protected
-
-use std::cell::Cell;
-
-// Check that even `&Cell` are dereferenceable.
-// Also see <https://github.com/rust-lang/rust/issues/55005>.
-fn inner(x: &Cell<i32>, f: fn(&Cell<i32>)) {
-    // `f` may mutate, but it may not deallocate!
-    f(x)
-}
-
-fn main() {
-    inner(Box::leak(Box::new(Cell::new(0))), |x| {
-        let raw = x as *const _ as *mut Cell<i32>;
-        drop(unsafe { Box::from_raw(raw) });
-    });
-}


@@ -1,38 +0,0 @@
error: Undefined Behavior: deallocating while item is protected: [SharedReadWrite for <TAG> (call ID)]
--> RUSTLIB/alloc/src/alloc.rs:LL:CC
|
LL | unsafe { __rust_dealloc(ptr, layout.size(), layout.align()) }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ deallocating while item is protected: [SharedReadWrite for <TAG> (call ID)]
|
= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
= note: inside `std::alloc::dealloc` at RUSTLIB/alloc/src/alloc.rs:LL:CC
= note: inside `<std::alloc::Global as std::alloc::Allocator>::deallocate` at RUSTLIB/alloc/src/alloc.rs:LL:CC
= note: inside `alloc::alloc::box_free::<std::cell::Cell<i32>, std::alloc::Global>` at RUSTLIB/alloc/src/alloc.rs:LL:CC
= note: inside `std::ptr::drop_in_place::<std::boxed::Box<std::cell::Cell<i32>>> - shim(Some(std::boxed::Box<std::cell::Cell<i32>>))` at RUSTLIB/core/src/ptr/mod.rs:LL:CC
= note: inside `std::mem::drop::<std::boxed::Box<std::cell::Cell<i32>>>` at RUSTLIB/core/src/mem/mod.rs:LL:CC
note: inside closure at $DIR/deallocate_against_barrier2.rs:LL:CC
--> $DIR/deallocate_against_barrier2.rs:LL:CC
|
LL | drop(unsafe { Box::from_raw(raw) });
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: inside `<[closure@$DIR/deallocate_against_barrier2.rs:LL:CC] as std::ops::FnOnce<(&std::cell::Cell<i32>,)>>::call_once - shim` at RUSTLIB/core/src/ops/function.rs:LL:CC
note: inside `inner` at $DIR/deallocate_against_barrier2.rs:LL:CC
--> $DIR/deallocate_against_barrier2.rs:LL:CC
|
LL | f(x)
| ^^^^
note: inside `main` at $DIR/deallocate_against_barrier2.rs:LL:CC
--> $DIR/deallocate_against_barrier2.rs:LL:CC
|
LL | / inner(Box::leak(Box::new(Cell::new(0))), |x| {
LL | | let raw = x as *const _ as *mut Cell<i32>;
LL | | drop(unsafe { Box::from_raw(raw) });
LL | | });
| |______^
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to previous error


@@ -0,0 +1,19 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+use std::sync::Arc;
+use std::thread;
+
+/// Test for Arc::drop bug (https://github.com/rust-lang/rust/issues/55005)
+fn main() {
+    // The bug seems to take up to 700 iterations to reproduce with most seeds (tested 0-9).
+    for _ in 0..700 {
+        let arc_1 = Arc::new(());
+        let arc_2 = arc_1.clone();
+        let thread = thread::spawn(|| drop(arc_2));
+        let mut i = 0;
+        while i < 256 {
+            i += 1;
+        }
+        drop(arc_1);
+        thread.join().unwrap();
+    }
+}


@@ -217,7 +217,7 @@ fn test_single_thread() {
 }
 
 pub fn main() {
-    for _ in 0..50 {
+    for _ in 0..75 {
         test_single_thread();
         test_mixed_access();
         test_load_buffering_acq_rel();


@@ -1,6 +1,5 @@
 // ignore-windows: Concurrency on Windows is not supported yet.
-// We are making scheduler assumptions here.
-// compile-flags: -Zmiri-disable-isolation -Zmiri-strict-provenance -Zmiri-preemption-rate=0
+// compile-flags: -Zmiri-disable-isolation -Zmiri-strict-provenance
 
 use std::sync::{Arc, Barrier, Condvar, Mutex, Once, RwLock};
 use std::thread;
@@ -53,35 +52,6 @@ fn check_conditional_variables_notify_one() {
     }
 }
 
-fn check_conditional_variables_notify_all() {
-    let pair = Arc::new(((Mutex::new(())), Condvar::new()));
-
-    // Spawn threads and block them on the conditional variable.
-    let handles: Vec<_> = (0..5)
-        .map(|_| {
-            let pair2 = pair.clone();
-            thread::spawn(move || {
-                let (lock, cvar) = &*pair2;
-                let guard = lock.lock().unwrap();
-                // Block waiting on the conditional variable.
-                let _ = cvar.wait(guard).unwrap();
-            })
-        })
-        .inspect(|_| {
-            thread::yield_now();
-            thread::yield_now();
-        })
-        .collect();
-
-    let (_, cvar) = &*pair;
-    // Unblock all threads.
-    cvar.notify_all();
-
-    for handle in handles {
-        handle.join().unwrap();
-    }
-}
-
 /// Test that waiting on a conditional variable with a timeout does not
 /// deadlock.
 fn check_conditional_variables_timed_wait_timeout() {
@@ -301,7 +271,6 @@ fn check_condvar() {
 fn main() {
     check_barriers();
     check_conditional_variables_notify_one();
-    check_conditional_variables_notify_all();
     check_conditional_variables_timed_wait_timeout();
     check_conditional_variables_timed_wait_notimeout();
     check_mutex();


@@ -0,0 +1,40 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+// We are making scheduler assumptions here.
+// compile-flags: -Zmiri-strict-provenance -Zmiri-preemption-rate=0
+
+use std::sync::{Condvar, Mutex, Arc};
+use std::thread;
+
+fn check_conditional_variables_notify_all() {
+    let pair = Arc::new(((Mutex::new(())), Condvar::new()));
+
+    // Spawn threads and block them on the conditional variable.
+    let handles: Vec<_> = (0..5)
+        .map(|_| {
+            let pair2 = pair.clone();
+            thread::spawn(move || {
+                let (lock, cvar) = &*pair2;
+                let guard = lock.lock().unwrap();
+                // Block waiting on the conditional variable.
+                let _ = cvar.wait(guard).unwrap();
+            })
+        })
+        .inspect(|_| {
+            // Ensure the other threads all run and block on the `wait`.
+            thread::yield_now();
+            thread::yield_now();
+        })
+        .collect();
+
+    let (_, cvar) = &*pair;
+    // Unblock all threads.
+    cvar.notify_all();
+
+    for handle in handles {
+        handle.join().unwrap();
+    }
+}
+
+fn main() {
+    check_conditional_variables_notify_all();
+}


@@ -1,11 +1,14 @@
 // compile-flags: -Zmiri-tag-raw-pointers
 use std::cell::{Cell, RefCell, UnsafeCell};
-use std::mem::MaybeUninit;
+use std::mem::{self, MaybeUninit};
 
 fn main() {
     aliasing_mut_and_shr();
     aliasing_frz_and_shr();
     into_interior_mutability();
     unsafe_cell_2phase();
+    unsafe_cell_deallocate();
+    unsafe_cell_invalidate();
 }
 
 fn aliasing_mut_and_shr() {
@@ -67,3 +70,33 @@ fn unsafe_cell_2phase() {
         let _val = (*x2.get()).get(0);
     }
 }
+
+/// Make sure we can deallocate an UnsafeCell that was passed to an active fn call.
+/// (This is the fix for https://github.com/rust-lang/rust/issues/55005.)
+fn unsafe_cell_deallocate() {
+    fn f(x: &UnsafeCell<i32>) {
+        let b: Box<i32> = unsafe { Box::from_raw(x as *const _ as *mut i32) };
+        drop(b)
+    }
+
+    let b = Box::new(0i32);
+    f(unsafe { mem::transmute(Box::into_raw(b)) });
+}
+
+/// As a side-effect of the above, we also allow this -- at least for now.
+fn unsafe_cell_invalidate() {
+    fn f(_x: &UnsafeCell<i32>, y: *mut i32) {
+        // Writing to y invalidates x, but that is okay.
+        unsafe {
+            *y += 1;
+        }
+    }
+
+    let mut x = 0i32;
+    let raw1 = &mut x as *mut _;
+    let ref1 = unsafe { &mut *raw1 };
+    let raw2 = ref1 as *mut _;
+    // Now the borrow stack is: raw1, ref1, raw2.
+    // So using raw1 invalidates raw2.
+    f(unsafe { mem::transmute(raw2) }, raw1);
+}