Auto merge of #1814 - RalfJung:rustup, r=RalfJung

avoid unnecessary RefCell calls

Blocked on https://github.com/rust-lang/rust/pull/85599
bors 2021-05-23 16:08:53 +00:00
commit 62046bf8b4
8 changed files with 102 additions and 69 deletions
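The recurring change in this diff: wherever the surrounding code already holds an exclusive `&mut` reference, `RefCell::get_mut` reaches the cell contents directly, with no runtime borrow-flag bookkeeping and no possibility of a borrow panic, whereas `borrow_mut` must check and update the flag on every call. A minimal standalone sketch of the before/after pattern (illustrative only, not Miri code):

```rust
use std::cell::RefCell;

struct State {
    counter: RefCell<u64>,
}

impl State {
    // Only `&self` is available: every call pays for the dynamic
    // borrow check, and overlapping borrows would panic at runtime.
    fn bump_shared(&self) {
        *self.counter.borrow_mut() += 1;
    }

    // `&mut self` proves exclusive access at compile time, so
    // `get_mut` returns `&mut u64` without touching the borrow flag.
    fn bump_exclusive(&mut self) {
        *self.counter.get_mut() += 1;
    }
}
```

This is also why many hunks below change `&self` receivers to `&mut self`: the stronger receiver is what makes `get_mut` applicable.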

View File

@@ -1 +1 @@
-6e92fb409816c65cd0a78a1fbcc71e2fbabdf50a
+0f8cd43ee8c3614e04b5c624dd8a45758d7023da

View File

@@ -598,7 +598,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
// of the time, based on `rate`.
let rate = this.memory.extra.cmpxchg_weak_failure_rate;
let cmpxchg_success = eq.to_bool()?
-&& (!can_fail_spuriously || this.memory.extra.rng.borrow_mut().gen::<f64>() < rate);
+&& (!can_fail_spuriously || this.memory.extra.rng.get_mut().gen::<f64>() < rate);
let res = Immediate::ScalarPair(
old.to_scalar_or_uninit(),
Scalar::from_bool(cmpxchg_success).into(),
@@ -647,7 +647,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
place: &MPlaceTy<'tcx, Tag>,
atomic: AtomicWriteOp,
) -> InterpResult<'tcx> {
-let this = self.eval_context_ref();
+let this = self.eval_context_mut();
this.validate_atomic_op(
place,
atomic,
@@ -672,7 +672,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
use AtomicRwOp::*;
let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
let release = matches!(atomic, Release | AcqRel | SeqCst);
-let this = self.eval_context_ref();
+let this = self.eval_context_mut();
this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
if acquire {
memory.load_acquire(clocks, index)?;
@@ -690,7 +690,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
/// Update the data-race detector for an atomic fence on the current thread.
fn validate_atomic_fence(&mut self, atomic: AtomicFenceOp) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
-if let Some(data_race) = &this.memory.extra.data_race {
+if let Some(data_race) = &mut this.memory.extra.data_race {
data_race.maybe_perform_sync_operation(move |index, mut clocks| {
log::trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);
@@ -771,7 +771,7 @@ impl VClockAlloc {
}
fn reset_clocks(&mut self, offset: Size, len: Size) {
-let mut alloc_ranges = self.alloc_ranges.borrow_mut();
+let alloc_ranges = self.alloc_ranges.get_mut();
for (_, range) in alloc_ranges.iter_mut(offset, len) {
// Reset the portion of the range
*range = MemoryCellClocks::new(0, VectorIdx::MAX_INDEX);
@@ -1025,6 +1025,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
if let Some(data_race) = &this.memory.extra.data_race {
if data_race.multi_threaded.get() {
// Load and log the atomic operation.
+// Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
let place_ptr = place.ptr.assert_ptr();
let size = place.layout.size;
let alloc_meta =
@@ -1105,6 +1106,7 @@ struct ThreadExtraState {
/// Global data-race detection state, contains the currently
/// executing thread as well as the vector-clocks associated
/// with each of the threads.
+// FIXME: it is probably better to have one large RefCell, than to have so many small ones.
#[derive(Debug, Clone)]
pub struct GlobalState {
/// Set to true once the first additional
@@ -1158,7 +1160,7 @@ impl GlobalState {
/// Create a new global state, setup with just thread-id=0
/// advanced to timestamp = 1.
pub fn new() -> Self {
-let global_state = GlobalState {
+let mut global_state = GlobalState {
multi_threaded: Cell::new(false),
vector_clocks: RefCell::new(IndexVec::new()),
vector_info: RefCell::new(IndexVec::new()),
@@ -1172,9 +1174,9 @@ impl GlobalState {
// Setup the main-thread since it is not explicitly created:
// uses vector index and thread-id 0, also the rust runtime gives
// the main-thread a name of "main".
-let index = global_state.vector_clocks.borrow_mut().push(ThreadClockSet::default());
-global_state.vector_info.borrow_mut().push(ThreadId::new(0));
-global_state.thread_info.borrow_mut().push(ThreadExtraState {
+let index = global_state.vector_clocks.get_mut().push(ThreadClockSet::default());
+global_state.vector_info.get_mut().push(ThreadId::new(0));
+global_state.thread_info.get_mut().push(ThreadExtraState {
vector_index: Some(index),
thread_name: Some("main".to_string().into_boxed_str()),
termination_vector_clock: None,
@@ -1221,7 +1223,7 @@ impl GlobalState {
// Hook for thread creation, enables multi-threaded execution and marks
// the current thread timestamp as happening-before the current thread.
#[inline]
-pub fn thread_created(&self, thread: ThreadId) {
+pub fn thread_created(&mut self, thread: ThreadId) {
let current_index = self.current_index();
// Increment the number of active threads.
@@ -1241,12 +1243,12 @@ impl GlobalState {
let created_index = if let Some(reuse_index) = self.find_vector_index_reuse_candidate() {
// Now re-configure the re-use candidate, increment the clock
// for the new sync use of the vector.
-let mut vector_clocks = self.vector_clocks.borrow_mut();
+let vector_clocks = self.vector_clocks.get_mut();
vector_clocks[reuse_index].increment_clock(reuse_index);
// Locate the old thread the vector was associated with and update
// it to represent the new thread instead.
-let mut vector_info = self.vector_info.borrow_mut();
+let vector_info = self.vector_info.get_mut();
let old_thread = vector_info[reuse_index];
vector_info[reuse_index] = thread;
@@ -1258,7 +1260,7 @@ impl GlobalState {
} else {
// No vector re-use candidates available, instead create
// a new vector index.
-let mut vector_info = self.vector_info.borrow_mut();
+let vector_info = self.vector_info.get_mut();
vector_info.push(thread)
};
@@ -1268,7 +1270,7 @@ impl GlobalState {
thread_info[thread].vector_index = Some(created_index);
// Create a thread clock set if applicable.
-let mut vector_clocks = self.vector_clocks.borrow_mut();
+let vector_clocks = self.vector_clocks.get_mut();
if created_index == vector_clocks.next_index() {
vector_clocks.push(ThreadClockSet::default());
}
@@ -1289,9 +1291,9 @@ impl GlobalState {
/// Hook on a thread join to update the implicit happens-before relation
/// between the joined thread and the current thread.
#[inline]
-pub fn thread_joined(&self, current_thread: ThreadId, join_thread: ThreadId) {
-let mut clocks_vec = self.vector_clocks.borrow_mut();
-let thread_info = self.thread_info.borrow();
+pub fn thread_joined(&mut self, current_thread: ThreadId, join_thread: ThreadId) {
+let clocks_vec = self.vector_clocks.get_mut();
+let thread_info = self.thread_info.get_mut();
// Load the vector clock of the current thread.
let current_index = thread_info[current_thread]
@@ -1329,9 +1331,9 @@ impl GlobalState {
// If the thread is marked as terminated but not joined
// then move the thread to the re-use set.
-let mut termination = self.terminated_threads.borrow_mut();
+let termination = self.terminated_threads.get_mut();
if let Some(index) = termination.remove(&join_thread) {
-let mut reuse = self.reuse_candidates.borrow_mut();
+let reuse = self.reuse_candidates.get_mut();
reuse.insert(index);
}
}
@@ -1344,28 +1346,28 @@ impl GlobalState {
/// This should be called strictly before any calls to
/// `thread_joined`.
#[inline]
-pub fn thread_terminated(&self) {
+pub fn thread_terminated(&mut self) {
let current_index = self.current_index();
// Increment the clock to a unique termination timestamp.
-let mut vector_clocks = self.vector_clocks.borrow_mut();
+let vector_clocks = self.vector_clocks.get_mut();
let current_clocks = &mut vector_clocks[current_index];
current_clocks.increment_clock(current_index);
// Load the current thread id for the executing vector.
-let vector_info = self.vector_info.borrow();
+let vector_info = self.vector_info.get_mut();
let current_thread = vector_info[current_index];
// Load the current thread metadata, and move to a terminated
// vector state. Setting up the vector clock all join operations
// will use.
-let mut thread_info = self.thread_info.borrow_mut();
+let thread_info = self.thread_info.get_mut();
let current = &mut thread_info[current_thread];
current.termination_vector_clock = Some(current_clocks.clock.clone());
// Add this thread as a candidate for re-use after a thread join
// occurs.
-let mut termination = self.terminated_threads.borrow_mut();
+let termination = self.terminated_threads.get_mut();
termination.insert(current_thread, current_index);
// Reduce the number of active threads, now that a thread has
@@ -1392,9 +1394,9 @@ impl GlobalState {
/// the thread name is used for improved diagnostics
/// during a data-race.
#[inline]
-pub fn thread_set_name(&self, thread: ThreadId, name: String) {
+pub fn thread_set_name(&mut self, thread: ThreadId, name: String) {
let name = name.into_boxed_str();
-let mut thread_info = self.thread_info.borrow_mut();
+let thread_info = self.thread_info.get_mut();
thread_info[thread].thread_name = Some(name);
}

View File

@@ -119,8 +119,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// `lo.col` is 0-based - add 1 to make it 1-based for the caller.
let colno: u32 = lo.col.0 as u32 + 1;
-let name_alloc = this.allocate_str(&name, MiriMemoryKind::Rust.into());
-let filename_alloc = this.allocate_str(&filename, MiriMemoryKind::Rust.into());
+// These are "mutable" allocations as we consider them to be owned by the callee.
+let name_alloc = this.allocate_str(&name, MiriMemoryKind::Rust.into(), Mutability::Mut);
+let filename_alloc =
+this.allocate_str(&filename, MiriMemoryKind::Rust.into(), Mutability::Mut);
let lineno_alloc = Scalar::from_u32(lineno);
let colno_alloc = Scalar::from_u32(colno);

View File

@@ -13,6 +13,7 @@
use log::trace;
+use rustc_ast::Mutability;
use rustc_middle::{mir, ty};
use rustc_target::spec::abi::Abi;
use rustc_target::spec::PanicStrategy;
@@ -169,7 +170,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let this = self.eval_context_mut();
// First arg: message.
-let msg = this.allocate_str(msg, MiriMemoryKind::Machine.into());
+let msg = this.allocate_str(msg, MiriMemoryKind::Machine.into(), Mutability::Not);
// Call the lang item.
let panic = this.tcx.lang_items().panic_fn().unwrap();

View File

@@ -58,7 +58,7 @@ fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
// (the kind has to be at its offset for compatibility with static initializer macros)
fn mutex_get_kind<'mir, 'tcx: 'mir>(
-ecx: &mut MiriEvalContext<'mir, 'tcx>,
+ecx: &MiriEvalContext<'mir, 'tcx>,
mutex_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };

View File

@@ -457,14 +457,29 @@ impl<'tcx> Stacks {
&self,
ptr: Pointer<Tag>,
size: Size,
-global: &GlobalState,
-f: impl Fn(Pointer<Tag>, &mut Stack, &GlobalState) -> InterpResult<'tcx>,
+f: impl Fn(Pointer<Tag>, &mut Stack) -> InterpResult<'tcx>,
) -> InterpResult<'tcx> {
let mut stacks = self.stacks.borrow_mut();
for (offset, stack) in stacks.iter_mut(ptr.offset, size) {
let mut cur_ptr = ptr;
cur_ptr.offset = offset;
-f(cur_ptr, stack, &*global)?;
+f(cur_ptr, stack)?;
}
Ok(())
}
+/// Call `f` on every stack in the range.
+fn for_each_mut(
+&mut self,
+ptr: Pointer<Tag>,
+size: Size,
+f: impl Fn(Pointer<Tag>, &mut Stack) -> InterpResult<'tcx>,
+) -> InterpResult<'tcx> {
+let stacks = self.stacks.get_mut();
+for (offset, stack) in stacks.iter_mut(ptr.offset, size) {
+let mut cur_ptr = ptr;
+cur_ptr.offset = offset;
+f(cur_ptr, stack)?;
+}
+Ok(())
+}
@@ -516,9 +531,8 @@ impl Stacks {
extra: &MemoryExtra,
) -> InterpResult<'tcx> {
trace!("read access with tag {:?}: {:?}, size {}", ptr.tag, ptr.erase_tag(), size.bytes());
-self.for_each(ptr, size, &*extra.borrow(), |ptr, stack, global| {
-stack.access(AccessKind::Read, ptr, global)
-})
+let global = &*extra.borrow();
+self.for_each(ptr, size, move |ptr, stack| stack.access(AccessKind::Read, ptr, global))
}
#[inline(always)]
@@ -529,9 +543,8 @@ impl Stacks {
extra: &mut MemoryExtra,
) -> InterpResult<'tcx> {
trace!("write access with tag {:?}: {:?}, size {}", ptr.tag, ptr.erase_tag(), size.bytes());
-self.for_each(ptr, size, extra.get_mut(), |ptr, stack, global| {
-stack.access(AccessKind::Write, ptr, global)
-})
+let global = extra.get_mut();
+self.for_each_mut(ptr, size, move |ptr, stack| stack.access(AccessKind::Write, ptr, global))
}
#[inline(always)]
@@ -542,7 +555,8 @@ impl Stacks {
extra: &mut MemoryExtra,
) -> InterpResult<'tcx> {
trace!("deallocation with tag {:?}: {:?}, size {}", ptr.tag, ptr.erase_tag(), size.bytes());
-self.for_each(ptr, size, extra.get_mut(), |ptr, stack, global| stack.dealloc(ptr, global))
+let global = extra.get_mut();
+self.for_each_mut(ptr, size, move |ptr, stack| stack.dealloc(ptr, global))
}
}
@@ -558,6 +572,18 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
new_tag: Tag,
protect: bool,
) -> InterpResult<'tcx> {
+// Nothing to do for ZSTs.
+if size == Size::ZERO {
+trace!(
+"reborrow of size 0: {} reference {:?} derived from {:?} (pointee {})",
+kind,
+new_tag,
+place.ptr,
+place.layout.ty,
+);
+return Ok(());
+}
let this = self.eval_context_mut();
let protector = if protect { Some(this.frame().extra.call_id) } else { None };
let ptr = place.ptr.assert_ptr();
@@ -571,12 +597,6 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
size.bytes()
);
-// Get the allocation. We need both the allocation and the MemoryExtra, so we cannot use `&mut`.
-// FIXME: make `get_alloc_extra_mut` also return `&mut MemoryExtra`.
-let extra = this.memory.get_alloc_extra(ptr.alloc_id)?;
-let stacked_borrows =
-extra.stacked_borrows.as_ref().expect("we should have Stacked Borrows data");
-let global = this.memory.extra.stacked_borrows.as_ref().unwrap().borrow();
// Update the stacks.
// Make sure that raw pointers and mutable shared references are reborrowed "weak":
// There could be existing unique pointers reborrowed from them that should remain valid!
@@ -588,6 +608,12 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// Shared references and *const are a whole different kind of game, the
// permission is not uniform across the entire range!
// We need a frozen-sensitive reborrow.
+// We have to use shared references to alloc/memory_extra here since
+// `visit_freeze_sensitive` needs to access the global state.
+let extra = this.memory.get_alloc_extra(ptr.alloc_id)?;
+let stacked_borrows =
+extra.stacked_borrows.as_ref().expect("we should have Stacked Borrows data");
+let global = this.memory.extra.stacked_borrows.as_ref().unwrap().borrow();
return this.visit_freeze_sensitive(place, size, |cur_ptr, size, frozen| {
// We are only ever `SharedReadOnly` inside the frozen bits.
let perm = if frozen {
@@ -596,15 +622,21 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
Permission::SharedReadWrite
};
let item = Item { perm, tag: new_tag, protector };
-stacked_borrows.for_each(cur_ptr, size, &*global, |cur_ptr, stack, global| {
-stack.grant(cur_ptr, item, global)
+stacked_borrows.for_each(cur_ptr, size, |cur_ptr, stack| {
+stack.grant(cur_ptr, item, &*global)
})
});
}
};
+// Here we can avoid `borrow()` calls because we have mutable references.
+// Note that this asserts that the allocation is mutable -- but since we are creating a
+// mutable pointer, that seems reasonable.
+let (alloc_extra, memory_extra) = this.memory.get_alloc_extra_mut(ptr.alloc_id)?;
+let stacked_borrows =
+alloc_extra.stacked_borrows.as_mut().expect("we should have Stacked Borrows data");
+let global = memory_extra.stacked_borrows.as_mut().unwrap().get_mut();
let item = Item { perm, tag: new_tag, protector };
-stacked_borrows
-.for_each(ptr, size, &*global, |ptr, stack, global| stack.grant(ptr, item, global))
+stacked_borrows.for_each_mut(ptr, size, |ptr, stack| stack.grant(ptr, item, global))
}
/// Retags an individual pointer, returning the retagged version.
@@ -631,16 +663,10 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// We can see dangling ptrs in here e.g. after a Box's `Unique` was
// updated using "self.0 = ..." (can happen in Box::from_raw) so we cannot ICE; see miri#1050.
let place = this.mplace_access_checked(place, Some(Align::from_bytes(1).unwrap()))?;
-// Nothing to do for ZSTs. We use `is_bits` here because we *do* need to retag even ZSTs
-// when there actually is a tag (to avoid inheriting a tag that would let us access more
-// than 0 bytes).
-if size == Size::ZERO && place.ptr.is_bits() {
-return Ok(*val);
-}
// Compute new borrow.
let new_tag = {
-let mut mem_extra = this.memory.extra.stacked_borrows.as_ref().unwrap().borrow_mut();
+let mem_extra = this.memory.extra.stacked_borrows.as_mut().unwrap().get_mut();
match kind {
// Give up tracking for raw pointers.
RefKind::Raw { .. } if !mem_extra.track_raw => Tag::Untagged,
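The `for_each`/`for_each_mut` split above applies the same idea per allocation: read accesses may happen through `&self` (and keep the dynamically checked `borrow_mut` path), while writes and deallocations hold `&mut self` and can use `get_mut`. A simplified standalone sketch of the shape, assuming a plain `Vec` in place of the real offset-keyed range map:

```rust
use std::cell::RefCell;

struct Stacks {
    stacks: RefCell<Vec<u32>>,
}

impl Stacks {
    // Shared access: only `&self` is available, so the RefCell is
    // borrowed dynamically at runtime.
    fn for_each(&self, f: impl Fn(&mut u32)) {
        let mut stacks = self.stacks.borrow_mut();
        for stack in stacks.iter_mut() {
            f(stack);
        }
    }

    // Exclusive access: `&mut self` proves uniqueness at compile
    // time, so `get_mut` bypasses the runtime borrow flag.
    fn for_each_mut(&mut self, f: impl Fn(&mut u32)) {
        for stack in self.stacks.get_mut().iter_mut() {
            f(stack);
        }
    }
}
```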

View File

@@ -332,7 +332,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
fn join_thread(
&mut self,
joined_thread_id: ThreadId,
-data_race: &Option<data_race::GlobalState>,
+data_race: Option<&mut data_race::GlobalState>,
) -> InterpResult<'tcx> {
if self.threads[joined_thread_id].join_status != ThreadJoinStatus::Joinable {
throw_ub_format!("trying to join a detached or already joined thread");
@@ -436,7 +436,10 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
/// Wakes up threads joining on the active one and deallocates thread-local statics.
/// The `AllocId` that can now be freed is returned.
-fn thread_terminated(&mut self, data_race: &Option<data_race::GlobalState>) -> Vec<AllocId> {
+fn thread_terminated(
+&mut self,
+mut data_race: Option<&mut data_race::GlobalState>,
+) -> Vec<AllocId> {
let mut free_tls_statics = Vec::new();
{
let mut thread_local_statics = self.thread_local_alloc_ids.borrow_mut();
@@ -452,14 +455,14 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
});
}
// Set the thread into a terminated state in the data-race detector
-if let Some(data_race) = data_race {
+if let Some(ref mut data_race) = data_race {
data_race.thread_terminated();
}
// Check if we need to unblock any threads.
for (i, thread) in self.threads.iter_enumerated_mut() {
if thread.state == ThreadState::BlockedOnJoin(self.active_thread) {
// The thread has terminated, mark happens-before edge to joining thread
-if let Some(data_race) = data_race {
+if let Some(ref mut data_race) = data_race {
data_race.thread_joined(i, self.active_thread);
}
trace!("unblocking {:?} because {:?} terminated", i, self.active_thread);
@@ -584,7 +587,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
fn create_thread(&mut self) -> ThreadId {
let this = self.eval_context_mut();
let id = this.machine.threads.create_thread();
-if let Some(data_race) = &this.memory.extra.data_race {
+if let Some(data_race) = &mut this.memory.extra.data_race {
data_race.thread_created(id);
}
id
@@ -599,8 +602,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
-let data_race = &this.memory.extra.data_race;
-this.machine.threads.join_thread(joined_thread_id, data_race)?;
+this.machine.threads.join_thread(joined_thread_id, this.memory.extra.data_race.as_mut())?;
Ok(())
}
@@ -664,7 +666,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) {
let this = self.eval_context_mut();
-if let Some(data_race) = &this.memory.extra.data_race {
+if let Some(data_race) = &mut this.memory.extra.data_race {
if let Ok(string) = String::from_utf8(new_thread_name.clone()) {
data_race.thread_set_name(this.machine.threads.active_thread, string);
}
@@ -759,8 +761,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
fn thread_terminated(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
-let data_race = &this.memory.extra.data_race;
-for alloc_id in this.machine.threads.thread_terminated(data_race) {
+for alloc_id in this.machine.threads.thread_terminated(this.memory.extra.data_race.as_mut())
+{
let ptr = this.memory.global_base_pointer(alloc_id.into())?;
this.memory.deallocate(ptr, None, MiriMemoryKind::Tls.into())?;
}
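One subtlety in the `thread_terminated`/`join_thread` changes above: the callee now receives `data_race: Option<&mut data_race::GlobalState>` and uses it more than once, so it matches with `Some(ref mut data_race)` to reborrow the mutable reference instead of moving it out of the `Option`. A tiny self-contained illustration of that reborrowing pattern (types invented for the example):

```rust
fn process(mut state: Option<&mut Vec<u32>>) {
    // `Some(ref mut s)` reborrows the `&mut` inside the Option
    // instead of moving it out...
    if let Some(ref mut s) = state {
        s.push(1);
    }
    // ...so `state` remains usable on every later iteration.
    for i in 0..3 {
        if let Some(ref mut s) = state {
            s.push(i);
        }
    }
}

fn main() {
    let mut v = Vec::new();
    process(Some(&mut v));
    assert_eq!(v, [1, 0, 1, 2]);
}
```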

View File

@@ -3,6 +3,6 @@ static X: usize = 5;
#[allow(mutable_transmutes)]
fn main() {
let _x = unsafe {
-std::mem::transmute::<&usize, &mut usize>(&X) //~ ERROR borrow stack
+std::mem::transmute::<&usize, &mut usize>(&X) //~ ERROR writing to alloc0 which is read-only
};
}