From 543777acbd9797118c5992e308335ab21dcd1fbb Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Sun, 23 May 2021 10:47:29 +0200
Subject: [PATCH 1/6] avoid unnecessary RefCell calls in Stacked Borrows

---
 src/stacked_borrows.rs | 60 +++++++++++++++++++++++++++---------------
 1 file changed, 39 insertions(+), 21 deletions(-)

diff --git a/src/stacked_borrows.rs b/src/stacked_borrows.rs
index a9c030c87de..b1ab34b1f34 100644
--- a/src/stacked_borrows.rs
+++ b/src/stacked_borrows.rs
@@ -457,14 +457,29 @@ impl<'tcx> Stacks {
         &self,
         ptr: Pointer<Tag>,
         size: Size,
-        global: &GlobalState,
-        f: impl Fn(Pointer<Tag>, &mut Stack, &GlobalState) -> InterpResult<'tcx>,
+        f: impl Fn(Pointer<Tag>, &mut Stack) -> InterpResult<'tcx>,
     ) -> InterpResult<'tcx> {
         let mut stacks = self.stacks.borrow_mut();
         for (offset, stack) in stacks.iter_mut(ptr.offset, size) {
             let mut cur_ptr = ptr;
             cur_ptr.offset = offset;
-            f(cur_ptr, stack, &*global)?;
+            f(cur_ptr, stack)?;
+        }
+        Ok(())
+    }
+
+    /// Call `f` on every stack in the range.
+    fn for_each_mut(
+        &mut self,
+        ptr: Pointer<Tag>,
+        size: Size,
+        f: impl Fn(Pointer<Tag>, &mut Stack) -> InterpResult<'tcx>,
+    ) -> InterpResult<'tcx> {
+        let stacks = self.stacks.get_mut();
+        for (offset, stack) in stacks.iter_mut(ptr.offset, size) {
+            let mut cur_ptr = ptr;
+            cur_ptr.offset = offset;
+            f(cur_ptr, stack)?;
         }
         Ok(())
     }
@@ -516,9 +531,8 @@ impl Stacks {
         extra: &MemoryExtra,
     ) -> InterpResult<'tcx> {
         trace!("read access with tag {:?}: {:?}, size {}", ptr.tag, ptr.erase_tag(), size.bytes());
-        self.for_each(ptr, size, &*extra.borrow(), |ptr, stack, global| {
-            stack.access(AccessKind::Read, ptr, global)
-        })
+        let global = &*extra.borrow();
+        self.for_each(ptr, size, move |ptr, stack| stack.access(AccessKind::Read, ptr, global))
     }

     #[inline(always)]
@@ -529,9 +543,8 @@ impl Stacks {
         extra: &mut MemoryExtra,
     ) -> InterpResult<'tcx> {
         trace!("write access with tag {:?}: {:?}, size {}", ptr.tag, ptr.erase_tag(), size.bytes());
-        self.for_each(ptr, size, extra.get_mut(), |ptr, stack, global| {
-            stack.access(AccessKind::Write, ptr, global)
-        })
+        let global = extra.get_mut();
+        self.for_each_mut(ptr, size, move |ptr, stack| stack.access(AccessKind::Write, ptr, global))
     }

     #[inline(always)]
@@ -542,7 +555,8 @@ impl Stacks {
         extra: &mut MemoryExtra,
     ) -> InterpResult<'tcx> {
         trace!("deallocation with tag {:?}: {:?}, size {}", ptr.tag, ptr.erase_tag(), size.bytes());
-        self.for_each(ptr, size, extra.get_mut(), |ptr, stack, global| stack.dealloc(ptr, global))
+        let global = extra.get_mut();
+        self.for_each_mut(ptr, size, move |ptr, stack| stack.dealloc(ptr, global))
     }
 }

@@ -571,12 +585,6 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
             size.bytes()
         );

-        // Get the allocation. We need both the allocation and the MemoryExtra, so we cannot use `&mut`.
-        // FIXME: make `get_alloc_extra_mut` also return `&mut MemoryExtra`.
-        let extra = this.memory.get_alloc_extra(ptr.alloc_id)?;
-        let stacked_borrows =
-            extra.stacked_borrows.as_ref().expect("we should have Stacked Borrows data");
-        let global = this.memory.extra.stacked_borrows.as_ref().unwrap().borrow();
         // Update the stacks.
         // Make sure that raw pointers and mutable shared references are reborrowed "weak":
         // There could be existing unique pointers reborrowed from them that should remain valid!
         let perm = match kind {
             RefKind::Unique { two_phase: false } => Permission::Unique,
             RefKind::Unique { two_phase: true } => Permission::SharedReadWrite,
             RefKind::Raw { mutable: true } => Permission::SharedReadWrite,
             RefKind::Shared | RefKind::Raw { mutable: false } => {
                 // Shared references and *const are a whole different kind of game, the
                 // permission is not uniform across the entire range!
                 // We need a frozen-sensitive reborrow.
+                // We have to use shared references to alloc/memory_extra here since
+                // `visit_freeze_sensitive` needs to access the global state.
+                let extra = this.memory.get_alloc_extra(ptr.alloc_id)?;
+                let stacked_borrows =
+                    extra.stacked_borrows.as_ref().expect("we should have Stacked Borrows data");
+                let global = this.memory.extra.stacked_borrows.as_ref().unwrap().borrow();
                 return this.visit_freeze_sensitive(place, size, |cur_ptr, size, frozen| {
                     // We are only ever `SharedReadOnly` inside the frozen bits.
                     let perm = if frozen {
@@ -596,15 +610,19 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
                         Permission::SharedReadWrite
                     };
                     let item = Item { perm, tag: new_tag, protector };
-                    stacked_borrows.for_each(cur_ptr, size, &*global, |cur_ptr, stack, global| {
-                        stack.grant(cur_ptr, item, global)
+                    stacked_borrows.for_each(cur_ptr, size, |cur_ptr, stack| {
+                        stack.grant(cur_ptr, item, &*global)
                     })
                 });
             }
         };
+        // Here we can avoid `borrow()` calls because we have mutable references.
+        let (alloc_extra, memory_extra) = this.memory.get_alloc_extra_mut(ptr.alloc_id)?;
+        let stacked_borrows =
+            alloc_extra.stacked_borrows.as_mut().expect("we should have Stacked Borrows data");
+        let global = memory_extra.stacked_borrows.as_mut().unwrap().get_mut();
         let item = Item { perm, tag: new_tag, protector };
-        stacked_borrows
-            .for_each(ptr, size, &*global, |ptr, stack, global| stack.grant(ptr, item, global))
+        stacked_borrows.for_each_mut(ptr, size, |ptr, stack| stack.grant(ptr, item, global))
     }

     /// Retags an individual pointer, returning the retagged version.
@@ -640,7 +658,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {

         // Compute new borrow.
         let new_tag = {
-            let mut mem_extra = this.memory.extra.stacked_borrows.as_ref().unwrap().borrow_mut();
+            let mem_extra = this.memory.extra.stacked_borrows.as_mut().unwrap().get_mut();
             match kind {
                 // Give up tracking for raw pointers.
                 RefKind::Raw { .. } if !mem_extra.track_raw => Tag::Untagged,

From e09c571eec1fff99632f96eb1f74a7e177fcf2b0 Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Sun, 23 May 2021 11:00:25 +0200
Subject: [PATCH 2/6] avoid some borrow_mut calls in data_race

---
 src/data_race.rs        | 54 +++++++++++++++++++++--------------------
 src/shims/posix/sync.rs |  2 +-
 src/thread.rs           | 22 +++++++++--------
 3 files changed, 41 insertions(+), 37 deletions(-)

diff --git a/src/data_race.rs b/src/data_race.rs
index fb6bf8f8929..45159ef4c07 100644
--- a/src/data_race.rs
+++ b/src/data_race.rs
@@ -598,7 +598,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         // of the time, based on `rate`.
         let rate = this.memory.extra.cmpxchg_weak_failure_rate;
         let cmpxchg_success = eq.to_bool()?
-            && (!can_fail_spuriously || this.memory.extra.rng.borrow_mut().gen::<f64>() < rate);
+            && (!can_fail_spuriously || this.memory.extra.rng.get_mut().gen::<f64>() < rate);
         let res = Immediate::ScalarPair(
             old.to_scalar_or_uninit(),
             Scalar::from_bool(cmpxchg_success).into(),
@@ -647,7 +647,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         place: &MPlaceTy<'tcx, Tag>,
         atomic: AtomicWriteOp,
     ) -> InterpResult<'tcx> {
-        let this = self.eval_context_ref();
+        let this = self.eval_context_mut();
         this.validate_atomic_op(
             place,
             atomic,
@@ -672,7 +672,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         use AtomicRwOp::*;
         let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
         let release = matches!(atomic, Release | AcqRel | SeqCst);
-        let this = self.eval_context_ref();
+        let this = self.eval_context_mut();
         this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
             if acquire {
                 memory.load_acquire(clocks, index)?;
@@ -690,7 +690,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     /// Update the data-race detector for an atomic fence on the current thread.
     fn validate_atomic_fence(&mut self, atomic: AtomicFenceOp) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        if let Some(data_race) = &this.memory.extra.data_race {
+        if let Some(data_race) = &mut this.memory.extra.data_race {
             data_race.maybe_perform_sync_operation(move |index, mut clocks| {
                 log::trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);
@@ -771,7 +771,7 @@ impl VClockAlloc {
     }

     fn reset_clocks(&mut self, offset: Size, len: Size) {
-        let mut alloc_ranges = self.alloc_ranges.borrow_mut();
+        let alloc_ranges = self.alloc_ranges.get_mut();
         for (_, range) in alloc_ranges.iter_mut(offset, len) {
             // Reset the portion of the range
             *range = MemoryCellClocks::new(0, VectorIdx::MAX_INDEX);
@@ -1025,6 +1025,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         if let Some(data_race) = &this.memory.extra.data_race {
             if data_race.multi_threaded.get() {
                 // Load and log the atomic operation.
+                // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
                 let place_ptr = place.ptr.assert_ptr();
                 let size = place.layout.size;
                 let alloc_meta =
@@ -1105,6 +1106,7 @@ struct ThreadExtraState {
 /// Global data-race detection state, contains the currently
 /// executing thread as well as the vector-clocks associated
 /// with each of the threads.
+// FIXME: it is probably better to have one large RefCell, than to have so many small ones.
 #[derive(Debug, Clone)]
 pub struct GlobalState {
     /// Set to true once the first additional
@@ -1158,7 +1160,7 @@ impl GlobalState {
     /// Create a new global state, setup with just thread-id=0
     /// advanced to timestamp = 1.
     pub fn new() -> Self {
-        let global_state = GlobalState {
+        let mut global_state = GlobalState {
            multi_threaded: Cell::new(false),
            vector_clocks: RefCell::new(IndexVec::new()),
            vector_info: RefCell::new(IndexVec::new()),
@@ -1172,9 +1174,9 @@ impl GlobalState {
         // Setup the main-thread since it is not explicitly created:
         // uses vector index and thread-id 0, also the rust runtime gives
         // the main-thread a name of "main".
-        let index = global_state.vector_clocks.borrow_mut().push(ThreadClockSet::default());
-        global_state.vector_info.borrow_mut().push(ThreadId::new(0));
-        global_state.thread_info.borrow_mut().push(ThreadExtraState {
+        let index = global_state.vector_clocks.get_mut().push(ThreadClockSet::default());
+        global_state.vector_info.get_mut().push(ThreadId::new(0));
+        global_state.thread_info.get_mut().push(ThreadExtraState {
             vector_index: Some(index),
             thread_name: Some("main".to_string().into_boxed_str()),
             termination_vector_clock: None,
@@ -1221,7 +1223,7 @@ impl GlobalState {
     // Hook for thread creation, enables multi-threaded execution and marks
     // the current thread timestamp as happening-before the current thread.
     #[inline]
-    pub fn thread_created(&self, thread: ThreadId) {
+    pub fn thread_created(&mut self, thread: ThreadId) {
         let current_index = self.current_index();

         // Increment the number of active threads.
@@ -1241,12 +1243,12 @@ impl GlobalState {
         let created_index = if let Some(reuse_index) = self.find_vector_index_reuse_candidate() {
             // Now re-configure the re-use candidate, increment the clock
             // for the new sync use of the vector.
-            let mut vector_clocks = self.vector_clocks.borrow_mut();
+            let vector_clocks = self.vector_clocks.get_mut();
             vector_clocks[reuse_index].increment_clock(reuse_index);

             // Locate the old thread the vector was associated with and update
             // it to represent the new thread instead.
-            let mut vector_info = self.vector_info.borrow_mut();
+            let vector_info = self.vector_info.get_mut();
             let old_thread = vector_info[reuse_index];
             vector_info[reuse_index] = thread;
@@ -1258,7 +1260,7 @@ impl GlobalState {
         } else {
             // No vector re-use candidates available, instead create
             // a new vector index.
-            let mut vector_info = self.vector_info.borrow_mut();
+            let vector_info = self.vector_info.get_mut();
             vector_info.push(thread)
         };
@@ -1268,7 +1270,7 @@ impl GlobalState {
         thread_info[thread].vector_index = Some(created_index);

         // Create a thread clock set if applicable.
-        let mut vector_clocks = self.vector_clocks.borrow_mut();
+        let vector_clocks = self.vector_clocks.get_mut();
         if created_index == vector_clocks.next_index() {
             vector_clocks.push(ThreadClockSet::default());
         }
@@ -1289,9 +1291,9 @@ impl GlobalState {
     /// Hook on a thread join to update the implicit happens-before relation
     /// between the joined thread and the current thread.
     #[inline]
-    pub fn thread_joined(&self, current_thread: ThreadId, join_thread: ThreadId) {
-        let mut clocks_vec = self.vector_clocks.borrow_mut();
-        let thread_info = self.thread_info.borrow();
+    pub fn thread_joined(&mut self, current_thread: ThreadId, join_thread: ThreadId) {
+        let clocks_vec = self.vector_clocks.get_mut();
+        let thread_info = self.thread_info.get_mut();

         // Load the vector clock of the current thread.
         let current_index = thread_info[current_thread]
@@ -1329,9 +1331,9 @@ impl GlobalState {

             // If the thread is marked as terminated but not joined
             // then move the thread to the re-use set.
-            let mut termination = self.terminated_threads.borrow_mut();
+            let termination = self.terminated_threads.get_mut();
             if let Some(index) = termination.remove(&join_thread) {
-                let mut reuse = self.reuse_candidates.borrow_mut();
+                let reuse = self.reuse_candidates.get_mut();
                 reuse.insert(index);
             }
         }
@@ -1344,28 +1346,28 @@ impl GlobalState {
     /// This should be called strictly before any calls to
     /// `thread_joined`.
     #[inline]
-    pub fn thread_terminated(&self) {
+    pub fn thread_terminated(&mut self) {
         let current_index = self.current_index();

         // Increment the clock to a unique termination timestamp.
-        let mut vector_clocks = self.vector_clocks.borrow_mut();
+        let vector_clocks = self.vector_clocks.get_mut();
         let current_clocks = &mut vector_clocks[current_index];
         current_clocks.increment_clock(current_index);

         // Load the current thread id for the executing vector.
-        let vector_info = self.vector_info.borrow();
+        let vector_info = self.vector_info.get_mut();
         let current_thread = vector_info[current_index];

         // Load the current thread metadata, and move to a terminated
         // vector state. Setting up the vector clock all join operations
         // will use.
-        let mut thread_info = self.thread_info.borrow_mut();
+        let thread_info = self.thread_info.get_mut();
         let current = &mut thread_info[current_thread];
         current.termination_vector_clock = Some(current_clocks.clock.clone());

         // Add this thread as a candidate for re-use after a thread join
         // occurs.
-        let mut termination = self.terminated_threads.borrow_mut();
+        let termination = self.terminated_threads.get_mut();
         termination.insert(current_thread, current_index);

         // Reduce the number of active threads, now that a thread has
@@ -1392,9 +1394,9 @@ impl GlobalState {
     /// the thread name is used for improved diagnostics
     /// during a data-race.
     #[inline]
-    pub fn thread_set_name(&self, thread: ThreadId, name: String) {
+    pub fn thread_set_name(&mut self, thread: ThreadId, name: String) {
         let name = name.into_boxed_str();
-        let mut thread_info = self.thread_info.borrow_mut();
+        let thread_info = self.thread_info.get_mut();
         thread_info[thread].thread_name = Some(name);
     }

diff --git a/src/shims/posix/sync.rs b/src/shims/posix/sync.rs
index 3b68e4eee44..4725cd9fc3c 100644
--- a/src/shims/posix/sync.rs
+++ b/src/shims/posix/sync.rs
@@ -58,7 +58,7 @@ fn mutexattr_set_kind<'mir, 'tcx: 'mir>(

 // (the kind has to be at its offset for compatibility with static initializer macros)
 fn mutex_get_kind<'mir, 'tcx: 'mir>(
-    ecx: &mut MiriEvalContext<'mir, 'tcx>,
+    ecx: &MiriEvalContext<'mir, 'tcx>,
     mutex_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
     let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };

diff --git a/src/thread.rs b/src/thread.rs
index 3418e8c7d2b..7ee18bb7f80 100644
--- a/src/thread.rs
+++ b/src/thread.rs
@@ -332,7 +332,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
     fn join_thread(
         &mut self,
         joined_thread_id: ThreadId,
-        data_race: &Option<data_race::GlobalState>,
+        data_race: Option<&mut data_race::GlobalState>,
     ) -> InterpResult<'tcx> {
         if self.threads[joined_thread_id].join_status != ThreadJoinStatus::Joinable {
             throw_ub_format!("trying to join a detached or already joined thread");
@@ -436,7 +436,10 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
     /// Wakes up threads joining on the active one and deallocates thread-local statics.
     /// The `AllocId` that can now be freed is returned.
-    fn thread_terminated(&mut self, data_race: &Option<data_race::GlobalState>) -> Vec<AllocId> {
+    fn thread_terminated(
+        &mut self,
+        mut data_race: Option<&mut data_race::GlobalState>,
+    ) -> Vec<AllocId> {
         let mut free_tls_statics = Vec::new();
         {
             let mut thread_local_statics = self.thread_local_alloc_ids.borrow_mut();
@@ -452,14 +455,14 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
             });
         }
         // Set the thread into a terminated state in the data-race detector
-        if let Some(data_race) = data_race {
+        if let Some(ref mut data_race) = data_race {
             data_race.thread_terminated();
         }
         // Check if we need to unblock any threads.
         for (i, thread) in self.threads.iter_enumerated_mut() {
             if thread.state == ThreadState::BlockedOnJoin(self.active_thread) {
                 // The thread has terminated, mark happens-before edge to joining thread
-                if let Some(data_race) = data_race {
+                if let Some(ref mut data_race) = data_race {
                     data_race.thread_joined(i, self.active_thread);
                 }
                 trace!("unblocking {:?} because {:?} terminated", i, self.active_thread);
@@ -584,7 +587,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
     fn create_thread(&mut self) -> ThreadId {
         let this = self.eval_context_mut();
         let id = this.machine.threads.create_thread();
-        if let Some(data_race) = &this.memory.extra.data_race {
+        if let Some(data_race) = &mut this.memory.extra.data_race {
             data_race.thread_created(id);
         }
         id
@@ -599,8 +602,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
     #[inline]
     fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        let data_race = &this.memory.extra.data_race;
-        this.machine.threads.join_thread(joined_thread_id, data_race)?;
+        this.machine.threads.join_thread(joined_thread_id, this.memory.extra.data_race.as_mut())?;
         Ok(())
     }
@@ -664,7 +666,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
     #[inline]
     fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) {
         let this = self.eval_context_mut();
-        if let Some(data_race) = &this.memory.extra.data_race {
+        if let Some(data_race) = &mut this.memory.extra.data_race {
             if let Ok(string) = String::from_utf8(new_thread_name.clone()) {
                 data_race.thread_set_name(this.machine.threads.active_thread, string);
             }
@@ -759,8 +761,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
     #[inline]
     fn thread_terminated(&mut self) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        let data_race = &this.memory.extra.data_race;
-        for alloc_id in this.machine.threads.thread_terminated(data_race) {
+        for alloc_id in this.machine.threads.thread_terminated(this.memory.extra.data_race.as_mut())
+        {
             let ptr = this.memory.global_base_pointer(alloc_id.into())?;
             this.memory.deallocate(ptr, None, MiriMemoryKind::Tls.into())?;
         }

From 9e0e9386a64c4dfa9875a2f3e8be265eaae394f4 Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Sun, 23 May 2021 11:52:41 +0200
Subject: [PATCH 3/6] better approach to skip ZST reborrows

---
 src/stacked_borrows.rs | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/src/stacked_borrows.rs b/src/stacked_borrows.rs
index b1ab34b1f34..3e176d94b99 100644
--- a/src/stacked_borrows.rs
+++ b/src/stacked_borrows.rs
@@ -572,6 +572,18 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
         new_tag: Tag,
         protect: bool,
     ) -> InterpResult<'tcx> {
+        // Nothing to do for ZSTs.
+        if size == Size::ZERO {
+            trace!(
+                "reborrow of size 0: {} reference {:?} derived from {:?} (pointee {})",
+                kind,
+                new_tag,
+                place.ptr,
+                place.layout.ty,
+            );
+            return Ok(());
+        }
+
         let this = self.eval_context_mut();
         let protector = if protect { Some(this.frame().extra.call_id) } else { None };
         let ptr = place.ptr.assert_ptr();
@@ -617,6 +629,8 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
             }
         };
         // Here we can avoid `borrow()` calls because we have mutable references.
+        // Note that this asserts that the allocation is mutable -- but since we are creating a
+        // mutable pointer, that seems reasonable.
         let (alloc_extra, memory_extra) = this.memory.get_alloc_extra_mut(ptr.alloc_id)?;
         let stacked_borrows =
             alloc_extra.stacked_borrows.as_mut().expect("we should have Stacked Borrows data");
@@ -649,12 +663,6 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
         // We can see dangling ptrs in here e.g. after a Box's `Unique` was
         // updated using "self.0 = ..." (can happen in Box::from_raw) so we cannot ICE; see miri#1050.
         let place = this.mplace_access_checked(place, Some(Align::from_bytes(1).unwrap()))?;
-        // Nothing to do for ZSTs. We use `is_bits` here because we *do* need to retag even ZSTs
-        // when there actually is a tag (to avoid inheriting a tag that would let us access more
-        // than 0 bytes).
-        if size == Size::ZERO && place.ptr.is_bits() {
-            return Ok(*val);
-        }

         // Compute new borrow.
         let new_tag = {

From c60efa0c69786cf6e7f05d83ebd1a94c65788c25 Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Sun, 23 May 2021 12:26:37 +0200
Subject: [PATCH 4/6] allocate backtrace strings mutably

---
 src/shims/backtrace.rs | 6 ++++--
 src/shims/panic.rs     | 3 ++-
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/src/shims/backtrace.rs b/src/shims/backtrace.rs
index f936913114c..e866868d729 100644
--- a/src/shims/backtrace.rs
+++ b/src/shims/backtrace.rs
@@ -119,8 +119,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
         // `lo.col` is 0-based - add 1 to make it 1-based for the caller.
         let colno: u32 = lo.col.0 as u32 + 1;

-        let name_alloc = this.allocate_str(&name, MiriMemoryKind::Rust.into());
-        let filename_alloc = this.allocate_str(&filename, MiriMemoryKind::Rust.into());
+        // These are "mutable" allocations as we consider them to be owned by the callee.
+        let name_alloc = this.allocate_str(&name, MiriMemoryKind::Rust.into(), Mutability::Mut);
+        let filename_alloc =
+            this.allocate_str(&filename, MiriMemoryKind::Rust.into(), Mutability::Mut);
         let lineno_alloc = Scalar::from_u32(lineno);
         let colno_alloc = Scalar::from_u32(colno);

diff --git a/src/shims/panic.rs b/src/shims/panic.rs
index b60da058e2c..06a434727b5 100644
--- a/src/shims/panic.rs
+++ b/src/shims/panic.rs
@@ -13,6 +13,7 @@
 use log::trace;

+use rustc_ast::Mutability;
 use rustc_middle::{mir, ty};
 use rustc_target::spec::abi::Abi;
 use rustc_target::spec::PanicStrategy;
@@ -169,7 +170,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
         let this = self.eval_context_mut();

         // First arg: message.
-        let msg = this.allocate_str(msg, MiriMemoryKind::Machine.into());
+        let msg = this.allocate_str(msg, MiriMemoryKind::Machine.into(), Mutability::Not);

         // Call the lang item.
         let panic = this.tcx.lang_items().panic_fn().unwrap();

From 393ce98b32ced200fc443ca411edcb3383bcdef9 Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Sun, 23 May 2021 12:37:52 +0200
Subject: [PATCH 5/6] fix a Stacked Borrows test whose output changed

---
 .../compile-fail/stacked_borrows/static_memory_modification.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/compile-fail/stacked_borrows/static_memory_modification.rs b/tests/compile-fail/stacked_borrows/static_memory_modification.rs
index 88ac1649476..55a7f816c03 100644
--- a/tests/compile-fail/stacked_borrows/static_memory_modification.rs
+++ b/tests/compile-fail/stacked_borrows/static_memory_modification.rs
@@ -3,6 +3,6 @@ static X: usize = 5;
 #[allow(mutable_transmutes)]
 fn main() {
     let _x = unsafe {
-        std::mem::transmute::<&usize, &mut usize>(&X) //~ ERROR borrow stack
+        std::mem::transmute::<&usize, &mut usize>(&X) //~ ERROR writing to alloc0 which is read-only
     };
 }

From a03f700fc9380e881d95c62a1e5fd7b49f1dc743 Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Sun, 23 May 2021 18:05:50 +0200
Subject: [PATCH 6/6] rustup

---
 rust-version | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/rust-version b/rust-version
index bc7ef53b303..31d26ea43e4 100644
--- a/rust-version
+++ b/rust-version
@@ -1 +1 @@
-6e92fb409816c65cd0a78a1fbcc71e2fbabdf50a
+0f8cd43ee8c3614e04b5c624dd8a45758d7023da
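
A note on the pattern driving patches 1 and 2, as an illustration alongside the
series rather than part of it: `RefCell::borrow_mut` checks the borrow flag at
runtime and panics on a conflict, whereas `RefCell::get_mut` goes through an
exclusive `&mut RefCell<T>` and is verified entirely at compile time, with no
runtime check and no panic path. A minimal self-contained sketch; the names
(`Tracker`, `record_*`) are hypothetical and not taken from Miri:

use std::cell::RefCell;

// Stand-in for state guarded by a RefCell, loosely modeled on the
// `Stacks`/`GlobalState` pairing in stacked_borrows.rs.
struct Tracker {
    events: RefCell<Vec<u64>>,
}

impl Tracker {
    // Shared-reference path: must use the runtime-checked `borrow_mut()`,
    // which panics if another borrow is already active.
    fn record_shared(&self, e: u64) {
        self.events.borrow_mut().push(e);
    }

    // Exclusive-reference path: `get_mut()` is statically checked and
    // compiles down to a plain field access.
    fn record_mut(&mut self, e: u64) {
        self.events.get_mut().push(e);
    }
}

fn main() {
    let mut t = Tracker { events: RefCell::new(Vec::new()) };
    t.record_shared(1); // runtime borrow-flag check
    t.record_mut(2); // no check: we hold `&mut Tracker`
    assert_eq!(*t.events.borrow(), vec![1, 2]);
}

This is why the series threads `&mut self` and `get_alloc_extra_mut` through
the write paths: wherever exclusive access already exists, the `RefCell`
bookkeeping is pure overhead.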
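
Patch 2 also switches `data_race: &Option<data_race::GlobalState>` parameters
to `Option<&mut data_race::GlobalState>`, with callers passing
`this.memory.extra.data_race.as_mut()`. A hedged sketch of why the callee then
binds with `Some(ref mut ...)`; the types here (`GlobalState`, `terminate_all`)
are illustrative stand-ins, not Miri's:

struct GlobalState {
    counter: u64,
}

impl GlobalState {
    fn bump(&mut self) {
        self.counter += 1;
    }
}

// Taking `Option<&mut GlobalState>` lets the caller lend exclusive access
// without the callee owning the Option, mirroring `thread_terminated`.
fn terminate_all(mut data_race: Option<&mut GlobalState>, threads: usize) {
    for _ in 0..threads {
        // `Option<&mut T>` is not Copy; binding with `ref mut` reborrows
        // instead of moving, so the Option survives to the next iteration.
        if let Some(ref mut dr) = data_race {
            dr.bump();
        }
    }
}

fn main() {
    let mut state = Some(GlobalState { counter: 0 });
    // Callers use `.as_mut()`, as `join_thread`/`thread_terminated` do above.
    terminate_all(state.as_mut(), 3);
    assert_eq!(state.unwrap().counter, 3);
}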