diff --git a/rust-version b/rust-version
index d476da0baaf..fa9ce8aca88 100644
--- a/rust-version
+++ b/rust-version
@@ -1 +1 @@
-c8422403f775126c40d558838d321c063554c822
+27af5175497936ea3413bef5816e7c0172514b9c
diff --git a/src/data_race.rs b/src/data_race.rs
index 4a79d9e9904..7625763a3bb 100644
--- a/src/data_race.rs
+++ b/src/data_race.rs
@@ -999,7 +999,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         if let Some(data_race) = &this.machine.data_race {
             if data_race.multi_threaded.get() {
                 let size = place.layout.size;
-                let (alloc_id, base_offset, ptr) = this.ptr_get_alloc_id(place.ptr)?;
+                let (alloc_id, base_offset, _tag) = this.ptr_get_alloc_id(place.ptr)?;
                 // Load and log the atomic operation.
                 // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
                 let alloc_meta = &this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
@@ -1007,7 +1007,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
                     "Atomic op({}) with ordering {:?} on {:?} (size={})",
                     description,
                     &atomic,
-                    ptr,
+                    place.ptr,
                     size.bytes()
                 );
 
@@ -1039,7 +1039,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
                 {
                     log::trace!(
                         "Updated atomic memory({:?}, size={}) to {:#?}",
-                        ptr,
+                        place.ptr,
                         size.bytes(),
                         range.atomic_ops
                     );
diff --git a/src/helpers.rs b/src/helpers.rs
index b052463bb02..9d387b06593 100644
--- a/src/helpers.rs
+++ b/src/helpers.rs
@@ -271,8 +271,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     /// Visits the memory covered by `place`, sensitive to freezing: the 2nd parameter
     /// of `action` will be true if this is frozen, false if this is in an `UnsafeCell`.
     /// The range is relative to `place`.
-    ///
-    /// Assumes that the `place` has a proper pointer in it.
     fn visit_freeze_sensitive(
         &self,
         place: &MPlaceTy<'tcx, Tag>,
@@ -290,33 +288,30 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         // Store how far we proceeded into the place so far. Everything to the left of
         // this offset has already been handled, in the sense that the frozen parts
         // have had `action` called on them.
-        let ptr = place.ptr.into_pointer_or_addr().unwrap();
-        let start_offset = ptr.into_parts().1 as Size; // we just compare offsets, the abs. value never matters
-        let mut cur_offset = start_offset;
+        let start_addr = place.ptr.addr();
+        let mut cur_addr = start_addr;
         // Called when we detected an `UnsafeCell` at the given offset and size.
         // Calls `action` and advances `cur_ptr`.
-        let mut unsafe_cell_action = |unsafe_cell_ptr: Pointer<Option<Tag>>,
+        let mut unsafe_cell_action = |unsafe_cell_ptr: &Pointer<Option<Tag>>,
                                       unsafe_cell_size: Size| {
-            let unsafe_cell_ptr = unsafe_cell_ptr.into_pointer_or_addr().unwrap();
-            debug_assert_eq!(unsafe_cell_ptr.provenance, ptr.provenance);
             // We assume that we are given the fields in increasing offset order,
             // and nothing else changes.
-            let unsafe_cell_offset = unsafe_cell_ptr.into_parts().1 as Size; // we just compare offsets, the abs. value never matters
-            assert!(unsafe_cell_offset >= cur_offset);
-            let frozen_size = unsafe_cell_offset - cur_offset;
+            let unsafe_cell_addr = unsafe_cell_ptr.addr();
+            assert!(unsafe_cell_addr >= cur_addr);
+            let frozen_size = unsafe_cell_addr - cur_addr;
             // Everything between the cur_ptr and this `UnsafeCell` is frozen.
             if frozen_size != Size::ZERO {
-                action(alloc_range(cur_offset - start_offset, frozen_size), /*frozen*/ true)?;
+                action(alloc_range(cur_addr - start_addr, frozen_size), /*frozen*/ true)?;
             }
-            cur_offset += frozen_size;
+            cur_addr += frozen_size;
             // This `UnsafeCell` is NOT frozen.
             if unsafe_cell_size != Size::ZERO {
                 action(
-                    alloc_range(cur_offset - start_offset, unsafe_cell_size),
+                    alloc_range(cur_addr - start_addr, unsafe_cell_size),
                     /*frozen*/ false,
                 )?;
             }
-            cur_offset += unsafe_cell_size;
+            cur_addr += unsafe_cell_size;
             // Done
             Ok(())
         };
@@ -334,7 +329,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
                     .unwrap_or_else(|| place.layout.size);
                 // Now handle this `UnsafeCell`, unless it is empty.
                 if unsafe_cell_size != Size::ZERO {
-                    unsafe_cell_action(place.ptr, unsafe_cell_size)
+                    unsafe_cell_action(&place.ptr, unsafe_cell_size)
                 } else {
                     Ok(())
                 }
@@ -344,7 +339,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         }
         // The part between the end_ptr and the end of the place is also frozen.
         // So pretend there is a 0-sized `UnsafeCell` at the end.
-        unsafe_cell_action(place.ptr.wrapping_offset(size, this), Size::ZERO)?;
+        unsafe_cell_action(&place.ptr.offset(size, this)?, Size::ZERO)?;
         // Done!
         return Ok(());
 
@@ -428,9 +423,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
                 let mut places =
                     fields.collect::<InterpResult<'tcx, Vec<MPlaceTy<'tcx, Tag>>>>()?;
                 // we just compare offsets, the abs. value never matters
-                places.sort_by_key(|place| {
-                    place.ptr.into_pointer_or_addr().unwrap().into_parts().1 as Size
-                });
+                places.sort_by_key(|place| place.ptr.addr());
                 self.walk_aggregate(place, places.into_iter().map(Ok))
             }
             FieldsShape::Union { .. } | FieldsShape::Primitive => {
@@ -777,6 +770,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     /// Mark a machine allocation that was just created as immutable.
     fn mark_immutable(&mut self, mplace: &MemPlace<Tag>) {
         let this = self.eval_context_mut();
+        // This got just allocated, so there definitely is a pointer here.
         this.alloc_mark_immutable(mplace.ptr.into_pointer_or_addr().unwrap().provenance.alloc_id)
             .unwrap();
     }
diff --git a/src/intptrcast.rs b/src/intptrcast.rs
index b1c96c7f1e7..895241bcc32 100644
--- a/src/intptrcast.rs
+++ b/src/intptrcast.rs
@@ -128,7 +128,7 @@ impl<'mir, 'tcx> GlobalStateInner {
 
     /// Convert a relative (tcx) pointer to an absolute address.
     pub fn rel_ptr_to_addr(ecx: &MiriEvalContext<'mir, 'tcx>, ptr: Pointer<AllocId>) -> u64 {
-        let (alloc_id, offset) = ptr.into_parts(); // offset is relative
+        let (alloc_id, offset) = ptr.into_parts(); // offset is relative (AllocId provenance)
         let base_addr = GlobalStateInner::alloc_base_addr(ecx, alloc_id);
 
         // Add offset with the right kind of pointer-overflowing arithmetic.
@@ -137,7 +137,7 @@ impl<'mir, 'tcx> GlobalStateInner {
     }
 
     pub fn abs_ptr_to_rel(ecx: &MiriEvalContext<'mir, 'tcx>, ptr: Pointer<Tag>) -> Size {
-        let (tag, addr) = ptr.into_parts(); // addr is absolute
+        let (tag, addr) = ptr.into_parts(); // addr is absolute (Tag provenance)
         let base_addr = GlobalStateInner::alloc_base_addr(ecx, tag.alloc_id);
 
         // Wrapping "addr - base_addr"
diff --git a/src/machine.rs b/src/machine.rs
index 532aeeece07..66854921a33 100644
--- a/src/machine.rs
+++ b/src/machine.rs
@@ -360,6 +360,7 @@ impl<'mir, 'tcx> Evaluator<'mir, 'tcx> {
         name: &str,
         ptr: Pointer<Option<Tag>>,
     ) {
+        // This got just allocated, so there definitely is a pointer here.
         let ptr = ptr.into_pointer_or_addr().unwrap();
         this.machine.extern_statics.try_insert(Symbol::intern(name), ptr).unwrap();
     }
@@ -431,11 +432,13 @@ impl<'mir, 'tcx> MiriEvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx>
 /// Machine hook implementations.
 impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
     type MemoryKind = MiriMemoryKind;
+    type ExtraFnVal = Dlsym;
+
     type FrameExtra = FrameData<'tcx>;
     type AllocExtra = AllocExtra;
+
     type PointerTag = Tag;
-
-    type ExtraFnVal = Dlsym;
+    type TagExtra = SbTag;
 
     type MemoryMap = MonoHashMap<AllocId, (MemoryKind<MiriMemoryKind>, Allocation)>;
 
@@ -607,9 +610,9 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
     fn ptr_get_alloc(
         ecx: &MiriEvalContext<'mir, 'tcx>,
         ptr: Pointer<Self::PointerTag>,
-    ) -> (AllocId, Size) {
+    ) -> (AllocId, Size, Self::TagExtra) {
         let rel = intptrcast::GlobalStateInner::abs_ptr_to_rel(ecx, ptr);
-        (ptr.provenance.alloc_id, rel)
+        (ptr.provenance.alloc_id, rel, ptr.provenance.sb)
     }
 
     #[inline(always)]
@@ -617,16 +620,16 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
         _tcx: TyCtxt<'tcx>,
         machine: &Self,
         alloc_extra: &AllocExtra,
-        tag: Tag,
+        (alloc_id, tag): (AllocId, Self::TagExtra),
         range: AllocRange,
     ) -> InterpResult<'tcx> {
         if let Some(data_race) = &alloc_extra.data_race {
-            data_race.read(tag.alloc_id, range, machine.data_race.as_ref().unwrap())?;
+            data_race.read(alloc_id, range, machine.data_race.as_ref().unwrap())?;
         }
         if let Some(stacked_borrows) = &alloc_extra.stacked_borrows {
             stacked_borrows.memory_read(
-                tag.alloc_id,
-                tag.sb,
+                alloc_id,
+                tag,
                 range,
                 machine.stacked_borrows.as_ref().unwrap(),
             )
@@ -640,16 +643,16 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
         _tcx: TyCtxt<'tcx>,
         machine: &mut Self,
         alloc_extra: &mut AllocExtra,
-        tag: Tag,
+        (alloc_id, tag): (AllocId, Self::TagExtra),
         range: AllocRange,
     ) -> InterpResult<'tcx> {
         if let Some(data_race) = &mut alloc_extra.data_race {
-            data_race.write(tag.alloc_id, range, machine.data_race.as_mut().unwrap())?;
+            data_race.write(alloc_id, range, machine.data_race.as_mut().unwrap())?;
         }
         if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
             stacked_borrows.memory_written(
-                tag.alloc_id,
-                tag.sb,
+                alloc_id,
+                tag,
                 range,
                 machine.stacked_borrows.as_mut().unwrap(),
             )
@@ -663,19 +666,19 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
         _tcx: TyCtxt<'tcx>,
         machine: &mut Self,
         alloc_extra: &mut AllocExtra,
-        tag: Tag,
+        (alloc_id, tag): (AllocId, Self::TagExtra),
         range: AllocRange,
     ) -> InterpResult<'tcx> {
-        if Some(tag.alloc_id) == machine.tracked_alloc_id {
-            register_diagnostic(NonHaltingDiagnostic::FreedAlloc(tag.alloc_id));
+        if Some(alloc_id) == machine.tracked_alloc_id {
+            register_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
         }
         if let Some(data_race) = &mut alloc_extra.data_race {
-            data_race.deallocate(tag.alloc_id, range, machine.data_race.as_mut().unwrap())?;
+            data_race.deallocate(alloc_id, range, machine.data_race.as_mut().unwrap())?;
         }
         if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
             stacked_borrows.memory_deallocated(
-                tag.alloc_id,
-                tag.sb,
+                alloc_id,
+                tag,
                 range,
                 machine.stacked_borrows.as_mut().unwrap(),
             )
diff --git a/src/shims/backtrace.rs b/src/shims/backtrace.rs
index 2ec4bbb32e0..3ada61abbd2 100644
--- a/src/shims/backtrace.rs
+++ b/src/shims/backtrace.rs
@@ -124,7 +124,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 
         let ptr = this.read_pointer(ptr)?;
         // Take apart the pointer, we need its pieces.
-        let (alloc_id, offset, ptr) = this.ptr_get_alloc_id(ptr)?;
+        let (alloc_id, offset, _tag) = this.ptr_get_alloc_id(ptr)?;
         let fn_instance = if let Some(GlobalAlloc::Function(instance)) =
             this.tcx.get_global_alloc(alloc_id)
         {
diff --git a/src/shims/mod.rs b/src/shims/mod.rs
index 83bc6b6ae1b..af6064925f0 100644
--- a/src/shims/mod.rs
+++ b/src/shims/mod.rs
@@ -83,10 +83,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         }
 
         let ptr = this.read_pointer(ptr_op)?;
-        if let Ok(ptr) = ptr.into_pointer_or_addr() {
+        if let Ok((alloc_id, _offset, _)) = this.ptr_try_get_alloc_id(ptr) {
             // Only do anything if we can identify the allocation this goes to.
-            let (_, cur_align) =
-                this.get_alloc_size_and_align(ptr.provenance.alloc_id, AllocCheck::MaybeDead)?;
+            let (_, cur_align) = this.get_alloc_size_and_align(alloc_id, AllocCheck::MaybeDead)?;
             if cur_align.bytes() >= req_align {
                 // If the allocation alignment is at least the required alignment we use the
                 // real implementation.
diff --git a/src/stacked_borrows.rs b/src/stacked_borrows.rs
index a365d909981..0029de3b5a9 100644
--- a/src/stacked_borrows.rs
+++ b/src/stacked_borrows.rs
@@ -702,8 +702,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             );
             return Ok(());
         }
-        let (alloc_id, base_offset, ptr) = this.ptr_get_alloc_id(place.ptr)?;
-        let orig_tag = ptr.provenance.sb;
+        let (alloc_id, base_offset, orig_tag) = this.ptr_get_alloc_id(place.ptr)?;
 
         // Ensure we bail out if the pointer goes out-of-bounds (see miri#1050).
         let (alloc_size, _) =
diff --git a/tests/run-pass/issue-miri-2068.rs b/tests/run-pass/issue-miri-2068.rs
new file mode 100644
index 00000000000..7576ba78f60
--- /dev/null
+++ b/tests/run-pass/issue-miri-2068.rs
@@ -0,0 +1,48 @@
+#![feature(pin_macro)]
+
+use core::future::Future;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+
+use std::sync::Arc;
+
+struct NopWaker;
+
+impl std::task::Wake for NopWaker {
+    fn wake(self: Arc<Self>) {}
+}
+
+pub fn fuzzing_block_on<O, F: Future<Output = O>>(fut: F) -> O {
+    let mut fut = std::pin::pin!(fut);
+    let waker = std::task::Waker::from(Arc::new(NopWaker));
+    let mut context = std::task::Context::from_waker(&waker);
+    loop {
+        match fut.as_mut().poll(&mut context) {
+            Poll::Ready(v) => return v,
+            Poll::Pending => {}
+        }
+    }
+}
+
+pub struct LastFuture<S> {
+    last: S,
+}
+
+impl<S> Future for LastFuture<S>
+where
+    Self: Unpin,
+    S: Unpin + Copy,
+{
+    type Output = S;
+
+    fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {
+        return Poll::Ready(self.last);
+    }
+}
+
+fn main() {
+    fuzzing_block_on(async {
+        LastFuture { last: &0u32 }.await;
+        LastFuture { last: Option::<u32>::None }.await;
+    });
+}