Auto merge of #2394 - saethlin:unique-range-ice, r=RalfJung

Fix bugs where unique_range became invalid

Also expand the cache integrity checks to cover this case.

I'm going to run this over all the ICEs I've gotten out of Miri recently; it could take a bit.

Fixes https://github.com/rust-lang/miri/issues/2389
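
For orientation (my summary, not part of the commit): Miri's Stacked Borrows stack keeps a cached `unique_range` that over-approximates where `Permission::Unique` items sit in the borrow stack, and the ICE happened when that range stopped being a valid slice index. Below is a minimal standalone model of the invariant, with made-up data and simplified types; it is illustrative only, not Miri's actual code.

    use std::ops::Range;

    #[derive(Debug, PartialEq, Clone, Copy)]
    enum Permission {
        Unique,
        SharedReadWrite,
    }

    struct Stack {
        borrows: Vec<Permission>,
        // Over-approximation: every Unique item must lie inside this range.
        unique_range: Range<usize>,
    }

    impl Stack {
        fn verify_cache_consistency(&self) {
            // Every Unique item must fall inside unique_range.
            for (idx, perm) in self.borrows.iter().enumerate() {
                if *perm == Permission::Unique {
                    assert!(self.unique_range.contains(&idx));
                }
            }
            // Slicing panics if start > end or end > len, so this line doubles
            // as a validity check; the ICEs fixed here were of this kind.
            let uniques = &self.borrows[self.unique_range.clone()];
            // The start of the range must be precise (point at a Unique item)...
            if let Some(first) = uniques.first() {
                assert_eq!(*first, Permission::Unique);
            }
            // ...but the end is allowed to be a stale over-approximation.
        }
    }

    fn main() {
        let stack = Stack {
            borrows: vec![Permission::SharedReadWrite, Permission::Unique, Permission::Unique],
            unique_range: 1..3,
        };
        stack.verify_cache_consistency();
    }

In the real implementation these checks run behind `#[cfg(debug_assertions)]` and the `stack-cache` feature, as the diff below shows; the sketch only spells out what "valid" means for the range.
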
bors 2022-07-20 01:39:40 +00:00
commit ddde70c121
3 changed files with 65 additions and 10 deletions

View File

@@ -92,6 +92,7 @@ fn verify_cache_consistency(&self) {
}
}
// Check that all Unique items fall within unique_range.
for (idx, item) in self.borrows.iter().enumerate() {
if item.perm() == Permission::Unique {
assert!(
@@ -102,6 +103,19 @@ fn verify_cache_consistency(&self) {
);
}
}
// Check that the unique_range is a valid index into the borrow stack.
// This asserts that the unique_range's start <= end.
let uniques = &self.borrows[self.unique_range.clone()];
// Check that the start of the unique_range is precise.
if let Some(first_unique) = uniques.first() {
assert_eq!(first_unique.perm(), Permission::Unique);
}
// We cannot assert that the unique range is exact on the upper end.
// When we pop items within the unique range, setting the end of the range precisely
// requires doing a linear search of the borrow stack, which is exactly the kind of
// operation that all this caching exists to avoid.
}
/// Find the item granting the given kind of access to the given tag, and return where
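
To spell out why the `let uniques = ...` line above doubles as the validity assertion: indexing a slice with a `Range` panics when `start > end` or `end > len`, which is exactly the failure mode of a corrupted cache. A tiny illustration (not from the commit):

    fn main() {
        let borrows = vec![1, 2, 3];
        // A stale cache could leave a range like 5..2 behind; merely building the
        // slice panics ("slice index starts at 5 but ends at 2"), so no separate
        // assert is needed for start <= end.
        let bad = 5..2;
        let _ = &borrows[bad]; // panics at runtime
    }
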
@@ -227,9 +241,14 @@ fn insert_cache(&mut self, new_idx: usize, new: Item) {
self.unique_range.end += 1;
}
if new.perm() == Permission::Unique {
-// Make sure the possibly-unique range contains the new borrow
-self.unique_range.start = self.unique_range.start.min(new_idx);
-self.unique_range.end = self.unique_range.end.max(new_idx + 1);
+// If this is the only Unique, set the range to contain just the new item.
+if self.unique_range.is_empty() {
+    self.unique_range = new_idx..new_idx + 1;
+} else {
+    // We already have other Unique items, expand the range to include the new item
+    self.unique_range.start = self.unique_range.start.min(new_idx);
+    self.unique_range.end = self.unique_range.end.max(new_idx + 1);
+}
}
// The above insert changes the meaning of every index in the cache >= new_idx, so now
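
The insert_cache change above guards against widening an *empty* range: an empty range's bounds are stale leftovers, and folding a new index into them with min/max resurrects them. A standalone sketch with made-up indices (not Miri code):

    fn main() {
        // Suppose all Unique items were removed earlier, leaving an empty range
        // whose bounds are stale, e.g. 12..12.
        let mut unique_range = 12..12;
        let new_idx: usize = 3;

        // Old behaviour: blindly widen with min/max. The result 3..12 claims that
        // indices up to 12 may hold Unique items, even though only index 3 does,
        // and 12 may no longer be a valid index at all.
        let widened = unique_range.start.min(new_idx)..unique_range.end.max(new_idx + 1);
        assert_eq!(widened, 3..12);

        // New behaviour: an empty range has no meaningful bounds, so start over.
        if unique_range.is_empty() {
            unique_range = new_idx..new_idx + 1;
        }
        assert_eq!(unique_range, 3..4);
    }
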
@@ -282,6 +301,10 @@ pub fn set_unknown_bottom(&mut self, tag: SbTag) {
// cache when it has been cleared and not yet refilled.
self.borrows.clear();
self.unknown_bottom = Some(tag);
#[cfg(feature = "stack-cache")]
{
self.unique_range = 0..0;
}
}
/// Find all `Unique` elements in this borrow stack above `granting_idx`, pass a copy of them
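
A small illustration of why `set_unknown_bottom` now also resets the cached range (sketch with made-up values, not from the commit): once `borrows` is cleared, any non-empty leftover range would make the next consistency check slice out of bounds.

    fn main() {
        let mut borrows = vec![10, 11, 12, 13];
        let mut unique_range = 1..3;
        let _ = &borrows[unique_range.clone()]; // fine while the stack is populated

        // Clearing the stack without resetting the cache would leave 1..3 pointing
        // into an empty Vec, and the next slice would panic.
        borrows.clear();
        unique_range = 0..0; // reset together with the stack, as the fix does
        let _ = &borrows[unique_range]; // empty slice of an empty Vec, no panic
    }
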
@@ -298,7 +321,7 @@ pub fn disable_uniques_starting_at<V: FnMut(Item) -> crate::InterpResult<'tcx>>(
if disable_start <= unique_range.end {
let lower = unique_range.start.max(disable_start);
-let upper = (unique_range.end + 1).min(self.borrows.len());
+let upper = self.unique_range.end;
for item in &mut self.borrows[lower..upper] {
if item.perm() == Permission::Unique {
log::trace!("access: disabling item {:?}", item);
@@ -315,14 +338,14 @@ pub fn disable_uniques_starting_at<V: FnMut(Item) -> crate::InterpResult<'tcx>>(
}
#[cfg(feature = "stack-cache")]
-if disable_start < self.unique_range.start {
+if disable_start <= self.unique_range.start {
// We disabled all Unique items
self.unique_range.start = 0;
self.unique_range.end = 0;
} else {
-// Truncate the range to disable_start. This is + 2 because we are only removing
-// elements after disable_start, and this range does not include the end.
-self.unique_range.end = self.unique_range.end.min(disable_start + 1);
+// Truncate the range to only include items up to the index that we started disabling
+// at.
+self.unique_range.end = self.unique_range.end.min(disable_start);
}
#[cfg(debug_assertions)]
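
To make the two changes in this hunk concrete (illustrative numbers, not from the commit): with `unique_range = 3..7`, disabling from index 5 upward should leave `3..5`, while disabling from index 3 or below (`disable_start <= start`, the new condition) leaves nothing to track.

    fn main() {
        // Hypothetical cache state: Unique items may live somewhere in 3..7.
        let mut unique_range: std::ops::Range<usize> = 3..7;
        // Everything at index >= disable_start has its Unique permission disabled.
        let disable_start = 5;

        if disable_start <= unique_range.start {
            // Every item the range could cover was disabled; nothing left to track.
            unique_range = 0..0;
        } else {
            // Items at 3 and 4 may still be Unique, so the range ends exactly at
            // disable_start; the old `disable_start + 1` kept one already-disabled
            // slot inside the range.
            unique_range.end = unique_range.end.min(disable_start);
        }
        assert_eq!(unique_range, 3..5);
    }
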
@@ -369,12 +392,12 @@ pub fn pop_items_after<V: FnMut(Item) -> crate::InterpResult<'tcx>>(
self.cache.items[i] = base_tag;
}
-if start < self.unique_range.start.saturating_sub(1) {
+if start <= self.unique_range.start {
// We removed all the Unique items
self.unique_range = 0..0;
} else {
// Ensure the range doesn't extend past the new top of the stack
-self.unique_range.end = self.unique_range.end.min(start + 1);
+self.unique_range.end = self.unique_range.end.min(start);
}
} else {
self.unique_range = 0..0;
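
The pop_items_after hunk follows the same pattern, but here items are actually removed, so the stack's new length is `start` and the cached end must not exceed it. A sketch with made-up values (not from the commit):

    fn main() {
        let mut borrows = vec![0u8; 8];
        let mut unique_range: std::ops::Range<usize> = 3..7;

        // pop_items_after removes every item at index >= start, so the stack's
        // new length is `start`.
        let start = 4;
        borrows.truncate(start);

        if start <= unique_range.start {
            // Everything the range covered was popped.
            unique_range = 0..0;
        } else {
            // Clamp to `start` (the new length); the old `start + 1` could leave
            // the range pointing one past the new top of the stack.
            unique_range.end = unique_range.end.min(start);
        }
        assert_eq!(unique_range, 3..4);
        let _ = &borrows[unique_range]; // still a valid slice after the clamp
    }
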

View File

@@ -0,0 +1,17 @@
use std::cell::Cell;
fn main() {
unsafe {
let root0 = Cell::new(42);
let wildcard = &root0 as *const Cell<i32> as usize as *const Cell<i32>;
// empty the stack to unknown (via SRW reborrow from wildcard)
let _ref0 = &*wildcard;
// Do a non-SRW reborrow from wildcard to start building up a stack again.
// Now new refs start being inserted at idx 0, pushing the unique_range up.
let _refn = &*&*&*&*&*(wildcard.cast::<i32>());
// empty the stack again, but this time with unique_range.start sitting at some high index.
let _ref0 = &*wildcard;
// and do a read which tries to clear the uniques
wildcard.cast::<i32>().read();
}
}

View File

@@ -0,0 +1,15 @@
warning: integer-to-pointer cast
--> $DIR/issue-miri-2389.rs:LL:CC
|
LL | let wildcard = &root0 as *const Cell<i32> as usize as *const Cell<i32>;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ integer-to-pointer cast
|
= help: This program is using integer-to-pointer casts or (equivalently) `ptr::from_exposed_addr`,
= help: which means that Miri might miss pointer bugs in this program.
= help: See https://doc.rust-lang.org/nightly/std/ptr/fn.from_exposed_addr.html for more details on that operation.
= help: To ensure that Miri does not miss bugs in your program, use Strict Provenance APIs (https://doc.rust-lang.org/nightly/std/ptr/index.html#strict-provenance, https://crates.io/crates/sptr) instead.
= help: You can then pass the `-Zmiri-strict-provenance` flag to Miri, to ensure you are not relying on `from_exposed_addr` semantics.
= help: Alternatively, the `-Zmiri-permissive-provenance` flag disables this warning.
= note: backtrace:
= note: inside `main` at $DIR/issue-miri-2389.rs:LL:CC