finally we can actually have adjacent allocations :)

This commit is contained in:
Ralf Jung 2022-06-27 08:13:55 -04:00
parent 7fafbde038
commit c16b380d6b
3 changed files with 70 additions and 5 deletions

View File

@ -1,4 +1,5 @@
use std::cell::RefCell;
use std::cmp::max;
use std::collections::hash_map::Entry;
use log::trace;
@ -198,11 +199,11 @@ impl<'mir, 'tcx> GlobalStateInner {
slack,
);
// Remember next base address. Leave a gap of at least 1 to avoid two zero-sized allocations
// having the same base address, and to avoid ambiguous provenance for the address between two
// allocations (also see https://github.com/rust-lang/unsafe-code-guidelines/issues/313).
let size_plus_1 = size.bytes().checked_add(1).unwrap();
global_state.next_base_addr = base_addr.checked_add(size_plus_1).unwrap();
// Remember next base address. If this allocation is zero-sized, leave a gap
// of at least 1 to avoid two allocations having the same base address.
// (The logic in `alloc_id_from_addr` assumes unique addresses, and function
// pointers to different functions need to be distinguishable!)
global_state.next_base_addr = base_addr.checked_add(max(size.bytes(), 1)).unwrap();
// Given that `next_base_addr` increases in each allocation, pushing the
// corresponding tuple keeps `int_to_ptr_map` sorted
global_state.int_to_ptr_map.push((base_addr, alloc_id));

View File

@ -1,5 +1,20 @@
// compile-flags: -Zmiri-permissive-provenance
// Probe whether this Miri instance ever hands out *adjacent* base addresses:
// allocate two stack variables over and over and return as soon as their
// addresses differ by exactly one `u64`. The slack Miri inserts between
// allocations is randomized, so we retry; 512 attempts is presumably ample
// to hit the zero-slack case — TODO confirm against Miri's slack distribution.
fn ensure_allocs_can_be_adjacent() {
    for _ in 0..512 {
        let n = 0u64;
        let ptr: *const u64 = &n;
        let ptr2 = {
            // `m`'s allocation is dead once this block ends, but its address
            // is still a perfectly fine integer value to compare against.
            let m = 0u64;
            &m as *const u64
        };
        // `wrapping_add(1)` steps one element (8 bytes) past `n`; equality
        // here means the two allocations were laid out back-to-back.
        if ptr.wrapping_add(1) == ptr2 {
            return;
        }
    }
    panic!("never saw adjacent stack variables?");
}
fn test1() {
// The slack between allocations is random.
// Loop a few times to hit the zero-slack case.
@ -42,6 +57,7 @@ fn test2() {
}
fn main() {
    // Run the adjacency probe first: the tests below rely on zero-slack
    // (adjacent) allocations eventually occurring — presumably this guards
    // against an environment where they never do; confirm against test1/test2.
    ensure_allocs_can_be_adjacent();
    test1();
    test2();
}

View File

@ -1,5 +1,7 @@
// compile-flags: -Zmiri-permissive-provenance
use std::mem;
// Strips provenance from a pointer by reinterpreting its bits as an integer
// address. (Per the original note, a transmute strips provenance rather than
// exposing it the way an `as usize` cast would.)
fn transmute_ptr_to_int<T>(x: *const T) -> usize {
    // SAFETY: `*const T` and `usize` have identical size and layout on all
    // targets Miri supports, so this bit-level reinterpretation is well-formed.
    unsafe { std::mem::transmute::<*const T, usize>(x) }
}
@ -100,6 +102,51 @@ fn zst_deref_of_dangling() {
let _val = unsafe { *zst };
}
// Round-trip a batch of function pointers through plain integers.
// Several distinct functions are used so success cannot be a fluke: if
// zero-sized allocations (which fn allocations are) did not get unique
// addresses, casting back might fail or call the wrong function. Looping
// over one function would not help — each function has at most one address.
fn functions() {
    fn fn0() -> i32 {
        0
    }
    fn fn1() -> i32 {
        1
    }
    fn fn2() -> i32 {
        2
    }
    fn fn3() -> i32 {
        3
    }
    fn fn4() -> i32 {
        4
    }
    fn fn5() -> i32 {
        5
    }
    fn fn6() -> i32 {
        6
    }
    fn fn7() -> i32 {
        7
    }
    // Each entry returns its own index, so the index doubles as the oracle.
    let table: [fn() -> i32; 8] = [fn0, fn1, fn2, fn3, fn4, fn5, fn6, fn7];
    for (expected, &f) in table.iter().enumerate() {
        // Strip the function pointer down to a bare integer address...
        let addr = f as *const () as usize;
        // ...then rebuild a callable pointer from that integer and invoke it.
        let roundtripped: fn() -> i32 = unsafe { std::mem::transmute(addr as *const ()) };
        assert_eq!(roundtripped(), expected as i32);
    }
}
fn main() {
cast();
cast_dangling();
@ -112,4 +159,5 @@ fn main() {
ptr_eq_out_of_bounds_null();
ptr_eq_integer();
zst_deref_of_dangling();
functions();
}