rust/src/intptrcast.rs

use std::cell::RefCell;
use std::cmp::max;
use std::collections::{HashMap, hash_map::Entry};

use rand::Rng;
use rustc::ty::layout::HasDataLayout;
use rustc_mir::interpret::{AllocId, Pointer, InterpResult, Memory, AllocCheck, PointerArithmetic};
use rustc_target::abi::Size;

use crate::{Evaluator, Tag, STACK_ADDR};

pub type MemoryExtra = RefCell<GlobalState>;

#[derive(Clone, Debug)]
pub struct GlobalState {
    /// This is used as a map between the address of each allocation and its `AllocId`.
    /// It is always sorted by address.
    pub int_to_ptr_map: Vec<(u64, AllocId)>,
    /// The base address for each allocation. We cannot put that into
    /// `AllocExtra` because function pointers also have a base address, and
    /// they do not have an `AllocExtra`.
    /// This is the inverse of `int_to_ptr_map`.
    pub base_addr: HashMap<AllocId, u64>,
    /// This is used as a memory address when a new pointer is cast to an integer. It
    /// is always larger than any address that was previously made part of a block.
    pub next_base_addr: u64,
}

impl Default for GlobalState {
    fn default() -> Self {
        GlobalState {
            int_to_ptr_map: Vec::default(),
            base_addr: HashMap::default(),
            next_base_addr: STACK_ADDR,
        }
    }
}

impl<'mir, 'tcx> GlobalState {
    pub fn int_to_ptr(
        int: u64,
        memory: &Memory<'mir, 'tcx, Evaluator<'tcx>>,
    ) -> InterpResult<'tcx, Pointer<Tag>> {
        if int == 0 {
            throw_unsup!(InvalidNullPointerUsage);
        }

        let global_state = memory.extra.intptrcast.borrow();

        Ok(match global_state.int_to_ptr_map.binary_search_by_key(&int, |(addr, _)| *addr) {
            Ok(pos) => {
                let (_, alloc_id) = global_state.int_to_ptr_map[pos];
                // `int` is equal to the starting address of an allocation, so the offset is
                // zero. The pointer is untagged because it was created from a cast.
                Pointer::new_with_tag(alloc_id, Size::from_bytes(0), Tag::Untagged)
            },
            Err(0) => throw_unsup!(DanglingPointerDeref),
            Err(pos) => {
                // This is the largest of the addresses smaller than `int`,
                // i.e. the greatest lower bound (glb).
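                // (For illustration: with hypothetical blocks based at 0x100 and 0x200,
                // `int == 0x180` falls in the block based at 0x100, at offset 0x80.)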
                let (glb, alloc_id) = global_state.int_to_ptr_map[pos - 1];
                // This never overflows because `int >= glb`.
                let offset = int - glb;
                // If the offset exceeds the size of the allocation, this access is illegal
                // (`offset == size`, i.e. a one-past-the-end pointer, is fine, hence the `<=`).
                if offset <= memory.get(alloc_id)?.size.bytes() {
                    // This pointer is untagged because it was created from a cast.
                    Pointer::new_with_tag(alloc_id, Size::from_bytes(offset), Tag::Untagged)
                } else {
                    throw_unsup!(DanglingPointerDeref)
                }
            }
        })
    }

    pub fn ptr_to_int(
        ptr: Pointer<Tag>,
        memory: &Memory<'mir, 'tcx, Evaluator<'tcx>>,
    ) -> InterpResult<'tcx, u64> {
        let mut global_state = memory.extra.intptrcast.borrow_mut();
        let global_state = &mut *global_state;

        // There is nothing wrong with a raw pointer being cast to an integer only after
        // it became dangling. Hence `AllocCheck::MaybeDead`.
        let (size, align) = memory.get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)?;

        let base_addr = match global_state.base_addr.entry(ptr.alloc_id) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                // This allocation does not have a base address yet, so pick one.
                // Leave some space after the previous allocation so that the new base
                // address has a chance to be less aligned.
                let slack = {
                    let mut rng = memory.extra.rng.borrow_mut();
                    // `slack` is uniform in `0..16`, so
                    // `(global_state.next_base_addr + slack) % 16` is uniformly distributed too.
                    rng.gen_range(0, 16)
                };
                // From `next_base_addr + slack`, round up to adjust for alignment.
                let base_addr = global_state.next_base_addr.checked_add(slack).unwrap();
                let base_addr = Self::align_addr(base_addr, align.bytes());
                entry.insert(base_addr);
                trace!(
                    "Assigning base address {:#x} to allocation {:?} (slack: {}, align: {})",
                    base_addr, ptr.alloc_id, slack, align.bytes(),
                );
                // Remember next base address. If this allocation is zero-sized, leave a gap
                // of at least 1 to avoid two allocations having the same base address.
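                // (For illustration: a hypothetical 4-byte allocation based at 0x110 would
                // push `next_base_addr` to 0x114, while a zero-sized one would push it to 0x111.)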
                global_state.next_base_addr = base_addr.checked_add(max(size.bytes(), 1)).unwrap();
                // Given that `next_base_addr` increases with each allocation, pushing the
                // corresponding tuple keeps `int_to_ptr_map` sorted.
                global_state.int_to_ptr_map.push((base_addr, ptr.alloc_id));

                base_addr
            }
        };

        debug_assert_eq!(base_addr % align.bytes(), 0); // sanity check
        // Add offset with the right kind of pointer-overflowing arithmetic.
        let dl = memory.data_layout();
        Ok(dl.overflowing_offset(base_addr, ptr.offset.bytes()).0)
    }

    /// Shifts `addr` to make it aligned with `align` by rounding `addr` to the smallest multiple
    /// of `align` that is greater than or equal to `addr`.
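    /// (For example, `align_addr(37, 4) == 40`, while `align_addr(44, 4) == 44`.)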
    fn align_addr(addr: u64, align: u64) -> u64 {
        match addr % align {
            0 => addr,
            rem => addr.checked_add(align).unwrap() - rem,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_align_addr() {
        assert_eq!(GlobalState::align_addr(37, 4), 40);
        assert_eq!(GlobalState::align_addr(44, 4), 44);
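        // Extra cases, added for illustration: an already-aligned address is
        // returned unchanged, and an alignment of 1 never moves the address.
        assert_eq!(GlobalState::align_addr(0, 4), 0);
        assert_eq!(GlobalState::align_addr(44, 1), 44);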
    }
}