// rust/src/helpers.rs

use std::mem;

use rustc::hir::def_id::{DefId, CRATE_DEF_INDEX};
use rustc::mir;
use rustc::ty::{
    self,
    layout::{self, Align, LayoutOf, Size},
};

use rand::RngCore;

use crate::*;

impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}

pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    /// Gets an instance for a path.
    fn resolve_path(&self, path: &[&str]) -> InterpResult<'tcx, ty::Instance<'tcx>> {
        let this = self.eval_context_ref();
        this.tcx
            .crates()
            .iter()
            .find(|&&krate| this.tcx.original_crate_name(krate).as_str() == path[0])
            .and_then(|krate| {
                let krate = DefId {
                    krate: *krate,
                    index: CRATE_DEF_INDEX,
                };
                let mut items = this.tcx.item_children(krate);
                let mut path_it = path.iter().skip(1).peekable();

                while let Some(segment) = path_it.next() {
                    for item in mem::replace(&mut items, Default::default()).iter() {
                        if item.ident.name.as_str() == *segment {
                            if path_it.peek().is_none() {
                                return Some(ty::Instance::mono(this.tcx.tcx, item.res.def_id()));
                            }

                            items = this.tcx.item_children(item.res.def_id());
                            break;
                        }
                    }
                }
                None
            })
            .ok_or_else(|| {
                let path = path.iter().map(|&s| s.to_owned()).collect();
                err_unsup!(PathNotFound(path)).into()
            })
    }
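
    // A hedged usage sketch (the constant name is just an example, not from
    // this file): callers resolve `libc` items to compute type layouts, as
    // `write_c_ints` below does:
    //     let ty = this.resolve_path(&["libc", "time_t"])?.ty(*tcx);
    //     let size = this.layout_of(ty)?.size;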

    /// Write a 0 of the appropriate size to `dest`.
    fn write_null(&mut self, dest: PlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
        self.eval_context_mut().write_scalar(Scalar::from_int(0, dest.layout.size), dest)
    }

    /// Test if this scalar equals 0.
    fn is_null(&self, val: Scalar<Tag>) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_ref();
        let null = Scalar::from_int(0, this.memory().pointer_size());
        this.ptr_eq(val, null)
    }

    /// Turn a `Scalar` into an `Option<NonNullScalar>`.
    fn test_null(&self, val: Scalar<Tag>) -> InterpResult<'tcx, Option<Scalar<Tag>>> {
        let this = self.eval_context_ref();
        Ok(if this.is_null(val)? {
            None
        } else {
            Some(val)
        })
    }
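
    // A hedged sketch (caller shape assumed, not from this file): shims can map
    // a C-style NULL argument to `None` before using the pointer, e.g.
    //     if let Some(ptr) = this.test_null(ptr_arg)? {
    //         // `ptr` is known to be non-null here.
    //     }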

    /// Get the `Place` for a local.
    fn local_place(&mut self, local: mir::Local) -> InterpResult<'tcx, PlaceTy<'tcx, Tag>> {
        let this = self.eval_context_mut();
        let place = mir::Place { base: mir::PlaceBase::Local(local), projection: Box::new([]) };
        this.eval_place(&place)
    }
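
    // A hedged usage sketch (caller shape assumed): a shim might evaluate a
    // local such as the return place and then write into it, e.g.
    //     let dest = this.local_place(mir::RETURN_PLACE)?;
    //     this.write_null(dest)?;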

    /// Generate some random bytes, and write them to the memory `ptr` points to.
    fn gen_random(
        &mut self,
        ptr: Scalar<Tag>,
        len: usize,
    ) -> InterpResult<'tcx> {
        // Some programs pass in a null pointer and a length of 0
        // to their platform's random-generation function (e.g. getrandom())
        // on Linux. For compatibility with these programs, we don't perform
        // any additional checks - it's okay if the pointer is invalid,
        // since we wouldn't actually be writing to it.
        if len == 0 {
            return Ok(());
        }
        let this = self.eval_context_mut();

        let ptr = this.memory().check_ptr_access(
            ptr,
            Size::from_bytes(len as u64),
            Align::from_bytes(1).unwrap(),
        )?.expect("we already checked for size 0");

        let mut data = vec![0; len];
        if this.machine.communicate {
            // Fill the buffer using the host's rng.
            getrandom::getrandom(&mut data)
                .map_err(|err| err_unsup_format!("getrandom failed: {}", err))?;
        } else {
            // Otherwise, use the deterministic rng stored in the machine state.
            let rng = this.memory_mut().extra.rng.get_mut();
            rng.fill_bytes(&mut data);
        }

        let tcx = &{ this.tcx.tcx };
        this.memory_mut().get_mut(ptr.alloc_id)?.write_bytes(tcx, ptr, &data)
    }
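
    // A hedged sketch (argument handling assumed, not from this file): a
    // `getrandom`-style foreign-function shim would extract its raw arguments
    // and delegate here, e.g.
    //     let ptr = this.read_scalar(args[0])?.not_undef()?;
    //     let len = this.read_scalar(args[1])?.to_usize(this)?;
    //     this.gen_random(ptr, len as usize)?;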

    /// Visits the memory covered by `place`, sensitive to freezing: the 3rd parameter
    /// will be true if this is frozen, false if this is in an `UnsafeCell`.
    fn visit_freeze_sensitive(
        &self,
        place: MPlaceTy<'tcx, Tag>,
        size: Size,
        mut action: impl FnMut(Pointer<Tag>, Size, bool) -> InterpResult<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        trace!("visit_frozen(place={:?}, size={:?})", *place, size);
        debug_assert_eq!(size,
            this.size_and_align_of_mplace(place)?
                .map(|(size, _)| size)
                .unwrap_or_else(|| place.layout.size)
        );
        // Track how far we have proceeded into the place. Everything to the left of
        // this offset has already been handled, in the sense that the frozen parts
        // have had `action` called on them.
        let mut end_ptr = place.ptr.assert_ptr();
        // Called when we detected an `UnsafeCell` at the given offset and size.
        // Calls `action` and advances `end_ptr`.
        let mut unsafe_cell_action = |unsafe_cell_ptr: Scalar<Tag>, unsafe_cell_size: Size| {
            let unsafe_cell_ptr = unsafe_cell_ptr.assert_ptr();
            debug_assert_eq!(unsafe_cell_ptr.alloc_id, end_ptr.alloc_id);
            debug_assert_eq!(unsafe_cell_ptr.tag, end_ptr.tag);
            // We assume that we are given the fields in increasing offset order,
            // and nothing else changes.
            let unsafe_cell_offset = unsafe_cell_ptr.offset;
            let end_offset = end_ptr.offset;
            assert!(unsafe_cell_offset >= end_offset);
            let frozen_size = unsafe_cell_offset - end_offset;
            // Everything between the end_ptr and this `UnsafeCell` is frozen.
            if frozen_size != Size::ZERO {
                action(end_ptr, frozen_size, /*frozen*/ true)?;
            }
            // This `UnsafeCell` is NOT frozen.
            if unsafe_cell_size != Size::ZERO {
                action(unsafe_cell_ptr, unsafe_cell_size, /*frozen*/ false)?;
            }
            // Update `end_ptr`.
            end_ptr = unsafe_cell_ptr.wrapping_offset(unsafe_cell_size, this);
            // Done.
            Ok(())
        };
        // Run a visitor.
        {
            let mut visitor = UnsafeCellVisitor {
                ecx: this,
                unsafe_cell_action: |place| {
                    trace!("unsafe_cell_action on {:?}", place.ptr);
                    // We need a size to go on.
                    let unsafe_cell_size = this.size_and_align_of_mplace(place)?
                        .map(|(size, _)| size)
                        // For extern types, just cover what we can.
                        .unwrap_or_else(|| place.layout.size);
                    // Now handle this `UnsafeCell`, unless it is empty.
                    if unsafe_cell_size != Size::ZERO {
                        unsafe_cell_action(place.ptr, unsafe_cell_size)
                    } else {
                        Ok(())
                    }
                },
            };
            visitor.visit_value(place)?;
        }
        // The part between the end_ptr and the end of the place is also frozen.
        // So pretend there is a 0-sized `UnsafeCell` at the end.
        unsafe_cell_action(place.ptr.ptr_wrapping_offset(size, this), Size::ZERO)?;
        // Done!
        return Ok(());
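
        // A hedged sketch of a caller (shape assumed, not from this file):
        // Stacked Borrows-style retagging can grant different permissions to
        // the frozen and `UnsafeCell` parts of a pointee, e.g.
        //     this.visit_freeze_sensitive(place, size, |ptr, size, frozen| {
        //         // Read-only for frozen bytes, read-write for `UnsafeCell` bytes.
        //         Ok(())
        //     })?;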

        /// Visiting the memory covered by a `MemPlace`, being aware of
        /// whether we are inside an `UnsafeCell` or not.
        struct UnsafeCellVisitor<'ecx, 'mir, 'tcx, F>
            where F: FnMut(MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx>
        {
            ecx: &'ecx MiriEvalContext<'mir, 'tcx>,
            unsafe_cell_action: F,
        }

        impl<'ecx, 'mir, 'tcx, F>
            ValueVisitor<'mir, 'tcx, Evaluator<'tcx>>
        for
            UnsafeCellVisitor<'ecx, 'mir, 'tcx, F>
        where
            F: FnMut(MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx>
        {
            type V = MPlaceTy<'tcx, Tag>;

            #[inline(always)]
            fn ecx(&self) -> &MiriEvalContext<'mir, 'tcx> {
                &self.ecx
            }

            // Hook to detect `UnsafeCell`.
            fn visit_value(&mut self, v: MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx>
            {
                trace!("UnsafeCellVisitor: {:?} {:?}", *v, v.layout.ty);
                let is_unsafe_cell = match v.layout.ty.kind {
                    ty::Adt(adt, _) => Some(adt.did) == self.ecx.tcx.lang_items().unsafe_cell_type(),
                    _ => false,
                };
                if is_unsafe_cell {
                    // We do not have to recurse further; this is an `UnsafeCell`.
                    (self.unsafe_cell_action)(v)
                } else if self.ecx.type_is_freeze(v.layout.ty) {
                    // This is `Freeze`; there cannot be an `UnsafeCell`.
                    Ok(())
                } else {
                    // We do not want to actually read from memory for this visit. So, before
                    // walking this value, we have to make sure it is not a
                    // `Variants::Multiple`.
                    match v.layout.variants {
                        layout::Variants::Multiple { .. } => {
                            // A multi-variant enum, or a generator, or the like.
                            // Treat this like a union: without reading from memory,
                            // we cannot determine the variant we are in. Reading from
                            // memory would be subject to Stacked Borrows rules, leading
                            // to all sorts of "funny" recursion.
                            // We only end up here if the type is *not* freeze, so we just call the
                            // `UnsafeCell` action.
                            (self.unsafe_cell_action)(v)
                        }
                        layout::Variants::Single { .. } => {
                            // Proceed further; try to find where exactly that `UnsafeCell`
                            // is hiding.
                            self.walk_value(v)
                        }
                    }
                }
            }

            // Make sure we visit aggregates in increasing offset order.
            fn visit_aggregate(
                &mut self,
                place: MPlaceTy<'tcx, Tag>,
                fields: impl Iterator<Item=InterpResult<'tcx, MPlaceTy<'tcx, Tag>>>,
            ) -> InterpResult<'tcx> {
                match place.layout.fields {
                    layout::FieldPlacement::Array { .. } => {
                        // For the array layout, we know the iterator will yield sorted elements, so
                        // we can avoid the allocation.
                        self.walk_aggregate(place, fields)
                    }
                    layout::FieldPlacement::Arbitrary { .. } => {
                        // Gather the subplaces and sort them before visiting.
                        let mut places = fields.collect::<InterpResult<'tcx, Vec<MPlaceTy<'tcx, Tag>>>>()?;
                        places.sort_by_key(|place| place.ptr.assert_ptr().offset);
                        self.walk_aggregate(place, places.into_iter().map(Ok))
                    }
                    layout::FieldPlacement::Union { .. } => {
                        // Unions are handled separately by `visit_union`; reaching one
                        // here would be a bug.
                        bug!("a union is not an aggregate we should ever visit")
                    }
                }
            }

            // We have to do *something* for unions.
            fn visit_union(&mut self, v: MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx>
            {
                // With unions, we fall back to whatever the type says, to hopefully be consistent
                // with LLVM IR.
                // FIXME: are we consistent, and is this really the behavior we want?
                let frozen = self.ecx.type_is_freeze(v.layout.ty);
                if frozen {
                    Ok(())
                } else {
                    (self.unsafe_cell_action)(v)
                }
            }

            // We should never get to a primitive, but always short-circuit somewhere above.
            fn visit_primitive(&mut self, _v: MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx>
            {
                bug!("we should always short-circuit before coming to a primitive")
            }
        }
    }

    /// Helper function to get a `libc` constant as a `Scalar`.
    fn eval_libc(&mut self, name: &str) -> InterpResult<'tcx, Scalar<Tag>> {
        self.eval_context_mut()
            .eval_path_scalar(&["libc", name])?
            .ok_or_else(|| err_unsup_format!("Path libc::{} cannot be resolved.", name))?
            .not_undef()
    }

    /// Helper function to get a `libc` constant as an `i32`.
    fn eval_libc_i32(&mut self, name: &str) -> InterpResult<'tcx, i32> {
        self.eval_libc(name)?.to_i32()
    }
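
    // A hedged usage sketch (the constant name is just an example): shims look
    // up target-specific `errno` values and similar constants this way, e.g.
    //     let ebadf = this.eval_libc_i32("EBADF")?;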

    /// Writes each value in `bits`, in order, to consecutive locations starting at `ptr`,
    /// using the size of the correspondingly named `libc` integer type.
    fn write_c_ints(
        &mut self,
        ptr: &Pointer<Tag>,
        bits: &[i128],
        ty_names: &[&str],
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let tcx = &{ this.tcx.tcx };

        // Determine the size of each target type by resolving its `libc`
        // definition and asking for the layout.
        let mut sizes = Vec::new();
        for name in ty_names {
            let ty = this.resolve_path(&["libc", name])?.ty(*tcx);
            sizes.push(this.layout_of(ty)?.size);
        }

        let allocation = this.memory_mut().get_mut(ptr.alloc_id)?;
        let mut offset = Size::from_bytes(0);
        for (&value, size) in bits.iter().zip(sizes) {
            // If `value` does not fit in `size` bits, we error instead of letting
            // `Scalar::from_int` panic.
            let truncated = truncate(value as u128, size);
            if sign_extend(truncated, size) as i128 != value {
                throw_unsup_format!(
                    "Signed value {:#x} does not fit in {} bits",
                    value,
                    size.bits()
                )
            }
            allocation.write_scalar(
                tcx,
                ptr.offset(offset, tcx)?,
                Scalar::from_int(value, size).into(),
                size,
            )?;
            offset += size;
        }
        Ok(())
    }
}
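
// A hedged, self-contained illustration of the fits-in-`size`-bits check in
// `write_c_ints` (plain integer arithmetic, independent of the interpreter):
// truncating to the target width and sign-extending back must round-trip.
// For an 8-bit target type:
//     value = 200:  truncate -> 0xC8, sign-extend -> -56; -56 != 200  => error
//     value = -56:  truncate -> 0xC8, sign-extend -> -56; -56 == -56  => ok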