Merge pull request #487 from solson/rustup

Rustup
Ralf Jung 2018-10-22 20:42:58 +02:00 committed by GitHub
commit 25d7e1937e
40 changed files with 804 additions and 416 deletions


@ -1 +1 @@
nightly-2018-10-14
nightly-2018-10-22


@ -12,8 +12,8 @@ pub trait EvalContextExt<'tcx, 'mir> {
fn emulate_foreign_item(
&mut self,
def_id: DefId,
args: &[OpTy<'tcx>],
dest: PlaceTy<'tcx>,
args: &[OpTy<'tcx, Borrow>],
dest: PlaceTy<'tcx, Borrow>,
ret: mir::BasicBlock,
) -> EvalResult<'tcx>;
@ -24,28 +24,28 @@ pub trait EvalContextExt<'tcx, 'mir> {
fn emulate_missing_fn(
&mut self,
path: String,
args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
args: &[OpTy<'tcx, Borrow>],
dest: Option<PlaceTy<'tcx, Borrow>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx>;
fn find_fn(
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
args: &[OpTy<'tcx, Borrow>],
dest: Option<PlaceTy<'tcx, Borrow>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>>;
fn write_null(&mut self, dest: PlaceTy<'tcx>) -> EvalResult<'tcx>;
fn write_null(&mut self, dest: PlaceTy<'tcx, Borrow>) -> EvalResult<'tcx>;
}
impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalContext<'a, 'mir, 'tcx> {
fn find_fn(
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
args: &[OpTy<'tcx, Borrow>],
dest: Option<PlaceTy<'tcx, Borrow>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>> {
trace!("eval_fn_call: {:#?}, {:?}", instance, dest.map(|place| *place));
@ -104,8 +104,8 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
fn emulate_foreign_item(
&mut self,
def_id: DefId,
args: &[OpTy<'tcx>],
dest: PlaceTy<'tcx>,
args: &[OpTy<'tcx, Borrow>],
dest: PlaceTy<'tcx, Borrow>,
ret: mir::BasicBlock,
) -> EvalResult<'tcx> {
let attrs = self.tcx.get_attrs(def_id);
@ -114,6 +114,10 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
None => self.tcx.item_name(def_id).as_str(),
};
// All these functions take raw pointers, so if we access memory directly
// (as opposed to through a place), we have to remember to erase any tag
// that might still hang around!
match &link_name[..] {
"malloc" => {
let size = self.read_scalar(args[0])?.to_usize(&self)?;
@ -127,10 +131,10 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
}
"free" => {
let ptr = self.read_scalar(args[0])?.not_undef()?;
let ptr = self.read_scalar(args[0])?.not_undef()?.erase_tag(); // raw ptr operation, no tag
if !ptr.is_null() {
self.memory.deallocate(
ptr.to_ptr()?,
ptr.to_ptr()?.with_default_tag(),
None,
MiriMemoryKind::C.into(),
)?;
@ -167,7 +171,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
self.write_scalar(Scalar::Ptr(ptr), dest)?;
}
"__rust_dealloc" => {
let ptr = self.read_scalar(args[0])?.to_ptr()?;
let ptr = self.read_scalar(args[0])?.to_ptr()?.erase_tag(); // raw ptr operation, no tag
let old_size = self.read_scalar(args[1])?.to_usize(&self)?;
let align = self.read_scalar(args[2])?.to_usize(&self)?;
if old_size == 0 {
@ -177,13 +181,13 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
self.memory.deallocate(
ptr,
ptr.with_default_tag(),
Some((Size::from_bytes(old_size), Align::from_bytes(align, align).unwrap())),
MiriMemoryKind::Rust.into(),
)?;
}
"__rust_realloc" => {
let ptr = self.read_scalar(args[0])?.to_ptr()?;
let ptr = self.read_scalar(args[0])?.to_ptr()?.erase_tag(); // raw ptr operation, no tag
let old_size = self.read_scalar(args[1])?.to_usize(&self)?;
let align = self.read_scalar(args[2])?.to_usize(&self)?;
let new_size = self.read_scalar(args[3])?.to_usize(&self)?;
@ -194,7 +198,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
let new_ptr = self.memory.reallocate(
ptr,
ptr.with_default_tag(),
Size::from_bytes(old_size),
Align::from_bytes(align, align).unwrap(),
Size::from_bytes(new_size),
@ -226,8 +230,8 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
"dlsym" => {
let _handle = self.read_scalar(args[0])?;
let symbol = self.read_scalar(args[1])?.to_ptr()?;
let symbol_name = self.memory.read_c_str(symbol)?;
let symbol = self.read_scalar(args[1])?.to_ptr()?.erase_tag();
let symbol_name = self.memory.read_c_str(symbol.with_default_tag())?;
let err = format!("bad c unicode symbol: {:?}", symbol_name);
let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err);
return err!(Unimplemented(format!(
@ -280,13 +284,13 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
return err!(MachineError("the evaluated program panicked".to_string())),
"memcmp" => {
let left = self.read_scalar(args[0])?.not_undef()?;
let right = self.read_scalar(args[1])?.not_undef()?;
let left = self.read_scalar(args[0])?.not_undef()?.erase_tag(); // raw ptr operation
let right = self.read_scalar(args[1])?.not_undef()?.erase_tag(); // raw ptr operation
let n = Size::from_bytes(self.read_scalar(args[2])?.to_usize(&self)?);
let result = {
let left_bytes = self.memory.read_bytes(left, n)?;
let right_bytes = self.memory.read_bytes(right, n)?;
let left_bytes = self.memory.read_bytes(left.with_default_tag(), n)?;
let right_bytes = self.memory.read_bytes(right.with_default_tag(), n)?;
use std::cmp::Ordering::*;
match left_bytes.cmp(right_bytes) {
@ -303,12 +307,12 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
}
"memrchr" => {
let ptr = self.read_scalar(args[0])?.not_undef()?;
let ptr = self.read_scalar(args[0])?.not_undef()?.erase_tag(); // raw ptr operation
let ptr = ptr.with_default_tag();
let val = self.read_scalar(args[1])?.to_bytes()? as u8;
let num = self.read_scalar(args[2])?.to_usize(&self)?;
if let Some(idx) = self.memory.read_bytes(ptr, Size::from_bytes(num))?.iter().rev().position(
|&c| c == val,
)
if let Some(idx) = self.memory.read_bytes(ptr, Size::from_bytes(num))?
.iter().rev().position(|&c| c == val)
{
let new_ptr = ptr.ptr_offset(Size::from_bytes(num - idx as u64 - 1), &self)?;
self.write_scalar(new_ptr, dest)?;
@ -318,7 +322,8 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
}
"memchr" => {
let ptr = self.read_scalar(args[0])?.not_undef()?;
let ptr = self.read_scalar(args[0])?.not_undef()?.erase_tag(); // raw ptr operation
let ptr = ptr.with_default_tag();
let val = self.read_scalar(args[1])?.to_bytes()? as u8;
let num = self.read_scalar(args[2])?.to_usize(&self)?;
if let Some(idx) = self.memory.read_bytes(ptr, Size::from_bytes(num))?.iter().position(
@ -334,8 +339,8 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
"getenv" => {
let result = {
let name_ptr = self.read_scalar(args[0])?.to_ptr()?;
let name = self.memory.read_c_str(name_ptr)?;
let name_ptr = self.read_scalar(args[0])?.to_ptr()?.erase_tag(); // raw ptr operation
let name = self.memory.read_c_str(name_ptr.with_default_tag())?;
match self.machine.env_vars.get(name) {
Some(&var) => Scalar::Ptr(var),
None => Scalar::ptr_null(*self.tcx),
@ -347,9 +352,9 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
"unsetenv" => {
let mut success = None;
{
let name_ptr = self.read_scalar(args[0])?.not_undef()?;
let name_ptr = self.read_scalar(args[0])?.not_undef()?.erase_tag(); // raw ptr operation
if !name_ptr.is_null() {
let name = self.memory.read_c_str(name_ptr.to_ptr()?)?;
let name = self.memory.read_c_str(name_ptr.to_ptr()?.with_default_tag())?;
if !name.is_empty() && !name.contains(&b'=') {
success = Some(self.machine.env_vars.remove(name));
}
@ -368,11 +373,11 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
"setenv" => {
let mut new = None;
{
let name_ptr = self.read_scalar(args[0])?.not_undef()?;
let value_ptr = self.read_scalar(args[1])?.to_ptr()?;
let value = self.memory.read_c_str(value_ptr)?;
let name_ptr = self.read_scalar(args[0])?.not_undef()?.erase_tag(); // raw ptr operation
let value_ptr = self.read_scalar(args[1])?.to_ptr()?.erase_tag(); // raw ptr operation
let value = self.memory.read_c_str(value_ptr.with_default_tag())?;
if !name_ptr.is_null() {
let name = self.memory.read_c_str(name_ptr.to_ptr()?)?;
let name = self.memory.read_c_str(name_ptr.to_ptr()?.with_default_tag())?;
if !name.is_empty() && !name.contains(&b'=') {
new = Some((name.to_owned(), value.to_owned()));
}
@ -403,14 +408,14 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
"write" => {
let fd = self.read_scalar(args[0])?.to_bytes()?;
let buf = self.read_scalar(args[1])?.not_undef()?;
let buf = self.read_scalar(args[1])?.not_undef()?.erase_tag();
let n = self.read_scalar(args[2])?.to_bytes()? as u64;
trace!("Called write({:?}, {:?}, {:?})", fd, buf, n);
let result = if fd == 1 || fd == 2 {
// stdout/stderr
use std::io::{self, Write};
let buf_cont = self.memory.read_bytes(buf, Size::from_bytes(n))?;
let buf_cont = self.memory.read_bytes(buf.with_default_tag(), Size::from_bytes(n))?;
let res = if fd == 1 {
io::stdout().write(buf_cont)
} else {
@ -431,8 +436,8 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
}
"strlen" => {
let ptr = self.read_scalar(args[0])?.to_ptr()?;
let n = self.memory.read_c_str(ptr)?.len();
let ptr = self.read_scalar(args[0])?.to_ptr()?.erase_tag();
let n = self.memory.read_c_str(ptr.with_default_tag())?.len();
self.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?;
}
@ -478,7 +483,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
// Hook pthread calls that go to the thread-local storage memory subsystem
"pthread_key_create" => {
let key_ptr = self.read_scalar(args[0])?.to_ptr()?;
let key_ptr = self.read_scalar(args[0])?.to_ptr()?.erase_tag(); // raw ptr operation
// Extract the function type out of the signature (that seems easier than constructing it ourselves...)
let dtor = match self.read_scalar(args[1])?.not_undef()? {
@ -501,7 +506,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
return err!(OutOfTls);
}
self.memory.write_scalar(
key_ptr,
key_ptr.with_default_tag(),
key_layout.align,
Scalar::from_uint(key, key_layout.size).into(),
key_layout.size,
@ -637,8 +642,8 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
fn emulate_missing_fn(
&mut self,
path: String,
_args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
_args: &[OpTy<'tcx, Borrow>],
dest: Option<PlaceTy<'tcx, Borrow>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx> {
// In some cases in non-MIR libstd-mode, not having a destination is legit. Handle these early.
@ -686,7 +691,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for EvalContext<'a, '
Ok(())
}
fn write_null(&mut self, dest: PlaceTy<'tcx>) -> EvalResult<'tcx> {
fn write_null(&mut self, dest: PlaceTy<'tcx, Borrow>) -> EvalResult<'tcx> {
self.write_scalar(Scalar::from_int(0, dest.layout.size), dest)
}
}
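All of these shims follow the same pattern: read the argument as a tagged scalar, `erase_tag()` before treating it as a raw pointer, then `with_default_tag()` before handing it back to a memory API that expects a tagged pointer. A minimal standalone sketch of that idiom with toy types (the real `Pointer<Tag>` lives in rustc_mir::interpret and also carries an allocation id and offset):

// Toy model of the erase/retag idiom used in the shims above.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Ptr<Tag> {
    addr: u64,
    tag: Tag,
}

impl<Tag> Ptr<Tag> {
    // Raw-pointer operations (malloc, free, memcmp, ...) must not
    // depend on borrow tags, so the tag is dropped up front.
    fn erase_tag(self) -> Ptr<()> {
        Ptr { addr: self.addr, tag: () }
    }
}

impl Ptr<()> {
    // Re-attach the default tag before calling back into memory
    // APIs that expect a tagged pointer.
    fn with_default_tag<Tag: Default>(self) -> Ptr<Tag> {
        Ptr { addr: self.addr, tag: Tag::default() }
    }
}

#[derive(Clone, Copy, Debug, Default, PartialEq)]
struct Borrow; // stand-in for stacked_borrows::Borrow

fn main() {
    let arg = Ptr { addr: 0x1000, tag: Borrow };
    // What e.g. `free` does with its argument:
    let raw = arg.erase_tag();
    let retagged: Ptr<Borrow> = raw.with_default_tag();
    assert_eq!(retagged, arg);
}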


@ -12,7 +12,7 @@ pub trait ScalarExt {
fn to_bytes(self) -> EvalResult<'static, u128>;
}
impl ScalarExt for Scalar {
impl<Tag> ScalarExt for Scalar<Tag> {
fn to_bytes(self) -> EvalResult<'static, u128> {
match self {
Scalar::Bits { bits, size } => {
@ -24,7 +24,7 @@ impl ScalarExt for Scalar {
}
}
impl ScalarExt for ScalarMaybeUndef {
impl<Tag> ScalarExt for ScalarMaybeUndef<Tag> {
fn to_bytes(self) -> EvalResult<'static, u128> {
self.not_undef()?.to_bytes()
}
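Both helper impls only ever inspect the `Bits` variant, which is why they can be made generic over an arbitrary `Tag` with no bounds. A toy sketch of the same shape (the real `Scalar` is in rustc::mir::interpret; error handling simplified):

// Sketch: `to_bytes` never touches the tag, so `Tag` stays unconstrained.
enum Scalar<Tag> {
    Bits { bits: u128, size: u8 },
    Ptr(Tag), // simplified; real pointers carry alloc id + offset + tag
}

fn to_bytes<Tag>(s: Scalar<Tag>) -> Result<u128, &'static str> {
    match s {
        Scalar::Bits { bits, size } => {
            assert!(size > 0, "to_bytes needs a sized value");
            Ok(bits)
        }
        Scalar::Ptr(_) => Err("a pointer has no concrete integer value"),
    }
}

fn main() {
    assert_eq!(to_bytes(Scalar::<()>::Bits { bits: 42, size: 4 }), Ok(42));
}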


@ -3,10 +3,9 @@ use rustc::ty::layout::{self, LayoutOf, Size};
use rustc::ty;
use rustc::mir::interpret::{EvalResult, PointerArithmetic};
use rustc_mir::interpret::{EvalContext, PlaceTy, OpTy};
use super::{
Value, Scalar, ScalarMaybeUndef,
PlaceTy, OpTy, Value, Scalar, ScalarMaybeUndef, Borrow,
ScalarExt, OperatorEvalContextExt
};
@ -14,17 +13,17 @@ pub trait EvalContextExt<'tcx> {
fn call_intrinsic(
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: PlaceTy<'tcx>,
args: &[OpTy<'tcx, Borrow>],
dest: PlaceTy<'tcx, Borrow>,
) -> EvalResult<'tcx>;
}
impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, 'tcx> {
fn call_intrinsic(
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: PlaceTy<'tcx>,
args: &[OpTy<'tcx, Borrow>],
dest: PlaceTy<'tcx, Borrow>,
) -> EvalResult<'tcx> {
if self.emulate_intrinsic(instance, args, dest)? {
return Ok(());
@ -32,6 +31,10 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
let substs = instance.substs;
// All these intrinsics take raw pointers, so if we access memory directly
// (as opposed to through a place), we have to remember to erase any tag
// that might still hang around!
let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
match intrinsic_name {
"arith_offset" => {
@ -147,12 +150,13 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
let elem_size = elem_layout.size.bytes();
let count = self.read_scalar(args[2])?.to_usize(&self)?;
let elem_align = elem_layout.align;
let src = self.read_scalar(args[0])?.not_undef()?;
let dest = self.read_scalar(args[1])?.not_undef()?;
// erase tags: this is a raw ptr operation
let src = self.read_scalar(args[0])?.not_undef()?.erase_tag();
let dest = self.read_scalar(args[1])?.not_undef()?.erase_tag();
self.memory.copy(
src,
src.with_default_tag(),
elem_align,
dest,
dest.with_default_tag(),
elem_align,
Size::from_bytes(count * elem_size),
intrinsic_name.ends_with("_nonoverlapping"),
@ -429,7 +433,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
let ty = substs.type_at(0);
let ty_layout = self.layout_of(ty)?;
let val_byte = self.read_scalar(args[1])?.to_u8()?;
let ptr = self.read_scalar(args[0])?.not_undef()?;
let ptr = self.read_scalar(args[0])?.not_undef()?.erase_tag().with_default_tag();
let count = self.read_scalar(args[2])?.to_usize(&self)?;
self.memory.check_align(ptr, ty_layout.align)?;
self.memory.write_repeat(ptr, val_byte, ty_layout.size * count)?;


@ -16,16 +16,14 @@ extern crate syntax;
use std::collections::HashMap;
use std::borrow::Cow;
use rustc::ty::{self, TyCtxt, query::TyCtxtAt};
use rustc::ty::{self, Ty, TyCtxt, query::TyCtxtAt};
use rustc::ty::layout::{TyLayout, LayoutOf, Size};
use rustc::hir::def_id::DefId;
use rustc::mir;
use syntax::ast::Mutability;
use syntax::attr;
pub use rustc::mir::interpret::*;
pub use rustc_mir::interpret::*;
pub use rustc_mir::interpret::{self, AllocMap}; // resolve ambiguity
@ -34,9 +32,9 @@ mod operator;
mod intrinsic;
mod helpers;
mod tls;
mod locks;
mod range_map;
mod mono_hash_map;
mod stacked_borrows;
use fn_call::EvalContextExt as MissingFnsEvalContextExt;
use operator::EvalContextExt as OperatorEvalContextExt;
@ -46,6 +44,7 @@ use range_map::RangeMap;
#[allow(unused_imports)] // FIXME rustc bug https://github.com/rust-lang/rust/issues/53682
use helpers::{ScalarExt, EvalContextExt as HelpersEvalContextExt};
use mono_hash_map::MonoHashMap;
use stacked_borrows::{EvalContextExt as StackedBorEvalContextExt, Borrow};
pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
@ -56,7 +55,6 @@ pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
tcx.at(syntax::source_map::DUMMY_SP),
ty::ParamEnv::reveal_all(),
Evaluator::new(validate),
Default::default(),
);
let main_instance = ty::Instance::mono(ecx.tcx.tcx, main_id);
@ -124,9 +122,9 @@ pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
let foo = ecx.memory.allocate_static_bytes(b"foo\0");
let foo_ty = ecx.tcx.mk_imm_ptr(ecx.tcx.types.u8);
let foo_layout = ecx.layout_of(foo_ty)?;
let foo_place = ecx.allocate(foo_layout, MemoryKind::Stack)?; // will be interned in just a second
let foo_place = ecx.allocate(foo_layout, MiriMemoryKind::Env.into())?;
ecx.write_scalar(Scalar::Ptr(foo), foo_place.into())?;
ecx.memory.intern_static(foo_place.to_ptr()?.alloc_id, Mutability::Immutable)?;
ecx.memory.mark_immutable(foo_place.to_ptr()?.alloc_id)?;
ecx.write_scalar(foo_place.ptr, dest)?;
assert!(args.next().is_none(), "start lang item has more arguments than expected");
@ -222,23 +220,36 @@ pub enum MiriMemoryKind {
}
impl Into<MemoryKind<MiriMemoryKind>> for MiriMemoryKind {
#[inline(always)]
fn into(self) -> MemoryKind<MiriMemoryKind> {
MemoryKind::Machine(self)
}
}
impl MayLeak for MiriMemoryKind {
#[inline(always)]
fn may_leak(self) -> bool {
use MiriMemoryKind::*;
match self {
Rust | C => false,
Env | MutStatic => true,
}
}
}
#[derive(Clone, PartialEq, Eq)]
pub struct Evaluator<'tcx> {
/// Environment variables set by `setenv`
/// Miri does not expose env vars from the host to the emulated program
pub(crate) env_vars: HashMap<Vec<u8>, Pointer>,
pub(crate) env_vars: HashMap<Vec<u8>, Pointer<Borrow>>,
/// TLS state
pub(crate) tls: TlsData<'tcx>,
/// Whether to enforce the validity invariant
pub(crate) validate: bool,
/// Stacked Borrows state
pub(crate) stacked_borrows: stacked_borrows::State,
}
impl<'tcx> Evaluator<'tcx> {
@ -247,16 +258,23 @@ impl<'tcx> Evaluator<'tcx> {
env_vars: HashMap::default(),
tls: TlsData::default(),
validate,
stacked_borrows: stacked_borrows::State::new(),
}
}
}
impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
type MemoryData = ();
type MemoryKinds = MiriMemoryKind;
type PointerTag = (); // still WIP
#[allow(dead_code)] // FIXME https://github.com/rust-lang/rust/issues/47131
type MiriEvalContext<'a, 'mir, 'tcx> = EvalContext<'a, 'mir, 'tcx, Evaluator<'tcx>>;
type MemoryMap = MonoHashMap<AllocId, (MemoryKind<MiriMemoryKind>, Allocation<()>)>;
impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
type MemoryKinds = MiriMemoryKind;
type AllocExtra = stacked_borrows::Stacks;
type PointerTag = Borrow;
const ENABLE_PTR_TRACKING_HOOKS: bool = true;
type MemoryMap = MonoHashMap<AllocId, (MemoryKind<MiriMemoryKind>, Allocation<Borrow, Self::AllocExtra>)>;
const STATIC_KIND: Option<MiriMemoryKind> = Some(MiriMemoryKind::MutStatic);
@ -284,39 +302,42 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
}
/// Returns Ok() when the function was handled, fail otherwise
#[inline(always)]
fn find_fn(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
args: &[OpTy<'tcx, Borrow>],
dest: Option<PlaceTy<'tcx, Borrow>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>> {
ecx.find_fn(instance, args, dest, ret)
}
#[inline(always)]
fn call_intrinsic(
ecx: &mut rustc_mir::interpret::EvalContext<'a, 'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: PlaceTy<'tcx>,
args: &[OpTy<'tcx, Borrow>],
dest: PlaceTy<'tcx, Borrow>,
) -> EvalResult<'tcx> {
ecx.call_intrinsic(instance, args, dest)
}
#[inline(always)]
fn ptr_op(
ecx: &rustc_mir::interpret::EvalContext<'a, 'mir, 'tcx, Self>,
bin_op: mir::BinOp,
left: Scalar,
left: Scalar<Borrow>,
left_layout: TyLayout<'tcx>,
right: Scalar,
right: Scalar<Borrow>,
right_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, (Scalar, bool)> {
) -> EvalResult<'tcx, (Scalar<Borrow>, bool)> {
ecx.ptr_op(bin_op, left, left_layout, right, right_layout)
}
fn box_alloc(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
dest: PlaceTy<'tcx>,
dest: PlaceTy<'tcx, Borrow>,
) -> EvalResult<'tcx> {
trace!("box_alloc for {:?}", dest.layout.ty);
// Call the `exchange_malloc` lang item
@ -356,7 +377,7 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
fn find_foreign_static(
tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
def_id: DefId,
) -> EvalResult<'tcx, Cow<'tcx, Allocation>> {
) -> EvalResult<'tcx, Cow<'tcx, Allocation<Borrow, Self::AllocExtra>>> {
let attrs = tcx.get_attrs(def_id);
let link_name = match attr::first_attr_value_str_by_name(&attrs, "link_name") {
Some(name) => name.as_str(),
@ -376,16 +397,7 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
Ok(Cow::Owned(alloc))
}
fn validation_op(
_ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
_op: ::rustc::mir::ValidationOp,
_operand: &::rustc::mir::ValidationOperand<'tcx, ::rustc::mir::Place<'tcx>>,
) -> EvalResult<'tcx> {
// FIXME: prevent this from ICEing
//ecx.validation_op(op, operand)
Ok(())
}
#[inline(always)]
fn before_terminator(_ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>) -> EvalResult<'tcx>
{
// We are not interested in detecting loops
@ -394,8 +406,67 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
fn static_with_default_tag(
alloc: &'_ Allocation
) -> Cow<'_, Allocation<Self::PointerTag>> {
let alloc = alloc.clone();
) -> Cow<'_, Allocation<Borrow, Self::AllocExtra>> {
let alloc: Allocation<Borrow, Self::AllocExtra> = Allocation {
bytes: alloc.bytes.clone(),
relocations: Relocations::from_presorted(
alloc.relocations.iter()
.map(|&(offset, ((), alloc))| (offset, (Borrow::default(), alloc)))
.collect()
),
undef_mask: alloc.undef_mask.clone(),
align: alloc.align,
mutability: alloc.mutability,
extra: Self::AllocExtra::default(),
};
Cow::Owned(alloc)
}
#[inline(always)]
fn memory_accessed(
alloc: &Allocation<Borrow, Self::AllocExtra>,
ptr: Pointer<Borrow>,
size: Size,
access: MemoryAccess,
) -> EvalResult<'tcx> {
alloc.extra.memory_accessed(ptr, size, access)
}
#[inline(always)]
fn memory_deallocated(
alloc: &mut Allocation<Self::PointerTag, Self::AllocExtra>,
ptr: Pointer<Borrow>,
) -> EvalResult<'tcx> {
alloc.extra.memory_deallocated(ptr)
}
#[inline(always)]
fn tag_reference(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
ptr: Pointer<Borrow>,
pointee_ty: Ty<'tcx>,
pointee_size: Size,
borrow_kind: Option<mir::BorrowKind>,
) -> EvalResult<'tcx, Borrow> {
if !ecx.machine.validate {
// No tracking
Ok(Borrow::default())
} else {
ecx.tag_reference(ptr, pointee_ty, pointee_size, borrow_kind)
}
}
#[inline(always)]
fn tag_dereference(
ecx: &EvalContext<'a, 'mir, 'tcx, Self>,
ptr: Pointer<Borrow>,
ptr_ty: Ty<'tcx>,
) -> EvalResult<'tcx, Borrow> {
if !ecx.machine.validate {
// No tracking
Ok(Borrow::default())
} else {
ecx.tag_dereference(ptr, ptr_ty)
}
}
}
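Taken together, these hooks are how Stacked Borrows observes the interpreted program. As an illustration, a tiny guest program annotated with the hook each step would trigger (hook names are from this diff; the exact firing points inside the rustc_mir interpreter core are an assumption):

fn main() {
    let mut x = 0u8;         // stack allocation
    let r = &mut x;          // tag_reference: a fresh Uniq tag for `r`
    *r = 5;                  // tag_dereference, then memory_accessed (write)
    let raw = r as *mut u8;  // cast to raw: later accesses use the Raw tag
    unsafe { *raw = 7; }     // memory_accessed through a Raw-tagged pointer
}                            // frame cleanup: memory_deallocated for `x`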


@ -1,94 +0,0 @@
#![allow(unused)]
use super::*;
use rustc::middle::region;
use rustc::ty::layout::Size;
////////////////////////////////////////////////////////////////////////////////
// Locks
////////////////////////////////////////////////////////////////////////////////
// Just some dummy to keep this compiling; I think some of this will be useful later
type AbsPlace<'tcx> = ::rustc::ty::Ty<'tcx>;
/// Information about a lock that is currently held.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct LockInfo<'tcx> {
/// Stores for which lifetimes (of the original write lock) we got
/// which suspensions.
suspended: HashMap<WriteLockId<'tcx>, Vec<region::Scope>>,
/// The current state of the lock that's actually effective.
pub active: Lock,
}
/// Write locks are identified by a stack frame and an "abstract" (untyped) place.
/// It may be tempting to use the lifetime as identifier, but that does not work
/// for two reasons:
/// * First of all, due to subtyping, the same lock may be referred to with different
/// lifetimes.
/// * Secondly, different write locks may actually have the same lifetime. See `test2`
/// in `run-pass/many_shr_bor.rs`.
/// The Id is "captured" when the lock is first suspended; at that point, the borrow checker
/// considers the path frozen and hence the Id remains stable.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct WriteLockId<'tcx> {
frame: usize,
path: AbsPlace<'tcx>,
}
use rustc::mir::interpret::Lock::*;
use rustc::mir::interpret::Lock;
impl<'tcx> Default for LockInfo<'tcx> {
fn default() -> Self {
LockInfo::new(NoLock)
}
}
impl<'tcx> LockInfo<'tcx> {
fn new(lock: Lock) -> LockInfo<'tcx> {
LockInfo {
suspended: HashMap::new(),
active: lock,
}
}
fn access_permitted(&self, frame: Option<usize>, access: AccessKind) -> bool {
use super::AccessKind::*;
match (&self.active, access) {
(&NoLock, _) => true,
(&ReadLock(ref lfts), Read) => {
assert!(!lfts.is_empty(), "Someone left an empty read lock behind.");
// Read access to read-locked region is okay, no matter who's holding the read lock.
true
}
(&WriteLock(ref lft), _) => {
// All access is okay if we are the ones holding it
Some(lft.frame) == frame
}
_ => false, // Nothing else is okay.
}
}
}
impl<'tcx> RangeMap<LockInfo<'tcx>> {
pub fn check(
&self,
frame: Option<usize>,
offset: u64,
len: u64,
access: AccessKind,
) -> Result<(), LockInfo<'tcx>> {
if len == 0 {
return Ok(());
}
for lock in self.iter(offset, len) {
// Check if the lock is in conflict with the access.
if !lock.access_permitted(frame, access) {
return Err(lock.clone());
}
}
Ok(())
}
}


@ -7,44 +7,44 @@ pub trait EvalContextExt<'tcx> {
fn ptr_op(
&self,
bin_op: mir::BinOp,
left: Scalar,
left: Scalar<Borrow>,
left_layout: TyLayout<'tcx>,
right: Scalar,
right: Scalar<Borrow>,
right_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, (Scalar, bool)>;
) -> EvalResult<'tcx, (Scalar<Borrow>, bool)>;
fn ptr_int_arithmetic(
&self,
bin_op: mir::BinOp,
left: Pointer,
left: Pointer<Borrow>,
right: u128,
signed: bool,
) -> EvalResult<'tcx, (Scalar, bool)>;
) -> EvalResult<'tcx, (Scalar<Borrow>, bool)>;
fn ptr_eq(
&self,
left: Scalar,
right: Scalar,
left: Scalar<Borrow>,
right: Scalar<Borrow>,
size: Size,
) -> EvalResult<'tcx, bool>;
fn pointer_offset_inbounds(
&self,
ptr: Scalar,
ptr: Scalar<Borrow>,
pointee_ty: Ty<'tcx>,
offset: i64,
) -> EvalResult<'tcx, Scalar>;
) -> EvalResult<'tcx, Scalar<Borrow>>;
}
impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, 'tcx> {
fn ptr_op(
&self,
bin_op: mir::BinOp,
left: Scalar,
left: Scalar<Borrow>,
left_layout: TyLayout<'tcx>,
right: Scalar,
right: Scalar<Borrow>,
right_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, (Scalar, bool)> {
) -> EvalResult<'tcx, (Scalar<Borrow>, bool)> {
use rustc::mir::BinOp::*;
trace!("ptr_op: {:?} {:?} {:?}", left, bin_op, right);
@ -124,8 +124,8 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
fn ptr_eq(
&self,
left: Scalar,
right: Scalar,
left: Scalar<Borrow>,
right: Scalar<Borrow>,
size: Size,
) -> EvalResult<'tcx, bool> {
Ok(match (left, right) {
@ -203,13 +203,13 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
fn ptr_int_arithmetic(
&self,
bin_op: mir::BinOp,
left: Pointer,
left: Pointer<Borrow>,
right: u128,
signed: bool,
) -> EvalResult<'tcx, (Scalar, bool)> {
) -> EvalResult<'tcx, (Scalar<Borrow>, bool)> {
use rustc::mir::BinOp::*;
fn map_to_primval((res, over): (Pointer, bool)) -> (Scalar, bool) {
fn map_to_primval((res, over): (Pointer<Borrow>, bool)) -> (Scalar<Borrow>, bool) {
(Scalar::Ptr(res), over)
}
@ -237,7 +237,14 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
if right & base_mask == base_mask {
// Case 1: The base address bits are all preserved, i.e., right is all-1 there
let offset = (left.offset.bytes() as u128 & right) as u64;
(Scalar::Ptr(Pointer::new(left.alloc_id, Size::from_bytes(offset))), false)
(
Scalar::Ptr(Pointer::new_with_tag(
left.alloc_id,
Size::from_bytes(offset),
left.tag,
)),
false,
)
} else if right & base_mask == 0 {
// Case 2: The base address bits are all taken away, i.e., right is all-0 there
(Scalar::Bits { bits: (left.offset.bytes() as u128) & right, size: ptr_size }, false)
@ -277,10 +284,10 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
/// allocation.
fn pointer_offset_inbounds(
&self,
ptr: Scalar,
ptr: Scalar<Borrow>,
pointee_ty: Ty<'tcx>,
offset: i64,
) -> EvalResult<'tcx, Scalar> {
) -> EvalResult<'tcx, Scalar<Borrow>> {
// FIXME: assuming here that type size is < i64::max_value()
let pointee_size = self.layout_of(pointee_ty)?.size.bytes() as i64;
let offset = offset.checked_mul(pointee_size).ok_or_else(|| EvalErrorKind::Overflow(mir::BinOp::Mul))?;


@ -9,11 +9,20 @@
use std::collections::BTreeMap;
use std::ops;
use rustc::ty::layout::Size;
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RangeMap<T> {
map: BTreeMap<Range, T>,
}
impl<T> Default for RangeMap<T> {
#[inline(always)]
fn default() -> Self {
RangeMap::new()
}
}
// The derived `Ord` impl sorts first by the first field, then, if the fields are the same,
// by the second field.
// This is exactly what we need for our purposes, since a range query on a BTreeSet/BTreeMap will give us all ranges whose `start` lies in the queried window.
@ -21,14 +30,19 @@ pub struct RangeMap<T> {
// At the same time the `end` is irrelevant for the sorting and range searching, but used for the check.
// This kind of search breaks, if `end < start`, so don't do that!
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub struct Range {
struct Range {
start: u64,
end: u64, // Invariant: end > start
}
impl Range {
/// Compute a range of ranges that contains all ranges overlapping with [offset, offset+len)
fn range(offset: u64, len: u64) -> ops::Range<Range> {
assert!(len > 0);
if len == 0 {
// We can produce an empty range, nothing overlaps with this.
let r = Range { start: 0, end: 1 };
return r..r;
}
// We select all elements that are within
// the range given by the offset into the allocation and the length.
// This is sound if all ranges that intersect with the argument range, are in the
@ -46,14 +60,20 @@ impl Range {
left..right
}
/// Tests if all of [offset, offset+len) are contained in this range.
/// Tests if any element of [offset, offset+len) is contained in this range.
#[inline(always)]
fn overlaps(&self, offset: u64, len: u64) -> bool {
assert!(len > 0);
offset < self.end && offset + len >= self.start
if len == 0 {
// `offset` totally does not matter, we cannot overlap with an empty interval
false
} else {
offset < self.end && offset.checked_add(len).unwrap() >= self.start
}
}
}
impl<T> RangeMap<T> {
#[inline(always)]
pub fn new() -> RangeMap<T> {
RangeMap { map: BTreeMap::new() }
}
@ -63,10 +83,9 @@ impl<T> RangeMap<T> {
offset: u64,
len: u64,
) -> impl Iterator<Item = (&'a Range, &'a T)> + 'a {
assert!(len > 0);
self.map.range(Range::range(offset, len)).filter_map(
move |(range,
data)| {
move |(range, data)| {
debug_assert!(len > 0);
if range.overlaps(offset, len) {
Some((range, data))
} else {
@ -76,8 +95,12 @@ impl<T> RangeMap<T> {
)
}
pub fn iter<'a>(&'a self, offset: u64, len: u64) -> impl Iterator<Item = &'a T> + 'a {
self.iter_with_range(offset, len).map(|(_, data)| data)
pub fn iter<'a>(&'a self, offset: Size, len: Size) -> impl Iterator<Item = &'a T> + 'a {
self.iter_with_range(offset.bytes(), len.bytes()).map(|(_, data)| data)
}
pub fn iter_mut_all<'a>(&'a mut self) -> impl Iterator<Item = &'a mut T> + 'a {
self.map.values_mut()
}
fn split_entry_at(&mut self, offset: u64)
@ -114,28 +137,30 @@ impl<T> RangeMap<T> {
}
}
pub fn iter_mut_all<'a>(&'a mut self) -> impl Iterator<Item = &'a mut T> + 'a {
self.map.values_mut()
}
/// Provide mutable iteration over everything in the given range. As a side-effect,
/// this will split entries in the map that are only partially hit by the given range,
/// to make sure that when they are mutated, the effect is constrained to the given range.
/// If there are gaps, leave them be.
pub fn iter_mut_with_gaps<'a>(
&'a mut self,
offset: u64,
len: u64,
offset: Size,
len: Size,
) -> impl Iterator<Item = &'a mut T> + 'a
where
T: Clone,
{
assert!(len > 0);
// Preparation: Split first and last entry as needed.
self.split_entry_at(offset);
self.split_entry_at(offset + len);
let offset = offset.bytes();
let len = len.bytes();
if len > 0 {
// Preparation: Split first and last entry as needed.
self.split_entry_at(offset);
self.split_entry_at(offset + len);
}
// Now we can provide a mutable iterator
self.map.range_mut(Range::range(offset, len)).filter_map(
move |(&range, data)| {
debug_assert!(len > 0);
if range.overlaps(offset, len) {
assert!(
offset <= range.start && offset + len >= range.end,
@ -151,35 +176,41 @@ impl<T> RangeMap<T> {
}
/// Provide a mutable iterator over everything in the given range, with the same side-effects as
/// iter_mut_with_gaps. Furthermore, if there are gaps between ranges, fill them with the given default.
/// iter_mut_with_gaps. Furthermore, if there are gaps between ranges, fill them with the given default
/// before yielding them in the iterator.
/// This is also how you insert.
pub fn iter_mut<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator<Item = &'a mut T> + 'a
pub fn iter_mut<'a>(&'a mut self, offset: Size, len: Size) -> impl Iterator<Item = &'a mut T> + 'a
where
T: Clone + Default,
{
// Do a first iteration to collect the gaps
let mut gaps = Vec::new();
let mut last_end = offset;
for (range, _) in self.iter_with_range(offset, len) {
if last_end < range.start {
if len.bytes() > 0 {
let offset = offset.bytes();
let len = len.bytes();
// Do a first iteration to collect the gaps
let mut gaps = Vec::new();
let mut last_end = offset;
for (range, _) in self.iter_with_range(offset, len) {
if last_end < range.start {
gaps.push(Range {
start: last_end,
end: range.start,
});
}
last_end = range.end;
}
if last_end < offset + len {
gaps.push(Range {
start: last_end,
end: range.start,
end: offset + len,
});
}
last_end = range.end;
}
if last_end < offset + len {
gaps.push(Range {
start: last_end,
end: offset + len,
});
}
// Add default for all gaps
for gap in gaps {
let old = self.map.insert(gap, Default::default());
assert!(old.is_none());
// Add default for all gaps
for gap in gaps {
let old = self.map.insert(gap, Default::default());
assert!(old.is_none());
}
}
// Now provide mutable iteration
@ -208,10 +239,16 @@ mod tests {
use super::*;
/// Query the map at every offset in the range and collect the results.
fn to_vec<T: Copy>(map: &RangeMap<T>, offset: u64, len: u64) -> Vec<T> {
fn to_vec<T: Copy>(map: &RangeMap<T>, offset: u64, len: u64, default: Option<T>) -> Vec<T> {
(offset..offset + len)
.into_iter()
.map(|i| *map.iter(i, 1).next().unwrap())
.map(|i| map
.iter(Size::from_bytes(i), Size::from_bytes(1))
.next()
.map(|&t| t)
.or(default)
.unwrap()
)
.collect()
}
@ -219,34 +256,47 @@ mod tests {
fn basic_insert() {
let mut map = RangeMap::<i32>::new();
// Insert
for x in map.iter_mut(10, 1) {
for x in map.iter_mut(Size::from_bytes(10), Size::from_bytes(1)) {
*x = 42;
}
// Check
assert_eq!(to_vec(&map, 10, 1), vec![42]);
assert_eq!(to_vec(&map, 10, 1, None), vec![42]);
// Insert with size 0
for x in map.iter_mut(Size::from_bytes(10), Size::from_bytes(0)) {
*x = 19;
}
for x in map.iter_mut(Size::from_bytes(11), Size::from_bytes(0)) {
*x = 19;
}
assert_eq!(to_vec(&map, 10, 2, Some(-1)), vec![42, -1]);
}
#[test]
fn gaps() {
let mut map = RangeMap::<i32>::new();
for x in map.iter_mut(11, 1) {
for x in map.iter_mut(Size::from_bytes(11), Size::from_bytes(1)) {
*x = 42;
}
for x in map.iter_mut(15, 1) {
*x = 42;
for x in map.iter_mut(Size::from_bytes(15), Size::from_bytes(1)) {
*x = 43;
}
assert_eq!(
to_vec(&map, 10, 10, Some(-1)),
vec![-1, 42, -1, -1, -1, 43, -1, -1, -1, -1]
);
// Now request a range that needs three gaps filled
for x in map.iter_mut(10, 10) {
if *x != 42 {
for x in map.iter_mut(Size::from_bytes(10), Size::from_bytes(10)) {
if *x < 42 {
*x = 23;
}
}
assert_eq!(
to_vec(&map, 10, 10),
vec![23, 42, 23, 23, 23, 42, 23, 23, 23, 23]
to_vec(&map, 10, 10, None),
vec![23, 42, 23, 23, 23, 43, 23, 23, 23, 23]
);
assert_eq!(to_vec(&map, 13, 5), vec![23, 23, 42, 23, 23]);
assert_eq!(to_vec(&map, 13, 5, None), vec![23, 23, 43, 23, 23]);
}
}
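The core trick in `Range::range` above is the derived lexicographic `Ord`: probe bounds built with `end: 0` sort before every real key with the same `start` (the invariant guarantees real keys have `end > start > 0` relative order), so a plain `BTreeMap::range` query returns exactly the stored ranges whose `start` falls in the window; ranges that start earlier but overlap are covered by the map's own splitting invariants. A standalone sketch of that idea:

use std::collections::BTreeMap;
use std::ops;

#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
struct Range { start: u64, end: u64 } // invariant: end > start

// Probe bounds with end == 0 sort before any real key with the same
// start, so this selects keys with start in [offset, offset + len).
fn query(offset: u64, len: u64) -> ops::Range<Range> {
    Range { start: offset, end: 0 }..Range { start: offset + len, end: 0 }
}

fn main() {
    let mut map: BTreeMap<Range, &str> = BTreeMap::new();
    map.insert(Range { start: 10, end: 12 }, "a");
    map.insert(Range { start: 20, end: 30 }, "b");
    let hits: Vec<_> = map.range(query(5, 10)).map(|(_, v)| *v).collect();
    assert_eq!(hits, vec!["a"]); // only the range starting at 10 is hit
}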

src/stacked_borrows.rs (new file)

@ -0,0 +1,308 @@
use std::cell::RefCell;
use rustc::ty::{Ty, layout::Size};
use rustc::mir;
use super::{
MemoryAccess, RangeMap, EvalResult,
Pointer,
};
pub type Timestamp = u64;
/// Information about a potentially mutable borrow
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Mut {
/// A unique, mutable reference
Uniq(Timestamp),
/// Any raw pointer, or a shared borrow with interior mutability
Raw,
}
impl Mut {
#[inline(always)]
fn is_raw(self) -> bool {
match self {
Mut::Raw => true,
_ => false,
}
}
}
/// Information about any kind of borrow
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Borrow {
/// A mutable borrow, a raw pointer, or a shared borrow with interior mutability
Mut(Mut),
/// A shared borrow without interior mutability
Frz(Timestamp)
}
impl Borrow {
#[inline(always)]
fn is_uniq(self) -> bool {
match self {
Borrow::Mut(Mut::Uniq(_)) => true,
_ => false,
}
}
}
/// An item in the borrow stack
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum BorStackItem {
/// Defines which references are permitted to mutate *if* the location is not frozen
Mut(Mut),
/// A barrier, tracking the function it belongs to by its index on the call stack
#[allow(dead_code)] // for future use
FnBarrier(usize)
}
impl Default for Borrow {
fn default() -> Self {
Borrow::Mut(Mut::Raw)
}
}
/// Extra global machine state
#[derive(Clone, Debug)]
pub struct State {
clock: Timestamp
}
impl State {
pub fn new() -> State {
State { clock: 0 }
}
}
/// Extra per-location state
#[derive(Clone, Debug)]
struct Stack {
borrows: Vec<BorStackItem>, // used as a stack
frozen_since: Option<Timestamp>,
}
impl Default for Stack {
fn default() -> Self {
Stack {
borrows: Vec::new(),
frozen_since: None,
}
}
}
/// Extra per-allocation state
#[derive(Clone, Debug, Default)]
pub struct Stacks {
stacks: RefCell<RangeMap<Stack>>,
}
/// Core operations
impl<'tcx> Stack {
fn check(&self, bor: Borrow) -> bool {
match bor {
Borrow::Frz(acc_t) =>
// Must be frozen at least as long as the `acc_t` says.
self.frozen_since.map_or(false, |loc_t| loc_t <= acc_t),
Borrow::Mut(acc_m) =>
// Raw pointers are fine with frozen locations. This is important because &Cell is raw!
if self.frozen_since.is_some() {
acc_m.is_raw()
} else {
self.borrows.last().map_or(false, |&loc_itm| loc_itm == BorStackItem::Mut(acc_m))
}
}
}
/// Reactivate `bor` for this stack. If `force_mut` is set, we want to aggressively
/// unfreeze this location (because we are about to push a `Uniq`).
fn reactivate(&mut self, bor: Borrow, force_mut: bool) -> EvalResult<'tcx> {
// Unless mutation is bound to happen, do NOT change anything if `bor` is already active.
// In particular, if it is a `Mut(Raw)` and we are frozen, this should be a NOP.
if !force_mut && self.check(bor) {
return Ok(());
}
let acc_m = match bor {
Borrow::Frz(_) =>
if force_mut {
return err!(MachineError(format!("Using a shared borrow for mutation")))
} else {
return err!(MachineError(format!("Location should be frozen but it is not")))
}
Borrow::Mut(acc_m) => acc_m,
};
// We definitely have to unfreeze this, even if we use the topmost item.
self.frozen_since = None;
// Pop until we see the one we are looking for.
while let Some(&itm) = self.borrows.last() {
match itm {
BorStackItem::FnBarrier(_) => {
return err!(MachineError(format!("Trying to reactivate a borrow that lives behind a barrier")));
}
BorStackItem::Mut(loc_m) => {
if loc_m == acc_m { return Ok(()); }
trace!("reactivate: Popping {:?}", itm);
self.borrows.pop();
}
}
}
// Nothing to be found. Simulate a "virtual raw" element at the bottom of the stack.
if acc_m.is_raw() {
Ok(())
} else {
err!(MachineError(format!("Borrow-to-reactivate does not exist on the stack")))
}
}
fn initiate(&mut self, bor: Borrow) -> EvalResult<'tcx> {
match bor {
Borrow::Frz(t) => {
trace!("initiate: Freezing");
match self.frozen_since {
None => self.frozen_since = Some(t),
Some(since) => assert!(since <= t),
}
}
Borrow::Mut(m) => {
trace!("initiate: Pushing {:?}", bor);
match self.frozen_since {
None => self.borrows.push(BorStackItem::Mut(m)),
Some(_) =>
// FIXME: Do we want an exception for raw borrows?
return err!(MachineError(format!("Trying to mutate frozen location")))
}
}
}
Ok(())
}
}
impl State {
fn increment_clock(&mut self) -> Timestamp {
self.clock += 1;
self.clock
}
}
/// Higher-level operations
impl<'tcx> Stacks {
pub fn memory_accessed(
&self,
ptr: Pointer<Borrow>,
size: Size,
access: MemoryAccess,
) -> EvalResult<'tcx> {
trace!("memory_accessed({:?}) with tag {:?}: {:?}, size {}", access, ptr.tag, ptr, size.bytes());
let mut stacks = self.stacks.borrow_mut();
for stack in stacks.iter_mut(ptr.offset, size) {
// FIXME: Compare this with what the blog post says.
stack.reactivate(ptr.tag, /*force_mut*/access == MemoryAccess::Write)?;
}
Ok(())
}
pub fn memory_deallocated(
&mut self,
ptr: Pointer<Borrow>,
) -> EvalResult<'tcx> {
trace!("memory_deallocated with tag {:?}: {:?}", ptr.tag, ptr);
let stacks = self.stacks.get_mut();
for stack in stacks.iter_mut_all() {
// This is like mutating.
stack.reactivate(ptr.tag, /*force_mut*/true)?;
}
Ok(())
}
fn reborrow(
&self,
ptr: Pointer<Borrow>,
size: Size,
new_bor: Borrow,
) -> EvalResult<'tcx> {
let mut stacks = self.stacks.borrow_mut();
for stack in stacks.iter_mut(ptr.offset, size) {
if stack.check(new_bor) {
// The new borrow is already active! This can happen when creating multiple
// shared references from the same mutable reference. Do nothing.
} else {
// FIXME: The blog post says we should `reset` if this is a local.
stack.reactivate(ptr.tag, /*force_mut*/new_bor.is_uniq())?;
stack.initiate(new_bor)?;
}
}
Ok(())
}
}
/// Machine hooks
pub trait EvalContextExt<'tcx> {
fn tag_reference(
&mut self,
ptr: Pointer<Borrow>,
pointee_ty: Ty<'tcx>,
size: Size,
borrow_kind: Option<mir::BorrowKind>,
) -> EvalResult<'tcx, Borrow>;
fn tag_dereference(
&self,
ptr: Pointer<Borrow>,
ptr_ty: Ty<'tcx>,
) -> EvalResult<'tcx, Borrow>;
}
impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, 'tcx> {
fn tag_reference(
&mut self,
ptr: Pointer<Borrow>,
pointee_ty: Ty<'tcx>,
size: Size,
borrow_kind: Option<mir::BorrowKind>,
) -> EvalResult<'tcx, Borrow> {
let time = self.machine.stacked_borrows.increment_clock();
let new_bor = match borrow_kind {
Some(mir::BorrowKind::Mut { .. }) => Borrow::Mut(Mut::Uniq(time)),
Some(_) =>
// FIXME This does not do enough checking when only part of the data has
// interior mutability. When the type is `(i32, Cell<i32>)`, we want the
// first field to be frozen but not the second.
if self.type_is_freeze(pointee_ty) {
Borrow::Frz(time)
} else {
Borrow::Mut(Mut::Raw)
},
None => Borrow::Mut(Mut::Raw),
};
trace!("tag_reference: Creating new reference ({:?}) for {:?} (pointee {}, size {}): {:?}",
borrow_kind, ptr, pointee_ty, size.bytes(), new_bor);
// Make sure this reference is not dangling or so
self.memory.check_bounds(ptr, size, false)?;
// Update the stacks. We cannot use `get_mut` because this might be immutable
// memory.
let alloc = self.memory.get(ptr.alloc_id).expect("We checked that the ptr is fine!");
alloc.extra.reborrow(ptr, size, new_bor)?;
Ok(new_bor)
}
fn tag_dereference(
&self,
ptr: Pointer<Borrow>,
ptr_ty: Ty<'tcx>,
) -> EvalResult<'tcx, Borrow> {
// If this is a raw ptr, forget about the tag.
Ok(if ptr_ty.is_unsafe_ptr() {
trace!("tag_dereference: Erasing tag for {:?} ({})", ptr, ptr_ty);
Borrow::Mut(Mut::Raw)
} else {
// FIXME: Do we want to adjust the tag if it does not match the type?
ptr.tag
})
}
}
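To make the stack discipline concrete, here is a short guest program with, in comments, the per-location stack the model above maintains for `x` (an illustration of the semantics, not output Miri prints; t1 < t2 < t3 are clock values):

fn main() {
    let mut x = 17u8;   // stack: [Raw]  (a "virtual raw" sits at the bottom)
    let r1 = &mut x;    // tag_reference: [Raw, Uniq(t1)]
    let r2 = &mut *r1;  // reborrow:      [Raw, Uniq(t1), Uniq(t2)]
    *r2 += 1;           // Uniq(t2) is on top: access OK
    *r1 += 1;           // reactivate Uniq(t1), popping Uniq(t2): [Raw, Uniq(t1)]
    let s = &*r1;       // shared reborrow: frozen_since = Some(t3)
    let _v = *s;        // read with Frz(t3): OK, the location stays frozen
    *r1 += 1;           // write: unfreezes, then reactivates Uniq(t1)
    // Using `*r2` here would be rejected: Uniq(t2) is no longer on the stack.
}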


@ -4,19 +4,19 @@ use rustc_target::abi::LayoutOf;
use rustc::{ty, ty::layout::HasDataLayout, mir};
use super::{
EvalResult, EvalErrorKind, StackPopCleanup, EvalContext, Evaluator,
MPlaceTy, Scalar,
EvalResult, EvalErrorKind, StackPopCleanup,
MPlaceTy, Scalar, Borrow,
};
pub type TlsKey = u128;
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[derive(Copy, Clone, Debug)]
pub struct TlsEntry<'tcx> {
pub(crate) data: Scalar, // Will eventually become a map from thread IDs to `Scalar`s, if we ever support more than one thread.
pub(crate) data: Scalar<Borrow>, // Will eventually become a map from thread IDs to `Scalar`s, if we ever support more than one thread.
pub(crate) dtor: Option<ty::Instance<'tcx>>,
}
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug)]
pub struct TlsData<'tcx> {
/// The Key to use for the next thread-local allocation.
pub(crate) next_key: TlsKey,
@ -67,7 +67,7 @@ impl<'tcx> TlsData<'tcx> {
}
}
pub fn load_tls(&mut self, key: TlsKey) -> EvalResult<'tcx, Scalar> {
pub fn load_tls(&mut self, key: TlsKey) -> EvalResult<'tcx, Scalar<Borrow>> {
match self.keys.get(&key) {
Some(&TlsEntry { data, .. }) => {
trace!("TLS key {} loaded: {:?}", key, data);
@ -77,7 +77,7 @@ impl<'tcx> TlsData<'tcx> {
}
}
pub fn store_tls(&mut self, key: TlsKey, new_data: Scalar) -> EvalResult<'tcx> {
pub fn store_tls(&mut self, key: TlsKey, new_data: Scalar<Borrow>) -> EvalResult<'tcx> {
match self.keys.get_mut(&key) {
Some(&mut TlsEntry { ref mut data, .. }) => {
trace!("TLS key {} stored: {:?}", key, new_data);
@ -110,7 +110,7 @@ impl<'tcx> TlsData<'tcx> {
&mut self,
key: Option<TlsKey>,
cx: impl HasDataLayout,
) -> Option<(ty::Instance<'tcx>, Scalar, TlsKey)> {
) -> Option<(ty::Instance<'tcx>, Scalar<Borrow>, TlsKey)> {
use std::collections::Bound::*;
let thread_local = &mut self.keys;
@ -133,7 +133,7 @@ impl<'tcx> TlsData<'tcx> {
}
}
impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, Evaluator<'tcx>> {
impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, 'tcx> {
fn run_tls_dtors(&mut self) -> EvalResult<'tcx> {
let mut dtor = self.machine.tls.fetch_tls_dtor(None, *self.tcx);
// FIXME: replace loop by some structure that works with stepping


@ -0,0 +1,15 @@
#![allow(unused_variables)]
// This makes a ref that was passed to us via &mut alias with things it should not alias with
fn retarget(x: &mut &u32, target: &mut u32) {
unsafe { *x = &mut *(target as *mut _); }
}
fn main() {
let target = &mut 42;
let mut target_alias = &42; // initial dummy value
retarget(&mut target_alias, target);
// now `target_alias` points to the same thing as `target`
*target = 13;
let _val = *target_alias; //~ ERROR should be frozen
}


@ -1,10 +1,5 @@
// ignore-test validation_op is disabled
#![allow(unused_variables)]
// For some reason, the error location is different when using fullmir
// error-pattern: in conflict with lock WriteLock
mod safe {
use std::slice::from_raw_parts_mut;
@ -17,6 +12,8 @@ mod safe {
fn main() {
let v = vec![0,1,2];
let v1_ = safe::as_mut_slice(&v);
let v2_ = safe::as_mut_slice(&v);
let v1 = safe::as_mut_slice(&v);
let v2 = safe::as_mut_slice(&v);
v1[1] = 5; //~ ERROR does not exist on the stack
v1[1] = 6;
}


@ -1,5 +1,3 @@
// ignore-test validation_op is disabled
#![allow(unused_variables)]
mod safe {
@ -20,5 +18,7 @@ mod safe {
fn main() {
let mut array = [1,2,3,4];
let _x = safe::split_at_mut(&mut array, 0); //~ ERROR: in conflict with lock WriteLock
let (a, b) = safe::split_at_mut(&mut array, 0);
a[1] = 5; //~ ERROR does not exist on the stack
b[1] = 6;
}


@ -0,0 +1,11 @@
fn evil(x: &u32) {
let x : &mut u32 = unsafe { &mut *(x as *const _ as *mut _) };
*x = 42; // mutating shared ref without `UnsafeCell`
}
fn main() {
let target = 42;
let ref_ = &target;
evil(ref_); // invalidates shared ref
let _x = *ref_; //~ ERROR should be frozen
}


@ -0,0 +1,10 @@
#![allow(unused_variables)]
fn main() {
let target = &mut 42;
let target2 = target as *mut _;
drop(&mut *target); // reborrow
// Now make sure our ref is still the only one
unsafe { *target2 = 13; } // invalidate our ref
let _val = *target; //~ ERROR does not exist on the stack
}


@ -0,0 +1,22 @@
#![allow(unused_variables)]
static mut PTR: *mut u8 = 0 as *mut _;
fn fun1(x: &mut u8) {
unsafe {
PTR = x;
}
}
fn fun2() {
// Now we use a pointer we are not allowed to use
let _x = unsafe { *PTR };
}
fn main() {
let val = &mut 0; // FIXME: This should also work with a local variable, but currently it does not.
fun1(val);
*val = 2; // this invalidates any raw ptrs `fun1` might have created.
fun2(); // if they now use a raw ptr they break our reference
*val = 3; //~ ERROR does not exist on the stack
}


@ -0,0 +1,21 @@
#![allow(unused_variables)]
use std::cell::RefCell;
fn test(r: &mut RefCell<i32>) {
let x = &*r; // not freezing because interior mutability
let mut x_ref = x.borrow_mut();
let x_inner : &mut i32 = &mut *x_ref; // Uniq reference
let x_evil = x_inner as *mut _;
{
let x_inner_shr = &*x_inner; // frozen
let y = &*r; // outer ref, not freezing
let x_inner_shr2 = &*x_inner; // freezing again
}
// Our old raw should be dead by now
unsafe { *x_evil = 0; } // this falls back to some Raw higher up the stack
*x_inner = 12; //~ ERROR does not exist on the stack
}
fn main() {
test(&mut RefCell::new(0));
}


@ -1,17 +0,0 @@
// ignore-test validation_op is disabled
#![allow(unused_variables)]
mod safe {
pub(crate) fn safe(x: &u32) {
let x : &mut u32 = unsafe { &mut *(x as *const _ as *mut _) };
*x = 42; //~ ERROR: in conflict with lock ReadLock
}
}
fn main() {
let target = &mut 42;
let target_ref = &target;
// do a reborrow, but we keep the lock
safe::safe(&*target);
}


@ -1,26 +0,0 @@
// ignore-test validation_op is disabled
// Make sure validation can handle many overlapping shared borrows for different parts of a data structure
#![allow(unused_variables)]
use std::cell::RefCell;
fn evil(x: *mut i32) {
unsafe { *x = 0; } //~ ERROR: in conflict with lock WriteLock
}
fn test(r: &mut RefCell<i32>) {
let x = &*r; // releasing write lock, first suspension recorded
let mut x_ref = x.borrow_mut();
let x_inner : &mut i32 = &mut *x_ref; // new inner write lock, with same lifetime as outer lock
{
let x_inner_shr = &*x_inner; // releasing inner write lock, recording suspension
let y = &*r; // second suspension for the outer write lock
let x_inner_shr2 = &*x_inner; // 2nd suspension for inner write lock
}
// If the two locks are mixed up, here we should have a write lock, but we do not.
evil(x_inner as *mut _);
}
fn main() {
test(&mut RefCell::new(0));
}


@ -1,22 +0,0 @@
// ignore-test validation_op is disabled
#![allow(unused_variables)]
static mut PTR: *mut u8 = 0 as *mut _;
fn fun1(x: &mut u8) {
unsafe {
PTR = x;
}
}
fn fun2() {
// Now we use a pointer we are not allowed to use
let _x = unsafe { *PTR }; //~ ERROR: in conflict with lock WriteLock
}
fn main() {
let mut val = 0;
fun1(&mut val);
fun2();
}


@ -1,18 +0,0 @@
// ignore-test validation_op is disabled
#![allow(unused_variables)]
#[repr(u32)]
enum Bool { True }
mod safe {
pub(crate) fn safe(x: &mut super::Bool) {
let x = x as *mut _ as *mut u32;
unsafe { *x = 44; } // out-of-bounds enum discriminant
}
}
fn main() {
let mut x = Bool::True;
safe::safe(&mut x); //~ ERROR: invalid enum discriminant
}


@ -1,16 +0,0 @@
// ignore-test validation_op is disabled
#![allow(unused_variables)]
mod safe {
// This makes a ref that was passed to us via &mut alias with things it should not alias with
pub(crate) fn safe(x: &mut &u32, target: &mut u32) {
unsafe { *x = &mut *(target as *mut _); }
}
}
fn main() {
let target = &mut 42;
let mut target_alias = &42; // initial dummy value
safe::safe(&mut target_alias, target); //~ ERROR: in conflict with lock ReadLock
}


@ -1,17 +0,0 @@
// ignore-test validation_op is disabled
#![allow(unused_variables)]
mod safe {
pub(crate) fn safe(x: *mut u32) {
unsafe { *x = 42; } //~ ERROR: in conflict with lock WriteLock
}
}
fn main() {
let target = &mut 42u32;
let target2 = target as *mut _;
drop(&mut *target); // reborrow
// Now make sure we still got the lock
safe::safe(target2);
}


@ -1,16 +0,0 @@
// ignore-test validation_op is disabled
#![allow(unused_variables)]
// error-pattern: attempted to read undefined bytes
mod safe {
use std::mem;
pub(crate) fn make_float() -> f32 {
unsafe { mem::uninitialized() }
}
}
fn main() {
let _x = safe::make_float();
}


@ -0,0 +1,16 @@
#![allow(unused_variables)]
#[repr(u32)]
enum Bool { True }
fn evil(x: &mut Bool) {
let x = x as *mut _ as *mut u32;
unsafe { *x = 44; } // out-of-bounds enum discriminant
}
fn main() {
let mut x = Bool::True;
evil(&mut x);
let _y = x; // reading this ought to be enough to trigger validation
//~^ ERROR invalid enum discriminant 44
}


@ -0,0 +1,12 @@
#![allow(unused_variables)]
// error-pattern: encountered undefined data in pointer
use std::mem;
fn make_raw() -> *const f32 {
unsafe { mem::uninitialized() }
}
fn main() {
let _x = make_raw();
}


@ -63,6 +63,7 @@ fn compile_fail(sysroot: &Path, path: &str, target: &str, host: &str, need_fullm
flags.push(format!("--sysroot {}", sysroot.display()));
flags.push("-Dwarnings -Dunused".to_owned()); // overwrite the -Aunused in compiletest-rs
config.src_base = PathBuf::from(path.to_string());
flags.push("-Zmir-opt-level=0".to_owned()); // optimization circumvents some stacked borrow checks
flags.push("-Zmir-emit-validate=1".to_owned());
config.target_rustcflags = Some(flags.join(" "));
config.target = target.to_owned();


@ -0,0 +1,9 @@
//ignore-windows: env var emulation not implemented on Windows
use std::env;
fn main() {
assert_eq!(env::var("MIRI_TEST"), Err(env::VarError::NotPresent));
env::set_var("MIRI_TEST", "the answer");
assert_eq!(env::var("MIRI_TEST"), Ok("the answer".to_owned()));
}

tests/run-pass/raw.rs (new file)

@ -0,0 +1,21 @@
fn basic_raw() {
let mut x = 12;
let x = &mut x;
assert_eq!(*x, 12);
let raw = x as *mut i32;
unsafe { *raw = 42; }
assert_eq!(*x, 42);
let raw = x as *mut i32;
unsafe { *raw = 12; }
*x = 23;
assert_eq!(*x, 23);
}
fn main() {
basic_raw();
}

tests/run-pass/refcell.rs (new file)

@ -0,0 +1,33 @@
use std::cell::RefCell;
fn main() {
let c = RefCell::new(42);
{
let s1 = c.borrow();
let _x: i32 = *s1;
let s2 = c.borrow();
let _x: i32 = *s1;
let _y: i32 = *s2;
let _x: i32 = *s1;
let _y: i32 = *s2;
}
{
let mut m = c.borrow_mut();
let _z: i32 = *m;
{
let s: &i32 = &*m;
let _x = *s;
}
*m = 23;
let _z: i32 = *m;
}
{
let s1 = c.borrow();
let _x: i32 = *s1;
let s2 = c.borrow();
let _x: i32 = *s1;
let _y: i32 = *s2;
let _x: i32 = *s1;
let _y: i32 = *s2;
}
}


@ -12,8 +12,9 @@ fn rc_cell() -> Rc<Cell<i32>> {
fn rc_refcell() -> i32 {
let r = Rc::new(RefCell::new(42));
*r.borrow_mut() += 10;
let x = *r.borrow();
x
let x = r.borrow();
let y = r.borrow();
(*x + *y)/2
}
fn arc() -> Arc<i32> {