Various cosmetic improvements.

Alexander Regueiro 2019-02-16 01:29:38 +00:00
parent a478bfebd2
commit 12d3ecbaff
15 changed files with 468 additions and 405 deletions

View File

@ -23,7 +23,7 @@ Miri has already discovered some [real-world bugs](#bugs-found-by-miri).
[`copy_nonoverlapping`]: https://doc.rust-lang.org/stable/std/ptr/fn.copy_nonoverlapping.html
## Running Miri on your own project('s test suite)
## Running Miri on your own project (and its test suite)
Install Miri as a cargo subcommand:

View File

@ -2,10 +2,10 @@
extern crate cargo_metadata;
use std::path::{PathBuf, Path};
use std::io::{self, Write, BufRead};
use std::process::Command;
use std::fs::{self, File};
use std::io::{self, Write, BufRead};
use std::path::{PathBuf, Path};
use std::process::Command;
const CARGO_MIRI_HELP: &str = r#"Interprets bin crates and tests in Miri
@ -55,15 +55,15 @@ fn show_error(msg: String) -> ! {
std::process::exit(1)
}
// Determines whether a --flag is present
// Determines whether a `--flag` is present.
fn has_arg_flag(name: &str) -> bool {
let mut args = std::env::args().take_while(|val| val != "--");
args.any(|val| val == name)
}
/// Gets the value of a --flag
/// Gets the value of a `--flag`.
fn get_arg_flag_value(name: &str) -> Option<String> {
// stop searching at `--`
// Stop searching at `--`.
let mut args = std::env::args().take_while(|val| val != "--");
loop {
let arg = match args.next() {
@ -73,13 +73,15 @@ fn get_arg_flag_value(name: &str) -> Option<String> {
if !arg.starts_with(name) {
continue;
}
let suffix = &arg[name.len()..]; // strip leading `name`
// Strip leading `name`.
let suffix = &arg[name.len()..];
if suffix.is_empty() {
// This argument is exactly `name`, the next one is the value
// This argument is exactly `name`; the next one is the value.
return args.next();
} else if suffix.starts_with('=') {
// This argument is `name=value`, get the value
return Some(suffix[1..].to_owned()); // strip leading `=`
// This argument is `name=value`; get the value.
// Strip leading `=`.
return Some(suffix[1..].to_owned());
}
}
}
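
A standalone sketch of the convention these hunks implement (hypothetical argument values; the iterator plumbing is simplified): a flag can be spelled `--flag value` or `--flag=value`, and the search stops at the first `--`.

fn get_flag_value(args: &[&str], name: &str) -> Option<String> {
    // Stop searching at `--`, as above.
    let mut args = args.iter().take_while(|&&arg| arg != "--");
    while let Some(arg) = args.next() {
        if !arg.starts_with(name) {
            continue;
        }
        // Strip leading `name`.
        let suffix = &arg[name.len()..];
        if suffix.is_empty() {
            // The argument is exactly `name`; the next one is the value.
            return args.next().map(|s| s.to_string());
        } else if suffix.starts_with('=') {
            // The argument is `name=value`; strip the leading `=`.
            return Some(suffix[1..].to_owned());
        }
    }
    None
}

fn main() {
    let argv = ["--target=foo", "--", "--target", "bar"];
    assert_eq!(get_flag_value(&argv, "--target"), Some("foo".to_string()));
}
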
@ -96,7 +98,7 @@ fn list_targets() -> impl Iterator<Item=cargo_metadata::Target> {
{
metadata
} else {
show_error(format!("error: Could not obtain cargo metadata."));
show_error(format!("Could not obtain Cargo metadata"));
};
let current_dir = std::env::current_dir();
@ -167,20 +169,22 @@ fn ask(question: &str) {
io::stdout().flush().unwrap();
io::stdin().read_line(&mut buf).unwrap();
match buf.trim().to_lowercase().as_ref() {
"" | "y" | "yes" => {}, // proceed
// Proceed.
"" | "y" | "yes" => {},
"n" | "no" => show_error(format!("Aborting as per your request")),
a => show_error(format!("I do not understand `{}`", a))
};
}
/// Perform the setup requires to make `cargo miri` work: Getting a custom-built libstd. Then sets MIRI_SYSROOT.
/// Skipped if MIRI_SYSROOT is already set, in that case we expect the user has done all this already.
/// Performs the setup required to make `cargo miri` work: Getting a custom-built libstd. Then sets
/// `MIRI_SYSROOT`. Skipped if `MIRI_SYSROOT` is already set, in which case we expect the user has
/// done all this already.
fn setup(ask_user: bool) {
if std::env::var("MIRI_SYSROOT").is_ok() {
return;
}
// First, we need xargo
// First, we need xargo.
let xargo = xargo_version();
if xargo.map_or(true, |v| v < (0, 3, 13)) {
if ask_user {
@ -193,7 +197,7 @@ fn setup(ask_user: bool) {
}
}
// Then, unless XARGO_RUST_SRC is set, we also need rust-src.
// Then, unless `XARGO_RUST_SRC` is set, we also need rust-src.
// Let's see if it is already installed.
if std::env::var("XARGO_RUST_SRC").is_err() {
let sysroot = Command::new("rustc").args(&["--print", "sysroot"]).output().unwrap().stdout;
@ -229,7 +233,7 @@ features = ["panic_unwind"]
[dependencies.test]
stage = 1
"#).unwrap();
// The boring bits: A dummy project for xargo
// The boring bits: a dummy project for xargo.
File::create(dir.join("Cargo.toml")).unwrap()
.write_all(br#"
[package]
@ -241,7 +245,7 @@ version = "0.0.0"
path = "lib.rs"
"#).unwrap();
File::create(dir.join("lib.rs")).unwrap();
// Run xargo
// Run xargo.
let target = get_arg_flag_value("--target");
let mut command = Command::new("xargo");
command.arg("build").arg("-q")
@ -256,7 +260,7 @@ path = "lib.rs"
show_error(format!("Failed to run xargo"));
}
// That should be it! But we need to figure out where xargo built stuff.
// Unfortunately, it puts things into a different directory when the
// architecture matches the host.
let is_host = match target {
@ -271,7 +275,7 @@ path = "lib.rs"
}
fn main() {
// Check for version and help flags even when invoked as 'cargo-miri'
// Check for version and help flags even when invoked as `cargo-miri`.
if std::env::args().any(|a| a == "--help" || a == "-h") {
show_help();
return;
@ -282,17 +286,16 @@ fn main() {
}
if let Some("miri") = std::env::args().nth(1).as_ref().map(AsRef::as_ref) {
// this arm is when `cargo miri` is called. We call `cargo rustc` for
// each applicable target, but with the RUSTC env var set to the `cargo-miri`
// binary so that we come back in the other branch, and dispatch
// the invocations to rustc and miri, respectively.
// This arm is for when `cargo miri` is called. We call `cargo rustc` for each applicable target,
// but with the `RUSTC` env var set to the `cargo-miri` binary so that we come back in the other branch,
// and dispatch the invocations to `rustc` and `miri`, respectively.
in_cargo_miri();
} else if let Some("rustc") = std::env::args().nth(1).as_ref().map(AsRef::as_ref) {
// This arm is executed when cargo-miri runs `cargo rustc` with the `RUSTC_WRAPPER` env var set to itself:
// Dependencies get dispatched to rustc, the final test/binary to miri.
// This arm is executed when `cargo-miri` runs `cargo rustc` with the `RUSTC_WRAPPER` env var set to itself:
// dependencies get dispatched to `rustc`, the final test/binary to `miri`.
inside_cargo_rustc();
} else {
show_error(format!("Must be called with either `miri` or `rustc` as first argument."))
show_error(format!("must be called with either `miri` or `rustc` as first argument."))
}
}
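
A minimal sketch of the round trip these two arms describe, assuming (per the second comment) that the wrapper registers itself via the `RUSTC_WRAPPER` env var; target selection and flag forwarding are elided:

use std::{env, process::Command};

fn main() {
    // Re-invoke cargo with this very binary as the rustc wrapper, so that the
    // `rustc` arm above intercepts each compiler invocation.
    let self_path = env::current_exe().expect("current executable path invalid");
    let status = Command::new("cargo")
        .arg("rustc")
        .env("RUSTC_WRAPPER", &self_path)
        .status()
        .expect("failed to run cargo");
    assert!(status.success());
}
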
@ -301,17 +304,17 @@ fn in_cargo_miri() {
Some("test") => (MiriCommand::Test, 3),
Some("run") => (MiriCommand::Run, 3),
Some("setup") => (MiriCommand::Setup, 3),
// Default command, if there is an option or nothing
// Default command, if there is an option or nothing.
Some(s) if s.starts_with("-") => (MiriCommand::Run, 2),
None => (MiriCommand::Run, 2),
// Unvalid command
// Invalid command.
Some(s) => {
show_error(format!("Unknown command `{}`", s))
}
};
let verbose = has_arg_flag("-v");
// We always setup
// We always perform the setup.
let ask = subcommand != MiriCommand::Setup;
setup(ask);
if subcommand == MiriCommand::Setup {
@ -326,16 +329,16 @@ fn in_cargo_miri() {
"badly formatted cargo metadata: target::kind is an empty array",
);
// Now we run `cargo rustc $FLAGS $ARGS`, giving the user the
// change to add additional flags. "FLAGS" is set to identify
// chance to add additional flags. `FLAGS` is set to identify
// this target. The user gets to control what gets actually passed to Miri.
// However, we need to add a flag to what gets passed to rustc for the final
// binary, so that we know to interpret that with Miri.
// So after the first "--", we add "-Zcargo-miri-marker".
// So after the first `--`, we add `-Zcargo-miri-marker`.
let mut cmd = Command::new("cargo");
cmd.arg("rustc");
match (subcommand, &kind[..]) {
(MiriCommand::Run, "bin") => {
// FIXME: We just run all the binaries here.
// FIXME: we just run all the binaries here.
// We should instead support `cargo miri --bin foo`.
cmd.arg("--bin").arg(target.name);
}
@ -343,24 +346,24 @@ fn in_cargo_miri() {
cmd.arg("--test").arg(target.name);
}
(MiriCommand::Test, "lib") => {
// There can be only one lib
// There can be only one lib.
cmd.arg("--lib").arg("--profile").arg("test");
}
(MiriCommand::Test, "bin") => {
cmd.arg("--bin").arg(target.name).arg("--profile").arg("test");
}
// The remaining targets we do not even want to build
// The remaining targets we do not even want to build.
_ => continue,
}
// add user-defined args until first "--"
// Add user-defined args until first `--`.
while let Some(arg) = args.next() {
if arg == "--" {
break;
}
cmd.arg(arg);
}
// Add "--" (to end the cargo flags), and then the user flags. We add markers around the user flags
// to be able to identify them later.
// Add `--` (to end the `cargo` flags), and then the user flags. We add markers around the
// user flags to be able to identify them later.
cmd
.arg("--")
.arg("cargo-miri-marker-begin")
@ -403,11 +406,11 @@ fn inside_cargo_rustc() {
.and_then(|out| String::from_utf8(out.stdout).ok())
.map(|s| s.trim().to_owned())
})
.expect("need to specify RUST_SYSROOT env var during miri compilation, or use rustup or multirust")
.expect("need to specify `RUST_SYSROOT` env var during miri compilation, or use rustup or multirust")
};
// this conditional check for the --sysroot flag is there so users can call `cargo-miri` directly
// without having to pass --sysroot or anything
// This conditional check for the `--sysroot` flag is there so that users can call `cargo-miri`
// directly without having to pass `--sysroot` or anything.
let rustc_args = std::env::args().skip(2);
let mut args: Vec<String> = if std::env::args().any(|s| s == "--sysroot") {
rustc_args.collect()
@ -419,25 +422,27 @@ fn inside_cargo_rustc() {
};
args.splice(0..0, miri::miri_default_args().iter().map(ToString::to_string));
// See if we can find the cargo-miri markers. Those only get added to the binary we want to
// run. They also serve to mark the user-defined arguments, which we have to move all the way to the
// end (they get added somewhere in the middle).
// See if we can find the `cargo-miri` markers. Those only get added to the binary we want to
// run. They also serve to mark the user-defined arguments, which we have to move all the way
// to the end (they get added somewhere in the middle).
let needs_miri = if let Some(begin) = args.iter().position(|arg| arg == "cargo-miri-marker-begin") {
let end = args.iter().position(|arg| arg == "cargo-miri-marker-end").expect("Cannot find end marker");
// These mark the user arguments. We remove the first and last as they are the markers.
let end = args
.iter()
.position(|arg| arg == "cargo-miri-marker-end")
.expect("cannot find end marker");
// These mark the user arguments. We remove the first and last as they are the markers.
let mut user_args = args.drain(begin..=end);
assert_eq!(user_args.next().unwrap(), "cargo-miri-marker-begin");
assert_eq!(user_args.next_back().unwrap(), "cargo-miri-marker-end");
// Collect the rest and add it back at the end
// Collect the rest and add it back at the end.
let mut user_args = user_args.collect::<Vec<String>>();
args.append(&mut user_args);
// Run this in Miri
// Run this in Miri.
true
} else {
false
};
let mut command = if needs_miri {
let mut path = std::env::current_exe().expect("current executable path invalid");
path.set_file_name("miri");
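
As a standalone illustration (hypothetical argument values) of what the marker extraction above does to the wrapper's argument list:

fn main() {
    let mut args: Vec<String> = ["--crate-name", "foo", "cargo-miri-marker-begin",
                                 "-Zmiri-flag", "cargo-miri-marker-end", "--emit=metadata"]
        .iter().map(|s| s.to_string()).collect();
    let begin = args.iter().position(|arg| arg == "cargo-miri-marker-begin").unwrap();
    let end = args.iter().position(|arg| arg == "cargo-miri-marker-end").unwrap();
    // Drain the bracketed range; its first and last elements are the markers.
    let mut user_args: Vec<String> = args.drain(begin..=end).collect();
    assert_eq!(user_args.remove(0), "cargo-miri-marker-begin");
    assert_eq!(user_args.pop().unwrap(), "cargo-miri-marker-end");
    // The user flags move all the way to the end.
    args.append(&mut user_args);
    assert_eq!(args, ["--crate-name", "foo", "--emit=metadata", "-Zmiri-flag"]);
}
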

View File

@ -1,23 +1,23 @@
#![feature(rustc_private)]
extern crate env_logger;
extern crate getopts;
#[macro_use]
extern crate log;
extern crate log_settings;
extern crate miri;
extern crate rustc;
extern crate rustc_metadata;
extern crate rustc_driver;
extern crate rustc_errors;
extern crate rustc_codegen_utils;
extern crate env_logger;
extern crate log_settings;
extern crate syntax;
#[macro_use]
extern crate log;
use std::path::PathBuf;
use std::str::FromStr;
use std::env;
use miri::MiriConfig;
use rustc::session::Session;
use rustc_metadata::cstore::CStore;
use rustc_driver::{Compilation, CompilerCalls, RustcDefaultCalls};
@ -27,8 +27,6 @@ use rustc_codegen_utils::codegen_backend::CodegenBackend;
use rustc::hir::def_id::LOCAL_CRATE;
use syntax::ast;
use miri::MiriConfig;
struct MiriCompilerCalls {
default: Box<RustcDefaultCalls>,
miri_config: MiriConfig,
@ -79,7 +77,7 @@ impl<'a> CompilerCalls<'a> for MiriCompilerCalls {
odir: &Option<PathBuf>,
ofile: &Option<PathBuf>,
) -> Compilation {
// Called *before* build_controller. Add filename to miri arguments.
// Called *before* `build_controller`. Add filename to `miri` arguments.
self.miri_config.args.insert(0, input.filestem().to_string());
self.default.late_callback(codegen_backend, matches, sess, cstore, input, odir, ofile)
}
@ -125,27 +123,27 @@ fn after_analysis<'a, 'tcx>(
}
fn init_early_loggers() {
// Notice that our `extern crate log` is NOT the same as rustc's! So we have to initialize
// them both. We always initialize miri early.
// Note that our `extern crate log` is *not* the same as rustc's; as a result, we have to
// initialize them both, and we always initialize `miri`'s first.
let env = env_logger::Env::new().filter("MIRI_LOG").write_style("MIRI_LOG_STYLE");
env_logger::init_from_env(env);
// We only initialize rustc if the env var is set (so the user asked for it).
// We only initialize `rustc` if the env var is set (so the user asked for it).
// If it is not set, we avoid initializing now so that we can initialize
// later with our custom settings, and NOT log anything for what happens before
// miri gets started.
// later with our custom settings, and *not* log anything for what happens before
// `miri` gets started.
if env::var("RUST_LOG").is_ok() {
rustc_driver::init_rustc_env_logger();
}
}
fn init_late_loggers() {
// Initializing loggers right before we start evaluation. We overwrite the RUST_LOG
// env var if it is not set, control it based on MIRI_LOG.
// We initialize loggers right before we start evaluation. We overwrite the `RUST_LOG`
// env var if it is not set, control it based on `MIRI_LOG`.
if let Ok(var) = env::var("MIRI_LOG") {
if env::var("RUST_LOG").is_err() {
// We try to be a bit clever here: If MIRI_LOG is just a single level
// We try to be a bit clever here: if `MIRI_LOG` is just a single level
// used for everything, we only apply it to the parts of rustc that are
// CTFE-related. Otherwise, we use it verbatim for RUST_LOG.
// CTFE-related. Otherwise, we use it verbatim for `RUST_LOG`.
// This way, if you set `MIRI_LOG=trace`, you get only the right parts of
// rustc traced, but you can also do `MIRI_LOG=miri=trace,rustc_mir::interpret=debug`.
if log::Level::from_str(&var).is_ok() {
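
A standalone sketch of that rule (the exact filter string built for `RUST_LOG` is an assumption here): a bare level name fans out to the CTFE-related parts of rustc, while anything more specific is passed through verbatim.

use std::env;

fn main() {
    let levels = ["error", "warn", "info", "debug", "trace"];
    if let Ok(var) = env::var("MIRI_LOG") {
        if env::var("RUST_LOG").is_err() {
            if levels.contains(&var.as_str()) {
                // Apply the single level only to the CTFE-related modules.
                env::set_var("RUST_LOG", format!("rustc_mir::interpret={0},miri={0}", var));
            } else {
                // Use the full filter spec verbatim.
                env::set_var("RUST_LOG", &var);
            }
        }
    }
    println!("RUST_LOG = {:?}", env::var("RUST_LOG"));
}
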
@ -158,7 +156,7 @@ fn init_late_loggers() {
}
}
// If MIRI_BACKTRACE is set and RUST_CTFE_BACKTRACE is not, set RUST_CTFE_BACKTRACE.
// If `MIRI_BACKTRACE` is set and `RUST_CTFE_BACKTRACE` is not, set `RUST_CTFE_BACKTRACE`.
// Do this late, so we really only apply this to miri's errors.
if let Ok(var) = env::var("MIRI_BACKTRACE") {
if env::var("RUST_CTFE_BACKTRACE") == Err(env::VarError::NotPresent) {
@ -172,7 +170,7 @@ fn find_sysroot() -> String {
return sysroot;
}
// Taken from https://github.com/Manishearth/rust-clippy/pull/911.
// Taken from PR <https://github.com/Manishearth/rust-clippy/pull/911>.
let home = option_env!("RUSTUP_HOME").or(option_env!("MULTIRUST_HOME"));
let toolchain = option_env!("RUSTUP_TOOLCHAIN").or(option_env!("MULTIRUST_TOOLCHAIN"));
match (home, toolchain) {
@ -180,8 +178,8 @@ fn find_sysroot() -> String {
_ => {
option_env!("RUST_SYSROOT")
.expect(
"Could not find sysroot. Either set MIRI_SYSROOT at run-time, or at \
build-time specify RUST_SYSROOT env var or use rustup or multirust",
"could not find sysroot. either set `MIRI_SYSROOT` at run-time, or at \
build-time specify `RUST_SYSROOT` env var or use rustup or multirust",
)
.to_owned()
}
@ -191,18 +189,18 @@ fn find_sysroot() -> String {
fn main() {
init_early_loggers();
// Parse our arguments and split them across rustc and miri
// Parse our arguments and split them across `rustc` and `miri`.
let mut validate = true;
let mut rustc_args = vec![];
let mut miri_args = vec![];
let mut after_dashdash = false;
for arg in std::env::args() {
if rustc_args.is_empty() {
// Very first arg: for rustc
// Very first arg: for `rustc`.
rustc_args.push(arg);
}
else if after_dashdash {
// Everything that comes is Miri args
// Everything that comes after is a `miri` arg.
miri_args.push(arg);
} else {
match arg.as_str() {
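
A standalone sketch of the splitting rule (the handling of flags like `--sysroot` inside the match is elided):

fn main() {
    let argv = ["miri", "foo.rs", "--", "some-miri-arg"];
    let mut rustc_args: Vec<String> = vec![];
    let mut miri_args: Vec<String> = vec![];
    let mut after_dashdash = false;
    for arg in argv.iter().map(|s| s.to_string()) {
        if rustc_args.is_empty() {
            // Very first arg: for rustc.
            rustc_args.push(arg);
        } else if after_dashdash {
            // Everything after `--` goes to miri.
            miri_args.push(arg);
        } else if arg == "--" {
            after_dashdash = true;
        } else {
            rustc_args.push(arg);
        }
    }
    assert_eq!(rustc_args, ["miri", "foo.rs"]);
    assert_eq!(miri_args, ["some-miri-arg"]);
}
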
@ -219,7 +217,7 @@ fn main() {
}
}
// Determine sysroot and let rustc know about it
// Determine sysroot and let rustc know about it.
let sysroot_flag = String::from("--sysroot");
if !rustc_args.contains(&sysroot_flag) {
rustc_args.push(sysroot_flag);

View File

@ -7,7 +7,7 @@ use syntax::attr;
use crate::*;
impl<'a, 'mir, 'tcx> EvalContextExt<'a, 'mir, 'tcx> for crate::MiriEvalContext<'a, 'mir, 'tcx> {}
pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a, 'mir, 'tcx> {
pub trait EvalContextExt<'a, 'mir, 'tcx: 'a + 'mir>: crate::MiriEvalContextExt<'a, 'mir, 'tcx> {
fn find_fn(
&mut self,
instance: ty::Instance<'tcx>,
@ -18,15 +18,15 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
let this = self.eval_context_mut();
trace!("eval_fn_call: {:#?}, {:?}", instance, dest.map(|place| *place));
// first run the common hooks also supported by CTFE
// First, run the common hooks also supported by CTFE.
if this.hook_fn(instance, args, dest)? {
this.goto_block(ret)?;
return Ok(None);
}
// there are some more lang items we want to hook that CTFE does not hook (yet)
// There are some more lang items we want to hook that CTFE does not hook (yet).
if this.tcx.lang_items().align_offset_fn() == Some(instance.def.def_id()) {
// FIXME: return a real value in case the target allocation has an
// alignment bigger than the one requested
// alignment bigger than the one requested.
let n = u128::max_value();
let dest = dest.unwrap();
let n = this.truncate(n, dest.layout);
@ -35,20 +35,20 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
return Ok(None);
}
// Try to see if we can do something about foreign items
// Try to see if we can do something about foreign items.
if this.tcx.is_foreign_item(instance.def_id()) {
// An external function that we cannot find MIR for, but we can still run enough
// of them to make miri viable.
this.emulate_foreign_item(instance.def_id(), args, dest, ret)?;
// `goto_block` already handled
// `goto_block` already handled.
return Ok(None);
}
// Otherwise, load the MIR
// Otherwise, load the MIR.
Ok(Some(this.load_mir(instance.def)?))
}
/// Emulate calling a foreign item, fail if the item is not supported.
/// Emulates calling a foreign item, failing if the item is not supported.
/// This function will handle `goto_block` if needed.
fn emulate_foreign_item(
&mut self,
@ -63,11 +63,11 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
Some(name) => name.as_str(),
None => this.tcx.item_name(def_id).as_str(),
};
// Strip linker suffixes (seen on 32bit macOS)
// Strip linker suffixes (seen on 32-bit macOS).
let link_name = link_name.trim_end_matches("$UNIX2003");
let tcx = &{this.tcx.tcx};
// first: functions that could diverge
// First: functions that could diverge.
match &link_name[..] {
"__rust_start_panic" | "panic_impl" => {
return err!(MachineError("the evaluated program panicked".to_string()));
@ -79,9 +79,9 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
}
}
// now: functions that assume a ret and dest
// Next: functions that assume a ret and dest.
let dest = dest.expect("we already checked for a dest");
let ret = ret.expect("dest is Some but ret is None");
let ret = ret.expect("dest is `Some` but ret is `None`");
match &link_name[..] {
"malloc" => {
let size = this.read_scalar(args[0])?.to_usize(this)?;
@ -97,7 +97,7 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
let ret = this.deref_operand(args[0])?;
let align = this.read_scalar(args[1])?.to_usize(this)?;
let size = this.read_scalar(args[2])?.to_usize(this)?;
// align must be a power of 2, and also at least ptr-sized (wtf, POSIX)
// Align must be a power of 2, and also at least ptr-sized (POSIX rules).
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
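
The two conditions restated as a standalone check (the pointer size is hard-coded here for a 64-bit target):

fn align_ok(align: u64, ptr_size: u64) -> bool {
    align.is_power_of_two() && align >= ptr_size
}

fn main() {
    assert!(align_ok(16, 8));
    assert!(!align_ok(24, 8)); // not a power of two
    assert!(!align_ok(4, 8)); // smaller than a pointer
}
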
@ -209,10 +209,10 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
}
"syscall" => {
// TODO: read `syscall` ids like `sysconf` ids and
// figure out some way to actually process some of them
// TODO: read `syscall` IDs like `sysconf` IDs and
// figure out some way to actually process some of them.
//
// libc::syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), GRND_NONBLOCK)
// `libc::syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), GRND_NONBLOCK)`
// is called if a `HashMap` is created the regular way.
match this.read_scalar(args[0])?.to_usize(this)? {
318 | 511 => {
@ -222,7 +222,7 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
}
id => {
return err!(Unimplemented(
format!("miri does not support syscall id {}", id),
format!("miri does not support syscall ID {}", id),
))
}
}
@ -241,16 +241,22 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
}
"__rust_maybe_catch_panic" => {
// fn __rust_maybe_catch_panic(f: fn(*mut u8), data: *mut u8, data_ptr: *mut usize, vtable_ptr: *mut usize) -> u32
// We abort on panic, so not much is going on here, but we still have to call the closure
// fn __rust_maybe_catch_panic(
// f: fn(*mut u8),
// data: *mut u8,
// data_ptr: *mut usize,
// vtable_ptr: *mut usize,
// ) -> u32
// We abort on panic, so not much is going on here, but we still have to call the closure.
let f = this.read_scalar(args[0])?.to_ptr()?;
let data = this.read_scalar(args[1])?.not_undef()?;
let f_instance = this.memory().get_fn(f)?;
this.write_null(dest)?;
trace!("__rust_maybe_catch_panic: {:?}", f_instance);
// Now we make a function call. TODO: Consider making this re-usable? EvalContext::step does sth. similar for the TLS dtors,
// and of course eval_main.
// Now we make a function call.
// TODO: consider making this reusable? `EvalContext::step` does something similar
// for the TLS destructors, and of course `eval_main`.
let mir = this.load_mir(f_instance.def)?;
let ret_place = MPlaceTy::dangling(this.layout_of(this.tcx.mk_unit())?, this).into();
this.push_stack_frame(
@ -258,7 +264,8 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
mir.span,
mir,
Some(ret_place),
StackPopCleanup::Goto(Some(ret)), // directly return to caller
// Directly return to caller.
StackPopCleanup::Goto(Some(ret)),
)?;
let mut args = this.frame().mir.args_iter();
@ -273,10 +280,10 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
assert!(args.next().is_none(), "__rust_maybe_catch_panic argument has more arguments than expected");
// We ourselves will return 0, eventually (because we will not return if we paniced)
// We ourselves will return `0`, eventually (because we will not return if we panicked).
this.write_null(dest)?;
// Don't fall through, we do NOT want to `goto_block`!
// Don't fall through, we do *not* want to `goto_block`!
return Ok(());
}
@ -321,10 +328,12 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
let ptr = this.read_scalar(args[0])?.not_undef()?;
let val = this.read_scalar(args[1])?.to_i32()? as u8;
let num = this.read_scalar(args[2])?.to_usize(this)?;
if let Some(idx) = this.memory().read_bytes(ptr, Size::from_bytes(num))?.iter().position(
|&c| c == val,
)
{
let idx = this
.memory()
.read_bytes(ptr, Size::from_bytes(num))?
.iter()
.position(|&c| c == val);
if let Some(idx) = idx {
let new_ptr = ptr.ptr_offset(Size::from_bytes(idx as u64), this)?;
this.write_scalar(new_ptr, dest)?;
} else {
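
The restructured search is just `memchr` over the interpreted bytes; a plain-Rust sketch of the same semantics:

fn memchr(haystack: &[u8], needle: u8) -> Option<usize> {
    haystack.iter().position(|&c| c == needle)
}

fn main() {
    assert_eq!(memchr(b"hello", b'l'), Some(2));
    assert_eq!(memchr(b"hello", b'z'), None);
}
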
@ -350,7 +359,11 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
let name_ptr = this.read_scalar(args[0])?.not_undef()?;
if !name_ptr.is_null_ptr(this) {
let name_ptr = name_ptr.to_ptr()?;
let name = this.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?.to_owned();
let name = this
.memory()
.get(name_ptr.alloc_id)?
.read_c_str(tcx, name_ptr)?
.to_owned();
if !name.is_empty() && !name.contains(&b'=') {
success = Some(this.machine.env_vars.remove(&name));
}
@ -381,7 +394,7 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
}
}
if let Some((name, value)) = new {
// +1 for the null terminator
// `+1` for the null terminator.
let value_copy = this.memory_mut().allocate(
Size::from_bytes((value.len() + 1) as u64),
Align::from_bytes(1).unwrap(),
@ -390,7 +403,10 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
{
let alloc = this.memory_mut().get_mut(value_copy.alloc_id)?;
alloc.write_bytes(tcx, value_copy, &value)?;
let trailing_zero_ptr = value_copy.offset(Size::from_bytes(value.len() as u64), tcx)?;
let trailing_zero_ptr = value_copy.offset(
Size::from_bytes(value.len() as u64),
tcx,
)?;
alloc.write_bytes(tcx, trailing_zero_ptr, &[0])?;
}
if let Some(var) = this.machine.env_vars.insert(
@ -435,8 +451,10 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
}
} else {
eprintln!("Miri: Ignored output to FD {}", fd);
n as i64 // pretend it all went well
}; // now result is the value we return back to the program
// Pretend it all went well.
n as i64
};
// Now, `result` is the value we return back to the program.
this.write_scalar(
Scalar::from_int(result, dest.layout.size),
dest,
@ -449,7 +467,7 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
this.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?;
}
// Some things needed for sys::thread initialization to go through
// Some things needed for `sys::thread` initialization to go through.
"signal" | "sigaction" | "sigaltstack" => {
this.write_scalar(Scalar::from_int(0, dest.layout.size), dest)?;
}
@ -458,7 +476,7 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
let name = this.read_scalar(args[0])?.to_i32()?;
trace!("sysconf() called with name {}", name);
// cache the sysconf integers via miri's global cache
// Cache the sysconf integers via Miri's global cache.
let paths = &[
(&["libc", "_SC_PAGESIZE"], Scalar::from_int(4096, dest.layout.size)),
(&["libc", "_SC_GETPW_R_SIZE_MAX"], Scalar::from_int(-1, dest.layout.size)),
@ -493,11 +511,11 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
this.write_null(dest)?;
}
// Hook pthread calls that go to the thread-local storage memory subsystem
// Hook pthread calls that go to the thread-local storage memory subsystem.
"pthread_key_create" => {
let key_ptr = this.read_scalar(args[0])?.to_ptr()?;
// Extract the function type out of the signature (that seems easier than constructing it ourselves...)
// Extract the function type out of the signature (that seems easier than constructing it ourselves).
let dtor = match this.read_scalar(args[1])?.not_undef()? {
Scalar::Ptr(dtor_ptr) => Some(this.memory().get_fn(dtor_ptr)?),
Scalar::Bits { bits: 0, size } => {
@ -507,12 +525,15 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
Scalar::Bits { .. } => return err!(ReadBytesAsPointer),
};
// Figure out how large a pthread TLS key actually is. This is libc::pthread_key_t.
let key_type = args[0].layout.ty.builtin_deref(true)
.ok_or_else(|| EvalErrorKind::AbiViolation("Wrong signature used for pthread_key_create: First argument must be a raw pointer.".to_owned()))?.ty;
// Figure out how large a pthread TLS key actually is.
// This is `libc::pthread_key_t`.
let key_type = args[0].layout.ty
.builtin_deref(true)
.ok_or_else(|| EvalErrorKind::AbiViolation("wrong signature used for `pthread_key_create`: first argument must be a raw pointer.".to_owned()))?
.ty;
let key_layout = this.layout_of(key_type)?;
// Create key and write it into the memory where key_ptr wants it
// Create key and write it into the memory where `key_ptr` wants it.
let key = this.machine.tls.create_tls_key(dtor, tcx) as u128;
if key_layout.size.bits() < 128 && key >= (1u128 << key_layout.size.bits() as u128) {
return err!(OutOfTls);
@ -526,7 +547,7 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
key_layout.size,
)?;
// Return success (0)
// Return success (`0`).
this.write_null(dest)?;
}
"pthread_key_delete" => {
@ -545,29 +566,31 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
let new_ptr = this.read_scalar(args[1])?.not_undef()?;
this.machine.tls.store_tls(key, new_ptr)?;
// Return success (0)
// Return success (`0`).
this.write_null(dest)?;
}
// Determining stack base address
// Determine stack base address.
"pthread_attr_init" | "pthread_attr_destroy" | "pthread_attr_get_np" |
"pthread_getattr_np" | "pthread_self" | "pthread_get_stacksize_np" => {
this.write_null(dest)?;
}
"pthread_attr_getstack" => {
// second argument is where we are supposed to write the stack size
// Second argument is where we are supposed to write the stack size.
let ptr = this.deref_operand(args[1])?;
let stackaddr = Scalar::from_int(0x80000, args[1].layout.size); // just any address
this.write_scalar(stackaddr, ptr.into())?;
// return 0
// Just any address.
let stack_addr = Scalar::from_int(0x80000, args[1].layout.size);
this.write_scalar(stack_addr, ptr.into())?;
// Return success (`0`).
this.write_null(dest)?;
}
"pthread_get_stackaddr_np" => {
let stackaddr = Scalar::from_int(0x80000, dest.layout.size); // just any address
this.write_scalar(stackaddr, dest)?;
// Just any address.
let stack_addr = Scalar::from_int(0x80000, dest.layout.size);
this.write_scalar(stack_addr, dest)?;
}
// Stub out calls for condvar, mutex and rwlock to just return 0
// Stub out calls for condvar, mutex and rwlock, to just return `0`.
"pthread_mutexattr_init" | "pthread_mutexattr_settype" | "pthread_mutex_init" |
"pthread_mutexattr_destroy" | "pthread_mutex_lock" | "pthread_mutex_unlock" |
"pthread_mutex_destroy" | "pthread_rwlock_rdlock" | "pthread_rwlock_unlock" |
@ -578,7 +601,7 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
}
"mmap" => {
// This is a horrible hack, but well... the guard page mechanism calls mmap and expects a particular return value, so we give it that value
// This is a horrible hack, but since the guard page mechanism calls mmap and expects a particular return value, we just give it that value.
let addr = this.read_scalar(args[0])?.not_undef()?;
this.write_scalar(addr, dest)?;
}
@ -586,9 +609,9 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
this.write_null(dest)?;
}
// macOS API stubs
// macOS API stubs.
"_tlv_atexit" => {
// FIXME: Register the dtor
// FIXME: register the destructor.
},
"_NSGetArgc" => {
this.write_scalar(Scalar::Ptr(this.machine.argc.unwrap()), dest)?;
@ -597,7 +620,7 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
this.write_scalar(Scalar::Ptr(this.machine.argv.unwrap()), dest)?;
},
// Windows API stubs
// Windows API stubs.
"SetLastError" => {
let err = this.read_scalar(args[0])?.to_u32()?;
this.machine.last_error = err;
@ -607,30 +630,30 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
}
"AddVectoredExceptionHandler" => {
// any non zero value works for the stdlib. This is just used for stackoverflows anyway
// Any non-zero value works for the stdlib. This is just used for stack overflows anyway.
this.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
},
"InitializeCriticalSection" |
"EnterCriticalSection" |
"LeaveCriticalSection" |
"DeleteCriticalSection" => {
// Nothing to do, not even a return value
// Nothing to do, not even a return value.
},
"GetModuleHandleW" |
"GetProcAddress" |
"TryEnterCriticalSection" |
"GetConsoleScreenBufferInfo" |
"SetConsoleTextAttribute" => {
// pretend these do not exist/nothing happened, by returning zero
// Pretend these do not exist / nothing happened, by returning zero.
this.write_null(dest)?;
},
"GetSystemInfo" => {
let system_info = this.deref_operand(args[0])?;
let system_info_ptr = system_info.ptr.to_ptr()?;
// initialize with 0
// Initialize with `0`.
this.memory_mut().get_mut(system_info_ptr.alloc_id)?
.write_repeat(tcx, system_info_ptr, 0, system_info.layout.size)?;
// set number of processors to 1
// Set number of processors to `1`.
let dword_size = Size::from_bytes(4);
let offset = 2*dword_size + 3*tcx.pointer_size();
this.memory_mut().get_mut(system_info_ptr.alloc_id)?
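
A worked instance of that offset computation, assuming a 64-bit target: the field being written is `SYSTEM_INFO.dwNumberOfProcessors`, which sits after two DWORD-sized fields and three pointer-sized ones.

fn main() {
    let dword_size: u64 = 4;
    // Pointer size on a 64-bit target.
    let pointer_size: u64 = 8;
    let offset = 2 * dword_size + 3 * pointer_size;
    assert_eq!(offset, 32);
}
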
@ -643,13 +666,14 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
}
"TlsAlloc" => {
// This just creates a key; Windows does not natively support TLS dtors.
// This just creates a key; Windows does not natively support TLS destructors.
// Create key and return it
// Create key and return it.
let key = this.machine.tls.create_tls_key(None, tcx) as u128;
// Figure out how large a TLS key actually is. This is c::DWORD.
if dest.layout.size.bits() < 128 && key >= (1u128 << dest.layout.size.bits() as u128) {
// Figure out how large a TLS key actually is. This is `c::DWORD`.
if dest.layout.size.bits() < 128
&& key >= (1u128 << dest.layout.size.bits() as u128) {
return err!(OutOfTls);
}
this.write_scalar(Scalar::from_uint(key, dest.layout.size), dest)?;
@ -664,12 +688,12 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
let new_ptr = this.read_scalar(args[1])?.not_undef()?;
this.machine.tls.store_tls(key, new_ptr)?;
// Return success (1)
// Return success (`1`).
this.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
}
"GetStdHandle" => {
let which = this.read_scalar(args[0])?.to_i32()?;
// We just make this the identity function, so we know later in "WriteFile"
// We just make this the identity function, so we know later in `WriteFile`
// which one it is.
this.write_scalar(Scalar::from_int(which, this.pointer_size()), dest)?;
}
@ -678,7 +702,8 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
let buf = this.read_scalar(args[1])?.not_undef()?;
let n = this.read_scalar(args[2])?.to_u32()?;
let written_place = this.deref_operand(args[3])?;
this.write_null(written_place.into())?; // spec says we always write 0 first
// Spec says to always write `0` first.
this.write_null(written_place.into())?;
let written = if handle == -11 || handle == -12 {
// stdout/stderr
use std::io::{self, Write};
@ -692,24 +717,25 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
res.ok().map(|n| n as u32)
} else {
eprintln!("Miri: Ignored output to handle {}", handle);
Some(n) // pretend it all went well
// Pretend it all went well.
Some(n)
};
// If there was no error, write back how much was written
// If there was no error, write back how much was written.
if let Some(n) = written {
this.write_scalar(Scalar::from_uint(n, Size::from_bits(32)), written_place.into())?;
}
// Return whether this was a success
// Return whether this was a success.
this.write_scalar(
Scalar::from_int(if written.is_some() { 1 } else { 0 }, dest.layout.size),
dest,
)?;
}
"GetConsoleMode" => {
// Everything is a pipe
// Everything is a pipe.
this.write_null(dest)?;
}
"GetEnvironmentVariableW" => {
// This is not the env var you are looking for
// This is not the env var you are looking for.
this.machine.last_error = 203; // ERROR_ENVVAR_NOT_FOUND
this.write_null(dest)?;
}
@ -717,7 +743,7 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
this.write_scalar(Scalar::Ptr(this.machine.cmd_line.unwrap()), dest)?;
}
// We can't execute anything else
// We can't execute anything else.
_ => {
return err!(Unimplemented(
format!("can't call foreign function: {}", link_name),

View File

@ -6,8 +6,9 @@ use rustc::hir::def_id::{DefId, CRATE_DEF_INDEX};
use crate::*;
impl<'a, 'mir, 'tcx> EvalContextExt<'a, 'mir, 'tcx> for crate::MiriEvalContext<'a, 'mir, 'tcx> {}
pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a, 'mir, 'tcx> {
/// Get an instance for a path.
pub trait EvalContextExt<'a, 'mir, 'tcx: 'a + 'mir>: crate::MiriEvalContextExt<'a, 'mir, 'tcx> {
/// Gets an instance for a path.
fn resolve_path(&self, path: &[&str]) -> EvalResult<'tcx, ty::Instance<'tcx>> {
let this = self.eval_context_ref();
this.tcx
@ -42,7 +43,7 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
})
}
/// Visit the memory covered by `place`, sensitive to freezing: The 3rd parameter
/// Visits the memory covered by `place`, sensitive to freezing: the 3rd parameter
/// will be true if this is frozen, false if this is in an `UnsafeCell`.
fn visit_freeze_sensitive(
&self,
@ -57,7 +58,7 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
.map(|(size, _)| size)
.unwrap_or_else(|| place.layout.size)
);
// Store how far we proceeded into the place so far. Everything to the left of
// this offset has already been handled, in the sense that the frozen parts
// have had `action` called on them.
let mut end_ptr = place.ptr;
@ -139,7 +140,7 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
&self.ecx
}
// Hook to detect `UnsafeCell`
// Hook to detect `UnsafeCell`.
fn visit_value(&mut self, v: MPlaceTy<'tcx, Borrow>) -> EvalResult<'tcx>
{
trace!("UnsafeCellVisitor: {:?} {:?}", *v, v.layout.ty);
@ -159,7 +160,7 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
}
}
// Make sure we visit aggregrates in increasing offset order
// Make sure we visit aggregates in increasing offset order.
fn visit_aggregate(
&mut self,
place: MPlaceTy<'tcx, Borrow>,
@ -179,17 +180,17 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
}
layout::FieldPlacement::Union { .. } => {
// Uh, what?
bug!("A union is not an aggregate we should ever visit")
bug!("a union is not an aggregate we should ever visit")
}
}
}
// We have to do *something* for unions
// We have to do *something* for unions.
fn visit_union(&mut self, v: MPlaceTy<'tcx, Borrow>) -> EvalResult<'tcx>
{
// With unions, we fall back to whatever the type says, to hopefully be consistent
// with LLVM IR.
// FIXME Are we consistent? And is this really the behavior we want?
// FIXME: are we consistent, and is this really the behavior we want?
let frozen = self.ecx.type_is_freeze(v.layout.ty);
if frozen {
Ok(())
@ -198,10 +199,10 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
}
}
// We should never get to a primitive, but always short-circuit somewhere above
// We should never get to a primitive, but always short-circuit somewhere above.
fn visit_primitive(&mut self, _v: MPlaceTy<'tcx, Borrow>) -> EvalResult<'tcx>
{
bug!("We should always short-circit before coming to a primitive")
bug!("we should always short-circuit before coming to a primitive")
}
}
}

View File

@ -1,9 +1,8 @@
use rustc::mir;
use rustc::mir::interpret::{EvalResult, PointerArithmetic};
use rustc::ty::layout::{self, LayoutOf, Size};
use rustc::ty;
use rustc::mir::interpret::{EvalResult, PointerArithmetic};
use crate::{
PlaceTy, OpTy, ImmTy, Immediate, Scalar, ScalarMaybeUndef, Borrow,
OperatorEvalContextExt
@ -268,7 +267,7 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
// However, this only affects direct calls of the intrinsic; calls to the stable
// functions wrapping them do get their validation.
// FIXME: should we check that the destination pointer is aligned even for ZSTs?
if !dest.layout.is_zst() { // nothing to do for ZST
if !dest.layout.is_zst() {
match dest.layout.abi {
layout::Abi::Scalar(ref s) => {
let x = Scalar::from_int(0, s.value.size(this));
@ -443,7 +442,7 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
// However, this only affects direct calls of the intrinsic; calls to the stable
// functions wrapping them do get their validation.
// FIXME: should we check alignment for ZSTs?
if !dest.layout.is_zst() { // nothing to do for ZST
if !dest.layout.is_zst() {
match dest.layout.abi {
layout::Abi::Scalar(..) => {
let x = ScalarMaybeUndef::Undef;

View File

@ -4,7 +4,6 @@
#[macro_use]
extern crate log;
// From rustc.
extern crate syntax;
#[macro_use]
@ -13,20 +12,6 @@ extern crate rustc_data_structures;
extern crate rustc_mir;
extern crate rustc_target;
use std::collections::HashMap;
use std::borrow::Cow;
use rustc::ty::{self, TyCtxt, query::TyCtxtAt};
use rustc::ty::layout::{LayoutOf, Size, Align};
use rustc::hir::{self, def_id::DefId};
use rustc::mir;
use syntax::attr;
use syntax::source_map::DUMMY_SP;
pub use rustc_mir::interpret::*;
pub use rustc_mir::interpret::{self, AllocMap, PlaceTy}; // resolve ambiguity
mod fn_call;
mod operator;
mod intrinsic;
@ -36,20 +21,34 @@ mod range_map;
mod mono_hash_map;
mod stacked_borrows;
use std::collections::HashMap;
use std::borrow::Cow;
use rustc::ty::{self, TyCtxt, query::TyCtxtAt};
use rustc::ty::layout::{LayoutOf, Size, Align};
use rustc::hir::{self, def_id::DefId};
use rustc::mir;
pub use rustc_mir::interpret::*;
// Resolve ambiguity.
pub use rustc_mir::interpret::{self, AllocMap, PlaceTy};
use syntax::attr;
use syntax::source_map::DUMMY_SP;
pub use crate::fn_call::EvalContextExt as MissingFnsEvalContextExt;
pub use crate::operator::EvalContextExt as OperatorEvalContextExt;
pub use crate::intrinsic::EvalContextExt as IntrinsicEvalContextExt;
pub use crate::tls::{EvalContextExt as TlsEvalContextExt, TlsData};
use crate::range_map::RangeMap;
#[allow(unused_imports)] // FIXME rustc bug https://github.com/rust-lang/rust/issues/53682
// FIXME: rustc bug, issue <https://github.com/rust-lang/rust/issues/53682>.
#[allow(unused_imports)]
pub use crate::helpers::{EvalContextExt as HelpersEvalContextExt};
use crate::mono_hash_map::MonoHashMap;
pub use crate::stacked_borrows::{EvalContextExt as StackedBorEvalContextExt};
// Used by priroda
// Used by priroda.
pub use crate::stacked_borrows::{Borrow, Stack, Stacks, BorStackItem};
/// Insert rustc arguments at the beginning of the argument list that miri wants to be
/// Insert rustc arguments at the beginning of the argument list that Miri wants to be
/// set per default, for maximal validation power.
pub fn miri_default_args() -> &'static [&'static str] {
// The flags here should be kept in sync with what bootstrap adds when `test-miri` is
@ -57,14 +56,14 @@ pub fn miri_default_args() -> &'static [&'static str] {
&["-Zalways-encode-mir", "-Zmir-emit-retag", "-Zmir-opt-level=0", "--cfg=miri"]
}
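
These defaults are consumed by the `cargo-miri` wrapper shown earlier, which splices them in front of whatever arguments it received; a standalone sketch:

fn main() {
    let defaults = ["-Zalways-encode-mir", "-Zmir-emit-retag", "-Zmir-opt-level=0", "--cfg=miri"];
    let mut args: Vec<String> = vec!["--crate-name".to_string(), "foo".to_string()];
    args.splice(0..0, defaults.iter().map(ToString::to_string));
    assert_eq!(args[0], "-Zalways-encode-mir");
    assert_eq!(args[4], "--crate-name");
}
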
/// Configuration needed to spawn a Miri instance
/// Configuration needed to spawn a Miri instance.
#[derive(Clone)]
pub struct MiriConfig {
pub validate: bool,
pub args: Vec<String>,
}
// Used by priroda
// Used by priroda.
pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
main_id: DefId,
@ -105,14 +104,15 @@ pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
)));
}
// Return value (in static memory so that it does not count as leak)
// Return value (in static memory so that it does not count as leak).
let ret = ecx.layout_of(start_mir.return_ty())?;
let ret_ptr = ecx.allocate(ret, MiriMemoryKind::MutStatic.into());
// Push our stack frame
// Push our stack frame.
ecx.push_stack_frame(
start_instance,
DUMMY_SP, // there is no call site, we want no span
// There is no call site.
DUMMY_SP,
start_mir,
Some(ret_ptr.into()),
StackPopCleanup::None { cleanup: true },
@ -120,26 +120,26 @@ pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
let mut args = ecx.frame().mir.args_iter();
// First argument: pointer to main()
// First argument: pointer to `main()`.
let main_ptr = ecx.memory_mut().create_fn_alloc(main_instance).with_default_tag();
let dest = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
ecx.write_scalar(Scalar::Ptr(main_ptr), dest)?;
// Second argument (argc): 1
// Second argument (argc): `1`.
let dest = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
let argc = Scalar::from_uint(config.args.len() as u128, dest.layout.size);
ecx.write_scalar(argc, dest)?;
// Store argc for macOS _NSGetArgc
// Store argc for macOS: `_NSGetArgc`.
{
let argc_place = ecx.allocate(dest.layout, MiriMemoryKind::Env.into());
ecx.write_scalar(argc, argc_place.into())?;
ecx.machine.argc = Some(argc_place.ptr.to_ptr()?);
}
// FIXME: extract main source file path
// Third argument (argv): Created from config.args
// FIXME: extract main source file path.
// Third argument (`argv`): created from `config.args`.
let dest = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
// For Windows, construct a command string with all the aguments
// For Windows, construct a command string with all the arguments.
let mut cmd = String::new();
for arg in config.args.iter() {
if !cmd.is_empty() {
@ -147,11 +147,12 @@ pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
}
cmd.push_str(&*shell_escape::windows::escape(arg.as_str().into()));
}
cmd.push(std::char::from_u32(0).unwrap()); // don't forget 0 terminator
// Don't forget `0` terminator.
cmd.push(std::char::from_u32(0).unwrap());
// Collect the pointers to the individual strings.
let mut argvs = Vec::<Pointer<Borrow>>::new();
for arg in config.args {
// Add 0 terminator
// Add `0` terminator.
let mut arg = arg.into_bytes();
arg.push(0);
argvs.push(ecx.memory_mut().allocate_static_bytes(arg.as_slice()).with_default_tag());
@ -164,16 +165,16 @@ pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
ecx.write_scalar(Scalar::Ptr(arg), place.into())?;
}
ecx.memory_mut().mark_immutable(argvs_place.to_ptr()?.alloc_id)?;
// Write a pointe to that place as the argument.
// Write a pointer to that place as the argument.
let argv = argvs_place.ptr;
ecx.write_scalar(argv, dest)?;
// Store argv for macOS _NSGetArgv
// Store `argv` for macOS `_NSGetArgv`.
{
let argv_place = ecx.allocate(dest.layout, MiriMemoryKind::Env.into());
ecx.write_scalar(argv, argv_place.into())?;
ecx.machine.argv = Some(argv_place.ptr.to_ptr()?);
}
// Store cmdline as UTF-16 for Windows GetCommandLineW
// Store command line as UTF-16 for Windows `GetCommandLineW`.
{
let tcx = &{ecx.tcx.tcx};
let cmd_utf16: Vec<u16> = cmd.encode_utf16().collect();
@ -183,7 +184,7 @@ pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
MiriMemoryKind::Env.into(),
).with_default_tag();
ecx.machine.cmd_line = Some(cmd_ptr);
// store the UTF-16 string
// Store the UTF-16 string.
let char_size = Size::from_bytes(2);
let cmd_alloc = ecx.memory_mut().get_mut(cmd_ptr.alloc_id)?;
let mut cur_ptr = cmd_ptr;
@ -208,9 +209,9 @@ pub fn eval_main<'a, 'tcx: 'a>(
main_id: DefId,
config: MiriConfig,
) {
let mut ecx = create_ecx(tcx, main_id, config).expect("Couldn't create ecx");
let mut ecx = create_ecx(tcx, main_id, config).expect("couldn't create ecx");
// Run! The main execution.
// Perform the main execution.
let res: EvalResult = (|| {
ecx.run()?;
ecx.run_tls_dtors()
@ -243,7 +244,7 @@ pub fn eval_main<'a, 'tcx: 'a>(
let mut err = struct_error(ecx.tcx.tcx.at(span), msg.as_str());
let frames = ecx.generate_stacktrace(None);
err.span_label(span, e);
// we iterate with indices because we need to look at the next frame (the caller)
// We iterate with indices because we need to look at the next frame (the caller).
for idx in 0..frames.len() {
let frame_info = &frames[idx];
let call_site_is_local = frames.get(idx+1).map_or(false,
@ -273,16 +274,15 @@ pub fn eval_main<'a, 'tcx: 'a>(
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MiriMemoryKind {
/// `__rust_alloc` memory
/// `__rust_alloc` memory.
Rust,
/// `malloc` memory
/// `malloc` memory.
C,
/// Part of env var emulation
/// Part of env var emulation.
Env,
/// mutable statics
/// Mutable statics.
MutStatic,
}
@ -305,27 +305,27 @@ impl MayLeak for MiriMemoryKind {
}
pub struct Evaluator<'tcx> {
/// Environment variables set by `setenv`
/// Miri does not expose env vars from the host to the emulated program
/// Environment variables set by `setenv`.
/// Miri does not expose env vars from the host to the emulated program.
pub(crate) env_vars: HashMap<Vec<u8>, Pointer<Borrow>>,
/// Program arguments (`Option` because we can only initialize them after creating the ecx).
/// These are *pointers* to argc/argv because macOS.
/// We also need the full cmdline as one string because Window.
/// We also need the full command line as one string because of Windows.
pub(crate) argc: Option<Pointer<Borrow>>,
pub(crate) argv: Option<Pointer<Borrow>>,
pub(crate) cmd_line: Option<Pointer<Borrow>>,
/// Last OS error
/// Last OS error.
pub(crate) last_error: u32,
/// TLS state
/// TLS state.
pub(crate) tls: TlsData<'tcx>,
/// Whether to enforce the validity invariant
/// Whether to enforce the validity invariant.
pub(crate) validate: bool,
/// Stacked Borrows state
/// Stacked Borrows state.
pub(crate) stacked_borrows: stacked_borrows::State,
}
@ -344,10 +344,11 @@ impl<'tcx> Evaluator<'tcx> {
}
}
#[allow(dead_code)] // FIXME https://github.com/rust-lang/rust/issues/47131
// FIXME: rustc issue <https://github.com/rust-lang/rust/issues/47131>.
#[allow(dead_code)]
type MiriEvalContext<'a, 'mir, 'tcx> = EvalContext<'a, 'mir, 'tcx, Evaluator<'tcx>>;
// A little trait that's useful to be inherited by extension traits
// A little trait that's useful to be inherited by extension traits.
pub trait MiriEvalContextExt<'a, 'mir, 'tcx> {
fn eval_context_ref(&self) -> &MiriEvalContext<'a, 'mir, 'tcx>;
fn eval_context_mut(&mut self) -> &mut MiriEvalContext<'a, 'mir, 'tcx>;
@ -380,7 +381,7 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
ecx.machine.validate
}
/// Returns Ok() when the function was handled, fail otherwise
/// Returns `Ok()` when the function was handled; fails otherwise.
#[inline(always)]
fn find_fn(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
@ -417,7 +418,7 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
dest: PlaceTy<'tcx, Borrow>,
) -> EvalResult<'tcx> {
trace!("box_alloc for {:?}", dest.layout.ty);
// Call the `exchange_malloc` lang item
// Call the `exchange_malloc` lang item.
let malloc = ecx.tcx.lang_items().exchange_malloc_fn().unwrap();
let malloc = ty::Instance::mono(ecx.tcx.tcx, malloc);
let malloc_mir = ecx.load_mir(malloc.def)?;
@ -426,28 +427,31 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
malloc_mir.span,
malloc_mir,
Some(dest),
// Don't do anything when we are done. The statement() function will increment
// Don't do anything when we are done. The `statement()` function will increment
// the old stack frame's stmt counter to the next statement, which means that when
// exchange_malloc returns, we go on evaluating exactly where we want to be.
// `exchange_malloc` returns, we go on evaluating exactly where we want to be.
StackPopCleanup::None { cleanup: true },
)?;
let mut args = ecx.frame().mir.args_iter();
let layout = ecx.layout_of(dest.layout.ty.builtin_deref(false).unwrap().ty)?;
// First argument: size
// (0 is allowed here, this is expected to be handled by the lang item)
// First argument: `size`.
// (`0` is allowed here -- this is expected to be handled by the lang item).
let arg = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
let size = layout.size.bytes();
ecx.write_scalar(Scalar::from_uint(size, arg.layout.size), arg)?;
// Second argument: align
// Second argument: `align`.
let arg = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
let align = layout.align.abi.bytes();
ecx.write_scalar(Scalar::from_uint(align, arg.layout.size), arg)?;
// No more arguments
assert!(args.next().is_none(), "exchange_malloc lang item has more arguments than expected");
// No more arguments.
assert!(
args.next().is_none(),
"`exchange_malloc` lang item has more arguments than expected"
);
Ok(())
}
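
The two arguments written here are just the size and alignment of the boxed type's layout; a quick standalone check for a hypothetical `Box<u64>`:

use std::alloc::Layout;

fn main() {
    // For `Box<u64>`, the allocation call is made with size 8, align 8.
    let layout = Layout::new::<u64>();
    assert_eq!((layout.size(), layout.align()), (8, 8));
}
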
@ -464,7 +468,7 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
let alloc = match &link_name[..] {
"__cxa_thread_atexit_impl" => {
// This should be all-zero, pointer-sized
// This should be all-zero, pointer-sized.
let size = tcx.data_layout.pointer_size;
let data = vec![0; size.bytes() as usize];
let extra = AllocationExtra::memory_allocated(size, memory_extra);
@ -480,7 +484,7 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
#[inline(always)]
fn before_terminator(_ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>) -> EvalResult<'tcx>
{
// We are not interested in detecting loops
// We are not interested in detecting loops.
Ok(())
}
@ -513,16 +517,16 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
mutability: Option<hir::Mutability>,
) -> EvalResult<'tcx, Scalar<Borrow>> {
let size = ecx.size_and_align_of_mplace(place)?.map(|(size, _)| size)
// for extern types, just cover what we can
// For extern types, just cover what we can.
.unwrap_or_else(|| place.layout.size);
if !ecx.tcx.sess.opts.debugging_opts.mir_emit_retag ||
!Self::enforce_validity(ecx) || size == Size::ZERO
{
// No tracking
// No tracking.
Ok(place.ptr)
} else {
ecx.ptr_dereference(place, size, mutability.into())?;
// We never change the pointer
// We never change the pointer.
Ok(place.ptr)
}
}
@ -534,7 +538,7 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
kind: MemoryKind<Self::MemoryKinds>,
) -> Pointer<Borrow> {
if !ecx.machine.validate {
// No tracking
// No tracking.
ptr.with_default_tag()
} else {
let tag = ecx.tag_new_allocation(ptr.alloc_id, kind);

View File

@ -1,6 +1,6 @@
//! This is a "monotonic HashMap": A HashMap that, when shared, can be pushed to but not
//! otherwise mutated. We also Box items in the map. This means we can safely provide
//! shared references into existing items in the HashMap, because they will not be dropped
//! This is a "monotonic `HashMap`": A `HashMap` that, when shared, can be pushed to but not
//! otherwise mutated. We also box items in the map. This means we can safely provide
//! shared references into existing items in the `HashMap`, because they will not be dropped
//! (from being removed) or moved (because they are boxed).
//! The API is completely tailored to what `memory.rs` needs. It is still in
//! a separate file to minimize the amount of code that has to care about the unsafety.
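
A minimal sketch of that pattern (names hypothetical): interior mutability allows insertion through a shared reference, and boxing keeps every value at a stable heap address even when the table reallocates. The real type hands out `&V` (the part that needs `unsafe`); a raw pointer is used here to make the stability observable.

use std::cell::RefCell;
use std::collections::HashMap;
use std::hash::Hash;

struct MonoMap<K: Hash + Eq, V>(RefCell<HashMap<K, Box<V>>>);

impl<K: Hash + Eq, V> MonoMap<K, V> {
    fn insert(&self, k: K, v: V) {
        self.0.borrow_mut().insert(k, Box::new(v));
    }
    fn get_ptr(&self, k: &K) -> Option<*const V> {
        self.0.borrow().get(k).map(|b| &**b as *const V)
    }
}

fn main() {
    let map = MonoMap(RefCell::new(HashMap::new()));
    map.insert(0u64, 42u32);
    let p = map.get_ptr(&0).unwrap();
    // Grow the table a lot; the box for key 0 never moves.
    for i in 1..1000u64 {
        map.insert(i, 0);
    }
    assert_eq!(map.get_ptr(&0), Some(p));
    assert_eq!(unsafe { *p }, 42);
}
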
@ -18,7 +18,7 @@ use crate::AllocMap;
pub struct MonoHashMap<K: Hash + Eq, V>(RefCell<FxHashMap<K, Box<V>>>);
impl<K: Hash + Eq, V> MonoHashMap<K, V> {
/// This function exists for priroda to be able to iterate over all evaluator memory
/// This function exists for priroda to be able to iterate over all evaluator memory.
///
/// The function is somewhat roundabout with the closure argument because internally the
/// `MonoHashMap` uses a `RefCell`. When iterating over the `HashMap` inside the `RefCell`,

View File

@ -60,7 +60,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
_ => {},
}
// Now we expect no more fat pointers
// Now we expect no more fat pointers.
let left_layout = left.layout;
let left = left.to_scalar()?;
let right_layout = right.layout;
@ -149,9 +149,9 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
if left.alloc_id == right.alloc_id {
left.offset == right.offset
} else {
// This accepts one-past-the end. So technically there is still
// This accepts one-past-the-end. Thus, there is still technically
// some non-determinism that we do not fully rule out when two
// allocations sit right next to each other. The C/C++ standards are
// somewhat fuzzy about this case, so I think for now this check is
// "good enough".
// Dead allocations in miri cannot overlap with live allocations, but
@ -159,17 +159,17 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
// both pointers to be live.
self.memory().check_bounds_ptr(left, InboundsCheck::Live)?;
self.memory().check_bounds_ptr(right, InboundsCheck::Live)?;
// Two in-bounds pointers, we can compare across allocations
// Two in-bounds pointers, we can compare across allocations.
left == right
}
}
// Comparing ptr and integer
// Comparing ptr and integer.
(Scalar::Ptr(ptr), Scalar::Bits { bits, size }) |
(Scalar::Bits { bits, size }, Scalar::Ptr(ptr)) => {
assert_eq!(size as u64, self.pointer_size().bytes());
let bits = bits as u64;
// Case I: Comparing with NULL
// Case I: Comparing with NULL.
if bits == 0 {
// Test if the ptr is in-bounds. Then it cannot be NULL.
// Even dangling pointers cannot be NULL.
@ -186,7 +186,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
if ptr.offset.bytes() % alloc_align.bytes() == 0 {
// The offset maintains the allocation alignment, so we know `base+offset`
// is aligned by `alloc_align`.
// FIXME: We could be even more general, e.g. offset 2 into a 4-aligned
// FIXME: We could be even more general, e.g., offset 2 into a 4-aligned
// allocation cannot equal 3.
if bits % alloc_align.bytes() != 0 {
// The integer is *not* aligned. So they cannot be equal.
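A worked instance of this argument with concrete (illustrative) numbers:

fn main() {
    // Suppose the allocation is 4-aligned and the pointer's offset is 8.
    let alloc_align: u64 = 4;
    let offset: u64 = 8;
    assert_eq!(offset % alloc_align, 0); // `base + offset` stays 4-aligned
    // Then `base + offset` is a multiple of 4 for *any* legal base, so it can
    // never equal a misaligned integer such as 3:
    let bits: u64 = 3;
    assert_ne!(bits % alloc_align, 0);
    for base in [4u64, 400, 4096] {
        assert_ne!(base + offset, bits);
    }
}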
@ -198,7 +198,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
{
// Compute the highest address at which this allocation could live.
// Subtract one more, because it must be possible to add the size
// to the base address without overflowing -- IOW, the very last address
// to the base address without overflowing; that is, the very last address
// of the address space is never dereferenceable (but it can be in-bounds, i.e.,
// one-past-the-end).
let max_base_addr =
@ -208,7 +208,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
) as u64;
if let Some(max_addr) = max_base_addr.checked_add(ptr.offset.bytes()) {
if bits > max_addr {
// The integer is too big, this cannot possibly be equal
// The integer is too big; this cannot possibly be equal.
return Ok(false)
}
}
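A simplified model of that bound, shrunk to a 16-bit address space to keep the numbers readable (the real code also subtracts one so the very last address stays non-dereferenceable):

fn main() {
    // An allocation of size 16 and alignment 8 can be based no higher than
    // (2^16 - 1) - 16, rounded down to a multiple of 8.
    let (size, align): (u32, u32) = (16, 8);
    let max_base_addr = ((u16::MAX as u32) - size) / align * align;
    let offset = 4;
    let max_addr = max_base_addr + offset;
    // Any integer above this bound cannot possibly equal `base + offset`.
    assert_eq!(max_addr, 65512 + 4);
    assert!(0xFFF0 > max_addr);
}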
@ -235,7 +235,8 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
Ok(match bin_op {
Sub =>
// The only way this can overflow is by underflowing, so signdeness of the right operands does not matter
// The only way this can overflow is by underflowing, so the signedness of the right
// operand does not matter.
map_to_primval(left.overflowing_signed_offset(-(right as i128), self)),
Add if signed =>
map_to_primval(left.overflowing_signed_offset(right as i128, self)),
@ -245,17 +246,17 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
BitAnd if !signed => {
let ptr_base_align = self.memory().get(left.alloc_id)?.align.bytes();
let base_mask = {
// FIXME: Use interpret::truncate, once that takes a Size instead of a Layout
// FIXME: use `interpret::truncate`, once that takes a `Size` instead of a `Layout`.
let shift = 128 - self.memory().pointer_size().bits();
let value = !(ptr_base_align as u128 - 1);
// truncate (shift left to drop out leftover values, shift right to fill with zeroes)
// Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
(value << shift) >> shift
};
let ptr_size = self.memory().pointer_size().bytes() as u8;
trace!("Ptr BitAnd, align {}, operand {:#010x}, base_mask {:#010x}",
trace!("ptr BitAnd, align {}, operand {:#010x}, base_mask {:#010x}",
ptr_base_align, right, base_mask);
if right & base_mask == base_mask {
// Case 1: The base address bits are all preserved, i.e., right is all-1 there
// Case 1: the base address bits are all preserved, i.e., right is all-1 there.
let offset = (left.offset.bytes() as u128 & right) as u64;
(
Scalar::Ptr(Pointer::new_with_tag(
@ -266,7 +267,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
false,
)
} else if right & base_mask == 0 {
// Case 2: The base address bits are all taken away, i.e., right is all-0 there
// Case 2: the base address bits are all taken away, i.e., right is all-0 there.
(Scalar::Bits { bits: (left.offset.bytes() as u128) & right, size: ptr_size }, false)
} else {
return err!(ReadPointerAsBytes);
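A pointer-free model of the two accepted cases, using plain integers for the mask arithmetic (a sketch with made-up values, not the real `Scalar` machinery):

fn main() {
    // Model of `ptr & right` for a pointer into a 16-aligned allocation.
    let ptr_base_align: u64 = 16;
    let base_mask: u64 = !(ptr_base_align - 1); // ...1111_0000
    let offset: u64 = 5;

    // Case 1: `right` preserves all base-address bits (all-1 under the mask),
    // as in alignment checks like `ptr & !(align - 1)`.
    let right = base_mask | 0b0100;
    assert_eq!(right & base_mask, base_mask);
    assert_eq!(offset & right, 4); // result only depends on the offset

    // Case 2: `right` discards all base-address bits (all-0 under the mask),
    // as in `ptr & (align - 1)`.
    let right = ptr_base_align - 1;
    assert_eq!(right & base_mask, 0);
    assert_eq!(offset & right, 5); // result is a pure integer
}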
@ -275,43 +276,57 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
Rem if !signed => {
// Doing modulo a divisor of the alignment is allowed.
// (Intuition: Modulo a divisor leaks less information.)
// (Intuition: modulo a divisor leaks less information.)
let ptr_base_align = self.memory().get(left.alloc_id)?.align.bytes();
let right = right as u64;
let ptr_size = self.memory().pointer_size().bytes() as u8;
if right == 1 {
// modulo 1 is always 0
// Modulo 1 is always 0.
(Scalar::Bits { bits: 0, size: ptr_size }, false)
} else if ptr_base_align % right == 0 {
// the base address would be cancelled out by the modulo operation, so we can
// just take the modulo of the offset
(Scalar::Bits { bits: (left.offset.bytes() % right) as u128, size: ptr_size }, false)
// The base address would be cancelled out by the modulo operation, so we can
// just take the modulo of the offset.
(
Scalar::Bits {
bits: (left.offset.bytes() % right) as u128,
size: ptr_size
},
false,
)
} else {
return err!(ReadPointerAsBytes);
}
}
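Why dividing the alignment makes the modulo well-defined, as a small worked check (illustrative numbers only):

fn main() {
    // `ptr % right` is well-defined when `right` divides the allocation
    // alignment, because the (unknown) base address contributes nothing.
    let ptr_base_align: u64 = 8;
    let offset: u64 = 13;
    let right: u64 = 4;
    assert_eq!(ptr_base_align % right, 0);
    // For any base that is a multiple of 8, (base + 13) % 4 == 13 % 4.
    for base in [8u64, 16, 64, 4096] {
        assert_eq!((base + offset) % right, offset % right);
    }
}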
_ => {
let msg = format!("unimplemented binary op on pointer {:?}: {:?}, {:?} ({})", bin_op, left, right, if signed { "signed" } else { "unsigned" });
let msg = format!(
"unimplemented binary op on pointer {:?}: {:?}, {:?} ({})",
bin_op,
left,
right,
if signed { "signed" } else { "unsigned" }
);
return err!(Unimplemented(msg));
}
})
}
/// This function raises an error if the offset moves the pointer outside of its allocation. We consider
/// ZSTs their own huge allocation that doesn't overlap with anything (and nothing moves in there because the size is 0).
/// We also consider the NULL pointer its own separate allocation, and all the remaining integers pointers their own
/// allocation.
/// Raises an error if the offset moves the pointer outside of its allocation.
/// We consider ZSTs their own huge allocation that doesn't overlap with anything (and nothing
/// moves in there because the size is 0). We also consider the NULL pointer its own separate
/// allocation, and all the remaining integer pointers their own allocation.
fn pointer_offset_inbounds(
&self,
ptr: Scalar<Borrow>,
pointee_ty: Ty<'tcx>,
offset: i64,
) -> EvalResult<'tcx, Scalar<Borrow>> {
// FIXME: assuming here that type size is < i64::max_value()
// FIXME: assuming here that type size is less than `i64::max_value()`.
let pointee_size = self.layout_of(pointee_ty)?.size.bytes() as i64;
let offset = offset.checked_mul(pointee_size).ok_or_else(|| EvalErrorKind::Overflow(mir::BinOp::Mul))?;
// Now let's see what kind of pointer this is
let offset = offset
.checked_mul(pointee_size)
.ok_or_else(|| EvalErrorKind::Overflow(mir::BinOp::Mul))?;
// Now let's see what kind of pointer this is.
if let Scalar::Ptr(ptr) = ptr {
// Both the old and the new pointer must be in-bounds of a *live* allocation.
// (Of the same allocation, but that part is trivial with our representation.)
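At the surface-language level, this is the rule that separates `ptr::offset` (which must stay in-bounds of the same allocation) from `wrapping_offset`; a quick sketch:

fn main() {
    let a = [1u8, 2, 3, 4];
    let p = a.as_ptr();
    // Fine: one-past-the-end still counts as in-bounds.
    let _q = unsafe { p.offset(4) };
    // UB under this rule (Miri would flag it): two-past-the-end.
    // let _r = unsafe { p.offset(5) };
    // `wrapping_offset` makes no in-bounds promise, so it may wander:
    let _s = p.wrapping_offset(5);
}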

View File

@ -3,7 +3,7 @@
//! Implements a map from integer indices to data.
//! Rather than storing data for every index, internally, this maps entire ranges to the data.
//! To this end, the APIs all work on ranges, not on individual integers. Ranges are split as
//! necessary (e.g. when [0,5) is first associated with X, and then [1,2) is mutated).
//! necessary (e.g., when [0,5) is first associated with X, and then [1,2) is mutated).
//! Users must not depend on whether a range is coalesced or not, even though this is observable
//! via the iteration APIs.
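A toy trace of the splitting rule described above, using a flat `Vec` of ranges rather than the real `RangeMap` API (illustration only):

use std::ops::Range;

fn main() {
    // [0, 5) maps to 'X'; mutating [1, 2) forces splits at 1 and 2.
    let mut map: Vec<(Range<u64>, char)> = vec![(0..5, 'X')];
    map = vec![(0..1, 'X'), (1..2, 'X'), (2..5, 'X')]; // the split step
    map[1].1 = 'Y'; // mutation confined to [1, 2)
    assert_eq!(map, vec![(0..1, 'X'), (1..2, 'Y'), (2..5, 'X')]);
}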
@ -14,7 +14,8 @@ use rustc::ty::layout::Size;
#[derive(Clone, Debug)]
struct Elem<T> {
range: ops::Range<u64>, // the range covered by this element, never empty
/// The range covered by this element, never empty.
range: ops::Range<u64>,
data: T,
}
#[derive(Clone, Debug)]
@ -23,7 +24,7 @@ pub struct RangeMap<T> {
}
impl<T> RangeMap<T> {
/// Create a new RangeMap for the given size, and with the given initial value used for
/// Creates a new `RangeMap` for the given size, and with the given initial value used for
/// the entire range.
#[inline(always)]
pub fn new(size: Size, init: T) -> RangeMap<T> {
@ -38,9 +39,9 @@ impl<T> RangeMap<T> {
map
}
/// Find the index containing the given offset.
/// Finds the index containing the given offset.
fn find_offset(&self, offset: u64) -> usize {
// We do a binary search
// We do a binary search.
let mut left = 0usize; // inclusive
let mut right = self.v.len(); // exclusive
loop {
@ -62,22 +63,23 @@ impl<T> RangeMap<T> {
}
}
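A standalone sketch of the same binary search, assuming (as the real code may) that the ranges are sorted and tile the whole space, so the loop always terminates:

use std::ops::Range;

/// Finds the index of the element whose range contains `offset`.
fn find_offset(v: &[Range<u64>], offset: u64) -> usize {
    let (mut left, mut right) = (0usize, v.len()); // left inclusive, right exclusive
    loop {
        let mid = left + (right - left) / 2;
        let range = &v[mid];
        if offset < range.start {
            right = mid; // look in the lower half
        } else if offset >= range.end {
            left = mid + 1; // look in the upper half
        } else {
            return mid;
        }
    }
}

fn main() {
    let v = [0..4, 4..10, 10..20];
    assert_eq!(find_offset(&v, 0), 0);
    assert_eq!(find_offset(&v, 9), 1);
    assert_eq!(find_offset(&v, 19), 2);
}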
/// Provide read-only iteration over everything in the given range. This does
/// *not* split items if they overlap with the edges. Do not use this to mutate
/// Provides read-only iteration over everything in the given range. This does
/// *not* split items if they overlap with the edges. Do not use this to mutate
/// through interior mutability.
pub fn iter<'a>(&'a self, offset: Size, len: Size) -> impl Iterator<Item = &'a T> + 'a {
let offset = offset.bytes();
let len = len.bytes();
// Compute a slice starting with the elements we care about
// Compute a slice starting with the elements we care about.
let slice: &[Elem<T>] = if len == 0 {
// We just need any empty iterator. We don't even want to
// We just need any empty iterator. We don't even want to
// yield the element that surrounds this position.
&[]
} else {
let first_idx = self.find_offset(offset);
&self.v[first_idx..]
};
let end = offset + len; // the first offset that is not included any more
// The first offset that is not included any more.
let end = offset + len;
slice.iter()
.take_while(move |elem| elem.range.start < end)
.map(|elem| &elem.data)
@ -87,25 +89,25 @@ impl<T> RangeMap<T> {
self.v.iter_mut().map(|elem| &mut elem.data)
}
// Split the element situated at the given `index`, such that the 2nd one starts at offset `split_offset`.
// Do nothing if the element already starts there.
// Return whether a split was necessary.
// Splits the element situated at the given `index`, such that the 2nd one starts at offset
// `split_offset`. Does nothing if the element already starts there.
// Returns whether a split was necessary.
fn split_index(&mut self, index: usize, split_offset: u64) -> bool
where
T: Clone,
{
let elem = &mut self.v[index];
if split_offset == elem.range.start || split_offset == elem.range.end {
// Nothing to do
// Nothing to do.
return false;
}
debug_assert!(elem.range.contains(&split_offset),
"The split_offset is not in the element to be split");
"the `split_offset` is not in the element to be split");
// Now we really have to split. Reduce length of first element.
// Now we really have to split. Reduce length of first element.
let second_range = split_offset..elem.range.end;
elem.range.end = split_offset;
// Copy the data, and insert 2nd element
// Copy the data, and insert second element.
let second = Elem {
range: second_range,
data: elem.data.clone(),
@ -114,7 +116,7 @@ impl<T> RangeMap<T> {
return true;
}
/// Provide mutable iteration over everything in the given range. As a side-effect,
/// Provides mutable iteration over everything in the given range. As a side-effect,
/// this will split entries in the map that are only partially hit by the given range,
/// to make sure that when they are mutated, the effect is constrained to the given range.
/// Moreover, this will opportunistically merge neighbouring equal blocks.
@ -130,7 +132,7 @@ impl<T> RangeMap<T> {
let len = len.bytes();
// Compute a slice containing exactly the elements we care about.
let slice: &mut [Elem<T>] = if len == 0 {
// We just need any empty iterator. We don't even want to
// We just need any empty iterator. We don't even want to
// yield the element that surrounds this position, nor do
// any splitting.
&mut []
@ -142,7 +144,7 @@ impl<T> RangeMap<T> {
first_idx += 1;
}
let first_idx = first_idx; // no more mutation
// Find our end. Linear scan, but that's okay because the iteration
// Find our end. Linear scan, but that's ok because the iteration
// is doing the same linear scan anyway -- no increase in complexity.
// We combine this scan with a scan for duplicates that we can merge, to reduce
// the number of elements.
@ -150,7 +152,7 @@ impl<T> RangeMap<T> {
// amounts of time on the merging.
let mut equal_since_idx = first_idx;
// Once we see too many non-mergeable blocks, we stop.
// The initial value is chosen via... magic. Benchmarking and magic.
// The initial value is chosen via... magic. Benchmarking and magic.
let mut successful_merge_count = 3usize;
let mut end_idx = first_idx; // when the loop is done, this is the first excluded element.
loop {
@ -162,7 +164,7 @@ impl<T> RangeMap<T> {
// see if we want to merge everything in `equal_since..end` (exclusive at the end!)
if successful_merge_count > 0 {
if done || self.v[end_idx].data != self.v[equal_since_idx].data {
// Everything in `equal_since..end` was equal. Make them just one element covering
// Everything in `equal_since..end` was equal. Make them just one element covering
// the entire range.
let removed_elems = end_idx - equal_since_idx - 1; // number of elements that we would remove
if removed_elems > 0 {
@ -173,10 +175,10 @@ impl<T> RangeMap<T> {
self.v.splice(equal_since_idx+1..end_idx, std::iter::empty());
// Adjust `end_idx` because we made the list shorter.
end_idx -= removed_elems;
// adjust the count for the cutoff
// Adjust the count for the cutoff.
successful_merge_count += removed_elems;
} else {
// adjust the count for the cutoff
// Adjust the count for the cutoff.
successful_merge_count -= 1;
}
// Go on scanning for the next block starting here.
@ -188,8 +190,9 @@ impl<T> RangeMap<T> {
break;
}
}
let end_idx = end_idx-1; // Move to last included instead of first excluded index.
// We need to split the end as well. Even if this performs a
// Move to last included instead of first excluded index.
let end_idx = end_idx-1;
// We need to split the end as well. Even if this performs a
// split, we don't have to adjust our index as we only care about
// the first part of the split.
self.split_index(end_idx, offset+len);
@ -220,15 +223,15 @@ mod tests {
#[test]
fn basic_insert() {
let mut map = RangeMap::<i32>::new(Size::from_bytes(20), -1);
// Insert
// Insert.
for x in map.iter_mut(Size::from_bytes(10), Size::from_bytes(1)) {
*x = 42;
}
// Check
// Check.
assert_eq!(to_vec(&map, 10, 1), vec![42]);
assert_eq!(map.v.len(), 3);
// Insert with size 0
// Insert with size 0.
for x in map.iter_mut(Size::from_bytes(10), Size::from_bytes(0)) {
*x = 19;
}
@ -275,11 +278,11 @@ mod tests {
to_vec(&map, 10, 10),
vec![23, 42, 23, 23, 23, 19, 19, 19, 19, 19]
);
// Should be seeing two blocks with 19
// Should be seeing two blocks with 19.
assert_eq!(map.iter(Size::from_bytes(15), Size::from_bytes(2))
.map(|&t| t).collect::<Vec<_>>(), vec![19, 19]);
// a NOP iter_mut should trigger merging
// A NOP `iter_mut` should trigger merging.
for x in map.iter_mut(Size::from_bytes(15), Size::from_bytes(5)) { }
assert_eq!(map.v.len(), 5);
assert_eq!(

View File

@ -15,16 +15,15 @@ use crate::{
pub type Timestamp = u64;
pub type CallId = u64;
/// Information about which kind of borrow was used to create the reference this is tagged
/// with.
/// Information about which kind of borrow was used to create the reference this is tagged with.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Borrow {
/// A unique (mutable) reference.
Uniq(Timestamp),
/// An aliasing reference. This is also used by raw pointers, which do not track details
/// An aliasing reference. This is also used by raw pointers, which do not track details
/// of how or when they were created, hence the timestamp is optional.
/// Shr(Some(_)) does NOT mean that the destination of this reference is frozen;
/// that depends on the type! Only those parts outside of an `UnsafeCell` are actually
/// `Shr(Some(_))` does *not* mean that the destination of this reference is frozen;
/// that depends on the type! Only those parts outside of an `UnsafeCell` are actually
/// frozen.
Alias(Option<Timestamp>),
}
@ -53,23 +52,25 @@ impl Default for Borrow {
}
}
/// An item in the per-location borrow stack
/// An item in the per-location borrow stack.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum BorStackItem {
/// Indicates the unique reference that may mutate.
Uniq(Timestamp),
/// Indicates that the location has been mutably shared. Used for raw pointers as
/// Indicates that the location has been mutably shared. Used for raw pointers as
/// well as for unfrozen shared references.
Raw,
/// A barrier, tracking the function it belongs to by its index on the call stack
/// A barrier, tracking the function it belongs to by its index on the call stack.
FnBarrier(CallId)
}
/// Extra per-location state
/// Extra per-location state.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Stack {
borrows: Vec<BorStackItem>, // used as a stack; never empty
frozen_since: Option<Timestamp>, // virtual frozen "item" on top of the stack
/// Used as the stack; never empty.
borrows: Vec<BorStackItem>,
/// A virtual frozen "item" on top of the stack.
frozen_since: Option<Timestamp>,
}
impl Stack {
@ -79,18 +80,18 @@ impl Stack {
}
}
/// What kind of reference is being used?
/// Indicates which kind of reference is being used.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum RefKind {
/// &mut
/// `&mut`.
Unique,
/// & without interior mutability
/// `&` without interior mutability.
Frozen,
/// * (raw pointer) or & to `UnsafeCell`
/// `*` (raw pointer) or `&` to `UnsafeCell`.
Raw,
}
/// What kind of access is being performed?
/// Indicates which kind of access is being performed.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum AccessKind {
Read,
@ -98,7 +99,7 @@ pub enum AccessKind {
Dealloc,
}
/// Extra global state in the memory, available to the memory access hooks
/// Extra global state in the memory, available to the memory access hooks.
#[derive(Debug)]
pub struct BarrierTracking {
next_id: CallId,
@ -133,7 +134,7 @@ impl BarrierTracking {
}
}
/// Extra global machine state
/// Extra global machine state.
#[derive(Clone, Debug)]
pub struct State {
clock: Timestamp
@ -153,7 +154,7 @@ impl State {
}
}
/// Extra per-allocation state
/// Extra per-allocation state.
#[derive(Clone, Debug)]
pub struct Stacks {
// Even reading memory can have effects on the stack, so we need a `RefCell` here.
@ -164,18 +165,19 @@ pub struct Stacks {
/// Core per-location operations: deref, access, create.
/// We need to make at least the following things true:
///
/// U1: After creating a Uniq, it is at the top (+unfrozen).
/// U2: If the top is Uniq (+unfrozen), accesses must be through that Uniq or pop it.
/// U3: If an access (deref sufficient?) happens with a Uniq, it requires the Uniq to be in the stack.
/// U1: After creating a `Uniq`, it is at the top (and unfrozen).
/// U2: If the top is `Uniq` (and unfrozen), accesses must be through that `Uniq` or pop it.
/// U3: If an access (deref sufficient?) happens with a `Uniq`, it requires the `Uniq`
/// to be in the stack.
///
/// F1: After creating a &, the parts outside `UnsafeCell` are frozen.
/// F1: After creating a `&`, the parts outside `UnsafeCell` are frozen.
/// F2: If a write access happens, it unfreezes.
/// F3: If an access (well, a deref) happens with an & outside `UnsafeCell`, it requires the location to still be frozen.
/// F3: If an access (well, a deref) happens with an `&` outside `UnsafeCell`,
/// it requires the location to still be frozen.
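A hedged surface-level example of U2 and U3 in action (this runs clean as written; uncommenting the final write through `y` is what Miri reports):

fn main() {
    let mut local = 0i32;
    let x = &mut local;
    let y = unsafe { &mut *(x as *mut i32) }; // a second, aliasing `Uniq`
    // Writing through `x` pops `y`'s item off the per-location stack (U2)...
    *x = 1;
    // ...so an access through `y` would now violate U3:
    // *y = 2;
    let _ = y;
}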
impl<'tcx> Stack {
/// Deref `bor`: Check if the location is frozen and the tag in the stack.
/// This dos *not* constitute an access! "Deref" refers to the `*` operator
/// Deref `bor`: check if the location is frozen and whether the tag is in the stack.
/// This does *not* constitute an access! "Deref" refers to the `*` operator
/// in Rust, and includes cases like `&*x` or `(*x).foo` where no or only part
/// of the memory actually gets accessed. Also we cannot know if we are
/// of the memory actually gets accessed. Also we cannot know if we are
/// going to read or write.
/// Returns the index of the item we matched, `None` if it was the frozen one.
/// `kind` indicates which kind of reference is being dereferenced.
@ -186,44 +188,46 @@ impl<'tcx> Stack {
) -> Result<Option<usize>, String> {
// Exclude unique ref with frozen tag.
if let (RefKind::Unique, Borrow::Alias(Some(_))) = (kind, bor) {
return Err(format!("Encountered mutable reference with frozen tag ({:?})", bor));
return Err(format!("encountered mutable reference with frozen tag ({:?})", bor));
}
// Checks related to freezing
// Checks related to freezing.
match bor {
Borrow::Alias(Some(bor_t)) if kind == RefKind::Frozen => {
// We need the location to be frozen. This ensures F3.
let frozen = self.frozen_since.map_or(false, |itm_t| itm_t <= bor_t);
return if frozen { Ok(None) } else {
Err(format!("Location is not frozen long enough"))
Err(format!("location is not frozen long enough"))
}
}
Borrow::Alias(_) if self.frozen_since.is_some() => {
return Ok(None) // Shared deref to frozen location, looking good
// Shared deref to frozen location; looking good.
return Ok(None)
}
_ => {} // Not sufficient, go on looking.
// Not sufficient; go on looking.
_ => {}
}
// If we got here, we have to look for our item in the stack.
for (idx, &itm) in self.borrows.iter().enumerate().rev() {
match (itm, bor) {
(BorStackItem::Uniq(itm_t), Borrow::Uniq(bor_t)) if itm_t == bor_t => {
// Found matching unique item. This satisfies U3.
// Found matching unique item. This satisfies U3.
return Ok(Some(idx))
}
(BorStackItem::Raw, Borrow::Alias(_)) => {
// Found matching aliasing/raw item.
return Ok(Some(idx))
}
// Go on looking. We ignore barriers! When an `&mut` and an `&` alias,
// Go on looking. We ignore barriers! When an `&mut` and an `&` alias,
// dereferencing the `&` is still possible (to reborrow), but doing
// an access is not.
_ => {}
}
}
// If we got here, we did not find our item. We have to error to satisfy U3.
// If we got here, we did not find our item. We have to error to satisfy U3.
Err(format!("Borrow being dereferenced ({:?}) does not exist on the borrow stack", bor))
}
/// Perform an actual memory access using `bor`. We do not know any types here
/// Performs an actual memory access using `bor`. We do not know any types here
/// or whether things should be frozen, but we *do* know if this is reading
/// or writing.
fn access(
@ -236,45 +240,44 @@ impl<'tcx> Stack {
// Not possible on writes!
if self.is_frozen() {
if kind == AccessKind::Read {
// When we are frozen, we just accept all reads. No harm in this.
// When we are frozen, we just accept all reads. No harm in this.
// The deref already checked that `Uniq` items are in the stack, and that
// the location is frozen if it should be.
return Ok(());
}
trace!("access: Unfreezing");
trace!("access: unfreezing");
}
// Unfreeze on writes. This ensures F2.
// Unfreeze on writes. This ensures F2.
self.frozen_since = None;
// Pop the stack until we have something matching.
while let Some(&itm) = self.borrows.last() {
match (itm, bor) {
(BorStackItem::FnBarrier(call), _) if barrier_tracking.is_active(call) => {
return err!(MachineError(format!(
"Stopping looking for borrow being accessed ({:?}) because of barrier ({})",
"stopping looking for borrow being accessed ({:?}) because of barrier ({})",
bor, call
)))
}
(BorStackItem::Uniq(itm_t), Borrow::Uniq(bor_t)) if itm_t == bor_t => {
// Found matching unique item. Continue after the match.
// Found matching unique item. Continue after the match.
}
(BorStackItem::Raw, _) if kind == AccessKind::Read => {
// When reading, everything can use a raw item!
// We do not want to do this when writing: Writing to an `&mut`
// should reaffirm its exclusivity (i.e., make sure it is
// on top of the stack).
// Continue after the match.
// on top of the stack). Continue after the match.
}
(BorStackItem::Raw, Borrow::Alias(_)) => {
// Found matching raw item. Continue after the match.
// Found matching raw item. Continue after the match.
}
_ => {
// Pop this, go on. This ensures U2.
// Pop this, go on. This ensures U2.
let itm = self.borrows.pop().unwrap();
trace!("access: Popping {:?}", itm);
continue
}
}
// If we got here, we found a matching item. Congratulations!
// If we got here, we found a matching item. Congratulations!
// However, we are not done yet: If this access is deallocating, we must make sure
// there are no active barriers remaining on the stack.
if kind == AccessKind::Dealloc {
@ -282,19 +285,19 @@ impl<'tcx> Stack {
match itm {
BorStackItem::FnBarrier(call) if barrier_tracking.is_active(call) => {
return err!(MachineError(format!(
"Deallocating with active barrier ({})", call
"deallocating with active barrier ({})", call
)))
}
_ => {},
}
}
}
// NOW we are done.
// Now we are done.
return Ok(())
}
// If we got here, we did not find our item.
err!(MachineError(format!(
"Borrow being accessed ({:?}) does not exist on the borrow stack",
"borrow being accessed ({:?}) does not exist on the borrow stack",
bor
)))
}
@ -304,7 +307,7 @@ impl<'tcx> Stack {
/// is met: We cannot push `Uniq` onto frozen stacks.
/// `kind` indicates which kind of reference is being created.
fn create(&mut self, bor: Borrow, kind: RefKind) {
// When creating a frozen reference, freeze. This ensures F1.
// When creating a frozen reference, freeze. This ensures F1.
// We also do *not* push anything else to the stack, making sure that no other kind
// of access (like writing through raw pointers) is permitted.
if kind == RefKind::Frozen {
@ -312,10 +315,13 @@ impl<'tcx> Stack {
Borrow::Alias(Some(t)) => t,
_ => bug!("Creating illegal borrow {:?} for frozen ref", bor),
};
// It is possible that we already are frozen (e.g. if we just pushed a barrier,
// It is possible that we already are frozen (e.g., if we just pushed a barrier,
// the redundancy check would not have kicked in).
match self.frozen_since {
Some(loc_t) => assert!(loc_t <= bor_t, "Trying to freeze location for longer than it was already frozen"),
Some(loc_t) => assert!(
loc_t <= bor_t,
"trying to freeze location for longer than it was already frozen"
),
None => {
trace!("create: Freezing");
self.frozen_since = Some(bor_t);
@ -323,7 +329,10 @@ impl<'tcx> Stack {
}
return;
}
assert!(self.frozen_since.is_none(), "Trying to create non-frozen reference to frozen location");
assert!(
self.frozen_since.is_none(),
"trying to create non-frozen reference to frozen location"
);
// Push new item to the stack.
let itm = match bor {
@ -334,15 +343,15 @@ impl<'tcx> Stack {
// This is just an optimization, no functional change: Avoid stacking
// multiple `Shr` on top of each other.
assert!(bor.is_aliasing());
trace!("create: Sharing a shared location is a NOP");
trace!("create: sharing a shared location is a NOP");
} else {
// This ensures U1.
trace!("create: Pushing {:?}", itm);
trace!("create: pushing {:?}", itm);
self.borrows.push(itm);
}
}
/// Add a barrier
/// Adds a barrier.
fn barrier(&mut self, call: CallId) {
let itm = BorStackItem::FnBarrier(call);
if *self.borrows.last().unwrap() == itm {
@ -350,9 +359,9 @@ impl<'tcx> Stack {
// multiple identical barriers on top of each other.
// This can happen when a function receives several shared references
// that overlap.
trace!("barrier: Avoiding redundant extra barrier");
trace!("barrier: avoiding redundant extra barrier");
} else {
trace!("barrier: Pushing barrier for call {}", call);
trace!("barrier: pushing barrier for call {}", call);
self.borrows.push(itm);
}
}
@ -360,7 +369,7 @@ impl<'tcx> Stack {
/// Higher-level per-location operations: deref, access, reborrow.
impl<'tcx> Stacks {
/// Check that this stack is fine with being dereferenced
/// Checks that this stack is fine with being dereferenced.
fn deref(
&self,
ptr: Pointer<Borrow>,
@ -406,14 +415,15 @@ impl<'tcx> Stacks {
new_kind: RefKind,
) -> EvalResult<'tcx> {
assert_eq!(new_bor.is_unique(), new_kind == RefKind::Unique);
trace!("reborrow for tag {:?} to {:?} as {:?}: {:?}, size {}",
ptr.tag, new_bor, new_kind, ptr, size.bytes());
trace!(
"reborrow for tag {:?} to {:?} as {:?}: {:?}, size {}",
ptr.tag, new_bor, new_kind, ptr, size.bytes(),
);
if new_kind == RefKind::Raw {
// No barrier for raw, including `&UnsafeCell`. They can rightfully
// alias with `&mut`.
// FIXME: This means that the `dereferencable` attribute on non-frozen shared
// references is incorrect! They are dereferencable when the function is
// called, but might become non-dereferencable during the course of execution.
// No barrier for raw, including `&UnsafeCell`. They can rightfully alias with `&mut`.
// FIXME: This means that the `dereferenceable` attribute on non-frozen shared references
// is incorrect! They are dereferenceable when the function is called, but might become
// non-dereferenceable during the course of execution.
// Also see [1], [2].
//
// [1]: <https://internals.rust-lang.org/t/
@ -428,7 +438,7 @@ impl<'tcx> Stacks {
let ptr_idx = stack.deref(ptr.tag, new_kind).map_err(EvalErrorKind::MachineError)?;
// If we can deref the new tag already, and if that tag lives higher on
// the stack than the one we come from, just use that.
// IOW, we check if `new_bor` *already* is "derived from" `ptr.tag`.
// That is, we check if `new_bor` *already* is "derived from" `ptr.tag`.
// This also checks frozenness, if required.
let bor_redundant = barrier.is_none() &&
match (ptr_idx, stack.deref(new_bor, new_kind)) {
@ -436,11 +446,11 @@ impl<'tcx> Stacks {
// above the old one in the stack, our job here is done.
(_, Ok(None)) => true,
(Some(ptr_idx), Ok(Some(new_idx))) if new_idx >= ptr_idx => true,
// Otherwise we need to create a new borrow.
// Otherwise, we need to create a new borrow.
_ => false,
};
if bor_redundant {
assert!(new_bor.is_aliasing(), "A unique reborrow can never be redundant");
assert!(new_bor.is_aliasing(), "a unique reborrow can never be redundant");
trace!("reborrow is redundant");
continue;
}
@ -460,7 +470,7 @@ impl<'tcx> Stacks {
}
}
/// Hooks and glue
/// Hooks and glue.
impl AllocationExtra<Borrow, MemoryState> for Stacks {
#[inline(always)]
fn memory_allocated<'tcx>(size: Size, extra: &MemoryState) -> Self {
@ -529,10 +539,10 @@ trait EvalContextPrivExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
let this = self.eval_context_mut();
let ptr = place.ptr.to_ptr()?;
let barrier = if fn_barrier { Some(this.frame().extra) } else { None };
trace!("reborrow: Creating new reference for {:?} (pointee {}): {:?}",
trace!("reborrow: creating new reference for {:?} (pointee {}): {:?}",
ptr, place.layout.ty, new_bor);
// Get the allocation. It might not be mutable, so we cannot use `get_mut`.
// Get the allocation. It might not be mutable, so we cannot use `get_mut`.
let alloc = this.memory().get(ptr.alloc_id)?;
alloc.check_bounds(this, ptr, size)?;
// Update the stacks.
@ -550,7 +560,7 @@ trait EvalContextPrivExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
Ok(())
}
/// Retag an indidual pointer, returning the retagged version.
/// Retags an individual pointer, returning the retagged version.
/// `mutbl` can be `None` to make this a raw pointer.
fn retag_reference(
&mut self,
@ -587,10 +597,10 @@ trait EvalContextPrivExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
// We immediately share it, to allow read accesses.
let two_phase_time = this.machine.stacked_borrows.increment_clock();
let two_phase_bor = Borrow::Alias(Some(two_phase_time));
this.reborrow(new_place, size, /*fn_barrier*/false, two_phase_bor)?;
this.reborrow(new_place, size, false /* fn_barrier */, two_phase_bor)?;
}
// Return new ptr.
// Return new pointer.
Ok(new_place.to_ref())
}
}
@ -607,29 +617,32 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
MemoryKind::Stack => {
// New unique borrow. This `Uniq` is not accessible by the program,
// so it will only ever be used when using the local directly (i.e.,
// not through a pointer). IOW, whenever we directly use a local this will pop
// everything else off the stack, invalidating all previous pointers
// and, in particular, *all* raw pointers. This subsumes the explicit
// not through a pointer). That is, whenever we directly use a local, this will pop
// everything else off the stack, invalidating all previous pointers,
// and in particular, *all* raw pointers. This subsumes the explicit
// `reset` which the blog post [1] says to perform when accessing a local.
//
// [1] https://www.ralfj.de/blog/2018/08/07/stacked-borrows.html
// [1]: <https://www.ralfj.de/blog/2018/08/07/stacked-borrows.html>
this.machine.stacked_borrows.increment_clock()
}
_ => {
// Nothing to do for everything else
// Nothing to do for everything else.
return Borrow::default()
}
};
// Make this the active borrow for this allocation
let alloc = this.memory_mut().get_mut(id).expect("This is a new allocation, it must still exist");
// Make this the active borrow for this allocation.
let alloc = this
.memory_mut()
.get_mut(id)
.expect("this is a new allocation; it must still exist");
let size = Size::from_bytes(alloc.bytes.len() as u64);
alloc.extra.first_item(BorStackItem::Uniq(time), size);
Borrow::Uniq(time)
}
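A hedged surface-level consequence of this `Uniq`-for-locals rule (the program is fine as written; uncommenting the last write is what gets reported):

fn main() {
    let mut local = 5i32;
    let raw = &mut local as *mut i32;
    unsafe { *raw = 6 }; // fine: `raw` sits above the local's `Uniq`
    local = 7; // direct use of the local pops everything above its `Uniq`
    // unsafe { *raw = 8 }; // would now be reported: `raw` was invalidated
}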
/// Called for value-to-place conversion. `mutability` is `None` for raw pointers.
/// Called for value-to-place conversion. `mutability` is `None` for raw pointers.
///
/// Note that this does NOT mean that all this memory will actually get accessed/referenced!
/// Note that this does *not* mean that all this memory will actually get accessed/referenced!
/// We could be in the middle of `&(*var).1`.
fn ptr_dereference(
&self,
@ -638,9 +651,15 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
mutability: Option<Mutability>,
) -> EvalResult<'tcx> {
let this = self.eval_context_ref();
trace!("ptr_dereference: Accessing {} reference for {:?} (pointee {})",
if let Some(mutability) = mutability { format!("{:?}", mutability) } else { format!("raw") },
place.ptr, place.layout.ty);
trace!(
"ptr_dereference: Accessing {} reference for {:?} (pointee {})",
if let Some(mutability) = mutability {
format!("{:?}", mutability)
} else {
format!("raw")
},
place.ptr, place.layout.ty
);
let ptr = place.ptr.to_ptr()?;
if mutability.is_none() {
// No further checks on raw derefs -- only the access itself will be checked.
@ -653,18 +672,18 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
// If we got here, we do some checking, *but* we leave the tag unchanged.
if let Borrow::Alias(Some(_)) = ptr.tag {
assert_eq!(mutability, Some(MutImmutable));
// We need a frozen-sensitive check
// We need a frozen-sensitive check.
this.visit_freeze_sensitive(place, size, |cur_ptr, size, frozen| {
let kind = if frozen { RefKind::Frozen } else { RefKind::Raw };
alloc.extra.deref(cur_ptr, size, kind)
})?;
} else {
// Just treat this as one big chunk
// Just treat this as one big chunk.
let kind = if mutability == Some(MutMutable) { RefKind::Unique } else { RefKind::Raw };
alloc.extra.deref(ptr, size, kind)?;
}
// All is good
// All is good.
Ok(())
}
@ -679,22 +698,22 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
// making it useless.
fn qualify(ty: ty::Ty<'_>, kind: RetagKind) -> Option<(Option<Mutability>, bool)> {
match ty.sty {
// References are simple
// References are simple.
ty::Ref(_, _, mutbl) => Some((Some(mutbl), kind == RetagKind::FnEntry)),
// Raw pointers need to be enabled
// Raw pointers need to be enabled.
ty::RawPtr(..) if kind == RetagKind::Raw => Some((None, false)),
// Boxes do not get a barrier: Barriers reflect that references outlive the call
// Boxes do not get a barrier: barriers reflect that references outlive the call
// they were passed in to; that's just not the case for boxes.
ty::Adt(..) if ty.is_box() => Some((Some(MutMutable), false)),
_ => None,
}
}
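The three qualifying cases, restated as a surface-level sketch (the comments paraphrase `qualify`; they are not normative):

fn main() {
    let mut v = 0u8;
    // References always qualify; when retagged at function entry they
    // additionally get a barrier.
    let _r: &mut u8 = &mut v;
    // Boxes qualify, but never get a barrier.
    let _b: Box<u8> = Box::new(0);
    // Raw pointers are only retagged for `RetagKind::Raw`, i.e., for
    // explicit raw-pointer casts like this one.
    let _p: *mut u8 = &mut v;
}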
// We need a visitor to visit all references. However, that requires
// We need a visitor to visit all references. However, that requires
// a `MemPlace`, so we have a fast path for reference types that
// avoids allocating.
if let Some((mutbl, barrier)) = qualify(place.layout.ty, kind) {
// fast path
// Fast path.
let val = this.read_immediate(this.place_to_op(place)?)?;
let val = this.retag_reference(val, mutbl, barrier, kind == RetagKind::TwoPhase)?;
this.write_immediate(val, place)?;
@ -705,7 +724,7 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a,
let mut visitor = RetagVisitor { ecx: this, kind };
visitor.visit_value(place)?;
// The actual visitor
// The actual visitor.
struct RetagVisitor<'ecx, 'a, 'mir, 'tcx> {
ecx: &'ecx mut MiriEvalContext<'a, 'mir, 'tcx>,
kind: RetagKind,

View File

@ -1,4 +1,4 @@
// error-pattern: Deallocating with active barrier
// error-pattern: deallocating with active barrier
fn inner(x: &mut i32, f: fn(&mut i32)) {
// `f` may mutate, but it may not deallocate!

View File

@ -1,13 +1,3 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Try to double-check that static fns have the right size (with or
* without dummy env ptr, as appropriate) by iterating a size-2 array.

View File

@ -1,4 +1,5 @@
//ignore-windows: Inspects allocation base address on Windows
//ignore-windows: inspects allocation base address on Windows
#![feature(allocator_api)]
use std::ptr::NonNull;
@ -6,8 +7,10 @@ use std::alloc::{Global, Alloc, Layout, System};
fn check_overalign_requests<T: Alloc>(mut allocator: T) {
let size = 8;
let align = 16; // greater than size
let iterations = 1; // Miri is deterministic, no need to try many times
// Greater than `size`.
let align = 16;
// Miri is deterministic; no need to try many times.
let iterations = 1;
unsafe {
let pointers: Vec<_> = (0..iterations).map(|_| {
allocator.alloc(Layout::from_size_align(size, align).unwrap()).unwrap()
@ -17,7 +20,7 @@ fn check_overalign_requests<T: Alloc>(mut allocator: T) {
"Got a pointer less aligned than requested")
}
// Clean up
// Clean up.
for &ptr in &pointers {
allocator.dealloc(ptr, Layout::from_size_align(size, align).unwrap())
}

View File

@ -1,4 +1,4 @@
// PrimVals in Miri are represented with 8 bytes (u64) and at the time of writing, the `-x`
// `PrimVal`s in Miri are represented with 8 bytes (`u64`), and at the time of writing, the `-x`
// will sign-extend into the entire 8 bytes. Then, if you tried to write the `-x` into
// something smaller than 8 bytes, like a 4-byte pointer, it would crash in `byteorder` crate
// code that assumed only the low 4 bytes would be set. Actually, we were masking properly for