Rustfmt all the things

Oliver Scherer 2019-12-23 12:56:23 +01:00
parent a3ea1cb458
commit 7ead530841
25 changed files with 905 additions and 898 deletions

View File

@ -7,7 +7,7 @@ use crate::helpers::*;
#[bench]
fn fib(bencher: &mut Bencher) {
bencher.iter(|| { fibonacci_helper::main(); })
bencher.iter(|| fibonacci_helper::main())
}
#[bench]
@ -17,7 +17,7 @@ fn fib_miri(bencher: &mut Bencher) {
#[bench]
fn fib_iter(bencher: &mut Bencher) {
bencher.iter(|| { fibonacci_helper_iterative::main(); })
bencher.iter(|| fibonacci_helper_iterative::main())
}
#[bench]

View File

@ -6,23 +6,26 @@ extern crate rustc_interface;
extern crate test;
use self::miri::eval_main;
use rustc::hir::def_id::LOCAL_CRATE;
use rustc_interface::{interface, Queries};
use rustc_driver::Compilation;
use crate::test::Bencher;
use rustc::hir::def_id::LOCAL_CRATE;
use rustc_driver::Compilation;
use rustc_interface::{interface, Queries};
struct MiriCompilerCalls<'a> {
bencher: &'a mut Bencher,
}
impl rustc_driver::Callbacks for MiriCompilerCalls<'_> {
fn after_analysis<'tcx>(&mut self, compiler: &interface::Compiler, queries: &'tcx Queries<'tcx>) -> Compilation {
fn after_analysis<'tcx>(
&mut self,
compiler: &interface::Compiler,
queries: &'tcx Queries<'tcx>,
) -> Compilation {
compiler.session().abort_if_errors();
queries.global_ctxt().unwrap().peek_mut().enter(|tcx| {
let (entry_def_id, _) = tcx.entry_fn(LOCAL_CRATE).expect(
"no main or start function found",
);
let (entry_def_id, _) =
tcx.entry_fn(LOCAL_CRATE).expect("no main or start function found");
self.bencher.iter(|| {
let config = miri::MiriConfig {
@ -50,13 +53,9 @@ fn find_sysroot() -> String {
let toolchain = option_env!("RUSTUP_TOOLCHAIN").or(option_env!("MULTIRUST_TOOLCHAIN"));
match (home, toolchain) {
(Some(home), Some(toolchain)) => format!("{}/toolchains/{}", home, toolchain),
_ => {
option_env!("RUST_SYSROOT")
.expect(
"need to specify RUST_SYSROOT env var or use rustup or multirust",
)
.to_owned()
}
_ => option_env!("RUST_SYSROOT")
.expect("need to specify RUST_SYSROOT env var or use rustup or multirust")
.to_owned(),
}
}

View File

@ -7,7 +7,7 @@ use crate::helpers::*;
#[bench]
fn noop(bencher: &mut Bencher) {
bencher.iter(|| { smoke_helper::main(); })
bencher.iter(|| smoke_helper::main())
}
/*

rustfmt.toml (new file, 3 additions)
View File

@ -0,0 +1,3 @@
use_small_heuristics = "Max"
version = "Two"
match_arm_blocks = false
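
These three options drive most of the reformatting in this diff: `use_small_heuristics = "Max"` raises rustfmt's width heuristics to the full `max_width`, so struct literals, short call chains, and single-line `if`/`else` stay on one line; `match_arm_blocks = false` stops rustfmt from wrapping a match arm body in `{ ... }` when it has to continue on the next line; and `version = "Two"` opts into the newer formatting rules (an unstable option, so this presumably assumes a nightly rustfmt). A small illustrative sketch of the resulting style (not code from this repository):

struct Config { validate: bool, seed: Option<u64> }

fn describe(cfg: &Config) -> String {
    // Single-line if/else, permitted by the raised width heuristics.
    let mode = if cfg.validate { "checked" } else { "unchecked" };
    match cfg.seed {
        // A short arm fits on one line.
        Some(seed) => format!("{} run seeded with {:#x}", mode, seed),
        // A long arm continues on the next line without braces,
        // because `match_arm_blocks = false`.
        None =>
            format!("{} run with the default seed of zero, i.e. deterministic execution", mode),
    }
}

fn main() {
    println!("{}", describe(&Config { validate: true, seed: Some(42) }));
}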

View File

@ -1,10 +1,10 @@
#![feature(inner_deref)]
use std::fs::{self, File};
use std::io::{self, Write, BufRead};
use std::path::{PathBuf, Path};
use std::process::Command;
use std::io::{self, BufRead, Write};
use std::ops::Not;
use std::path::{Path, PathBuf};
use std::process::Command;
const XARGO_MIN_VERSION: (u32, u32, u32) = (0, 3, 17);
@ -40,8 +40,12 @@ fn show_help() {
}
fn show_version() {
println!("miri {} ({} {})",
env!("CARGO_PKG_VERSION"), env!("VERGEN_SHA_SHORT"), env!("VERGEN_COMMIT_DATE"));
println!(
"miri {} ({} {})",
env!("CARGO_PKG_VERSION"),
env!("VERGEN_SHA_SHORT"),
env!("VERGEN_COMMIT_DATE")
);
}
fn show_error(msg: String) -> ! {
@ -80,11 +84,10 @@ fn get_arg_flag_value(name: &str) -> Option<String> {
}
}
fn list_targets() -> impl Iterator<Item=cargo_metadata::Target> {
fn list_targets() -> impl Iterator<Item = cargo_metadata::Target> {
// We need to get the manifest, and then the metadata, to enumerate targets.
let manifest_path = get_arg_flag_value("--manifest-path").map(|m|
Path::new(&m).canonicalize().unwrap()
);
let manifest_path =
get_arg_flag_value("--manifest-path").map(|m| Path::new(&m).canonicalize().unwrap());
let mut cmd = cargo_metadata::MetadataCommand::new();
if let Some(ref manifest_path) = manifest_path {
@ -106,16 +109,18 @@ fn list_targets() -> impl Iterator<Item=cargo_metadata::Target> {
if let Some(ref manifest_path) = manifest_path {
package_manifest_path == manifest_path
} else {
let current_dir = current_dir.as_ref().expect(
"could not read current directory",
);
let package_manifest_directory = package_manifest_path.parent().expect(
"could not find parent directory of package manifest",
);
let current_dir = current_dir.as_ref().expect("could not read current directory");
let package_manifest_directory = package_manifest_path
.parent()
.expect("could not find parent directory of package manifest");
package_manifest_directory == current_dir
}
})
.unwrap_or_else(|| show_error(format!("This seems to be a workspace, which is not supported by cargo-miri")));
.unwrap_or_else(|| {
show_error(format!(
"This seems to be a workspace, which is not supported by cargo-miri"
))
});
let package = metadata.packages.remove(package_index);
// Finally we got the list of targets to build
@ -134,17 +139,24 @@ fn find_miri() -> PathBuf {
/// toolchain than what is used when `cargo miri` is run.
fn test_sysroot_consistency() {
fn get_sysroot(mut cmd: Command) -> PathBuf {
let out = cmd.arg("--print").arg("sysroot")
.output().expect("Failed to run rustc to get sysroot info");
let out = cmd
.arg("--print")
.arg("sysroot")
.output()
.expect("Failed to run rustc to get sysroot info");
let stdout = String::from_utf8(out.stdout).expect("stdout is not valid UTF-8");
let stderr = String::from_utf8(out.stderr).expect("stderr is not valid UTF-8");
assert!(
out.status.success(),
"Bad status code {} when getting sysroot info via {:?}.\nstdout:\n{}\nstderr:\n{}",
out.status, cmd, stdout, stderr,
out.status,
cmd,
stdout,
stderr,
);
let stdout = stdout.trim();
PathBuf::from(stdout).canonicalize()
PathBuf::from(stdout)
.canonicalize()
.unwrap_or_else(|_| panic!("Failed to canonicalize sysroot: {}", stdout))
}
@ -164,7 +176,8 @@ fn test_sysroot_consistency() {
Make sure you use the same toolchain to run miri that you used to build it!\n\
rustc sysroot: `{}`\n\
miri sysroot: `{}`",
rustc_sysroot.display(), miri_sysroot.display()
rustc_sysroot.display(),
miri_sysroot.display()
));
}
}
@ -193,28 +206,36 @@ fn xargo_version() -> Option<(u32, u32, u32)> {
return None;
}
// Parse output. The first line looks like "xargo 0.3.12 (b004f1c 2018-12-13)".
let line = out.stderr.lines().nth(0)
let line = out
.stderr
.lines()
.nth(0)
.expect("malformed `xargo --version` output: not at least one line")
.expect("malformed `xargo --version` output: error reading first line");
let (name, version) = {
let mut split = line.split(' ');
(split.next().expect("malformed `xargo --version` output: empty"),
split.next().expect("malformed `xargo --version` output: not at least two words"))
(
split.next().expect("malformed `xargo --version` output: empty"),
split.next().expect("malformed `xargo --version` output: not at least two words"),
)
};
if name != "xargo" {
// This is some fork of xargo
return None;
}
let mut version_pieces = version.split('.');
let major = version_pieces.next()
let major = version_pieces
.next()
.expect("malformed `xargo --version` output: not a major version piece")
.parse()
.expect("malformed `xargo --version` output: major version is not an integer");
let minor = version_pieces.next()
let minor = version_pieces
.next()
.expect("malformed `xargo --version` output: not a minor version piece")
.parse()
.expect("malformed `xargo --version` output: minor version is not an integer");
let patch = version_pieces.next()
let patch = version_pieces
.next()
.expect("malformed `xargo --version` output: not a patch version piece")
.parse()
.expect("malformed `xargo --version` output: patch version is not an integer");
@ -232,18 +253,15 @@ fn ask_to_run(mut cmd: Command, ask: bool, text: &str) {
io::stdin().read_line(&mut buf).unwrap();
match buf.trim().to_lowercase().as_ref() {
// Proceed.
"" | "y" | "yes" => {},
"" | "y" | "yes" => {}
"n" | "no" => show_error(format!("Aborting as per your request")),
a => show_error(format!("I do not understand `{}`", a))
a => show_error(format!("I do not understand `{}`", a)),
};
} else {
println!("Running `{:?}` to {}.", cmd, text);
}
if cmd.status()
.expect(&format!("failed to execute {:?}", cmd))
.success().not()
{
if cmd.status().expect(&format!("failed to execute {:?}", cmd)).success().not() {
show_error(format!("Failed to {}", text));
}
}
@ -275,7 +293,9 @@ fn setup(ask_user: bool) {
Ok(val) => PathBuf::from(val),
Err(_) => {
// Check for `rust-src` rustup component.
let sysroot = Command::new("rustc").args(&["--print", "sysroot"]).output()
let sysroot = Command::new("rustc")
.args(&["--print", "sysroot"])
.output()
.expect("failed to get rustc sysroot")
.stdout;
let sysroot = std::str::from_utf8(&sysroot).unwrap();
@ -298,7 +318,11 @@ fn setup(ask_user: bool) {
// Fallback: Ask the user to install the `rust-src` component, and use that.
let mut cmd = Command::new("rustup");
cmd.args(&["component", "add", "rust-src"]);
ask_to_run(cmd, ask_user, "install the rustc-src component for the selected toolchain");
ask_to_run(
cmd,
ask_user,
"install the rustc-src component for the selected toolchain",
);
rustup_src
}
}
@ -317,8 +341,10 @@ fn setup(ask_user: bool) {
fs::create_dir_all(&dir).unwrap();
}
// The interesting bit: Xargo.toml
File::create(dir.join("Xargo.toml")).unwrap()
.write_all(br#"
File::create(dir.join("Xargo.toml"))
.unwrap()
.write_all(
br#"
[dependencies.std]
default_features = false
# We need the `panic_unwind` feature because we use the `unwind` panic strategy.
@ -326,10 +352,14 @@ default_features = false
features = ["panic_unwind"]
[dependencies.test]
"#).unwrap();
"#,
)
.unwrap();
// The boring bits: a dummy project for xargo.
File::create(dir.join("Cargo.toml")).unwrap()
.write_all(br#"
File::create(dir.join("Cargo.toml"))
.unwrap()
.write_all(
br#"
[package]
name = "miri-xargo"
description = "A dummy project for building libstd with xargo."
@ -337,7 +367,9 @@ version = "0.0.0"
[lib]
path = "lib.rs"
"#).unwrap();
"#,
)
.unwrap();
File::create(dir.join("lib.rs")).unwrap();
// Prepare xargo invocation.
let target = get_arg_flag_value("--target");
@ -355,10 +387,7 @@ path = "lib.rs"
command.arg("--target").arg(&target);
}
// Finally run it!
if command.status()
.expect("failed to run xargo")
.success().not()
{
if command.status().expect("failed to run xargo").success().not() {
show_error(format!("Failed to run xargo"));
}
@ -413,9 +442,7 @@ fn in_cargo_miri() {
Some(s) if s.starts_with("-") => (MiriCommand::Run, 2),
None => (MiriCommand::Run, 2),
// Invalid command.
Some(s) => {
show_error(format!("Unknown command `{}`", s))
}
Some(s) => show_error(format!("Unknown command `{}`", s)),
};
let verbose = has_arg_flag("-v");
@ -433,9 +460,10 @@ fn in_cargo_miri() {
// Now run the command.
for target in list_targets() {
let mut args = std::env::args().skip(skip);
let kind = target.kind.get(0).expect(
"badly formatted cargo metadata: target::kind is an empty array",
);
let kind = target
.kind
.get(0)
.expect("badly formatted cargo metadata: target::kind is an empty array");
// Now we run `cargo rustc $FLAGS $ARGS`, giving the user the
// chance to add additional arguments. `FLAGS` is set to identify
// this target. The user gets to control what gets actually passed to Miri.
@ -470,22 +498,15 @@ fn in_cargo_miri() {
// Add `--` (to end the `cargo` flags), and then the user flags. We add markers around the
// user flags to be able to identify them later. "cargo rustc" adds more stuff after this,
// so we have to mark both the beginning and the end.
cmd
.arg("--")
.arg("cargo-miri-marker-begin")
.args(args)
.arg("cargo-miri-marker-end");
cmd.arg("--").arg("cargo-miri-marker-begin").args(args).arg("cargo-miri-marker-end");
let path = std::env::current_exe().expect("current executable path invalid");
cmd.env("RUSTC_WRAPPER", path);
if verbose {
eprintln!("+ {:?}", cmd);
}
let exit_status = cmd
.spawn()
.expect("could not run cargo")
.wait()
.expect("failed to wait for cargo?");
let exit_status =
cmd.spawn().expect("could not run cargo").wait().expect("failed to wait for cargo?");
if !exit_status.success() {
std::process::exit(exit_status.code().unwrap_or(-1))
@ -497,49 +518,43 @@ fn inside_cargo_rustc() {
let sysroot = std::env::var("MIRI_SYSROOT").expect("The wrapper should have set MIRI_SYSROOT");
let rustc_args = std::env::args().skip(2); // skip `cargo rustc`
let mut args: Vec<String> = rustc_args
.chain(Some("--sysroot".to_owned()))
.chain(Some(sysroot))
.collect();
let mut args: Vec<String> =
rustc_args.chain(Some("--sysroot".to_owned())).chain(Some(sysroot)).collect();
args.splice(0..0, miri::miri_default_args().iter().map(ToString::to_string));
// See if we can find the `cargo-miri` markers. Those only get added to the binary we want to
// run. They also serve to mark the user-defined arguments, which we have to move all the way
// to the end (they get added somewhere in the middle).
let needs_miri = if let Some(begin) = args.iter().position(|arg| arg == "cargo-miri-marker-begin") {
let end = args
.iter()
.position(|arg| arg == "cargo-miri-marker-end")
.expect("cannot find end marker");
// These mark the user arguments. We remove the first and last as they are the markers.
let mut user_args = args.drain(begin..=end);
assert_eq!(user_args.next().unwrap(), "cargo-miri-marker-begin");
assert_eq!(user_args.next_back().unwrap(), "cargo-miri-marker-end");
// Collect the rest and add it back at the end.
let mut user_args = user_args.collect::<Vec<String>>();
args.append(&mut user_args);
// Run this in Miri.
true
} else {
false
};
let needs_miri =
if let Some(begin) = args.iter().position(|arg| arg == "cargo-miri-marker-begin") {
let end = args
.iter()
.position(|arg| arg == "cargo-miri-marker-end")
.expect("cannot find end marker");
// These mark the user arguments. We remove the first and last as they are the markers.
let mut user_args = args.drain(begin..=end);
assert_eq!(user_args.next().unwrap(), "cargo-miri-marker-begin");
assert_eq!(user_args.next_back().unwrap(), "cargo-miri-marker-end");
// Collect the rest and add it back at the end.
let mut user_args = user_args.collect::<Vec<String>>();
args.append(&mut user_args);
// Run this in Miri.
true
} else {
false
};
let mut command = if needs_miri {
Command::new(find_miri())
} else {
Command::new("rustc")
};
let mut command = if needs_miri { Command::new(find_miri()) } else { Command::new("rustc") };
command.args(&args);
if has_arg_flag("-v") {
eprintln!("+ {:?}", command);
}
match command.status() {
Ok(exit) => {
Ok(exit) =>
if !exit.success() {
std::process::exit(exit.code().unwrap_or(42));
}
}
},
Err(ref e) if needs_miri => panic!("error during miri run: {:?}", e),
Err(ref e) => panic!("error during rustc call: {:?}", e),
}

View File

@ -1,25 +1,24 @@
#![feature(rustc_private)]
extern crate miri;
extern crate getopts;
extern crate miri;
extern crate rustc;
extern crate rustc_metadata;
extern crate rustc_codegen_utils;
extern crate rustc_driver;
extern crate rustc_errors;
extern crate rustc_codegen_utils;
extern crate rustc_interface;
extern crate rustc_metadata;
extern crate syntax;
use std::path::Path;
use std::io::Write;
use std::sync::{Mutex, Arc};
use std::io;
use std::io::Write;
use std::path::Path;
use std::sync::{Arc, Mutex};
use rustc_interface::{interface, Queries};
use rustc::hir::def_id::LOCAL_CRATE;
use rustc::hir::{self, itemlikevisit};
use rustc::ty::TyCtxt;
use rustc::hir::def_id::LOCAL_CRATE;
use rustc_driver::Compilation;
use rustc_interface::{interface, Queries};
use miri::MiriConfig;
@ -29,7 +28,11 @@ struct MiriCompilerCalls {
}
impl rustc_driver::Callbacks for MiriCompilerCalls {
fn after_analysis<'tcx>(&mut self, compiler: &interface::Compiler, queries: &'tcx Queries<'tcx>) -> Compilation {
fn after_analysis<'tcx>(
&mut self,
compiler: &interface::Compiler,
queries: &'tcx Queries<'tcx>,
) -> Compilation {
compiler.session().abort_if_errors();
queries.global_ctxt().unwrap().peek_mut().enter(|tcx| {
if std::env::args().any(|arg| arg == "--test") {
@ -37,7 +40,8 @@ impl rustc_driver::Callbacks for MiriCompilerCalls {
impl<'tcx, 'hir> itemlikevisit::ItemLikeVisitor<'hir> for Visitor<'tcx> {
fn visit_item(&mut self, i: &'hir hir::Item) {
if let hir::ItemKind::Fn(.., body_id) = i.kind {
if i.attrs.iter().any(|attr| attr.check_name(syntax::symbol::sym::test)) {
if i.attrs.iter().any(|attr| attr.check_name(syntax::symbol::sym::test))
{
let config = MiriConfig {
validate: true,
communicate: false,
@ -82,12 +86,10 @@ impl rustc_driver::Callbacks for MiriCompilerCalls {
}
fn main() {
let path = option_env!("MIRI_RUSTC_TEST")
.map(String::from)
.unwrap_or_else(|| {
std::env::var("MIRI_RUSTC_TEST")
.expect("need to set MIRI_RUSTC_TEST to path of rustc tests")
});
let path = option_env!("MIRI_RUSTC_TEST").map(String::from).unwrap_or_else(|| {
std::env::var("MIRI_RUSTC_TEST")
.expect("need to set MIRI_RUSTC_TEST to path of rustc tests")
});
let mut mir_not_found = Vec::new();
let mut crate_not_found = Vec::new();
@ -115,14 +117,16 @@ fn main() {
let stderr = std::io::stderr();
write!(stderr.lock(), "test [miri-pass] {} ... ", path.display()).unwrap();
let mut host_target = false;
let mut args: Vec<String> = std::env::args().filter(|arg| {
if arg == "--miri_host_target" {
host_target = true;
false // remove the flag, rustc doesn't know it
} else {
true
}
}).collect();
let mut args: Vec<String> = std::env::args()
.filter(|arg| {
if arg == "--miri_host_target" {
host_target = true;
false // remove the flag, rustc doesn't know it
} else {
true
}
})
.collect();
args.splice(1..1, miri::miri_default_args().iter().map(ToString::to_string));
// file to process
args.push(path.display().to_string());
@ -130,7 +134,13 @@ fn main() {
let sysroot_flag = String::from("--sysroot");
if !args.contains(&sysroot_flag) {
args.push(sysroot_flag);
args.push(Path::new(&std::env::var("HOME").unwrap()).join(".xargo").join("HOST").display().to_string());
args.push(
Path::new(&std::env::var("HOME").unwrap())
.join(".xargo")
.join("HOST")
.display()
.to_string(),
);
}
// A threadsafe buffer for writing.
@ -148,14 +158,19 @@ fn main() {
let buf = BufWriter::default();
let output = buf.clone();
let result = std::panic::catch_unwind(|| {
let _ = rustc_driver::run_compiler(&args, &mut MiriCompilerCalls { host_target }, None, Some(Box::new(buf)));
let _ = rustc_driver::run_compiler(
&args,
&mut MiriCompilerCalls { host_target },
None,
Some(Box::new(buf)),
);
});
match result {
Ok(()) => {
success += 1;
writeln!(stderr.lock(), "ok").unwrap()
},
}
Err(_) => {
let output = output.0.lock().unwrap();
let output_err = std::str::from_utf8(&output).unwrap();
@ -178,7 +193,8 @@ fn main() {
if text.starts_with(c_abi) {
c_abi_fns.push(text[c_abi.len()..end].to_string());
} else if text.starts_with(unimplemented_intrinsic_s) {
unimplemented_intrinsic.push(text[unimplemented_intrinsic_s.len()..end].to_string());
unimplemented_intrinsic
.push(text[unimplemented_intrinsic_s.len()..end].to_string());
} else if text.starts_with(unsupported_s) {
unsupported.push(text[unsupported_s.len()..end].to_string());
} else if text.starts_with(abi_s) {
@ -196,10 +212,19 @@ fn main() {
}
let stderr = std::io::stderr();
let mut stderr = stderr.lock();
writeln!(stderr, "{} success, {} no mir, {} crate not found, {} failed, \
{} C fn, {} ABI, {} unsupported, {} intrinsic",
success, mir_not_found.len(), crate_not_found.len(), failed.len(),
c_abi_fns.len(), abi.len(), unsupported.len(), unimplemented_intrinsic.len()).unwrap();
writeln!(
stderr,
"{} success, {} no mir, {} crate not found, {} failed, {} C fn, {} ABI, {} unsupported, {} intrinsic",
success,
mir_not_found.len(),
crate_not_found.len(),
failed.len(),
c_abi_fns.len(),
abi.len(),
unsupported.len(),
unimplemented_intrinsic.len()
)
.unwrap();
writeln!(stderr, "# The \"other reasons\" errors").unwrap();
writeln!(stderr, "(sorted, deduplicated)").unwrap();
print_vec(&mut stderr, failed);

View File

@ -7,29 +7,33 @@ extern crate log;
extern crate log_settings;
extern crate miri;
extern crate rustc;
extern crate rustc_metadata;
extern crate rustc_codegen_utils;
extern crate rustc_driver;
extern crate rustc_errors;
extern crate rustc_codegen_utils;
extern crate rustc_interface;
extern crate rustc_metadata;
extern crate syntax;
use std::str::FromStr;
use std::convert::TryFrom;
use std::env;
use std::str::FromStr;
use hex::FromHexError;
use rustc_interface::{interface, Queries};
use rustc::hir::def_id::LOCAL_CRATE;
use rustc_driver::Compilation;
use rustc_interface::{interface, Queries};
struct MiriCompilerCalls {
miri_config: miri::MiriConfig,
}
impl rustc_driver::Callbacks for MiriCompilerCalls {
fn after_analysis<'tcx>(&mut self, compiler: &interface::Compiler, queries: &'tcx Queries<'tcx>) -> Compilation {
fn after_analysis<'tcx>(
&mut self,
compiler: &interface::Compiler,
queries: &'tcx Queries<'tcx>,
) -> Compilation {
init_late_loggers();
compiler.session().abort_if_errors();
@ -41,7 +45,9 @@ impl rustc_driver::Callbacks for MiriCompilerCalls {
config.args.insert(0, compiler.input().filestem().to_string());
if let Some(return_code) = miri::eval_main(tcx, entry_def_id, config) {
std::process::exit(i32::try_from(return_code).expect("Return value was too large!"));
std::process::exit(
i32::try_from(return_code).expect("Return value was too large!"),
);
}
});
@ -76,8 +82,10 @@ fn init_late_loggers() {
// This way, if you set `MIRI_LOG=trace`, you get only the right parts of
// rustc traced, but you can also do `MIRI_LOG=miri=trace,rustc_mir::interpret=debug`.
if log::Level::from_str(&var).is_ok() {
env::set_var("RUSTC_LOG",
&format!("rustc::mir::interpret={0},rustc_mir::interpret={0}", var));
env::set_var(
"RUSTC_LOG",
&format!("rustc::mir::interpret={0},rustc_mir::interpret={0}", var),
);
} else {
env::set_var("RUSTC_LOG", &var);
}
@ -110,11 +118,9 @@ fn compile_time_sysroot() -> Option<String> {
let toolchain = option_env!("RUSTUP_TOOLCHAIN").or(option_env!("MULTIRUST_TOOLCHAIN"));
Some(match (home, toolchain) {
(Some(home), Some(toolchain)) => format!("{}/toolchains/{}", home, toolchain),
_ => {
option_env!("RUST_SYSROOT")
.expect("To build Miri without rustup, set the `RUST_SYSROOT` env var at build time")
.to_owned()
}
_ => option_env!("RUST_SYSROOT")
.expect("To build Miri without rustup, set the `RUST_SYSROOT` env var at build time")
.to_owned(),
})
}
@ -135,21 +141,20 @@ fn main() {
if rustc_args.is_empty() {
// Very first arg: for `rustc`.
rustc_args.push(arg);
}
else if after_dashdash {
} else if after_dashdash {
// Everything that comes after are `miri` args.
miri_args.push(arg);
} else {
match arg.as_str() {
"-Zmiri-disable-validation" => {
validate = false;
},
}
"-Zmiri-disable-isolation" => {
communicate = true;
},
}
"-Zmiri-ignore-leaks" => {
ignore_leaks = true;
},
}
"--" => {
after_dashdash = true;
}
@ -162,32 +167,40 @@ fn main() {
FromHexError::InvalidHexCharacter { .. } => panic!(
"-Zmiri-seed should only contain valid hex digits [0-9a-fA-F]"
),
FromHexError::OddLength => panic!("-Zmiri-seed should have an even number of digits"),
FromHexError::OddLength =>
panic!("-Zmiri-seed should have an even number of digits"),
err => panic!("Unknown error decoding -Zmiri-seed as hex: {:?}", err),
});
if seed_raw.len() > 8 {
panic!(format!("-Zmiri-seed must be at most 8 bytes, was {}", seed_raw.len()));
panic!(format!(
"-Zmiri-seed must be at most 8 bytes, was {}",
seed_raw.len()
));
}
let mut bytes = [0; 8];
bytes[..seed_raw.len()].copy_from_slice(&seed_raw);
seed = Some(u64::from_be_bytes(bytes));
},
}
arg if arg.starts_with("-Zmiri-env-exclude=") => {
excluded_env_vars.push(arg.trim_start_matches("-Zmiri-env-exclude=").to_owned());
},
excluded_env_vars
.push(arg.trim_start_matches("-Zmiri-env-exclude=").to_owned());
}
arg if arg.starts_with("-Zmiri-track-pointer-tag=") => {
let id: u64 = match arg.trim_start_matches("-Zmiri-track-pointer-tag=").parse() {
let id: u64 = match arg.trim_start_matches("-Zmiri-track-pointer-tag=").parse()
{
Ok(id) => id,
Err(err) => panic!("-Zmiri-track-pointer-tag requires a valid `u64` as the argument: {}", err),
Err(err) => panic!(
"-Zmiri-track-pointer-tag requires a valid `u64` as the argument: {}",
err
),
};
if let Some(id) = miri::PtrId::new(id) {
tracked_pointer_tag = Some(id);
} else {
panic!("-Zmiri-track-pointer-tag must be a nonzero id");
}
},
}
_ => {
rustc_args.push(arg);
}
@ -225,6 +238,7 @@ fn main() {
rustc_driver::install_ice_hook();
let result = rustc_driver::catch_fatal_errors(move || {
rustc_driver::run_compiler(&rustc_args, &mut MiriCompilerCalls { miri_config }, None, None)
}).and_then(|result| result);
})
.and_then(|result| result);
std::process::exit(result.is_err() as i32);
}
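
As an aside on the `-Zmiri-seed` handling earlier in this file: the decoded hex bytes are copied into the front of a zeroed 8-byte buffer and read big-endian, so a short seed ends up in the high bytes of the resulting `u64`. A minimal standalone sketch of that padding step (illustrative only; it starts from already-decoded bytes, so the `hex` crate is not needed):

fn seed_from_bytes(seed_raw: &[u8]) -> u64 {
    assert!(seed_raw.len() <= 8, "-Zmiri-seed must be at most 8 bytes");
    let mut bytes = [0; 8];
    bytes[..seed_raw.len()].copy_from_slice(seed_raw);
    u64::from_be_bytes(bytes)
}

fn main() {
    // The four decoded bytes of `-Zmiri-seed=deadbeef` land in the high half:
    assert_eq!(seed_from_bytes(&[0xde, 0xad, 0xbe, 0xef]), 0xdead_beef_0000_0000);
}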

View File

@ -50,7 +50,11 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
tcx.at(syntax::source_map::DUMMY_SP),
ty::ParamEnv::reveal_all(),
Evaluator::new(config.communicate),
MemoryExtra::new(StdRng::seed_from_u64(config.seed.unwrap_or(0)), config.validate, config.tracked_pointer_tag),
MemoryExtra::new(
StdRng::seed_from_u64(config.seed.unwrap_or(0)),
config.validate,
config.tracked_pointer_tag,
),
);
// Complete initialization.
EnvVars::init(&mut ecx, config.excluded_env_vars);
@ -75,9 +79,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
.unwrap();
// First argument: pointer to `main()`.
let main_ptr = ecx
.memory
.create_fn_alloc(FnVal::Instance(main_instance));
let main_ptr = ecx.memory.create_fn_alloc(FnVal::Instance(main_instance));
// Second argument (argc): length of `config.args`.
let argc = Scalar::from_uint(config.args.len() as u128, ecx.pointer_size());
// Third argument (`argv`): created from `config.args`.
@ -93,24 +95,20 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
argvs.push(arg_place.ptr);
}
// Make an array with all these pointers, in the Miri memory.
let argvs_layout = ecx.layout_of(
tcx.mk_array(tcx.mk_imm_ptr(tcx.types.u8), argvs.len() as u64),
)?;
let argvs_layout =
ecx.layout_of(tcx.mk_array(tcx.mk_imm_ptr(tcx.types.u8), argvs.len() as u64))?;
let argvs_place = ecx.allocate(argvs_layout, MiriMemoryKind::Env.into());
for (idx, arg) in argvs.into_iter().enumerate() {
let place = ecx.mplace_field(argvs_place, idx as u64)?;
ecx.write_scalar(arg, place.into())?;
}
ecx.memory
.mark_immutable(argvs_place.ptr.assert_ptr().alloc_id)?;
ecx.memory.mark_immutable(argvs_place.ptr.assert_ptr().alloc_id)?;
// A pointer to that place is the 3rd argument for main.
let argv = argvs_place.ptr;
// Store `argc` and `argv` for macOS `_NSGetArg{c,v}`.
{
let argc_place = ecx.allocate(
ecx.layout_of(tcx.types.isize)?,
MiriMemoryKind::Env.into(),
);
let argc_place =
ecx.allocate(ecx.layout_of(tcx.types.isize)?, MiriMemoryKind::Env.into());
ecx.write_scalar(argc, argc_place.into())?;
ecx.machine.argc = Some(argc_place.ptr);
@ -149,10 +147,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
};
// Return place (in static memory so that it does not count as leak).
let ret_place = ecx.allocate(
ecx.layout_of(tcx.types.isize)?,
MiriMemoryKind::Env.into(),
);
let ret_place = ecx.allocate(ecx.layout_of(tcx.types.isize)?, MiriMemoryKind::Env.into());
// Call start function.
ecx.call_function(
start_instance,
@ -209,27 +204,30 @@ pub fn eval_main<'tcx>(tcx: TyCtxt<'tcx>, main_id: DefId, config: MiriConfig) ->
return None;
}
}
return Some(return_code)
return Some(return_code);
}
Err(mut e) => {
// Special treatment for some error kinds
let msg = match e.kind {
InterpError::MachineStop(ref info) => {
let info = info.downcast_ref::<TerminationInfo>()
let info = info
.downcast_ref::<TerminationInfo>()
.expect("invalid MachineStop payload");
match info {
TerminationInfo::Exit(code) => return Some(*code),
TerminationInfo::PoppedTrackedPointerTag(item) =>
format!("popped tracked tag for item {:?}", item),
TerminationInfo::Abort =>
format!("the evaluated program aborted execution")
format!("the evaluated program aborted execution"),
}
}
err_unsup!(NoMirFor(..)) =>
format!("{}. Did you set `MIRI_SYSROOT` to a Miri-enabled sysroot? You can prepare one with `cargo miri setup`.", e),
err_unsup!(NoMirFor(..)) => format!(
"{}. Did you set `MIRI_SYSROOT` to a Miri-enabled sysroot? You can prepare one with `cargo miri setup`.",
e
),
InterpError::InvalidProgram(_) =>
bug!("This error should be impossible in Miri: {}", e),
_ => e.to_string()
_ => e.to_string(),
};
e.print_backtrace();
if let Some(frame) = ecx.stack().last() {
@ -242,9 +240,9 @@ pub fn eval_main<'tcx>(tcx: TyCtxt<'tcx>, main_id: DefId, config: MiriConfig) ->
// We iterate with indices because we need to look at the next frame (the caller).
for idx in 0..frames.len() {
let frame_info = &frames[idx];
let call_site_is_local = frames.get(idx + 1).map_or(false, |caller_info| {
caller_info.instance.def_id().is_local()
});
let call_site_is_local = frames
.get(idx + 1)
.map_or(false, |caller_info| caller_info.instance.def_id().is_local());
if call_site_is_local {
err.span_note(frame_info.call_site, &frame_info.to_string());
} else {

View File

@ -1,15 +1,14 @@
use std::{mem, iter};
use std::ffi::OsStr;
use std::{iter, mem};
use syntax::source_map::DUMMY_SP;
use rustc::hir::def_id::{DefId, CRATE_DEF_INDEX};
use rustc::mir;
use rustc::ty::{
self,
List,
TyCtxt,
layout::{self, LayoutOf, Size, TyLayout},
List, TyCtxt,
};
use syntax::source_map::DUMMY_SP;
use rand::RngCore;
@ -19,15 +18,11 @@ impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tc
/// Gets an instance for a path.
fn resolve_did<'mir, 'tcx>(tcx: TyCtxt<'tcx>, path: &[&str]) -> InterpResult<'tcx, DefId> {
tcx
.crates()
tcx.crates()
.iter()
.find(|&&krate| tcx.original_crate_name(krate).as_str() == path[0])
.and_then(|krate| {
let krate = DefId {
krate: *krate,
index: CRATE_DEF_INDEX,
};
let krate = DefId { krate: *krate, index: CRATE_DEF_INDEX };
let mut items = tcx.item_children(krate);
let mut path_it = path.iter().skip(1).peekable();
@ -35,7 +30,7 @@ fn resolve_did<'mir, 'tcx>(tcx: TyCtxt<'tcx>, path: &[&str]) -> InterpResult<'tc
for item in mem::replace(&mut items, Default::default()).iter() {
if item.ident.name.as_str() == *segment {
if path_it.peek().is_none() {
return Some(item.res.def_id())
return Some(item.res.def_id());
}
items = tcx.item_children(item.res.def_id());
@ -51,12 +46,13 @@ fn resolve_did<'mir, 'tcx>(tcx: TyCtxt<'tcx>, path: &[&str]) -> InterpResult<'tc
})
}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Gets an instance for a path.
fn resolve_path(&self, path: &[&str]) -> InterpResult<'tcx, ty::Instance<'tcx>> {
Ok(ty::Instance::mono(self.eval_context_ref().tcx.tcx, resolve_did(self.eval_context_ref().tcx.tcx, path)?))
Ok(ty::Instance::mono(
self.eval_context_ref().tcx.tcx,
resolve_did(self.eval_context_ref().tcx.tcx, path)?,
))
}
/// Write a 0 of the appropriate size to `dest`.
@ -74,11 +70,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
/// Turn a Scalar into an Option<NonNullScalar>
fn test_null(&self, val: Scalar<Tag>) -> InterpResult<'tcx, Option<Scalar<Tag>>> {
let this = self.eval_context_ref();
Ok(if this.is_null(val)? {
None
} else {
Some(val)
})
Ok(if this.is_null(val)? { None } else { Some(val) })
}
/// Get the `Place` for a local
@ -89,11 +81,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
/// Generate some random bytes, and write them to `dest`.
fn gen_random(
&mut self,
ptr: Scalar<Tag>,
len: usize,
) -> InterpResult<'tcx> {
fn gen_random(&mut self, ptr: Scalar<Tag>, len: usize) -> InterpResult<'tcx> {
// Some programs pass in a null pointer and a length of 0
// to their platform's random-generation function (e.g. getrandom())
// on Linux. For compatibility with these programs, we don't perform
@ -110,8 +98,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// Fill the buffer using the host's rng.
getrandom::getrandom(&mut data)
.map_err(|err| err_unsup_format!("getrandom failed: {}", err))?;
}
else {
} else {
let rng = this.memory.extra.rng.get_mut();
rng.fill_bytes(&mut data);
}
@ -132,23 +119,19 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// Push frame.
let mir = &*this.load_mir(f.def, None)?;
let span = this.stack().last()
let span = this
.stack()
.last()
.and_then(Frame::current_source_info)
.map(|si| si.span)
.unwrap_or(DUMMY_SP);
this.push_stack_frame(
f,
span,
mir,
dest,
stack_pop,
)?;
this.push_stack_frame(f, span, mir, dest, stack_pop)?;
// Initialize arguments.
let mut callee_args = this.frame().body.args_iter();
for arg in args {
let callee_arg = this.local_place(
callee_args.next().expect("callee has fewer arguments than expected")
callee_args.next().expect("callee has fewer arguments than expected"),
)?;
this.write_immediate(*arg, callee_arg)?;
}
@ -167,10 +150,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
) -> InterpResult<'tcx> {
let this = self.eval_context_ref();
trace!("visit_frozen(place={:?}, size={:?})", *place, size);
debug_assert_eq!(size,
debug_assert_eq!(
size,
this.size_and_align_of_mplace(place)?
.map(|(size, _)| size)
.unwrap_or_else(|| place.layout.size)
.map(|(size, _)| size)
.unwrap_or_else(|| place.layout.size)
);
// Store how far we proceeded into the place so far. Everything to the left of
// this offset has already been handled, in the sense that the frozen parts
@ -190,11 +174,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let frozen_size = unsafe_cell_offset - end_offset;
// Everything between the end_ptr and this `UnsafeCell` is frozen.
if frozen_size != Size::ZERO {
action(end_ptr, frozen_size, /*frozen*/true)?;
action(end_ptr, frozen_size, /*frozen*/ true)?;
}
// This `UnsafeCell` is NOT frozen.
if unsafe_cell_size != Size::ZERO {
action(unsafe_cell_ptr, unsafe_cell_size, /*frozen*/false)?;
action(unsafe_cell_ptr, unsafe_cell_size, /*frozen*/ false)?;
}
// Update end_ptr.
end_ptr = unsafe_cell_ptr.wrapping_offset(unsafe_cell_size, this);
@ -208,7 +192,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
unsafe_cell_action: |place| {
trace!("unsafe_cell_action on {:?}", place.ptr);
// We need a size to go on.
let unsafe_cell_size = this.size_and_align_of_mplace(place)?
let unsafe_cell_size = this
.size_and_align_of_mplace(place)?
.map(|(size, _)| size)
// for extern types, just cover what we can
.unwrap_or_else(|| place.layout.size);
@ -231,18 +216,17 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
/// Visiting the memory covered by a `MemPlace`, being aware of
/// whether we are inside an `UnsafeCell` or not.
struct UnsafeCellVisitor<'ecx, 'mir, 'tcx, F>
where F: FnMut(MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx>
where
F: FnMut(MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx>,
{
ecx: &'ecx MiriEvalContext<'mir, 'tcx>,
unsafe_cell_action: F,
}
impl<'ecx, 'mir, 'tcx, F>
ValueVisitor<'mir, 'tcx, Evaluator<'tcx>>
for
UnsafeCellVisitor<'ecx, 'mir, 'tcx, F>
impl<'ecx, 'mir, 'tcx, F> ValueVisitor<'mir, 'tcx, Evaluator<'tcx>>
for UnsafeCellVisitor<'ecx, 'mir, 'tcx, F>
where
F: FnMut(MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx>
F: FnMut(MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx>,
{
type V = MPlaceTy<'tcx, Tag>;
@ -252,11 +236,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
// Hook to detect `UnsafeCell`.
fn visit_value(&mut self, v: MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx>
{
fn visit_value(&mut self, v: MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
trace!("UnsafeCellVisitor: {:?} {:?}", *v, v.layout.ty);
let is_unsafe_cell = match v.layout.ty.kind {
ty::Adt(adt, _) => Some(adt.did) == self.ecx.tcx.lang_items().unsafe_cell_type(),
ty::Adt(adt, _) =>
Some(adt.did) == self.ecx.tcx.lang_items().unsafe_cell_type(),
_ => false,
};
if is_unsafe_cell {
@ -293,7 +277,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
fn visit_aggregate(
&mut self,
place: MPlaceTy<'tcx, Tag>,
fields: impl Iterator<Item=InterpResult<'tcx, MPlaceTy<'tcx, Tag>>>,
fields: impl Iterator<Item = InterpResult<'tcx, MPlaceTy<'tcx, Tag>>>,
) -> InterpResult<'tcx> {
match place.layout.fields {
layout::FieldPlacement::Array { .. } => {
@ -303,7 +287,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
layout::FieldPlacement::Arbitrary { .. } => {
// Gather the subplaces and sort them before visiting.
let mut places = fields.collect::<InterpResult<'tcx, Vec<MPlaceTy<'tcx, Tag>>>>()?;
let mut places =
fields.collect::<InterpResult<'tcx, Vec<MPlaceTy<'tcx, Tag>>>>()?;
places.sort_by_key(|place| place.ptr.assert_ptr().offset);
self.walk_aggregate(place, places.into_iter().map(Ok))
}
@ -315,22 +300,16 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
// We have to do *something* for unions.
fn visit_union(&mut self, v: MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx>
{
fn visit_union(&mut self, v: MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
// With unions, we fall back to whatever the type says, to hopefully be consistent
// with LLVM IR.
// FIXME: are we consistent, and is this really the behavior we want?
let frozen = self.ecx.type_is_freeze(v.layout.ty);
if frozen {
Ok(())
} else {
(self.unsafe_cell_action)(v)
}
if frozen { Ok(()) } else { (self.unsafe_cell_action)(v) }
}
// We should never get to a primitive, but always short-circuit somewhere above.
fn visit_primitive(&mut self, _v: MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx>
{
fn visit_primitive(&mut self, _v: MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
bug!("we should always short-circuit before coming to a primitive")
}
}
@ -382,7 +361,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
/// case.
fn check_no_isolation(&mut self, name: &str) -> InterpResult<'tcx> {
if !self.eval_context_mut().machine.communicate {
throw_unsup_format!("`{}` not available when isolation is enabled. Pass the flag `-Zmiri-disable-isolation` to disable it.", name)
throw_unsup_format!(
"`{}` not available when isolation is enabled. Pass the flag `-Zmiri-disable-isolation` to disable it.",
name
)
}
Ok(())
}
@ -423,11 +405,16 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
TimedOut => "ETIMEDOUT",
AlreadyExists => "EEXIST",
WouldBlock => "EWOULDBLOCK",
_ => throw_unsup_format!("The {} error cannot be transformed into a raw os error", e)
_ => {
throw_unsup_format!("The {} error cannot be transformed into a raw os error", e)
}
})?
} else {
// FIXME: we have to implement the Windows equivalent of this.
throw_unsup_format!("Setting the last OS error from an io::Error is unsupported for {}.", target.target_os)
throw_unsup_format!(
"Setting the last OS error from an io::Error is unsupported for {}.",
target.target_os
)
};
this.set_last_error(last_error)
}
@ -454,7 +441,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
/// Helper function to read an OsString from a null-terminated sequence of bytes, which is what
/// the Unix APIs usually handle.
fn read_os_str_from_c_str<'a>(&'a self, scalar: Scalar<Tag>) -> InterpResult<'tcx, &'a OsStr>
where 'tcx: 'a, 'mir: 'a
where
'tcx: 'a,
'mir: 'a,
{
let this = self.eval_context_ref();
let bytes = this.memory.read_c_str(scalar)?;
@ -469,7 +458,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
&mut self,
os_str: &OsStr,
scalar: Scalar<Tag>,
size: u64
size: u64,
) -> InterpResult<'tcx, bool> {
let bytes = os_str_to_bytes(os_str)?;
// If `size` is smaller or equal than `bytes.len()`, writing `bytes` plus the required null
@ -477,7 +466,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
if size <= bytes.len() as u64 {
return Ok(false);
}
self.eval_context_mut().memory.write_bytes(scalar, bytes.iter().copied().chain(iter::once(0u8)))?;
self.eval_context_mut()
.memory
.write_bytes(scalar, bytes.iter().copied().chain(iter::once(0u8)))?;
Ok(true)
}
}
@ -488,7 +479,7 @@ fn os_str_to_bytes<'tcx, 'a>(os_str: &'a OsStr) -> InterpResult<'tcx, &'a [u8]>
}
#[cfg(target_os = "unix")]
fn bytes_to_os_str<'tcx, 'a>(bytes: &'a[u8]) -> InterpResult<'tcx, &'a OsStr> {
fn bytes_to_os_str<'tcx, 'a>(bytes: &'a [u8]) -> InterpResult<'tcx, &'a OsStr> {
Ok(std::os::unix::ffi::OsStringExt::from_bytes(bytes))
}
@ -522,11 +513,7 @@ pub fn immty_from_int_checked<'tcx>(
let size = layout.size;
let truncated = truncate(int as u128, size);
if sign_extend(truncated, size) as i128 != int {
throw_unsup_format!(
"Signed value {:#x} does not fit in {} bits",
int,
size.bits()
)
throw_unsup_format!("Signed value {:#x} does not fit in {} bits", int, size.bits())
}
Ok(ImmTy::from_int(int, layout))
}
@ -542,12 +529,7 @@ pub fn immty_from_uint_checked<'tcx>(
// `ImmTy::from_int` panic.
let size = layout.size;
if truncate(int, size) != int {
throw_unsup_format!(
"Unsigned value {:#x} does not fit in {} bits",
int,
size.bits()
)
throw_unsup_format!("Unsigned value {:#x} does not fit in {} bits", int, size.bits())
}
Ok(ImmTy::from_uint(int, layout))
}
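
The overflow checks in `immty_from_int_checked` and `immty_from_uint_checked` above work by truncating the value to the target width and testing whether it survives the round trip (with sign extension in the signed case). A self-contained sketch of that idea, with `bits` standing in for `layout.size.bits()` (an illustrative reimplementation, not rustc's actual `truncate`/`sign_extend` helpers):

fn truncate(value: u128, bits: u32) -> u128 {
    // Keep only the low `bits` bits (assumes 0 < bits <= 128).
    let shift = 128 - bits;
    (value << shift) >> shift
}

fn sign_extend(value: u128, bits: u32) -> i128 {
    // Shift the value's sign bit up to the i128 sign position, then back down.
    let shift = 128 - bits;
    ((value << shift) as i128) >> shift
}

fn main() {
    // 200 fits in 8 unsigned bits, but read back as signed it becomes -56,
    // so the signed check would reject it:
    assert_eq!(truncate(200, 8), 200);
    assert_eq!(sign_extend(truncate(200, 8), 8), -56);
    // -100 survives the signed round trip and would be accepted:
    assert_eq!(sign_extend(truncate(-100i128 as u128, 8), 8), -100);
}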

View File

@ -1,11 +1,11 @@
use std::cell::RefCell;
use std::collections::{HashMap, hash_map::Entry};
use std::cmp::max;
use std::collections::{hash_map::Entry, HashMap};
use rand::Rng;
use rustc::ty::layout::HasDataLayout;
use rustc_mir::interpret::{AllocId, Pointer, InterpResult, Memory, AllocCheck, PointerArithmetic};
use rustc_mir::interpret::{AllocCheck, AllocId, InterpResult, Memory, Pointer, PointerArithmetic};
use rustc_target::abi::Size;
use crate::{Evaluator, Tag, STACK_ADDR};
@ -47,14 +47,15 @@ impl<'mir, 'tcx> GlobalState {
}
let global_state = memory.extra.intptrcast.borrow();
let pos = global_state.int_to_ptr_map.binary_search_by_key(&int, |(addr, _)| *addr);
Ok(match global_state.int_to_ptr_map.binary_search_by_key(&int, |(addr, _)| *addr) {
Ok(match pos {
Ok(pos) => {
let (_, alloc_id) = global_state.int_to_ptr_map[pos];
// `int` is equal to the starting address for an allocation, the offset should be
// zero. The pointer is untagged because it was created from a cast
Pointer::new_with_tag(alloc_id, Size::from_bytes(0), Tag::Untagged)
},
}
Err(0) => throw_unsup!(DanglingPointerDeref),
Err(pos) => {
// This is the largest of the addresses smaller than `int`,
@ -100,7 +101,10 @@ impl<'mir, 'tcx> GlobalState {
entry.insert(base_addr);
trace!(
"Assigning base address {:#x} to allocation {:?} (slack: {}, align: {})",
base_addr, ptr.alloc_id, slack, align.bytes(),
base_addr,
ptr.alloc_id,
slack,
align.bytes(),
);
// Remember next base address. If this allocation is zero-sized, leave a gap
@ -114,7 +118,8 @@ impl<'mir, 'tcx> GlobalState {
}
};
debug_assert_eq!(base_addr % align.bytes(), 0); // sanity check
// Sanity check that the base address is aligned.
debug_assert_eq!(base_addr % align.bytes(), 0);
// Add offset with the right kind of pointer-overflowing arithmetic.
let dl = memory.data_layout();
Ok(dl.overflowing_offset(base_addr, ptr.offset.bytes()).0)
@ -125,7 +130,7 @@ impl<'mir, 'tcx> GlobalState {
fn align_addr(addr: u64, align: u64) -> u64 {
match addr % align {
0 => addr,
rem => addr.checked_add(align).unwrap() - rem
rem => addr.checked_add(align).unwrap() - rem,
}
}
}
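
The `align_addr` helper above rounds `addr` up to the next multiple of `align`. A tiny standalone check of that behaviour (the helper is copied from the diff; the `main` is illustrative):

fn align_addr(addr: u64, align: u64) -> u64 {
    match addr % align {
        0 => addr,
        rem => addr.checked_add(align).unwrap() - rem,
    }
}

fn main() {
    assert_eq!(align_addr(16, 8), 16); // already aligned: unchanged
    assert_eq!(align_addr(17, 8), 24); // rounded up to the next multiple of 8
}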

View File

@ -1,57 +1,57 @@
#![feature(rustc_private)]
#![feature(option_expect_none, option_unwrap_none)]
#![warn(rust_2018_idioms)]
#![allow(clippy::cast_lossless)]
#[macro_use]
extern crate log;
// From rustc.
extern crate syntax;
extern crate rustc_apfloat;
#[macro_use] extern crate rustc;
extern crate syntax;
#[macro_use]
extern crate rustc;
extern crate rustc_data_structures;
extern crate rustc_mir;
extern crate rustc_target;
mod shims;
mod operator;
mod eval;
mod helpers;
mod range_map;
mod mono_hash_map;
mod stacked_borrows;
mod intptrcast;
mod machine;
mod eval;
mod mono_hash_map;
mod operator;
mod range_map;
mod shims;
mod stacked_borrows;
// Make all those symbols available in the same place as our own.
pub use rustc_mir::interpret::*;
// Resolve ambiguity.
pub use rustc_mir::interpret::{self, AllocMap, PlaceTy};
pub use crate::shims::{EvalContextExt as ShimsEvalContextExt};
pub use crate::shims::foreign_items::EvalContextExt as ForeignItemsEvalContextExt;
pub use crate::shims::intrinsics::EvalContextExt as IntrinsicsEvalContextExt;
pub use crate::shims::tls::{EvalContextExt as TlsEvalContextExt, TlsData};
pub use crate::shims::time::{EvalContextExt as TimeEvalContextExt};
pub use crate::shims::dlsym::{Dlsym, EvalContextExt as DlsymEvalContextExt};
pub use crate::shims::env::{EnvVars, EvalContextExt as EnvEvalContextExt};
pub use crate::shims::fs::{FileHandler, EvalContextExt as FileEvalContextExt};
pub use crate::shims::foreign_items::EvalContextExt as ForeignItemsEvalContextExt;
pub use crate::shims::fs::{EvalContextExt as FileEvalContextExt, FileHandler};
pub use crate::shims::intrinsics::EvalContextExt as IntrinsicsEvalContextExt;
pub use crate::shims::panic::{CatchUnwindData, EvalContextExt as PanicEvalContextExt};
pub use crate::shims::time::EvalContextExt as TimeEvalContextExt;
pub use crate::shims::tls::{EvalContextExt as TlsEvalContextExt, TlsData};
pub use crate::shims::EvalContextExt as ShimsEvalContextExt;
pub use crate::eval::{create_ecx, eval_main, MiriConfig, TerminationInfo};
pub use crate::helpers::EvalContextExt as HelpersEvalContextExt;
pub use crate::machine::{
AllocExtra, Evaluator, FrameData, MemoryExtra, MiriEvalContext, MiriEvalContextExt,
MiriMemoryKind, NUM_CPUS, PAGE_SIZE, STACK_ADDR, STACK_SIZE,
};
pub use crate::mono_hash_map::MonoHashMap;
pub use crate::operator::EvalContextExt as OperatorEvalContextExt;
pub use crate::range_map::RangeMap;
pub use crate::helpers::{EvalContextExt as HelpersEvalContextExt};
pub use crate::mono_hash_map::MonoHashMap;
pub use crate::stacked_borrows::{
EvalContextExt as StackedBorEvalContextExt, Tag, Permission, Stack, Stacks, Item, PtrId,
GlobalState,
EvalContextExt as StackedBorEvalContextExt, GlobalState, Item, Permission, PtrId, Stack,
Stacks, Tag,
};
pub use crate::machine::{
PAGE_SIZE, STACK_ADDR, STACK_SIZE, NUM_CPUS,
MemoryExtra, AllocExtra, FrameData, MiriMemoryKind, Evaluator, MiriEvalContext, MiriEvalContextExt,
};
pub use crate::eval::{eval_main, create_ecx, MiriConfig, TerminationInfo};
/// Insert rustc arguments at the beginning of the argument list that Miri wants to be
/// set per default, for maximal validation power.

View File

@ -8,8 +8,12 @@ use std::rc::Rc;
use rand::rngs::StdRng;
use rustc::hir::def_id::DefId;
use rustc::ty::{self, layout::{Size, LayoutOf}, Ty, TyCtxt};
use rustc::mir;
use rustc::ty::{
self,
layout::{LayoutOf, Size},
Ty, TyCtxt,
};
use syntax::{attr, source_map::Span, symbol::sym};
use crate::*;
@ -33,7 +37,6 @@ pub struct FrameData<'tcx> {
pub catch_panic: Option<CatchUnwindData<'tcx>>,
}
/// Extra memory kinds
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MiriMemoryKind {
@ -114,7 +117,7 @@ pub struct Evaluator<'tcx> {
/// The temporary used for storing the argument of
/// the call to `miri_start_panic` (the panic payload) when unwinding.
pub(crate) panic_payload: Option<ImmTy<'tcx, Tag>>
pub(crate) panic_payload: Option<ImmTy<'tcx, Tag>>,
}
impl<'tcx> Evaluator<'tcx> {
@ -130,7 +133,7 @@ impl<'tcx> Evaluator<'tcx> {
tls: TlsData::default(),
communicate,
file_handler: Default::default(),
panic_payload: None
panic_payload: None,
}
}
}
@ -164,13 +167,8 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> {
type PointerTag = Tag;
type ExtraFnVal = Dlsym;
type MemoryMap = MonoHashMap<
AllocId,
(
MemoryKind<MiriMemoryKind>,
Allocation<Tag, Self::AllocExtra>,
),
>;
type MemoryMap =
MonoHashMap<AllocId, (MemoryKind<MiriMemoryKind>, Allocation<Tag, Self::AllocExtra>)>;
const STATIC_KIND: Option<MiriMemoryKind> = Some(MiriMemoryKind::Static);
@ -322,9 +320,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> {
stacked_borrows.static_base_ptr(alloc)
}
},
AllocExtra {
stacked_borrows: stacks,
},
AllocExtra { stacked_borrows: stacks },
);
(Cow::Owned(alloc), base_tag)
}
@ -334,10 +330,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> {
if !memory_extra.validate {
Tag::Untagged
} else {
memory_extra
.stacked_borrows
.borrow_mut()
.static_base_ptr(id)
memory_extra.stacked_borrows.borrow_mut().static_base_ptr(id)
}
}
@ -356,9 +349,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> {
}
#[inline(always)]
fn stack_push(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
) -> InterpResult<'tcx, FrameData<'tcx>> {
fn stack_push(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx, FrameData<'tcx>> {
Ok(FrameData {
call_id: ecx.memory.extra.stacked_borrows.borrow_mut().new_call(),
catch_panic: None,
@ -369,7 +360,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> {
fn stack_pop(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
extra: FrameData<'tcx>,
unwinding: bool
unwinding: bool,
) -> InterpResult<'tcx, StackPopInfo> {
ecx.handle_stack_pop(extra, unwinding)
}

View File

@ -5,10 +5,10 @@
//! The API is completely tailored to what `memory.rs` needs. It is still in
//! a separate file to minimize the amount of code that has to care about the unsafety.
use std::collections::hash_map::Entry;
use std::cell::RefCell;
use std::hash::Hash;
use std::borrow::Borrow;
use std::cell::RefCell;
use std::collections::hash_map::Entry;
use std::hash::Hash;
use rustc_data_structures::fx::FxHashMap;
@ -26,7 +26,7 @@ impl<K: Hash + Eq, V> MonoHashMap<K, V> {
/// as long as the `Ref` returned by `RefCell::borrow()` is alive. So we can't return the
/// iterator, as that would drop the `Ref`. We can't return both, as it's not possible in Rust
/// to have a struct/tuple with a field that refers to another field.
pub fn iter<T>(&self, f: impl FnOnce(&mut dyn Iterator<Item=(&K, &V)>) -> T) -> T {
pub fn iter<T>(&self, f: impl FnOnce(&mut dyn Iterator<Item = (&K, &V)>) -> T) -> T {
f(&mut self.0.borrow().iter().map(|(k, v)| (k, &**v)))
}
}
@ -40,30 +40,28 @@ impl<K: Hash + Eq, V> Default for MonoHashMap<K, V> {
impl<K: Hash + Eq, V> AllocMap<K, V> for MonoHashMap<K, V> {
#[inline(always)]
fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
where K: Borrow<Q>
where
K: Borrow<Q>,
{
self.0.get_mut().contains_key(k)
}
#[inline(always)]
fn insert(&mut self, k: K, v: V) -> Option<V>
{
fn insert(&mut self, k: K, v: V) -> Option<V> {
self.0.get_mut().insert(k, Box::new(v)).map(|x| *x)
}
#[inline(always)]
fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
where K: Borrow<Q>
where
K: Borrow<Q>,
{
self.0.get_mut().remove(k).map(|x| *x)
}
#[inline(always)]
fn filter_map_collect<T>(&self, mut f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T> {
self.0.borrow()
.iter()
.filter_map(move |(k, v)| f(k, &*v))
.collect()
self.0.borrow().iter().filter_map(move |(k, v)| f(k, &*v)).collect()
}
/// The most interesting method: Providing a shared ref without
@ -73,11 +71,7 @@ impl<K: Hash + Eq, V> AllocMap<K, V> for MonoHashMap<K, V> {
/// if it returns a reference, that is used directly, if it
/// returns owned data, that is put into the map and returned.
#[inline(always)]
fn get_or<E>(
&self,
k: K,
vacant: impl FnOnce() -> Result<V, E>
) -> Result<&V, E> {
fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E> {
let val: *const V = match self.0.borrow_mut().entry(k) {
Entry::Occupied(entry) => &**entry.get(),
Entry::Vacant(entry) => &**entry.insert(Box::new(vacant()?)),
@ -88,12 +82,7 @@ impl<K: Hash + Eq, V> AllocMap<K, V> for MonoHashMap<K, V> {
}
#[inline(always)]
fn get_mut_or<E>(
&mut self,
k: K,
vacant: impl FnOnce() -> Result<V, E>
) -> Result<&mut V, E>
{
fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E> {
match self.0.get_mut().entry(k) {
Entry::Occupied(e) => Ok(e.into_mut()),
Entry::Vacant(e) => {

View File

@ -1,7 +1,10 @@
use std::convert::TryFrom;
use rustc::ty::{Ty, layout::{Size, LayoutOf}};
use rustc::mir;
use rustc::ty::{
layout::{LayoutOf, Size},
Ty,
};
use crate::*;
@ -13,11 +16,7 @@ pub trait EvalContextExt<'tcx> {
right: ImmTy<'tcx, Tag>,
) -> InterpResult<'tcx, (Scalar<Tag>, bool, Ty<'tcx>)>;
fn ptr_eq(
&self,
left: Scalar<Tag>,
right: Scalar<Tag>,
) -> InterpResult<'tcx, bool>;
fn ptr_eq(&self, left: Scalar<Tag>, right: Scalar<Tag>) -> InterpResult<'tcx, bool>;
fn pointer_offset_inbounds(
&self,
@ -41,12 +40,15 @@ impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'mir, 'tcx> {
Ok(match bin_op {
Eq | Ne => {
// This supports fat pointers.
#[rustfmt::skip]
let eq = match (*left, *right) {
(Immediate::Scalar(left), Immediate::Scalar(right)) =>
self.ptr_eq(left.not_undef()?, right.not_undef()?)?,
(Immediate::ScalarPair(left1, left2), Immediate::ScalarPair(right1, right2)) =>
self.ptr_eq(left1.not_undef()?, right1.not_undef()?)? &&
self.ptr_eq(left2.not_undef()?, right2.not_undef()?)?,
(Immediate::Scalar(left), Immediate::Scalar(right)) => {
self.ptr_eq(left.not_undef()?, right.not_undef()?)?
}
(Immediate::ScalarPair(left1, left2), Immediate::ScalarPair(right1, right2)) => {
self.ptr_eq(left1.not_undef()?, right1.not_undef()?)?
&& self.ptr_eq(left2.not_undef()?, right2.not_undef()?)?
}
_ => bug!("Type system should not allow comparing Scalar with ScalarPair"),
};
(Scalar::from_bool(if bin_op == Eq { eq } else { !eq }), false, self.tcx.types.bool)
@ -68,10 +70,8 @@ impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'mir, 'tcx> {
}
Offset => {
let pointee_ty = left.layout.ty
.builtin_deref(true)
.expect("Offset called on non-ptr type")
.ty;
let pointee_ty =
left.layout.ty.builtin_deref(true).expect("Offset called on non-ptr type").ty;
let ptr = self.pointer_offset_inbounds(
left.to_scalar()?,
pointee_ty,
@ -80,15 +80,11 @@ impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'mir, 'tcx> {
(ptr, false, left.layout.ty)
}
_ => bug!("Invalid operator on pointers: {:?}", bin_op)
_ => bug!("Invalid operator on pointers: {:?}", bin_op),
})
}
fn ptr_eq(
&self,
left: Scalar<Tag>,
right: Scalar<Tag>,
) -> InterpResult<'tcx, bool> {
fn ptr_eq(&self, left: Scalar<Tag>, right: Scalar<Tag>) -> InterpResult<'tcx, bool> {
let size = self.pointer_size();
// Just compare the integers.
// TODO: Do we really want to *always* do that, even when comparing two live in-bounds pointers?

View File

@ -29,10 +29,7 @@ impl<T> RangeMap<T> {
let size = size.bytes();
let mut map = RangeMap { v: Vec::new() };
if size > 0 {
map.v.push(Elem {
range: 0..size,
data: init
});
map.v.push(Elem { range: 0..size, data: init });
}
map
}
@ -53,7 +50,7 @@ impl<T> RangeMap<T> {
} else if offset >= elem.range.end {
// We are too far left (offset is further right).
debug_assert!(candidate >= left); // we are making progress
left = candidate+1;
left = candidate + 1;
} else {
// This is it!
return candidate;
@ -69,18 +66,16 @@ impl<T> RangeMap<T> {
let len = len.bytes();
// Compute a slice starting with the elements we care about.
let slice: &[Elem<T>] = if len == 0 {
// We just need any empty iterator. We don't even want to
// yield the element that surrounds this position.
&[]
} else {
let first_idx = self.find_offset(offset);
&self.v[first_idx..]
};
// We just need any empty iterator. We don't even want to
// yield the element that surrounds this position.
&[]
} else {
let first_idx = self.find_offset(offset);
&self.v[first_idx..]
};
// The first offset that is not included any more.
let end = offset + len;
slice.iter()
.take_while(move |elem| elem.range.start < end)
.map(|elem| &elem.data)
slice.iter().take_while(move |elem| elem.range.start < end).map(|elem| &elem.data)
}
pub fn iter_mut_all<'a>(&'a mut self) -> impl Iterator<Item = &'a mut T> + 'a {
@ -99,18 +94,17 @@ impl<T> RangeMap<T> {
// Nothing to do.
return false;
}
debug_assert!(elem.range.contains(&split_offset),
"the `split_offset` is not in the element to be split");
debug_assert!(
elem.range.contains(&split_offset),
"the `split_offset` is not in the element to be split"
);
// Now we really have to split. Reduce length of first element.
let second_range = split_offset..elem.range.end;
elem.range.end = split_offset;
// Copy the data, and insert second element.
let second = Elem {
range: second_range,
data: elem.data.clone(),
};
self.v.insert(index+1, second);
let second = Elem { range: second_range, data: elem.data.clone() };
self.v.insert(index + 1, second);
return true;
}
@ -130,73 +124,79 @@ impl<T> RangeMap<T> {
let len = len.bytes();
// Compute a slice containing exactly the elements we care about
let slice: &mut [Elem<T>] = if len == 0 {
// We just need any empty iterator. We don't even want to
// yield the element that surrounds this position, nor do
// any splitting.
&mut []
} else {
// Make sure we got a clear beginning
let mut first_idx = self.find_offset(offset);
if self.split_index(first_idx, offset) {
// The newly created 2nd element is ours
first_idx += 1;
}
let first_idx = first_idx; // no more mutation
// Find our end. Linear scan, but that's ok because the iteration
// is doing the same linear scan anyway -- no increase in complexity.
// We combine this scan with a scan for duplicates that we can merge, to reduce
// the number of elements.
// We stop searching after the first "block" of size 1, to avoid spending excessive
// amounts of time on the merging.
let mut equal_since_idx = first_idx;
// Once we see too many non-mergeable blocks, we stop.
// The initial value is chosen via... magic. Benchmarking and magic.
let mut successful_merge_count = 3usize;
let mut end_idx = first_idx; // when the loop is done, this is the first excluded element.
loop {
// Compute if `end` is the last element we need to look at.
let done = self.v[end_idx].range.end >= offset+len;
// We definitely need to include `end`, so move the index.
end_idx += 1;
debug_assert!(done || end_idx < self.v.len(), "iter_mut: end-offset {} is out-of-bounds", offset+len);
// see if we want to merge everything in `equal_since..end` (exclusive at the end!)
if successful_merge_count > 0 {
if done || self.v[end_idx].data != self.v[equal_since_idx].data {
// Everything in `equal_since..end` was equal. Make them just one element covering
// the entire range.
let removed_elems = end_idx - equal_since_idx - 1; // number of elements that we would remove
if removed_elems > 0 {
// Adjust the range of the first element to cover all of them.
let equal_until = self.v[end_idx - 1].range.end; // end of range of last of the equal elements
self.v[equal_since_idx].range.end = equal_until;
// Delete the rest of them.
self.v.splice(equal_since_idx+1..end_idx, std::iter::empty());
// Adjust `end_idx` because we made the list shorter.
end_idx -= removed_elems;
// Adjust the count for the cutoff.
successful_merge_count += removed_elems;
} else {
// Adjust the count for the cutoff.
successful_merge_count -= 1;
}
// Go on scanning for the next block starting here.
equal_since_idx = end_idx;
// We just need any empty iterator. We don't even want to
// yield the element that surrounds this position, nor do
// any splitting.
&mut []
} else {
// Make sure we got a clear beginning
let mut first_idx = self.find_offset(offset);
if self.split_index(first_idx, offset) {
// The newly created 2nd element is ours
first_idx += 1;
}
// No more mutation.
let first_idx = first_idx;
// Find our end. Linear scan, but that's ok because the iteration
// is doing the same linear scan anyway -- no increase in complexity.
// We combine this scan with a scan for duplicates that we can merge, to reduce
// the number of elements.
// We stop searching after the first "block" of size 1, to avoid spending excessive
// amounts of time on the merging.
let mut equal_since_idx = first_idx;
// Once we see too many non-mergeable blocks, we stop.
// The initial value is chosen via... magic. Benchmarking and magic.
let mut successful_merge_count = 3usize;
// When the loop is done, this is the first excluded element.
let mut end_idx = first_idx;
loop {
// Compute if `end` is the last element we need to look at.
let done = self.v[end_idx].range.end >= offset + len;
// We definitely need to include `end`, so move the index.
end_idx += 1;
debug_assert!(
done || end_idx < self.v.len(),
"iter_mut: end-offset {} is out-of-bounds",
offset + len
);
// see if we want to merge everything in `equal_since..end` (exclusive at the end!)
if successful_merge_count > 0 {
if done || self.v[end_idx].data != self.v[equal_since_idx].data {
// Everything in `equal_since..end` was equal. Make them just one element covering
// the entire range.
let removed_elems = end_idx - equal_since_idx - 1; // number of elements that we would remove
if removed_elems > 0 {
// Adjust the range of the first element to cover all of them.
let equal_until = self.v[end_idx - 1].range.end; // end of range of last of the equal elements
self.v[equal_since_idx].range.end = equal_until;
// Delete the rest of them.
self.v.splice(equal_since_idx + 1..end_idx, std::iter::empty());
// Adjust `end_idx` because we made the list shorter.
end_idx -= removed_elems;
// Adjust the count for the cutoff.
successful_merge_count += removed_elems;
} else {
// Adjust the count for the cutoff.
successful_merge_count -= 1;
}
}
// Leave loop if this is the last element.
if done {
break;
// Go on scanning for the next block starting here.
equal_since_idx = end_idx;
}
}
// Move to last included instead of first excluded index.
let end_idx = end_idx-1;
// We need to split the end as well. Even if this performs a
// split, we don't have to adjust our index as we only care about
// the first part of the split.
self.split_index(end_idx, offset+len);
// Now we yield the slice. `end` is inclusive.
&mut self.v[first_idx..=end_idx]
};
// Leave loop if this is the last element.
if done {
break;
}
}
// Move to last included instead of first excluded index.
let end_idx = end_idx - 1;
// We need to split the end as well. Even if this performs a
// split, we don't have to adjust our index as we only care about
// the first part of the split.
self.split_index(end_idx, offset + len);
// Now we yield the slice. `end` is inclusive.
&mut self.v[first_idx..=end_idx]
};
slice.iter_mut().map(|elem| &mut elem.data)
}
}
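// Illustrative sketch, not part of this commit: the `iter_mut` above merges neighbouring
// elements whose data compares equal while it scans, so that repeated splitting does not make
// the map grow without bound. A much simpler (eager, hypothetical) version of that idea:
fn merge_adjacent_equal<T: PartialEq>(v: &mut Vec<(std::ops::Range<u64>, T)>) {
    let mut i = 0;
    while i + 1 < v.len() {
        if v[i].1 == v[i + 1].1 && v[i].0.end == v[i + 1].0.start {
            // Extend the first element to also cover the second, then drop the second.
            let merged_end = v[i + 1].0.end;
            v[i].0.end = merged_end;
            v.remove(i + 1);
        } else {
            i += 1;
        }
    }
}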
@ -209,12 +209,7 @@ mod tests {
fn to_vec<T: Copy>(map: &RangeMap<T>, offset: u64, len: u64) -> Vec<T> {
(offset..offset + len)
.into_iter()
.map(|i| map
.iter(Size::from_bytes(i), Size::from_bytes(1))
.next()
.map(|&t| t)
.unwrap()
)
.map(|i| map.iter(Size::from_bytes(i), Size::from_bytes(1)).next().map(|&t| t).unwrap())
.collect()
}
@ -250,10 +245,7 @@ mod tests {
*x = 43;
}
assert_eq!(map.v.len(), 5);
assert_eq!(
to_vec(&map, 10, 10),
vec![-1, 42, -1, -1, -1, 43, -1, -1, -1, -1]
);
assert_eq!(to_vec(&map, 10, 10), vec![-1, 42, -1, -1, -1, 43, -1, -1, -1, -1]);
for x in map.iter_mut(Size::from_bytes(10), Size::from_bytes(10)) {
if *x < 42 {
@ -261,31 +253,23 @@ mod tests {
}
}
assert_eq!(map.v.len(), 6);
assert_eq!(
to_vec(&map, 10, 10),
vec![23, 42, 23, 23, 23, 43, 23, 23, 23, 23]
);
assert_eq!(to_vec(&map, 10, 10), vec![23, 42, 23, 23, 23, 43, 23, 23, 23, 23]);
assert_eq!(to_vec(&map, 13, 5), vec![23, 23, 43, 23, 23]);
for x in map.iter_mut(Size::from_bytes(15), Size::from_bytes(5)) {
*x = 19;
}
assert_eq!(map.v.len(), 6);
assert_eq!(
to_vec(&map, 10, 10),
vec![23, 42, 23, 23, 23, 19, 19, 19, 19, 19]
);
assert_eq!(to_vec(&map, 10, 10), vec![23, 42, 23, 23, 23, 19, 19, 19, 19, 19]);
// Should be seeing two blocks with 19.
assert_eq!(map.iter(Size::from_bytes(15), Size::from_bytes(2))
.map(|&t| t).collect::<Vec<_>>(), vec![19, 19]);
assert_eq!(
map.iter(Size::from_bytes(15), Size::from_bytes(2)).map(|&t| t).collect::<Vec<_>>(),
vec![19, 19]
);
// A NOP `iter_mut` should trigger merging.
for _ in map.iter_mut(Size::from_bytes(15), Size::from_bytes(5)) { }
for _ in map.iter_mut(Size::from_bytes(15), Size::from_bytes(5)) {}
assert_eq!(map.v.len(), 5);
assert_eq!(
to_vec(&map, 10, 10),
vec![23, 42, 23, 23, 23, 19, 19, 19, 19, 19]
);
assert_eq!(to_vec(&map, 10, 10), vec![23, 42, 23, 23, 23, 19, 19, 19, 19, 19]);
}
}

View File

@ -15,8 +15,7 @@ impl Dlsym {
Ok(match name {
"getentropy" => Some(GetEntropy),
"__pthread_get_minstack" => None,
_ =>
throw_unsup_format!("Unsupported dlsym: {}", name),
_ => throw_unsup_format!("Unsupported dlsym: {}", name),
})
}
}

View File

@ -1,10 +1,10 @@
use std::{iter, convert::TryInto};
use std::{convert::TryInto, iter};
use rustc::mir;
use rustc::ty::layout::{Align, LayoutOf, Size};
use rustc::hir::def_id::DefId;
use rustc_apfloat::Float;
use rustc::mir;
use rustc::ty;
use rustc::ty::layout::{Align, LayoutOf, Size};
use rustc_apfloat::Float;
use syntax::attr;
use syntax::symbol::sym;
@ -47,14 +47,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
Scalar::from_int(0, this.pointer_size())
} else {
let align = this.min_align(size, kind);
let ptr = this
.memory
.allocate(Size::from_bytes(size), align, kind.into());
let ptr = this.memory.allocate(Size::from_bytes(size), align, kind.into());
if zero_init {
// We just allocated this, the access is definitely in-bounds.
this.memory
.write_bytes(ptr.into(), iter::repeat(0u8).take(size as usize))
.unwrap();
this.memory.write_bytes(ptr.into(), iter::repeat(0u8).take(size as usize)).unwrap();
}
Scalar::Ptr(ptr)
}
@ -82,8 +78,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
Ok(Scalar::from_int(0, this.pointer_size()))
} else {
let new_ptr =
this.memory
.allocate(Size::from_bytes(new_size), new_align, kind.into());
this.memory.allocate(Size::from_bytes(new_size), new_align, kind.into());
Ok(Scalar::Ptr(new_ptr))
}
} else {
@ -110,12 +105,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
/// by this function.
/// Returns Ok(Some(body)) if processing the foreign item
/// is delegated to another function.
#[rustfmt::skip]
fn emulate_foreign_item(
&mut self,
def_id: DefId,
args: &[OpTy<'tcx, Tag>],
ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
_unwind: Option<mir::BasicBlock>
_unwind: Option<mir::BasicBlock>,
) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
let this = self.eval_context_mut();
let attrs = this.tcx.get_attrs(def_id);
@ -136,11 +132,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// also be a custom user-provided implementation via `#![feature(panic_runtime)]`
"__rust_start_panic" => {
// FIXME we might want to cache this... but it's not really performance-critical.
let panic_runtime = tcx.crates().iter()
let panic_runtime = tcx
.crates()
.iter()
.find(|cnum| tcx.is_panic_runtime(**cnum))
.expect("No panic runtime found!");
let panic_runtime = tcx.crate_name(*panic_runtime);
let start_panic_instance = this.resolve_path(&[&*panic_runtime.as_str(), "__rust_start_panic"])?;
let start_panic_instance =
this.resolve_path(&[&*panic_runtime.as_str(), "__rust_start_panic"])?;
return Ok(Some(&*this.load_mir(start_panic_instance.def, None)?));
}
// Similarly, we forward calls to the `panic_impl` foreign item to its implementation.
@ -151,7 +150,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
return Ok(Some(&*this.load_mir(panic_impl_instance.def, None)?));
}
"exit" | "ExitProcess" => {
| "exit"
| "ExitProcess"
=> {
// it's really u32 for ExitProcess, but we have to put it into the `Exit` variant anyway
let code = this.read_scalar(args[0])?.to_i32()?;
throw_machine_stop!(TerminationInfo::Exit(code.into()));
@ -175,9 +176,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
"calloc" => {
let items = this.read_scalar(args[0])?.to_machine_usize(this)?;
let len = this.read_scalar(args[1])?.to_machine_usize(this)?;
let size = items
.checked_mul(len)
.ok_or_else(|| err_panic!(Overflow(mir::BinOp::Mul)))?;
let size =
items.checked_mul(len).ok_or_else(|| err_panic!(Overflow(mir::BinOp::Mul)))?;
let res = this.malloc(size, /*zero_init:*/ true, MiriMemoryKind::C);
this.write_scalar(res, dest)?;
}
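// Illustrative sketch, not part of this commit: the `calloc` arm above computes `items * len`
// with `checked_mul`, so an overflowing allocation request is reported as an error instead of
// silently wrapping. Hypothetical standalone helper:
fn calloc_size(items: u64, len: u64) -> Result<u64, &'static str> {
    items.checked_mul(len).ok_or("multiplication overflow in calloc size")
}

// calloc_size(8, 16) == Ok(128); calloc_size(u64::max_value(), 2) reports the overflow.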
@ -250,9 +250,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
MiriMemoryKind::Rust.into(),
);
// We just allocated this, the access is definitely in-bounds.
this.memory
.write_bytes(ptr.into(), iter::repeat(0u8).take(size as usize))
.unwrap();
this.memory.write_bytes(ptr.into(), iter::repeat(0u8).take(size as usize)).unwrap();
this.write_scalar(ptr, dest)?;
}
"__rust_dealloc" => {
@ -268,10 +266,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let ptr = this.force_ptr(ptr)?;
this.memory.deallocate(
ptr,
Some((
Size::from_bytes(old_size),
Align::from_bytes(align).unwrap(),
)),
Some((Size::from_bytes(old_size), Align::from_bytes(align).unwrap())),
MiriMemoryKind::Rust.into(),
)?;
}
@ -346,7 +341,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
"__rust_maybe_catch_panic" => {
this.handle_catch_panic(args, dest, ret)?;
return Ok(None)
return Ok(None);
}
"memcmp" => {
@ -404,7 +399,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
}
"__errno_location" | "__error" => {
| "__errno_location"
| "__error"
=> {
let errno_place = this.machine.last_error.unwrap();
this.write_scalar(errno_place.to_ref().to_scalar()?, dest)?;
}
@ -434,7 +431,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?;
}
"open" | "open64" => {
| "open"
| "open64"
=> {
let result = this.open(args[0], args[1])?;
this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?;
}
@ -444,7 +443,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?;
}
"close" | "close$NOCANCEL" => {
| "close"
| "close$NOCANCEL"
=> {
let result = this.close(args[0])?;
this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?;
}
@ -510,7 +511,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
// math functions
"cbrtf" | "coshf" | "sinhf" | "tanf" | "acosf" | "asinf" | "atanf" => {
| "cbrtf"
| "coshf"
| "sinhf"
| "tanf"
| "acosf"
| "asinf"
| "atanf"
=> {
// FIXME: Using host floats.
let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
let f = match link_name {
@ -526,7 +534,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
}
// underscore case for windows
"_hypotf" | "hypotf" | "atan2f" => {
| "_hypotf"
| "hypotf"
| "atan2f"
=> {
// FIXME: Using host floats.
let f1 = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
let f2 = f32::from_bits(this.read_scalar(args[1])?.to_u32()?);
@ -538,7 +549,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.write_scalar(Scalar::from_u32(n.to_bits()), dest)?;
}
"cbrt" | "cosh" | "sinh" | "tan" | "acos" | "asin" | "atan" => {
| "cbrt"
| "cosh"
| "sinh"
| "tan"
| "acos"
| "asin"
| "atan"
=> {
// FIXME: Using host floats.
let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
let f = match link_name {
@ -555,7 +573,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
// underscore case for windows, here and below
// (see https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/floating-point-primitives?view=vs-2019)
"_hypot" | "hypot" | "atan2" => {
| "_hypot"
| "hypot"
| "atan2"
=> {
// FIXME: Using host floats.
let f1 = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
let f2 = f64::from_bits(this.read_scalar(args[1])?.to_u64()?);
@ -567,7 +588,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.write_scalar(Scalar::from_u64(n.to_bits()), dest)?;
}
// For radix-2 (binary) systems, `ldexp` and `scalbn` are the same.
"_ldexp" | "ldexp" | "scalbn" => {
| "_ldexp"
| "ldexp"
| "scalbn"
=> {
let x = this.read_scalar(args[0])?.to_f64()?;
let exp = this.read_scalar(args[1])?.to_i32()?;
@ -586,7 +610,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
// Some things needed for `sys::thread` initialization to go through.
"signal" | "sigaction" | "sigaltstack" => {
| "signal"
| "sigaction"
| "sigaltstack"
=> {
this.write_scalar(Scalar::from_int(0, dest.layout.size), dest)?;
}
@ -596,14 +623,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
trace!("sysconf() called with name {}", name);
// TODO: Cache the sysconf integers via Miri's global cache.
let paths = &[
(
&["libc", "_SC_PAGESIZE"],
Scalar::from_int(PAGE_SIZE, dest.layout.size),
),
(
&["libc", "_SC_GETPW_R_SIZE_MAX"],
Scalar::from_int(-1, dest.layout.size),
),
(&["libc", "_SC_PAGESIZE"], Scalar::from_int(PAGE_SIZE, dest.layout.size)),
(&["libc", "_SC_GETPW_R_SIZE_MAX"], Scalar::from_int(-1, dest.layout.size)),
(
&["libc", "_SC_NPROCESSORS_ONLN"],
Scalar::from_int(NUM_CPUS, dest.layout.size),
@ -688,7 +709,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
// Stack size/address stuff.
"pthread_attr_init"
| "pthread_attr_init"
| "pthread_attr_destroy"
| "pthread_self"
| "pthread_attr_setstacksize" => {
@ -712,12 +733,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
// We don't support threading. (Also for Windows.)
"pthread_create" | "CreateThread" => {
| "pthread_create"
| "CreateThread"
=> {
throw_unsup_format!("Miri does not support threading");
}
// Stub out calls for condvar, mutex and rwlock, to just return `0`.
"pthread_mutexattr_init"
| "pthread_mutexattr_init"
| "pthread_mutexattr_settype"
| "pthread_mutex_init"
| "pthread_mutexattr_destroy"
@ -732,7 +755,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
| "pthread_condattr_setclock"
| "pthread_cond_init"
| "pthread_condattr_destroy"
| "pthread_cond_destroy" => {
| "pthread_cond_destroy"
=> {
this.write_null(dest)?;
}
@ -751,7 +775,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
// macOS API stubs.
"pthread_attr_get_np" | "pthread_getattr_np" => {
| "pthread_attr_get_np"
| "pthread_getattr_np"
=> {
this.write_null(dest)?;
}
"pthread_get_stackaddr_np" => {
@ -822,32 +848,36 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// Any non zero value works for the stdlib. This is just used for stack overflows anyway.
this.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
}
"InitializeCriticalSection"
| "InitializeCriticalSection"
| "EnterCriticalSection"
| "LeaveCriticalSection"
| "DeleteCriticalSection" => {
| "DeleteCriticalSection"
=> {
// Nothing to do, not even a return value.
}
"GetModuleHandleW"
| "GetModuleHandleW"
| "GetProcAddress"
| "TryEnterCriticalSection"
| "GetConsoleScreenBufferInfo"
| "SetConsoleTextAttribute" => {
| "SetConsoleTextAttribute"
=> {
// Pretend these do not exist / nothing happened, by returning zero.
this.write_null(dest)?;
}
"GetSystemInfo" => {
let system_info = this.deref_operand(args[0])?;
// Initialize with `0`.
this.memory
.write_bytes(system_info.ptr, iter::repeat(0u8).take(system_info.layout.size.bytes() as usize))?;
this.memory.write_bytes(
system_info.ptr,
iter::repeat(0u8).take(system_info.layout.size.bytes() as usize),
)?;
// Set number of processors.
let dword_size = Size::from_bytes(4);
let num_cpus = this.mplace_field(system_info, 6)?;
this.write_scalar(
Scalar::from_int(NUM_CPUS, dword_size),
num_cpus.into(),
)?;
this.write_scalar(Scalar::from_int(NUM_CPUS, dword_size), num_cpus.into())?;
}
"TlsAlloc" => {
@ -894,9 +924,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// stdout/stderr
use std::io::{self, Write};
let buf_cont = this
.memory
.read_bytes(buf, Size::from_bytes(u64::from(n)))?;
let buf_cont = this.memory.read_bytes(buf, Size::from_bytes(u64::from(n)))?;
let res = if handle == -11 {
io::stdout().write(buf_cont)
} else {
@ -928,7 +956,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.write_null(dest)?;
}
"GetCommandLineW" => {
this.write_scalar(this.machine.cmd_line.expect("machine must be initialized"), dest)?;
this.write_scalar(
this.machine.cmd_line.expect("machine must be initialized"),
dest,
)?;
}
// The actual name of 'RtlGenRandom'
"SystemFunction036" => {
@ -955,10 +986,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
) -> InterpResult<'tcx, Option<ScalarMaybeUndef<Tag>>> {
let this = self.eval_context_mut();
if let Ok(instance) = this.resolve_path(path) {
let cid = GlobalId {
instance,
promoted: None,
};
let cid = GlobalId { instance, promoted: None };
let const_val = this.const_eval_raw(cid)?;
let const_val = this.read_scalar(const_val.into())?;
return Ok(Some(const_val));

View File

@ -1,11 +1,11 @@
use std::collections::HashMap;
use std::convert::{TryInto, TryFrom};
use std::convert::{TryFrom, TryInto};
use std::fs::{remove_file, File, OpenOptions};
use std::io::{Read, Write};
use std::path::PathBuf;
use std::time::SystemTime;
use rustc::ty::layout::{Size, Align, LayoutOf};
use rustc::ty::layout::{Align, LayoutOf, Size};
use crate::stacked_borrows::Tag;
use crate::*;
@ -172,18 +172,18 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let fd = this.read_scalar(fd_op)?.to_i32()?;
let buf = this.read_scalar(buf_op)?.not_undef()?;
let count = this
.read_scalar(count_op)?
.to_machine_usize(&*this.tcx)?;
let count = this.read_scalar(count_op)?.to_machine_usize(&*this.tcx)?;
// Check that the *entire* buffer is actually valid memory.
this.memory.check_ptr_access(buf, Size::from_bytes(count), Align::from_bytes(1).unwrap())?;
this.memory.check_ptr_access(
buf,
Size::from_bytes(count),
Align::from_bytes(1).unwrap(),
)?;
// We cap the number of read bytes to the largest value that we are able to fit in both the
// host's and target's `isize`. This saves us from having to handle overflows later.
let count = count
.min(this.isize_max() as u64)
.min(isize::max_value() as u64);
let count = count.min(this.isize_max() as u64).min(isize::max_value() as u64);
if let Some(handle) = this.machine.file_handler.handles.get_mut(&fd) {
// This can never fail because `count` was capped to be smaller than
@ -227,18 +227,18 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let fd = this.read_scalar(fd_op)?.to_i32()?;
let buf = this.read_scalar(buf_op)?.not_undef()?;
let count = this
.read_scalar(count_op)?
.to_machine_usize(&*this.tcx)?;
let count = this.read_scalar(count_op)?.to_machine_usize(&*this.tcx)?;
// Check that the *entire* buffer is actually valid memory.
this.memory.check_ptr_access(buf, Size::from_bytes(count), Align::from_bytes(1).unwrap())?;
this.memory.check_ptr_access(
buf,
Size::from_bytes(count),
Align::from_bytes(1).unwrap(),
)?;
// We cap the number of written bytes to the largest value that we are able to fit in both the
// host's and target's `isize`. This saves us from having to handle overflows later.
let count = count
.min(this.isize_max() as u64)
.min(isize::max_value() as u64);
let count = count.min(this.isize_max() as u64).min(isize::max_value() as u64);
if let Some(handle) = this.machine.file_handler.handles.get_mut(&fd) {
let bytes = this.memory.read_bytes(buf, Size::from_bytes(count))?;
@ -263,11 +263,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
fn statx(
&mut self,
dirfd_op: OpTy<'tcx, Tag>, // Should be an `int`
dirfd_op: OpTy<'tcx, Tag>, // Should be an `int`
pathname_op: OpTy<'tcx, Tag>, // Should be a `const char *`
flags_op: OpTy<'tcx, Tag>, // Should be an `int`
_mask_op: OpTy<'tcx, Tag>, // Should be an `unsigned int`
statxbuf_op: OpTy<'tcx, Tag> // Should be a `struct statx *`
flags_op: OpTy<'tcx, Tag>, // Should be an `int`
_mask_op: OpTy<'tcx, Tag>, // Should be an `unsigned int`
statxbuf_op: OpTy<'tcx, Tag>, // Should be a `struct statx *`
) -> InterpResult<'tcx, i32> {
let this = self.eval_context_mut();
@ -302,29 +302,20 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let path: PathBuf = this.read_os_str_from_c_str(pathname_scalar)?.into();
// `flags` should be a `c_int` but the `syscall` function provides an `isize`.
let flags: i32 = this
.read_scalar(flags_op)?
.to_machine_isize(&*this.tcx)?
.try_into()
.map_err(|e| err_unsup_format!(
"Failed to convert pointer sized operand to integer: {}",
e
))?;
let flags: i32 =
this.read_scalar(flags_op)?.to_machine_isize(&*this.tcx)?.try_into().map_err(|e| {
err_unsup_format!("Failed to convert pointer sized operand to integer: {}", e)
})?;
// `dirfd` should be a `c_int` but the `syscall` function provides an `isize`.
let dirfd: i32 = this
.read_scalar(dirfd_op)?
.to_machine_isize(&*this.tcx)?
.try_into()
.map_err(|e| err_unsup_format!(
"Failed to convert pointer sized operand to integer: {}",
e
))?;
let dirfd: i32 =
this.read_scalar(dirfd_op)?.to_machine_isize(&*this.tcx)?.try_into().map_err(|e| {
err_unsup_format!("Failed to convert pointer sized operand to integer: {}", e)
})?;
// we only support interpreting `path` as an absolute directory or as a directory relative
// to `dirfd` when the latter is `AT_FDCWD`. The behavior of `statx` with a relative path
// and a directory file descriptor other than `AT_FDCWD` is specified but it cannot be
// tested from `libstd`. If you found this error, please open an issue reporting it.
if !(path.is_absolute() || dirfd == this.eval_libc_i32("AT_FDCWD")?)
{
if !(path.is_absolute() || dirfd == this.eval_libc_i32("AT_FDCWD")?) {
throw_unsup_format!(
"Using statx with a relative path and a file descriptor different from `AT_FDCWD` is not supported"
)
@ -368,29 +359,30 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// the owner, its group and other users. Given that we can only provide the file type
// without using platform specific methods, we only set the bits corresponding to the file
// type. This should be an `__u16` but `libc` provides its values as `u32`.
let mode: u16 = this.eval_libc(mode_name)?
.to_u32()?
.try_into()
.unwrap_or_else(|_| bug!("libc contains bad value for `{}` constant", mode_name));
let mode: u16 = this
.eval_libc(mode_name)?
.to_u32()?
.try_into()
.unwrap_or_else(|_| bug!("libc contains bad value for `{}` constant", mode_name));
let size = metadata.len();
let (access_sec, access_nsec) = extract_sec_and_nsec(
metadata.accessed(),
&mut mask,
this.eval_libc("STATX_ATIME")?.to_u32()?
this.eval_libc("STATX_ATIME")?.to_u32()?,
)?;
let (created_sec, created_nsec) = extract_sec_and_nsec(
metadata.created(),
&mut mask,
this.eval_libc("STATX_BTIME")?.to_u32()?
this.eval_libc("STATX_BTIME")?.to_u32()?,
)?;
let (modified_sec, modified_nsec) = extract_sec_and_nsec(
metadata.modified(),
&mut mask,
this.eval_libc("STATX_MTIME")?.to_u32()?
this.eval_libc("STATX_MTIME")?.to_u32()?,
)?;
let __u32_layout = this.libc_ty_layout("__u32")?;
@ -401,16 +393,16 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// zero for the unavailable fields.
// FIXME: Provide more fields using platform specific methods.
let imms = [
immty_from_uint_checked(mask, __u32_layout)?, // stx_mask
immty_from_uint_checked(mask, __u32_layout)?, // stx_mask
immty_from_uint_checked(0u128, __u32_layout)?, // stx_blksize
immty_from_uint_checked(0u128, __u64_layout)?, // stx_attributes
immty_from_uint_checked(0u128, __u32_layout)?, // stx_nlink
immty_from_uint_checked(0u128, __u32_layout)?, // stx_uid
immty_from_uint_checked(0u128, __u32_layout)?, // stx_gid
immty_from_uint_checked(mode, __u16_layout)?, // stx_mode
immty_from_uint_checked(mode, __u16_layout)?, // stx_mode
immty_from_uint_checked(0u128, __u16_layout)?, // statx padding
immty_from_uint_checked(0u128, __u64_layout)?, // stx_ino
immty_from_uint_checked(size, __u64_layout)?, // stx_size
immty_from_uint_checked(size, __u64_layout)?, // stx_size
immty_from_uint_checked(0u128, __u64_layout)?, // stx_blocks
immty_from_uint_checked(0u128, __u64_layout)?, // stx_attributes
immty_from_uint_checked(access_sec, __u64_layout)?, // stx_atime.tv_sec
@ -451,7 +443,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// Extracts the number of seconds and nanoseconds elapsed between `time` and the unix epoch, and
// then sets the `mask` bits determined by `flag` when `time` is Ok. If `time` is an error, it
// returns `(0, 0)` without setting any bits.
fn extract_sec_and_nsec<'tcx>(time: std::io::Result<SystemTime>, mask: &mut u32, flag: u32) -> InterpResult<'tcx, (u64, u32)> {
fn extract_sec_and_nsec<'tcx>(
time: std::io::Result<SystemTime>,
mask: &mut u32,
flag: u32,
) -> InterpResult<'tcx, (u64, u32)> {
if let Ok(time) = time {
let duration = system_time_to_duration(&time)?;
*mask |= flag;
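// Illustrative sketch, not part of this commit: `extract_sec_and_nsec` above splits the time
// elapsed since the Unix epoch into seconds and nanoseconds and sets the `mask` bit given by
// `flag` on success. This simplified version folds pre-epoch times into the zero case, whereas
// the real helper reports them as an unsupported operation:
use std::time::{SystemTime, UNIX_EPOCH};

fn extract_sec_and_nsec(time: std::io::Result<SystemTime>, mask: &mut u32, flag: u32) -> (u64, u32) {
    match time.ok().and_then(|t| t.duration_since(UNIX_EPOCH).ok()) {
        Some(duration) => {
            *mask |= flag;
            (duration.as_secs(), duration.subsec_nanos())
        }
        None => (0, 0),
    }
}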

View File

@ -1,10 +1,10 @@
use std::iter;
use rustc_apfloat::Float;
use rustc::mir;
use rustc::mir::interpret::{InterpResult, PointerArithmetic};
use rustc::ty::layout::{self, LayoutOf, Size, Align};
use rustc::ty;
use rustc::ty::layout::{self, Align, LayoutOf, Size};
use rustc_apfloat::Float;
use syntax::source_map::Span;
use crate::*;
@ -17,13 +17,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, Tag>],
ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
unwind: Option<mir::BasicBlock>
unwind: Option<mir::BasicBlock>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
if this.emulate_intrinsic(span, instance, args, ret)? {
return Ok(());
}
let tcx = &{this.tcx.tcx};
let tcx = &{ this.tcx.tcx };
let substs = instance.substs;
// All these intrinsics take raw pointers, so if we access memory directly
@ -37,13 +37,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
throw_machine_stop!(TerminationInfo::Abort);
}
"miri_start_panic" => return this.handle_miri_start_panic(args, unwind),
_ => {
_ =>
if let Some(p) = ret {
p
} else {
throw_unsup_format!("unimplemented (diverging) intrinsic: {}", intrinsic_name);
}
}
},
};
match intrinsic_name {
@ -75,9 +74,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.copy_op(args[1], place.into())?;
}
"atomic_load" |
"atomic_load_relaxed" |
"atomic_load_acq" => {
#[rustfmt::skip]
| "atomic_load"
| "atomic_load_relaxed"
| "atomic_load_acq"
=> {
let place = this.deref_operand(args[0])?;
let val = this.read_scalar(place.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
@ -90,9 +91,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.write_scalar(val, dest)?;
}
"atomic_store" |
"atomic_store_relaxed" |
"atomic_store_rel" => {
#[rustfmt::skip]
| "atomic_store"
| "atomic_store_relaxed"
| "atomic_store_rel"
=> {
let place = this.deref_operand(args[0])?;
let val = this.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
@ -105,10 +108,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.write_scalar(val, place.into())?;
}
"atomic_fence_acq" |
"atomic_fence_rel" |
"atomic_fence_acqrel" |
"atomic_fence" => {
#[rustfmt::skip]
| "atomic_fence_acq"
| "atomic_fence_rel"
| "atomic_fence_acqrel"
| "atomic_fence"
=> {
                // We are inherently single-threaded and single-core, so this is a NOP.
}
@ -139,46 +144,49 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
// binary_op will bail if either of them is not a scalar
// `binary_op` will bail if either of them is not a scalar.
let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
this.write_immediate(res, dest)?; // old value is returned
// update ptr depending on comparison
// Return old value.
this.write_immediate(res, dest)?;
// Update ptr depending on comparison.
if eq.to_bool()? {
this.write_scalar(new, place.into())?;
}
}
"atomic_or" |
"atomic_or_acq" |
"atomic_or_rel" |
"atomic_or_acqrel" |
"atomic_or_relaxed" |
"atomic_xor" |
"atomic_xor_acq" |
"atomic_xor_rel" |
"atomic_xor_acqrel" |
"atomic_xor_relaxed" |
"atomic_and" |
"atomic_and_acq" |
"atomic_and_rel" |
"atomic_and_acqrel" |
"atomic_and_relaxed" |
"atomic_nand" |
"atomic_nand_acq" |
"atomic_nand_rel" |
"atomic_nand_acqrel" |
"atomic_nand_relaxed" |
"atomic_xadd" |
"atomic_xadd_acq" |
"atomic_xadd_rel" |
"atomic_xadd_acqrel" |
"atomic_xadd_relaxed" |
"atomic_xsub" |
"atomic_xsub_acq" |
"atomic_xsub_rel" |
"atomic_xsub_acqrel" |
"atomic_xsub_relaxed" => {
#[rustfmt::skip]
| "atomic_or"
| "atomic_or_acq"
| "atomic_or_rel"
| "atomic_or_acqrel"
| "atomic_or_relaxed"
| "atomic_xor"
| "atomic_xor_acq"
| "atomic_xor_rel"
| "atomic_xor_acqrel"
| "atomic_xor_relaxed"
| "atomic_and"
| "atomic_and_acq"
| "atomic_and_rel"
| "atomic_and_acqrel"
| "atomic_and_relaxed"
| "atomic_nand"
| "atomic_nand_acq"
| "atomic_nand_rel"
| "atomic_nand_acqrel"
| "atomic_nand_relaxed"
| "atomic_xadd"
| "atomic_xadd_acq"
| "atomic_xadd_rel"
| "atomic_xadd_acqrel"
| "atomic_xadd_relaxed"
| "atomic_xsub"
| "atomic_xsub_acq"
| "atomic_xsub_rel"
| "atomic_xsub_acqrel"
| "atomic_xsub_relaxed"
=> {
let place = this.deref_operand(args[0])?;
if !place.layout.ty.is_integral() {
bug!("Atomic arithmetic operations only work on integer types");
@ -204,18 +212,16 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
};
// Atomics wrap around on overflow.
let val = this.binary_op(op, old, rhs)?;
let val = if neg {
this.unary_op(mir::UnOp::Not, val)?
} else {
val
};
let val = if neg { this.unary_op(mir::UnOp::Not, val)? } else { val };
this.write_immediate(*val, place.into())?;
}
"breakpoint" => unimplemented!(), // halt miri
"copy" |
"copy_nonoverlapping" => {
#[rustfmt::skip]
| "copy"
| "copy_nonoverlapping"
=> {
let elem_ty = substs.type_at(0);
let elem_layout = this.layout_of(elem_ty)?;
let elem_size = elem_layout.size.bytes();
@ -244,8 +250,21 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
}
"sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
"log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" | "roundf32" => {
#[rustfmt::skip]
| "sinf32"
| "fabsf32"
| "cosf32"
| "sqrtf32"
| "expf32"
| "exp2f32"
| "logf32"
| "log10f32"
| "log2f32"
| "floorf32"
| "ceilf32"
| "truncf32"
| "roundf32"
=> {
// FIXME: Using host floats.
let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
let f = match intrinsic_name {
@ -267,8 +286,21 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
}
"sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
"log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" | "roundf64" => {
#[rustfmt::skip]
| "sinf64"
| "fabsf64"
| "cosf64"
| "sqrtf64"
| "expf64"
| "exp2f64"
| "logf64"
| "log10f64"
| "log2f64"
| "floorf64"
| "ceilf64"
| "truncf64"
| "roundf64"
=> {
// FIXME: Using host floats.
let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
let f = match intrinsic_name {
@ -290,7 +322,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
}
"fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
#[rustfmt::skip]
| "fadd_fast"
| "fsub_fast"
| "fmul_fast"
| "fdiv_fast"
| "frem_fast"
=> {
let a = this.read_immediate(args[0])?;
let b = this.read_immediate(args[1])?;
let op = match intrinsic_name {
@ -304,7 +342,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.binop_ignore_overflow(op, a, b, dest)?;
}
"minnumf32" | "maxnumf32" | "copysignf32" => {
#[rustfmt::skip]
| "minnumf32"
| "maxnumf32"
| "copysignf32"
=> {
let a = this.read_scalar(args[0])?.to_f32()?;
let b = this.read_scalar(args[1])?.to_f32()?;
let res = match intrinsic_name {
@ -316,7 +358,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.write_scalar(Scalar::from_f32(res), dest)?;
}
"minnumf64" | "maxnumf64" | "copysignf64" => {
#[rustfmt::skip]
| "minnumf64"
| "maxnumf64"
| "copysignf64"
=> {
let a = this.read_scalar(args[0])?.to_f64()?;
let b = this.read_scalar(args[1])?.to_f64()?;
let res = match intrinsic_name {
@ -329,15 +375,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
"exact_div" =>
this.exact_div(
this.read_immediate(args[0])?,
this.read_immediate(args[1])?,
dest,
)?,
this.exact_div(this.read_immediate(args[0])?, this.read_immediate(args[1])?, dest)?,
"forget" => {}
"likely" | "unlikely" => {
#[rustfmt::skip]
| "likely"
| "unlikely"
=> {
// These just return their argument
let b = this.read_immediate(args[0])?;
this.write_immediate(*b, dest)?;
@ -365,7 +410,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// Do it in memory
let mplace = this.force_allocation(dest)?;
mplace.meta.unwrap_none(); // must be sized
this.memory.write_bytes(mplace.ptr, iter::repeat(0u8).take(dest.layout.size.bytes() as usize))?;
this.memory.write_bytes(
mplace.ptr,
iter::repeat(0u8).take(dest.layout.size.bytes() as usize),
)?;
}
}
}
@ -405,20 +453,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// FIXME: Using host floats.
let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
let f2 = f32::from_bits(this.read_scalar(args[1])?.to_u32()?);
this.write_scalar(
Scalar::from_u32(f.powf(f2).to_bits()),
dest,
)?;
this.write_scalar(Scalar::from_u32(f.powf(f2).to_bits()), dest)?;
}
"powf64" => {
// FIXME: Using host floats.
let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
let f2 = f64::from_bits(this.read_scalar(args[1])?.to_u64()?);
this.write_scalar(
Scalar::from_u64(f.powf(f2).to_bits()),
dest,
)?;
this.write_scalar(Scalar::from_u64(f.powf(f2).to_bits()), dest)?;
}
"fmaf32" => {
@ -426,10 +468,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let b = this.read_scalar(args[1])?.to_f32()?;
let c = this.read_scalar(args[2])?.to_f32()?;
let res = a.mul_add(b, c).value;
this.write_scalar(
Scalar::from_f32(res),
dest,
)?;
this.write_scalar(Scalar::from_f32(res), dest)?;
}
"fmaf64" => {
@ -437,53 +476,42 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let b = this.read_scalar(args[1])?.to_f64()?;
let c = this.read_scalar(args[2])?.to_f64()?;
let res = a.mul_add(b, c).value;
this.write_scalar(
Scalar::from_f64(res),
dest,
)?;
this.write_scalar(Scalar::from_f64(res), dest)?;
}
"powif32" => {
// FIXME: Using host floats.
let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
let i = this.read_scalar(args[1])?.to_i32()?;
this.write_scalar(
Scalar::from_u32(f.powi(i).to_bits()),
dest,
)?;
this.write_scalar(Scalar::from_u32(f.powi(i).to_bits()), dest)?;
}
"powif64" => {
// FIXME: Using host floats.
let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
let i = this.read_scalar(args[1])?.to_i32()?;
this.write_scalar(
Scalar::from_u64(f.powi(i).to_bits()),
dest,
)?;
this.write_scalar(Scalar::from_u64(f.powi(i).to_bits()), dest)?;
}
"size_of_val" => {
let mplace = this.deref_operand(args[0])?;
let (size, _) = this.size_and_align_of_mplace(mplace)?
let (size, _) = this
.size_and_align_of_mplace(mplace)?
.expect("size_of_val called on extern type");
let ptr_size = this.pointer_size();
this.write_scalar(
Scalar::from_uint(size.bytes() as u128, ptr_size),
dest,
)?;
this.write_scalar(Scalar::from_uint(size.bytes() as u128, ptr_size), dest)?;
}
"min_align_of_val" |
"align_of_val" => {
#[rustfmt::skip]
| "min_align_of_val"
| "align_of_val"
=> {
let mplace = this.deref_operand(args[0])?;
let (_, align) = this.size_and_align_of_mplace(mplace)?
let (_, align) = this
.size_and_align_of_mplace(mplace)?
.expect("size_of_val called on extern type");
let ptr_size = this.pointer_size();
this.write_scalar(
Scalar::from_uint(align.bytes(), ptr_size),
dest,
)?;
this.write_scalar(Scalar::from_uint(align.bytes(), ptr_size), dest)?;
}
"unchecked_div" => {
@ -493,12 +521,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
if rval == 0 {
throw_ub_format!("Division by 0 in unchecked_div");
}
this.binop_ignore_overflow(
mir::BinOp::Div,
l,
r,
dest,
)?;
this.binop_ignore_overflow(mir::BinOp::Div, l, r, dest)?;
}
"unchecked_rem" => {
@ -508,15 +531,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
if rval == 0 {
throw_ub_format!("Division by 0 in unchecked_rem");
}
this.binop_ignore_overflow(
mir::BinOp::Rem,
l,
r,
dest,
)?;
this.binop_ignore_overflow(mir::BinOp::Rem, l, r, dest)?;
}
"unchecked_add" | "unchecked_sub" | "unchecked_mul" => {
#[rustfmt::skip]
| "unchecked_add"
| "unchecked_sub"
| "unchecked_mul"
=> {
let l = this.read_immediate(args[0])?;
let r = this.read_immediate(args[1])?;
let op = match intrinsic_name {
@ -555,9 +577,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
mplace.meta.unwrap_none();
let ptr = mplace.ptr.to_ptr()?;
// We know the return place is in-bounds
this.memory
.get_raw_mut(ptr.alloc_id)?
.mark_definedness(ptr, dest.layout.size, false);
this.memory.get_raw_mut(ptr.alloc_id)?.mark_definedness(
ptr,
dest.layout.size,
false,
);
}
}
}
@ -570,7 +594,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let ptr = this.read_scalar(args[0])?.not_undef()?;
let count = this.read_scalar(args[2])?.to_machine_usize(this)?;
let byte_count = ty_layout.size * count;
this.memory.write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
this.memory
.write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
}
name => throw_unsup_format!("unimplemented intrinsic: {}", name),
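// Illustrative sketch, not part of this commit: the compare-exchange intrinsics emulated above
// always return the old value together with a success flag, and only store the new value when
// the comparison succeeded. Since the interpreter is single-threaded, that boils down to:
fn compare_exchange(place: &mut u64, expect_old: u64, new: u64) -> (u64, bool) {
    let old = *place;
    let success = old == expect_old;
    if success {
        // Update the place only when the old value matched the expectation.
        *place = new;
    }
    // The old value is returned whether or not the exchange happened.
    (old, success)
}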

View File

@ -1,14 +1,14 @@
pub mod dlsym;
pub mod env;
pub mod foreign_items;
pub mod intrinsics;
pub mod tls;
pub mod fs;
pub mod time;
pub mod intrinsics;
pub mod panic;
pub mod time;
pub mod tls;
use rustc::{mir, ty};
use crate::*;
use rustc::{mir, ty};
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
@ -17,14 +17,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, Tag>],
ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
unwind: Option<mir::BasicBlock>
unwind: Option<mir::BasicBlock>,
) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
let this = self.eval_context_mut();
trace!(
"eval_fn_call: {:#?}, {:?}",
instance,
ret.map(|p| *p.0)
);
trace!("eval_fn_call: {:#?}, {:?}", instance, ret.map(|p| *p.0));
// There are some more lang items we want to hook that CTFE does not hook (yet).
if this.tcx.lang_items().align_offset_fn() == Some(instance.def.def_id()) {
@ -59,10 +55,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
) -> InterpResult<'tcx, Option<u128>> {
let this = self.eval_context_mut();
let req_align = this.force_bits(
this.read_scalar(align_op)?.not_undef()?,
this.pointer_size(),
)? as usize;
let req_align = this
.force_bits(this.read_scalar(align_op)?.not_undef()?, this.pointer_size())?
as usize;
// FIXME: This should actually panic in the interpreted program
if !req_align.is_power_of_two() {
@ -72,7 +67,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let ptr_scalar = this.read_scalar(ptr_op)?.not_undef()?;
if let Ok(ptr) = this.force_ptr(ptr_scalar) {
let cur_align = this.memory.get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)?.1.bytes() as usize;
let cur_align =
this.memory.get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)?.1.bytes()
as usize;
if cur_align >= req_align {
// if the allocation alignment is at least the required alignment we use the
// libcore implementation
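// Illustrative sketch, not part of this commit: the `align_offset` hook above rejects
// alignments that are not powers of two and otherwise only takes over when the allocation's
// own alignment is smaller than the requested one; if it is at least as large, the ordinary
// libcore implementation is reused. The decision, reduced to plain integers (hypothetical
// helper, not a Miri API):
fn delegate_to_libcore(req_align: usize, alloc_align: usize) -> Result<bool, &'static str> {
    if !req_align.is_power_of_two() {
        // The interpreted program is what should panic here (see the FIXME above).
        return Err("align_offset: `align` must be a power of two");
    }
    // When the allocation is aligned at least as strictly as requested, every offset that
    // libcore computes is valid, so its generic implementation can be used directly.
    Ok(alloc_align >= req_align)
}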

View File

@ -11,10 +11,10 @@
//! gets popped *during unwinding*, we take the panic payload and store it according to the extra
//! metadata we remembered when pushing said frame.
use syntax::source_map::Span;
use rustc::mir;
use rustc::ty::{self, layout::LayoutOf};
use rustc_target::spec::PanicStrategy;
use syntax::source_map::Span;
use crate::*;
@ -35,13 +35,12 @@ pub struct CatchUnwindData<'tcx> {
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Handles the special "miri_start_panic" intrinsic, which is called
/// by libpanic_unwind to delegate the actual unwinding process to Miri.
fn handle_miri_start_panic(
&mut self,
args: &[OpTy<'tcx, Tag>],
unwind: Option<mir::BasicBlock>
unwind: Option<mir::BasicBlock>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
@ -49,12 +48,15 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// Get the raw pointer stored in arg[0] (the panic payload).
let scalar = this.read_immediate(args[0])?;
assert!(this.machine.panic_payload.is_none(), "the panic runtime should avoid double-panics");
assert!(
this.machine.panic_payload.is_none(),
"the panic runtime should avoid double-panics"
);
this.machine.panic_payload = Some(scalar);
// Jump to the unwind block to begin unwinding.
this.unwind_to_block(unwind);
return Ok(())
return Ok(());
}
fn handle_catch_panic(
@ -64,7 +66,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
ret: mir::BasicBlock,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let tcx = &{this.tcx.tcx};
let tcx = &{ this.tcx.tcx };
// fn __rust_maybe_catch_panic(
// f: fn(*mut u8),
@ -82,8 +84,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// Now we make a function call, and pass `f_arg` as first and only argument.
let f_instance = this.memory.get_fn(f)?.as_instance()?;
trace!("__rust_maybe_catch_panic: {:?}", f_instance);
let ret_place =
MPlaceTy::dangling(this.layout_of(tcx.mk_unit())?, this).into();
let ret_place = MPlaceTy::dangling(this.layout_of(tcx.mk_unit())?, this).into();
this.call_function(
f_instance,
&[f_arg.into()],
@ -99,11 +100,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// This lets `handle_stack_pop` (below) know that we should stop unwinding
// when we pop this frame.
if this.tcx.tcx.sess.panic_strategy() == PanicStrategy::Unwind {
this.frame_mut().extra.catch_panic = Some(CatchUnwindData {
data_place,
vtable_place,
dest,
})
this.frame_mut().extra.catch_panic =
Some(CatchUnwindData { data_place, vtable_place, dest })
}
return Ok(());
@ -112,7 +110,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
fn handle_stack_pop(
&mut self,
mut extra: FrameData<'tcx>,
unwinding: bool
unwinding: bool,
) -> InterpResult<'tcx, StackPopInfo> {
let this = self.eval_context_mut();

View File

@ -9,10 +9,9 @@ fn get_time<'tcx>() -> InterpResult<'tcx, Duration> {
system_time_to_duration(&SystemTime::now())
}
// Returns the time elapsed between the provided time and the unix epoch as a `Duration`.
/// Returns the time elapsed between the provided time and the unix epoch as a `Duration`.
pub fn system_time_to_duration<'tcx>(time: &SystemTime) -> InterpResult<'tcx, Duration> {
time
.duration_since(SystemTime::UNIX_EPOCH)
time.duration_since(SystemTime::UNIX_EPOCH)
.map_err(|_| err_unsup_format!("Times before the Unix epoch are not supported").into())
}

View File

@ -2,14 +2,10 @@
use std::collections::BTreeMap;
use rustc_target::abi::LayoutOf;
use rustc::{ty, ty::layout::HasDataLayout};
use rustc_target::abi::LayoutOf;
use crate::{
InterpResult, StackPopCleanup,
MPlaceTy, Scalar, Tag,
HelpersEvalContextExt,
};
use crate::{HelpersEvalContextExt, InterpResult, MPlaceTy, Scalar, StackPopCleanup, Tag};
pub type TlsKey = u128;
@ -41,19 +37,10 @@ impl<'tcx> Default for TlsData<'tcx> {
}
impl<'tcx> TlsData<'tcx> {
pub fn create_tls_key(
&mut self,
dtor: Option<ty::Instance<'tcx>>,
) -> TlsKey {
pub fn create_tls_key(&mut self, dtor: Option<ty::Instance<'tcx>>) -> TlsKey {
let new_key = self.next_key;
self.next_key += 1;
self.keys.insert(
new_key,
TlsEntry {
data: None,
dtor,
},
).unwrap_none();
self.keys.insert(new_key, TlsEntry { data: None, dtor }).unwrap_none();
trace!("New TLS key allocated: {} with dtor {:?}", new_key, dtor);
new_key
}
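// Illustrative sketch, not part of this commit: TLS keys in the interpreter are monotonically
// increasing integers mapped to an entry that starts out with no data. A stripped-down version
// of `create_tls_key`, with the destructor instance replaced by a plain `String` stand-in:
use std::collections::BTreeMap;

struct TlsData {
    next_key: u128,
    keys: BTreeMap<u128, Option<String>>, // key -> optional "destructor"
}

impl TlsData {
    fn create_tls_key(&mut self, dtor: Option<String>) -> u128 {
        let new_key = self.next_key;
        self.next_key += 1;
        // Each key starts without data; only the optional destructor is recorded.
        let previous = self.keys.insert(new_key, dtor);
        assert!(previous.is_none(), "TLS keys are never reused");
        new_key
    }
}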

View File

@ -3,17 +3,17 @@
use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
use std::rc::Rc;
use std::fmt;
use std::num::NonZeroU64;
use std::rc::Rc;
use rustc::ty::{self, layout::Size};
use rustc::hir::Mutability::{Mutable, Immutable};
use rustc::hir::Mutability::{Immutable, Mutable};
use rustc::mir::RetagKind;
use rustc::ty::{self, layout::Size};
use crate::{
InterpResult, HelpersEvalContextExt, TerminationInfo,
MemoryKind, MiriMemoryKind, RangeMap, AllocId, Pointer, Immediate, ImmTy, PlaceTy, MPlaceTy,
AllocId, HelpersEvalContextExt, ImmTy, Immediate, InterpResult, MPlaceTy, MemoryKind,
MiriMemoryKind, PlaceTy, Pointer, RangeMap, TerminationInfo,
};
pub type PtrId = NonZeroU64;
@ -82,7 +82,6 @@ pub struct Stack {
borrows: Vec<Item>,
}
/// Extra per-allocation state.
#[derive(Clone, Debug)]
pub struct Stacks {
@ -217,7 +216,8 @@ impl Permission {
/// This defines for a given permission, whether it permits the given kind of access.
fn grants(self, access: AccessKind) -> bool {
// Disabled grants nothing. Otherwise, all items grant read access, and except for SharedReadOnly they grant write access.
self != Permission::Disabled && (access == AccessKind::Read || self != Permission::SharedReadOnly)
self != Permission::Disabled
&& (access == AccessKind::Read || self != Permission::SharedReadOnly)
}
}
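// Illustrative sketch, not part of this commit: the permission rule spelled out in the comment
// above, as one self-contained function -- `Disabled` grants nothing, everything else grants
// reads, and everything except `SharedReadOnly` also grants writes.
#[derive(Clone, Copy, PartialEq)]
enum Permission { Unique, SharedReadWrite, SharedReadOnly, Disabled }

#[derive(Clone, Copy, PartialEq)]
enum AccessKind { Read, Write }

fn grants(perm: Permission, access: AccessKind) -> bool {
    perm != Permission::Disabled
        && (access == AccessKind::Read || perm != Permission::SharedReadOnly)
}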
@ -226,17 +226,16 @@ impl<'tcx> Stack {
/// Find the item granting the given kind of access to the given tag, and return where
/// it is on the stack.
fn find_granting(&self, access: AccessKind, tag: Tag) -> Option<usize> {
self.borrows.iter()
self.borrows
.iter()
.enumerate() // we also need to know *where* in the stack
.rev() // search top-to-bottom
// Return permission of first item that grants access.
// We require a permission with the right tag, ensuring U3 and F3.
.find_map(|(idx, item)|
if tag == item.tag && item.perm.grants(access) {
Some(idx)
} else {
None
}
.find_map(
|(idx, item)| {
if tag == item.tag && item.perm.grants(access) { Some(idx) } else { None }
},
)
}
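// Illustrative sketch, not part of this commit: `find_granting` above searches the borrow
// stack top-to-bottom for the first item that carries the right tag and grants the requested
// access, returning its position. Over a plain slice of hypothetical `(tag, grants)` pairs:
fn find_granting(stack: &[(u64, bool)], tag: u64) -> Option<usize> {
    stack
        .iter()
        .enumerate() // we also need to know *where* in the stack the item sits
        .rev() // search top-to-bottom
        .find_map(|(idx, &(item_tag, grants))| if item_tag == tag && grants { Some(idx) } else { None })
}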
@ -245,13 +244,10 @@ impl<'tcx> Stack {
fn find_first_write_incompatible(&self, granting: usize) -> usize {
let perm = self.borrows[granting].perm;
match perm {
Permission::SharedReadOnly =>
bug!("Cannot use SharedReadOnly for writing"),
Permission::Disabled =>
bug!("Cannot use Disabled for anything"),
Permission::Unique =>
// On a write, everything above us is incompatible.
granting + 1,
Permission::SharedReadOnly => bug!("Cannot use SharedReadOnly for writing"),
Permission::Disabled => bug!("Cannot use Disabled for anything"),
// On a write, everything above us is incompatible.
Permission::Unique => granting + 1,
Permission::SharedReadWrite => {
// The SharedReadWrite *just* above us are compatible, to skip those.
let mut idx = granting + 1;
@ -285,7 +281,8 @@ impl<'tcx> Stack {
)));
} else {
throw_ub!(UbExperimental(format!(
"deallocating while item is protected: {:?}", item
"deallocating while item is protected: {:?}",
item
)));
}
}
@ -295,20 +292,16 @@ impl<'tcx> Stack {
/// Test if a memory `access` using pointer tagged `tag` is granted.
/// If yes, return the index of the item that granted it.
fn access(
&mut self,
access: AccessKind,
tag: Tag,
global: &GlobalState,
) -> InterpResult<'tcx> {
fn access(&mut self, access: AccessKind, tag: Tag, global: &GlobalState) -> InterpResult<'tcx> {
// Two main steps: Find granting item, remove incompatible items above.
// Step 1: Find granting item.
let granting_idx = self.find_granting(access, tag)
.ok_or_else(|| err_ub!(UbExperimental(format!(
let granting_idx = self.find_granting(access, tag).ok_or_else(|| {
err_ub!(UbExperimental(format!(
"no item granting {} to tag {:?} found in borrow stack",
access, tag,
))))?;
)))
})?;
// Step 2: Remove incompatible items above them. Make sure we do not remove protected
// items. Behavior differs for reads and writes.
@ -329,7 +322,7 @@ impl<'tcx> Stack {
// This pattern occurs a lot in the standard library: create a raw pointer, then also create a shared
// reference and use that.
// We *disable* instead of removing `Unique` to avoid "connecting" two neighbouring blocks of SRWs.
for idx in (granting_idx+1 .. self.borrows.len()).rev() {
for idx in ((granting_idx + 1)..self.borrows.len()).rev() {
let item = &mut self.borrows[idx];
if item.perm == Permission::Unique {
trace!("access: disabling item {:?}", item);
@ -345,17 +338,14 @@ impl<'tcx> Stack {
/// Deallocate a location: Like a write access, but also there must be no
/// active protectors at all because we will remove all items.
fn dealloc(
&mut self,
tag: Tag,
global: &GlobalState,
) -> InterpResult<'tcx> {
fn dealloc(&mut self, tag: Tag, global: &GlobalState) -> InterpResult<'tcx> {
// Step 1: Find granting item.
self.find_granting(AccessKind::Write, tag)
.ok_or_else(|| err_ub!(UbExperimental(format!(
self.find_granting(AccessKind::Write, tag).ok_or_else(|| {
err_ub!(UbExperimental(format!(
"no item granting write access for deallocation to tag {:?} found in borrow stack",
tag,
))))?;
)))
})?;
// Step 2: Remove all items. Also checks for protectors.
for item in self.borrows.drain(..).rev() {
@ -369,18 +359,10 @@ impl<'tcx> Stack {
    /// `weak` controls whether this operation is weak or strong: weak granting does not act as
    /// an access, and it adds the new item directly on top of the one it is derived
    /// from instead of all the way at the top of the stack.
fn grant(
&mut self,
derived_from: Tag,
new: Item,
global: &GlobalState,
) -> InterpResult<'tcx> {
fn grant(&mut self, derived_from: Tag, new: Item, global: &GlobalState) -> InterpResult<'tcx> {
// Figure out which access `perm` corresponds to.
let access = if new.perm.grants(AccessKind::Write) {
AccessKind::Write
} else {
AccessKind::Read
};
let access =
if new.perm.grants(AccessKind::Write) { AccessKind::Write } else { AccessKind::Read };
// Now we figure out which item grants our parent (`derived_from`) this kind of access.
// We use that to determine where to put the new item.
let granting_idx = self.find_granting(access, derived_from)
@ -392,7 +374,10 @@ impl<'tcx> Stack {
// Either way, we ensure that we insert the new item in a way such that between
// `derived_from` and the new one, there are only items *compatible with* `derived_from`.
let new_idx = if new.perm == Permission::SharedReadWrite {
assert!(access == AccessKind::Write, "this case only makes sense for stack-like accesses");
assert!(
access == AccessKind::Write,
"this case only makes sense for stack-like accesses"
);
// SharedReadWrite can coexist with "existing loans", meaning they don't act like a write
// access. Instead of popping the stack, we insert the item at the place the stack would
// be popped to (i.e., we insert it above all the write-compatible items).
@ -412,7 +397,7 @@ impl<'tcx> Stack {
};
// Put the new item there. As an optimization, deduplicate if it is equal to one of its new neighbors.
if self.borrows[new_idx-1] == new || self.borrows.get(new_idx) == Some(&new) {
if self.borrows[new_idx - 1] == new || self.borrows.get(new_idx) == Some(&new) {
// Optimization applies, done.
trace!("reborrow: avoiding adding redundant item {:?}", new);
} else {
@ -428,21 +413,11 @@ impl<'tcx> Stack {
/// Map per-stack operations to higher-level per-location-range operations.
impl<'tcx> Stacks {
/// Creates new stack with initial tag.
fn new(
size: Size,
perm: Permission,
tag: Tag,
extra: MemoryExtra,
) -> Self {
fn new(size: Size, perm: Permission, tag: Tag, extra: MemoryExtra) -> Self {
let item = Item { perm, tag, protector: None };
let stack = Stack {
borrows: vec![item],
};
let stack = Stack { borrows: vec![item] };
Stacks {
stacks: RefCell::new(RangeMap::new(size, stack)),
global: extra,
}
Stacks { stacks: RefCell::new(RangeMap::new(size, stack)), global: extra }
}
/// Call `f` on every stack in the range.
@ -470,33 +445,27 @@ impl Stacks {
kind: MemoryKind<MiriMemoryKind>,
) -> (Self, Tag) {
let (tag, perm) = match kind {
MemoryKind::Stack =>
// New unique borrow. This tag is not accessible by the program,
// so it will only ever be used when using the local directly (i.e.,
// not through a pointer). That is, whenever we directly write to a local, this will pop
// everything else off the stack, invalidating all previous pointers,
// and in particular, *all* raw pointers.
(Tag::Tagged(extra.borrow_mut().new_ptr()), Permission::Unique),
// New unique borrow. This tag is not accessible by the program,
// so it will only ever be used when using the local directly (i.e.,
// not through a pointer). That is, whenever we directly write to a local, this will pop
// everything else off the stack, invalidating all previous pointers,
// and in particular, *all* raw pointers.
MemoryKind::Stack => (Tag::Tagged(extra.borrow_mut().new_ptr()), Permission::Unique),
// Static memory can be referenced by "global" pointers from `tcx`.
// Thus we call `static_base_ptr` such that the global pointers get the same tag
// as what we use here.
// The base pointer is not unique, so the base permission is `SharedReadWrite`.
MemoryKind::Machine(MiriMemoryKind::Static) =>
// Static memory can be referenced by "global" pointers from `tcx`.
// Thus we call `static_base_ptr` such that the global pointers get the same tag
// as what we use here.
// The base pointer is not unique, so the base permission is `SharedReadWrite`.
(extra.borrow_mut().static_base_ptr(id), Permission::SharedReadWrite),
_ =>
// Everything else we handle entirely untagged for now.
// FIXME: experiment with more precise tracking.
(Tag::Untagged, Permission::SharedReadWrite),
// Everything else we handle entirely untagged for now.
// FIXME: experiment with more precise tracking.
_ => (Tag::Untagged, Permission::SharedReadWrite),
};
(Stacks::new(size, perm, tag, extra), tag)
}
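// A minimal user-level sketch of the behavior the "new unique borrow" comment
// above describes (hypothetical example, not code from this crate): the direct
// assignment to `local` goes through the local's own tag, which pops every
// other item off that location's stack, so the raw pointer created earlier is
// invalidated and Miri reports the read through it.
//
//     fn main() {
//         let mut local = 0i32;
//         let raw = &mut local as *mut i32;
//         local = 1; // direct write through the local's own tag
//         let _val = unsafe { *raw }; // Stacked Borrows error: `raw`'s item was popped
//     }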
#[inline(always)]
pub fn memory_read<'tcx>(
&self,
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx> {
pub fn memory_read<'tcx>(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
trace!("read access with tag {:?}: {:?}, size {}", ptr.tag, ptr.erase_tag(), size.bytes());
self.for_each(ptr, size, |stack, global| {
stack.access(AccessKind::Read, ptr.tag, global)?;
@ -505,11 +474,7 @@ impl Stacks {
}
#[inline(always)]
pub fn memory_written<'tcx>(
&mut self,
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx> {
pub fn memory_written<'tcx>(&mut self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
trace!("write access with tag {:?}: {:?}, size {}", ptr.tag, ptr.erase_tag(), size.bytes());
self.for_each(ptr, size, |stack, global| {
stack.access(AccessKind::Write, ptr.tag, global)?;
@ -524,9 +489,7 @@ impl Stacks {
size: Size,
) -> InterpResult<'tcx> {
trace!("deallocation with tag {:?}: {:?}, size {}", ptr.tag, ptr.erase_tag(), size.bytes());
self.for_each(ptr, size, |stack, global| {
stack.dealloc(ptr.tag, global)
})
self.for_each(ptr, size, |stack, global| stack.dealloc(ptr.tag, global))
}
}
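// A small sketch (hypothetical example, not code from this crate) of how the
// hooks above surface to users: the write runs the `memory_written` path, which
// pops the `SharedReadOnly` item granted to `shr`, so the later `memory_read`
// through `shr` finds no granting item and Miri reports an error.
//
//     fn main() {
//         let mut x = 0i32;
//         let raw = &mut x as *mut i32;
//         let shr = unsafe { &*raw }; // SharedReadOnly on top of `raw`'s item
//         unsafe { *raw = 1 };        // write access invalidates `shr`
//         let _val = *shr;            // read access fails under Stacked Borrows
//     }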
@ -545,12 +508,20 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let this = self.eval_context_mut();
let protector = if protect { Some(this.frame().extra.call_id) } else { None };
let ptr = place.ptr.to_ptr().expect("we should have a proper pointer");
trace!("reborrow: {} reference {:?} derived from {:?} (pointee {}): {:?}, size {}",
kind, new_tag, ptr.tag, place.layout.ty, ptr.erase_tag(), size.bytes());
trace!(
"reborrow: {} reference {:?} derived from {:?} (pointee {}): {:?}, size {}",
kind,
new_tag,
ptr.tag,
place.layout.ty,
ptr.erase_tag(),
size.bytes()
);
// Get the allocation. It might not be mutable, so we cannot use `get_mut`.
let extra = &this.memory.get_raw(ptr.alloc_id)?.extra;
let stacked_borrows = extra.stacked_borrows.as_ref().expect("we should have Stacked Borrows data");
let stacked_borrows =
extra.stacked_borrows.as_ref().expect("we should have Stacked Borrows data");
// Update the stacks.
// Make sure that raw pointers and mutable shared references are reborrowed "weak":
// There could be existing unique pointers reborrowed from them that should remain valid!
@ -564,7 +535,11 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
// We need a frozen-sensitive reborrow.
return this.visit_freeze_sensitive(place, size, |cur_ptr, size, frozen| {
// We are only ever `SharedReadOnly` inside the frozen bits.
let perm = if frozen { Permission::SharedReadOnly } else { Permission::SharedReadWrite };
let perm = if frozen {
Permission::SharedReadOnly
} else {
Permission::SharedReadWrite
};
let item = Item { perm, tag: new_tag, protector };
stacked_borrows.for_each(cur_ptr, size, |stack, global| {
stack.grant(cur_ptr.tag, item, global)
@ -573,9 +548,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
};
let item = Item { perm, tag: new_tag, protector };
stacked_borrows.for_each(ptr, size, |stack, global| {
stack.grant(ptr.tag, item, global)
})
stacked_borrows.for_each(ptr, size, |stack, global| stack.grant(ptr.tag, item, global))
}
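// A brief sketch (hypothetical example, not code from this crate) of the
// frozen-sensitive case handled above: only the bytes outside the `UnsafeCell`
// are frozen, so a shared reference still permits writes to the `Cell` field
// while the plain field stays read-only.
//
//     use std::cell::Cell;
//     struct S { plain: i32, cell: Cell<i32> }
//
//     fn main() {
//         let s = S { plain: 0, cell: Cell::new(0) };
//         let r = &s;             // `plain` gets SharedReadOnly, `cell` gets SharedReadWrite
//         r.cell.set(42);         // allowed: write inside the UnsafeCell
//         assert_eq!(r.plain, 0); // reads of the frozen part are fine
//     }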
/// Retags an individual pointer, returning the retagged version.
@ -589,7 +562,8 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let this = self.eval_context_mut();
// We want a place for where the ptr *points to*, so we get one.
let place = this.ref_to_mplace(val)?;
let size = this.size_and_align_of_mplace(place)?
let size = this
.size_and_align_of_mplace(place)?
.map(|(size, _)| size)
.unwrap_or_else(|| place.layout.size);
// We can see dangling ptrs in here e.g. after a Box's `Unique` was
@ -622,11 +596,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
fn retag(
&mut self,
kind: RetagKind,
place: PlaceTy<'tcx, Tag>
) -> InterpResult<'tcx> {
fn retag(&mut self, kind: RetagKind, place: PlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
// Determine mutability and whether to add a protector.
// Cannot use `builtin_deref` because that reports *immutable* for `Box`,
@ -634,10 +604,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
fn qualify(ty: ty::Ty<'_>, kind: RetagKind) -> Option<(RefKind, bool)> {
match ty.kind {
// References are simple.
ty::Ref(_, _, Mutable) =>
Some((RefKind::Unique { two_phase: kind == RetagKind::TwoPhase}, kind == RetagKind::FnEntry)),
ty::Ref(_, _, Immutable) =>
Some((RefKind::Shared, kind == RetagKind::FnEntry)),
ty::Ref(_, _, Mutable) => Some((
RefKind::Unique { two_phase: kind == RetagKind::TwoPhase },
kind == RetagKind::FnEntry,
)),
ty::Ref(_, _, Immutable) => Some((RefKind::Shared, kind == RetagKind::FnEntry)),
// Raw pointers need to be enabled.
ty::RawPtr(tym) if kind == RetagKind::Raw =>
Some((RefKind::Raw { mutable: tym.mutbl == Mutable }, false)),

View File

@ -2,11 +2,11 @@
// Custom test runner, to avoid libtest being wrapped around compiletest which wraps libtest.
#![test_runner(test_runner)]
use std::path::PathBuf;
use std::env;
use std::path::PathBuf;
use compiletest_rs as compiletest;
use colored::*;
use compiletest_rs as compiletest;
fn miri_path() -> PathBuf {
if rustc_test_suite().is_some() {
@ -57,12 +57,15 @@ fn run_tests(mode: &str, path: &str, target: &str, mut flags: Vec<String>) {
fn compile_fail(path: &str, target: &str, opt: bool) {
let opt_str = if opt { " with optimizations" } else { "" };
eprintln!("{}", format!(
"## Running compile-fail tests in {} against miri for target {}{}",
path,
target,
opt_str
).green().bold());
eprintln!(
"{}",
format!(
"## Running compile-fail tests in {} against miri for target {}{}",
path, target, opt_str
)
.green()
.bold()
);
let mut flags = Vec::new();
if opt {
@ -76,12 +79,15 @@ fn compile_fail(path: &str, target: &str, opt: bool) {
fn miri_pass(path: &str, target: &str, opt: bool) {
let opt_str = if opt { " with optimizations" } else { "" };
eprintln!("{}", format!(
"## Running run-pass tests in {} against miri for target {}{}",
path,
target,
opt_str
).green().bold());
eprintln!(
"{}",
format!(
"## Running run-pass tests in {} against miri for target {}{}",
path, target, opt_str
)
.green()
.bold()
);
let mut flags = Vec::new();
if opt {