diff --git a/benches/fibonacci.rs b/benches/fibonacci.rs index 39974ad6c18..90b231a32bf 100644 --- a/benches/fibonacci.rs +++ b/benches/fibonacci.rs @@ -7,9 +7,7 @@ #[bench] fn fib(bencher: &mut Bencher) { - bencher.iter(|| { - fibonacci_helper::main(); - }) + bencher.iter(|| { fibonacci_helper::main(); }) } #[bench] @@ -19,9 +17,7 @@ fn fib_miri(bencher: &mut Bencher) { #[bench] fn fib_iter(bencher: &mut Bencher) { - bencher.iter(|| { - fibonacci_helper_iterative::main(); - }) + bencher.iter(|| { fibonacci_helper_iterative::main(); }) } #[bench] diff --git a/benches/helpers/fibonacci_helper.rs b/benches/helpers/fibonacci_helper.rs index 004000e70ea..586f1ce7da4 100644 --- a/benches/helpers/fibonacci_helper.rs +++ b/benches/helpers/fibonacci_helper.rs @@ -4,9 +4,5 @@ pub fn main() { } fn fib(n: usize) -> usize { - if n <= 2 { - 1 - } else { - fib(n - 1) + fib(n - 2) - } + if n <= 2 { 1 } else { fib(n - 1) + fib(n - 2) } } diff --git a/benches/helpers/miri_helper.rs b/benches/helpers/miri_helper.rs index 441c35f5513..6657ba11997 100644 --- a/benches/helpers/miri_helper.rs +++ b/benches/helpers/miri_helper.rs @@ -19,9 +19,13 @@ fn find_sysroot() -> String { let toolchain = option_env!("RUSTUP_TOOLCHAIN").or(option_env!("MULTIRUST_TOOLCHAIN")); match (home, toolchain) { (Some(home), Some(toolchain)) => format!("{}/toolchains/{}", home, toolchain), - _ => option_env!("RUST_SYSROOT") - .expect("need to specify RUST_SYSROOT env var or use rustup or multirust") - .to_owned(), + _ => { + option_env!("RUST_SYSROOT") + .expect( + "need to specify RUST_SYSROOT env var or use rustup or multirust", + ) + .to_owned() + } } } @@ -30,7 +34,7 @@ pub fn run(filename: &str, bencher: &mut Bencher) { "miri".to_string(), format!("benches/helpers/{}.rs", filename), "--sysroot".to_string(), - find_sysroot() + find_sysroot(), ]; let compiler_calls = &mut MiriCompilerCalls(Rc::new(RefCell::new(bencher))); rustc_driver::run_compiler(args, compiler_calls, None, None); @@ -40,7 +44,7 @@ impl<'a> CompilerCalls<'a> for MiriCompilerCalls<'a> { fn build_controller( &mut self, _: &Session, - _: &getopts::Matches + _: &getopts::Matches, ) -> driver::CompileController<'a> { let mut control: driver::CompileController<'a> = driver::CompileController::basic(); @@ -51,14 +55,17 @@ fn build_controller( state.session.abort_if_errors(); let tcx = state.tcx.unwrap(); - let (entry_node_id, _) = state.session.entry_fn.borrow() - .expect("no main or start function found"); + let (entry_node_id, _) = state.session.entry_fn.borrow().expect( + "no main or start function found", + ); let entry_def_id = tcx.map.local_def_id(entry_node_id); - let memory_size = 100*1024*1024; // 100MB + let memory_size = 100 * 1024 * 1024; // 100MB let step_limit = 1000_000; let stack_limit = 100; - bencher.borrow_mut().iter(|| { eval_main(tcx, entry_def_id, memory_size, step_limit, stack_limit); }); + bencher.borrow_mut().iter(|| { + eval_main(tcx, entry_def_id, memory_size, step_limit, stack_limit); + }); state.session.abort_if_errors(); }); diff --git a/benches/helpers/smoke_helper.rs b/benches/helpers/smoke_helper.rs index ef05b044cdd..e81db817aea 100644 --- a/benches/helpers/smoke_helper.rs +++ b/benches/helpers/smoke_helper.rs @@ -1,3 +1,2 @@ #[inline(never)] -pub fn main() { -} +pub fn main() {} diff --git a/benches/smoke.rs b/benches/smoke.rs index eabd58a8688..1dbc4fed82f 100644 --- a/benches/smoke.rs +++ b/benches/smoke.rs @@ -7,9 +7,7 @@ #[bench] fn noop(bencher: &mut Bencher) { - bencher.iter(|| { - smoke_helper::main(); - }) + bencher.iter(|| { 
smoke_helper::main(); }) } /* diff --git a/miri/bin/cargo-miri.rs b/miri/bin/cargo-miri.rs index 6eff6650fa9..06d5b3e9971 100644 --- a/miri/bin/cargo-miri.rs +++ b/miri/bin/cargo-miri.rs @@ -50,29 +50,42 @@ fn main() { let test = std::env::args().nth(2).map_or(false, |text| text == "test"); let skip = if test { 3 } else { 2 }; - let manifest_path_arg = std::env::args().skip(skip).find(|val| val.starts_with("--manifest-path=")); + let manifest_path_arg = std::env::args().skip(skip).find(|val| { + val.starts_with("--manifest-path=") + }); - let mut metadata = if let Ok(metadata) = cargo_metadata::metadata(manifest_path_arg.as_ref().map(AsRef::as_ref)) { + let mut metadata = if let Ok(metadata) = cargo_metadata::metadata( + manifest_path_arg.as_ref().map(AsRef::as_ref), + ) + { metadata } else { - let _ = std::io::stderr().write_fmt(format_args!("error: Could not obtain cargo metadata.")); + let _ = std::io::stderr().write_fmt(format_args!( + "error: Could not obtain cargo metadata." + )); std::process::exit(101); }; - let manifest_path = manifest_path_arg.map(|arg| PathBuf::from(Path::new(&arg["--manifest-path=".len()..]))); + let manifest_path = manifest_path_arg.map(|arg| { + PathBuf::from(Path::new(&arg["--manifest-path=".len()..])) + }); let current_dir = std::env::current_dir(); - let package_index = metadata.packages + let package_index = metadata + .packages .iter() .position(|package| { let package_manifest_path = Path::new(&package.manifest_path); if let Some(ref manifest_path) = manifest_path { package_manifest_path == manifest_path } else { - let current_dir = current_dir.as_ref().expect("could not read current directory"); - let package_manifest_directory = package_manifest_path.parent() - .expect("could not find parent directory of package manifest"); + let current_dir = current_dir.as_ref().expect( + "could not read current directory", + ); + let package_manifest_directory = package_manifest_path.parent().expect( + "could not find parent directory of package manifest", + ); package_manifest_directory == current_dir } }) @@ -80,13 +93,25 @@ fn main() { let package = metadata.packages.remove(package_index); for target in package.targets { let args = std::env::args().skip(skip); - let kind = target.kind.get(0).expect("badly formatted cargo metadata: target::kind is an empty array"); + let kind = target.kind.get(0).expect( + "badly formatted cargo metadata: target::kind is an empty array", + ); if test && kind == "test" { - if let Err(code) = process(vec!["--test".to_string(), target.name].into_iter().chain(args)) { + if let Err(code) = process( + vec!["--test".to_string(), target.name].into_iter().chain( + args, + ), + ) + { std::process::exit(code); } } else if !test && kind == "bin" { - if let Err(code) = process(vec!["--bin".to_string(), target.name].into_iter().chain(args)) { + if let Err(code) = process( + vec!["--bin".to_string(), target.name].into_iter().chain( + args, + ), + ) + { std::process::exit(code); } } @@ -118,7 +143,11 @@ fn main() { let mut args: Vec<String> = if std::env::args().any(|s| s == "--sysroot") { std::env::args().skip(1).collect() } else { - std::env::args().skip(1).chain(Some("--sysroot".to_owned())).chain(Some(sys_root)).collect() + std::env::args() + .skip(1) + .chain(Some("--sysroot".to_owned())) + .chain(Some(sys_root)) + .collect() }; // this check ensures that dependencies are built but not interpreted and the final crate is @@ -137,9 +166,11 @@ fn main() { args.extend_from_slice(&["--cfg".to_owned(), r#"feature="cargo-miri""#.to_owned()]); match
command.args(&args).status() { - Ok(exit) => if !exit.success() { - std::process::exit(exit.code().unwrap_or(42)); - }, + Ok(exit) => { + if !exit.success() { + std::process::exit(exit.code().unwrap_or(42)); + } + } Err(ref e) if miri_enabled => panic!("error during miri run: {:?}", e), Err(ref e) => panic!("error during rustc call: {:?}", e), } @@ -147,7 +178,8 @@ } fn process<I>(old_args: I) -> Result<(), i32> - where I: Iterator<Item = String> +where + I: Iterator<Item = String>, { let mut args = vec!["rustc".to_owned()]; diff --git a/miri/bin/miri.rs b/miri/bin/miri.rs index 01a4a8656b4..29c47e35570 100644 --- a/miri/bin/miri.rs +++ b/miri/bin/miri.rs @@ -16,7 +16,7 @@ use rustc::session::config::{self, Input, ErrorOutputType}; use rustc::hir::{self, itemlikevisit}; use rustc::ty::TyCtxt; -use syntax::ast::{MetaItemKind, NestedMetaItemKind, self}; +use syntax::ast::{self, MetaItemKind, NestedMetaItemKind}; use std::path::PathBuf; struct MiriCompilerCalls(RustcDefaultCalls); @@ -28,9 +28,15 @@ fn early_callback( sopts: &config::Options, cfg: &ast::CrateConfig, descriptions: &rustc_errors::registry::Registry, - output: ErrorOutputType + output: ErrorOutputType, ) -> Compilation { - self.0.early_callback(matches, sopts, cfg, descriptions, output) + self.0.early_callback( + matches, + sopts, + cfg, + descriptions, + output, + ) } fn no_input( &mut self, @@ -39,9 +45,16 @@ fn no_input( cfg: &ast::CrateConfig, odir: &Option<PathBuf>, ofile: &Option<PathBuf>, - descriptions: &rustc_errors::registry::Registry + descriptions: &rustc_errors::registry::Registry, ) -> Option<(Input, Option<PathBuf>)> { - self.0.no_input(matches, sopts, cfg, odir, ofile, descriptions) + self.0.no_input( + matches, + sopts, + cfg, + odir, + ofile, + descriptions, + ) } fn late_callback( &mut self, @@ -49,11 +62,15 @@ fn late_callback( sess: &Session, input: &Input, odir: &Option<PathBuf>, - ofile: &Option<PathBuf> + ofile: &Option<PathBuf>, ) -> Compilation { self.0.late_callback(matches, sess, input, odir, ofile) } - fn build_controller(&mut self, sess: &Session, matches: &getopts::Matches) -> CompileController<'a> { + fn build_controller( + &mut self, + sess: &Session, + matches: &getopts::Matches, + ) -> CompileController<'a> { let mut control = self.0.build_controller(sess, matches); control.after_hir_lowering.callback = Box::new(after_hir_lowering); control.after_analysis.callback = Box::new(after_analysis); @@ -66,7 +83,10 @@ fn build_controller(&mut self, sess: &Session, matches: &getopts::Matches) -> Co } fn after_hir_lowering(state: &mut CompileState) { - let attr = (String::from("miri"), syntax::feature_gate::AttributeType::Whitelisted); + let attr = ( + String::from("miri"), + syntax::feature_gate::AttributeType::Whitelisted, + ); state.session.plugin_attributes.borrow_mut().push(attr); } @@ -77,13 +97,23 @@ fn after_analysis<'a, 'tcx>(state: &mut CompileState<'a, 'tcx>) { let limits = resource_limits_from_attributes(state); if std::env::args().any(|arg| arg == "--test") { - struct Visitor<'a, 'tcx: 'a>(miri::ResourceLimits, TyCtxt<'a, 'tcx, 'tcx>, &'a CompileState<'a, 'tcx>); + struct Visitor<'a, 'tcx: 'a>( + miri::ResourceLimits, + TyCtxt<'a, 'tcx, 'tcx>, + &'a CompileState<'a, 'tcx> + ); impl<'a, 'tcx: 'a, 'hir> itemlikevisit::ItemLikeVisitor<'hir> for Visitor<'a, 'tcx> { fn visit_item(&mut self, i: &'hir hir::Item) { if let hir::Item_::ItemFn(_, _, _, _, _, body_id) = i.node { - if i.attrs.iter().any(|attr| attr.name().map_or(false, |n| n == "test")) { + if i.attrs.iter().any(|attr| { + attr.name().map_or(false, |n| n == "test") + }) + { let did =
self.1.hir.body_owner_def_id(body_id); - println!("running test: {}", self.1.hir.def_path(did).to_string(self.1)); + println!( + "running test: {}", + self.1.hir.def_path(did).to_string(self.1) + ); miri::eval_main(self.1, did, None, self.0); self.2.session.abort_if_errors(); } @@ -92,11 +122,18 @@ fn visit_item(&mut self, i: &'hir hir::Item) { fn visit_trait_item(&mut self, _trait_item: &'hir hir::TraitItem) {} fn visit_impl_item(&mut self, _impl_item: &'hir hir::ImplItem) {} } - state.hir_crate.unwrap().visit_all_item_likes(&mut Visitor(limits, tcx, state)); + state.hir_crate.unwrap().visit_all_item_likes( + &mut Visitor(limits, tcx, state), + ); } else if let Some((entry_node_id, _)) = *state.session.entry_fn.borrow() { let entry_def_id = tcx.hir.local_def_id(entry_node_id); - let start_wrapper = tcx.lang_items.start_fn().and_then(|start_fn| - if tcx.is_mir_available(start_fn) { Some(start_fn) } else { None }); + let start_wrapper = tcx.lang_items.start_fn().and_then(|start_fn| { + if tcx.is_mir_available(start_fn) { + Some(start_fn) + } else { + None + } + }); miri::eval_main(tcx, entry_def_id, start_wrapper, limits); state.session.abort_if_errors(); @@ -112,11 +149,19 @@ fn resource_limits_from_attributes(state: &CompileState) -> miri::ResourceLimits let extract_int = |lit: &syntax::ast::Lit| -> u128 { match lit.node { syntax::ast::LitKind::Int(i, _) => i, - _ => state.session.span_fatal(lit.span, "expected an integer literal"), + _ => { + state.session.span_fatal( + lit.span, + "expected an integer literal", + ) + } } }; - for attr in krate.attrs.iter().filter(|a| a.name().map_or(false, |n| n == "miri")) { + for attr in krate.attrs.iter().filter(|a| { + a.name().map_or(false, |n| n == "miri") + }) + { if let Some(items) = attr.meta_item_list() { for item in items { if let NestedMetaItemKind::MetaItem(ref inner) = item.node { @@ -165,7 +210,10 @@ fn init_logger() { }; let mut builder = env_logger::LogBuilder::new(); - builder.format(format).filter(None, log::LogLevelFilter::Info); + builder.format(format).filter( + None, + log::LogLevelFilter::Info, + ); if std::env::var("MIRI_LOG").is_ok() { builder.parse(&std::env::var("MIRI_LOG").unwrap()); @@ -184,9 +232,13 @@ fn find_sysroot() -> String { let toolchain = option_env!("RUSTUP_TOOLCHAIN").or(option_env!("MULTIRUST_TOOLCHAIN")); match (home, toolchain) { (Some(home), Some(toolchain)) => format!("{}/toolchains/{}", home, toolchain), - _ => option_env!("RUST_SYSROOT") - .expect("need to specify RUST_SYSROOT env var or use rustup or multirust") - .to_owned(), + _ => { + option_env!("RUST_SYSROOT") + .expect( + "need to specify RUST_SYSROOT env var or use rustup or multirust", + ) + .to_owned() + } } } diff --git a/miri/fn_call.rs b/miri/fn_call.rs index 26263a854dd..3467322768c 100644 --- a/miri/fn_call.rs +++ b/miri/fn_call.rs @@ -9,10 +9,7 @@ use rustc_miri::interpret::*; -use super::{ - TlsKey, - EvalContext, -}; +use super::{TlsKey, EvalContext}; use tls::MemoryExt; @@ -62,13 +59,19 @@ fn eval_fn_call( let mir = match self.load_mir(instance.def) { Ok(mir) => mir, - Err(EvalError{ kind: EvalErrorKind::NoMirFor(path), ..} ) => { - self.call_missing_fn(instance, destination, arg_operands, sig, path)?; + Err(EvalError { kind: EvalErrorKind::NoMirFor(path), .. 
}) => { + self.call_missing_fn( + instance, + destination, + arg_operands, + sig, + path, + )?; return Ok(true); - }, + } Err(other) => return Err(other), }; - + let (return_lvalue, return_to_block) = match destination { Some((lvalue, block)) => (lvalue, StackPopCleanup::Goto(block)), None => (Lvalue::undef(), StackPopCleanup::None), }; @@ -99,7 +102,8 @@ fn call_c_abi( .unwrap_or(name) .as_str(); - let args_res: EvalResult<Vec<Value>> = arg_operands.iter() + let args_res: EvalResult<Vec<Value>> = arg_operands + .iter() .map(|arg| self.eval_operand(arg)) .collect(); let args = args_res?; @@ -121,7 +125,11 @@ fn call_c_abi( "free" => { let ptr = args[0].into_ptr(&mut self.memory)?; if !ptr.is_null()? { - self.memory.deallocate(ptr.to_ptr()?, None, MemoryKind::C.into())?; + self.memory.deallocate( + ptr.to_ptr()?, + None, + MemoryKind::C.into(), + )?; } } @@ -132,9 +140,16 @@ fn call_c_abi( // libc::syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), GRND_NONBLOCK) // is called if a `HashMap` is created the regular way. match self.value_to_primval(args[0], usize)?.to_u64()? { - 318 | - 511 => return err!(Unimplemented("miri does not support random number generators".to_owned())), id => return err!(Unimplemented(format!("miri does not support syscall id {}", id))), + 318 | 511 => { + return err!(Unimplemented( + "miri does not support random number generators".to_owned(), + )) + } + id => { + return err!(Unimplemented( + format!("miri does not support syscall id {}", id), + )) + } } } @@ -144,7 +159,10 @@ fn call_c_abi( let symbol_name = self.memory.read_c_str(symbol)?; let err = format!("bad c unicode symbol: {:?}", symbol_name); let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err); - return err!(Unimplemented(format!("miri does not support dynamically loading libraries (requested symbol: {})", symbol_name))); + return err!(Unimplemented(format!( + "miri does not support dynamically loading libraries (requested symbol: {})", + symbol_name + ))); } "__rust_maybe_catch_panic" => { @@ -167,7 +185,12 @@ fn call_c_abi( StackPopCleanup::Goto(dest_block), )?; - let arg_local = self.frame().mir.args_iter().next().ok_or(EvalErrorKind::AbiViolation("Argument to __rust_maybe_catch_panic does not take enough arguments.".to_owned()))?; + let arg_local = self.frame().mir.args_iter().next().ok_or( + EvalErrorKind::AbiViolation( + "Argument to __rust_maybe_catch_panic does not take enough arguments." + .to_owned(), + ), + )?; let arg_dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; self.write_ptr(arg_dest, data, u8_ptr_ty)?; @@ -199,14 +222,21 @@ fn call_c_abi( } }; - self.write_primval(dest, PrimVal::Bytes(result as u128), dest_ty)?; + self.write_primval( + dest, + PrimVal::Bytes(result as u128), + dest_ty, + )?; } "memrchr" => { let ptr = args[0].into_ptr(&mut self.memory)?; let val = self.value_to_primval(args[1], usize)?.to_u64()? as u8; let num = self.value_to_primval(args[2], usize)?.to_u64()?; - if let Some(idx) = self.memory.read_bytes(ptr, num)?.iter().rev().position(|&c| c == val) { + if let Some(idx) = self.memory.read_bytes(ptr, num)?.iter().rev().position( + |&c| c == val, + ) + { let new_ptr = ptr.offset(num - idx as u64 - 1, &self)?; self.write_ptr(dest, new_ptr, dest_ty)?; } else { @@ -218,7 +248,10 @@ fn call_c_abi( let ptr = args[0].into_ptr(&mut self.memory)?; let val = self.value_to_primval(args[1], usize)?.to_u64()?
as u8; let num = self.value_to_primval(args[2], usize)?.to_u64()?; - if let Some(idx) = self.memory.read_bytes(ptr, num)?.iter().position(|&c| c == val) { + if let Some(idx) = self.memory.read_bytes(ptr, num)?.iter().position( + |&c| c == val, + ) + { let new_ptr = ptr.offset(idx as u64, &self)?; self.write_ptr(dest, new_ptr, dest_ty)?; } else { @@ -274,11 +307,19 @@ fn call_c_abi( } if let Some((name, value)) = new { // +1 for the null terminator - let value_copy = self.memory.allocate((value.len() + 1) as u64, 1, MemoryKind::Env.into())?; + let value_copy = self.memory.allocate( + (value.len() + 1) as u64, + 1, + MemoryKind::Env.into(), + )?; self.memory.write_bytes(value_copy.into(), &value)?; let trailing_zero_ptr = value_copy.offset(value.len() as u64, &self)?.into(); self.memory.write_bytes(trailing_zero_ptr, &[0])?; - if let Some(var) = self.machine_data.env_vars.insert(name.to_owned(), value_copy) { + if let Some(var) = self.machine_data.env_vars.insert( + name.to_owned(), + value_copy, + ) + { self.memory.deallocate(var, None, MemoryKind::Env.into())?; } self.write_null(dest, dest_ty)?; @@ -292,17 +333,29 @@ fn call_c_abi( let buf = args[1].into_ptr(&mut self.memory)?; let n = self.value_to_primval(args[2], usize)?.to_u64()?; trace!("Called write({:?}, {:?}, {:?})", fd, buf, n); - let result = if fd == 1 || fd == 2 { // stdout/stderr + let result = if fd == 1 || fd == 2 { + // stdout/stderr use std::io::{self, Write}; - + let buf_cont = self.memory.read_bytes(buf, n)?; - let res = if fd == 1 { io::stdout().write(buf_cont) } else { io::stderr().write(buf_cont) }; - match res { Ok(n) => n as isize, Err(_) => -1 } + let res = if fd == 1 { + io::stdout().write(buf_cont) + } else { + io::stderr().write(buf_cont) + }; + match res { + Ok(n) => n as isize, + Err(_) => -1, + } } else { info!("Ignored output to FD {}", fd); n as isize // pretend it all went well }; // now result is the value we return back to the program - self.write_primval(dest, PrimVal::Bytes(result as u128), dest_ty)?; + self.write_primval( + dest, + PrimVal::Bytes(result as u128), + dest_ty, + )?; } "strlen" => { @@ -328,7 +381,10 @@ fn call_c_abi( let mut result = None; for &(path, path_value) in paths { if let Ok(instance) = self.resolve_path(path) { - let cid = GlobalId { instance, promoted: None }; + let cid = GlobalId { + instance, + promoted: None, + }; // compute global if not cached let val = match self.globals.get(&cid).cloned() { Some(ptr) => self.value_to_primval(Value::ByRef(ptr), c_int)?.to_u64()?, @@ -343,7 +399,9 @@ fn call_c_abi( if let Some(result) = result { self.write_primval(dest, result, dest_ty)?; } else { - return err!(Unimplemented(format!("Unimplemented sysconf name: {}", name))); + return err!(Unimplemented( + format!("Unimplemented sysconf name: {}", name), + )); } } @@ -373,7 +431,11 @@ fn call_c_abi( return err!(OutOfTls); } // TODO: Does this need checking for alignment? - self.memory.write_uint(key_ptr.to_ptr()?, key, key_size.bytes())?; + self.memory.write_uint( + key_ptr.to_ptr()?, + key, + key_size.bytes(), + )?; // Return success (0) self.write_null(dest, dest_ty)?; @@ -396,7 +458,7 @@ fn call_c_abi( let key = self.value_to_primval(args[0], usize)?.to_u64()? 
as TlsKey; let new_ptr = args[1].into_ptr(&mut self.memory)?; self.memory.store_tls(key, new_ptr)?; - + // Return success (0) self.write_null(dest, dest_ty)?; } @@ -405,10 +467,12 @@ fn call_c_abi( link_name if link_name.starts_with("pthread_") => { warn!("ignoring C ABI call: {}", link_name); self.write_null(dest, dest_ty)?; - }, + } _ => { - return err!(Unimplemented(format!("can't call C ABI function: {}", link_name))); + return err!(Unimplemented( + format!("can't call C ABI function: {}", link_name), + )); } } @@ -425,7 +489,8 @@ fn resolve_path(&self, path: &[&str]) -> EvalResult<'tcx, ty::Instance<'tcx>> { let cstore = &self.tcx.sess.cstore; let crates = cstore.crates(); - crates.iter() + crates + .iter() .find(|&&krate| cstore.crate_name(krate) == path[0]) .and_then(|krate| { let krate = DefId { @@ -450,9 +515,7 @@ fn resolve_path(&self, path: &[&str]) -> EvalResult<'tcx, ty::Instance<'tcx>> { None }) .ok_or_else(|| { - let path = path.iter() - .map(|&s| s.to_owned()) - .collect(); + let path = path.iter().map(|&s| s.to_owned()).collect(); EvalErrorKind::PathNotFound(path).into() }) } @@ -469,27 +532,36 @@ fn call_missing_fn( match &path[..] { "std::panicking::rust_panic_with_hook" | "std::rt::begin_panic_fmt" => return err!(Panic), - _ => {}, + _ => {} } let dest_ty = sig.output(); - let (dest, dest_block) = destination.ok_or_else(|| EvalErrorKind::NoMirFor(path.clone()))?; + let (dest, dest_block) = destination.ok_or_else( + || EvalErrorKind::NoMirFor(path.clone()), + )?; if sig.abi == Abi::C { // An external C function // TODO: That functions actually has a similar preamble to what follows here. May make sense to // unify these two mechanisms for "hooking into missing functions". - self.call_c_abi(instance.def_id(), arg_operands, dest, dest_ty, dest_block)?; + self.call_c_abi( + instance.def_id(), + arg_operands, + dest, + dest_ty, + dest_block, + )?; return Ok(()); } - let args_res: EvalResult<Vec<Value>> = arg_operands.iter() + let args_res: EvalResult<Vec<Value>> = arg_operands + .iter() .map(|arg| self.eval_operand(arg)) .collect(); let args = args_res?; let usize = self.tcx.types.usize; - + match &path[..] { // Allocators are magic. They have no MIR, even when the rest of libstd does. "alloc::heap::::__rust_alloc" => { @@ -527,7 +599,11 @@ fn call_missing_fn( if !align.is_power_of_two() { return err!(HeapAllocNonPowerOfTwoAlignment(align)); } - self.memory.deallocate(ptr, Some((old_size, align)), MemoryKind::Rust.into())?; + self.memory.deallocate( + ptr, + Some((old_size, align)), + MemoryKind::Rust.into(), + )?; } "alloc::heap::::__rust_realloc" => { let ptr = args[0].into_ptr(&mut self.memory)?.to_ptr()?; @@ -544,17 +620,32 @@ fn call_missing_fn( if !new_align.is_power_of_two() { return err!(HeapAllocNonPowerOfTwoAlignment(new_align)); } - let new_ptr = self.memory.reallocate(ptr, old_size, old_align, new_size, new_align, MemoryKind::Rust.into())?; + let new_ptr = self.memory.reallocate( + ptr, + old_size, + old_align, + new_size, + new_align, + MemoryKind::Rust.into(), + )?; self.write_primval(dest, PrimVal::Ptr(new_ptr), dest_ty)?; } // A Rust function is missing, which means we are running with MIR missing for libstd (or other dependencies). // Still, we can make many things mostly work by "emulating" or ignoring some functions. "std::io::_print" => { - trace!("Ignoring output. To run programs that print, make sure you have a libstd with full MIR."); + trace!( + "Ignoring output. To run programs that print, make sure you have a libstd with full MIR."
+ ); + } + "std::thread::Builder::new" => { + return err!(Unimplemented("miri does not support threading".to_owned())) + } + "std::env::args" => { + return err!(Unimplemented( + "miri does not support program arguments".to_owned(), + )) } - "std::thread::Builder::new" => return err!(Unimplemented("miri does not support threading".to_owned())), - "std::env::args" => return err!(Unimplemented("miri does not support program arguments".to_owned())), "std::panicking::panicking" | "std::rt::panicking" => { // we abort on panic -> `std::rt::panicking` always returns false diff --git a/miri/helpers.rs b/miri/helpers.rs index 3cdabd4e623..809e5ebfacd 100644 --- a/miri/helpers.rs +++ b/miri/helpers.rs @@ -1,9 +1,4 @@ -use rustc_miri::interpret::{ - Pointer, - EvalResult, - PrimVal, - EvalContext, -}; +use rustc_miri::interpret::{Pointer, EvalResult, PrimVal, EvalContext}; use rustc::ty::Ty; @@ -31,7 +26,9 @@ fn wrapping_pointer_offset( offset: i64, ) -> EvalResult<'tcx, Pointer> { // FIXME: assuming here that type size is < i64::max_value() - let pointee_size = self.type_size(pointee_ty)?.expect("cannot offset a pointer to an unsized type") as i64; + let pointee_size = self.type_size(pointee_ty)?.expect( + "cannot offset a pointer to an unsized type", + ) as i64; let offset = offset.overflowing_mul(pointee_size).0; ptr.wrapping_signed_offset(offset, self) } @@ -47,11 +44,18 @@ fn pointer_offset( // We also consider the NULL pointer its own separate allocation, and all the remaining integers pointers their own // allocation. - if ptr.is_null()? { // NULL pointers must only be offset by 0 - return if offset == 0 { Ok(ptr) } else { err!(InvalidNullPointerUsage) }; + if ptr.is_null()? { + // NULL pointers must only be offset by 0 + return if offset == 0 { + Ok(ptr) + } else { + err!(InvalidNullPointerUsage) + }; } // FIXME: assuming here that type size is < i64::max_value() - let pointee_size = self.type_size(pointee_ty)?.expect("cannot offset a pointer to an unsized type") as i64; + let pointee_size = self.type_size(pointee_ty)?.expect( + "cannot offset a pointer to an unsized type", + ) as i64; return if let Some(offset) = offset.checked_mul(pointee_size) { let ptr = ptr.signed_offset(offset, self)?; // Do not do bounds-checking for integers; they can never alias a normal pointer anyway. 
@@ -64,6 +68,6 @@ fn pointer_offset( Ok(ptr) } else { err!(OverflowingMath) - } + }; } } diff --git a/miri/intrinsic.rs b/miri/intrinsic.rs index 5a609a569bb..83c65a427c0 100644 --- a/miri/intrinsic.rs +++ b/miri/intrinsic.rs @@ -3,13 +3,8 @@ use rustc::ty::layout::Layout; use rustc::ty::{self, Ty}; -use rustc_miri::interpret::{ - EvalResult, - Lvalue, LvalueExtra, - PrimVal, PrimValKind, Value, Pointer, - HasMemory, - EvalContext, PtrAndAlign, -}; +use rustc_miri::interpret::{EvalResult, Lvalue, LvalueExtra, PrimVal, PrimValKind, Value, Pointer, + HasMemory, EvalContext, PtrAndAlign}; use helpers::EvalContextExt as HelperEvalContextExt; @@ -35,9 +30,8 @@ fn call_intrinsic( dest_layout: &'tcx Layout, target: mir::BasicBlock, ) -> EvalResult<'tcx> { - let arg_vals: EvalResult<Vec<Value>> = args.iter() - .map(|arg| self.eval_operand(arg)) - .collect(); + let arg_vals: EvalResult<Vec<Value>> = + args.iter().map(|arg| self.eval_operand(arg)).collect(); let arg_vals = arg_vals?; let i32 = self.tcx.types.i32; let isize = self.tcx.types.isize; @@ -48,15 +42,35 @@ fn call_intrinsic( let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..]; match intrinsic_name { - "add_with_overflow" => - self.intrinsic_with_overflow(mir::BinOp::Add, &args[0], &args[1], dest, dest_ty)?, + "add_with_overflow" => { + self.intrinsic_with_overflow( + mir::BinOp::Add, + &args[0], + &args[1], + dest, + dest_ty, + )? + } - "sub_with_overflow" => - self.intrinsic_with_overflow(mir::BinOp::Sub, &args[0], &args[1], dest, dest_ty)?, - - "mul_with_overflow" => - self.intrinsic_with_overflow(mir::BinOp::Mul, &args[0], &args[1], dest, dest_ty)?, + "sub_with_overflow" => { + self.intrinsic_with_overflow( + mir::BinOp::Sub, + &args[0], + &args[1], + dest, + dest_ty, + )? + } + "mul_with_overflow" => { + self.intrinsic_with_overflow( + mir::BinOp::Mul, + &args[0], + &args[1], + dest, + dest_ty, + )? + } "arith_offset" => { let offset = self.value_to_primval(arg_vals[1], isize)?.to_i128()? as i64; @@ -68,7 +82,9 @@ fn call_intrinsic( "assume" => { let bool = self.tcx.types.bool; let cond = self.value_to_primval(arg_vals[0], bool)?.to_bool()?; - if !cond { return err!(AssumptionNotHeld); } + if !cond { + return err!(AssumptionNotHeld); + } } "atomic_load" | @@ -104,7 +120,11 @@ fn call_intrinsic( Value::ByValPair(..)
=> bug!("atomic_xchg doesn't work with nonprimitives"), }; self.write_primval(dest, old, ty)?; - self.write_primval(Lvalue::from_primval_ptr(ptr), change, ty)?; + self.write_primval( + Lvalue::from_primval_ptr(ptr), + change, + ty, + )?; } _ if intrinsic_name.starts_with("atomic_cxchg") => { @@ -121,14 +141,38 @@ fn call_intrinsic( let (val, _) = self.binary_op(mir::BinOp::Eq, old, ty, expect_old, ty)?; let dest = self.force_allocation(dest)?.to_ptr()?; self.write_pair_to_ptr(old, val, dest, dest_ty)?; - self.write_primval(Lvalue::from_primval_ptr(ptr), change, ty)?; + self.write_primval( + Lvalue::from_primval_ptr(ptr), + change, + ty, + )?; } - "atomic_or" | "atomic_or_acq" | "atomic_or_rel" | "atomic_or_acqrel" | "atomic_or_relaxed" | - "atomic_xor" | "atomic_xor_acq" | "atomic_xor_rel" | "atomic_xor_acqrel" | "atomic_xor_relaxed" | - "atomic_and" | "atomic_and_acq" | "atomic_and_rel" | "atomic_and_acqrel" | "atomic_and_relaxed" | - "atomic_xadd" | "atomic_xadd_acq" | "atomic_xadd_rel" | "atomic_xadd_acqrel" | "atomic_xadd_relaxed" | - "atomic_xsub" | "atomic_xsub_acq" | "atomic_xsub_rel" | "atomic_xsub_acqrel" | "atomic_xsub_relaxed" => { + "atomic_or" | + "atomic_or_acq" | + "atomic_or_rel" | + "atomic_or_acqrel" | + "atomic_or_relaxed" | + "atomic_xor" | + "atomic_xor_acq" | + "atomic_xor_rel" | + "atomic_xor_acqrel" | + "atomic_xor_relaxed" | + "atomic_and" | + "atomic_and_acq" | + "atomic_and_rel" | + "atomic_and_acqrel" | + "atomic_and_relaxed" | + "atomic_xadd" | + "atomic_xadd_acq" | + "atomic_xadd_rel" | + "atomic_xadd_acqrel" | + "atomic_xadd_relaxed" | + "atomic_xsub" | + "atomic_xsub_acq" | + "atomic_xsub_rel" | + "atomic_xsub_acqrel" | + "atomic_xsub_relaxed" => { let ty = substs.type_at(0); let ptr = arg_vals[0].into_ptr(&self.memory)?; let change = self.value_to_primval(arg_vals[1], ty)?; @@ -136,7 +180,9 @@ fn call_intrinsic( let old = match old { Value::ByVal(val) => val, Value::ByRef { .. } => bug!("just read the value, can't be byref"), - Value::ByValPair(..) => bug!("atomic_xadd_relaxed doesn't work with nonprimitives"), + Value::ByValPair(..) => { + bug!("atomic_xadd_relaxed doesn't work with nonprimitives") + } }; self.write_primval(dest, old, ty)?; let op = match intrinsic_name.split('_').nth(1).unwrap() { @@ -150,7 +196,7 @@ fn call_intrinsic( // FIXME: what do atomics do on overflow? let (val, _) = self.binary_op(op, old, ty, change, ty)?; self.write_primval(Lvalue::from_primval_ptr(ptr), val, ty)?; - }, + } "breakpoint" => unimplemented!(), // halt miri @@ -165,22 +211,23 @@ fn call_intrinsic( let elem_align = self.type_align(elem_ty)?; let src = arg_vals[0].into_ptr(&self.memory)?; let dest = arg_vals[1].into_ptr(&self.memory)?; - self.memory.copy(src, dest, count * elem_size, elem_align, intrinsic_name.ends_with("_nonoverlapping"))?; + self.memory.copy( + src, + dest, + count * elem_size, + elem_align, + intrinsic_name.ends_with("_nonoverlapping"), + )?; } } - "ctpop" | - "cttz" | - "cttz_nonzero" | - "ctlz" | - "ctlz_nonzero" | - "bswap" => { + "ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => { let ty = substs.type_at(0); let num = self.value_to_primval(arg_vals[0], ty)?.to_bytes()?; let kind = self.ty_to_primval_kind(ty)?; let num = if intrinsic_name.ends_with("_nonzero") { if num == 0 { - return err!(Intrinsic(format!("{} called on 0", intrinsic_name))) + return err!(Intrinsic(format!("{} called on 0", intrinsic_name))); } numeric_intrinsic(intrinsic_name.trim_right_matches("_nonzero"), num, kind)? 
} else { @@ -196,10 +243,8 @@ fn call_intrinsic( self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?; } - "sinf32" | "fabsf32" | "cosf32" | - "sqrtf32" | "expf32" | "exp2f32" | - "logf32" | "log10f32" | "log2f32" | - "floorf32" | "ceilf32" | "truncf32" => { + "sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" | + "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" => { let f = self.value_to_primval(arg_vals[0], f32)?.to_f32()?; let f = match intrinsic_name { "sinf32" => f.sin(), @@ -219,10 +264,8 @@ fn call_intrinsic( self.write_primval(dest, PrimVal::from_f32(f), dest_ty)?; } - "sinf64" | "fabsf64" | "cosf64" | - "sqrtf64" | "expf64" | "exp2f64" | - "logf64" | "log10f64" | "log2f64" | - "floorf64" | "ceilf64" | "truncf64" => { + "sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" | + "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" => { let f = self.value_to_primval(arg_vals[0], f64)?.to_f64()?; let f = match intrinsic_name { "sinf64" => f.sin(), @@ -258,9 +301,7 @@ fn call_intrinsic( self.write_primval(dest, result.0, dest_ty)?; } - "likely" | - "unlikely" | - "forget" => {} + "likely" | "unlikely" | "forget" => {} "init" => { let size = self.type_size(dest_ty)?.expect("cannot zero unsized value"); @@ -270,27 +311,35 @@ fn call_intrinsic( // These writes have no alignment restriction anyway. this.memory.write_repeat(ptr, 0, size)?; val - }, + } // TODO(solson): Revisit this, it's fishy to check for Undef here. - Value::ByVal(PrimVal::Undef) => match this.ty_to_primval_kind(dest_ty) { - Ok(_) => Value::ByVal(PrimVal::Bytes(0)), - Err(_) => { - let ptr = this.alloc_ptr_with_substs(dest_ty, substs)?; - let ptr = Pointer::from(PrimVal::Ptr(ptr)); - this.memory.write_repeat(ptr, 0, size)?; - Value::by_ref(ptr) + Value::ByVal(PrimVal::Undef) => { + match this.ty_to_primval_kind(dest_ty) { + Ok(_) => Value::ByVal(PrimVal::Bytes(0)), + Err(_) => { + let ptr = this.alloc_ptr_with_substs(dest_ty, substs)?; + let ptr = Pointer::from(PrimVal::Ptr(ptr)); + this.memory.write_repeat(ptr, 0, size)?; + Value::by_ref(ptr) + } } - }, + } Value::ByVal(_) => Value::ByVal(PrimVal::Bytes(0)), - Value::ByValPair(..) => - Value::ByValPair(PrimVal::Bytes(0), PrimVal::Bytes(0)), + Value::ByValPair(..) => { + Value::ByValPair(PrimVal::Bytes(0), PrimVal::Bytes(0)) + } }; Ok(zero_val) }; match dest { Lvalue::Local { frame, local } => self.modify_local(frame, local, init)?, - Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned: true }, extra: LvalueExtra::None } => self.memory.write_repeat(ptr, 0, size)?, - Lvalue::Ptr { .. } => bug!("init intrinsic tried to write to fat or unaligned ptr target"), + Lvalue::Ptr { + ptr: PtrAndAlign { ptr, aligned: true }, + extra: LvalueExtra::None, + } => self.memory.write_repeat(ptr, 0, size)?, + Lvalue::Ptr { .. 
} => { + bug!("init intrinsic tried to write to fat or unaligned ptr target") + } } } @@ -319,7 +368,11 @@ fn call_intrinsic( let ty = substs.type_at(0); let env = ty::ParamEnv::empty(Reveal::All); let needs_drop = ty.needs_drop(self.tcx, env); - self.write_primval(dest, PrimVal::from_bool(needs_drop), dest_ty)?; + self.write_primval( + dest, + PrimVal::from_bool(needs_drop), + dest_ty, + )?; } "offset" => { @@ -330,72 +383,124 @@ fn call_intrinsic( } "overflowing_sub" => { - self.intrinsic_overflowing(mir::BinOp::Sub, &args[0], &args[1], dest, dest_ty)?; + self.intrinsic_overflowing( + mir::BinOp::Sub, + &args[0], + &args[1], + dest, + dest_ty, + )?; } "overflowing_mul" => { - self.intrinsic_overflowing(mir::BinOp::Mul, &args[0], &args[1], dest, dest_ty)?; + self.intrinsic_overflowing( + mir::BinOp::Mul, + &args[0], + &args[1], + dest, + dest_ty, + )?; } "overflowing_add" => { - self.intrinsic_overflowing(mir::BinOp::Add, &args[0], &args[1], dest, dest_ty)?; + self.intrinsic_overflowing( + mir::BinOp::Add, + &args[0], + &args[1], + dest, + dest_ty, + )?; } "powf32" => { let f = self.value_to_primval(arg_vals[0], f32)?.to_f32()?; let f2 = self.value_to_primval(arg_vals[1], f32)?.to_f32()?; - self.write_primval(dest, PrimVal::from_f32(f.powf(f2)), dest_ty)?; + self.write_primval( + dest, + PrimVal::from_f32(f.powf(f2)), + dest_ty, + )?; } "powf64" => { let f = self.value_to_primval(arg_vals[0], f64)?.to_f64()?; let f2 = self.value_to_primval(arg_vals[1], f64)?.to_f64()?; - self.write_primval(dest, PrimVal::from_f64(f.powf(f2)), dest_ty)?; + self.write_primval( + dest, + PrimVal::from_f64(f.powf(f2)), + dest_ty, + )?; } "fmaf32" => { let a = self.value_to_primval(arg_vals[0], f32)?.to_f32()?; let b = self.value_to_primval(arg_vals[1], f32)?.to_f32()?; let c = self.value_to_primval(arg_vals[2], f32)?.to_f32()?; - self.write_primval(dest, PrimVal::from_f32(a * b + c), dest_ty)?; + self.write_primval( + dest, + PrimVal::from_f32(a * b + c), + dest_ty, + )?; } "fmaf64" => { let a = self.value_to_primval(arg_vals[0], f64)?.to_f64()?; let b = self.value_to_primval(arg_vals[1], f64)?.to_f64()?; let c = self.value_to_primval(arg_vals[2], f64)?.to_f64()?; - self.write_primval(dest, PrimVal::from_f64(a * b + c), dest_ty)?; + self.write_primval( + dest, + PrimVal::from_f64(a * b + c), + dest_ty, + )?; } "powif32" => { let f = self.value_to_primval(arg_vals[0], f32)?.to_f32()?; let i = self.value_to_primval(arg_vals[1], i32)?.to_i128()?; - self.write_primval(dest, PrimVal::from_f32(f.powi(i as i32)), dest_ty)?; + self.write_primval( + dest, + PrimVal::from_f32(f.powi(i as i32)), + dest_ty, + )?; } "powif64" => { let f = self.value_to_primval(arg_vals[0], f64)?.to_f64()?; let i = self.value_to_primval(arg_vals[1], i32)?.to_i128()?; - self.write_primval(dest, PrimVal::from_f64(f.powi(i as i32)), dest_ty)?; + self.write_primval( + dest, + PrimVal::from_f64(f.powi(i as i32)), + dest_ty, + )?; } "size_of" => { let ty = substs.type_at(0); - let size = self.type_size(ty)?.expect("size_of intrinsic called on unsized value") as u128; + let size = self.type_size(ty)?.expect( + "size_of intrinsic called on unsized value", + ) as u128; self.write_primval(dest, PrimVal::from_u128(size), dest_ty)?; } "size_of_val" => { let ty = substs.type_at(0); let (size, _) = self.size_and_align_of_dst(ty, arg_vals[0])?; - self.write_primval(dest, PrimVal::from_u128(size as u128), dest_ty)?; + self.write_primval( + dest, + PrimVal::from_u128(size as u128), + dest_ty, + )?; } "min_align_of_val" | "align_of_val" => { let ty = 
substs.type_at(0); let (_, align) = self.size_and_align_of_dst(ty, arg_vals[0])?; - self.write_primval(dest, PrimVal::from_u128(align as u128), dest_ty)?; + self.write_primval( + dest, + PrimVal::from_u128(align as u128), + dest_ty, + )?; } "type_name" => { @@ -413,61 +518,103 @@ fn call_intrinsic( "transmute" => { let src_ty = substs.type_at(0); let ptr = self.force_allocation(dest)?.to_ptr()?; - self.write_maybe_aligned_mut(/*aligned*/false, |ectx| { - ectx.write_value_to_ptr(arg_vals[0], ptr.into(), src_ty) - })?; + self.write_maybe_aligned_mut( + /*aligned*/ + false, + |ectx| { + ectx.write_value_to_ptr(arg_vals[0], ptr.into(), src_ty) + }, + )?; } "unchecked_shl" => { - let bits = self.type_size(dest_ty)?.expect("intrinsic can't be called on unsized type") as u128 * 8; - let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?.to_bytes()?; + let bits = self.type_size(dest_ty)?.expect( + "intrinsic can't be called on unsized type", + ) as u128 * 8; + let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))? + .to_bytes()?; if rhs >= bits { - return err!(Intrinsic(format!("Overflowing shift by {} in unchecked_shl", rhs))); + return err!(Intrinsic( + format!("Overflowing shift by {} in unchecked_shl", rhs), + )); } - self.intrinsic_overflowing(mir::BinOp::Shl, &args[0], &args[1], dest, dest_ty)?; + self.intrinsic_overflowing( + mir::BinOp::Shl, + &args[0], + &args[1], + dest, + dest_ty, + )?; } "unchecked_shr" => { - let bits = self.type_size(dest_ty)?.expect("intrinsic can't be called on unsized type") as u128 * 8; - let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?.to_bytes()?; + let bits = self.type_size(dest_ty)?.expect( + "intrinsic can't be called on unsized type", + ) as u128 * 8; + let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))? + .to_bytes()?; if rhs >= bits { - return err!(Intrinsic(format!("Overflowing shift by {} in unchecked_shr", rhs))); + return err!(Intrinsic( + format!("Overflowing shift by {} in unchecked_shr", rhs), + )); } - self.intrinsic_overflowing(mir::BinOp::Shr, &args[0], &args[1], dest, dest_ty)?; + self.intrinsic_overflowing( + mir::BinOp::Shr, + &args[0], + &args[1], + dest, + dest_ty, + )?; } "unchecked_div" => { - let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?.to_bytes()?; + let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))? + .to_bytes()?; if rhs == 0 { return err!(Intrinsic(format!("Division by 0 in unchecked_div"))); } - self.intrinsic_overflowing(mir::BinOp::Div, &args[0], &args[1], dest, dest_ty)?; + self.intrinsic_overflowing( + mir::BinOp::Div, + &args[0], + &args[1], + dest, + dest_ty, + )?; } "unchecked_rem" => { - let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?.to_bytes()?; + let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))? + .to_bytes()?; if rhs == 0 { return err!(Intrinsic(format!("Division by 0 in unchecked_rem"))); } - self.intrinsic_overflowing(mir::BinOp::Rem, &args[0], &args[1], dest, dest_ty)?; + self.intrinsic_overflowing( + mir::BinOp::Rem, + &args[0], + &args[1], + dest, + dest_ty, + )?; } "uninit" => { let size = dest_layout.size(&self.tcx.data_layout).bytes(); - let uninit = |this: &mut Self, val: Value| { - match val { - Value::ByRef(PtrAndAlign { ptr, .. }) => { - this.memory.mark_definedness(ptr, size, false)?; - Ok(val) - }, - _ => Ok(Value::ByVal(PrimVal::Undef)), + let uninit = |this: &mut Self, val: Value| match val { + Value::ByRef(PtrAndAlign { ptr, .. 
}) => { + this.memory.mark_definedness(ptr, size, false)?; + Ok(val) } + _ => Ok(Value::ByVal(PrimVal::Undef)), }; match dest { Lvalue::Local { frame, local } => self.modify_local(frame, local, uninit)?, - Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned: true }, extra: LvalueExtra::None } => - self.memory.mark_definedness(ptr, size, false)?, - Lvalue::Ptr { .. } => bug!("uninit intrinsic tried to write to fat or unaligned ptr target"), + Lvalue::Ptr { + ptr: PtrAndAlign { ptr, aligned: true }, + extra: LvalueExtra::None, + } => self.memory.mark_definedness(ptr, size, false)?, + Lvalue::Ptr { .. } => { + bug!("uninit intrinsic tried to write to fat or unaligned ptr target") + } } } @@ -476,7 +623,9 @@ fn call_intrinsic( let ty = substs.type_at(0); let ty_align = self.type_align(ty)?; let val_byte = self.value_to_primval(arg_vals[1], u8)?.to_u128()? as u8; - let size = self.type_size(ty)?.expect("write_bytes() type must be sized"); + let size = self.type_size(ty)?.expect( + "write_bytes() type must be sized", + ); let ptr = arg_vals[0].into_ptr(&self.memory)?; let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?; if count > 0 { @@ -502,7 +651,7 @@ fn call_intrinsic( fn numeric_intrinsic<'tcx>( name: &str, bytes: u128, - kind: PrimValKind + kind: PrimValKind, ) -> EvalResult<'tcx, PrimVal> { macro_rules! integer_intrinsic { ($method:ident) => ({ @@ -527,10 +676,10 @@ macro_rules! integer_intrinsic { let result_val = match name { "bswap" => integer_intrinsic!(swap_bytes), - "ctlz" => integer_intrinsic!(leading_zeros), + "ctlz" => integer_intrinsic!(leading_zeros), "ctpop" => integer_intrinsic!(count_ones), - "cttz" => integer_intrinsic!(trailing_zeros), - _ => bug!("not a numeric intrinsic: {}", name), + "cttz" => integer_intrinsic!(trailing_zeros), + _ => bug!("not a numeric intrinsic: {}", name), }; Ok(result_val) diff --git a/miri/lib.rs b/miri/lib.rs index c93b938e9bd..a26fbd5d3fc 100644 --- a/miri/lib.rs +++ b/miri/lib.rs @@ -20,10 +20,7 @@ use syntax::codemap::Span; -use std::collections::{ - HashMap, - BTreeMap, -}; +use std::collections::{HashMap, BTreeMap}; #[macro_use] extern crate rustc_miri; @@ -57,7 +54,10 @@ fn run_main<'a, 'tcx: 'a>( let mut cleanup_ptr = None; // Pointer to be deallocated when we are done if !main_mir.return_ty.is_nil() || main_mir.arg_count != 0 { - return err!(Unimplemented("miri does not support main functions without `fn()` type signatures".to_owned())); + return err!(Unimplemented( + "miri does not support main functions without `fn()` type signatures" + .to_owned(), + )); } if let Some(start_id) = start_wrapper { @@ -65,7 +65,10 @@ fn run_main<'a, 'tcx: 'a>( let start_mir = ecx.load_mir(start_instance.def)?; if start_mir.arg_count != 3 { - return err!(AbiViolation(format!("'start' lang item should have three arguments, but has {}", start_mir.arg_count))); + return err!(AbiViolation(format!( + "'start' lang item should have three arguments, but has {}", + start_mir.arg_count + ))); } // Return value @@ -90,7 +93,11 @@ fn run_main<'a, 'tcx: 'a>( let dest = ecx.eval_lvalue(&mir::Lvalue::Local(args.next().unwrap()))?; let main_ty = main_instance.def.def_ty(ecx.tcx); let main_ptr_ty = ecx.tcx.mk_fn_ptr(main_ty.fn_sig(ecx.tcx)); - ecx.write_value(Value::ByVal(PrimVal::Ptr(main_ptr)), dest, main_ptr_ty)?; + ecx.write_value( + Value::ByVal(PrimVal::Ptr(main_ptr)), + dest, + main_ptr_ty, + )?; // Second argument (argc): 0 let dest = ecx.eval_lvalue(&mir::Lvalue::Local(args.next().unwrap()))?; @@ -114,7 +121,11 @@ fn run_main<'a, 'tcx: 'a>( while 
ecx.step()? {} ecx.run_tls_dtors()?; if let Some(cleanup_ptr) = cleanup_ptr { - ecx.memory_mut().deallocate(cleanup_ptr, None, MemoryKind::Stack)?; + ecx.memory_mut().deallocate( + cleanup_ptr, + None, + MemoryKind::Stack, + )?; } Ok(()) } diff --git a/miri/operator.rs b/miri/operator.rs index b6ab72c5dd0..6d68aadf96c 100644 --- a/miri/operator.rs +++ b/miri/operator.rs @@ -37,20 +37,28 @@ fn ptr_op( use rustc::mir::BinOp::*; let usize = PrimValKind::from_uint_size(self.memory.pointer_size()); let isize = PrimValKind::from_int_size(self.memory.pointer_size()); - let left_kind = self.ty_to_primval_kind(left_ty)?; + let left_kind = self.ty_to_primval_kind(left_ty)?; let right_kind = self.ty_to_primval_kind(right_ty)?; match bin_op { Offset if left_kind == Ptr && right_kind == usize => { - let pointee_ty = left_ty.builtin_deref(true, ty::LvaluePreference::NoPreference).expect("Offset called on non-ptr type").ty; - let ptr = self.pointer_offset(left.into(), pointee_ty, right.to_bytes()? as i64)?; + let pointee_ty = left_ty + .builtin_deref(true, ty::LvaluePreference::NoPreference) + .expect("Offset called on non-ptr type") + .ty; + let ptr = self.pointer_offset( + left.into(), + pointee_ty, + right.to_bytes()? as i64, + )?; Ok(Some((ptr.into_inner_primval(), false))) - }, + } // These work on anything Eq if left_kind == right_kind => { let result = match (left, right) { (PrimVal::Bytes(left), PrimVal::Bytes(right)) => left == right, (PrimVal::Ptr(left), PrimVal::Ptr(right)) => left == right, - (PrimVal::Undef, _) | (_, PrimVal::Undef) => return err!(ReadUndefBytes), + (PrimVal::Undef, _) | + (_, PrimVal::Undef) => return err!(ReadUndefBytes), _ => false, }; Ok(Some((PrimVal::from_bool(result), false))) @@ -59,16 +67,17 @@ fn ptr_op( let result = match (left, right) { (PrimVal::Bytes(left), PrimVal::Bytes(right)) => left != right, (PrimVal::Ptr(left), PrimVal::Ptr(right)) => left != right, - (PrimVal::Undef, _) | (_, PrimVal::Undef) => return err!(ReadUndefBytes), + (PrimVal::Undef, _) | + (_, PrimVal::Undef) => return err!(ReadUndefBytes), _ => true, }; Ok(Some((PrimVal::from_bool(result), false))) } // These need both pointers to be in the same allocation Lt | Le | Gt | Ge | Sub - if left_kind == right_kind - && (left_kind == Ptr || left_kind == usize || left_kind == isize) - && left.is_ptr() && right.is_ptr() => { + if left_kind == right_kind && + (left_kind == Ptr || left_kind == usize || left_kind == isize) && + left.is_ptr() && right.is_ptr() => { let left = left.to_ptr()?; let right = right.to_ptr()?; if left.alloc_id == right.alloc_id { @@ -77,13 +86,15 @@ fn ptr_op( Le => left.offset <= right.offset, Gt => left.offset > right.offset, Ge => left.offset >= right.offset, - Sub => return self.binary_op( - Sub, - PrimVal::Bytes(left.offset as u128), - self.tcx.types.usize, - PrimVal::Bytes(right.offset as u128), - self.tcx.types.usize, - ).map(Some), + Sub => { + return self.binary_op( + Sub, + PrimVal::Bytes(left.offset as u128), + self.tcx.types.usize, + PrimVal::Bytes(right.offset as u128), + self.tcx.types.usize, + ).map(Some) + } _ => bug!("We already established it has to be one of these operators."), }; Ok(Some((PrimVal::from_bool(res), false))) @@ -94,18 +105,28 @@ fn ptr_op( } // These work if one operand is a pointer, the other an integer Add | BitAnd | Sub - if left_kind == right_kind && (left_kind == usize || left_kind == isize) - && left.is_ptr() && right.is_bytes() => { + if left_kind == right_kind && (left_kind == usize || left_kind == isize) && + left.is_ptr() && 
right.is_bytes() => { // Cast to i128 is fine as we checked the kind to be ptr-sized - self.ptr_int_arithmetic(bin_op, left.to_ptr()?, right.to_bytes()? as i128, left_kind == isize).map(Some) + self.ptr_int_arithmetic( + bin_op, + left.to_ptr()?, + right.to_bytes()? as i128, + left_kind == isize, + ).map(Some) } Add | BitAnd - if left_kind == right_kind && (left_kind == usize || left_kind == isize) - && left.is_bytes() && right.is_ptr() => { + if left_kind == right_kind && (left_kind == usize || left_kind == isize) && + left.is_bytes() && right.is_ptr() => { // This is a commutative operation, just swap the operands - self.ptr_int_arithmetic(bin_op, right.to_ptr()?, left.to_bytes()? as i128, left_kind == isize).map(Some) + self.ptr_int_arithmetic( + bin_op, + right.to_ptr()?, + left.to_bytes()? as i128, + left_kind == isize, + ).map(Some) } - _ => Ok(None) + _ => Ok(None), } } @@ -118,7 +139,7 @@ fn ptr_int_arithmetic( ) -> EvalResult<'tcx, (PrimVal, bool)> { use rustc::mir::BinOp::*; - fn map_to_primval((res, over) : (MemoryPointer, bool)) -> (PrimVal, bool) { + fn map_to_primval((res, over): (MemoryPointer, bool)) -> (PrimVal, bool) { (PrimVal::Ptr(res), over) } diff --git a/miri/tls.rs b/miri/tls.rs index 6900535dfb8..e592478f6f9 100644 --- a/miri/tls.rs +++ b/miri/tls.rs @@ -1,21 +1,17 @@ use rustc::{ty, mir}; -use super::{ - TlsKey, TlsEntry, - EvalResult, EvalErrorKind, - Pointer, - Memory, - Evaluator, - Lvalue, - StackPopCleanup, EvalContext, -}; +use super::{TlsKey, TlsEntry, EvalResult, EvalErrorKind, Pointer, Memory, Evaluator, Lvalue, + StackPopCleanup, EvalContext}; pub trait MemoryExt<'tcx> { fn create_tls_key(&mut self, dtor: Option<ty::Instance<'tcx>>) -> TlsKey; fn delete_tls_key(&mut self, key: TlsKey) -> EvalResult<'tcx>; fn load_tls(&mut self, key: TlsKey) -> EvalResult<'tcx, Pointer>; fn store_tls(&mut self, key: TlsKey, new_data: Pointer) -> EvalResult<'tcx>; - fn fetch_tls_dtor(&mut self, key: Option<TlsKey>) -> EvalResult<'tcx, Option<(ty::Instance<'tcx>, Pointer, TlsKey)>>; + fn fetch_tls_dtor( + &mut self, + key: Option<TlsKey>, + ) -> EvalResult<'tcx, Option<(ty::Instance<'tcx>, Pointer, TlsKey)>>; } pub trait EvalContextExt<'tcx> { @@ -26,7 +22,13 @@ impl<'a, 'tcx: 'a> MemoryExt<'tcx> for Memory<'a, 'tcx, Evaluator> { fn create_tls_key(&mut self, dtor: Option<ty::Instance<'tcx>>) -> TlsKey { let new_key = self.data.next_thread_local; self.data.next_thread_local += 1; - self.data.thread_local.insert(new_key, TlsEntry { data: Pointer::null(), dtor }); + self.data.thread_local.insert( + new_key, + TlsEntry { + data: Pointer::null(), + dtor, + }, + ); trace!("New TLS key allocated: {} with dtor {:?}", new_key, dtor); return new_key; } @@ -36,9 +38,9 @@ fn delete_tls_key(&mut self, key: TlsKey) -> EvalResult<'tcx> { Some(_) => { trace!("TLS key {} removed", key); Ok(()) - }, - None => err!(TlsOutOfBounds) - } + } + None => err!(TlsOutOfBounds), + }; } fn load_tls(&mut self, key: TlsKey) -> EvalResult<'tcx, Pointer> { @@ -46,9 +48,9 @@ fn load_tls(&mut self, key: TlsKey) -> EvalResult<'tcx, Pointer> { Some(&TlsEntry { data, ..
}) => { trace!("TLS key {} loaded: {:?}", key, data); Ok(data) - }, - None => err!(TlsOutOfBounds) - } + } + None => err!(TlsOutOfBounds), + }; } fn store_tls(&mut self, key: TlsKey, new_data: Pointer) -> EvalResult<'tcx> { @@ -57,11 +59,11 @@ fn store_tls(&mut self, key: TlsKey, new_data: Pointer) -> EvalResult<'tcx> { trace!("TLS key {} stored: {:?}", key, new_data); *data = new_data; Ok(()) - }, - None => err!(TlsOutOfBounds) - } + } + None => err!(TlsOutOfBounds), + }; } - + /// Returns a dtor, its argument and its index, if one is supposed to run /// /// An optional destructor function may be associated with each key value. /// At thread exit, if a key value has a non-NULL destructor pointer, /// and the thread has a non-NULL value associated with that key, /// the value of the key is set to NULL, and then the function pointed /// to is called with the previously associated value as its sole argument. /// The order of destructor calls is unspecified if more than one destructor /// exists for a thread when it exits. /// /// If, after all the destructors have been called for all non-NULL values /// with associated destructors, there are still some non-NULL values /// with associated destructors, implementations may stop calling destructors, /// or they may continue calling destructors until no non-NULL values with /// associated destructors exist, even though this might result in an infinite loop. - fn fetch_tls_dtor(&mut self, key: Option<TlsKey>) -> EvalResult<'tcx, Option<(ty::Instance<'tcx>, Pointer, TlsKey)>> { + fn fetch_tls_dtor( + &mut self, + key: Option<TlsKey>, + ) -> EvalResult<'tcx, Option<(ty::Instance<'tcx>, Pointer, TlsKey)>> { use std::collections::Bound::*; let start = match key { Some(key) => Excluded(key), None => Unbounded, }; - for (&key, &mut TlsEntry { ref mut data, dtor }) in self.data.thread_local.range_mut((start, Unbounded)) { + for (&key, &mut TlsEntry { ref mut data, dtor }) in + self.data.thread_local.range_mut((start, Unbounded)) + { if !data.is_null()? { if let Some(dtor) = dtor { let ret = Some((dtor, *data, key)); @@ -115,7 +122,9 @@ fn run_tls_dtors(&mut self) -> EvalResult<'tcx> { Lvalue::undef(), StackPopCleanup::None, )?; - let arg_local = self.frame().mir.args_iter().next().ok_or(EvalErrorKind::AbiViolation("TLS dtor does not take enough arguments.".to_owned()))?; + let arg_local = self.frame().mir.args_iter().next().ok_or( + EvalErrorKind::AbiViolation("TLS dtor does not take enough arguments.".to_owned()), + )?; let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; let ty = self.tcx.mk_mut_ptr(self.tcx.types.u8); self.write_ptr(dest, ptr, ty)?; diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs index c3ddeca0e65..c6016509d23 100644 --- a/src/librustc_mir/interpret/cast.rs +++ b/src/librustc_mir/interpret/cast.rs @@ -1,20 +1,14 @@ use rustc::ty::{self, Ty}; use syntax::ast::{FloatTy, IntTy, UintTy}; -use super::{ - PrimVal, - EvalContext, - EvalResult, - MemoryPointer, PointerArithmetic, - Machine, -}; +use super::{PrimVal, EvalContext, EvalResult, MemoryPointer, PointerArithmetic, Machine}; impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { pub(super) fn cast_primval( &self, val: PrimVal, src_ty: Ty<'tcx>, - dest_ty: Ty<'tcx> + dest_ty: Ty<'tcx>, ) -> EvalResult<'tcx, PrimVal> { let src_kind = self.ty_to_primval_kind(src_ty)?; @@ -29,11 +23,11 @@ pub(super) fn cast_primval( I8 | I16 | I32 | I64 | I128 => { self.cast_from_signed_int(val.to_i128()?, dest_ty) - }, + } Bool | Char | U8 | U16 | U32 | U64 | U128 | FnPtr | Ptr => { self.cast_from_int(val.to_u128()?, dest_ty, false) - }, + } } } } @@ -43,18 +37,22 @@ fn cast_from_signed_int(&self, val: i128, ty: ty::Ty<'tcx>) -> EvalResult<'tcx, self.cast_from_int(val as u128, ty, val < 0) } - fn cast_from_int(&self, v: u128, ty: ty::Ty<'tcx>, negative: bool) -> EvalResult<'tcx, PrimVal> { + fn cast_from_int( + &self, + v: u128, + ty: ty::Ty<'tcx>, + negative: bool, + ) -> EvalResult<'tcx, PrimVal> { use
rustc::ty::TypeVariants::*; match ty.sty { // Casts to bool are not permitted by rustc, no need to handle them here. - - TyInt(IntTy::I8) => Ok(PrimVal::Bytes(v as i128 as i8 as u128)), + TyInt(IntTy::I8) => Ok(PrimVal::Bytes(v as i128 as i8 as u128)), TyInt(IntTy::I16) => Ok(PrimVal::Bytes(v as i128 as i16 as u128)), TyInt(IntTy::I32) => Ok(PrimVal::Bytes(v as i128 as i32 as u128)), TyInt(IntTy::I64) => Ok(PrimVal::Bytes(v as i128 as i64 as u128)), TyInt(IntTy::I128) => Ok(PrimVal::Bytes(v as u128)), - TyUint(UintTy::U8) => Ok(PrimVal::Bytes(v as u8 as u128)), + TyUint(UintTy::U8) => Ok(PrimVal::Bytes(v as u8 as u128)), TyUint(UintTy::U16) => Ok(PrimVal::Bytes(v as u16 as u128)), TyUint(UintTy::U32) => Ok(PrimVal::Bytes(v as u32 as u128)), TyUint(UintTy::U64) => Ok(PrimVal::Bytes(v as u64 as u128)), @@ -73,9 +71,9 @@ fn cast_from_int(&self, v: u128, ty: ty::Ty<'tcx>, negative: bool) -> EvalResult } TyFloat(FloatTy::F64) if negative => Ok(PrimVal::from_f64(v as i128 as f64)), - TyFloat(FloatTy::F64) => Ok(PrimVal::from_f64(v as f64)), + TyFloat(FloatTy::F64) => Ok(PrimVal::from_f64(v as f64)), TyFloat(FloatTy::F32) if negative => Ok(PrimVal::from_f32(v as i128 as f32)), - TyFloat(FloatTy::F32) => Ok(PrimVal::from_f32(v as f32)), + TyFloat(FloatTy::F32) => Ok(PrimVal::from_f32(v as f32)), TyChar if v as u8 as u128 == v => Ok(PrimVal::Bytes(v)), TyChar => err!(InvalidChar(v)), @@ -92,7 +90,7 @@ fn cast_from_float(&self, val: f64, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> { match ty.sty { // Casting negative floats to unsigned integers yields zero. TyUint(_) if val < 0.0 => self.cast_from_int(0, ty, false), - TyInt(_) if val < 0.0 => self.cast_from_int(val as i128 as u128, ty, true), + TyInt(_) if val < 0.0 => self.cast_from_int(val as i128 as u128, ty, true), TyInt(_) | ty::TyUint(_) => self.cast_from_int(val as u128, ty, false), @@ -106,8 +104,9 @@ fn cast_from_ptr(&self, ptr: MemoryPointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Pr use rustc::ty::TypeVariants::*; match ty.sty { // Casting to a reference or fn pointer is not permitted by rustc, no need to support it here. 
- TyRawPtr(_) | TyInt(IntTy::Is) | TyUint(UintTy::Us) => - Ok(PrimVal::Ptr(ptr)), + TyRawPtr(_) | + TyInt(IntTy::Is) | + TyUint(UintTy::Us) => Ok(PrimVal::Ptr(ptr)), TyInt(_) | TyUint(_) => err!(ReadPointerAsBytes), _ => err!(Unimplemented(format!("ptr to {:?} cast", ty))), } diff --git a/src/librustc_mir/interpret/const_eval.rs b/src/librustc_mir/interpret/const_eval.rs index f11734a588a..f66d3f65ff1 100644 --- a/src/librustc_mir/interpret/const_eval.rs +++ b/src/librustc_mir/interpret/const_eval.rs @@ -5,13 +5,8 @@ use syntax::ast::Mutability; use syntax::codemap::Span; -use super::{ - EvalResult, EvalError, EvalErrorKind, - GlobalId, Lvalue, Value, - PrimVal, - EvalContext, StackPopCleanup, PtrAndAlign, - MemoryKind, -}; +use super::{EvalResult, EvalError, EvalErrorKind, GlobalId, Lvalue, Value, PrimVal, EvalContext, + StackPopCleanup, PtrAndAlign, MemoryKind}; use rustc_const_math::ConstInt; @@ -24,22 +19,37 @@ pub fn eval_body_as_primval<'a, 'tcx>( ) -> EvalResult<'tcx, (PrimVal, Ty<'tcx>)> { let limits = super::ResourceLimits::default(); let mut ecx = EvalContext::::new(tcx, limits, (), ()); - let cid = GlobalId { instance, promoted: None }; + let cid = GlobalId { + instance, + promoted: None, + }; if ecx.tcx.has_attr(instance.def_id(), "linkage") { return Err(ConstEvalError::NotConst("extern global".to_string()).into()); } - + let mir = ecx.load_mir(instance.def)?; if !ecx.globals.contains_key(&cid) { - let size = ecx.type_size_with_substs(mir.return_ty, instance.substs)?.expect("unsized global"); + let size = ecx.type_size_with_substs(mir.return_ty, instance.substs)? + .expect("unsized global"); let align = ecx.type_align_with_substs(mir.return_ty, instance.substs)?; - let ptr = ecx.memory.allocate(size, align, MemoryKind::UninitializedStatic)?; + let ptr = ecx.memory.allocate( + size, + align, + MemoryKind::UninitializedStatic, + )?; let aligned = !ecx.is_packed(mir.return_ty)?; - ecx.globals.insert(cid, PtrAndAlign { ptr: ptr.into(), aligned }); + ecx.globals.insert( + cid, + PtrAndAlign { + ptr: ptr.into(), + aligned, + }, + ); let mutable = !mir.return_ty.is_freeze( - ecx.tcx, - ty::ParamEnv::empty(Reveal::All), - mir.span); + ecx.tcx, + ty::ParamEnv::empty(Reveal::All), + mir.span, + ); let mutability = if mutable { Mutability::Mutable } else { @@ -77,14 +87,26 @@ pub fn eval_body_as_integer<'a, 'tcx>( TyInt(IntTy::I32) => ConstInt::I32(prim as i128 as i32), TyInt(IntTy::I64) => ConstInt::I64(prim as i128 as i64), TyInt(IntTy::I128) => ConstInt::I128(prim as i128), - TyInt(IntTy::Is) => ConstInt::Isize(ConstIsize::new(prim as i128 as i64, tcx.sess.target.int_type).expect("miri should already have errored")), + TyInt(IntTy::Is) => ConstInt::Isize( + ConstIsize::new(prim as i128 as i64, tcx.sess.target.int_type) + .expect("miri should already have errored"), + ), TyUint(UintTy::U8) => ConstInt::U8(prim as u8), TyUint(UintTy::U16) => ConstInt::U16(prim as u16), TyUint(UintTy::U32) => ConstInt::U32(prim as u32), TyUint(UintTy::U64) => ConstInt::U64(prim as u64), TyUint(UintTy::U128) => ConstInt::U128(prim), - TyUint(UintTy::Us) => ConstInt::Usize(ConstUsize::new(prim as u64, tcx.sess.target.uint_type).expect("miri should already have errored")), - _ => return Err(ConstEvalError::NeedsRfc("evaluating anything other than isize/usize during typeck".to_string()).into()), + TyUint(UintTy::Us) => ConstInt::Usize( + ConstUsize::new(prim as u64, tcx.sess.target.uint_type) + .expect("miri should already have errored"), + ), + _ => { + return Err( + ConstEvalError::NeedsRfc( + "evaluating 
anything other than isize/usize during typeck".to_string(), + ).into(), + ) + } }) } @@ -106,10 +128,14 @@ impl fmt::Display for ConstEvalError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::ConstEvalError::*; match *self { - NeedsRfc(ref msg) => - write!(f, "\"{}\" needs an rfc before being allowed inside constants", msg), - NotConst(ref msg) => - write!(f, "Cannot evaluate within constants: \"{}\"", msg), + NeedsRfc(ref msg) => { + write!( + f, + "\"{}\" needs an rfc before being allowed inside constants", + msg + ) + } + NotConst(ref msg) => write!(f, "Cannot evaluate within constants: \"{}\"", msg), } } } @@ -118,10 +144,8 @@ impl Error for ConstEvalError { fn description(&self) -> &str { use self::ConstEvalError::*; match *self { - NeedsRfc(_) => - "this feature needs an rfc before being allowed inside constants", - NotConst(_) => - "this feature is not compatible with constant evaluation", + NeedsRfc(_) => "this feature needs an rfc before being allowed inside constants", + NotConst(_) => "this feature is not compatible with constant evaluation", } } @@ -143,14 +167,19 @@ fn eval_fn_call<'a>( _sig: ty::FnSig<'tcx>, ) -> EvalResult<'tcx, bool> { if !ecx.tcx.is_const_fn(instance.def_id()) { - return Err(ConstEvalError::NotConst(format!("calling non-const fn `{}`", instance)).into()); + return Err( + ConstEvalError::NotConst(format!("calling non-const fn `{}`", instance)).into(), + ); } let mir = match ecx.load_mir(instance.def) { Ok(mir) => mir, - Err(EvalError{ kind: EvalErrorKind::NoMirFor(path), ..} ) => { + Err(EvalError { kind: EvalErrorKind::NoMirFor(path), .. }) => { // some simple things like `malloc` might get accepted in the future - return Err(ConstEvalError::NeedsRfc(format!("calling extern function `{}`", path)).into()); - }, + return Err( + ConstEvalError::NeedsRfc(format!("calling extern function `{}`", path)) + .into(), + ); + } Err(other) => return Err(other), }; let (return_lvalue, return_to_block) = match destination { @@ -178,7 +207,9 @@ fn call_intrinsic<'a>( _dest_layout: &'tcx layout::Layout, _target: mir::BasicBlock, ) -> EvalResult<'tcx> { - Err(ConstEvalError::NeedsRfc("calling intrinsics".to_string()).into()) + Err( + ConstEvalError::NeedsRfc("calling intrinsics".to_string()).into(), + ) } fn try_ptr_op<'a>( @@ -192,7 +223,9 @@ fn try_ptr_op<'a>( if left.is_bytes() && right.is_bytes() { Ok(None) } else { - Err(ConstEvalError::NeedsRfc("Pointer arithmetic or comparison".to_string()).into()) + Err( + ConstEvalError::NeedsRfc("Pointer arithmetic or comparison".to_string()).into(), + ) } } @@ -204,6 +237,8 @@ fn box_alloc<'a>( _ecx: &mut EvalContext<'a, 'tcx, Self>, _ty: ty::Ty<'tcx>, ) -> EvalResult<'tcx, PrimVal> { - Err(ConstEvalError::NeedsRfc("Heap allocations via `box` keyword".to_string()).into()) + Err( + ConstEvalError::NeedsRfc("Heap allocations via `box` keyword".to_string()).into(), + ) } } diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index fd609d5fec1..a5f5072dc29 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -16,16 +16,9 @@ use syntax::ast::{self, Mutability}; use syntax::abi::Abi; -use super::{ - EvalError, EvalResult, EvalErrorKind, - GlobalId, Lvalue, LvalueExtra, - Memory, MemoryPointer, HasMemory, - MemoryKind, - operator, - PrimVal, PrimValKind, Value, Pointer, - ValidationQuery, - Machine, -}; +use super::{EvalError, EvalResult, EvalErrorKind, GlobalId, Lvalue, LvalueExtra, Memory, + MemoryPointer, 
HasMemory, MemoryKind, operator, PrimVal, PrimValKind, Value, Pointer, + ValidationQuery, Machine}; pub struct EvalContext<'a, 'tcx: 'a, M: Machine<'tcx>> { /// Stores data required by the `Machine` @@ -60,7 +53,6 @@ pub struct Frame<'tcx> { //////////////////////////////////////////////////////////////////////////////// // Function and callsite information //////////////////////////////////////////////////////////////////////////////// - /// The MIR for the function called on this frame. pub mir: &'tcx mir::Mir<'tcx>, @@ -73,7 +65,6 @@ pub struct Frame<'tcx> { //////////////////////////////////////////////////////////////////////////////// // Return lvalue and locals //////////////////////////////////////////////////////////////////////////////// - /// The block to return to when returning from the current stack frame pub return_to_block: StackPopCleanup, @@ -91,7 +82,6 @@ pub struct Frame<'tcx> { //////////////////////////////////////////////////////////////////////////////// // Current position within the function //////////////////////////////////////////////////////////////////////////////// - /// The block that is currently executed (or will be executed after the above call stacks /// return). pub block: mir::BasicBlock, @@ -189,9 +179,11 @@ pub fn alloc_ptr(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, MemoryPointer> { pub fn alloc_ptr_with_substs( &mut self, ty: Ty<'tcx>, - substs: &'tcx Substs<'tcx> + substs: &'tcx Substs<'tcx>, ) -> EvalResult<'tcx, MemoryPointer> { - let size = self.type_size_with_substs(ty, substs)?.expect("cannot alloc memory for unsized type"); + let size = self.type_size_with_substs(ty, substs)?.expect( + "cannot alloc memory for unsized type", + ); let align = self.type_align_with_substs(ty, substs)?; self.memory.allocate(size, align, MemoryKind::Stack) } @@ -216,7 +208,10 @@ pub fn cur_frame(&self) -> usize { pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> { let ptr = self.memory.allocate_cached(s.as_bytes())?; - Ok(Value::ByValPair(PrimVal::Ptr(ptr), PrimVal::from_u128(s.len() as u128))) + Ok(Value::ByValPair( + PrimVal::Ptr(ptr), + PrimVal::from_u128(s.len() as u128), + )) } pub(super) fn const_to_value(&mut self, const_val: &ConstVal<'tcx>) -> EvalResult<'tcx, Value> { @@ -237,12 +232,12 @@ pub(super) fn const_to_value(&mut self, const_val: &ConstVal<'tcx>) -> EvalResul PrimVal::Ptr(ptr) } - Variant(_) => unimplemented!(), - Struct(_) => unimplemented!(), - Tuple(_) => unimplemented!(), + Variant(_) => unimplemented!(), + Struct(_) => unimplemented!(), + Tuple(_) => unimplemented!(), // function items are zero sized and thus have no readable value - Function(..) => PrimVal::Undef, - Array(_) => unimplemented!(), + Function(..) 
=> PrimVal::Undef, + Array(_) => unimplemented!(), Repeat(_, _) => unimplemented!(), }; @@ -255,10 +250,17 @@ pub(super) fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { ty.is_sized(self.tcx, ty::ParamEnv::empty(Reveal::All), DUMMY_SP) } - pub fn load_mir(&self, instance: ty::InstanceDef<'tcx>) -> EvalResult<'tcx, &'tcx mir::Mir<'tcx>> { + pub fn load_mir( + &self, + instance: ty::InstanceDef<'tcx>, + ) -> EvalResult<'tcx, &'tcx mir::Mir<'tcx>> { trace!("load mir {:?}", instance); match instance { - ty::InstanceDef::Item(def_id) => self.tcx.maybe_optimized_mir(def_id).ok_or_else(|| EvalErrorKind::NoMirFor(self.tcx.item_path_str(def_id)).into()), + ty::InstanceDef::Item(def_id) => { + self.tcx.maybe_optimized_mir(def_id).ok_or_else(|| { + EvalErrorKind::NoMirFor(self.tcx.item_path_str(def_id)).into() + }) + } _ => Ok(self.tcx.instance_mir(instance)), } } @@ -272,7 +274,8 @@ pub fn monomorphize(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> } pub fn erase_lifetimes<T>(&self, value: &Binder<T>) -> T - where T : TypeFoldable<'tcx> + where + T: TypeFoldable<'tcx>, { let value = self.tcx.erase_late_bound_regions(value); self.tcx.erase_regions(&value) @@ -301,15 +304,25 @@ pub fn size_and_align_of_dst( let (sized_size, sized_align) = match *layout { ty::layout::Layout::Univariant { ref variant, .. } => { - (variant.offsets.last().map_or(0, |o| o.bytes()), variant.align) + ( + variant.offsets.last().map_or(0, |o| o.bytes()), + variant.align, + ) } _ => { - bug!("size_and_align_of_dst: expected Univariant for `{}`, found {:#?}", - ty, layout); + bug!( + "size_and_align_of_dst: expected Univariant for `{}`, found {:#?}", + ty, + layout + ); } }; - debug!("DST {} statically sized prefix size: {} align: {:?}", - ty, sized_size, sized_align); + debug!( + "DST {} statically sized prefix size: {} align: {:?}", + ty, + sized_size, + sized_align + ); // Recurse to get the size of the dynamically sized field (must be // the last field). @@ -339,7 +352,8 @@ pub fn size_and_align_of_dst( // Choose max of two known alignments (combined value must // be aligned according to more restrictive of the two). - let align = sized_align.max(Align::from_bytes(unsized_align, unsized_align).unwrap()); + let align = + sized_align.max(Align::from_bytes(unsized_align, unsized_align).unwrap()); // Issue #27023: must add any necessary padding to `size` // (to make it a multiple of `align`) before returning it.
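// Aside: a minimal sketch (not part of this diff; names are illustrative) of
// the combination rule `size_and_align_of_dst` implements: the statically
// sized prefix and the dynamically sized tail each contribute a size and an
// alignment, the result takes the stricter of the two alignments, and the
// total size is padded up to a multiple of that alignment (the Issue #27023
// fix referenced above).
fn combine_dst_size_and_align(
    sized_size: u64,    // offset at which the unsized tail begins
    sized_align: u64,   // alignment of the sized prefix (a power of two)
    unsized_size: u64,  // dynamically computed size of the tail
    unsized_align: u64, // dynamically computed alignment of the tail
) -> (u64, u64) {
    let align = sized_align.max(unsized_align);
    let size = sized_size + unsized_size;
    // Round up to the next multiple of `align`; valid because alignments
    // are powers of two.
    let padded_size = (size + align - 1) & !(align - 1);
    (padded_size, align)
}

fn main() {
    // A 5-byte, 4-aligned prefix plus a 6-byte tail that needs 8-byte
    // alignment: overall alignment 8, and 5 + 6 = 11 bytes pad up to 16.
    assert_eq!(combine_dst_size_and_align(5, 4, 6, 8), (16, 8));
}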
@@ -363,7 +377,9 @@ pub fn size_and_align_of_dst( ty::TySlice(_) | ty::TyStr => { let elem_ty = ty.sequence_element_type(self.tcx); - let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized") as u64; + let elem_size = self.type_size(elem_ty)?.expect( + "slice element must be sized", + ) as u64; let (_, len) = value.into_slice(&mut self.memory)?; let align = self.type_align(elem_ty)?; Ok((len * elem_size, align as u64)) @@ -375,12 +391,10 @@ pub fn size_and_align_of_dst( } /// Returns the normalized type of a struct field - fn field_ty( - &self, - param_substs: &Substs<'tcx>, - f: &ty::FieldDef, - ) -> ty::Ty<'tcx> { - self.tcx.normalize_associated_type(&f.ty(self.tcx, param_substs)) + fn field_ty(&self, param_substs: &Substs<'tcx>, f: &ty::FieldDef) -> ty::Ty<'tcx> { + self.tcx.normalize_associated_type( + &f.ty(self.tcx, param_substs), + ) } pub fn type_size(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<u64>> { @@ -404,19 +418,30 @@ pub fn type_size_with_substs( } } - pub fn type_align_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, u64> { - self.type_layout_with_substs(ty, substs).map(|layout| layout.align(&self.tcx.data_layout).abi()) + pub fn type_align_with_substs( + &self, + ty: Ty<'tcx>, + substs: &'tcx Substs<'tcx>, + ) -> EvalResult<'tcx, u64> { + self.type_layout_with_substs(ty, substs).map(|layout| { + layout.align(&self.tcx.data_layout).abi() + }) } pub fn type_layout(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, &'tcx Layout> { self.type_layout_with_substs(ty, self.substs()) } - fn type_layout_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, &'tcx Layout> { + fn type_layout_with_substs( + &self, + ty: Ty<'tcx>, + substs: &'tcx Substs<'tcx>, + ) -> EvalResult<'tcx, &'tcx Layout> { // TODO(solson): Is this inefficient? Needs investigation. let ty = self.monomorphize(ty, substs); - ty.layout(self.tcx, ty::ParamEnv::empty(Reveal::All)).map_err(|layout| EvalErrorKind::Layout(layout).into()) + ty.layout(self.tcx, ty::ParamEnv::empty(Reveal::All)) + .map_err(|layout| EvalErrorKind::Layout(layout).into()) } pub fn push_stack_frame( @@ -437,13 +462,14 @@ fn collect_storage_annotations<'tcx>(mir: &'tcx mir::Mir<'tcx>) -> HashSet<mir::Local> { + StorageLive(mir::Lvalue::Local(local)) | + StorageDead(mir::Lvalue::Local(local)) => { set.insert(local); } _ => {} } } - }; + } set } @@ -453,7 +479,7 @@ fn collect_storage_annotations<'tcx>(mir: &'tcx mir::Mir<'tcx>) -> HashSet(mir: &'tcx mir::Mir<'tcx>) -> HashSet EvalResult<'tcx> { ::log_settings::settings().indentation -= 1; self.memory.locks_lifetime_ended(None); - let frame = self.stack.pop().expect("tried to pop a stack frame, but there were none"); + let frame = self.stack.pop().expect( + "tried to pop a stack frame, but there were none", + ); if !self.stack.is_empty() { // TODO: IS this the correct time to start considering these accesses as originating from the returned-to stack frame? let cur_frame = self.cur_frame(); self.memory.set_cur_frame(cur_frame); } match frame.return_to_block { - StackPopCleanup::MarkStatic(mutable) => if let Lvalue::Ptr{ ptr, .. } = frame.return_lvalue { - // FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions - self.memory.mark_static_initalized(ptr.to_ptr()?.alloc_id, mutable)? - } else { - bug!("StackPopCleanup::MarkStatic on: {:?}", frame.return_lvalue); - }, + StackPopCleanup::MarkStatic(mutable) => { + if let Lvalue::Ptr { ptr, .. } = frame.return_lvalue { + // FIXME: to_ptr()?
might be too extreme here, static zsts might reach this under certain conditions + self.memory.mark_static_initalized( + ptr.to_ptr()?.alloc_id, + mutable, + )? + } else { + bug!("StackPopCleanup::MarkStatic on: {:?}", frame.return_lvalue); + } + } StackPopCleanup::Goto(target) => self.goto_block(target), - StackPopCleanup::None => {}, + StackPopCleanup::None => {} } // deallocate all locals that are backed by an allocation for local in frame.locals { @@ -597,7 +630,14 @@ pub(super) fn eval_rvalue_into_lvalue( } BinaryOp(bin_op, ref left, ref right) => { - if self.intrinsic_overflowing(bin_op, left, right, dest, dest_ty)? { + if self.intrinsic_overflowing( + bin_op, + left, + right, + dest, + dest_ty, + )? + { // There was an overflow in an unchecked binop. Right now, we consider this an error and bail out. // The rationale is that the reason rustc emits unchecked binops in release mode (vs. the checked binops // it emits in debug mode) is performance, but it doesn't cost us any performance in miri. @@ -608,13 +648,23 @@ pub(super) fn eval_rvalue_into_lvalue( } CheckedBinaryOp(bin_op, ref left, ref right) => { - self.intrinsic_with_overflow(bin_op, left, right, dest, dest_ty)?; + self.intrinsic_with_overflow( + bin_op, + left, + right, + dest, + dest_ty, + )?; } UnaryOp(un_op, ref operand) => { let val = self.eval_operand_to_primval(operand)?; let kind = self.ty_to_primval_kind(dest_ty)?; - self.write_primval(dest, operator::unary_op(un_op, val, kind)?, dest_ty)?; + self.write_primval( + dest, + operator::unary_op(un_op, val, kind)?, + dest_ty, + )?; } // Skip everything for zsts @@ -634,9 +684,14 @@ pub(super) fn eval_rvalue_into_lvalue( self.assign_fields(dest, dest_ty, operands)?; } - General { discr, ref variants, .. } => { + General { + discr, + ref variants, + .. + } => { if let mir::AggregateKind::Adt(adt_def, variant, _, _) = **kind { - let discr_val = adt_def.discriminants(self.tcx) + let discr_val = adt_def + .discriminants(self.tcx) .nth(variant) .expect("broken mir: Adt variant id invalid") .to_u128_unchecked(); @@ -677,7 +732,12 @@ pub(super) fn eval_rvalue_into_lvalue( } } - StructWrappedNullablePointer { nndiscr, ref discrfield_source, ref nonnull, .. } => { + StructWrappedNullablePointer { + nndiscr, + ref discrfield_source, + ref nonnull, + .. + } => { if let mir::AggregateKind::Adt(_, variant, _, _) = **kind { if nndiscr == variant as u64 { self.write_maybe_aligned_mut(!nonnull.packed, |ecx| { @@ -688,18 +748,25 @@ pub(super) fn eval_rvalue_into_lvalue( let operand_ty = self.operand_ty(operand); assert_eq!(self.type_size(operand_ty)?, Some(0)); } - let (offset, TyAndPacked { ty, packed: _}) = self.nonnull_offset_and_ty(dest_ty, nndiscr, discrfield_source)?; + let (offset, TyAndPacked { ty, packed: _ }) = + self.nonnull_offset_and_ty( + dest_ty, + nndiscr, + discrfield_source, + )?; // TODO: The packed flag is ignored // FIXME(solson) let dest = self.force_allocation(dest)?.to_ptr()?; let dest = dest.offset(offset.bytes(), &self)?; - let dest_size = self.type_size(ty)? 
- .expect("bad StructWrappedNullablePointer discrfield"); - self.memory.write_maybe_aligned_mut(!nonnull.packed, |mem| { - mem.write_int(dest, 0, dest_size) - })?; + let dest_size = self.type_size(ty)?.expect( + "bad StructWrappedNullablePointer discrfield", + ); + self.memory.write_maybe_aligned_mut( + !nonnull.packed, + |mem| mem.write_int(dest, 0, dest_size), + )?; } } else { bug!("tried to assign {:?} to Layout::RawNullablePointer", kind); @@ -709,7 +776,8 @@ pub(super) fn eval_rvalue_into_lvalue( CEnum { .. } => { assert_eq!(operands.len(), 0); if let mir::AggregateKind::Adt(adt_def, variant, _, _) = **kind { - let n = adt_def.discriminants(self.tcx) + let n = adt_def + .discriminants(self.tcx) .nth(variant) .expect("broken mir: Adt variant index invalid") .to_u128_unchecked(); @@ -747,11 +815,17 @@ pub(super) fn eval_rvalue_into_lvalue( Repeat(ref operand, _) => { let (elem_ty, length) = match dest_ty.sty { ty::TyArray(elem_ty, n) => (elem_ty, n as u64), - _ => bug!("tried to assign array-repeat to non-array type {:?}", dest_ty), + _ => { + bug!( + "tried to assign array-repeat to non-array type {:?}", + dest_ty + ) + } }; self.inc_step_counter_and_check_limit(length)?; - let elem_size = self.type_size(elem_ty)? - .expect("repeat element type must be sized"); + let elem_size = self.type_size(elem_ty)?.expect( + "repeat element type must be sized", + ); let value = self.eval_operand(operand)?; // FIXME(solson) @@ -768,7 +842,11 @@ pub(super) fn eval_rvalue_into_lvalue( let src = self.eval_lvalue(lvalue)?; let ty = self.lvalue_ty(lvalue); let (_, len) = src.elem_ty_and_len(ty); - self.write_primval(dest, PrimVal::from_u128(len as u128), dest_ty)?; + self.write_primval( + dest, + PrimVal::from_u128(len as u128), + dest_ty, + )?; } Ref(_, _, ref lvalue) => { @@ -781,8 +859,9 @@ pub(super) fn eval_rvalue_into_lvalue( LvalueExtra::None => ptr.ptr.to_value(), LvalueExtra::Length(len) => ptr.ptr.to_value_with_len(len), LvalueExtra::Vtable(vtable) => ptr.ptr.to_value_with_vtable(vtable), - LvalueExtra::DowncastVariant(..) => - bug!("attempted to take a reference to an enum downcast lvalue"), + LvalueExtra::DowncastVariant(..) => { + bug!("attempted to take a reference to an enum downcast lvalue") + } }; self.write_value(val, dest, dest_ty)?; } @@ -793,8 +872,14 @@ pub(super) fn eval_rvalue_into_lvalue( } NullaryOp(mir::NullOp::SizeOf, ty) => { - let size = self.type_size(ty)?.expect("SizeOf nullary MIR operator called for unsized type"); - self.write_primval(dest, PrimVal::from_u128(size as u128), dest_ty)?; + let size = self.type_size(ty)?.expect( + "SizeOf nullary MIR operator called for unsized type", + ); + self.write_primval( + dest, + PrimVal::from_u128(size as u128), + dest_ty, + )?; } Cast(kind, ref operand, cast_ty) => { @@ -812,13 +897,13 @@ pub(super) fn eval_rvalue_into_lvalue( let src_ty = self.operand_ty(operand); if self.type_is_fat_ptr(src_ty) { match (src, self.type_is_fat_ptr(dest_ty)) { - (Value::ByRef{..}, _) | + (Value::ByRef { .. 
}, _) | (Value::ByValPair(..), true) => { self.write_value(src, dest, dest_ty)?; - }, + } (Value::ByValPair(data, _), false) => { self.write_value(Value::ByVal(data), dest, dest_ty)?; - }, + } (Value::ByVal(_), _) => bug!("expected fat ptr"), } } else { @@ -828,31 +913,50 @@ pub(super) fn eval_rvalue_into_lvalue( } } - ReifyFnPointer => match self.operand_ty(operand).sty { - ty::TyFnDef(def_id, substs) => { - let instance = resolve(self.tcx, def_id, substs); - let fn_ptr = self.memory.create_fn_alloc(instance); - self.write_value(Value::ByVal(PrimVal::Ptr(fn_ptr)), dest, dest_ty)?; - }, - ref other => bug!("reify fn pointer on {:?}", other), - }, + ReifyFnPointer => { + match self.operand_ty(operand).sty { + ty::TyFnDef(def_id, substs) => { + let instance = resolve(self.tcx, def_id, substs); + let fn_ptr = self.memory.create_fn_alloc(instance); + self.write_value( + Value::ByVal(PrimVal::Ptr(fn_ptr)), + dest, + dest_ty, + )?; + } + ref other => bug!("reify fn pointer on {:?}", other), + } + } - UnsafeFnPointer => match dest_ty.sty { - ty::TyFnPtr(_) => { - let src = self.eval_operand(operand)?; - self.write_value(src, dest, dest_ty)?; - }, - ref other => bug!("fn to unsafe fn cast on {:?}", other), - }, + UnsafeFnPointer => { + match dest_ty.sty { + ty::TyFnPtr(_) => { + let src = self.eval_operand(operand)?; + self.write_value(src, dest, dest_ty)?; + } + ref other => bug!("fn to unsafe fn cast on {:?}", other), + } + } - ClosureFnPointer => match self.operand_ty(operand).sty { - ty::TyClosure(def_id, substs) => { - let instance = resolve_closure(self.tcx, def_id, substs, ty::ClosureKind::FnOnce); - let fn_ptr = self.memory.create_fn_alloc(instance); - self.write_value(Value::ByVal(PrimVal::Ptr(fn_ptr)), dest, dest_ty)?; - }, - ref other => bug!("closure fn pointer on {:?}", other), - }, + ClosureFnPointer => { + match self.operand_ty(operand).sty { + ty::TyClosure(def_id, substs) => { + let instance = resolve_closure( + self.tcx, + def_id, + substs, + ty::ClosureKind::FnOnce, + ); + let fn_ptr = self.memory.create_fn_alloc(instance); + self.write_value( + Value::ByVal(PrimVal::Ptr(fn_ptr)), + dest, + dest_ty, + )?; + } + ref other => bug!("closure fn pointer on {:?}", other), + } + } } } @@ -862,14 +966,17 @@ pub(super) fn eval_rvalue_into_lvalue( let ptr = self.force_allocation(lval)?.to_ptr()?; let discr_val = self.read_discriminant_value(ptr, ty)?; if let ty::TyAdt(adt_def, _) = ty.sty { - if adt_def.discriminants(self.tcx).all(|v| discr_val != v.to_u128_unchecked()) { + if adt_def.discriminants(self.tcx).all(|v| { + discr_val != v.to_u128_unchecked() + }) + { return err!(InvalidDiscriminant); } } else { bug!("rustc only generates Rvalue::Discriminant for enums"); } self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?; - }, + } } if log_enabled!(::log::LogLevel::Trace) { @@ -903,7 +1010,10 @@ pub(super) fn nonnull_offset_and_ty( let variant = &adt_def.variants[nndiscr as usize]; let index = discrfield[1]; let field = &variant.fields[index as usize]; - (self.get_field_offset(ty, index as usize)?, field.ty(self.tcx, substs)) + ( + self.get_field_offset(ty, index as usize)?, + field.ty(self.tcx, substs), + ) } _ => bug!("non-enum for StructWrappedNullablePointer: {}", ty), }; @@ -921,16 +1031,28 @@ fn field_path_offset_and_ty>( let mut packed = false; for field_index in path { let field_offset = self.get_field_offset(ty, field_index)?; - trace!("field_path_offset_and_ty: {}, {}, {:?}, {:?}", field_index, ty, field_offset, offset); + trace!( + "field_path_offset_and_ty: {}, {}, 
{:?}, {:?}", + field_index, + ty, + field_offset, + offset + ); let field_ty = self.get_field_ty(ty, field_index)?; ty = field_ty.ty; packed = packed || field_ty.packed; - offset = offset.checked_add(field_offset, &self.tcx.data_layout).unwrap(); + offset = offset + .checked_add(field_offset, &self.tcx.data_layout) + .unwrap(); } Ok((offset, TyAndPacked { ty, packed })) } - fn get_fat_field(&self, pointee_ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Ty<'tcx>> { + fn get_fat_field( + &self, + pointee_ty: Ty<'tcx>, + field_index: usize, + ) -> EvalResult<'tcx, Ty<'tcx>> { match (field_index, &self.tcx.struct_tail(pointee_ty).sty) { (1, &ty::TyStr) | (1, &ty::TySlice(_)) => Ok(self.tcx.types.usize), @@ -941,42 +1063,92 @@ fn get_fat_field(&self, pointee_ty: Ty<'tcx>, field_index: usize) -> EvalResult< } /// Returns the field type and whether the field is packed - pub fn get_field_ty(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, TyAndPacked<'tcx>> { + pub fn get_field_ty( + &self, + ty: Ty<'tcx>, + field_index: usize, + ) -> EvalResult<'tcx, TyAndPacked<'tcx>> { match ty.sty { - ty::TyAdt(adt_def, _) if adt_def.is_box() => - Ok(TyAndPacked { ty: self.get_fat_field(ty.boxed_ty(), field_index)?, packed: false }), + ty::TyAdt(adt_def, _) if adt_def.is_box() => Ok(TyAndPacked { + ty: self.get_fat_field(ty.boxed_ty(), field_index)?, + packed: false, + }), ty::TyAdt(adt_def, substs) if adt_def.is_enum() => { use rustc::ty::layout::Layout::*; match *self.type_layout(ty)? { - RawNullablePointer { nndiscr, .. } => - Ok(TyAndPacked { ty: adt_def.variants[nndiscr as usize].fields[field_index].ty(self.tcx, substs), packed: false }), - StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => { - let ty = adt_def.variants[nndiscr as usize].fields[field_index].ty(self.tcx, substs); - Ok(TyAndPacked { ty, packed: nonnull.packed }) - }, - _ => err!(Unimplemented(format!("get_field_ty can't handle enum type: {:?}, {:?}", ty, ty.sty))), + RawNullablePointer { nndiscr, .. } => Ok(TyAndPacked { + ty: adt_def.variants[nndiscr as usize].fields[field_index].ty( + self.tcx, + substs, + ), + packed: false, + }), + StructWrappedNullablePointer { + nndiscr, + ref nonnull, + .. + } => { + let ty = adt_def.variants[nndiscr as usize].fields[field_index].ty( + self.tcx, + substs, + ); + Ok(TyAndPacked { + ty, + packed: nonnull.packed, + }) + } + _ => { + err!(Unimplemented(format!( + "get_field_ty can't handle enum type: {:?}, {:?}", + ty, + ty.sty + ))) + } } } ty::TyAdt(adt_def, substs) => { let variant_def = adt_def.struct_variant(); use rustc::ty::layout::Layout::*; match *self.type_layout(ty)? { - UntaggedUnion { ref variants } => - Ok(TyAndPacked { ty: variant_def.fields[field_index].ty(self.tcx, substs), packed: variants.packed }), - Univariant { ref variant, .. } => - Ok(TyAndPacked { ty: variant_def.fields[field_index].ty(self.tcx, substs), packed: variant.packed }), - _ => err!(Unimplemented(format!("get_field_ty can't handle struct type: {:?}, {:?}", ty, ty.sty))), + UntaggedUnion { ref variants } => Ok(TyAndPacked { + ty: variant_def.fields[field_index].ty(self.tcx, substs), + packed: variants.packed, + }), + Univariant { ref variant, .. 
} => Ok(TyAndPacked { + ty: variant_def.fields[field_index].ty(self.tcx, substs), + packed: variant.packed, + }), + _ => { + err!(Unimplemented(format!( + "get_field_ty can't handle struct type: {:?}, {:?}", + ty, + ty.sty + ))) + } } } - ty::TyTuple(fields, _) => Ok(TyAndPacked { ty: fields[field_index], packed: false }), + ty::TyTuple(fields, _) => Ok(TyAndPacked { + ty: fields[field_index], + packed: false, + }), ty::TyRef(_, ref tam) | - ty::TyRawPtr(ref tam) => Ok(TyAndPacked { ty: self.get_fat_field(tam.ty, field_index)?, packed: false }), + ty::TyRawPtr(ref tam) => Ok(TyAndPacked { + ty: self.get_fat_field(tam.ty, field_index)?, + packed: false, + }), - ty::TyArray(ref inner, _) => Ok(TyAndPacked { ty: inner, packed: false }), + ty::TyArray(ref inner, _) => Ok(TyAndPacked { + ty: inner, + packed: false, + }), - _ => err!(Unimplemented(format!("can't handle type: {:?}, {:?}", ty, ty.sty))), + _ => { + err!(Unimplemented( + format!("can't handle type: {:?}, {:?}", ty, ty.sty), + )) + } } } @@ -986,19 +1158,19 @@ fn get_field_offset(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, use rustc::ty::layout::Layout::*; match *layout { - Univariant { ref variant, .. } => { - Ok(variant.offsets[field_index]) - } + Univariant { ref variant, .. } => Ok(variant.offsets[field_index]), FatPointer { .. } => { let bytes = field_index as u64 * self.memory.pointer_size(); Ok(Size::from_bytes(bytes)) } - StructWrappedNullablePointer { ref nonnull, .. } => { - Ok(nonnull.offsets[field_index]) - } + StructWrappedNullablePointer { ref nonnull, .. } => Ok(nonnull.offsets[field_index]), UntaggedUnion { .. } => Ok(Size::from_bytes(0)), _ => { - let msg = format!("get_field_offset: can't handle type: {:?}, with layout: {:?}", ty, layout); + let msg = format!( + "get_field_offset: can't handle type: {:?}, with layout: {:?}", + ty, + layout + ); err!(Unimplemented(msg)) } } @@ -1012,18 +1184,25 @@ pub fn get_field_count(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> { Univariant { ref variant, .. } => Ok(variant.offsets.len() as u64), FatPointer { .. } => Ok(2), StructWrappedNullablePointer { ref nonnull, .. } => Ok(nonnull.offsets.len() as u64), - Vector { count , .. } | + Vector { count, .. } | Array { count, .. } => Ok(count), Scalar { .. } => Ok(0), UntaggedUnion { .. 
} => Ok(1), _ => { - let msg = format!("get_field_count: can't handle type: {:?}, with layout: {:?}", ty, layout); + let msg = format!( + "get_field_count: can't handle type: {:?}, with layout: {:?}", + ty, + layout + ); err!(Unimplemented(msg)) } } } - pub(super) fn eval_operand_to_primval(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, PrimVal> { + pub(super) fn eval_operand_to_primval( + &mut self, + op: &mir::Operand<'tcx>, + ) -> EvalResult<'tcx, PrimVal> { let value = self.eval_operand(op)?; let ty = self.operand_ty(op); self.value_to_primval(value, ty) @@ -1042,7 +1221,10 @@ pub fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, Valu Literal::Item { def_id, substs } => { let instance = self.resolve_associated_const(def_id, substs); - let cid = GlobalId { instance, promoted: None }; + let cid = GlobalId { + instance, + promoted: None, + }; Value::ByRef(*self.globals.get(&cid).expect("static/const not cached")) } @@ -1069,7 +1251,9 @@ pub fn operand_ty(&self, operand: &mir::Operand<'tcx>) -> Ty<'tcx> { } fn copy(&mut self, src: Pointer, dest: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx> { - let size = self.type_size(ty)?.expect("cannot copy from an unsized type"); + let size = self.type_size(ty)?.expect( + "cannot copy from an unsized type", + ); let align = self.type_align(ty)?; self.memory.copy(src, dest, size, align, false)?; Ok(()) @@ -1090,24 +1274,25 @@ pub fn is_packed(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, bool> { }) } - pub fn force_allocation( - &mut self, - lvalue: Lvalue, - ) -> EvalResult<'tcx, Lvalue> { + pub fn force_allocation(&mut self, lvalue: Lvalue) -> EvalResult<'tcx, Lvalue> { let new_lvalue = match lvalue { Lvalue::Local { frame, local } => { // -1 since we don't store the return value match self.stack[frame].locals[local.index() - 1] { None => return err!(DeadLocal), Some(Value::ByRef(ptr)) => { - Lvalue::Ptr { ptr, extra: LvalueExtra::None } - }, + Lvalue::Ptr { + ptr, + extra: LvalueExtra::None, + } + } Some(val) => { let ty = self.stack[frame].mir.local_decls[local].ty; let ty = self.monomorphize(ty, self.stack[frame].instance.substs); let substs = self.stack[frame].instance.substs; let ptr = self.alloc_ptr_with_substs(ty, substs)?; - self.stack[frame].locals[local.index() - 1] = Some(Value::by_ref(ptr.into())); // it stays live + self.stack[frame].locals[local.index() - 1] = + Some(Value::by_ref(ptr.into())); // it stays live self.write_value_to_ptr(val, ptr.into(), ty)?; Lvalue::from_ptr(ptr) } @@ -1119,7 +1304,11 @@ pub fn force_allocation( } /// ensures this Value is not a ByRef - pub(super) fn follow_by_ref_value(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> { + pub(super) fn follow_by_ref_value( + &mut self, + value: Value, + ty: Ty<'tcx>, + ) -> EvalResult<'tcx, Value> { match value { Value::ByRef(PtrAndAlign { ptr, aligned }) => { self.read_maybe_aligned(aligned, |ectx| ectx.read_value(ptr, ty)) @@ -1130,7 +1319,7 @@ pub(super) fn follow_by_ref_value(&mut self, value: Value, ty: Ty<'tcx>) -> Eval pub fn value_to_primval(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> { match self.follow_by_ref_value(value, ty)? { - Value::ByRef{..} => bug!("follow_by_ref_value can't result in `ByRef`"), + Value::ByRef { .. 
} => bug!("follow_by_ref_value can't result in `ByRef`"), Value::ByVal(primval) => { self.ensure_valid_value(primval, ty)?; @@ -1141,20 +1330,11 @@ pub fn value_to_primval(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tc } } - pub fn write_null( - &mut self, - dest: Lvalue, - dest_ty: Ty<'tcx>, - ) -> EvalResult<'tcx> { + pub fn write_null(&mut self, dest: Lvalue, dest_ty: Ty<'tcx>) -> EvalResult<'tcx> { self.write_primval(dest, PrimVal::Bytes(0), dest_ty) } - pub fn write_ptr( - &mut self, - dest: Lvalue, - val: Pointer, - dest_ty: Ty<'tcx>, - ) -> EvalResult<'tcx> { + pub fn write_ptr(&mut self, dest: Lvalue, val: Pointer, dest_ty: Ty<'tcx>) -> EvalResult<'tcx> { self.write_value(val.to_value(), dest, dest_ty) } @@ -1179,10 +1359,15 @@ pub fn write_value( // correct if we never look at this data with the wrong type. match dest { - Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned }, extra } => { + Lvalue::Ptr { + ptr: PtrAndAlign { ptr, aligned }, + extra, + } => { assert_eq!(extra, LvalueExtra::None); - self.write_maybe_aligned_mut(aligned, - |ectx| ectx.write_value_to_ptr(src_val, ptr, dest_ty)) + self.write_maybe_aligned_mut( + aligned, + |ectx| ectx.write_value_to_ptr(src_val, ptr, dest_ty), + ) } Lvalue::Local { frame, local } => { @@ -1205,7 +1390,11 @@ fn write_value_possibly_by_val EvalResult<'tcx>>( old_dest_val: Value, dest_ty: Ty<'tcx>, ) -> EvalResult<'tcx> { - if let Value::ByRef(PtrAndAlign { ptr: dest_ptr, aligned }) = old_dest_val { + if let Value::ByRef(PtrAndAlign { + ptr: dest_ptr, + aligned, + }) = old_dest_val + { // If the value is already `ByRef` (that is, backed by an `Allocation`), // then we must write the new value into this allocation, because there may be // other pointers into the allocation. These other pointers are logically @@ -1213,10 +1402,15 @@ fn write_value_possibly_by_val EvalResult<'tcx>>( // // Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we // knew for certain that there were no outstanding pointers to this allocation. - self.write_maybe_aligned_mut(aligned, - |ectx| ectx.write_value_to_ptr(src_val, dest_ptr, dest_ty))?; + self.write_maybe_aligned_mut(aligned, |ectx| { + ectx.write_value_to_ptr(src_val, dest_ptr, dest_ty) + })?; - } else if let Value::ByRef(PtrAndAlign { ptr: src_ptr, aligned }) = src_val { + } else if let Value::ByRef(PtrAndAlign { + ptr: src_ptr, + aligned, + }) = src_val + { // If the value is not `ByRef`, then we know there are no pointers to it // and we can simply overwrite the `Value` in the locals array directly. // @@ -1256,7 +1450,7 @@ pub fn write_value_to_ptr( match value { Value::ByRef(PtrAndAlign { ptr, aligned }) => { self.read_maybe_aligned_mut(aligned, |ectx| ectx.copy(ptr, dest, dest_ty)) - }, + } Value::ByVal(primval) => { let size = self.type_size(dest_ty)?.expect("dest type must be sized"); self.memory.write_primval(dest, primval, size) @@ -1270,7 +1464,7 @@ pub fn write_pair_to_ptr( a: PrimVal, b: PrimVal, ptr: MemoryPointer, - mut ty: Ty<'tcx> + mut ty: Ty<'tcx>, ) -> EvalResult<'tcx> { let mut packed = false; while self.get_field_count(ty)? 
== 1 { @@ -1283,16 +1477,26 @@ pub fn write_pair_to_ptr( let field_1 = self.get_field_offset(ty, 1)?; let field_0_ty = self.get_field_ty(ty, 0)?; let field_1_ty = self.get_field_ty(ty, 1)?; - assert_eq!(field_0_ty.packed, field_1_ty.packed, "the two fields must agree on being packed"); + assert_eq!( + field_0_ty.packed, + field_1_ty.packed, + "the two fields must agree on being packed" + ); packed = packed || field_0_ty.packed; - let field_0_size = self.type_size(field_0_ty.ty)?.expect("pair element type must be sized"); - let field_1_size = self.type_size(field_1_ty.ty)?.expect("pair element type must be sized"); + let field_0_size = self.type_size(field_0_ty.ty)?.expect( + "pair element type must be sized", + ); + let field_1_size = self.type_size(field_1_ty.ty)?.expect( + "pair element type must be sized", + ); let field_0_ptr = ptr.offset(field_0.bytes(), &self)?.into(); let field_1_ptr = ptr.offset(field_1.bytes(), &self)?.into(); - self.write_maybe_aligned_mut(!packed, - |ectx| ectx.memory.write_primval(field_0_ptr, a, field_0_size))?; - self.write_maybe_aligned_mut(!packed, - |ectx| ectx.memory.write_primval(field_1_ptr, b, field_1_size))?; + self.write_maybe_aligned_mut(!packed, |ectx| { + ectx.memory.write_primval(field_0_ptr, a, field_0_size) + })?; + self.write_maybe_aligned_mut(!packed, |ectx| { + ectx.memory.write_primval(field_1_ptr, b, field_1_size) + })?; Ok(()) } @@ -1388,8 +1592,9 @@ fn ensure_valid_value(&self, val: PrimVal, ty: Ty<'tcx>) -> EvalResult<'tcx> { match ty.sty { ty::TyBool if val.to_bytes()? > 1 => err!(InvalidBool), - ty::TyChar if ::std::char::from_u32(val.to_bytes()? as u32).is_none() - => err!(InvalidChar(val.to_bytes()? as u32 as u128)), + ty::TyChar if ::std::char::from_u32(val.to_bytes()? as u32).is_none() => { + err!(InvalidChar(val.to_bytes()? as u32 as u128)) + } _ => Ok(()), } @@ -1403,7 +1608,11 @@ pub fn read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> } } - pub(crate) fn read_ptr(&self, ptr: MemoryPointer, pointee_ty: Ty<'tcx>) -> EvalResult<'tcx, Value> { + pub(crate) fn read_ptr( + &self, + ptr: MemoryPointer, + pointee_ty: Ty<'tcx>, + ) -> EvalResult<'tcx, Value> { let p = self.memory.read_ptr(ptr)?; if self.type_is_sized(pointee_ty) { Ok(p.to_value()) @@ -1411,9 +1620,12 @@ pub(crate) fn read_ptr(&self, ptr: MemoryPointer, pointee_ty: Ty<'tcx>) -> EvalR trace!("reading fat pointer extra of type {}", pointee_ty); let extra = ptr.offset(self.memory.pointer_size(), self)?; match self.tcx.struct_tail(pointee_ty).sty { - ty::TyDynamic(..) => Ok(p.to_value_with_vtable(self.memory.read_ptr(extra)?.to_ptr()?)), - ty::TySlice(..) | - ty::TyStr => Ok(p.to_value_with_len(self.memory.read_usize(extra)?)), + ty::TyDynamic(..) => Ok(p.to_value_with_vtable( + self.memory.read_ptr(extra)?.to_ptr()?, + )), + ty::TySlice(..) | ty::TyStr => Ok( + p.to_value_with_len(self.memory.read_usize(extra)?), + ), _ => bug!("unsized primval ptr read from {:?}", pointee_ty), } } @@ -1466,7 +1678,10 @@ fn try_read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Option< // if we transmute a ptr to an usize, reading it back into a primval shouldn't panic // for consistency's sake, we use the same code as above match self.memory.read_uint(ptr.to_ptr()?, size) { - Err(EvalError { kind: EvalErrorKind::ReadPointerAsBytes, .. }) if size == self.memory.pointer_size() => self.memory.read_ptr(ptr.to_ptr()?)?.into_inner_primval(), + Err(EvalError { kind: EvalErrorKind::ReadPointerAsBytes, .. 
}) + if size == self.memory.pointer_size() => { + self.memory.read_ptr(ptr.to_ptr()?)?.into_inner_primval() + } other => PrimVal::from_u128(other?), } } @@ -1493,7 +1708,7 @@ fn try_read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Option< } else { return Ok(None); } - }, + } _ => return Ok(None), }; @@ -1540,14 +1755,17 @@ fn unsize_into_ptr( // traits, and hence never actually require an actual // change to the vtable. self.write_value(src, dest, dest_ty) - }, + } (_, &ty::TyDynamic(ref data, _)) => { - let trait_ref = data.principal().unwrap().with_self_ty(self.tcx, src_pointee_ty); + let trait_ref = data.principal().unwrap().with_self_ty( + self.tcx, + src_pointee_ty, + ); let trait_ref = self.tcx.erase_regions(&trait_ref); let vtable = self.get_vtable(src_pointee_ty, trait_ref)?; let ptr = src.into_ptr(&self.memory)?; self.write_value(ptr.to_value_with_vtable(vtable), dest, dest_ty) - }, + } _ => bug!("invalid unsizing {:?} -> {:?}", src_ty, dest_ty), } @@ -1563,13 +1781,22 @@ fn unsize_into( match (&src_ty.sty, &dest_ty.sty) { (&ty::TyRef(_, ref s), &ty::TyRef(_, ref d)) | (&ty::TyRef(_, ref s), &ty::TyRawPtr(ref d)) | - (&ty::TyRawPtr(ref s), &ty::TyRawPtr(ref d)) => self.unsize_into_ptr(src, src_ty, dest, dest_ty, s.ty, d.ty), + (&ty::TyRawPtr(ref s), &ty::TyRawPtr(ref d)) => { + self.unsize_into_ptr(src, src_ty, dest, dest_ty, s.ty, d.ty) + } (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) => { if def_a.is_box() || def_b.is_box() { if !def_a.is_box() || !def_b.is_box() { panic!("invalid unsizing between {:?} -> {:?}", src_ty, dest_ty); } - return self.unsize_into_ptr(src, src_ty, dest, dest_ty, src_ty.boxed_ty(), dest_ty.boxed_ty()); + return self.unsize_into_ptr( + src, + src_ty, + dest, + dest_ty, + src_ty.boxed_ty(), + dest_ty.boxed_ty(), + ); } if self.ty_to_primval_kind(src_ty).is_ok() { // TODO: We ignore the packed flag here @@ -1610,12 +1837,23 @@ fn unsize_into( if src_fty == dst_fty { self.copy(src_f_ptr, dst_f_ptr.into(), src_fty)?; } else { - self.unsize_into(Value::by_ref(src_f_ptr), src_fty, Lvalue::from_ptr(dst_f_ptr), dst_fty)?; + self.unsize_into( + Value::by_ref(src_f_ptr), + src_fty, + Lvalue::from_ptr(dst_f_ptr), + dst_fty, + )?; } } Ok(()) } - _ => bug!("unsize_into: invalid conversion: {:?} -> {:?}", src_ty, dest_ty), + _ => { + bug!( + "unsize_into: invalid conversion: {:?} -> {:?}", + src_ty, + dest_ty + ) + } } } @@ -1631,27 +1869,36 @@ pub fn dump_local(&self, lvalue: Lvalue) { write!(msg, ":").unwrap(); match self.stack[frame].get_local(local) { - Err(EvalError{ kind: EvalErrorKind::DeadLocal, ..} ) => { + Err(EvalError { kind: EvalErrorKind::DeadLocal, .. 
}) => { write!(msg, " is dead").unwrap(); } Err(err) => { panic!("Failed to access local: {:?}", err); } - Ok(Value::ByRef(PtrAndAlign{ ptr, aligned })) => match ptr.into_inner_primval() { - PrimVal::Ptr(ptr) => { - write!(msg, " by {}ref:", if aligned { "" } else { "unaligned " }).unwrap(); - allocs.push(ptr.alloc_id); - }, - ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(), - }, + Ok(Value::ByRef(PtrAndAlign { ptr, aligned })) => { + match ptr.into_inner_primval() { + PrimVal::Ptr(ptr) => { + write!(msg, " by {}ref:", if aligned { "" } else { "unaligned " }) + .unwrap(); + allocs.push(ptr.alloc_id); + } + ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(), + } + } Ok(Value::ByVal(val)) => { write!(msg, " {:?}", val).unwrap(); - if let PrimVal::Ptr(ptr) = val { allocs.push(ptr.alloc_id); } + if let PrimVal::Ptr(ptr) = val { + allocs.push(ptr.alloc_id); + } } Ok(Value::ByValPair(val1, val2)) => { write!(msg, " ({:?}, {:?})", val1, val2).unwrap(); - if let PrimVal::Ptr(ptr) = val1 { allocs.push(ptr.alloc_id); } - if let PrimVal::Ptr(ptr) = val2 { allocs.push(ptr.alloc_id); } + if let PrimVal::Ptr(ptr) = val1 { + allocs.push(ptr.alloc_id); + } + if let PrimVal::Ptr(ptr) = val2 { + allocs.push(ptr.alloc_id); + } } } @@ -1663,7 +1910,7 @@ pub fn dump_local(&self, lvalue: Lvalue) { PrimVal::Ptr(ptr) => { trace!("by {}ref:", if aligned { "" } else { "unaligned " }); self.memory.dump_alloc(ptr.alloc_id); - }, + } ptr => trace!(" integral by ref: {:?}", ptr), } } @@ -1671,13 +1918,9 @@ pub fn dump_local(&self, lvalue: Lvalue) { } /// Convenience function to ensure correct usage of locals - pub fn modify_local<F>( - &mut self, - frame: usize, - local: mir::Local, - f: F, - ) -> EvalResult<'tcx> - where F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>, + pub fn modify_local<F>(&mut self, frame: usize, local: mir::Local, f: F) -> EvalResult<'tcx> + where + F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>, { let val = self.stack[frame].get_local(local)?; let new_val = f(self, val)?; @@ -1704,7 +1947,8 @@ pub fn report(&self, e: &mut EvalError) { break 'frames; } else if name.starts_with("backtrace::capture::Backtrace::new") // debug mode produces funky symbol names - || name.starts_with("backtrace::capture::{{impl}}::new") { + || name.starts_with("backtrace::capture::{{impl}}::new") + { // don't report backtrace internals skip_init = false; continue 'frames; @@ -1715,7 +1959,7 @@ pub fn report(&self, e: &mut EvalError) { continue; } for symbol in frame.symbols() { - write!(trace_text, "{}: " , i).unwrap(); + write!(trace_text, "{}: ", i).unwrap(); if let Some(name) = symbol.name() { write!(trace_text, "{}\n", name).unwrap(); } else { @@ -1745,7 +1989,9 @@ pub fn report(&self, e: &mut EvalError) { }; let mut err = self.tcx.sess.struct_span_err(span, &e.to_string()); for &Frame { instance, span, ..
} in self.stack().iter().rev() { - if self.tcx.def_key(instance.def_id()).disambiguated_data.data == DefPathData::ClosureExpr { + if self.tcx.def_key(instance.def_id()).disambiguated_data.data == + DefPathData::ClosureExpr + { err.span_note(span, "inside call to closure"); continue; } @@ -1817,7 +2063,7 @@ pub fn is_inhabited<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> } /// FIXME: expose trans::monomorphize::resolve_closure -pub fn resolve_closure<'a, 'tcx> ( +pub fn resolve_closure<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, substs: ty::ClosureSubsts<'tcx>, @@ -1826,7 +2072,7 @@ pub fn resolve_closure<'a, 'tcx> ( let actual_kind = tcx.closure_kind(def_id); match needs_fn_once_adapter_shim(actual_kind, requested_kind) { Ok(true) => fn_once_adapter_instance(tcx, def_id, substs), - _ => ty::Instance::new(def_id, substs.substs) + _ => ty::Instance::new(def_id, substs.substs), } } @@ -1835,40 +2081,39 @@ fn fn_once_adapter_instance<'a, 'tcx>( closure_did: DefId, substs: ty::ClosureSubsts<'tcx>, ) -> ty::Instance<'tcx> { - debug!("fn_once_adapter_shim({:?}, {:?})", - closure_did, - substs); + debug!("fn_once_adapter_shim({:?}, {:?})", closure_did, substs); let fn_once = tcx.lang_items.fn_once_trait().unwrap(); let call_once = tcx.associated_items(fn_once) .find(|it| it.kind == ty::AssociatedKind::Method) - .unwrap().def_id; + .unwrap() + .def_id; let def = ty::InstanceDef::ClosureOnceShim { call_once }; - let self_ty = tcx.mk_closure_from_closure_substs( - closure_did, substs); + let self_ty = tcx.mk_closure_from_closure_substs(closure_did, substs); let sig = tcx.fn_sig(closure_did).subst(tcx, substs.substs); let sig = tcx.erase_late_bound_regions_and_normalize(&sig); assert_eq!(sig.inputs().len(), 1); - let substs = tcx.mk_substs([ - Kind::from(self_ty), - Kind::from(sig.inputs()[0]), - ].iter().cloned()); + let substs = tcx.mk_substs( + [Kind::from(self_ty), Kind::from(sig.inputs()[0])] + .iter() + .cloned(), + ); debug!("fn_once_adapter_shim: self_ty={:?} sig={:?}", self_ty, sig); ty::Instance { def, substs } } -fn needs_fn_once_adapter_shim(actual_closure_kind: ty::ClosureKind, - trait_closure_kind: ty::ClosureKind) - -> Result -{ +fn needs_fn_once_adapter_shim( + actual_closure_kind: ty::ClosureKind, + trait_closure_kind: ty::ClosureKind, +) -> Result { match (actual_closure_kind, trait_closure_kind) { (ty::ClosureKind::Fn, ty::ClosureKind::Fn) | (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) | (ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) => { // No adapter needed. - Ok(false) + Ok(false) } (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => { // The closure fn `llfn` is a `fn(&self, ...)`. We want a @@ -1897,10 +2142,9 @@ fn needs_fn_once_adapter_shim(actual_closure_kind: ty::ClosureKind, pub fn resolve<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, - substs: &'tcx Substs<'tcx> + substs: &'tcx Substs<'tcx>, ) -> ty::Instance<'tcx> { - debug!("resolve(def_id={:?}, substs={:?})", - def_id, substs); + debug!("resolve(def_id={:?}, substs={:?})", def_id, substs); let result = if let Some(trait_def_id) = tcx.trait_of_item(def_id) { debug!(" => associated item, attempting to find impl"); let item = tcx.associated_item(def_id); @@ -1908,12 +2152,11 @@ pub fn resolve<'a, 'tcx>( } else { let item_type = def_ty(tcx, def_id, substs); let def = match item_type.sty { - ty::TyFnDef(..) if { - let f = item_type.fn_sig(tcx); - f.abi() == Abi::RustIntrinsic || - f.abi() == Abi::PlatformIntrinsic - } => - { + ty::TyFnDef(..) 
+ if { + let f = item_type.fn_sig(tcx); + f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic + } => { debug!(" => intrinsic"); ty::InstanceDef::Intrinsic(def_id) } @@ -1935,8 +2178,12 @@ pub fn resolve<'a, 'tcx>( }; ty::Instance { def, substs } }; - debug!("resolve(def_id={:?}, substs={:?}) = {}", - def_id, substs, result); + debug!( + "resolve(def_id={:?}, substs={:?}) = {}", + def_id, + substs, + result + ); result } @@ -1969,7 +2216,7 @@ pub fn needs_drop_glue<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, t: Ty<'tcx>) -> bo true } } - _ => true + _ => true, } } @@ -1977,13 +2224,17 @@ fn resolve_associated_item<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_item: &ty::AssociatedItem, trait_id: DefId, - rcvr_substs: &'tcx Substs<'tcx> + rcvr_substs: &'tcx Substs<'tcx>, ) -> ty::Instance<'tcx> { let def_id = trait_item.def_id; - debug!("resolve_associated_item(trait_item={:?}, \ + debug!( + "resolve_associated_item(trait_item={:?}, \ trait_id={:?}, \ rcvr_substs={:?})", - def_id, trait_id, rcvr_substs); + def_id, + trait_id, + rcvr_substs + ); let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs); let vtbl = fulfill_obligation(tcx, DUMMY_SP, ty::Binder(trait_ref)); @@ -1992,56 +2243,64 @@ fn resolve_associated_item<'a, 'tcx>( // the actual function: match vtbl { ::rustc::traits::VtableImpl(impl_data) => { - let (def_id, substs) = ::rustc::traits::find_associated_item( - tcx, trait_item, rcvr_substs, &impl_data); + let (def_id, substs) = + ::rustc::traits::find_associated_item(tcx, trait_item, rcvr_substs, &impl_data); let substs = tcx.erase_regions(&substs); ty::Instance::new(def_id, substs) } ::rustc::traits::VtableClosure(closure_data) => { let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap(); - resolve_closure(tcx, closure_data.closure_def_id, closure_data.substs, - trait_closure_kind) + resolve_closure( + tcx, + closure_data.closure_def_id, + closure_data.substs, + trait_closure_kind, + ) } ::rustc::traits::VtableFnPointer(ref data) => { ty::Instance { def: ty::InstanceDef::FnPtrShim(trait_item.def_id, data.fn_ty), - substs: rcvr_substs + substs: rcvr_substs, } } ::rustc::traits::VtableObject(ref data) => { let index = tcx.get_vtable_index_of_object_method(data, def_id); ty::Instance { def: ty::InstanceDef::Virtual(def_id, index), - substs: rcvr_substs + substs: rcvr_substs, } } - _ => { - bug!("static call to invalid vtable: {:?}", vtbl) - } + _ => bug!("static call to invalid vtable: {:?}", vtbl), } } -pub fn def_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - def_id: DefId, - substs: &'tcx Substs<'tcx>) - -> Ty<'tcx> -{ +pub fn def_ty<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId, + substs: &'tcx Substs<'tcx>, +) -> Ty<'tcx> { let ty = tcx.type_of(def_id); apply_param_substs(tcx, substs, &ty) } /// Monomorphizes a type from the AST by first applying the in-scope /// substitutions and then normalizing any associated types. 
-pub fn apply_param_substs<'a, 'tcx, T>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_substs: &Substs<'tcx>, - value: &T) - -> T - where T: ::rustc::infer::TransNormalize<'tcx> +pub fn apply_param_substs<'a, 'tcx, T>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_substs: &Substs<'tcx>, + value: &T, +) -> T +where + T: ::rustc::infer::TransNormalize<'tcx>, { - debug!("apply_param_substs(param_substs={:?}, value={:?})", param_substs, value); + debug!( + "apply_param_substs(param_substs={:?}, value={:?})", + param_substs, + value + ); let substituted = value.subst(tcx, param_substs); let substituted = tcx.erase_regions(&substituted); - AssociatedTypeNormalizer{ tcx }.fold(&substituted) + AssociatedTypeNormalizer { tcx }.fold(&substituted) } @@ -2082,27 +2341,31 @@ fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { /// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we /// do not (necessarily) resolve all nested obligations on the impl. Note that type check should /// guarantee to us that all nested obligations *could be* resolved if we wanted to. -fn fulfill_obligation<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - span: Span, - trait_ref: ty::PolyTraitRef<'tcx>) - -> traits::Vtable<'tcx, ()> -{ +fn fulfill_obligation<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + span: Span, + trait_ref: ty::PolyTraitRef<'tcx>, +) -> traits::Vtable<'tcx, ()> { // Remove any references to regions; this helps improve caching. let trait_ref = tcx.erase_regions(&trait_ref); - debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})", - trait_ref, trait_ref.def_id()); + debug!( + "trans::fulfill_obligation(trait_ref={:?}, def_id={:?})", + trait_ref, + trait_ref.def_id() + ); // Do the initial selection for the obligation. This yields the // shallow result we are looking for -- that is, what specific impl. tcx.infer_ctxt().enter(|infcx| { let mut selcx = traits::SelectionContext::new(&infcx); - let obligation_cause = traits::ObligationCause::misc(span, - ast::DUMMY_NODE_ID); - let obligation = traits::Obligation::new(obligation_cause, - ty::ParamEnv::empty(Reveal::All), - trait_ref.to_poly_trait_predicate()); + let obligation_cause = traits::ObligationCause::misc(span, ast::DUMMY_NODE_ID); + let obligation = traits::Obligation::new( + obligation_cause, + ty::ParamEnv::empty(Reveal::All), + trait_ref.to_poly_trait_predicate(), + ); let selection = match selcx.select(&obligation) { Ok(Some(selection)) => selection, @@ -2113,16 +2376,24 @@ fn fulfill_obligation<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // leading to an ambiguous result. So report this as an // overflow bug, since I believe this is the only case // where ambiguity can result. - debug!("Encountered ambiguity selecting `{:?}` during trans, \ + debug!( + "Encountered ambiguity selecting `{:?}` during trans, \ presuming due to overflow", - trait_ref); - tcx.sess.span_fatal(span, + trait_ref + ); + tcx.sess.span_fatal( + span, "reached the recursion limit during monomorphization \ - (selection ambiguity)"); + (selection ambiguity)", + ); } Err(e) => { - span_bug!(span, "Encountered error `{:?}` selecting `{:?}` during trans", - e, trait_ref) + span_bug!( + span, + "Encountered error `{:?}` selecting `{:?}` during trans", + e, + trait_ref + ) } }; @@ -2133,7 +2404,10 @@ fn fulfill_obligation<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // inference of the impl's type parameters. 
let mut fulfill_cx = traits::FulfillmentContext::new(); let vtable = selection.map(|predicate| { - debug!("fulfill_obligation: register_predicate_obligation {:?}", predicate); + debug!( + "fulfill_obligation: register_predicate_obligation {:?}", + predicate + ); fulfill_cx.register_predicate_obligation(&infcx, predicate); }); let vtable = infcx.drain_fulfillment_cx_or_panic(span, &mut fulfill_cx, &vtable); @@ -2146,8 +2420,7 @@ fn fulfill_obligation<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, pub fn resolve_drop_in_place<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>, -) -> ty::Instance<'tcx> -{ +) -> ty::Instance<'tcx> { let def_id = tcx.require_lang_item(::rustc::middle::lang_items::DropInPlaceFnLangItem); let substs = tcx.intern_substs(&[Kind::from(ty)]); resolve(tcx, def_id, substs) diff --git a/src/librustc_mir/interpret/lvalue.rs b/src/librustc_mir/interpret/lvalue.rs index 02af68d384f..86479acd2a4 100644 --- a/src/librustc_mir/interpret/lvalue.rs +++ b/src/librustc_mir/interpret/lvalue.rs @@ -3,14 +3,7 @@ use rustc::ty::{self, Ty}; use rustc_data_structures::indexed_vec::Idx; -use super::{ - EvalResult, - EvalContext, - MemoryPointer, - PrimVal, Value, Pointer, - Machine, - PtrAndAlign, -}; +use super::{EvalResult, EvalContext, MemoryPointer, PrimVal, Value, Pointer, Machine, PtrAndAlign}; #[derive(Copy, Clone, Debug)] pub enum Lvalue { @@ -25,10 +18,7 @@ pub enum Lvalue { /// An lvalue referring to a value on the stack. Represented by a stack frame index paired with /// a Mir local index. - Local { - frame: usize, - local: mir::Local, - }, + Local { frame: usize, local: mir::Local }, } #[derive(Copy, Clone, Debug, Eq, PartialEq)] @@ -57,7 +47,10 @@ pub fn undef() -> Self { } pub fn from_primval_ptr(ptr: Pointer) -> Self { - Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned: true }, extra: LvalueExtra::None } + Lvalue::Ptr { + ptr: PtrAndAlign { ptr, aligned: true }, + extra: LvalueExtra::None, + } } pub fn from_ptr(ptr: MemoryPointer) -> Self { @@ -87,7 +80,12 @@ pub(super) fn elem_ty_and_len(self, ty: Ty<'tcx>) -> (Ty<'tcx>, u64) { ty::TySlice(elem) => { match self { Lvalue::Ptr { extra: LvalueExtra::Length(len), .. 
} => (elem, len), - _ => bug!("elem_ty_and_len of a TySlice given non-slice lvalue: {:?}", self), + _ => { + bug!( + "elem_ty_and_len of a TySlice given non-slice lvalue: {:?}", + self + ) + } } } @@ -99,7 +97,10 @@ pub(super) fn elem_ty_and_len(self, ty: Ty<'tcx>) -> (Ty<'tcx>, u64) { impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { /// Reads a value from the lvalue without going through the intermediate step of obtaining /// a `miri::Lvalue` - pub fn try_read_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Option> { + pub fn try_read_lvalue( + &mut self, + lvalue: &mir::Lvalue<'tcx>, + ) -> EvalResult<'tcx, Option> { use rustc::mir::Lvalue::*; match *lvalue { // Might allow this in the future, right now there's no way to do this from Rust code anyway @@ -109,14 +110,22 @@ pub fn try_read_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx // Directly reading a static will always succeed Static(ref static_) => { let instance = ty::Instance::mono(self.tcx, static_.def_id); - let cid = GlobalId { instance, promoted: None }; - Ok(Some(Value::ByRef(*self.globals.get(&cid).expect("global not cached")))) - }, + let cid = GlobalId { + instance, + promoted: None, + }; + Ok(Some(Value::ByRef( + *self.globals.get(&cid).expect("global not cached"), + ))) + } Projection(ref proj) => self.try_read_lvalue_projection(proj), } } - fn try_read_lvalue_projection(&mut self, proj: &mir::LvalueProjection<'tcx>) -> EvalResult<'tcx, Option> { + fn try_read_lvalue_projection( + &mut self, + proj: &mir::LvalueProjection<'tcx>, + ) -> EvalResult<'tcx, Option> { use rustc::mir::ProjectionElem::*; let base = match self.try_read_lvalue(&proj.base)? { Some(base) => base, @@ -147,7 +156,10 @@ fn try_read_lvalue_projection(&mut self, proj: &mir::LvalueProjection<'tcx>) -> } /// Returns a value and (in case of a ByRef) if we are supposed to use aligned accesses. - pub(super) fn eval_and_read_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Value> { + pub(super) fn eval_and_read_lvalue( + &mut self, + lvalue: &mir::Lvalue<'tcx>, + ) -> EvalResult<'tcx, Value> { // Shortcut for things like accessing a fat pointer's field, // which would otherwise (in the `eval_lvalue` path) require moving a `ByValPair` to memory // and returning an `Lvalue::Ptr` to it @@ -164,9 +176,7 @@ pub fn read_lvalue(&self, lvalue: Lvalue) -> EvalResult<'tcx, Value> { assert_eq!(extra, LvalueExtra::None); Ok(Value::ByRef(ptr)) } - Lvalue::Local { frame, local } => { - self.stack[frame].get_local(local) - } + Lvalue::Local { frame, local } => self.stack[frame].get_local(local), } } @@ -174,11 +184,17 @@ pub fn eval_lvalue(&mut self, mir_lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx use rustc::mir::Lvalue::*; let lvalue = match *mir_lvalue { Local(mir::RETURN_POINTER) => self.frame().return_lvalue, - Local(local) => Lvalue::Local { frame: self.cur_frame(), local }, + Local(local) => Lvalue::Local { + frame: self.cur_frame(), + local, + }, Static(ref static_) => { let instance = ty::Instance::mono(self.tcx, static_.def_id); - let gid = GlobalId { instance, promoted: None }; + let gid = GlobalId { + instance, + promoted: None, + }; Lvalue::Ptr { ptr: *self.globals.get(&gid).expect("uncached global"), extra: LvalueExtra::None, @@ -209,9 +225,7 @@ pub fn lvalue_field( let base_layout = self.type_layout(base_ty)?; use rustc::ty::layout::Layout::*; let (offset, packed) = match *base_layout { - Univariant { ref variant, .. 
} => { - (variant.offsets[field_index], variant.packed) - }, + Univariant { ref variant, .. } => (variant.offsets[field_index], variant.packed), General { ref variants, .. } => { let (_, base_extra) = base.to_ptr_extra_aligned(); @@ -249,8 +263,13 @@ pub fn lvalue_field( ty::TyArray(elem_ty, n) => { assert!(field < n as u64); self.type_size(elem_ty)?.expect("array elements are sized") as u64 - }, - _ => bug!("lvalue_field: got Array layout but non-array type {:?}", base_ty), + } + _ => { + bug!( + "lvalue_field: got Array layout but non-array type {:?}", + base_ty + ) + } }; (Size::from_bytes(field * elem_size), false) } @@ -267,22 +286,36 @@ pub fn lvalue_field( // Do not allocate in trivial cases let (base_ptr, base_extra) = match base { Lvalue::Ptr { ptr, extra } => (ptr, extra), - Lvalue::Local { frame, local } => match self.stack[frame].get_local(local)? { - // in case the type has a single field, just return the value - Value::ByVal(_) if self.get_field_count(base_ty).map(|c| c == 1).unwrap_or(false) => { - assert_eq!(offset.bytes(), 0, "ByVal can only have 1 non zst field with offset 0"); - return Ok(base); - }, - Value::ByRef{..} | - Value::ByValPair(..) | - Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(), - }, + Lvalue::Local { frame, local } => { + match self.stack[frame].get_local(local)? { + // in case the type has a single field, just return the value + Value::ByVal(_) + if self.get_field_count(base_ty).map(|c| c == 1).unwrap_or( + false, + ) => { + assert_eq!( + offset.bytes(), + 0, + "ByVal can only have 1 non zst field with offset 0" + ); + return Ok(base); + } + Value::ByRef { .. } | + Value::ByValPair(..) | + Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(), + } + } }; let offset = match base_extra { LvalueExtra::Vtable(tab) => { - let (_, align) = self.size_and_align_of_dst(base_ty, base_ptr.ptr.to_value_with_vtable(tab))?; - offset.abi_align(Align::from_bytes(align, align).unwrap()).bytes() + let (_, align) = self.size_and_align_of_dst( + base_ty, + base_ptr.ptr.to_value_with_vtable(tab), + )?; + offset + .abi_align(Align::from_bytes(align, align).unwrap()) + .bytes() } _ => offset.bytes(), }; @@ -299,41 +332,63 @@ pub fn lvalue_field( } else { match base_extra { LvalueExtra::None => bug!("expected fat pointer"), - LvalueExtra::DowncastVariant(..) => - bug!("Rust doesn't support unsized fields in enum variants"), + LvalueExtra::DowncastVariant(..) => { + bug!("Rust doesn't support unsized fields in enum variants") + } LvalueExtra::Vtable(_) | - LvalueExtra::Length(_) => {}, + LvalueExtra::Length(_) => {} } base_extra }; - Ok(Lvalue::Ptr { ptr, extra } ) + Ok(Lvalue::Ptr { ptr, extra }) } pub(super) fn val_to_lvalue(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Lvalue> { Ok(match self.tcx.struct_tail(ty).sty { ty::TyDynamic(..) 
=> { let (ptr, vtable) = val.into_ptr_vtable_pair(&self.memory)?; - Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned: true }, extra: LvalueExtra::Vtable(vtable) } - }, + Lvalue::Ptr { + ptr: PtrAndAlign { ptr, aligned: true }, + extra: LvalueExtra::Vtable(vtable), + } + } ty::TyStr | ty::TySlice(_) => { let (ptr, len) = val.into_slice(&self.memory)?; - Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned: true }, extra: LvalueExtra::Length(len) } - }, + Lvalue::Ptr { + ptr: PtrAndAlign { ptr, aligned: true }, + extra: LvalueExtra::Length(len), + } + } _ => Lvalue::from_primval_ptr(val.into_ptr(&self.memory)?), }) } - pub(super) fn lvalue_index(&mut self, base: Lvalue, outer_ty: Ty<'tcx>, n: u64) -> EvalResult<'tcx, Lvalue> { + pub(super) fn lvalue_index( + &mut self, + base: Lvalue, + outer_ty: Ty<'tcx>, + n: u64, + ) -> EvalResult<'tcx, Lvalue> { // Taking the outer type here may seem odd; it's needed because for array types, the outer type gives away the length. let base = self.force_allocation(base)?; let (base_ptr, _) = base.to_ptr_extra_aligned(); let (elem_ty, len) = base.elem_ty_and_len(outer_ty); - let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized"); - assert!(n < len, "Tried to access element {} of array/slice with length {}", n, len); + let elem_size = self.type_size(elem_ty)?.expect( + "slice element must be sized", + ); + assert!( + n < len, + "Tried to access element {} of array/slice with length {}", + n, + len + ); let ptr = base_ptr.offset(n * elem_size, self.memory.layout)?; - Ok(Lvalue::Ptr { ptr, extra: LvalueExtra::None }) + Ok(Lvalue::Ptr { + ptr, + extra: LvalueExtra::None, + }) } pub(super) fn eval_lvalue_projection( @@ -357,7 +412,8 @@ pub(super) fn eval_lvalue_projection( use rustc::ty::layout::Layout::*; let extra = match *base_layout { General { .. } => LvalueExtra::DowncastVariant(variant), - RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => base_extra, + RawNullablePointer { .. } | + StructWrappedNullablePointer { .. 
} => base_extra, _ => bug!("variant downcast on non-aggregate: {:?}", base_layout), }; (base_ptr, extra) @@ -386,13 +442,19 @@ pub(super) fn eval_lvalue_projection( return self.lvalue_index(base, base_ty, n); } - ConstantIndex { offset, min_length, from_end } => { + ConstantIndex { + offset, + min_length, + from_end, + } => { // FIXME(solson) let base = self.force_allocation(base)?; let (base_ptr, _) = base.to_ptr_extra_aligned(); let (elem_ty, n) = base.elem_ty_and_len(base_ty); - let elem_size = self.type_size(elem_ty)?.expect("sequence element must be sized"); + let elem_size = self.type_size(elem_ty)?.expect( + "sequence element must be sized", + ); assert!(n >= min_length as u64); let index = if from_end { @@ -411,7 +473,9 @@ pub(super) fn eval_lvalue_projection( let (base_ptr, _) = base.to_ptr_extra_aligned(); let (elem_ty, n) = base.elem_ty_and_len(base_ty); - let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized"); + let elem_size = self.type_size(elem_ty)?.expect( + "slice element must be sized", + ); assert!(u64::from(from) <= n - u64::from(to)); let ptr = base_ptr.offset(u64::from(from) * elem_size, &self)?; let extra = LvalueExtra::Length(n - u64::from(to) - u64::from(from)); @@ -423,6 +487,9 @@ pub(super) fn eval_lvalue_projection( } pub(super) fn lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> { - self.monomorphize(lvalue.ty(self.mir(), self.tcx).to_ty(self.tcx), self.substs()) + self.monomorphize( + lvalue.ty(self.mir(), self.tcx).to_ty(self.tcx), + self.substs(), + ) } } diff --git a/src/librustc_mir/interpret/machine.rs b/src/librustc_mir/interpret/machine.rs index c65c3f2e103..dbe9f97dc1d 100644 --- a/src/librustc_mir/interpret/machine.rs +++ b/src/librustc_mir/interpret/machine.rs @@ -2,12 +2,7 @@ //! This separation exists to ensure that no fancy miri features like //! interpreting common C functions leak into CTFE. -use super::{ - EvalResult, - EvalContext, - Lvalue, - PrimVal -}; +use super::{EvalResult, EvalContext, Lvalue, PrimVal}; use rustc::{mir, ty}; use syntax::codemap::Span; @@ -76,4 +71,3 @@ fn box_alloc<'a>( ty: ty::Ty<'tcx>, ) -> EvalResult<'tcx, PrimVal>; } - diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 75acdbe778c..56c051dcfad 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -8,13 +8,8 @@ use syntax::ast::Mutability; use rustc::middle::region::CodeExtent; -use super::{ - EvalResult, EvalErrorKind, - PrimVal, Pointer, - EvalContext, DynamicLifetime, - Machine, - RangeMap, -}; +use super::{EvalResult, EvalErrorKind, PrimVal, Pointer, EvalContext, DynamicLifetime, Machine, + RangeMap}; //////////////////////////////////////////////////////////////////////////////// // Locks @@ -52,7 +47,10 @@ fn default() -> Self { impl LockInfo { fn new(lock: Lock) -> LockInfo { - LockInfo { suspended: HashMap::new(), active: lock } + LockInfo { + suspended: HashMap::new(), + active: lock, + } } fn access_permitted(&self, frame: Option, access: AccessKind) -> bool { @@ -63,11 +61,11 @@ fn access_permitted(&self, frame: Option, access: AccessKind) -> bool { assert!(!lfts.is_empty(), "Someone left an empty read lock behind."); // Read access to read-locked region is okay, no matter who's holding the read lock. true - }, + } (&WriteLock(ref lft), _) => { // All access is okay if we are the ones holding it Some(lft.frame) == frame - }, + } _ => false, // Nothing else is okay. 
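// Summary of the arms above: read access to a read-locked range is always
// permitted; access to a write-locked range is permitted only for the frame
// holding the lock (`Some(lft.frame) == frame`); every other combination is
// denied. E.g. a read by a hypothetical frame 1 against a WriteLock held by
// frame 0 falls through to this `false` arm.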
} } @@ -152,9 +150,15 @@ pub struct Allocation { } impl Allocation { - fn check_locks<'tcx>(&self, frame: Option, offset: u64, len: u64, access: AccessKind) -> Result<(), LockInfo> { + fn check_locks<'tcx>( + &self, + frame: Option, + offset: u64, + len: u64, + access: AccessKind, + ) -> Result<(), LockInfo> { if len == 0 { - return Ok(()) + return Ok(()); } for lock in self.locks.iter(offset, len) { // Check if the lock is in conflict with the access. @@ -193,7 +197,10 @@ pub fn new(alloc_id: AllocId, offset: u64) -> Self { } pub(crate) fn wrapping_signed_offset(self, i: i64, cx: C) -> Self { - MemoryPointer::new(self.alloc_id, cx.data_layout().wrapping_signed_offset(self.offset, i)) + MemoryPointer::new( + self.alloc_id, + cx.data_layout().wrapping_signed_offset(self.offset, i), + ) } pub fn overflowing_signed_offset(self, i: i128, cx: C) -> (Self, bool) { @@ -202,7 +209,10 @@ pub fn overflowing_signed_offset(self, i: i128, cx: C) -> (Sel } pub(crate) fn signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { - Ok(MemoryPointer::new(self.alloc_id, cx.data_layout().signed_offset(self.offset, i)?)) + Ok(MemoryPointer::new( + self.alloc_id, + cx.data_layout().signed_offset(self.offset, i)?, + )) } pub fn overflowing_offset(self, i: u64, cx: C) -> (Self, bool) { @@ -211,7 +221,10 @@ pub fn overflowing_offset(self, i: u64, cx: C) -> (Self, bool) } pub fn offset(self, i: u64, cx: C) -> EvalResult<'tcx, Self> { - Ok(MemoryPointer::new(self.alloc_id, cx.data_layout().offset(self.offset, i)?)) + Ok(MemoryPointer::new( + self.alloc_id, + cx.data_layout().offset(self.offset, i)?, + )) } } @@ -276,8 +289,12 @@ pub fn new(layout: &'a TargetDataLayout, max_memory: u64, data: M::MemoryData) - } } - pub fn allocations<'x>(&'x self) -> impl Iterator)> { - self.alloc_map.iter().map(|(&id, alloc)| (AllocIdKind::Runtime(id).into_alloc_id(), alloc)) + pub fn allocations<'x>( + &'x self, + ) -> impl Iterator)> { + self.alloc_map.iter().map(|(&id, alloc)| { + (AllocIdKind::Runtime(id).into_alloc_id(), alloc) + }) } pub fn create_fn_alloc(&mut self, instance: ty::Instance<'tcx>) -> MemoryPointer { @@ -297,10 +314,20 @@ pub fn allocate_cached(&mut self, bytes: &[u8]) -> EvalResult<'tcx, MemoryPointe return Ok(MemoryPointer::new(alloc_id, 0)); } - let ptr = self.allocate(bytes.len() as u64, 1, MemoryKind::UninitializedStatic)?; + let ptr = self.allocate( + bytes.len() as u64, + 1, + MemoryKind::UninitializedStatic, + )?; self.write_bytes(ptr.into(), bytes)?; - self.mark_static_initalized(ptr.alloc_id, Mutability::Immutable)?; - self.literal_alloc_cache.insert(bytes.to_vec(), ptr.alloc_id); + self.mark_static_initalized( + ptr.alloc_id, + Mutability::Immutable, + )?; + self.literal_alloc_cache.insert( + bytes.to_vec(), + ptr.alloc_id, + ); Ok(ptr) } @@ -334,7 +361,10 @@ pub fn allocate( let id = self.next_alloc_id; self.next_alloc_id += 1; self.alloc_map.insert(id, alloc); - Ok(MemoryPointer::new(AllocIdKind::Runtime(id).into_alloc_id(), 0)) + Ok(MemoryPointer::new( + AllocIdKind::Runtime(id).into_alloc_id(), + 0, + )) } pub fn reallocate( @@ -353,13 +383,23 @@ pub fn reallocate( } if let Ok(alloc) = self.get(ptr.alloc_id) { if alloc.kind != kind { - return err!(ReallocatedWrongMemoryKind(format!("{:?}", alloc.kind), format!("{:?}", kind))); + return err!(ReallocatedWrongMemoryKind( + format!("{:?}", alloc.kind), + format!("{:?}", kind), + )); } } // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc" let new_ptr = self.allocate(new_size, new_align, kind)?; - self.copy(ptr.into(),
new_ptr.into(), min(old_size, new_size), min(old_align, new_align), /*nonoverlapping*/true)?; + self.copy( + ptr.into(), + new_ptr.into(), + min(old_size, new_size), + min(old_align, new_align), + /*nonoverlapping*/ + true, + )?; self.deallocate(ptr, Some((old_size, old_align)), kind)?; Ok(new_ptr) @@ -376,8 +416,12 @@ pub fn deallocate( } let alloc_id = match ptr.alloc_id.into_alloc_id_kind() { - AllocIdKind::Function(_) => - return err!(DeallocatedWrongMemoryKind("function".to_string(), format!("{:?}", kind))), + AllocIdKind::Function(_) => { + return err!(DeallocatedWrongMemoryKind( + "function".to_string(), + format!("{:?}", kind), + )) + } AllocIdKind::Runtime(id) => id, }; @@ -391,11 +435,25 @@ pub fn deallocate( // However, we should check *something*. For now, we make sure that there is no conflicting write // lock by another frame. We *have* to permit deallocation if we hold a read lock. // TODO: Figure out the exact rules here. - alloc.check_locks(Some(self.cur_frame), 0, alloc.bytes.len() as u64, AccessKind::Read) - .map_err(|lock| EvalErrorKind::DeallocatedLockedMemory { ptr, lock: lock.active })?; + alloc + .check_locks( + Some(self.cur_frame), + 0, + alloc.bytes.len() as u64, + AccessKind::Read, + ) + .map_err(|lock| { + EvalErrorKind::DeallocatedLockedMemory { + ptr, + lock: lock.active, + } + })?; if alloc.kind != kind { - return err!(DeallocatedWrongMemoryKind(format!("{:?}", alloc.kind), format!("{:?}", kind))); + return err!(DeallocatedWrongMemoryKind( + format!("{:?}", alloc.kind), + format!("{:?}", kind), + )); } if let Some((size, align)) = size_and_align { if size != alloc.bytes.len() as u64 || align != alloc.align { @@ -429,14 +487,14 @@ pub fn check_align(&self, ptr: Pointer, align: u64) -> EvalResult<'tcx> { }); } ptr.offset - }, + } PrimVal::Bytes(bytes) => { let v = ((bytes as u128) % (1 << self.pointer_size())) as u64; if v == 0 { return err!(InvalidNullPointerUsage); } v - }, + } PrimVal::Undef => return err!(ReadUndefBytes), }; if offset % align == 0 { @@ -453,7 +511,11 @@ pub fn check_bounds(&self, ptr: MemoryPointer, access: bool) -> EvalResult<'tcx> let alloc = self.get(ptr.alloc_id)?; let allocation_size = alloc.bytes.len() as u64; if ptr.offset > allocation_size { - return err!(PointerOutOfBounds { ptr, access, allocation_size }); + return err!(PointerOutOfBounds { + ptr, + access, + allocation_size, + }); } Ok(()) } @@ -465,21 +527,48 @@ pub(crate) fn set_cur_frame(&mut self, cur_frame: usize) { /// Locking impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { - pub(crate) fn check_locks(&self, ptr: MemoryPointer, len: u64, access: AccessKind) -> EvalResult<'tcx> { + pub(crate) fn check_locks( + &self, + ptr: MemoryPointer, + len: u64, + access: AccessKind, + ) -> EvalResult<'tcx> { if len == 0 { - return Ok(()) + return Ok(()); } let alloc = self.get(ptr.alloc_id)?; let frame = self.cur_frame; - alloc.check_locks(Some(frame), ptr.offset, len, access) - .map_err(|lock| EvalErrorKind::MemoryLockViolation { ptr, len, frame, access, lock: lock.active }.into()) + alloc + .check_locks(Some(frame), ptr.offset, len, access) + .map_err(|lock| { + EvalErrorKind::MemoryLockViolation { + ptr, + len, + frame, + access, + lock: lock.active, + }.into() + }) } /// Acquire the lock for the given lifetime - pub(crate) fn acquire_lock(&mut self, ptr: MemoryPointer, len: u64, region: Option, kind: AccessKind) -> EvalResult<'tcx> { + pub(crate) fn acquire_lock( + &mut self, + ptr: MemoryPointer, + len: u64, + region: Option, + kind: AccessKind, + ) -> EvalResult<'tcx> { 
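// Sketch of the acquisition rules, as implemented by the conflict check and
// the match below (arms partly elided in this hunk): a read lock may join an
// existing ReadLock, a WriteLock is only granted when no conflicting lock
// exists, and any other combination fails with MemoryAcquireConflict. For
// example, acquiring AccessKind::Read twice over the same (ptr, len) should
// succeed, while a subsequent AccessKind::Write over that range should error.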
let frame = self.cur_frame; assert!(len > 0); - trace!("Frame {} acquiring {:?} lock at {:?}, size {} for region {:?}", frame, kind, ptr, len, region); + trace!( + "Frame {} acquiring {:?} lock at {:?}, size {} for region {:?}", + frame, + kind, + ptr, + len, + region + ); self.check_bounds(ptr.offset(len, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow) let alloc = self.get_mut_unchecked(ptr.alloc_id)?; @@ -488,7 +577,12 @@ pub(crate) fn acquire_lock(&mut self, ptr: MemoryPointer, len: u64, region: Opti let lifetime = DynamicLifetime { frame, region }; for lock in alloc.locks.iter_mut(ptr.offset, len) { if !lock.access_permitted(None, kind) { - return err!(MemoryAcquireConflict { ptr, len, kind, lock: lock.active.clone() }); + return err!(MemoryAcquireConflict { + ptr, + len, + kind, + lock: lock.active.clone(), + }); } // See what we have to do match (&mut lock.active, kind) { @@ -503,7 +597,7 @@ pub(crate) fn acquire_lock(&mut self, ptr: MemoryPointer, len: u64, region: Opti } _ => bug!("We already checked that there is no conflicting lock"), } - }; + } Ok(()) } @@ -511,28 +605,36 @@ pub(crate) fn acquire_lock(&mut self, ptr: MemoryPointer, len: u64, region: Opti /// When releasing, if there is a read lock or someone else's write lock, that's an error. /// We *do* accept releasing a NoLock, as this can happen when a local is first acquired and later force_allocate'd. /// When suspending, the same cases are fine; we just register an additional suspension. - pub(crate) fn suspend_write_lock(&mut self, ptr: MemoryPointer, len: u64, - lock_region: Option, suspend: Option) -> EvalResult<'tcx> { + pub(crate) fn suspend_write_lock( + &mut self, + ptr: MemoryPointer, + len: u64, + lock_region: Option, + suspend: Option, + ) -> EvalResult<'tcx> { assert!(len > 0); let cur_frame = self.cur_frame; - let lock_lft = DynamicLifetime { frame: cur_frame, region: lock_region }; + let lock_lft = DynamicLifetime { + frame: cur_frame, + region: lock_region, + }; let alloc = self.get_mut_unchecked(ptr.alloc_id)?; 'locks: for lock in alloc.locks.iter_mut(ptr.offset, len) { let is_our_lock = match lock.active { - WriteLock(lft) => { - lft == lock_lft - } - ReadLock(_) | NoLock => { - false - } + WriteLock(lft) => lft == lock_lft, + ReadLock(_) | NoLock => false, }; if is_our_lock { trace!("Releasing {:?} at {:?}", lock.active, lock_lft); // Disable the lock lock.active = NoLock; } else { - trace!("Not touching {:?} at {:?} as its not our lock", lock.active, lock_lft); + trace!( + "Not touching {:?} at {:?} as it's not our lock", + lock.active, + lock_lft + ); } match suspend { Some(suspend_region) => { @@ -540,14 +642,20 @@ pub(crate) fn suspend_write_lock(&mut self, ptr: MemoryPointer, len: u64, // We just released this lock, so add a new suspension. // FIXME: Really, if there ever already is a suspension when is_our_lock, or if there is no suspension when !is_our_lock, something is amiss. // But this model is not good enough yet to prevent that. - lock.suspended.entry(lock_lft) + lock.suspended + .entry(lock_lft) .or_insert_with(|| Vec::new()) .push(suspend_region); } None => { // Make sure we did not try to release someone else's lock.
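// Illustrative failure case (frame numbers assumed for the example): if
// frame 1 reaches this point with `suspend == None` on a range whose
// WriteLock belongs to frame 0, then `is_our_lock` is false and the lock is
// not NoLock, so the check below reports InvalidMemoryLockRelease instead of
// silently unlocking memory that another frame still owns.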
if !is_our_lock && lock.active != NoLock { - return err!(InvalidMemoryLockRelease { ptr, len, frame: cur_frame, lock: lock.active.clone() }); + return err!(InvalidMemoryLockRelease { + ptr, + len, + frame: cur_frame, + lock: lock.active.clone(), + }); } } } @@ -557,13 +665,19 @@ pub(crate) fn suspend_write_lock(&mut self, ptr: MemoryPointer, len: u64, } /// Release a suspension from the write lock. If this is the last suspension or if there is no suspension, acquire the lock. - pub(crate) fn recover_write_lock(&mut self, ptr: MemoryPointer, len: u64, - lock_region: Option, suspended_region: CodeExtent, ) - -> EvalResult<'tcx> - { + pub(crate) fn recover_write_lock( + &mut self, + ptr: MemoryPointer, + len: u64, + lock_region: Option, + suspended_region: CodeExtent, + ) -> EvalResult<'tcx> { assert!(len > 0); let cur_frame = self.cur_frame; - let lock_lft = DynamicLifetime { frame: cur_frame, region: lock_region }; + let lock_lft = DynamicLifetime { + frame: cur_frame, + region: lock_region, + }; let alloc = self.get_mut_unchecked(ptr.alloc_id)?; for lock in alloc.locks.iter_mut(ptr.offset, len) { @@ -591,7 +705,8 @@ pub(crate) fn recover_write_lock(&mut self, ptr: MemoryPointer, len: u64, (got_lock, got_lock) } }; - if remove_suspension { // with NLL, we could do that up in the match above... + if remove_suspension { + // with NLL, we could do that up in the match above... assert!(got_the_lock); lock.suspended.remove(&lock_lft); } @@ -601,7 +716,12 @@ pub(crate) fn recover_write_lock(&mut self, ptr: MemoryPointer, len: u64, *active = WriteLock(lock_lft); } _ => { - return err!(MemoryAcquireConflict { ptr, len, kind: AccessKind::Write, lock: lock.active.clone() }) + return err!(MemoryAcquireConflict { + ptr, + len, + kind: AccessKind::Write, + lock: lock.active.clone(), + }) } } } @@ -612,15 +732,19 @@ pub(crate) fn recover_write_lock(&mut self, ptr: MemoryPointer, len: u64, pub(crate) fn locks_lifetime_ended(&mut self, ending_region: Option) { let cur_frame = self.cur_frame; - trace!("Releasing frame {} locks that expire at {:?}", cur_frame, ending_region); - let has_ended = |lifetime: &DynamicLifetime| -> bool { + trace!( + "Releasing frame {} locks that expire at {:?}", + cur_frame, + ending_region + ); + let has_ended = |lifetime: &DynamicLifetime| -> bool { if lifetime.frame != cur_frame { return false; } match ending_region { None => true, // When a function ends, we end *all* its locks. It's okay for a function to still have lifetime-related locks - // when it returns, that can happen e.g. with NLL when a lifetime can, but does not have to, extend beyond the - // end of a function. Same for a function still having recoveries. + // when it returns, that can happen e.g. with NLL when a lifetime can, but does not have to, extend beyond the + // end of a function. Same for a function still having recoveries. Some(ending_region) => lifetime.region == Some(ending_region), } }; @@ -629,9 +753,7 @@ pub(crate) fn locks_lifetime_ended(&mut self, ending_region: Option) for lock in alloc.locks.iter_mut_all() { // Delete everything that ends now -- i.e., keep only all the other lifetimes. 
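// Concretely: an ended WriteLock is deleted outright, while a ReadLock first
// drops only the readers whose lifetimes ended and is deleted only once no
// reader remains -- that is what the retain/is_empty pair below computes.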
let lock_ended = match lock.active { - WriteLock(ref lft) => { - has_ended(lft) - } + WriteLock(ref lft) => has_ended(lft), ReadLock(ref mut lfts) => { lfts.retain(|lft| !has_ended(lft)); lfts.is_empty() @@ -645,8 +767,9 @@ pub(crate) fn locks_lifetime_ended(&mut self, ending_region: Option) lock.suspended.retain(|lft, _suspensions| !has_ended(lft)); } // Clean up the map - alloc.locks.retain(|lock| { - match lock.active { NoLock => lock.suspended.len() > 0, _ => true } + alloc.locks.retain(|lock| match lock.active { + NoLock => lock.suspended.len() > 0, + _ => true, }); } } @@ -657,20 +780,27 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation> { match id.into_alloc_id_kind() { AllocIdKind::Function(_) => err!(DerefFunctionPointer), - AllocIdKind::Runtime(id) => match self.alloc_map.get(&id) { - Some(alloc) => Ok(alloc), - None => err!(DanglingPointerDeref), - }, + AllocIdKind::Runtime(id) => { + match self.alloc_map.get(&id) { + Some(alloc) => Ok(alloc), + None => err!(DanglingPointerDeref), + } + } } } - - fn get_mut_unchecked(&mut self, id: AllocId) -> EvalResult<'tcx, &mut Allocation> { + + fn get_mut_unchecked( + &mut self, + id: AllocId, + ) -> EvalResult<'tcx, &mut Allocation> { match id.into_alloc_id_kind() { AllocIdKind::Function(_) => err!(DerefFunctionPointer), - AllocIdKind::Runtime(id) => match self.alloc_map.get_mut(&id) { - Some(alloc) => Ok(alloc), - None => err!(DanglingPointerDeref), - }, + AllocIdKind::Runtime(id) => { + match self.alloc_map.get_mut(&id) { + Some(alloc) => Ok(alloc), + None => err!(DanglingPointerDeref), + } + } } } @@ -716,14 +846,16 @@ pub fn dump_allocs(&self, mut allocs: Vec) { AllocIdKind::Function(id) => { trace!("{} {}", msg, self.functions[id]); continue; - }, - AllocIdKind::Runtime(id) => match self.alloc_map.get(&id) { - Some(a) => a, - None => { - trace!("{} (deallocated)", msg); - continue; + } + AllocIdKind::Runtime(id) => { + match self.alloc_map.get(&id) { + Some(a) => a, + None => { + trace!("{} (deallocated)", msg); + continue; + } } - }, + } }; for i in 0..(alloc.bytes.len() as u64) { @@ -742,13 +874,21 @@ pub fn dump_allocs(&self, mut allocs: Vec) { } let immutable = match (alloc.kind, alloc.mutable) { - (MemoryKind::UninitializedStatic, _) => " (static in the process of initialization)".to_owned(), + (MemoryKind::UninitializedStatic, _) => { + " (static in the process of initialization)".to_owned() + } (MemoryKind::Static, Mutability::Mutable) => " (static mut)".to_owned(), (MemoryKind::Static, Mutability::Immutable) => " (immutable)".to_owned(), (MemoryKind::Machine(m), _) => format!(" ({:?})", m), (MemoryKind::Stack, _) => " (stack)".to_owned(), }; - trace!("{}({} bytes, alignment {}){}", msg, alloc.bytes.len(), alloc.align, immutable); + trace!( + "{}({} bytes, alignment {}){}", + msg, + alloc.bytes.len(), + alloc.align, + immutable + ); if !relocations.is_empty() { msg.clear(); @@ -772,12 +912,10 @@ pub fn leak_report(&self) -> usize { trace!("### LEAK REPORT ###"); let leaks: Vec<_> = self.alloc_map .iter() - .filter_map(|(&key, val)| { - if val.kind != MemoryKind::Static { - Some(AllocIdKind::Runtime(key).into_alloc_id()) - } else { - None - } + .filter_map(|(&key, val)| if val.kind != MemoryKind::Static { + Some(AllocIdKind::Runtime(key).into_alloc_id()) + } else { + None }) .collect(); let n = leaks.len(); @@ -788,7 +926,12 @@ pub fn leak_report(&self) -> usize { /// Byte accessors impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { - fn 
get_bytes_unchecked(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> { + fn get_bytes_unchecked( + &self, + ptr: MemoryPointer, + size: u64, + align: u64, + ) -> EvalResult<'tcx, &[u8]> { // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL if self.reads_are_aligned.get() { self.check_align(ptr.into(), align)?; @@ -805,7 +948,12 @@ fn get_bytes_unchecked(&self, ptr: MemoryPointer, size: u64, align: u64) -> Eval Ok(&alloc.bytes[offset..offset + size as usize]) } - fn get_bytes_unchecked_mut(&mut self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &mut [u8]> { + fn get_bytes_unchecked_mut( + &mut self, + ptr: MemoryPointer, + size: u64, + align: u64, + ) -> EvalResult<'tcx, &mut [u8]> { // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL if self.writes_are_aligned.get() { self.check_align(ptr.into(), align)?; @@ -831,7 +979,12 @@ fn get_bytes(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tc self.get_bytes_unchecked(ptr, size, align) } - fn get_bytes_mut(&mut self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &mut [u8]> { + fn get_bytes_mut( + &mut self, + ptr: MemoryPointer, + size: u64, + align: u64, + ) -> EvalResult<'tcx, &mut [u8]> { assert_ne!(size, 0); self.clear_relocations(ptr, size)?; self.mark_definedness(ptr.into(), size, true)?; @@ -841,19 +994,33 @@ fn get_bytes_mut(&mut self, ptr: MemoryPointer, size: u64, align: u64) -> EvalRe /// Reading and writing impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { - /// mark an allocation pointed to by a static as static and initialized - pub fn mark_inner_allocation(&mut self, alloc: AllocId, mutability: Mutability) -> EvalResult<'tcx> { + pub fn mark_inner_allocation( + &mut self, + alloc: AllocId, + mutability: Mutability, + ) -> EvalResult<'tcx> { // relocations into other statics are not "inner allocations" - if self.get(alloc).ok().map_or(false, |alloc| alloc.kind != MemoryKind::UninitializedStatic) { + if self.get(alloc).ok().map_or(false, |alloc| { + alloc.kind != MemoryKind::UninitializedStatic + }) + { self.mark_static_initalized(alloc, mutability)?; } Ok(()) } /// mark an allocation as static and initialized, either mutable or not - pub fn mark_static_initalized(&mut self, alloc_id: AllocId, mutability: Mutability) -> EvalResult<'tcx> { - trace!("mark_static_initalized {:?}, mutability: {:?}", alloc_id, mutability); + pub fn mark_static_initalized( + &mut self, + alloc_id: AllocId, + mutability: Mutability, + ) -> EvalResult<'tcx> { + trace!( + "mark_static_initalized {:?}, mutability: {:?}", + alloc_id, + mutability + ); // do not use `self.get_mut(alloc_id)` here, because we might have already marked a // sub-element or have circular pointers (e.g. `Rc`-cycles) let alloc_id = match alloc_id.into_alloc_id_kind() { @@ -861,7 +1028,12 @@ pub fn mark_static_initalized(&mut self, alloc_id: AllocId, mutability: Mutabili AllocIdKind::Runtime(id) => id, }; let relocations = match self.alloc_map.get_mut(&alloc_id) { - Some(&mut Allocation { ref mut relocations, ref mut kind, ref mut mutable, .. }) => { + Some(&mut Allocation { + ref mut relocations, + ref mut kind, + ref mut mutable, + .. + }) => { match *kind { // const eval results can refer to "locals". // E.g. 
`const Foo: &u32 = &1;` refers to the temp local that stores the `1` @@ -879,7 +1051,7 @@ pub fn mark_static_initalized(&mut self, alloc_id: AllocId, mutability: Mutabili // take out the relocations vector to free the borrow on self, so we can call // mark recursively mem::replace(relocations, Default::default()) - }, + } None => return err!(DanglingPointerDeref), }; // recurse into inner allocations @@ -887,11 +1059,21 @@ pub fn mark_static_initalized(&mut self, alloc_id: AllocId, mutability: Mutabili self.mark_inner_allocation(alloc, mutability)?; } // put back the relocations - self.alloc_map.get_mut(&alloc_id).expect("checked above").relocations = relocations; + self.alloc_map + .get_mut(&alloc_id) + .expect("checked above") + .relocations = relocations; Ok(()) } - pub fn copy(&mut self, src: Pointer, dest: Pointer, size: u64, align: u64, nonoverlapping: bool) -> EvalResult<'tcx> { + pub fn copy( + &mut self, + src: Pointer, + dest: Pointer, + size: u64, + align: u64, + nonoverlapping: bool, + ) -> EvalResult<'tcx> { if size == 0 { // Empty accesses don't need to be valid pointers, but they should still be aligned if self.reads_are_aligned.get() { @@ -917,8 +1099,11 @@ pub fn copy(&mut self, src: Pointer, dest: Pointer, size: u64, align: u64, nonov if src.alloc_id == dest.alloc_id { if nonoverlapping { if (src.offset <= dest.offset && src.offset + size > dest.offset) || - (dest.offset <= src.offset && dest.offset + size > src.offset) { - return err!(Intrinsic(format!("copy_nonoverlapping called on overlapping ranges"))); + (dest.offset <= src.offset && dest.offset + size > src.offset) + { + return err!(Intrinsic( + format!("copy_nonoverlapping called on overlapping ranges"), + )); } } ptr::copy(src_bytes, dest_bytes, size as usize); @@ -945,7 +1130,7 @@ pub fn read_c_str(&self, ptr: MemoryPointer) -> EvalResult<'tcx, &[u8]> { self.check_defined(ptr, (size + 1) as u64)?; self.check_locks(ptr, (size + 1) as u64, AccessKind::Read)?; Ok(&alloc.bytes[offset..offset + size]) - }, + } None => err!(UnterminatedCString(ptr)), } } @@ -983,7 +1168,9 @@ pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult< return Ok(()); } let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, 1)?; - for b in bytes { *b = val; } + for b in bytes { + *b = val; + } Ok(()) } @@ -1009,16 +1196,14 @@ pub fn read_ptr(&self, ptr: MemoryPointer) -> EvalResult<'tcx, Pointer> { pub fn write_ptr(&mut self, dest: MemoryPointer, ptr: MemoryPointer) -> EvalResult<'tcx> { self.write_usize(dest, ptr.offset as u64)?; - self.get_mut(dest.alloc_id)?.relocations.insert(dest.offset, ptr.alloc_id); + self.get_mut(dest.alloc_id)?.relocations.insert( + dest.offset, + ptr.alloc_id, + ); Ok(()) } - pub fn write_primval( - &mut self, - dest: Pointer, - val: PrimVal, - size: u64, - ) -> EvalResult<'tcx> { + pub fn write_primval(&mut self, dest: Pointer, val: PrimVal, size: u64) -> EvalResult<'tcx> { match val { PrimVal::Ptr(ptr) => { assert_eq!(size, self.pointer_size()); @@ -1054,8 +1239,9 @@ pub fn read_bool(&self, ptr: MemoryPointer) -> EvalResult<'tcx, bool> { pub fn write_bool(&mut self, ptr: MemoryPointer, b: bool) -> EvalResult<'tcx> { let align = self.layout.i1_align.abi(); - self.get_bytes_mut(ptr, 1, align) - .map(|bytes| bytes[0] = b as u8) + self.get_bytes_mut(ptr, 1, align).map( + |bytes| bytes[0] = b as u8, + ) } fn int_align(&self, size: u64) -> EvalResult<'tcx, u64> { @@ -1071,7 +1257,9 @@ fn int_align(&self, size: u64) -> EvalResult<'tcx, u64> { pub fn read_int(&self, ptr: MemoryPointer, size: u64) 
-> EvalResult<'tcx, i128> { let align = self.int_align(size)?; - self.get_bytes(ptr, size, align).map(|b| read_target_int(self.endianess(), b).unwrap()) + self.get_bytes(ptr, size, align).map(|b| { + read_target_int(self.endianess(), b).unwrap() + }) } pub fn write_int(&mut self, ptr: MemoryPointer, n: i128, size: u64) -> EvalResult<'tcx> { @@ -1084,7 +1272,9 @@ pub fn write_int(&mut self, ptr: MemoryPointer, n: i128, size: u64) -> EvalResul pub fn read_uint(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx, u128> { let align = self.int_align(size)?; - self.get_bytes(ptr, size, align).map(|b| read_target_uint(self.endianess(), b).unwrap()) + self.get_bytes(ptr, size, align).map(|b| { + read_target_uint(self.endianess(), b).unwrap() + }) } pub fn write_uint(&mut self, ptr: MemoryPointer, n: u128, size: u64) -> EvalResult<'tcx> { @@ -1130,21 +1320,29 @@ pub fn write_f64(&mut self, ptr: MemoryPointer, f: f64) -> EvalResult<'tcx> { } pub fn read_f32(&self, ptr: MemoryPointer) -> EvalResult<'tcx, f32> { - self.get_bytes(ptr, 4, self.layout.f32_align.abi()) - .map(|b| read_target_f32(self.endianess(), b).unwrap()) + self.get_bytes(ptr, 4, self.layout.f32_align.abi()).map( + |b| { + read_target_f32(self.endianess(), b).unwrap() + }, + ) } pub fn read_f64(&self, ptr: MemoryPointer) -> EvalResult<'tcx, f64> { - self.get_bytes(ptr, 8, self.layout.f64_align.abi()) - .map(|b| read_target_f64(self.endianess(), b).unwrap()) + self.get_bytes(ptr, 8, self.layout.f64_align.abi()).map( + |b| { + read_target_f64(self.endianess(), b).unwrap() + }, + ) } } /// Relocations impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { - fn relocations(&self, ptr: MemoryPointer, size: u64) - -> EvalResult<'tcx, btree_map::Range> - { + fn relocations( + &self, + ptr: MemoryPointer, + size: u64, + ) -> EvalResult<'tcx, btree_map::Range> { let start = ptr.offset.saturating_sub(self.pointer_size() - 1); let end = ptr.offset + size; Ok(self.get(ptr.alloc_id)?.relocations.range(start..end)) @@ -1153,7 +1351,9 @@ fn relocations(&self, ptr: MemoryPointer, size: u64) fn clear_relocations(&mut self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> { // Find all relocations overlapping the given range. let keys: Vec<_> = self.relocations(ptr, size)?.map(|(&k, _)| k).collect(); - if keys.is_empty() { return Ok(()); } + if keys.is_empty() { + return Ok(()); + } // Find the start and end of the given range and its outermost relocations. let start = ptr.offset; @@ -1165,11 +1365,17 @@ fn clear_relocations(&mut self, ptr: MemoryPointer, size: u64) -> EvalResult<'tc // Mark parts of the outermost relocations as undefined if they partially fall outside the // given range. - if first < start { alloc.undef_mask.set_range(first, start, false); } - if last > end { alloc.undef_mask.set_range(end, last, false); } + if first < start { + alloc.undef_mask.set_range(first, start, false); + } + if last > end { + alloc.undef_mask.set_range(end, last, false); + } // Forget all the relocations. - for k in keys { alloc.relocations.remove(&k); } + for k in keys { + alloc.relocations.remove(&k); + } Ok(()) } @@ -1183,7 +1389,12 @@ fn check_relocation_edges(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'t Ok(()) } - fn copy_relocations(&mut self, src: MemoryPointer, dest: MemoryPointer, size: u64) -> EvalResult<'tcx> { + fn copy_relocations( + &mut self, + src: MemoryPointer, + dest: MemoryPointer, + size: u64, + ) -> EvalResult<'tcx> { let relocations: Vec<_> = self.relocations(src, size)? 
.map(|(&offset, &alloc_id)| { // Update relocation offsets for the new positions in the destination allocation. @@ -1198,7 +1409,12 @@ fn copy_relocations(&mut self, src: MemoryPointer, dest: MemoryPointer, size: u6 /// Undefined bytes impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> { // FIXME(solson): This is a very naive, slow version. - fn copy_undef_mask(&mut self, src: MemoryPointer, dest: MemoryPointer, size: u64) -> EvalResult<'tcx> { + fn copy_undef_mask( + &mut self, + src: MemoryPointer, + dest: MemoryPointer, + size: u64, + ) -> EvalResult<'tcx> { // The bits have to be saved locally before writing to dest in case src and dest overlap. assert_eq!(size as usize as u64, size); let mut v = Vec::with_capacity(size as usize); @@ -1207,14 +1423,22 @@ fn copy_undef_mask(&mut self, src: MemoryPointer, dest: MemoryPointer, size: u64 v.push(defined); } for (i, defined) in v.into_iter().enumerate() { - self.get_mut(dest.alloc_id)?.undef_mask.set(dest.offset + i as u64, defined); + self.get_mut(dest.alloc_id)?.undef_mask.set( + dest.offset + + i as u64, + defined, + ); } Ok(()) } fn check_defined(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> { let alloc = self.get(ptr.alloc_id)?; - if !alloc.undef_mask.is_range_defined(ptr.offset, ptr.offset + size) { + if !alloc.undef_mask.is_range_defined( + ptr.offset, + ptr.offset + size, + ) + { return err!(ReadUndefBytes); } Ok(()) @@ -1224,14 +1448,18 @@ pub fn mark_definedness( &mut self, ptr: Pointer, size: u64, - new_state: bool + new_state: bool, ) -> EvalResult<'tcx> { if size == 0 { - return Ok(()) + return Ok(()); } let ptr = ptr.to_ptr()?; let mut alloc = self.get_mut(ptr.alloc_id)?; - alloc.undef_mask.set_range(ptr.offset, ptr.offset + size, new_state); + alloc.undef_mask.set_range( + ptr.offset, + ptr.offset + size, + new_state, + ); Ok(()) } } @@ -1240,14 +1468,22 @@ pub fn mark_definedness( // Methods to access integers in the target endianess //////////////////////////////////////////////////////////////////////////////// -fn write_target_uint(endianess: layout::Endian, mut target: &mut [u8], data: u128) -> Result<(), io::Error> { +fn write_target_uint( + endianess: layout::Endian, + mut target: &mut [u8], + data: u128, +) -> Result<(), io::Error> { let len = target.len(); match endianess { layout::Endian::Little => target.write_uint128::(data, len), layout::Endian::Big => target.write_uint128::(data, len), } } -fn write_target_int(endianess: layout::Endian, mut target: &mut [u8], data: i128) -> Result<(), io::Error> { +fn write_target_int( + endianess: layout::Endian, + mut target: &mut [u8], + data: i128, +) -> Result<(), io::Error> { let len = target.len(); match endianess { layout::Endian::Little => target.write_int128::(data, len), @@ -1272,13 +1508,21 @@ fn read_target_int(endianess: layout::Endian, mut source: &[u8]) -> Result Result<(), io::Error> { +fn write_target_f32( + endianess: layout::Endian, + mut target: &mut [u8], + data: f32, +) -> Result<(), io::Error> { match endianess { layout::Endian::Little => target.write_f32::(data), layout::Endian::Big => target.write_f32::(data), } } -fn write_target_f64(endianess: layout::Endian, mut target: &mut [u8], data: f64) -> Result<(), io::Error> { +fn write_target_f64( + endianess: layout::Endian, + mut target: &mut [u8], + data: f64, +) -> Result<(), io::Error> { match endianess { layout::Endian::Little => target.write_f64::(data), layout::Endian::Big => target.write_f64::(data), @@ -1323,21 +1567,29 @@ fn new(size: u64) -> Self { /// Check whether the range 
`start..end` (end-exclusive) is entirely defined. pub fn is_range_defined(&self, start: u64, end: u64) -> bool { - if end > self.len { return false; } + if end > self.len { + return false; + } for i in start..end { - if !self.get(i) { return false; } + if !self.get(i) { + return false; + } } true } fn set_range(&mut self, start: u64, end: u64, new_state: bool) { let len = self.len; - if end > len { self.grow(end - len, new_state); } + if end > len { + self.grow(end - len, new_state); + } self.set_range_inbounds(start, end, new_state); } fn set_range_inbounds(&mut self, start: u64, end: u64, new_state: bool) { - for i in start..end { self.set(i, new_state); } + for i in start..end { + self.set(i, new_state); + } } fn get(&self, i: u64) -> bool { @@ -1359,7 +1611,9 @@ fn grow(&mut self, amount: u64, new_state: bool) { if amount > unused_trailing_bits { let additional_blocks = amount / BLOCK_SIZE + 1; assert_eq!(additional_blocks as usize as u64, additional_blocks); - self.blocks.extend(iter::repeat(0).take(additional_blocks as usize)); + self.blocks.extend( + iter::repeat(0).take(additional_blocks as usize), + ); } let start = self.len; self.len += amount; @@ -1385,7 +1639,8 @@ pub trait HasMemory<'a, 'tcx, M: Machine<'tcx>> { // These are not supposed to be overridden. fn read_maybe_aligned(&self, aligned: bool, f: F) -> EvalResult<'tcx, T> - where F: FnOnce(&Self) -> EvalResult<'tcx, T> + where + F: FnOnce(&Self) -> EvalResult<'tcx, T>, { let old = self.memory().reads_are_aligned.get(); // Do alignment checking if *all* nested calls say it has to be aligned. @@ -1396,7 +1651,8 @@ fn read_maybe_aligned(&self, aligned: bool, f: F) -> EvalResult<'tcx, T> } fn read_maybe_aligned_mut(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T> - where F: FnOnce(&mut Self) -> EvalResult<'tcx, T> + where + F: FnOnce(&mut Self) -> EvalResult<'tcx, T>, { let old = self.memory().reads_are_aligned.get(); // Do alignment checking if *all* nested calls say it has to be aligned. @@ -1407,7 +1663,8 @@ fn read_maybe_aligned_mut(&mut self, aligned: bool, f: F) -> EvalResult<'t } fn write_maybe_aligned_mut(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T> - where F: FnOnce(&mut Self) -> EvalResult<'tcx, T> + where + F: FnOnce(&mut Self) -> EvalResult<'tcx, T>, { let old = self.memory().writes_are_aligned.get(); // Do alignment checking if *all* nested calls say it has to be aligned. @@ -1446,7 +1703,7 @@ fn memory(&self) -> &Memory<'a, 'tcx, M> { // Pointer arithmetic //////////////////////////////////////////////////////////////////////////////// -pub trait PointerArithmetic : layout::HasDataLayout { +pub trait PointerArithmetic: layout::HasDataLayout { // These are not supposed to be overridden.
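// Worked example for the truncation helper documented below (pointer size
// and values assumed for illustration; the helper's body is elided in this
// hunk): with 32-bit pointers, truncating 0x1_0000_0002 yields 2 with the
// overflow flag set, while 0xFFFF_FFFF fits and is returned unchanged with
// no overflow.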
/// Truncate the given value to the pointer size; also return whether there was an overflow @@ -1476,20 +1733,12 @@ fn overflowing_offset(self, val: u64, i: u64) -> (u64, bool) { fn signed_offset<'tcx>(self, val: u64, i: i64) -> EvalResult<'tcx, u64> { let (res, over) = self.overflowing_signed_offset(val, i as i128); - if over { - err!(OverflowingMath) - } else { - Ok(res) - } + if over { err!(OverflowingMath) } else { Ok(res) } } fn offset<'tcx>(self, val: u64, i: u64) -> EvalResult<'tcx, u64> { let (res, over) = self.overflowing_offset(val, i); - if over { - err!(OverflowingMath) - } else { - Ok(res) - } + if over { err!(OverflowingMath) } else { Ok(res) } } fn wrapping_signed_offset(self, val: u64, i: i64) -> u64 { @@ -1512,7 +1761,8 @@ fn data_layout(&self) -> &TargetDataLayout { } } -impl<'c, 'b, 'a, 'tcx, M: Machine<'tcx>> layout::HasDataLayout for &'c &'b mut EvalContext<'a, 'tcx, M> { +impl<'c, 'b, 'a, 'tcx, M: Machine<'tcx>> layout::HasDataLayout + for &'c &'b mut EvalContext<'a, 'tcx, M> { #[inline] fn data_layout(&self) -> &TargetDataLayout { self.memory().layout diff --git a/src/librustc_mir/interpret/mod.rs b/src/librustc_mir/interpret/mod.rs index 392724757eb..e3931444e6f 100644 --- a/src/librustc_mir/interpret/mod.rs +++ b/src/librustc_mir/interpret/mod.rs @@ -20,62 +20,23 @@ macro_rules! err { mod traits; mod value; -pub use self::error::{ - EvalError, - EvalResult, - EvalErrorKind, -}; +pub use self::error::{EvalError, EvalResult, EvalErrorKind}; -pub use self::eval_context::{ - EvalContext, - Frame, - ResourceLimits, - StackPopCleanup, - DynamicLifetime, - TyAndPacked, - PtrAndAlign, -}; +pub use self::eval_context::{EvalContext, Frame, ResourceLimits, StackPopCleanup, DynamicLifetime, + TyAndPacked, PtrAndAlign}; -pub use self::lvalue::{ - Lvalue, - LvalueExtra, - GlobalId, -}; +pub use self::lvalue::{Lvalue, LvalueExtra, GlobalId}; -pub use self::memory::{ - AllocId, - Memory, - MemoryPointer, - MemoryKind, - HasMemory, -}; +pub use self::memory::{AllocId, Memory, MemoryPointer, MemoryKind, HasMemory}; -use self::memory::{ - PointerArithmetic, - Lock, - AccessKind, -}; +use self::memory::{PointerArithmetic, Lock, AccessKind}; -use self::range_map::{ - RangeMap -}; +use self::range_map::RangeMap; -pub use self::value::{ - PrimVal, - PrimValKind, - Value, - Pointer, -}; +pub use self::value::{PrimVal, PrimValKind, Value, Pointer}; -pub use self::const_eval::{ - eval_body_as_integer, - eval_body_as_primval, -}; +pub use self::const_eval::{eval_body_as_integer, eval_body_as_primval}; -pub use self::machine::{ - Machine, -}; +pub use self::machine::Machine; -pub use self::validation::{ - ValidationQuery, -}; +pub use self::validation::ValidationQuery; diff --git a/src/librustc_mir/interpret/operator.rs b/src/librustc_mir/interpret/operator.rs index 8880be6e848..42d4538e950 100644 --- a/src/librustc_mir/interpret/operator.rs +++ b/src/librustc_mir/interpret/operator.rs @@ -1,22 +1,10 @@ use rustc::mir; use rustc::ty::Ty; -use super::{ - EvalResult, - EvalContext, - Lvalue, - Machine, -}; +use super::{EvalResult, EvalContext, Lvalue, Machine}; -use super::value::{ - PrimVal, - PrimValKind, - Value, - bytes_to_f32, - bytes_to_f64, - f32_to_bytes, - f64_to_bytes, -}; +use super::value::{PrimVal, PrimValKind, Value, bytes_to_f32, bytes_to_f64, f32_to_bytes, + f64_to_bytes}; impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { fn binop_with_overflow( &mut self, op: mir::BinOp, left: &mir::Operand<'tcx>, right: &mir::Operand<'tcx>, ) -> EvalResult<'tcx,
(PrimVal, bool)> { - let left_ty = self.operand_ty(left); - let right_ty = self.operand_ty(right); - let left_val = self.eval_operand_to_primval(left)?; - let right_val = self.eval_operand_to_primval(right)?; + let left_ty = self.operand_ty(left); + let right_ty = self.operand_ty(right); + let left_val = self.eval_operand_to_primval(left)?; + let right_val = self.eval_operand_to_primval(right)?; self.binary_op(op, left_val, left_ty, right_val, right_ty) } @@ -147,7 +135,7 @@ pub fn binary_op( use rustc::mir::BinOp::*; use super::PrimValKind::*; - let left_kind = self.ty_to_primval_kind(left_ty)?; + let left_kind = self.ty_to_primval_kind(left_ty)?; let right_kind = self.ty_to_primval_kind(right_ty)?; //trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind); @@ -172,23 +160,30 @@ pub fn binary_op( } if left_kind != right_kind { - let msg = format!("unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind); + let msg = format!( + "unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})", + bin_op, + left, + left_kind, + right, + right_kind + ); return err!(Unimplemented(msg)); } let val = match (bin_op, left_kind) { (Eq, F32) => PrimVal::from_bool(bytes_to_f32(l) == bytes_to_f32(r)), (Ne, F32) => PrimVal::from_bool(bytes_to_f32(l) != bytes_to_f32(r)), - (Lt, F32) => PrimVal::from_bool(bytes_to_f32(l) < bytes_to_f32(r)), + (Lt, F32) => PrimVal::from_bool(bytes_to_f32(l) < bytes_to_f32(r)), (Le, F32) => PrimVal::from_bool(bytes_to_f32(l) <= bytes_to_f32(r)), - (Gt, F32) => PrimVal::from_bool(bytes_to_f32(l) > bytes_to_f32(r)), + (Gt, F32) => PrimVal::from_bool(bytes_to_f32(l) > bytes_to_f32(r)), (Ge, F32) => PrimVal::from_bool(bytes_to_f32(l) >= bytes_to_f32(r)), (Eq, F64) => PrimVal::from_bool(bytes_to_f64(l) == bytes_to_f64(r)), (Ne, F64) => PrimVal::from_bool(bytes_to_f64(l) != bytes_to_f64(r)), - (Lt, F64) => PrimVal::from_bool(bytes_to_f64(l) < bytes_to_f64(r)), + (Lt, F64) => PrimVal::from_bool(bytes_to_f64(l) < bytes_to_f64(r)), (Le, F64) => PrimVal::from_bool(bytes_to_f64(l) <= bytes_to_f64(r)), - (Gt, F64) => PrimVal::from_bool(bytes_to_f64(l) > bytes_to_f64(r)), + (Gt, F64) => PrimVal::from_bool(bytes_to_f64(l) > bytes_to_f64(r)), (Ge, F64) => PrimVal::from_bool(bytes_to_f64(l) >= bytes_to_f64(r)), (Add, F32) => f32_arithmetic!(+, l, r), @@ -207,15 +202,15 @@ pub fn binary_op( (Ne, _) => PrimVal::from_bool(l != r), (Lt, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) < (r as i128)), - (Lt, _) => PrimVal::from_bool(l < r), + (Lt, _) => PrimVal::from_bool(l < r), (Le, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) <= (r as i128)), (Le, _) => PrimVal::from_bool(l <= r), (Gt, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) > (r as i128)), - (Gt, _) => PrimVal::from_bool(l > r), + (Gt, _) => PrimVal::from_bool(l > r), (Ge, k) if k.is_signed_int() => PrimVal::from_bool((l as i128) >= (r as i128)), (Ge, _) => PrimVal::from_bool(l >= r), - (BitOr, _) => PrimVal::Bytes(l | r), + (BitOr, _) => PrimVal::Bytes(l | r), (BitAnd, _) => PrimVal::Bytes(l & r), (BitXor, _) => PrimVal::Bytes(l ^ r), @@ -226,7 +221,14 @@ pub fn binary_op( (Rem, k) if k.is_int() => return int_arithmetic!(k, overflowing_rem, l, r), _ => { - let msg = format!("unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind); + let msg = format!( + "unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})", + bin_op, + left, + left_kind, + right, + right_kind + ); return 
err!(Unimplemented(msg)); } }; @@ -248,19 +250,19 @@ pub fn unary_op<'tcx>( let result_bytes = match (un_op, val_kind) { (Not, Bool) => !val.to_bool()? as u128, - (Not, U8) => !(bytes as u8) as u128, + (Not, U8) => !(bytes as u8) as u128, (Not, U16) => !(bytes as u16) as u128, (Not, U32) => !(bytes as u32) as u128, (Not, U64) => !(bytes as u64) as u128, (Not, U128) => !bytes, - (Not, I8) => !(bytes as i8) as u128, + (Not, I8) => !(bytes as i8) as u128, (Not, I16) => !(bytes as i16) as u128, (Not, I32) => !(bytes as i32) as u128, (Not, I64) => !(bytes as i64) as u128, (Not, I128) => !(bytes as i128) as u128, - (Neg, I8) => -(bytes as i8) as u128, + (Neg, I8) => -(bytes as i8) as u128, (Neg, I16) => -(bytes as i16) as u128, (Neg, I32) => -(bytes as i32) as u128, (Neg, I64) => -(bytes as i64) as u128, diff --git a/src/librustc_mir/interpret/range_map.rs b/src/librustc_mir/interpret/range_map.rs index e4db9b0e0fc..5cdcbe35121 100644 --- a/src/librustc_mir/interpret/range_map.rs +++ b/src/librustc_mir/interpret/range_map.rs @@ -4,12 +4,12 @@ //! necessary (e.g. when [0,5) is first associated with X, and then [1,2) is mutated). //! Users must not depend on whether a range is coalesced or not, even though this is observable //! via the iteration APIs. -use std::collections::{BTreeMap}; +use std::collections::BTreeMap; use std::ops; #[derive(Clone, Debug)] pub struct RangeMap { - map: BTreeMap + map: BTreeMap, } // The derived `Ord` impl sorts first by the first field, then, if the fields are the same, @@ -31,11 +31,13 @@ fn range(offset: u64, len: u64) -> ops::Range { // the range given by the offset into the allocation and the length. // This is sound if all ranges that intersect with the argument range are in the // resulting range of ranges. - let left = Range { // lowest range to include `offset` + let left = Range { + // lowest range to include `offset` start: 0, end: offset + 1, }; - let right = Range { // lowest (valid) range not to include `offset+len` + let right = Range { + // lowest (valid) range not to include `offset+len` start: offset + len, end: offset + len + 1, }; @@ -45,7 +47,7 @@ fn range(offset: u64, len: u64) -> ops::Range { /// Tests if any element of [offset, offset+len) is contained in this range.
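/// Example (illustrative values, matching the condition below): the range
/// `4..7` overlaps a query with `offset = 6, len = 2`, since `6 < 7` and
/// `6 + 2 >= 4` both hold; a query with `offset = 7, len = 1` fails `7 < 7`
/// and does not overlap.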
fn overlaps(&self, offset: u64, len: u64) -> bool { assert!(len > 0); - offset < self.end && offset+len >= self.start + offset < self.end && offset + len >= self.start } } @@ -54,82 +56,122 @@ pub fn new() -> RangeMap { RangeMap { map: BTreeMap::new() } } - fn iter_with_range<'a>(&'a self, offset: u64, len: u64) -> impl Iterator + 'a { + fn iter_with_range<'a>( + &'a self, + offset: u64, + len: u64, + ) -> impl Iterator + 'a { assert!(len > 0); - self.map.range(Range::range(offset, len)) - .filter_map(move |(range, data)| { + self.map.range(Range::range(offset, len)).filter_map( + move |(range, + data)| { if range.overlaps(offset, len) { Some((range, data)) } else { None } - }) + }, + ) } - pub fn iter<'a>(&'a self, offset: u64, len: u64) -> impl Iterator + 'a { + pub fn iter<'a>(&'a self, offset: u64, len: u64) -> impl Iterator + 'a { self.iter_with_range(offset, len).map(|(_, data)| data) } - fn split_entry_at(&mut self, offset: u64) where T: Clone { + fn split_entry_at(&mut self, offset: u64) + where + T: Clone, + { let range = match self.iter_with_range(offset, 1).next() { Some((&range, _)) => range, None => return, }; - assert!(range.start <= offset && range.end > offset, "We got a range that doesn't even contain what we asked for."); + assert!( + range.start <= offset && range.end > offset, + "We got a range that doesn't even contain what we asked for." + ); // There is an entry overlapping this position, see if we have to split it if range.start < offset { let data = self.map.remove(&range).unwrap(); - let old = self.map.insert(Range { start: range.start, end: offset }, data.clone()); + let old = self.map.insert( + Range { + start: range.start, + end: offset, + }, + data.clone(), + ); assert!(old.is_none()); - let old = self.map.insert(Range { start: offset, end: range.end }, data); + let old = self.map.insert( + Range { + start: offset, + end: range.end, + }, + data, + ); assert!(old.is_none()); } } - pub fn iter_mut_all<'a>(&'a mut self) -> impl Iterator + 'a { + pub fn iter_mut_all<'a>(&'a mut self) -> impl Iterator + 'a { self.map.values_mut() } /// Provide mutable iteration over everything in the given range. As a side-effect, /// this will split entries in the map that are only partially hit by the given range, /// to make sure that when they are mutated, the effect is constrained to the given range. - pub fn iter_mut_with_gaps<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator + 'a - where T: Clone + pub fn iter_mut_with_gaps<'a>( + &'a mut self, + offset: u64, + len: u64, + ) -> impl Iterator + 'a + where + T: Clone, { assert!(len > 0); // Preparation: Split first and last entry as needed. self.split_entry_at(offset); - self.split_entry_at(offset+len); + self.split_entry_at(offset + len); // Now we can provide a mutable iterator - self.map.range_mut(Range::range(offset, len)) - .filter_map(move |(&range, data)| { + self.map.range_mut(Range::range(offset, len)).filter_map( + move |(&range, data)| { if range.overlaps(offset, len) { - assert!(offset <= range.start && offset+len >= range.end, "The splitting went wrong"); + assert!( + offset <= range.start && offset + len >= range.end, + "The splitting went wrong" + ); Some(data) } else { // Skip this one None } - }) + }, + ) } /// Provide a mutable iterator over everything in the given range, with the same side-effects as /// iter_mut_with_gaps. Furthermore, if there are gaps between ranges, fill them with the given default. /// This is also how you insert. 
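/// Usage sketch (hypothetical element type; mirrors the test module below):
/// on a fresh `RangeMap<u8>`, `for x in map.iter_mut(10, 4) { *x = 1; }`
/// records `10..14` as a gap, fills it with `u8::default()`, and then yields
/// it mutably, so afterwards every point query in `10..14` sees `1`.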
- pub fn iter_mut<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator + 'a - where T: Clone + Default + pub fn iter_mut<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator + 'a + where + T: Clone + Default, { // Do a first iteration to collect the gaps let mut gaps = Vec::new(); let mut last_end = offset; for (range, _) in self.iter_with_range(offset, len) { if last_end < range.start { - gaps.push(Range { start: last_end, end: range.start }); + gaps.push(Range { + start: last_end, + end: range.start, + }); } last_end = range.end; } - if last_end < offset+len { - gaps.push(Range { start: last_end, end: offset+len }); + if last_end < offset + len { + gaps.push(Range { + start: last_end, + end: offset + len, + }); } // Add default for all gaps @@ -143,7 +185,8 @@ pub fn iter_mut<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator(&mut self, mut f: F) - where F: FnMut(&T) -> bool + where + F: FnMut(&T) -> bool, { let mut remove = Vec::new(); for (range, data) in self.map.iter() { @@ -164,7 +207,10 @@ mod tests { /// Query the map at every offset in the range and collect the results. fn to_vec(map: &RangeMap, offset: u64, len: u64) -> Vec { - (offset..offset+len).into_iter().map(|i| *map.iter(i, 1).next().unwrap()).collect() + (offset..offset + len) + .into_iter() + .map(|i| *map.iter(i, 1).next().unwrap()) + .collect() } #[test] @@ -190,10 +236,15 @@ fn gaps() { // Now request a range that needs three gaps filled for x in map.iter_mut(10, 10) { - if *x != 42 { *x = 23; } + if *x != 42 { + *x = 23; + } } - assert_eq!(to_vec(&map, 10, 10), vec![23, 42, 23, 23, 23, 42, 23, 23, 23, 23]); + assert_eq!( + to_vec(&map, 10, 10), + vec![23, 42, 23, 23, 23, 42, 23, 23, 23, 23] + ); assert_eq!(to_vec(&map, 13, 5), vec![23, 23, 42, 23, 23]); } } diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs index a85d8d05c32..c43ad18e0d8 100644 --- a/src/librustc_mir/interpret/step.rs +++ b/src/librustc_mir/interpret/step.rs @@ -11,13 +11,8 @@ use rustc::ty::layout::Layout; use rustc::ty::subst::Substs; -use super::{ - EvalResult, - EvalContext, StackPopCleanup, TyAndPacked, PtrAndAlign, - GlobalId, Lvalue, - HasMemory, MemoryKind, - Machine, -}; +use super::{EvalResult, EvalContext, StackPopCleanup, TyAndPacked, PtrAndAlign, GlobalId, Lvalue, + HasMemory, MemoryKind, Machine}; use syntax::codemap::Span; use syntax::ast::Mutability; @@ -52,7 +47,14 @@ pub fn step(&mut self) -> EvalResult<'tcx, bool> { ecx: self, mir, new_constants: &mut new, - }.visit_statement(block, stmt, mir::Location { block, statement_index: stmt_id }); + }.visit_statement( + block, + stmt, + mir::Location { + block, + statement_index: stmt_id, + }, + ); // if ConstantExtractor added new frames, we don't execute anything here // but await the next call to step if new? == 0 { @@ -69,7 +71,14 @@ pub fn step(&mut self) -> EvalResult<'tcx, bool> { ecx: self, mir, new_constants: &mut new, - }.visit_terminator(block, terminator, mir::Location { block, statement_index: stmt_id }); + }.visit_terminator( + block, + terminator, + mir::Location { + block, + statement_index: stmt_id, + }, + ); // if ConstantExtractor added new frames, we don't execute anything here // but await the next call to step if new? 
== 0 { @@ -85,7 +94,10 @@ fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> { match stmt.kind { Assign(ref lvalue, ref rvalue) => self.eval_rvalue_into_lvalue(rvalue, lvalue)?, - SetDiscriminant { ref lvalue, variant_index } => { + SetDiscriminant { + ref lvalue, + variant_index, + } => { let dest = self.eval_lvalue(lvalue)?; let dest_ty = self.lvalue_ty(lvalue); let dest_layout = self.type_layout(dest_ty)?; @@ -94,7 +106,11 @@ fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> { Layout::General { discr, .. } => { let discr_size = discr.size().bytes(); let dest_ptr = self.force_allocation(dest)?.to_ptr()?; - self.memory.write_uint(dest_ptr, variant_index as u128, discr_size)? + self.memory.write_uint( + dest_ptr, + variant_index as u128, + discr_size, + )? } Layout::RawNullablePointer { nndiscr, .. } => { @@ -103,31 +119,57 @@ fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> { } } - Layout::StructWrappedNullablePointer { nndiscr, ref discrfield_source, .. } => { + Layout::StructWrappedNullablePointer { + nndiscr, + ref discrfield_source, + .. + } => { if variant_index as u64 != nndiscr { - let (offset, TyAndPacked { ty, packed }) = self.nonnull_offset_and_ty(dest_ty, nndiscr, discrfield_source)?; - let nonnull = self.force_allocation(dest)?.to_ptr()?.offset(offset.bytes(), &self)?; + let (offset, TyAndPacked { ty, packed }) = self.nonnull_offset_and_ty( + dest_ty, + nndiscr, + discrfield_source, + )?; + let nonnull = self.force_allocation(dest)?.to_ptr()?.offset( + offset.bytes(), + &self, + )?; trace!("struct wrapped nullable pointer type: {}", ty); // only the pointer part of a fat pointer is used for this space optimization - let discr_size = self.type_size(ty)?.expect("bad StructWrappedNullablePointer discrfield"); - self.write_maybe_aligned_mut(!packed, |ectx| ectx.memory.write_uint(nonnull, 0, discr_size))?; + let discr_size = self.type_size(ty)?.expect( + "bad StructWrappedNullablePointer discrfield", + ); + self.write_maybe_aligned_mut(!packed, |ectx| { + ectx.memory.write_uint(nonnull, 0, discr_size) + })?; } - }, + } - _ => bug!("SetDiscriminant on {} represented as {:#?}", dest_ty, dest_layout), + _ => { + bug!( + "SetDiscriminant on {} represented as {:#?}", + dest_ty, + dest_layout + ) + } } } // Mark locals as dead or alive. - StorageLive(ref lvalue) | StorageDead(ref lvalue)=> { - let (frame, local) = match self.eval_lvalue(lvalue)? { - Lvalue::Local{ frame, local } if self.cur_frame() == frame => (frame, local), - _ => return err!(Unimplemented("Storage annotations must refer to locals of the topmost stack frame.".to_owned())) // FIXME maybe this should get its own error type - }; + StorageLive(ref lvalue) | + StorageDead(ref lvalue) => { + let (frame, local) = + match self.eval_lvalue(lvalue)? 
{ + Lvalue::Local { frame, local } if self.cur_frame() == frame => ( + frame, + local, + ), + _ => return err!(Unimplemented("Storage annotations must refer to locals of the topmost stack frame.".to_owned())), // FIXME maybe this should get its own error type + }; let old_val = match stmt.kind { StorageLive(_) => self.stack[frame].storage_live(local)?, - StorageDead(_) => self.stack[frame].storage_dead(local)?, - _ => bug!("We already checked that we are a storage stmt") + StorageDead(_) => self.stack[frame].storage_dead(local)?, + _ => bug!("We already checked that we are a storage stmt"), }; self.deallocate_local(old_val)?; } @@ -171,7 +213,10 @@ fn global_item( mutability: Mutability, ) -> EvalResult<'tcx, bool> { let instance = self.resolve_associated_const(def_id, substs); - let cid = GlobalId { instance, promoted: None }; + let cid = GlobalId { + instance, + promoted: None, + }; if self.globals.contains_key(&cid) { return Ok(false); } @@ -179,22 +224,45 @@ fn global_item( // FIXME: check that it's `#[linkage = "extern_weak"]` trace!("Initializing an extern global with NULL"); let ptr_size = self.memory.pointer_size(); - let ptr = self.memory.allocate(ptr_size, ptr_size, MemoryKind::UninitializedStatic)?; + let ptr = self.memory.allocate( + ptr_size, + ptr_size, + MemoryKind::UninitializedStatic, + )?; self.memory.write_usize(ptr, 0)?; self.memory.mark_static_initalized(ptr.alloc_id, mutability)?; - self.globals.insert(cid, PtrAndAlign { ptr: ptr.into(), aligned: true }); + self.globals.insert( + cid, + PtrAndAlign { + ptr: ptr.into(), + aligned: true, + }, + ); return Ok(false); } let mir = self.load_mir(instance.def)?; - let size = self.type_size_with_substs(mir.return_ty, substs)?.expect("unsized global"); + let size = self.type_size_with_substs(mir.return_ty, substs)?.expect( + "unsized global", + ); let align = self.type_align_with_substs(mir.return_ty, substs)?; - let ptr = self.memory.allocate(size, align, MemoryKind::UninitializedStatic)?; + let ptr = self.memory.allocate( + size, + align, + MemoryKind::UninitializedStatic, + )?; let aligned = !self.is_packed(mir.return_ty)?; - self.globals.insert(cid, PtrAndAlign { ptr: ptr.into(), aligned }); + self.globals.insert( + cid, + PtrAndAlign { + ptr: ptr.into(), + aligned, + }, + ); let internally_mutable = !mir.return_ty.is_freeze( - self.tcx, - ty::ParamEnv::empty(Reveal::All), - span); + self.tcx, + ty::ParamEnv::empty(Reveal::All), + span, + ); let mutability = if mutability == Mutability::Mutable || internally_mutable { Mutability::Mutable } else { @@ -237,7 +305,7 @@ fn try EvalResult<'tcx, bool>>(&mut self, f: F) { // everything ok + a new stackframe Ok(true) => *self.new_constants = Ok(n + 1), // constant correctly evaluated, but no new stackframe - Ok(false) => {}, + Ok(false) => {} // constant eval errored Err(err) => *self.new_constants = Err(err), } @@ -251,8 +319,15 @@ fn visit_constant(&mut self, constant: &mir::Constant<'tcx>, location: mir::Loca // already computed by rustc mir::Literal::Value { .. 
} => {} mir::Literal::Item { def_id, substs } => { - self.try(|this| this.ecx.global_item(def_id, substs, constant.span, Mutability::Immutable)); - }, + self.try(|this| { + this.ecx.global_item( + def_id, + substs, + constant.span, + Mutability::Immutable, + ) + }); + } mir::Literal::Promoted { index } => { let cid = GlobalId { instance: self.instance, @@ -263,17 +338,33 @@ fn visit_constant(&mut self, constant: &mir::Constant<'tcx>, location: mir::Loca } let mir = &self.mir.promoted[index]; self.try(|this| { - let size = this.ecx.type_size_with_substs(mir.return_ty, this.instance.substs)?.expect("unsized global"); - let align = this.ecx.type_align_with_substs(mir.return_ty, this.instance.substs)?; - let ptr = this.ecx.memory.allocate(size, align, MemoryKind::UninitializedStatic)?; + let size = this.ecx + .type_size_with_substs(mir.return_ty, this.instance.substs)? + .expect("unsized global"); + let align = this.ecx.type_align_with_substs( + mir.return_ty, + this.instance.substs, + )?; + let ptr = this.ecx.memory.allocate( + size, + align, + MemoryKind::UninitializedStatic, + )?; let aligned = !this.ecx.is_packed(mir.return_ty)?; - this.ecx.globals.insert(cid, PtrAndAlign { ptr: ptr.into(), aligned }); + this.ecx.globals.insert( + cid, + PtrAndAlign { + ptr: ptr.into(), + aligned, + }, + ); trace!("pushing stack frame for {:?}", index); - this.ecx.push_stack_frame(this.instance, - constant.span, - mir, - Lvalue::from_ptr(ptr), - StackPopCleanup::MarkStatic(Mutability::Immutable), + this.ecx.push_stack_frame( + this.instance, + constant.span, + mir, + Lvalue::from_ptr(ptr), + StackPopCleanup::MarkStatic(Mutability::Immutable), )?; Ok(true) }); @@ -285,7 +376,7 @@ fn visit_lvalue( &mut self, lvalue: &mir::Lvalue<'tcx>, context: LvalueContext<'tcx>, - location: mir::Location + location: mir::Location, ) { self.super_lvalue(lvalue, context, location); if let mir::Lvalue::Static(ref static_) = *lvalue { @@ -295,7 +386,18 @@ fn visit_lvalue( if let Some(node_item) = self.ecx.tcx.hir.get_if_local(def_id) { if let hir::map::Node::NodeItem(&hir::Item { ref node, .. 
}) = node_item { if let hir::ItemStatic(_, m, _) = *node { - self.try(|this| this.ecx.global_item(def_id, substs, span, if m == hir::MutMutable { Mutability::Mutable } else { Mutability::Immutable })); + self.try(|this| { + this.ecx.global_item( + def_id, + substs, + span, + if m == hir::MutMutable { + Mutability::Mutable + } else { + Mutability::Immutable + }, + ) + }); return; } else { bug!("static def id doesn't point to static"); @@ -306,7 +408,18 @@ fn visit_lvalue( } else { let def = self.ecx.tcx.describe_def(def_id).expect("static not found"); if let hir::def::Def::Static(_, mutable) = def { - self.try(|this| this.ecx.global_item(def_id, substs, span, if mutable { Mutability::Mutable } else { Mutability::Immutable })); + self.try(|this| { + this.ecx.global_item( + def_id, + substs, + span, + if mutable { + Mutability::Mutable + } else { + Mutability::Immutable + }, + ) + }); } else { bug!("static found but isn't a static: {:?}", def); } diff --git a/src/librustc_mir/interpret/terminator/drop.rs b/src/librustc_mir/interpret/terminator/drop.rs index 36d56511afc..334d23ef369 100644 --- a/src/librustc_mir/interpret/terminator/drop.rs +++ b/src/librustc_mir/interpret/terminator/drop.rs @@ -2,29 +2,45 @@ use rustc::ty::{self, Ty}; use syntax::codemap::Span; -use interpret::{ - EvalResult, - EvalContext, StackPopCleanup, - Lvalue, LvalueExtra, - PrimVal, Value, - Machine, -}; +use interpret::{EvalResult, EvalContext, StackPopCleanup, Lvalue, LvalueExtra, PrimVal, Value, + Machine}; impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { - pub(crate) fn drop_lvalue(&mut self, lval: Lvalue, instance: ty::Instance<'tcx>, ty: Ty<'tcx>, span: Span) -> EvalResult<'tcx> { + pub(crate) fn drop_lvalue( + &mut self, + lval: Lvalue, + instance: ty::Instance<'tcx>, + ty: Ty<'tcx>, + span: Span, + ) -> EvalResult<'tcx> { trace!("drop_lvalue: {:#?}", lval); // We take the address of the object. This may well be unaligned, which is fine for us here. // However, unaligned accesses will probably make the actual drop implementation fail -- a problem shared // by rustc. let val = match self.force_allocation(lval)? { - Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable) } => ptr.ptr.to_value_with_vtable(vtable), - Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len) } => ptr.ptr.to_value_with_len(len), - Lvalue::Ptr { ptr, extra: LvalueExtra::None } => ptr.ptr.to_value(), + Lvalue::Ptr { + ptr, + extra: LvalueExtra::Vtable(vtable), + } => ptr.ptr.to_value_with_vtable(vtable), + Lvalue::Ptr { + ptr, + extra: LvalueExtra::Length(len), + } => ptr.ptr.to_value_with_len(len), + Lvalue::Ptr { + ptr, + extra: LvalueExtra::None, + } => ptr.ptr.to_value(), _ => bug!("force_allocation broken"), }; self.drop(val, instance, ty, span) } - pub(crate) fn drop(&mut self, arg: Value, mut instance: ty::Instance<'tcx>, ty: Ty<'tcx>, span: Span) -> EvalResult<'tcx> { + pub(crate) fn drop( + &mut self, + arg: Value, + mut instance: ty::Instance<'tcx>, + ty: Ty<'tcx>, + span: Span, + ) -> EvalResult<'tcx> { trace!("drop: {:#?}, {:?}, {:?}", arg, ty.sty, instance.def); if let ty::InstanceDef::DropGlue(_, None) = instance.def { @@ -42,11 +58,11 @@ pub(crate) fn drop(&mut self, arg: Value, mut instance: ty::Instance<'tcx>, ty: Some(func) => { instance = func; self.load_mir(func.def)? 
- }, + } // no drop fn -> bail out None => return Ok(()), } - }, + } _ => self.load_mir(instance.def)?, }; diff --git a/src/librustc_mir/interpret/terminator/mod.rs b/src/librustc_mir/interpret/terminator/mod.rs index 531e1792d9e..dde541ed5f7 100644 --- a/src/librustc_mir/interpret/terminator/mod.rs +++ b/src/librustc_mir/interpret/terminator/mod.rs @@ -4,15 +4,8 @@ use syntax::codemap::Span; use syntax::abi::Abi; -use super::{ - EvalError, EvalResult, EvalErrorKind, - EvalContext, eval_context, TyAndPacked, PtrAndAlign, - Lvalue, - MemoryPointer, - PrimVal, Value, - Machine, - HasMemory, -}; +use super::{EvalError, EvalResult, EvalErrorKind, EvalContext, eval_context, TyAndPacked, + PtrAndAlign, Lvalue, MemoryPointer, PrimVal, Value, Machine, HasMemory}; use super::eval_context::IntegerExt; use rustc_data_structures::indexed_vec::Idx; @@ -38,7 +31,12 @@ pub(super) fn eval_terminator( Goto { target } => self.goto_block(target), - SwitchInt { ref discr, ref values, ref targets, .. } => { + SwitchInt { + ref discr, + ref values, + ref targets, + .. + } => { // FIXME(CTFE): forbid branching let discr_val = self.eval_operand(discr)?; let discr_ty = self.operand_ty(discr); @@ -58,7 +56,12 @@ pub(super) fn eval_terminator( self.goto_block(target_block); } - Call { ref func, ref args, ref destination, .. } => { + Call { + ref func, + ref args, + ref destination, + .. + } => { let destination = match *destination { Some((ref lv, target)) => Some((self.eval_lvalue(lv)?, target)), None => None, @@ -80,22 +83,35 @@ pub(super) fn eval_terminator( if !self.check_sig_compat(sig, real_sig)? { return err!(FunctionPointerTyMismatch(real_sig, sig)); } - }, + } ref other => bug!("instance def ty: {:?}", other), } (instance, sig) - }, - ty::TyFnDef(def_id, substs) => (eval_context::resolve(self.tcx, def_id, substs), func_ty.fn_sig(self.tcx)), + } + ty::TyFnDef(def_id, substs) => ( + eval_context::resolve(self.tcx, def_id, substs), + func_ty.fn_sig(self.tcx), + ), _ => { let msg = format!("can't handle callee of type {:?}", func_ty); return err!(Unimplemented(msg)); } }; let sig = self.erase_lifetimes(&sig); - self.eval_fn_call(fn_def, destination, args, terminator.source_info.span, sig)?; + self.eval_fn_call( + fn_def, + destination, + args, + terminator.source_info.span, + sig, + )?; } - Drop { ref location, target, .. } => { + Drop { + ref location, + target, + .. + } => { trace!("TerminatorKind::drop: {:?}, {:?}", location, self.substs()); // FIXME(CTFE): forbid drop in const eval let lval = self.eval_lvalue(location)?; @@ -104,10 +120,21 @@ pub(super) fn eval_terminator( let ty = eval_context::apply_param_substs(self.tcx, self.substs(), &ty); let instance = eval_context::resolve_drop_in_place(self.tcx, ty); - self.drop_lvalue(lval, instance, ty, terminator.source_info.span)?; + self.drop_lvalue( + lval, + instance, + ty, + terminator.source_info.span, + )?; } - Assert { ref cond, expected, ref msg, target, .. } => { + Assert { + ref cond, + expected, + ref msg, + target, + .. + } => { let cond_val = self.eval_operand_to_primval(cond)?.to_bool()?; if expected == cond_val { self.goto_block(target); @@ -122,12 +149,13 @@ pub(super) fn eval_terminator( .expect("can't eval index") .to_u64()?; err!(ArrayIndexOutOfBounds(span, len, index)) - }, - mir::AssertMessage::Math(ref err) => - err!(Math(terminator.source_info.span, err.clone())), - } + } + mir::AssertMessage::Math(ref err) => { + err!(Math(terminator.source_info.span, err.clone())) + } + }; } - }, + } DropAndReplace { .. 
} => unimplemented!(), Resume => unimplemented!(), @@ -144,27 +172,30 @@ fn check_sig_compat( sig: ty::FnSig<'tcx>, real_sig: ty::FnSig<'tcx>, ) -> EvalResult<'tcx, bool> { - fn check_ty_compat<'tcx>( - ty: ty::Ty<'tcx>, - real_ty: ty::Ty<'tcx>, - ) -> bool { - if ty == real_ty { return true; } // This is actually a fast pointer comparison + fn check_ty_compat<'tcx>(ty: ty::Ty<'tcx>, real_ty: ty::Ty<'tcx>) -> bool { + if ty == real_ty { + return true; + } // This is actually a fast pointer comparison return match (&ty.sty, &real_ty.sty) { // Permit changing the pointer type of raw pointers and references as well as // mutability of raw pointers. // TODO: Should not be allowed when fat pointers are involved. (&TypeVariants::TyRawPtr(_), &TypeVariants::TyRawPtr(_)) => true, - (&TypeVariants::TyRef(_, _), &TypeVariants::TyRef(_, _)) => - ty.is_mutable_pointer() == real_ty.is_mutable_pointer(), + (&TypeVariants::TyRef(_, _), &TypeVariants::TyRef(_, _)) => { + ty.is_mutable_pointer() == real_ty.is_mutable_pointer() + } // rule out everything else - _ => false - } + _ => false, + }; } - if sig.abi == real_sig.abi && - sig.variadic == real_sig.variadic && + if sig.abi == real_sig.abi && sig.variadic == real_sig.variadic && sig.inputs_and_output.len() == real_sig.inputs_and_output.len() && - sig.inputs_and_output.iter().zip(real_sig.inputs_and_output).all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) { + sig.inputs_and_output + .iter() + .zip(real_sig.inputs_and_output) + .all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) + { // Definitely good. return Ok(true); } @@ -224,22 +255,15 @@ fn eval_fn_call( M::call_intrinsic(self, instance, arg_operands, ret, ty, layout, target)?; self.dump_local(ret); Ok(()) - }, - ty::InstanceDef::ClosureOnceShim{..} => { + } + ty::InstanceDef::ClosureOnceShim { .. } => { let mut args = Vec::new(); for arg in arg_operands { let arg_val = self.eval_operand(arg)?; let arg_ty = self.operand_ty(arg); args.push((arg_val, arg_ty)); } - if M::eval_fn_call( - self, - instance, - destination, - arg_operands, - span, - sig, - )? { + if M::eval_fn_call(self, instance, destination, arg_operands, span, sig)? { return Ok(()); } let mut arg_locals = self.frame().mir.args_iter(); @@ -250,19 +274,25 @@ fn eval_fn_call( let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; self.write_value(arg_val, dest, arg_ty)?; } - }, + } // non capture closure as fn ptr // need to inject zst ptr for closure object (aka do nothing) // and need to pack arguments Abi::Rust => { - trace!("arg_locals: {:?}", self.frame().mir.args_iter().collect::>()); + trace!( + "arg_locals: {:?}", + self.frame().mir.args_iter().collect::>() + ); trace!("arg_operands: {:?}", arg_operands); let local = arg_locals.nth(1).unwrap(); for (i, (arg_val, arg_ty)) in args.into_iter().enumerate() { - let dest = self.eval_lvalue(&mir::Lvalue::Local(local).field(mir::Field::new(i), arg_ty))?; + let dest = self.eval_lvalue(&mir::Lvalue::Local(local).field( + mir::Field::new(i), + arg_ty, + ))?; self.write_value(arg_val, dest, arg_ty)?; } - }, + } _ => bug!("bad ABI for ClosureOnceShim: {:?}", sig.abi), } Ok(()) @@ -276,27 +306,24 @@ fn eval_fn_call( } // Push the stack frame, and potentially be entirely done if the call got hooked - if M::eval_fn_call( - self, - instance, - destination, - arg_operands, - span, - sig, - )? { + if M::eval_fn_call(self, instance, destination, arg_operands, span, sig)? 
{ return Ok(()); } // Pass the arguments let mut arg_locals = self.frame().mir.args_iter(); trace!("ABI: {:?}", sig.abi); - trace!("arg_locals: {:?}", self.frame().mir.args_iter().collect::>()); + trace!( + "arg_locals: {:?}", + self.frame().mir.args_iter().collect::>() + ); trace!("arg_operands: {:?}", arg_operands); match sig.abi { Abi::RustCall => { assert_eq!(args.len(), 2); - { // write first argument + { + // write first argument let first_local = arg_locals.next().unwrap(); let dest = self.eval_lvalue(&mir::Lvalue::Local(first_local))?; let (arg_val, arg_ty) = args.remove(0); @@ -306,37 +333,58 @@ fn eval_fn_call( // unpack and write all other args let (arg_val, arg_ty) = args.remove(0); let layout = self.type_layout(arg_ty)?; - if let (&ty::TyTuple(fields, _), &Layout::Univariant { ref variant, .. }) = (&arg_ty.sty, layout) { + if let (&ty::TyTuple(fields, _), + &Layout::Univariant { ref variant, .. }) = (&arg_ty.sty, layout) + { trace!("fields: {:?}", fields); if self.frame().mir.args_iter().count() == fields.len() + 1 { let offsets = variant.offsets.iter().map(|s| s.bytes()); match arg_val { Value::ByRef(PtrAndAlign { ptr, aligned }) => { - assert!(aligned, "Unaligned ByRef-values cannot occur as function arguments"); - for ((offset, ty), arg_local) in offsets.zip(fields).zip(arg_locals) { + assert!( + aligned, + "Unaligned ByRef-values cannot occur as function arguments" + ); + for ((offset, ty), arg_local) in + offsets.zip(fields).zip(arg_locals) + { let arg = Value::by_ref(ptr.offset(offset, &self)?); - let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; - trace!("writing arg {:?} to {:?} (type: {})", arg, dest, ty); + let dest = + self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; + trace!( + "writing arg {:?} to {:?} (type: {})", + arg, + dest, + ty + ); self.write_value(arg, dest, ty)?; } - }, - Value::ByVal(PrimVal::Undef) => {}, + } + Value::ByVal(PrimVal::Undef) => {} other => { assert_eq!(fields.len(), 1); - let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_locals.next().unwrap()))?; + let dest = self.eval_lvalue(&mir::Lvalue::Local( + arg_locals.next().unwrap(), + ))?; self.write_value(other, dest, fields[0])?; } } } else { trace!("manual impl of rust-call ABI"); // called a manual impl of a rust-call function - let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_locals.next().unwrap()))?; + let dest = self.eval_lvalue( + &mir::Lvalue::Local(arg_locals.next().unwrap()), + )?; self.write_value(arg_val, dest, arg_ty)?; } } else { - bug!("rust-call ABI tuple argument was {:?}, {:?}", arg_ty, layout); + bug!( + "rust-call ABI tuple argument was {:?}, {:?}", + arg_ty, + layout + ); } - }, + } _ => { for (arg_local, (arg_val, arg_ty)) in arg_locals.zip(args) { let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; @@ -345,7 +393,7 @@ fn eval_fn_call( } } Ok(()) - }, + } ty::InstanceDef::DropGlue(..) => { assert_eq!(arg_operands.len(), 1); assert_eq!(sig.abi, Abi::Rust); @@ -361,7 +409,7 @@ fn eval_fn_call( _ => bug!("can only deref pointer types"), }; self.drop(val, instance, pointee_type, span) - }, + } ty::InstanceDef::FnPtrShim(..) => { trace!("ABI: {}", sig.abi); let mut args = Vec::new(); @@ -370,22 +418,15 @@ fn eval_fn_call( let arg_ty = self.operand_ty(arg); args.push((arg_val, arg_ty)); } - if M::eval_fn_call( - self, - instance, - destination, - arg_operands, - span, - sig, - )? { + if M::eval_fn_call(self, instance, destination, arg_operands, span, sig)? 
{ return Ok(()); } let arg_locals = self.frame().mir.args_iter(); match sig.abi { Abi::Rust => { args.remove(0); - }, - Abi::RustCall => {}, + } + Abi::RustCall => {} _ => unimplemented!(), }; for (arg_local, (arg_val, arg_ty)) in arg_locals.zip(args) { @@ -393,43 +434,56 @@ fn eval_fn_call( self.write_value(arg_val, dest, arg_ty)?; } Ok(()) - }, + } ty::InstanceDef::Virtual(_, idx) => { let ptr_size = self.memory.pointer_size(); - let (_, vtable) = self.eval_operand(&arg_operands[0])?.into_ptr_vtable_pair(&self.memory)?; - let fn_ptr = self.memory.read_ptr(vtable.offset(ptr_size * (idx as u64 + 3), &self)?)?; + let (_, vtable) = self.eval_operand(&arg_operands[0])?.into_ptr_vtable_pair( + &self.memory, + )?; + let fn_ptr = self.memory.read_ptr( + vtable.offset(ptr_size * (idx as u64 + 3), &self)?, + )?; let instance = self.memory.get_fn(fn_ptr.to_ptr()?)?; let mut arg_operands = arg_operands.to_vec(); let ty = self.operand_ty(&arg_operands[0]); let ty = self.get_field_ty(ty, 0)?.ty; // TODO: packed flag is ignored match arg_operands[0] { - mir::Operand::Consume(ref mut lval) => *lval = lval.clone().field(mir::Field::new(0), ty), + mir::Operand::Consume(ref mut lval) => { + *lval = lval.clone().field(mir::Field::new(0), ty) + } _ => bug!("virtual call first arg cannot be a constant"), } // recurse with concrete function - self.eval_fn_call( - instance, - destination, - &arg_operands, - span, - sig, - ) - }, + self.eval_fn_call(instance, destination, &arg_operands, span, sig) + } } } - pub fn read_discriminant_value(&self, adt_ptr: MemoryPointer, adt_ty: Ty<'tcx>) -> EvalResult<'tcx, u128> { + pub fn read_discriminant_value( + &self, + adt_ptr: MemoryPointer, + adt_ty: Ty<'tcx>, + ) -> EvalResult<'tcx, u128> { use rustc::ty::layout::Layout::*; let adt_layout = self.type_layout(adt_ty)?; //trace!("read_discriminant_value {:#?}", adt_layout); let discr_val = match *adt_layout { - General { discr, .. } | CEnum { discr, signed: false, .. } => { + General { discr, .. } | + CEnum { + discr, + signed: false, + .. + } => { let discr_size = discr.size().bytes(); self.memory.read_uint(adt_ptr, discr_size)? } - CEnum { discr, signed: true, .. } => { + CEnum { + discr, + signed: true, + .. + } => { let discr_size = discr.size().bytes(); self.memory.read_int(adt_ptr, discr_size)? as u128 } @@ -437,32 +491,62 @@ pub fn read_discriminant_value(&self, adt_ptr: MemoryPointer, adt_ty: Ty<'tcx>) RawNullablePointer { nndiscr, value } => { let discr_size = value.size(&self.tcx.data_layout).bytes(); trace!("rawnullablepointer with size {}", discr_size); - self.read_nonnull_discriminant_value(adt_ptr, nndiscr as u128, discr_size)? + self.read_nonnull_discriminant_value( + adt_ptr, + nndiscr as u128, + discr_size, + )? } - StructWrappedNullablePointer { nndiscr, ref discrfield_source, .. } => { - let (offset, TyAndPacked { ty, packed }) = self.nonnull_offset_and_ty(adt_ty, nndiscr, discrfield_source)?; + StructWrappedNullablePointer { + nndiscr, + ref discrfield_source, + .. + } => { + let (offset, TyAndPacked { ty, packed }) = self.nonnull_offset_and_ty( + adt_ty, + nndiscr, + discrfield_source, + )?; let nonnull = adt_ptr.offset(offset.bytes(), &*self)?; trace!("struct wrapped nullable pointer type: {}", ty); // only the pointer part of a fat pointer is used for this space optimization - let discr_size = self.type_size(ty)?.expect("bad StructWrappedNullablePointer discrfield"); - self.read_maybe_aligned(!packed, - |ectx| ectx.read_nonnull_discriminant_value(nonnull, nndiscr as u128, discr_size))? 
+ let discr_size = self.type_size(ty)?.expect( + "bad StructWrappedNullablePointer discrfield", + ); + self.read_maybe_aligned(!packed, |ectx| { + ectx.read_nonnull_discriminant_value(nonnull, nndiscr as u128, discr_size) + })? } // The discriminant_value intrinsic returns 0 for non-sum types. - Array { .. } | FatPointer { .. } | Scalar { .. } | Univariant { .. } | - Vector { .. } | UntaggedUnion { .. } => 0, + Array { .. } | + FatPointer { .. } | + Scalar { .. } | + Univariant { .. } | + Vector { .. } | + UntaggedUnion { .. } => 0, }; Ok(discr_val) } - fn read_nonnull_discriminant_value(&self, ptr: MemoryPointer, nndiscr: u128, discr_size: u64) -> EvalResult<'tcx, u128> { - trace!("read_nonnull_discriminant_value: {:?}, {}, {}", ptr, nndiscr, discr_size); + fn read_nonnull_discriminant_value( + &self, + ptr: MemoryPointer, + nndiscr: u128, + discr_size: u64, + ) -> EvalResult<'tcx, u128> { + trace!( + "read_nonnull_discriminant_value: {:?}, {}, {}", + ptr, + nndiscr, + discr_size + ); let not_null = match self.memory.read_uint(ptr, discr_size) { Ok(0) => false, - Ok(_) | Err(EvalError{ kind: EvalErrorKind::ReadPointerAsBytes, .. }) => true, + Ok(_) | + Err(EvalError { kind: EvalErrorKind::ReadPointerAsBytes, .. }) => true, Err(e) => return Err(e), }; assert!(nndiscr == 0 || nndiscr == 1); diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs index 3b642591917..07d7de854b9 100644 --- a/src/librustc_mir/interpret/traits.rs +++ b/src/librustc_mir/interpret/traits.rs @@ -5,16 +5,14 @@ use syntax::codemap::DUMMY_SP; use syntax::ast::{self, Mutability}; -use super::{ - EvalResult, - EvalContext, eval_context, - MemoryPointer, MemoryKind, - Value, PrimVal, - Machine, -}; +use super::{EvalResult, EvalContext, eval_context, MemoryPointer, MemoryKind, Value, PrimVal, + Machine}; impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { - pub(crate) fn fulfill_obligation(&self, trait_ref: ty::PolyTraitRef<'tcx>) -> traits::Vtable<'tcx, ()> { + pub(crate) fn fulfill_obligation( + &self, + trait_ref: ty::PolyTraitRef<'tcx>, + ) -> traits::Vtable<'tcx, ()> { // Do the initial selection for the obligation. This yields the shallow result we are // looking for -- that is, what specific impl. self.tcx.infer_ctxt().enter(|infcx| { @@ -43,15 +41,25 @@ pub(crate) fn fulfill_obligation(&self, trait_ref: ty::PolyTraitRef<'tcx>) -> tr /// The `trait_ref` encodes the erased self type. Hence if we are /// making an object `Foo` from a value of type `Foo`, then /// `trait_ref` would map `T:Trait`. 
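// ---- Editor's note (not part of the patch): a standalone illustration of the
// layouts the discriminant code above decodes. For enums like Option<&T>,
// rustc stores no separate tag: the all-zero bit pattern of the non-nullable
// pointer encodes the "null" variant, so reading the discriminant boils down
// to the null test in `read_nonnull_discriminant_value`. The struct name
// below is the editor's; the second assert reflects rustc's layout in
// practice, not a language guarantee.
use std::mem::size_of;

struct Wrapped<'a> {
    _flag: bool,
    _ptr: &'a u8, // the non-null field the discriminant hides in
}

fn main() {
    // RawNullablePointer: the enum is exactly one pointer wide.
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());

    // StructWrappedNullablePointer: the pointer sits inside a struct, yet the
    // enum still adds no tag on top of the wrapped struct.
    assert_eq!(
        size_of::<Option<Wrapped<'static>>>(),
        size_of::<Wrapped<'static>>()
    );
}
// ---- end editor's note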
- pub fn get_vtable(&mut self, ty: Ty<'tcx>, trait_ref: ty::PolyTraitRef<'tcx>) -> EvalResult<'tcx, MemoryPointer> { + pub fn get_vtable( + &mut self, + ty: Ty<'tcx>, + trait_ref: ty::PolyTraitRef<'tcx>, + ) -> EvalResult<'tcx, MemoryPointer> { debug!("get_vtable(trait_ref={:?})", trait_ref); - let size = self.type_size(trait_ref.self_ty())?.expect("can't create a vtable for an unsized type"); + let size = self.type_size(trait_ref.self_ty())?.expect( + "can't create a vtable for an unsized type", + ); let align = self.type_align(trait_ref.self_ty())?; let ptr_size = self.memory.pointer_size(); let methods = ::rustc::traits::get_vtable_methods(self.tcx, trait_ref); - let vtable = self.memory.allocate(ptr_size * (3 + methods.count() as u64), ptr_size, MemoryKind::UninitializedStatic)?; + let vtable = self.memory.allocate( + ptr_size * (3 + methods.count() as u64), + ptr_size, + MemoryKind::UninitializedStatic, + )?; let drop = eval_context::resolve_drop_in_place(self.tcx, ty); let drop = self.memory.create_fn_alloc(drop); @@ -71,12 +79,18 @@ pub fn get_vtable(&mut self, ty: Ty<'tcx>, trait_ref: ty::PolyTraitRef<'tcx>) -> } } - self.memory.mark_static_initalized(vtable.alloc_id, Mutability::Mutable)?; + self.memory.mark_static_initalized( + vtable.alloc_id, + Mutability::Mutable, + )?; Ok(vtable) } - pub fn read_drop_type_from_vtable(&self, vtable: MemoryPointer) -> EvalResult<'tcx, Option>> { + pub fn read_drop_type_from_vtable( + &self, + vtable: MemoryPointer, + ) -> EvalResult<'tcx, Option>> { // we don't care about the pointee type, we just want a pointer match self.read_ptr(vtable, self.tcx.mk_nil_ptr())? { // some values don't need to call a drop impl, so the value is null @@ -86,10 +100,15 @@ pub fn read_drop_type_from_vtable(&self, vtable: MemoryPointer) -> EvalResult<'t } } - pub fn read_size_and_align_from_vtable(&self, vtable: MemoryPointer) -> EvalResult<'tcx, (u64, u64)> { + pub fn read_size_and_align_from_vtable( + &self, + vtable: MemoryPointer, + ) -> EvalResult<'tcx, (u64, u64)> { let pointer_size = self.memory.pointer_size(); let size = self.memory.read_usize(vtable.offset(pointer_size, self)?)?; - let align = self.memory.read_usize(vtable.offset(pointer_size * 2, self)?)?; + let align = self.memory.read_usize( + vtable.offset(pointer_size * 2, self)?, + )?; Ok((size, align)) } @@ -103,8 +122,11 @@ pub(crate) fn resolve_associated_const( let vtable = self.fulfill_obligation(trait_ref); if let traits::VtableImpl(vtable_impl) = vtable { let name = self.tcx.item_name(def_id); - let assoc_const_opt = self.tcx.associated_items(vtable_impl.impl_def_id) - .find(|item| item.kind == ty::AssociatedKind::Const && item.name == name); + let assoc_const_opt = self.tcx.associated_items(vtable_impl.impl_def_id).find( + |item| { + item.kind == ty::AssociatedKind::Const && item.name == name + }, + ); if let Some(assoc_const) = assoc_const_opt { return ty::Instance::new(assoc_const.def_id, vtable_impl.substs); } diff --git a/src/librustc_mir/interpret/validation.rs b/src/librustc_mir/interpret/validation.rs index 6a8df1b5246..20b601b538c 100644 --- a/src/librustc_mir/interpret/validation.rs +++ b/src/librustc_mir/interpret/validation.rs @@ -8,14 +8,8 @@ use rustc::traits::Reveal; use rustc::middle::region::CodeExtent; -use super::{ - EvalError, EvalResult, EvalErrorKind, - EvalContext, DynamicLifetime, - AccessKind, - Value, - Lvalue, LvalueExtra, - Machine, -}; +use super::{EvalError, EvalResult, EvalErrorKind, EvalContext, DynamicLifetime, AccessKind, Value, + Lvalue, LvalueExtra, Machine}; 
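// ---- Editor's note (not part of the patch): the traits.rs hunks above build
// the vtable that the Virtual-call case in terminator/mod.rs reads back: a
// three-slot header (drop glue, size, align) followed by the trait methods,
// one pointer per slot. The sketch below (helper name is the editor's)
// restates that slot arithmetic; the `3` and the field order come from
// `get_vtable` and `read_size_and_align_from_vtable`.
use std::mem::size_of;

fn vtable_slot_offset(ptr_size: u64, method_idx: u64) -> u64 {
    // layout: [0] drop glue, [1] size, [2] align, [3..] trait methods
    ptr_size * (method_idx + 3)
}

fn main() {
    let ptr_size = size_of::<usize>() as u64;
    // A virtual call to method `idx` loads the pointer right after the header:
    assert_eq!(vtable_slot_offset(ptr_size, 0), 3 * ptr_size);
    assert_eq!(vtable_slot_offset(ptr_size, 1), 4 * ptr_size);

    // Trait objects and slices are fat pointers (two words): exactly the
    // (data, vtable) / (data, len) pairs that `into_ptr_vtable_pair` and
    // `into_slice` later in this patch read back out of memory.
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<&u8>());
}
// ---- end editor's note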
pub type ValidationQuery<'tcx> = ValidationOperand<'tcx, Lvalue>; @@ -39,7 +33,11 @@ fn acquiring(self) -> bool { // Validity checks impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> { - pub(crate) fn validation_op(&mut self, op: ValidationOp, operand: &ValidationOperand<'tcx, mir::Lvalue<'tcx>>) -> EvalResult<'tcx> { + pub(crate) fn validation_op( + &mut self, + op: ValidationOp, + operand: &ValidationOperand<'tcx, mir::Lvalue<'tcx>>, + ) -> EvalResult<'tcx> { // If mir-emit-validate is set to 0 (i.e., disabled), we may still see validation commands // because other crates may have been compiled with mir-emit-validate > 0. Ignore those // commands. This makes mir-emit-validate also a flag to control whether miri will do @@ -73,14 +71,19 @@ pub(crate) fn validation_op(&mut self, op: ValidationOp, operand: &ValidationOpe // Now test let name = self.stack[self.cur_frame()].instance.to_string(); if RE.is_match(&name) { - return Ok(()) + return Ok(()); } } // We need to monomorphize ty *without* erasing lifetimes let ty = operand.ty.subst(self.tcx, self.substs()); let lval = self.eval_lvalue(&operand.lval)?; - let query = ValidationQuery { lval, ty, re: operand.re, mutbl: operand.mutbl }; + let query = ValidationQuery { + lval, + ty, + re: operand.re, + mutbl: operand.mutbl, + }; // Check the mode, and also perform mode-specific operations let mode = match op { @@ -88,9 +91,14 @@ pub(crate) fn validation_op(&mut self, op: ValidationOp, operand: &ValidationOpe ValidationOp::Release => ValidationMode::ReleaseUntil(None), ValidationOp::Suspend(ce) => { if query.mutbl == MutMutable { - let lft = DynamicLifetime { frame: self.cur_frame(), region: Some(ce) }; + let lft = DynamicLifetime { + frame: self.cur_frame(), + region: Some(ce), + }; trace!("Suspending {:?} until {:?}", query, ce); - self.suspended.entry(lft).or_insert_with(Vec::new).push(query.clone()); + self.suspended.entry(lft).or_insert_with(Vec::new).push( + query.clone(), + ); } ValidationMode::ReleaseUntil(Some(ce)) } @@ -101,7 +109,10 @@ pub(crate) fn validation_op(&mut self, op: ValidationOp, operand: &ValidationOpe pub(crate) fn end_region(&mut self, ce: CodeExtent) -> EvalResult<'tcx> { self.memory.locks_lifetime_ended(Some(ce)); // Recover suspended lvals - let lft = DynamicLifetime { frame: self.cur_frame(), region: Some(ce) }; + let lft = DynamicLifetime { + frame: self.cur_frame(), + region: Some(ce), + }; if let Some(queries) = self.suspended.remove(&lft) { for query in queries { trace!("Recovering {:?} from suspension", query); @@ -118,16 +129,19 @@ fn normalize_type_unerased(&self, ty: Ty<'tcx>) -> Ty<'tcx> { // We copy a bunch of stuff from rustc/infer/mod.rs to be able to tweak its behavior fn normalize_projections_in<'a, 'gcx, 'tcx, T>( - self_: &InferCtxt<'a, 'gcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - value: &T) - -> T::Lifted - where T: TypeFoldable<'tcx> + ty::Lift<'gcx> + self_: &InferCtxt<'a, 'gcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + value: &T, + ) -> T::Lifted + where + T: TypeFoldable<'tcx> + ty::Lift<'gcx>, { let mut selcx = traits::SelectionContext::new(self_); let cause = traits::ObligationCause::dummy(); - let traits::Normalized { value: result, obligations } = - traits::normalize(&mut selcx, param_env, cause, value); + let traits::Normalized { + value: result, + obligations, + } = traits::normalize(&mut selcx, param_env, cause, value); let mut fulfill_cx = traits::FulfillmentContext::new(); @@ -139,12 +153,13 @@ fn normalize_projections_in<'a, 'gcx, 'tcx, T>( } fn 
drain_fulfillment_cx_or_panic<'a, 'gcx, 'tcx, T>( - self_: &InferCtxt<'a, 'gcx, 'tcx>, - span: Span, - fulfill_cx: &mut traits::FulfillmentContext<'tcx>, - result: &T) - -> T::Lifted - where T: TypeFoldable<'tcx> + ty::Lift<'gcx> + self_: &InferCtxt<'a, 'gcx, 'tcx>, + span: Span, + fulfill_cx: &mut traits::FulfillmentContext<'tcx>, + result: &T, + ) -> T::Lifted + where + T: TypeFoldable<'tcx> + ty::Lift<'gcx>, { // In principle, we only need to do this so long as `result` // contains unbound type parameters. It could be a slight @@ -152,13 +167,23 @@ fn drain_fulfillment_cx_or_panic<'a, 'gcx, 'tcx, T>( match fulfill_cx.select_all_or_error(self_) { Ok(()) => { } Err(errors) => { - span_bug!(span, "Encountered errors `{:?}` resolving bounds after type-checking", - errors); + span_bug!( + span, + "Encountered errors `{:?}` resolving bounds after type-checking", + errors + ); } } let result = self_.resolve_type_vars_if_possible(result); - let result = self_.tcx.fold_regions(&result, &mut false, |r, _| match *r { ty::ReVar(_) => self_.tcx.types.re_erased, _ => r }); + let result = self_.tcx.fold_regions( + &result, + &mut false, + |r, _| match *r { + ty::ReVar(_) => self_.tcx.types.re_erased, + _ => r, + }, + ); match self_.tcx.lift_to_global(&result) { Some(result) => result, @@ -169,10 +194,11 @@ fn drain_fulfillment_cx_or_panic<'a, 'gcx, 'tcx, T>( } trait MyTransNormalize<'gcx>: TypeFoldable<'gcx> { - fn my_trans_normalize<'a, 'tcx>(&self, - infcx: &InferCtxt<'a, 'gcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>) - -> Self; + fn my_trans_normalize<'a, 'tcx>( + &self, + infcx: &InferCtxt<'a, 'gcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + ) -> Self; } macro_rules! items { ($($item:item)+) => ($($item)+) } @@ -200,7 +226,8 @@ fn my_trans_normalize<'a, 'tcx>(&self, ); fn normalize_associated_type<'a, 'tcx, T>(self_: TyCtxt<'a, 'tcx, 'tcx>, value: &T) -> T - where T: MyTransNormalize<'tcx> + where + T: MyTransNormalize<'tcx>, { let param_env = ty::ParamEnv::empty(Reveal::All); @@ -225,12 +252,26 @@ fn validate_variant( for (idx, field) in variant.fields.iter().enumerate() { let field_ty = field.ty(self.tcx, subst); let field_lvalue = self.lvalue_field(query.lval, idx, query.ty, field_ty)?; - self.validate(ValidationQuery { lval: field_lvalue, ty: field_ty, ..query }, mode)?; + self.validate( + ValidationQuery { + lval: field_lvalue, + ty: field_ty, + ..query + }, + mode, + )?; } Ok(()) } - fn validate_ptr(&mut self, val: Value, pointee_ty: Ty<'tcx>, re: Option, mutbl: Mutability, mode: ValidationMode) -> EvalResult<'tcx> { + fn validate_ptr( + &mut self, + val: Value, + pointee_ty: Ty<'tcx>, + re: Option, + mutbl: Mutability, + mode: ValidationMode, + ) -> EvalResult<'tcx> { // Check alignment and non-NULLness let (_, align) = self.size_and_align_of_dst(pointee_ty, val)?; let ptr = val.into_ptr(&self.memory)?; @@ -238,30 +279,39 @@ fn validate_ptr(&mut self, val: Value, pointee_ty: Ty<'tcx>, re: Option, mode: ValidationMode) -> EvalResult<'tcx> - { + fn validate(&mut self, query: ValidationQuery<'tcx>, mode: ValidationMode) -> EvalResult<'tcx> { match self.try_validate(query, mode) { // ReleaseUntil(None) of an uninitalized variable is a NOP. This is needed because // we have to release the return value of a function; due to destination-passing-style // the callee may directly write there. // TODO: Ideally we would know whether the destination is already initialized, and only // release if it is. But of course that can't even always be statically determined. 
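// ---- Editor's note (not part of the patch): the
// `macro_rules! items { ($($item:item)+) => ($($item)+) }` one-liner kept by
// the hunk above is a common trick: a helper macro that accepts a batch of
// items and emits them unchanged, so another macro (here the one generating
// the MyTransNormalize impls) can splice many generated items through a
// single invocation. A minimal self-contained demonstration, with trait and
// macro names invented by the editor:
macro_rules! items { ($($item:item)+) => ($($item)+) }

trait Describe {
    fn describe() -> &'static str;
}

macro_rules! impl_describe {
    ($($t:ty => $name:expr,)+) => {
        items! {
            $(impl Describe for $t {
                fn describe() -> &'static str { $name }
            })+
        }
    };
}

impl_describe! {
    u8 => "u8",
    bool => "bool",
}

fn main() {
    assert_eq!(u8::describe(), "u8");
    assert_eq!(bool::describe(), "bool");
}
// ---- end editor's note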
- Err(EvalError{ kind: EvalErrorKind::ReadUndefBytes, ..}) - if mode == ValidationMode::ReleaseUntil(None) - => { + Err(EvalError { kind: EvalErrorKind::ReadUndefBytes, .. }) + if mode == ValidationMode::ReleaseUntil(None) => { return Ok(()); } res => res, } } - fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMode) -> EvalResult<'tcx> - { + fn try_validate( + &mut self, + mut query: ValidationQuery<'tcx>, + mode: ValidationMode, + ) -> EvalResult<'tcx> { use rustc::ty::TypeVariants::*; use rustc::ty::RegionKind::*; use rustc::ty::AdtKind; @@ -284,12 +334,13 @@ fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMod Lvalue::Local { frame, local } => { let res = self.stack[frame].get_local(local); match (res, mode) { - (Err(EvalError{ kind: EvalErrorKind::DeadLocal, ..}), ValidationMode::Recover(_)) => { + (Err(EvalError { kind: EvalErrorKind::DeadLocal, .. }), + ValidationMode::Recover(_)) => { return Ok(()); } - _ => {}, + _ => {} } - }, + } _ => {} } @@ -300,12 +351,14 @@ fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMod // just assembles pieces (that each own their memory) together to a larger whole. // TODO: Currently, we don't acquire locks for padding and discriminants. We should. let is_owning = match query.ty.sty { - TyInt(_) | TyUint(_) | TyRawPtr(_) | - TyBool | TyFloat(_) | TyChar | TyStr | + TyInt(_) | TyUint(_) | TyRawPtr(_) | TyBool | TyFloat(_) | TyChar | TyStr | TyRef(..) | TyFnPtr(..) | TyFnDef(..) | TyNever => true, TyAdt(adt, _) if adt.is_box() => true, - TySlice(_) | TyAdt(_, _) | TyTuple(..) | TyClosure(..) | TyArray(..) | TyDynamic(..) => false, - TyParam(_) | TyInfer(_) | TyProjection(_) | TyAnon(..) | TyError => bug!("I got an incomplete/unnormalized type for validation"), + TySlice(_) | TyAdt(_, _) | TyTuple(..) | TyClosure(..) | TyArray(..) | + TyDynamic(..) => false, + TyParam(_) | TyInfer(_) | TyProjection(_) | TyAnon(..) | TyError => { + bug!("I got an incomplete/unnormalized type for validation") + } }; if is_owning { // We need to lock. So we need memory. So we have to force_acquire. @@ -322,7 +375,11 @@ fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMod } None => { // The only unsized typ we concider "owning" is TyStr. - assert_eq!(query.ty.sty, TyStr, "Found a surprising unsized owning type"); + assert_eq!( + query.ty.sty, + TyStr, + "Found a surprising unsized owning type" + ); // The extra must be the length, in bytes. match extra { LvalueExtra::Length(len) => len, @@ -334,20 +391,45 @@ fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMod if len > 0 { let ptr = ptr.to_ptr()?; match query.mutbl { - MutImmutable => + MutImmutable => { if mode.acquiring() { - self.memory.acquire_lock(ptr, len, query.re, AccessKind::Read)?; + self.memory.acquire_lock( + ptr, + len, + query.re, + AccessKind::Read, + )?; } - // No releasing of read locks, ever. - MutMutable => + } + // No releasing of read locks, ever. + MutMutable => { match mode { - ValidationMode::Acquire => - self.memory.acquire_lock(ptr, len, query.re, AccessKind::Write)?, - ValidationMode::Recover(ending_ce) => - self.memory.recover_write_lock(ptr, len, query.re, ending_ce)?, - ValidationMode::ReleaseUntil(suspended_ce) => - self.memory.suspend_write_lock(ptr, len, query.re, suspended_ce)?, + ValidationMode::Acquire => { + self.memory.acquire_lock( + ptr, + len, + query.re, + AccessKind::Write, + )? 
+ } + ValidationMode::Recover(ending_ce) => { + self.memory.recover_write_lock( + ptr, + len, + query.re, + ending_ce, + )? + } + ValidationMode::ReleaseUntil(suspended_ce) => { + self.memory.suspend_write_lock( + ptr, + len, + query.re, + suspended_ce, + )? + } } + } } } } @@ -362,10 +444,12 @@ fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMod // TODO: Check if these are valid bool/float/codepoint/UTF-8, respectively (and in particular, not undef). Ok(()) } - TyNever => { - err!(ValidationFailure(format!("The empty type is never valid."))) - } - TyRef(region, ty::TypeAndMut { ty: pointee_ty, mutbl }) => { + TyNever => err!(ValidationFailure(format!("The empty type is never valid."))), + TyRef(region, + ty::TypeAndMut { + ty: pointee_ty, + mutbl, + }) => { let val = self.read_lvalue(query.lval)?; // Sharing restricts our context if mutbl == MutImmutable { @@ -378,7 +462,7 @@ fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMod ReScope(ce) => query.re = Some(ce), // It is possible for us to encounter erased lifetimes here because the lifetimes in // this functions' Subst will be erased. - _ => {}, + _ => {} } } self.validate_ptr(val, pointee_ty, query.re, query.mutbl, mode) @@ -388,7 +472,9 @@ fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMod self.validate_ptr(val, query.ty.boxed_ty(), query.re, query.mutbl, mode) } TyFnPtr(_sig) => { - let ptr = self.read_lvalue(query.lval)?.into_ptr(&self.memory)?.to_ptr()?; + let ptr = self.read_lvalue(query.lval)? + .into_ptr(&self.memory)? + .to_ptr()?; self.memory.get_fn(ptr)?; // TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?). Ok(()) @@ -403,18 +489,37 @@ fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMod TySlice(elem_ty) => { let len = match query.lval { Lvalue::Ptr { extra: LvalueExtra::Length(len), .. } => len, - _ => bug!("acquire_valid of a TySlice given non-slice lvalue: {:?}", query.lval), + _ => { + bug!( + "acquire_valid of a TySlice given non-slice lvalue: {:?}", + query.lval + ) + } }; for i in 0..len { let inner_lvalue = self.lvalue_index(query.lval, query.ty, i)?; - self.validate(ValidationQuery { lval: inner_lvalue, ty: elem_ty, ..query }, mode)?; + self.validate( + ValidationQuery { + lval: inner_lvalue, + ty: elem_ty, + ..query + }, + mode, + )?; } Ok(()) } TyArray(elem_ty, len) => { for i in 0..len { let inner_lvalue = self.lvalue_index(query.lval, query.ty, i as u64)?; - self.validate(ValidationQuery { lval: inner_lvalue, ty: elem_ty, ..query }, mode)?; + self.validate( + ValidationQuery { + lval: inner_lvalue, + ty: elem_ty, + ..query + }, + mode, + )?; } Ok(()) } @@ -422,7 +527,12 @@ fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMod // Check that this is a valid vtable let vtable = match query.lval { Lvalue::Ptr { extra: LvalueExtra::Vtable(vtable), .. } => vtable, - _ => bug!("acquire_valid of a TyDynamic given non-trait-object lvalue: {:?}", query.lval), + _ => { + bug!( + "acquire_valid of a TyDynamic given non-trait-object lvalue: {:?}", + query.lval + ) + } }; self.read_size_and_align_from_vtable(vtable)?; // TODO: Check that the vtable contains all the function pointers we expect it to have. 
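// ---- Editor's note (not part of the patch): the validation hunks above are
// mostly reindentation of a mode dispatch that is easy to lose in the diff
// noise. The sketch below restates it with simplified stand-in types (all
// names are the editor's): shared references only ever take read locks, and
// never release them, while mutable references acquire, recover, or suspend
// a write lock depending on the ValidationMode. That `acquiring()` covers
// both Acquire and Recover is the editor's reading; its body is not visible
// in these hunks.
#[derive(Copy, Clone, Debug, PartialEq)]
enum Mutability { Immutable, Mutable }

#[derive(Copy, Clone, Debug, PartialEq)]
enum Mode { Acquire, Recover, ReleaseUntil }

#[derive(Debug, PartialEq)]
enum LockAction { AcquireRead, AcquireWrite, RecoverWrite, SuspendWrite, Nothing }

fn acquiring(mode: Mode) -> bool {
    match mode {
        Mode::Acquire | Mode::Recover => true, // assumption, see note above
        Mode::ReleaseUntil => false,
    }
}

fn lock_action(mutbl: Mutability, mode: Mode) -> LockAction {
    match mutbl {
        Mutability::Immutable => {
            // "No releasing of read locks, ever."
            if acquiring(mode) { LockAction::AcquireRead } else { LockAction::Nothing }
        }
        Mutability::Mutable => match mode {
            Mode::Acquire => LockAction::AcquireWrite,
            Mode::Recover => LockAction::RecoverWrite,
            Mode::ReleaseUntil => LockAction::SuspendWrite,
        },
    }
}

fn main() {
    assert_eq!(lock_action(Mutability::Immutable, Mode::ReleaseUntil), LockAction::Nothing);
    assert_eq!(lock_action(Mutability::Mutable, Mode::ReleaseUntil), LockAction::SuspendWrite);
}
// ---- end editor's note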
@@ -433,7 +543,9 @@ fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMod Ok(()) } TyAdt(adt, subst) => { - if Some(adt.did) == self.tcx.lang_items.unsafe_cell_type() && query.mutbl == MutImmutable { + if Some(adt.did) == self.tcx.lang_items.unsafe_cell_type() && + query.mutbl == MutImmutable + { // No locks for shared unsafe cells. Also no other validation, the only field is private anyway. return Ok(()); } @@ -445,8 +557,9 @@ fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMod let discr = self.read_discriminant_value(ptr, query.ty)?; // Get variant index for discriminant - let variant_idx = adt.discriminants(self.tcx) - .position(|variant_discr| variant_discr.to_u128_unchecked() == discr); + let variant_idx = adt.discriminants(self.tcx).position(|variant_discr| { + variant_discr.to_u128_unchecked() == discr + }); let variant_idx = match variant_idx { Some(val) => val, None => return err!(InvalidDiscriminant), @@ -456,13 +569,22 @@ fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMod if variant.fields.len() > 0 { // Downcast to this variant, if needed let lval = if adt.variants.len() > 1 { - self.eval_lvalue_projection(query.lval, query.ty, &mir::ProjectionElem::Downcast(adt, variant_idx))? + self.eval_lvalue_projection( + query.lval, + query.ty, + &mir::ProjectionElem::Downcast(adt, variant_idx), + )? } else { query.lval }; // Recursively validate the fields - self.validate_variant(ValidationQuery { lval, ..query} , variant, subst, mode) + self.validate_variant( + ValidationQuery { lval, ..query }, + variant, + subst, + mode, + ) } else { // No fields, nothing left to check. Downcasting may fail, e.g. in case of a CEnum. Ok(()) @@ -481,20 +603,34 @@ fn try_validate(&mut self, mut query: ValidationQuery<'tcx>, mode: ValidationMod TyTuple(ref types, _) => { for (idx, field_ty) in types.iter().enumerate() { let field_lvalue = self.lvalue_field(query.lval, idx, query.ty, field_ty)?; - self.validate(ValidationQuery { lval: field_lvalue, ty: field_ty, ..query }, mode)?; + self.validate( + ValidationQuery { + lval: field_lvalue, + ty: field_ty, + ..query + }, + mode, + )?; } Ok(()) } TyClosure(def_id, ref closure_substs) => { for (idx, field_ty) in closure_substs.upvar_tys(def_id, self.tcx).enumerate() { let field_lvalue = self.lvalue_field(query.lval, idx, query.ty, field_ty)?; - self.validate(ValidationQuery { lval: field_lvalue, ty: field_ty, ..query }, mode)?; + self.validate( + ValidationQuery { + lval: field_lvalue, + ty: field_ty, + ..query + }, + mode, + )?; } // TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?). // Is there other things we can/should check? Like vtable pointers? 
Ok(()) } - _ => bug!("We already establishd that this is a type we support.") + _ => bug!("We already establishd that this is a type we support."), } } } diff --git a/src/librustc_mir/interpret/value.rs b/src/librustc_mir/interpret/value.rs index be6d304d3f5..8424e72fef0 100644 --- a/src/librustc_mir/interpret/value.rs +++ b/src/librustc_mir/interpret/value.rs @@ -3,12 +3,7 @@ use rustc::ty::layout::HasDataLayout; -use super::{ - EvalResult, - Memory, MemoryPointer, HasMemory, PointerArithmetic, - Machine, - PtrAndAlign, -}; +use super::{EvalResult, Memory, MemoryPointer, HasMemory, PointerArithmetic, Machine, PtrAndAlign}; pub(super) fn bytes_to_f32(bytes: u128) -> f32 { f32::from_bits(bytes as u32) @@ -70,8 +65,10 @@ pub fn signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, match self.primval { PrimVal::Bytes(b) => { assert_eq!(b as u64 as u128, b); - Ok(Pointer::from(PrimVal::Bytes(layout.signed_offset(b as u64, i)? as u128))) - }, + Ok(Pointer::from( + PrimVal::Bytes(layout.signed_offset(b as u64, i)? as u128), + )) + } PrimVal::Ptr(ptr) => ptr.signed_offset(i, layout).map(Pointer::from), PrimVal::Undef => err!(ReadUndefBytes), } @@ -82,8 +79,10 @@ pub fn offset(self, i: u64, cx: C) -> EvalResult<'tcx, Self> { match self.primval { PrimVal::Bytes(b) => { assert_eq!(b as u64 as u128, b); - Ok(Pointer::from(PrimVal::Bytes(layout.offset(b as u64, i)? as u128))) - }, + Ok(Pointer::from( + PrimVal::Bytes(layout.offset(b as u64, i)? as u128), + )) + } PrimVal::Ptr(ptr) => ptr.offset(i, layout).map(Pointer::from), PrimVal::Undef => err!(ReadUndefBytes), } @@ -94,8 +93,10 @@ pub fn wrapping_signed_offset(self, i: i64, cx: C) -> EvalResu match self.primval { PrimVal::Bytes(b) => { assert_eq!(b as u64 as u128, b); - Ok(Pointer::from(PrimVal::Bytes(layout.wrapping_signed_offset(b as u64, i) as u128))) - }, + Ok(Pointer::from(PrimVal::Bytes( + layout.wrapping_signed_offset(b as u64, i) as u128, + ))) + } PrimVal::Ptr(ptr) => Ok(Pointer::from(ptr.wrapping_signed_offset(i, layout))), PrimVal::Undef => err!(ReadUndefBytes), } @@ -158,10 +159,9 @@ pub enum PrimValKind { I8, I16, I32, I64, I128, U8, U16, U32, U64, U128, F32, F64, + Ptr, FnPtr, Bool, Char, - Ptr, - FnPtr, } impl<'a, 'tcx: 'a> Value { @@ -172,26 +172,35 @@ pub fn by_ref(ptr: Pointer) -> Self { /// Convert the value into a pointer (or a pointer-sized integer). If the value is a ByRef, /// this may have to perform a load. - pub fn into_ptr>(&self, mem: &Memory<'a, 'tcx, M>) -> EvalResult<'tcx, Pointer> { + pub fn into_ptr>( + &self, + mem: &Memory<'a, 'tcx, M>, + ) -> EvalResult<'tcx, Pointer> { use self::Value::*; match *self { ByRef(PtrAndAlign { ptr, aligned }) => { - mem.read_maybe_aligned(aligned, |mem| mem.read_ptr(ptr.to_ptr()?) 
) - }, - ByVal(ptr) | ByValPair(ptr, _) => Ok(ptr.into()), + mem.read_maybe_aligned(aligned, |mem| mem.read_ptr(ptr.to_ptr()?)) + } + ByVal(ptr) | + ByValPair(ptr, _) => Ok(ptr.into()), } } pub(super) fn into_ptr_vtable_pair>( &self, - mem: &Memory<'a, 'tcx, M> + mem: &Memory<'a, 'tcx, M>, ) -> EvalResult<'tcx, (Pointer, MemoryPointer)> { use self::Value::*; match *self { - ByRef(PtrAndAlign { ptr: ref_ptr, aligned }) => { + ByRef(PtrAndAlign { + ptr: ref_ptr, + aligned, + }) => { mem.read_maybe_aligned(aligned, |mem| { let ptr = mem.read_ptr(ref_ptr.to_ptr()?)?; - let vtable = mem.read_ptr(ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?)?; + let vtable = mem.read_ptr( + ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?, + )?; Ok((ptr, vtable.to_ptr()?)) }) } @@ -203,21 +212,29 @@ pub(super) fn into_ptr_vtable_pair>( } } - pub(super) fn into_slice>(&self, mem: &Memory<'a, 'tcx, M>) -> EvalResult<'tcx, (Pointer, u64)> { + pub(super) fn into_slice>( + &self, + mem: &Memory<'a, 'tcx, M>, + ) -> EvalResult<'tcx, (Pointer, u64)> { use self::Value::*; match *self { - ByRef(PtrAndAlign { ptr: ref_ptr, aligned } ) => { + ByRef(PtrAndAlign { + ptr: ref_ptr, + aligned, + }) => { mem.read_maybe_aligned(aligned, |mem| { let ptr = mem.read_ptr(ref_ptr.to_ptr()?)?; - let len = mem.read_usize(ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?)?; + let len = mem.read_usize( + ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?, + )?; Ok((ptr, len)) }) - }, + } ByValPair(ptr, val) => { let len = val.to_u128()?; assert_eq!(len as u64 as u128, len); Ok((ptr.into(), len as u64)) - }, + } ByVal(PrimVal::Undef) => err!(ReadUndefBytes), ByVal(_) => bug!("expected ptr and length, got {:?}", self), } @@ -349,7 +366,7 @@ pub fn is_signed_int(self) -> bool { } } - pub fn is_float(self) -> bool { + pub fn is_float(self) -> bool { use self::PrimValKind::*; match self { F32 | F64 => true, diff --git a/tests/compiletest.rs b/tests/compiletest.rs index 7d1829adb5a..7493551ecf7 100644 --- a/tests/compiletest.rs +++ b/tests/compiletest.rs @@ -16,7 +16,11 @@ macro_rules! 
eprintln { const MIRI_PATH: &'static str = concat!("target/", env!("PROFILE"), "/miri"); fn compile_fail(sysroot: &Path, path: &str, target: &str, host: &str, fullmir: bool) { - eprintln!("## Running compile-fail tests in {} against miri for target {}", path, target); + eprintln!( + "## Running compile-fail tests in {} against miri for target {}", + path, + target + ); let mut config = compiletest::default_config(); config.mode = "compile-fail".parse().expect("Invalid mode"); config.rustc_path = MIRI_PATH.into(); @@ -26,7 +30,9 @@ fn compile_fail(sysroot: &Path, path: &str, target: &str, host: &str, fullmir: b // skip fullmir on nonhost return; } - let sysroot = Path::new(&std::env::var("HOME").unwrap()).join(".xargo").join("HOST"); + let sysroot = Path::new(&std::env::var("HOME").unwrap()) + .join(".xargo") + .join("HOST"); config.target_rustcflags = Some(format!("--sysroot {}", sysroot.to_str().unwrap())); config.src_base = PathBuf::from(path.to_string()); } else { @@ -50,12 +56,13 @@ fn run_pass(path: &str) { } fn miri_pass(path: &str, target: &str, host: &str, fullmir: bool, opt: bool) { - let opt_str = if opt { - " with optimizations" - } else { - "" - }; - eprintln!("## Running run-pass tests in {} against miri for target {}{}", path, target, opt_str); + let opt_str = if opt { " with optimizations" } else { "" }; + eprintln!( + "## Running run-pass tests in {} against miri for target {}{}", + path, + target, + opt_str + ); let mut config = compiletest::default_config(); config.mode = "mir-opt".parse().expect("Invalid mode"); config.src_base = PathBuf::from(path); @@ -68,7 +75,9 @@ fn miri_pass(path: &str, target: &str, host: &str, fullmir: bool, opt: bool) { // skip fullmir on nonhost return; } - let sysroot = Path::new(&std::env::var("HOME").unwrap()).join(".xargo").join("HOST"); + let sysroot = Path::new(&std::env::var("HOME").unwrap()) + .join(".xargo") + .join("HOST"); flags.push(format!("--sysroot {}", sysroot.to_str().unwrap())); } if opt { @@ -99,7 +108,9 @@ fn for_all_targets(sysroot: &Path, mut f: F) { let target_dir = sysroot.join("lib").join("rustlib"); for entry in std::fs::read_dir(target_dir).expect("invalid sysroot") { let entry = entry.unwrap(); - if !is_target_dir(entry.path()) { continue; } + if !is_target_dir(entry.path()) { + continue; + } let target = entry.file_name().into_string().unwrap(); f(target); } @@ -125,7 +136,9 @@ fn get_host() -> String { .expect("rustc not found for -vV") .stdout; let host = std::str::from_utf8(&host).expect("sysroot is not utf8"); - let host = host.split("\nhost: ").nth(1).expect("no host: part in rustc -vV"); + let host = host.split("\nhost: ").nth(1).expect( + "no host: part in rustc -vV", + ); let host = host.split('\n').next().expect("no \n after host"); String::from(host) }
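Editor's note: the patch excerpt ends here, inside tests/compiletest.rs. As a
closing illustration (not part of the patch), the sketch below isolates the
string handling that `get_host` above performs on `rustc -vV` output, so it
can be checked without spawning rustc. The function name is the editor's, and
the sample text only imitates the shape of that output; the version numbers
are made up.

fn parse_host(vv_output: &str) -> Option<&str> {
    // `rustc -vV` prints one `key: value` pair per line; take what follows
    // "host: " and cut it at the end of that line, as get_host() does.
    vv_output.split("\nhost: ").nth(1)?.split('\n').next()
}

fn main() {
    let sample = "rustc 1.21.0-nightly (0000000 2017-08-31)\n\
                  binary: rustc\n\
                  host: x86_64-unknown-linux-gnu\n\
                  release: 1.21.0-nightly";
    assert_eq!(parse_host(sample), Some("x86_64-unknown-linux-gnu"));
}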