// src/fn_call.rs — miri shims: emulation of foreign (non-Rust) functions and
// of Rust functions whose MIR is unavailable.

use rustc::ty;
use rustc::ty::layout::{Align, LayoutOf, Size};
2018-10-19 09:51:04 +02:00
use rustc::hir::def_id::DefId;
use rustc::mir;
use syntax::attr;
2018-11-01 08:56:41 +01:00
use crate::*;
pub trait EvalContextExt<'tcx, 'mir> {
/// Emulate calling a foreign item, fail if the item is not supported.
/// This function will handle `goto_block` if needed.
fn emulate_foreign_item(
&mut self,
def_id: DefId,
args: &[OpTy<'tcx, Borrow>],
dest: PlaceTy<'tcx, Borrow>,
ret: mir::BasicBlock,
) -> EvalResult<'tcx>;
/// Emulate a function that should have MIR but does not.
/// This is solely to support execution without full MIR.
/// Fail if emulating this function is not supported.
/// This function will handle `goto_block` if needed.
fn emulate_missing_fn(
&mut self,
path: String,
args: &[OpTy<'tcx, Borrow>],
dest: Option<PlaceTy<'tcx, Borrow>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx>;
fn find_fn(
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, Borrow>],
dest: Option<PlaceTy<'tcx, Borrow>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>>;
2017-11-21 13:32:40 +01:00
fn write_null(&mut self, dest: PlaceTy<'tcx, Borrow>) -> EvalResult<'tcx>;
}
2018-10-16 18:01:50 +02:00
impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalContext<'a, 'mir, 'tcx> {
fn find_fn(
    &mut self,
    instance: ty::Instance<'tcx>,
    args: &[OpTy<'tcx, Borrow>],
    dest: Option<PlaceTy<'tcx, Borrow>>,
    ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>> {
    trace!("eval_fn_call: {:#?}, {:?}", instance, dest.map(|place| *place));

    // The hooks shared with CTFE get the first chance.
    if self.hook_fn(instance, args, dest)? {
        self.goto_block(ret)?;
        return Ok(None);
    }

    // `align_offset` is a lang item that CTFE does not hook (yet).
    // FIXME: return a real value in case the target allocation has an
    // alignment bigger than the one requested.
    if Some(instance.def.def_id()) == self.tcx.lang_items().align_offset_fn() {
        let dest = dest.unwrap();
        let max = self.truncate(u128::max_value(), dest.layout);
        self.write_scalar(Scalar::from_uint(max, dest.layout.size), dest)?;
        self.goto_block(ret)?;
        return Ok(None);
    }

    // Foreign items never have MIR; run them through the shims. We can
    // still execute enough of them to make miri viable.
    if self.tcx.is_foreign_item(instance.def_id()) {
        self.emulate_foreign_item(instance.def_id(), args, dest.unwrap(), ret.unwrap())?;
        // `goto_block` already handled.
        return Ok(None);
    }

    // Otherwise we really want to see the MIR -- but if we do not have it,
    // maybe we can emulate something. This is a HACK to support running
    // without a full-MIR libstd.
    match self.load_mir(instance.def) {
        Ok(mir) => Ok(Some(mir)),
        Err(EvalError { kind: EvalErrorKind::NoMirFor(path), .. }) => {
            self.emulate_missing_fn(path, args, dest, ret)?;
            // `goto_block` already handled.
            Ok(None)
        }
        Err(other) => Err(other),
    }
}
fn emulate_foreign_item(
&mut self,
def_id: DefId,
args: &[OpTy<'tcx, Borrow>],
dest: PlaceTy<'tcx, Borrow>,
ret: mir::BasicBlock,
) -> EvalResult<'tcx> {
let attrs = self.tcx.get_attrs(def_id);
2017-09-08 19:10:21 +02:00
let link_name = match attr::first_attr_value_str_by_name(&attrs, "link_name") {
Some(name) => name.as_str(),
None => self.tcx.item_name(def_id).as_str(),
2017-09-08 19:10:21 +02:00
};
let tcx = &{self.tcx.tcx};
// All these functions take raw pointers, so if we access memory directly
// (as opposed to through a place), we have to remember to erase any tag
// that might still hang around!
match &link_name[..] {
"malloc" => {
2018-11-05 08:51:55 +01:00
let size = self.read_scalar(args[0])?.to_usize(self)?;
if size == 0 {
self.write_null(dest)?;
} else {
2018-11-23 09:46:51 +01:00
let align = self.tcx.data_layout.pointer_align.abi;
2018-10-19 19:51:41 +02:00
let ptr = self.memory_mut().allocate(Size::from_bytes(size), align, MiriMemoryKind::C.into())?;
self.write_scalar(Scalar::Ptr(ptr.with_default_tag()), dest)?;
}
}
"free" => {
2018-11-17 10:11:21 +01:00
let ptr = self.read_scalar(args[0])?.not_undef()?;
2018-11-05 08:51:55 +01:00
if !ptr.is_null_ptr(self) {
2018-10-19 19:51:41 +02:00
self.memory_mut().deallocate(
2018-11-17 10:11:21 +01:00
ptr.to_ptr()?,
None,
MiriMemoryKind::C.into(),
)?;
}
}
"__rust_alloc" => {
2018-11-05 08:51:55 +01:00
let size = self.read_scalar(args[0])?.to_usize(self)?;
let align = self.read_scalar(args[1])?.to_usize(self)?;
if size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
2018-10-30 10:41:01 +01:00
let ptr = self.memory_mut()
.allocate(
Size::from_bytes(size),
2018-11-23 09:46:51 +01:00
Align::from_bytes(align).unwrap(),
2018-10-30 10:41:01 +01:00
MiriMemoryKind::Rust.into()
)?
.with_default_tag();
self.write_scalar(Scalar::Ptr(ptr), dest)?;
}
"__rust_alloc_zeroed" => {
2018-11-05 08:51:55 +01:00
let size = self.read_scalar(args[0])?.to_usize(self)?;
let align = self.read_scalar(args[1])?.to_usize(self)?;
if size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
2018-10-30 10:41:01 +01:00
let ptr = self.memory_mut()
.allocate(
Size::from_bytes(size),
2018-11-23 09:46:51 +01:00
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into()
2018-10-30 10:41:01 +01:00
)?
.with_default_tag();
self.memory_mut()
.get_mut(ptr.alloc_id)?
.write_repeat(tcx, ptr, 0, Size::from_bytes(size))?;
self.write_scalar(Scalar::Ptr(ptr), dest)?;
}
"__rust_dealloc" => {
2018-11-17 10:11:21 +01:00
let ptr = self.read_scalar(args[0])?.to_ptr()?;
2018-11-05 08:51:55 +01:00
let old_size = self.read_scalar(args[1])?.to_usize(self)?;
let align = self.read_scalar(args[2])?.to_usize(self)?;
if old_size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
2018-10-19 19:51:41 +02:00
self.memory_mut().deallocate(
2018-11-17 10:11:21 +01:00
ptr,
2018-11-23 09:46:51 +01:00
Some((Size::from_bytes(old_size), Align::from_bytes(align).unwrap())),
MiriMemoryKind::Rust.into(),
)?;
}
"__rust_realloc" => {
2018-11-17 10:11:21 +01:00
let ptr = self.read_scalar(args[0])?.to_ptr()?;
2018-11-05 08:51:55 +01:00
let old_size = self.read_scalar(args[1])?.to_usize(self)?;
let align = self.read_scalar(args[2])?.to_usize(self)?;
let new_size = self.read_scalar(args[3])?.to_usize(self)?;
if old_size == 0 || new_size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
2018-10-19 19:51:41 +02:00
let new_ptr = self.memory_mut().reallocate(
2018-11-17 10:11:21 +01:00
ptr,
Size::from_bytes(old_size),
2018-11-23 09:46:51 +01:00
Align::from_bytes(align).unwrap(),
Size::from_bytes(new_size),
2018-11-23 09:46:51 +01:00
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into(),
)?;
self.write_scalar(Scalar::Ptr(new_ptr.with_default_tag()), dest)?;
}
"syscall" => {
// TODO: read `syscall` ids like `sysconf` ids and
// figure out some way to actually process some of them
//
// libc::syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), GRND_NONBLOCK)
// is called if a `HashMap` is created the regular way.
2018-11-05 08:51:55 +01:00
match self.read_scalar(args[0])?.to_usize(self)? {
318 | 511 => {
return err!(Unimplemented(
"miri does not support random number generators".to_owned(),
))
}
id => {
return err!(Unimplemented(
format!("miri does not support syscall id {}", id),
))
}
}
}
"dlsym" => {
let _handle = self.read_scalar(args[0])?;
2018-11-17 10:11:21 +01:00
let symbol = self.read_scalar(args[1])?.to_ptr()?;
let symbol_name = self.memory().get(symbol.alloc_id)?.read_c_str(tcx, symbol)?;
let err = format!("bad c unicode symbol: {:?}", symbol_name);
let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err);
return err!(Unimplemented(format!(
"miri does not support dynamically loading libraries (requested symbol: {})",
symbol_name
)));
}
"__rust_maybe_catch_panic" => {
// fn __rust_maybe_catch_panic(f: fn(*mut u8), data: *mut u8, data_ptr: *mut usize, vtable_ptr: *mut usize) -> u32
// We abort on panic, so not much is going on here, but we still have to call the closure
let f = self.read_scalar(args[0])?.to_ptr()?;
let data = self.read_scalar(args[1])?.not_undef()?;
2018-10-19 19:51:41 +02:00
let f_instance = self.memory().get_fn(f)?;
self.write_null(dest)?;
2018-08-23 21:22:57 +02:00
trace!("__rust_maybe_catch_panic: {:?}", f_instance);
// Now we make a function call. TODO: Consider making this re-usable? EvalContext::step does sth. similar for the TLS dtors,
// and of course eval_main.
let mir = self.load_mir(f_instance.def)?;
2018-11-05 08:51:55 +01:00
let ret_place = MPlaceTy::dangling(self.layout_of(self.tcx.mk_unit())?, self).into();
self.push_stack_frame(
f_instance,
mir.span,
mir,
Some(ret_place),
StackPopCleanup::Goto(Some(ret)), // directly return to caller
)?;
let mut args = self.frame().mir.args_iter();
2018-07-10 17:32:38 +02:00
let arg_local = args.next().ok_or_else(||
EvalErrorKind::AbiViolation(
"Argument to __rust_maybe_catch_panic does not take enough arguments."
.to_owned(),
),
)?;
2017-12-06 08:39:31 +01:00
let arg_dest = self.eval_place(&mir::Place::Local(arg_local))?;
self.write_scalar(data, arg_dest)?;
assert!(args.next().is_none(), "__rust_maybe_catch_panic argument has more arguments than expected");
// We ourselves will return 0, eventually (because we will not return if we paniced)
self.write_null(dest)?;
// Don't fall through, we do NOT want to `goto_block`!
return Ok(());
}
"__rust_start_panic" =>
return err!(MachineError("the evaluated program panicked".to_string())),
"memcmp" => {
2018-11-17 10:11:21 +01:00
let left = self.read_scalar(args[0])?.not_undef()?;
let right = self.read_scalar(args[1])?.not_undef()?;
2018-11-05 08:51:55 +01:00
let n = Size::from_bytes(self.read_scalar(args[2])?.to_usize(self)?);
let result = {
2018-11-17 10:11:21 +01:00
let left_bytes = self.memory().read_bytes(left, n)?;
let right_bytes = self.memory().read_bytes(right, n)?;
use std::cmp::Ordering::*;
match left_bytes.cmp(right_bytes) {
2018-05-30 14:29:32 +02:00
Less => -1i32,
Equal => 0,
Greater => 1,
}
};
2018-05-26 17:07:34 +02:00
self.write_scalar(
2018-08-26 20:42:26 +02:00
Scalar::from_int(result, Size::from_bits(32)),
dest,
)?;
}
"memrchr" => {
2018-11-17 10:11:21 +01:00
let ptr = self.read_scalar(args[0])?.not_undef()?;
let val = self.read_scalar(args[1])?.to_bytes()? as u8;
2018-11-05 08:51:55 +01:00
let num = self.read_scalar(args[2])?.to_usize(self)?;
2018-10-19 19:51:41 +02:00
if let Some(idx) = self.memory().read_bytes(ptr, Size::from_bytes(num))?
.iter().rev().position(|&c| c == val)
{
2018-11-05 08:51:55 +01:00
let new_ptr = ptr.ptr_offset(Size::from_bytes(num - idx as u64 - 1), self)?;
self.write_scalar(new_ptr, dest)?;
} else {
self.write_null(dest)?;
}
}
"memchr" => {
2018-11-17 10:11:21 +01:00
let ptr = self.read_scalar(args[0])?.not_undef()?;
let val = self.read_scalar(args[1])?.to_bytes()? as u8;
2018-11-05 08:51:55 +01:00
let num = self.read_scalar(args[2])?.to_usize(self)?;
2018-10-19 19:51:41 +02:00
if let Some(idx) = self.memory().read_bytes(ptr, Size::from_bytes(num))?.iter().position(
|&c| c == val,
)
{
2018-11-05 08:51:55 +01:00
let new_ptr = ptr.ptr_offset(Size::from_bytes(idx as u64), self)?;
self.write_scalar(new_ptr, dest)?;
} else {
self.write_null(dest)?;
}
}
"getenv" => {
let result = {
2018-11-17 10:11:21 +01:00
let name_ptr = self.read_scalar(args[0])?.to_ptr()?;
let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
2017-12-06 15:03:24 +01:00
match self.machine.env_vars.get(name) {
2018-05-26 17:07:34 +02:00
Some(&var) => Scalar::Ptr(var),
2018-11-05 08:51:55 +01:00
None => Scalar::ptr_null(&*self.tcx),
}
};
self.write_scalar(result, dest)?;
}
"unsetenv" => {
let mut success = None;
{
2018-11-17 10:11:21 +01:00
let name_ptr = self.read_scalar(args[0])?.not_undef()?;
2018-11-05 08:51:55 +01:00
if !name_ptr.is_null_ptr(self) {
let name_ptr = name_ptr.to_ptr()?;
let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?.to_owned();
if !name.is_empty() && !name.contains(&b'=') {
2018-10-19 19:51:41 +02:00
success = Some(self.machine.env_vars.remove(&name));
}
}
}
if let Some(old) = success {
if let Some(var) = old {
2018-10-19 19:51:41 +02:00
self.memory_mut().deallocate(var, None, MiriMemoryKind::Env.into())?;
}
self.write_null(dest)?;
} else {
self.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
}
}
"setenv" => {
let mut new = None;
{
2018-11-17 10:11:21 +01:00
let name_ptr = self.read_scalar(args[0])?.not_undef()?;
let value_ptr = self.read_scalar(args[1])?.to_ptr()?;
let value = self.memory().get(value_ptr.alloc_id)?.read_c_str(tcx, value_ptr)?;
2018-11-05 08:51:55 +01:00
if !name_ptr.is_null_ptr(self) {
let name_ptr = name_ptr.to_ptr()?;
let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
if !name.is_empty() && !name.contains(&b'=') {
new = Some((name.to_owned(), value.to_owned()));
}
}
}
if let Some((name, value)) = new {
// +1 for the null terminator
2018-10-19 19:51:41 +02:00
let value_copy = self.memory_mut().allocate(
Size::from_bytes((value.len() + 1) as u64),
2018-11-23 09:46:51 +01:00
Align::from_bytes(1).unwrap(),
MiriMemoryKind::Env.into(),
)?.with_default_tag();
{
let alloc = self.memory_mut().get_mut(value_copy.alloc_id)?;
alloc.write_bytes(tcx, value_copy, &value)?;
let trailing_zero_ptr = value_copy.offset(Size::from_bytes(value.len() as u64), tcx)?;
alloc.write_bytes(tcx, trailing_zero_ptr, &[0])?;
}
2017-12-06 15:03:24 +01:00
if let Some(var) = self.machine.env_vars.insert(
name.to_owned(),
value_copy,
)
{
2018-10-19 19:51:41 +02:00
self.memory_mut().deallocate(var, None, MiriMemoryKind::Env.into())?;
}
self.write_null(dest)?;
} else {
self.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
}
}
"write" => {
let fd = self.read_scalar(args[0])?.to_bytes()?;
2018-11-17 10:11:21 +01:00
let buf = self.read_scalar(args[1])?.not_undef()?;
let n = self.read_scalar(args[2])?.to_bytes()? as u64;
trace!("Called write({:?}, {:?}, {:?})", fd, buf, n);
let result = if fd == 1 || fd == 2 {
// stdout/stderr
use std::io::{self, Write};
2018-11-17 10:11:21 +01:00
let buf_cont = self.memory().read_bytes(buf, Size::from_bytes(n))?;
let res = if fd == 1 {
io::stdout().write(buf_cont)
} else {
io::stderr().write(buf_cont)
};
match res {
2018-05-26 17:07:34 +02:00
Ok(n) => n as i64,
Err(_) => -1,
}
} else {
warn!("Ignored output to FD {}", fd);
2018-05-26 17:07:34 +02:00
n as i64 // pretend it all went well
}; // now result is the value we return back to the program
2018-05-26 17:07:34 +02:00
self.write_scalar(
Scalar::from_int(result, dest.layout.size),
dest,
)?;
}
"strlen" => {
2018-11-17 10:11:21 +01:00
let ptr = self.read_scalar(args[0])?.to_ptr()?;
let n = self.memory().get(ptr.alloc_id)?.read_c_str(tcx, ptr)?.len();
self.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?;
}
// Some things needed for sys::thread initialization to go through
"signal" | "sigaction" | "sigaltstack" => {
2018-08-26 20:42:26 +02:00
self.write_scalar(Scalar::from_int(0, dest.layout.size), dest)?;
}
"sysconf" => {
2018-08-23 21:22:57 +02:00
let name = self.read_scalar(args[0])?.to_i32()?;
2018-01-02 17:43:03 -05:00
trace!("sysconf() called with name {}", name);
// cache the sysconf integers via miri's global cache
let paths = &[
(&["libc", "_SC_PAGESIZE"], Scalar::from_int(4096, dest.layout.size)),
(&["libc", "_SC_GETPW_R_SIZE_MAX"], Scalar::from_int(-1, dest.layout.size)),
];
let mut result = None;
for &(path, path_value) in paths {
if let Ok(instance) = self.resolve_path(path) {
let cid = GlobalId {
instance,
promoted: None,
};
2018-11-21 09:52:31 +01:00
let const_val = self.const_eval_raw(cid)?;
let const_val = self.read_scalar(const_val.into())?;
let value = const_val.to_i32()?;
2018-01-14 18:59:13 +01:00
if value == name {
result = Some(path_value);
break;
}
}
}
if let Some(result) = result {
self.write_scalar(result, dest)?;
} else {
return err!(Unimplemented(
format!("Unimplemented sysconf name: {}", name),
));
}
}
// Hook pthread calls that go to the thread-local storage memory subsystem
"pthread_key_create" => {
2018-11-17 10:11:21 +01:00
let key_ptr = self.read_scalar(args[0])?.to_ptr()?;
// Extract the function type out of the signature (that seems easier than constructing it ourselves...)
let dtor = match self.read_scalar(args[1])?.not_undef()? {
2018-10-19 19:51:41 +02:00
Scalar::Ptr(dtor_ptr) => Some(self.memory().get_fn(dtor_ptr)?),
2018-08-07 15:22:11 +02:00
Scalar::Bits { bits: 0, size } => {
2018-10-19 19:51:41 +02:00
assert_eq!(size as u64, self.memory().pointer_size().bytes());
2018-08-07 15:22:11 +02:00
None
},
2018-05-26 17:07:34 +02:00
Scalar::Bits { .. } => return err!(ReadBytesAsPointer),
};
// Figure out how large a pthread TLS key actually is. This is libc::pthread_key_t.
let key_type = args[0].layout.ty.builtin_deref(true)
2018-07-10 17:32:38 +02:00
.ok_or_else(|| EvalErrorKind::AbiViolation("Wrong signature used for pthread_key_create: First argument must be a raw pointer.".to_owned()))?.ty;
2018-08-07 15:22:11 +02:00
let key_layout = self.layout_of(key_type)?;
// Create key and write it into the memory where key_ptr wants it
let key = self.machine.tls.create_tls_key(dtor, tcx) as u128;
2018-08-07 15:22:11 +02:00
if key_layout.size.bits() < 128 && key >= (1u128 << key_layout.size.bits() as u128) {
2017-08-02 16:59:01 +02:00
return err!(OutOfTls);
}
self.memory().check_align(key_ptr.into(), key_layout.align.abi)?;
self.memory_mut().get_mut(key_ptr.alloc_id)?.write_scalar(
tcx,
2018-11-17 10:11:21 +01:00
key_ptr,
2018-08-07 15:22:11 +02:00
Scalar::from_uint(key, key_layout.size).into(),
key_layout.size,
)?;
// Return success (0)
self.write_null(dest)?;
}
"pthread_key_delete" => {
let key = self.read_scalar(args[0])?.to_bytes()?;
self.machine.tls.delete_tls_key(key)?;
// Return success (0)
self.write_null(dest)?;
}
"pthread_getspecific" => {
let key = self.read_scalar(args[0])?.to_bytes()?;
let ptr = self.machine.tls.load_tls(key)?;
self.write_scalar(ptr, dest)?;
}
"pthread_setspecific" => {
let key = self.read_scalar(args[0])?.to_bytes()?;
let new_ptr = self.read_scalar(args[1])?.not_undef()?;
self.machine.tls.store_tls(key, new_ptr)?;
// Return success (0)
self.write_null(dest)?;
}
2017-07-19 15:06:21 +02:00
2018-03-23 12:18:33 +01:00
"_tlv_atexit" => {
// FIXME: Register the dtor
2017-07-19 15:06:21 +02:00
},
2018-08-23 21:22:57 +02:00
// Determining stack base address
"pthread_attr_init" | "pthread_attr_destroy" | "pthread_attr_get_np" |
2018-08-30 09:04:57 +02:00
"pthread_getattr_np" | "pthread_self" | "pthread_get_stacksize_np" => {
2018-08-23 21:22:57 +02:00
self.write_null(dest)?;
}
"pthread_attr_getstack" => {
// second argument is where we are supposed to write the stack size
let ptr = self.deref_operand(args[1])?;
2018-08-30 09:04:57 +02:00
let stackaddr = Scalar::from_int(0x80000, args[1].layout.size); // just any address
self.write_scalar(stackaddr, ptr.into())?;
2018-08-23 21:22:57 +02:00
// return 0
self.write_null(dest)?;
}
2018-08-30 09:04:57 +02:00
"pthread_get_stackaddr_np" => {
let stackaddr = Scalar::from_int(0x80000, dest.layout.size); // just any address
self.write_scalar(stackaddr, dest)?;
}
2018-08-23 21:22:57 +02:00
// Stub out calls for condvar, mutex and rwlock to just return 0
"pthread_mutexattr_init" | "pthread_mutexattr_settype" | "pthread_mutex_init" |
"pthread_mutexattr_destroy" | "pthread_mutex_lock" | "pthread_mutex_unlock" |
"pthread_mutex_destroy" | "pthread_rwlock_rdlock" | "pthread_rwlock_unlock" |
"pthread_rwlock_wrlock" | "pthread_rwlock_destroy" | "pthread_condattr_init" |
"pthread_condattr_setclock" | "pthread_cond_init" | "pthread_condattr_destroy" |
"pthread_cond_destroy" => {
self.write_null(dest)?;
}
2017-07-19 15:06:21 +02:00
"mmap" => {
// This is a horrible hack, but well... the guard page mechanism calls mmap and expects a particular return value, so we give it that value
let addr = self.read_scalar(args[0])?.not_undef()?;
self.write_scalar(addr, dest)?;
2017-07-19 15:06:21 +02:00
}
2018-08-30 09:22:01 +02:00
"mprotect" => {
self.write_null(dest)?;
}
2017-07-19 15:06:21 +02:00
// Windows API subs
2018-07-15 23:47:28 +02:00
"AddVectoredExceptionHandler" => {
// any non zero value works for the stdlib. This is just used for stackoverflows anyway
self.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
2018-07-15 23:47:28 +02:00
},
"InitializeCriticalSection" |
"EnterCriticalSection" |
"LeaveCriticalSection" |
"DeleteCriticalSection" |
"SetLastError" => {
2018-08-14 18:44:31 +02:00
// Function does not return anything, nothing to do
},
"GetModuleHandleW" |
"GetProcAddress" |
"TryEnterCriticalSection" => {
2018-07-15 23:47:28 +02:00
// pretend these do not exist/nothing happened, by returning zero
self.write_null(dest)?;
2018-07-15 23:47:28 +02:00
},
"GetLastError" => {
// this is c::ERROR_CALL_NOT_IMPLEMENTED
self.write_scalar(Scalar::from_int(120, dest.layout.size), dest)?;
},
2018-07-16 11:42:46 +02:00
// Windows TLS
"TlsAlloc" => {
// This just creates a key; Windows does not natively support TLS dtors.
// Create key and return it
let key = self.machine.tls.create_tls_key(None, tcx) as u128;
2018-08-07 15:22:11 +02:00
// Figure out how large a TLS key actually is. This is c::DWORD.
if dest.layout.size.bits() < 128 && key >= (1u128 << dest.layout.size.bits() as u128) {
2018-07-16 11:42:46 +02:00
return err!(OutOfTls);
}
self.write_scalar(Scalar::from_uint(key, dest.layout.size), dest)?;
2018-07-16 11:42:46 +02:00
}
"TlsGetValue" => {
let key = self.read_scalar(args[0])?.to_bytes()?;
let ptr = self.machine.tls.load_tls(key)?;
self.write_scalar(ptr, dest)?;
2018-07-16 11:42:46 +02:00
}
"TlsSetValue" => {
let key = self.read_scalar(args[0])?.to_bytes()?;
let new_ptr = self.read_scalar(args[1])?.not_undef()?;
self.machine.tls.store_tls(key, new_ptr)?;
2018-07-16 11:42:46 +02:00
// Return success (1)
self.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
2018-07-16 11:42:46 +02:00
}
// We can't execute anything else
_ => {
return err!(Unimplemented(
format!("can't call foreign function: {}", link_name),
));
}
}
self.goto_block(Some(ret))?;
self.dump_place(*dest);
Ok(())
}
fn emulate_missing_fn(
&mut self,
path: String,
_args: &[OpTy<'tcx, Borrow>],
dest: Option<PlaceTy<'tcx, Borrow>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx> {
// In some cases in non-MIR libstd-mode, not having a destination is legit. Handle these early.
match &path[..] {
"std::panicking::rust_panic_with_hook" |
2017-08-29 11:32:10 +02:00
"core::panicking::panic_fmt::::panic_impl" |
"std::rt::begin_panic_fmt" =>
return err!(MachineError("the evaluated program panicked".to_string())),
_ => {}
}
let dest = dest.ok_or_else(
// Must be some function we do not support
|| EvalErrorKind::NoMirFor(path.clone()),
)?;
match &path[..] {
// A Rust function is missing, which means we are running with MIR missing for libstd (or other dependencies).
// Still, we can make many things mostly work by "emulating" or ignoring some functions.
"std::io::_print" |
"std::io::_eprint" => {
warn!(
2018-10-19 10:09:53 +02:00
"Ignoring output. To run programs that prints, make sure you have a libstd with full MIR."
);
}
"std::thread::Builder::new" => {
return err!(Unimplemented("miri does not support threading".to_owned()))
}
"std::env::args" => {
return err!(Unimplemented(
"miri does not support program arguments".to_owned(),
))
}
"std::panicking::panicking" |
"std::rt::panicking" => {
// we abort on panic -> `std::rt::panicking` always returns false
self.write_scalar(Scalar::from_bool(false), dest)?;
}
2017-08-02 16:59:01 +02:00
_ => return err!(NoMirFor(path)),
}
self.goto_block(ret)?;
self.dump_place(*dest);
2018-07-10 17:32:38 +02:00
Ok(())
}
2017-11-21 13:32:40 +01:00
fn write_null(&mut self, dest: PlaceTy<'tcx, Borrow>) -> EvalResult<'tcx> {
2018-08-26 20:42:26 +02:00
self.write_scalar(Scalar::from_int(0, dest.layout.size), dest)
2017-11-21 13:32:40 +01:00
}
}