Merge pull request #568 from RalfJung/cleanup

Cleanup: Avoid repeating signatures, get rid of to_bytes hack
Ralf Jung 2018-12-11 15:21:43 +01:00 committed by GitHub
commit 6eb3274121
6 changed files with 402 additions and 484 deletions


@@ -6,29 +6,8 @@ use syntax::attr;
use crate::*;
pub trait EvalContextExt<'tcx, 'mir> {
/// Emulate calling a foreign item, fail if the item is not supported.
/// This function will handle `goto_block` if needed.
fn emulate_foreign_item(
&mut self,
def_id: DefId,
args: &[OpTy<'tcx, Borrow>],
dest: PlaceTy<'tcx, Borrow>,
ret: mir::BasicBlock,
) -> EvalResult<'tcx>;
fn find_fn(
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, Borrow>],
dest: Option<PlaceTy<'tcx, Borrow>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>>;
fn write_null(&mut self, dest: PlaceTy<'tcx, Borrow>) -> EvalResult<'tcx>;
}
impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalContext<'a, 'mir, 'tcx> {
impl<'a, 'mir, 'tcx> EvalContextExt<'a, 'mir, 'tcx> for crate::MiriEvalContext<'a, 'mir, 'tcx> {}
pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a, 'mir, 'tcx> {
fn find_fn(
&mut self,
instance: ty::Instance<'tcx>,
@@ -36,30 +15,31 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
dest: Option<PlaceTy<'tcx, Borrow>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>> {
let this = self.eval_context_mut();
trace!("eval_fn_call: {:#?}, {:?}", instance, dest.map(|place| *place));
// first run the common hooks also supported by CTFE
if self.hook_fn(instance, args, dest)? {
self.goto_block(ret)?;
if this.hook_fn(instance, args, dest)? {
this.goto_block(ret)?;
return Ok(None);
}
// there are some more lang items we want to hook that CTFE does not hook (yet)
if self.tcx.lang_items().align_offset_fn() == Some(instance.def.def_id()) {
if this.tcx.lang_items().align_offset_fn() == Some(instance.def.def_id()) {
// FIXME: return a real value in case the target allocation has an
// alignment bigger than the one requested
let n = u128::max_value();
let dest = dest.unwrap();
let n = self.truncate(n, dest.layout);
self.write_scalar(Scalar::from_uint(n, dest.layout.size), dest)?;
self.goto_block(ret)?;
let n = this.truncate(n, dest.layout);
this.write_scalar(Scalar::from_uint(n, dest.layout.size), dest)?;
this.goto_block(ret)?;
return Ok(None);
}
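For context on the stop-gap value written here: truncating the all-ones `u128` to the destination's width yields the all-ones pattern of that width, i.e. `usize::MAX` for a `usize` destination, and `usize::MAX` is what `align_offset` returns when it cannot compute an offset. A minimal sketch of that truncation, as a hypothetical standalone helper rather than the interpreter's own `truncate`:

```rust
// Keep only the low `bits` bits of `n`; the >= 128 guard avoids an
// undefined shift of a u128 by its full width.
fn truncate(n: u128, bits: u32) -> u128 {
    if bits >= 128 { n } else { n & ((1u128 << bits) - 1) }
}

fn main() {
    // All-ones truncated to 64 or 32 bits is usize::MAX on such targets.
    assert_eq!(truncate(u128::max_value(), 64), u64::max_value() as u128);
    assert_eq!(truncate(u128::max_value(), 32), u32::max_value() as u128);
}
```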
// Try to see if we can do something about foreign items
if self.tcx.is_foreign_item(instance.def_id()) {
if this.tcx.is_foreign_item(instance.def_id()) {
// An external function that we cannot find MIR for, but we can still run enough
// of them to make miri viable.
self.emulate_foreign_item(
this.emulate_foreign_item(
instance.def_id(),
args,
dest.unwrap(),
@@ -70,9 +50,11 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
}
// Otherwise, load the MIR
Ok(Some(self.load_mir(instance.def)?))
Ok(Some(this.load_mir(instance.def)?))
}
/// Emulate calling a foreign item, fail if the item is not supported.
/// This function will handle `goto_block` if needed.
fn emulate_foreign_item(
&mut self,
def_id: DefId,
@@ -80,15 +62,16 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
dest: PlaceTy<'tcx, Borrow>,
ret: mir::BasicBlock,
) -> EvalResult<'tcx> {
let attrs = self.tcx.get_attrs(def_id);
let this = self.eval_context_mut();
let attrs = this.tcx.get_attrs(def_id);
let link_name = match attr::first_attr_value_str_by_name(&attrs, "link_name") {
Some(name) => name.as_str(),
None => self.tcx.item_name(def_id).as_str(),
None => this.tcx.item_name(def_id).as_str(),
};
// Strip linker suffixes (seen on 32bit macOS)
let link_name = link_name.trim_end_matches("$UNIX2003");
let tcx = &{self.tcx.tcx};
let tcx = &{this.tcx.tcx};
// All these functions take raw pointers, so if we access memory directly
// (as opposed to through a place), we have to remember to erase any tag
@@ -96,20 +79,20 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
match &link_name[..] {
"malloc" => {
let size = self.read_scalar(args[0])?.to_usize(self)?;
let size = this.read_scalar(args[0])?.to_usize(this)?;
if size == 0 {
self.write_null(dest)?;
this.write_null(dest)?;
} else {
let align = self.tcx.data_layout.pointer_align.abi;
let ptr = self.memory_mut().allocate(Size::from_bytes(size), align, MiriMemoryKind::C.into())?;
self.write_scalar(Scalar::Ptr(ptr.with_default_tag()), dest)?;
let align = this.tcx.data_layout.pointer_align.abi;
let ptr = this.memory_mut().allocate(Size::from_bytes(size), align, MiriMemoryKind::C.into())?;
this.write_scalar(Scalar::Ptr(ptr.with_default_tag()), dest)?;
}
}
"free" => {
let ptr = self.read_scalar(args[0])?.not_undef()?;
if !ptr.is_null_ptr(self) {
self.memory_mut().deallocate(
let ptr = this.read_scalar(args[0])?.not_undef()?;
if !ptr.is_null_ptr(this) {
this.memory_mut().deallocate(
ptr.to_ptr()?,
None,
MiriMemoryKind::C.into(),
@@ -118,72 +101,72 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
}
"__rust_alloc" => {
let size = self.read_scalar(args[0])?.to_usize(self)?;
let align = self.read_scalar(args[1])?.to_usize(self)?;
let size = this.read_scalar(args[0])?.to_usize(this)?;
let align = this.read_scalar(args[1])?.to_usize(this)?;
if size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
let ptr = self.memory_mut()
let ptr = this.memory_mut()
.allocate(
Size::from_bytes(size),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into()
)?
.with_default_tag();
self.write_scalar(Scalar::Ptr(ptr), dest)?;
this.write_scalar(Scalar::Ptr(ptr), dest)?;
}
"__rust_alloc_zeroed" => {
let size = self.read_scalar(args[0])?.to_usize(self)?;
let align = self.read_scalar(args[1])?.to_usize(self)?;
let size = this.read_scalar(args[0])?.to_usize(this)?;
let align = this.read_scalar(args[1])?.to_usize(this)?;
if size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
let ptr = self.memory_mut()
let ptr = this.memory_mut()
.allocate(
Size::from_bytes(size),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into()
)?
.with_default_tag();
self.memory_mut()
this.memory_mut()
.get_mut(ptr.alloc_id)?
.write_repeat(tcx, ptr, 0, Size::from_bytes(size))?;
self.write_scalar(Scalar::Ptr(ptr), dest)?;
this.write_scalar(Scalar::Ptr(ptr), dest)?;
}
"__rust_dealloc" => {
let ptr = self.read_scalar(args[0])?.to_ptr()?;
let old_size = self.read_scalar(args[1])?.to_usize(self)?;
let align = self.read_scalar(args[2])?.to_usize(self)?;
let ptr = this.read_scalar(args[0])?.to_ptr()?;
let old_size = this.read_scalar(args[1])?.to_usize(this)?;
let align = this.read_scalar(args[2])?.to_usize(this)?;
if old_size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
self.memory_mut().deallocate(
this.memory_mut().deallocate(
ptr,
Some((Size::from_bytes(old_size), Align::from_bytes(align).unwrap())),
MiriMemoryKind::Rust.into(),
)?;
}
"__rust_realloc" => {
let ptr = self.read_scalar(args[0])?.to_ptr()?;
let old_size = self.read_scalar(args[1])?.to_usize(self)?;
let align = self.read_scalar(args[2])?.to_usize(self)?;
let new_size = self.read_scalar(args[3])?.to_usize(self)?;
let ptr = this.read_scalar(args[0])?.to_ptr()?;
let old_size = this.read_scalar(args[1])?.to_usize(this)?;
let align = this.read_scalar(args[2])?.to_usize(this)?;
let new_size = this.read_scalar(args[3])?.to_usize(this)?;
if old_size == 0 || new_size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
let new_ptr = self.memory_mut().reallocate(
let new_ptr = this.memory_mut().reallocate(
ptr,
Size::from_bytes(old_size),
Align::from_bytes(align).unwrap(),
@@ -191,7 +174,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into(),
)?;
self.write_scalar(Scalar::Ptr(new_ptr.with_default_tag()), dest)?;
this.write_scalar(Scalar::Ptr(new_ptr.with_default_tag()), dest)?;
}
"syscall" => {
@@ -200,7 +183,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
//
// libc::syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), GRND_NONBLOCK)
// is called if a `HashMap` is created the regular way.
match self.read_scalar(args[0])?.to_usize(self)? {
match this.read_scalar(args[0])?.to_usize(this)? {
318 | 511 => {
return err!(Unimplemented(
"miri does not support random number generators".to_owned(),
@@ -215,9 +198,9 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
}
"dlsym" => {
let _handle = self.read_scalar(args[0])?;
let symbol = self.read_scalar(args[1])?.to_ptr()?;
let symbol_name = self.memory().get(symbol.alloc_id)?.read_c_str(tcx, symbol)?;
let _handle = this.read_scalar(args[0])?;
let symbol = this.read_scalar(args[1])?.to_ptr()?;
let symbol_name = this.memory().get(symbol.alloc_id)?.read_c_str(tcx, symbol)?;
let err = format!("bad c unicode symbol: {:?}", symbol_name);
let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err);
return err!(Unimplemented(format!(
@@ -229,24 +212,24 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
"__rust_maybe_catch_panic" => {
// fn __rust_maybe_catch_panic(f: fn(*mut u8), data: *mut u8, data_ptr: *mut usize, vtable_ptr: *mut usize) -> u32
// We abort on panic, so not much is going on here, but we still have to call the closure
let f = self.read_scalar(args[0])?.to_ptr()?;
let data = self.read_scalar(args[1])?.not_undef()?;
let f_instance = self.memory().get_fn(f)?;
self.write_null(dest)?;
let f = this.read_scalar(args[0])?.to_ptr()?;
let data = this.read_scalar(args[1])?.not_undef()?;
let f_instance = this.memory().get_fn(f)?;
this.write_null(dest)?;
trace!("__rust_maybe_catch_panic: {:?}", f_instance);
// Now we make a function call. TODO: Consider making this re-usable? EvalContext::step does sth. similar for the TLS dtors,
// and of course eval_main.
let mir = self.load_mir(f_instance.def)?;
let ret_place = MPlaceTy::dangling(self.layout_of(self.tcx.mk_unit())?, self).into();
self.push_stack_frame(
let mir = this.load_mir(f_instance.def)?;
let ret_place = MPlaceTy::dangling(this.layout_of(this.tcx.mk_unit())?, this).into();
this.push_stack_frame(
f_instance,
mir.span,
mir,
Some(ret_place),
StackPopCleanup::Goto(Some(ret)), // directly return to caller
)?;
let mut args = self.frame().mir.args_iter();
let mut args = this.frame().mir.args_iter();
let arg_local = args.next().ok_or_else(||
EvalErrorKind::AbiViolation(
@@ -254,13 +237,13 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
.to_owned(),
),
)?;
let arg_dest = self.eval_place(&mir::Place::Local(arg_local))?;
self.write_scalar(data, arg_dest)?;
let arg_dest = this.eval_place(&mir::Place::Local(arg_local))?;
this.write_scalar(data, arg_dest)?;
assert!(args.next().is_none(), "__rust_maybe_catch_panic argument has more arguments than expected");
// We ourselves will return 0, eventually (because we will not return if we panicked)
self.write_null(dest)?;
this.write_null(dest)?;
// Don't fall through, we do NOT want to `goto_block`!
return Ok(());
@@ -270,13 +253,13 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
return err!(MachineError("the evaluated program panicked".to_string())),
"memcmp" => {
let left = self.read_scalar(args[0])?.not_undef()?;
let right = self.read_scalar(args[1])?.not_undef()?;
let n = Size::from_bytes(self.read_scalar(args[2])?.to_usize(self)?);
let left = this.read_scalar(args[0])?.not_undef()?;
let right = this.read_scalar(args[1])?.not_undef()?;
let n = Size::from_bytes(this.read_scalar(args[2])?.to_usize(this)?);
let result = {
let left_bytes = self.memory().read_bytes(left, n)?;
let right_bytes = self.memory().read_bytes(right, n)?;
let left_bytes = this.memory().read_bytes(left, n)?;
let right_bytes = this.memory().read_bytes(right, n)?;
use std::cmp::Ordering::*;
match left_bytes.cmp(right_bytes) {
@@ -286,84 +269,84 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
}
};
self.write_scalar(
this.write_scalar(
Scalar::from_int(result, Size::from_bits(32)),
dest,
)?;
}
"memrchr" => {
let ptr = self.read_scalar(args[0])?.not_undef()?;
let val = self.read_scalar(args[1])?.to_bytes()? as u8;
let num = self.read_scalar(args[2])?.to_usize(self)?;
if let Some(idx) = self.memory().read_bytes(ptr, Size::from_bytes(num))?
let ptr = this.read_scalar(args[0])?.not_undef()?;
let val = this.read_scalar(args[1])?.to_i32()? as u8;
let num = this.read_scalar(args[2])?.to_usize(this)?;
if let Some(idx) = this.memory().read_bytes(ptr, Size::from_bytes(num))?
.iter().rev().position(|&c| c == val)
{
let new_ptr = ptr.ptr_offset(Size::from_bytes(num - idx as u64 - 1), self)?;
self.write_scalar(new_ptr, dest)?;
let new_ptr = ptr.ptr_offset(Size::from_bytes(num - idx as u64 - 1), this)?;
this.write_scalar(new_ptr, dest)?;
} else {
self.write_null(dest)?;
this.write_null(dest)?;
}
}
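The index arithmetic in the `memrchr` arm is the kind that is easy to get off by one. A plain-Rust sketch of the same computation (hypothetical helper, not part of Miri): a match at position `idx` of the reversed slice sits at offset `len - 1 - idx` from the start, which is the `num - idx - 1` above.

```rust
// Standalone version of the reverse search emulated above.
fn memrchr(haystack: &[u8], needle: u8) -> Option<usize> {
    haystack
        .iter()
        .rev()
        .position(|&b| b == needle)
        .map(|idx| haystack.len() - 1 - idx) // same as `num - idx - 1`
}

fn main() {
    assert_eq!(memrchr(b"abca", b'a'), Some(3)); // last match, not first
    assert_eq!(memrchr(b"abc", b'z'), None);
}
```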
"memchr" => {
let ptr = self.read_scalar(args[0])?.not_undef()?;
let val = self.read_scalar(args[1])?.to_bytes()? as u8;
let num = self.read_scalar(args[2])?.to_usize(self)?;
if let Some(idx) = self.memory().read_bytes(ptr, Size::from_bytes(num))?.iter().position(
let ptr = this.read_scalar(args[0])?.not_undef()?;
let val = this.read_scalar(args[1])?.to_i32()? as u8;
let num = this.read_scalar(args[2])?.to_usize(this)?;
if let Some(idx) = this.memory().read_bytes(ptr, Size::from_bytes(num))?.iter().position(
|&c| c == val,
)
{
let new_ptr = ptr.ptr_offset(Size::from_bytes(idx as u64), self)?;
self.write_scalar(new_ptr, dest)?;
let new_ptr = ptr.ptr_offset(Size::from_bytes(idx as u64), this)?;
this.write_scalar(new_ptr, dest)?;
} else {
self.write_null(dest)?;
this.write_null(dest)?;
}
}
"getenv" => {
let result = {
let name_ptr = self.read_scalar(args[0])?.to_ptr()?;
let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
match self.machine.env_vars.get(name) {
let name_ptr = this.read_scalar(args[0])?.to_ptr()?;
let name = this.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
match this.machine.env_vars.get(name) {
Some(&var) => Scalar::Ptr(var),
None => Scalar::ptr_null(&*self.tcx),
None => Scalar::ptr_null(&*this.tcx),
}
};
self.write_scalar(result, dest)?;
this.write_scalar(result, dest)?;
}
"unsetenv" => {
let mut success = None;
{
let name_ptr = self.read_scalar(args[0])?.not_undef()?;
if !name_ptr.is_null_ptr(self) {
let name_ptr = this.read_scalar(args[0])?.not_undef()?;
if !name_ptr.is_null_ptr(this) {
let name_ptr = name_ptr.to_ptr()?;
let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?.to_owned();
let name = this.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?.to_owned();
if !name.is_empty() && !name.contains(&b'=') {
success = Some(self.machine.env_vars.remove(&name));
success = Some(this.machine.env_vars.remove(&name));
}
}
}
if let Some(old) = success {
if let Some(var) = old {
self.memory_mut().deallocate(var, None, MiriMemoryKind::Env.into())?;
this.memory_mut().deallocate(var, None, MiriMemoryKind::Env.into())?;
}
self.write_null(dest)?;
this.write_null(dest)?;
} else {
self.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
this.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
}
}
"setenv" => {
let mut new = None;
{
let name_ptr = self.read_scalar(args[0])?.not_undef()?;
let value_ptr = self.read_scalar(args[1])?.to_ptr()?;
let value = self.memory().get(value_ptr.alloc_id)?.read_c_str(tcx, value_ptr)?;
if !name_ptr.is_null_ptr(self) {
let name_ptr = this.read_scalar(args[0])?.not_undef()?;
let value_ptr = this.read_scalar(args[1])?.to_ptr()?;
let value = this.memory().get(value_ptr.alloc_id)?.read_c_str(tcx, value_ptr)?;
if !name_ptr.is_null_ptr(this) {
let name_ptr = name_ptr.to_ptr()?;
let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
let name = this.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
if !name.is_empty() && !name.contains(&b'=') {
new = Some((name.to_owned(), value.to_owned()));
}
@@ -371,40 +354,40 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
}
if let Some((name, value)) = new {
// +1 for the null terminator
let value_copy = self.memory_mut().allocate(
let value_copy = this.memory_mut().allocate(
Size::from_bytes((value.len() + 1) as u64),
Align::from_bytes(1).unwrap(),
MiriMemoryKind::Env.into(),
)?.with_default_tag();
{
let alloc = self.memory_mut().get_mut(value_copy.alloc_id)?;
let alloc = this.memory_mut().get_mut(value_copy.alloc_id)?;
alloc.write_bytes(tcx, value_copy, &value)?;
let trailing_zero_ptr = value_copy.offset(Size::from_bytes(value.len() as u64), tcx)?;
alloc.write_bytes(tcx, trailing_zero_ptr, &[0])?;
}
if let Some(var) = self.machine.env_vars.insert(
if let Some(var) = this.machine.env_vars.insert(
name.to_owned(),
value_copy,
)
{
self.memory_mut().deallocate(var, None, MiriMemoryKind::Env.into())?;
this.memory_mut().deallocate(var, None, MiriMemoryKind::Env.into())?;
}
self.write_null(dest)?;
this.write_null(dest)?;
} else {
self.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
this.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
}
}
"write" => {
let fd = self.read_scalar(args[0])?.to_bytes()?;
let buf = self.read_scalar(args[1])?.not_undef()?;
let n = self.read_scalar(args[2])?.to_bytes()? as u64;
let fd = this.read_scalar(args[0])?.to_i32()?;
let buf = this.read_scalar(args[1])?.not_undef()?;
let n = this.read_scalar(args[2])?.to_usize(&*this.tcx)?;
trace!("Called write({:?}, {:?}, {:?})", fd, buf, n);
let result = if fd == 1 || fd == 2 {
// stdout/stderr
use std::io::{self, Write};
let buf_cont = self.memory().read_bytes(buf, Size::from_bytes(n))?;
let buf_cont = this.memory().read_bytes(buf, Size::from_bytes(n))?;
let res = if fd == 1 {
io::stdout().write(buf_cont)
} else {
@@ -418,25 +401,25 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
warn!("Ignored output to FD {}", fd);
n as i64 // pretend it all went well
}; // now result is the value we return back to the program
self.write_scalar(
this.write_scalar(
Scalar::from_int(result, dest.layout.size),
dest,
)?;
}
"strlen" => {
let ptr = self.read_scalar(args[0])?.to_ptr()?;
let n = self.memory().get(ptr.alloc_id)?.read_c_str(tcx, ptr)?.len();
self.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?;
let ptr = this.read_scalar(args[0])?.to_ptr()?;
let n = this.memory().get(ptr.alloc_id)?.read_c_str(tcx, ptr)?.len();
this.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?;
}
// Some things needed for sys::thread initialization to go through
"signal" | "sigaction" | "sigaltstack" => {
self.write_scalar(Scalar::from_int(0, dest.layout.size), dest)?;
this.write_scalar(Scalar::from_int(0, dest.layout.size), dest)?;
}
"sysconf" => {
let name = self.read_scalar(args[0])?.to_i32()?;
let name = this.read_scalar(args[0])?.to_i32()?;
trace!("sysconf() called with name {}", name);
// cache the sysconf integers via miri's global cache
@@ -446,13 +429,13 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
];
let mut result = None;
for &(path, path_value) in paths {
if let Ok(instance) = self.resolve_path(path) {
if let Ok(instance) = this.resolve_path(path) {
let cid = GlobalId {
instance,
promoted: None,
};
let const_val = self.const_eval_raw(cid)?;
let const_val = self.read_scalar(const_val.into())?;
let const_val = this.const_eval_raw(cid)?;
let const_val = this.read_scalar(const_val.into())?;
let value = const_val.to_i32()?;
if value == name {
result = Some(path_value);
@@ -461,7 +444,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
}
}
if let Some(result) = result {
self.write_scalar(result, dest)?;
this.write_scalar(result, dest)?;
} else {
return err!(Unimplemented(
format!("Unimplemented sysconf name: {}", name),
@@ -471,13 +454,13 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
// Hook pthread calls that go to the thread-local storage memory subsystem
"pthread_key_create" => {
let key_ptr = self.read_scalar(args[0])?.to_ptr()?;
let key_ptr = this.read_scalar(args[0])?.to_ptr()?;
// Extract the function type out of the signature (that seems easier than constructing it ourselves...)
let dtor = match self.read_scalar(args[1])?.not_undef()? {
Scalar::Ptr(dtor_ptr) => Some(self.memory().get_fn(dtor_ptr)?),
let dtor = match this.read_scalar(args[1])?.not_undef()? {
Scalar::Ptr(dtor_ptr) => Some(this.memory().get_fn(dtor_ptr)?),
Scalar::Bits { bits: 0, size } => {
assert_eq!(size as u64, self.memory().pointer_size().bytes());
assert_eq!(size as u64, this.memory().pointer_size().bytes());
None
},
Scalar::Bits { .. } => return err!(ReadBytesAsPointer),
@@ -486,16 +469,16 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
// Figure out how large a pthread TLS key actually is. This is libc::pthread_key_t.
let key_type = args[0].layout.ty.builtin_deref(true)
.ok_or_else(|| EvalErrorKind::AbiViolation("Wrong signature used for pthread_key_create: First argument must be a raw pointer.".to_owned()))?.ty;
let key_layout = self.layout_of(key_type)?;
let key_layout = this.layout_of(key_type)?;
// Create key and write it into the memory where key_ptr wants it
let key = self.machine.tls.create_tls_key(dtor, tcx) as u128;
let key = this.machine.tls.create_tls_key(dtor, tcx) as u128;
if key_layout.size.bits() < 128 && key >= (1u128 << key_layout.size.bits() as u128) {
return err!(OutOfTls);
}
self.memory().check_align(key_ptr.into(), key_layout.align.abi)?;
self.memory_mut().get_mut(key_ptr.alloc_id)?.write_scalar(
this.memory().check_align(key_ptr.into(), key_layout.align.abi)?;
this.memory_mut().get_mut(key_ptr.alloc_id)?.write_scalar(
tcx,
key_ptr,
Scalar::from_uint(key, key_layout.size).into(),
@@ -503,26 +486,26 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
)?;
// Return success (0)
self.write_null(dest)?;
this.write_null(dest)?;
}
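The range check a few lines up guards against handing out a key that does not fit in the platform's `pthread_key_t`; the `size.bits() < 128` clause also avoids the undefined shift of a `u128` by 128. The same test in isolation, as a hypothetical helper:

```rust
// A key is representable in an n-bit pthread_key_t iff key < 2^n.
fn key_fits(key: u128, bits: u32) -> bool {
    bits >= 128 || key < (1u128 << bits)
}

fn main() {
    assert!(key_fits(u128::from(u32::max_value()), 32));      // 2^32 - 1 fits
    assert!(!key_fits(u128::from(u32::max_value()) + 1, 32)); // 2^32 does not
}
```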
"pthread_key_delete" => {
let key = self.read_scalar(args[0])?.to_bytes()?;
self.machine.tls.delete_tls_key(key)?;
let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
this.machine.tls.delete_tls_key(key)?;
// Return success (0)
self.write_null(dest)?;
this.write_null(dest)?;
}
"pthread_getspecific" => {
let key = self.read_scalar(args[0])?.to_bytes()?;
let ptr = self.machine.tls.load_tls(key)?;
self.write_scalar(ptr, dest)?;
let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
let ptr = this.machine.tls.load_tls(key)?;
this.write_scalar(ptr, dest)?;
}
"pthread_setspecific" => {
let key = self.read_scalar(args[0])?.to_bytes()?;
let new_ptr = self.read_scalar(args[1])?.not_undef()?;
self.machine.tls.store_tls(key, new_ptr)?;
let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
let new_ptr = this.read_scalar(args[1])?.not_undef()?;
this.machine.tls.store_tls(key, new_ptr)?;
// Return success (0)
self.write_null(dest)?;
this.write_null(dest)?;
}
"_tlv_atexit" => {
@@ -532,19 +515,19 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
// Determining stack base address
"pthread_attr_init" | "pthread_attr_destroy" | "pthread_attr_get_np" |
"pthread_getattr_np" | "pthread_self" | "pthread_get_stacksize_np" => {
self.write_null(dest)?;
this.write_null(dest)?;
}
"pthread_attr_getstack" => {
// second argument is where we are supposed to write the stack size
let ptr = self.deref_operand(args[1])?;
let ptr = this.deref_operand(args[1])?;
let stackaddr = Scalar::from_int(0x80000, args[1].layout.size); // just any address
self.write_scalar(stackaddr, ptr.into())?;
this.write_scalar(stackaddr, ptr.into())?;
// return 0
self.write_null(dest)?;
this.write_null(dest)?;
}
"pthread_get_stackaddr_np" => {
let stackaddr = Scalar::from_int(0x80000, dest.layout.size); // just any address
self.write_scalar(stackaddr, dest)?;
this.write_scalar(stackaddr, dest)?;
}
// Stub out calls for condvar, mutex and rwlock to just return 0
@@ -554,22 +537,22 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
"pthread_rwlock_wrlock" | "pthread_rwlock_destroy" | "pthread_condattr_init" |
"pthread_condattr_setclock" | "pthread_cond_init" | "pthread_condattr_destroy" |
"pthread_cond_destroy" => {
self.write_null(dest)?;
this.write_null(dest)?;
}
"mmap" => {
// This is a horrible hack, but well... the guard page mechanism calls mmap and expects a particular return value, so we give it that value
let addr = self.read_scalar(args[0])?.not_undef()?;
self.write_scalar(addr, dest)?;
let addr = this.read_scalar(args[0])?.not_undef()?;
this.write_scalar(addr, dest)?;
}
"mprotect" => {
self.write_null(dest)?;
this.write_null(dest)?;
}
// Windows API subs
"AddVectoredExceptionHandler" => {
// any non zero value works for the stdlib. This is just used for stackoverflows anyway
self.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
this.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
},
"InitializeCriticalSection" |
"EnterCriticalSection" |
@@ -582,11 +565,11 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
"GetProcAddress" |
"TryEnterCriticalSection" => {
// pretend these do not exist/nothing happened, by returning zero
self.write_null(dest)?;
this.write_null(dest)?;
},
"GetLastError" => {
// this is c::ERROR_CALL_NOT_IMPLEMENTED
self.write_scalar(Scalar::from_int(120, dest.layout.size), dest)?;
this.write_scalar(Scalar::from_int(120, dest.layout.size), dest)?;
},
// Windows TLS
@@ -594,26 +577,26 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
// This just creates a key; Windows does not natively support TLS dtors.
// Create key and return it
let key = self.machine.tls.create_tls_key(None, tcx) as u128;
let key = this.machine.tls.create_tls_key(None, tcx) as u128;
// Figure out how large a TLS key actually is. This is c::DWORD.
if dest.layout.size.bits() < 128 && key >= (1u128 << dest.layout.size.bits() as u128) {
return err!(OutOfTls);
}
self.write_scalar(Scalar::from_uint(key, dest.layout.size), dest)?;
this.write_scalar(Scalar::from_uint(key, dest.layout.size), dest)?;
}
"TlsGetValue" => {
let key = self.read_scalar(args[0])?.to_bytes()?;
let ptr = self.machine.tls.load_tls(key)?;
self.write_scalar(ptr, dest)?;
let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
let ptr = this.machine.tls.load_tls(key)?;
this.write_scalar(ptr, dest)?;
}
"TlsSetValue" => {
let key = self.read_scalar(args[0])?.to_bytes()?;
let new_ptr = self.read_scalar(args[1])?.not_undef()?;
self.machine.tls.store_tls(key, new_ptr)?;
let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
let new_ptr = this.read_scalar(args[1])?.not_undef()?;
this.machine.tls.store_tls(key, new_ptr)?;
// Return success (1)
self.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
this.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
}
// We can't execute anything else
@@ -624,12 +607,12 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
}
}
self.goto_block(Some(ret))?;
self.dump_place(*dest);
this.goto_block(Some(ret))?;
this.dump_place(*dest);
Ok(())
}
fn write_null(&mut self, dest: PlaceTy<'tcx, Borrow>) -> EvalResult<'tcx> {
self.write_scalar(Scalar::from_int(0, dest.layout.size), dest)
self.eval_context_mut().write_scalar(Scalar::from_int(0, dest.layout.size), dest)
}
}


@@ -5,68 +5,31 @@ use rustc::hir::def_id::{DefId, CRATE_DEF_INDEX};
use crate::*;
pub trait ScalarExt {
/// HACK: this function just extracts all bits if `defined != 0`
/// Mainly used for args of C-functions and we should totally correctly fetch the size
/// of their arguments
fn to_bytes(self) -> EvalResult<'static, u128>;
}
impl<Tag> ScalarExt for Scalar<Tag> {
fn to_bytes(self) -> EvalResult<'static, u128> {
match self {
Scalar::Bits { bits, size } => {
assert_ne!(size, 0);
Ok(bits)
},
Scalar::Ptr(_) => err!(ReadPointerAsBytes),
}
}
}
impl<Tag> ScalarExt for ScalarMaybeUndef<Tag> {
fn to_bytes(self) -> EvalResult<'static, u128> {
self.not_undef()?.to_bytes()
}
}
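This is the `to_bytes` hack named in the PR title: it pulled the raw bits out of a scalar while ignoring the value's width, which is why call sites in the diff above move to size-aware accessors such as `to_i32()` and `to_bits(layout.size)`. A toy sketch (simplified types, not rustc's `Scalar`) of what the size parameter buys:

```rust
// Toy scalar: raw bits plus the value's width in bytes.
#[derive(Debug, PartialEq)]
enum Scalar {
    Bits { bits: u128, size: u8 },
    Ptr, // pointers carry no meaningful integer bits
}

impl Scalar {
    // Size-aware accessor in the spirit of `to_bits`: the caller states how
    // wide a value it expects, so mismatches are caught loudly instead of
    // being silently reinterpreted as with the old `to_bytes`.
    fn to_bits(&self, expected: u8) -> Result<u128, &'static str> {
        match *self {
            Scalar::Bits { bits, size } if size == expected => Ok(bits),
            Scalar::Bits { .. } => Err("operand has unexpected size"),
            Scalar::Ptr => Err("a pointer is not a pure integer"),
        }
    }
}

fn main() {
    let c_int = Scalar::Bits { bits: 0xFFFF_FFFF, size: 4 };
    assert_eq!(c_int.to_bits(4), Ok(0xFFFF_FFFF)); // read as a 32-bit value
    assert!(c_int.to_bits(8).is_err());            // size mismatch detected
}
```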
pub trait EvalContextExt<'tcx> {
fn resolve_path(&self, path: &[&str]) -> EvalResult<'tcx, ty::Instance<'tcx>>;
/// Visit the memory covered by `place`, sensitive to freezing: The 3rd parameter
/// will be true if this is frozen, false if this is in an `UnsafeCell`.
fn visit_freeze_sensitive(
&self,
place: MPlaceTy<'tcx, Borrow>,
size: Size,
action: impl FnMut(Pointer<Borrow>, Size, bool) -> EvalResult<'tcx>,
) -> EvalResult<'tcx>;
}
impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
impl<'a, 'mir, 'tcx> EvalContextExt<'a, 'mir, 'tcx> for crate::MiriEvalContext<'a, 'mir, 'tcx> {}
pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a, 'mir, 'tcx> {
/// Get an instance for a path.
fn resolve_path(&self, path: &[&str]) -> EvalResult<'tcx, ty::Instance<'tcx>> {
self.tcx
let this = self.eval_context_ref();
this.tcx
.crates()
.iter()
.find(|&&krate| self.tcx.original_crate_name(krate) == path[0])
.find(|&&krate| this.tcx.original_crate_name(krate) == path[0])
.and_then(|krate| {
let krate = DefId {
krate: *krate,
index: CRATE_DEF_INDEX,
};
let mut items = self.tcx.item_children(krate);
let mut items = this.tcx.item_children(krate);
let mut path_it = path.iter().skip(1).peekable();
while let Some(segment) = path_it.next() {
for item in mem::replace(&mut items, Default::default()).iter() {
if item.ident.name == *segment {
if path_it.peek().is_none() {
return Some(ty::Instance::mono(self.tcx.tcx, item.def.def_id()));
return Some(ty::Instance::mono(this.tcx.tcx, item.def.def_id()));
}
items = self.tcx.item_children(item.def.def_id());
items = this.tcx.item_children(item.def.def_id());
break;
}
}
@@ -79,15 +42,18 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
})
}
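`resolve_path` walks the crate graph one segment at a time: the first segment picks a crate, intermediate segments must resolve to modules, and only a final segment may yield the definition. The same walk over a toy module tree (hypothetical types and ids; the real code queries `tcx.item_children`):

```rust
use std::collections::HashMap;

enum Item {
    Module(HashMap<String, Item>),
    Def(u32), // stand-in for a DefId
}

// Mirror of the peekable-iterator loop above: descend through modules,
// and only accept a definition when it is the final path segment.
fn resolve(root: &HashMap<String, Item>, path: &[&str]) -> Option<u32> {
    let mut items = root;
    let mut path_it = path.iter().peekable();
    while let Some(segment) = path_it.next() {
        match items.get(*segment)? {
            Item::Def(id) if path_it.peek().is_none() => return Some(*id),
            Item::Module(children) => items = children,
            Item::Def(_) => return None, // a def in the middle of the path
        }
    }
    None
}

fn main() {
    let mut libc = HashMap::new();
    libc.insert("_SC_PAGESIZE".to_string(), Item::Def(30)); // made-up id
    let mut root = HashMap::new();
    root.insert("libc".to_string(), Item::Module(libc));
    assert_eq!(resolve(&root, &["libc", "_SC_PAGESIZE"]), Some(30));
    assert_eq!(resolve(&root, &["libc", "nope"]), None);
}
```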
/// Visit the memory covered by `place`, sensitive to freezing: The 3rd parameter
/// will be true if this is frozen, false if this is in an `UnsafeCell`.
fn visit_freeze_sensitive(
&self,
place: MPlaceTy<'tcx, Borrow>,
size: Size,
mut action: impl FnMut(Pointer<Borrow>, Size, bool) -> EvalResult<'tcx>,
) -> EvalResult<'tcx> {
let this = self.eval_context_ref();
trace!("visit_frozen(place={:?}, size={:?})", *place, size);
debug_assert_eq!(size,
self.size_and_align_of_mplace(place)?
this.size_and_align_of_mplace(place)?
.map(|(size, _)| size)
.unwrap_or_else(|| place.layout.size)
);
@@ -106,8 +72,8 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
}
// We assume that we are given the fields in increasing offset order,
// and nothing else changes.
let unsafe_cell_offset = unsafe_cell_ptr.get_ptr_offset(self);
let end_offset = end_ptr.get_ptr_offset(self);
let unsafe_cell_offset = unsafe_cell_ptr.get_ptr_offset(this);
let end_offset = end_ptr.get_ptr_offset(this);
assert!(unsafe_cell_offset >= end_offset);
let frozen_size = unsafe_cell_offset - end_offset;
// Everything between the end_ptr and this `UnsafeCell` is frozen.
@@ -119,18 +85,18 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
action(unsafe_cell_ptr.to_ptr()?, unsafe_cell_size, /*frozen*/false)?;
}
// Update `end_ptr`.
end_ptr = unsafe_cell_ptr.ptr_wrapping_offset(unsafe_cell_size, self);
end_ptr = unsafe_cell_ptr.ptr_wrapping_offset(unsafe_cell_size, this);
// Done
Ok(())
};
// Run a visitor
{
let mut visitor = UnsafeCellVisitor {
ecx: self,
ecx: this,
unsafe_cell_action: |place| {
trace!("unsafe_cell_action on {:?}", place.ptr);
// We need a size to go on.
let unsafe_cell_size = self.size_and_align_of_mplace(place)?
let unsafe_cell_size = this.size_and_align_of_mplace(place)?
.map(|(size, _)| size)
// for extern types, just cover what we can
.unwrap_or_else(|| place.layout.size);
@@ -146,7 +112,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
}
// The part between the end_ptr and the end of the place is also frozen.
// So pretend there is a 0-sized `UnsafeCell` at the end.
unsafe_cell_action(place.ptr.ptr_wrapping_offset(size, self), Size::ZERO)?;
unsafe_cell_action(place.ptr.ptr_wrapping_offset(size, this), Size::ZERO)?;
// Done!
return Ok(());
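Taken together, the visitor splits the place into alternating frozen and `UnsafeCell` chunks, with the zero-sized pseudo-cell at the end flushing the frozen tail. A standalone sketch of that range-splitting, with hypothetical byte offsets and sizes:

```rust
// (offset, size, frozen?) chunks covering `total` bytes, given the
// (offset, size) of each UnsafeCell in increasing offset order.
fn freeze_chunks(total: u64, cells: &[(u64, u64)]) -> Vec<(u64, u64, bool)> {
    let mut chunks = Vec::new();
    let mut end = 0; // like `end_ptr`: everything below it is handled
    for &(off, size) in cells {
        assert!(off >= end, "cells must come in increasing offset order");
        if off > end {
            chunks.push((end, off - end, true)); // frozen gap before the cell
        }
        if size > 0 {
            chunks.push((off, size, false)); // the UnsafeCell itself
        }
        end = off + size;
    }
    if total > end {
        // frozen tail, reported via the 0-sized pseudo-cell at the end
        chunks.push((end, total - end, true));
    }
    chunks
}

fn main() {
    // struct { a: i32, c: Cell<i32>, b: i32 }: bytes 4..8 are the cell
    assert_eq!(
        freeze_chunks(12, &[(4, 4)]),
        vec![(0, 4, true), (4, 4, false), (8, 4, true)]
    );
}
```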


@@ -6,50 +6,43 @@ use rustc::mir::interpret::{EvalResult, PointerArithmetic};
use crate::{
PlaceTy, OpTy, Immediate, Scalar, ScalarMaybeUndef, Borrow,
ScalarExt, OperatorEvalContextExt
OperatorEvalContextExt
};
pub trait EvalContextExt<'tcx> {
fn call_intrinsic(
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, Borrow>],
dest: PlaceTy<'tcx, Borrow>,
) -> EvalResult<'tcx>;
}
impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, 'tcx> {
impl<'a, 'mir, 'tcx> EvalContextExt<'a, 'mir, 'tcx> for crate::MiriEvalContext<'a, 'mir, 'tcx> {}
pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a, 'mir, 'tcx> {
fn call_intrinsic(
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, Borrow>],
dest: PlaceTy<'tcx, Borrow>,
) -> EvalResult<'tcx> {
if self.emulate_intrinsic(instance, args, dest)? {
let this = self.eval_context_mut();
if this.emulate_intrinsic(instance, args, dest)? {
return Ok(());
}
let tcx = &{self.tcx.tcx};
let tcx = &{this.tcx.tcx};
let substs = instance.substs;
// All these intrinsics take raw pointers, so if we access memory directly
// (as opposed to through a place), we have to remember to erase any tag
// that might still hang around!
let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
let intrinsic_name = &this.tcx.item_name(instance.def_id()).as_str()[..];
match intrinsic_name {
"arith_offset" => {
let offset = self.read_scalar(args[1])?.to_isize(self)?;
let ptr = self.read_scalar(args[0])?.not_undef()?;
let offset = this.read_scalar(args[1])?.to_isize(this)?;
let ptr = this.read_scalar(args[0])?.not_undef()?;
let pointee_ty = substs.type_at(0);
let pointee_size = self.layout_of(pointee_ty)?.size.bytes() as i64;
let pointee_size = this.layout_of(pointee_ty)?.size.bytes() as i64;
let offset = offset.overflowing_mul(pointee_size).0;
let result_ptr = ptr.ptr_wrapping_signed_offset(offset, self);
self.write_scalar(result_ptr, dest)?;
let result_ptr = ptr.ptr_wrapping_signed_offset(offset, this);
this.write_scalar(result_ptr, dest)?;
}
"assume" => {
let cond = self.read_scalar(args[0])?.to_bool()?;
let cond = this.read_scalar(args[0])?.to_bool()?;
if !cond {
return err!(AssumptionNotHeld);
}
@@ -59,18 +52,18 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
"atomic_load_relaxed" |
"atomic_load_acq" |
"volatile_load" => {
let ptr = self.deref_operand(args[0])?;
let val = self.read_scalar(ptr.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
self.write_scalar(val, dest)?;
let ptr = this.deref_operand(args[0])?;
let val = this.read_scalar(ptr.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
this.write_scalar(val, dest)?;
}
"atomic_store" |
"atomic_store_relaxed" |
"atomic_store_rel" |
"volatile_store" => {
let ptr = self.deref_operand(args[0])?;
let val = self.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
self.write_scalar(val, ptr.into())?;
let ptr = this.deref_operand(args[0])?;
let val = this.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
this.write_scalar(val, ptr.into())?;
}
"atomic_fence_acq" => {
@@ -78,25 +71,25 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
}
_ if intrinsic_name.starts_with("atomic_xchg") => {
let ptr = self.deref_operand(args[0])?;
let new = self.read_scalar(args[1])?;
let old = self.read_scalar(ptr.into())?;
self.write_scalar(old, dest)?; // old value is returned
self.write_scalar(new, ptr.into())?;
let ptr = this.deref_operand(args[0])?;
let new = this.read_scalar(args[1])?;
let old = this.read_scalar(ptr.into())?;
this.write_scalar(old, dest)?; // old value is returned
this.write_scalar(new, ptr.into())?;
}
_ if intrinsic_name.starts_with("atomic_cxchg") => {
let ptr = self.deref_operand(args[0])?;
let expect_old = self.read_immediate(args[1])?; // read as immediate for the sake of `binary_op_imm()`
let new = self.read_scalar(args[2])?;
let old = self.read_immediate(ptr.into())?; // read as immediate for the sake of `binary_op_imm()`
let ptr = this.deref_operand(args[0])?;
let expect_old = this.read_immediate(args[1])?; // read as immediate for the sake of `binary_op_imm()`
let new = this.read_scalar(args[2])?;
let old = this.read_immediate(ptr.into())?; // read as immediate for the sake of `binary_op_imm()`
// binary_op_imm will bail if either of them is not a scalar
let (eq, _) = self.binary_op_imm(mir::BinOp::Eq, old, expect_old)?;
let (eq, _) = this.binary_op_imm(mir::BinOp::Eq, old, expect_old)?;
let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
self.write_immediate(res, dest)?; // old value is returned
this.write_immediate(res, dest)?; // old value is returned
// update ptr depending on comparison
if eq.to_bool()? {
self.write_scalar(new, ptr.into())?;
this.write_scalar(new, ptr.into())?;
}
}
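For reference, the behavior the `atomic_cxchg*` arm emulates can be seen through the standard library's stable API: a compare-exchange hands back the old value either way, plus a flag for whether the store happened, the same `(old, eq)` pair this code writes into `dest`.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let a = AtomicUsize::new(5);
    // Expected 5, found 5: the store happens, old value comes back as Ok.
    assert_eq!(
        a.compare_exchange(5, 10, Ordering::SeqCst, Ordering::SeqCst),
        Ok(5)
    );
    // Expected 5, found 10: no store, old value comes back as Err.
    assert_eq!(
        a.compare_exchange(5, 7, Ordering::SeqCst, Ordering::SeqCst),
        Err(10)
    );
    assert_eq!(a.load(Ordering::SeqCst), 10);
}
```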
@@ -125,13 +118,13 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
"atomic_xsub_rel" |
"atomic_xsub_acqrel" |
"atomic_xsub_relaxed" => {
let ptr = self.deref_operand(args[0])?;
let ptr = this.deref_operand(args[0])?;
if !ptr.layout.ty.is_integral() {
return err!(Unimplemented(format!("Atomic arithmetic operations only work on integer types")));
}
let rhs = self.read_immediate(args[1])?;
let old = self.read_immediate(ptr.into())?;
self.write_immediate(*old, dest)?; // old value is returned
let rhs = this.read_immediate(args[1])?;
let old = this.read_immediate(ptr.into())?;
this.write_immediate(*old, dest)?; // old value is returned
let op = match intrinsic_name.split('_').nth(1).unwrap() {
"or" => mir::BinOp::BitOr,
"xor" => mir::BinOp::BitXor,
@@ -141,7 +134,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
_ => bug!(),
};
// Atomics wrap around on overflow.
self.binop_ignore_overflow(op, old, rhs, ptr.into())?;
this.binop_ignore_overflow(op, old, rhs, ptr.into())?;
}
"breakpoint" => unimplemented!(), // halt miri
@@ -149,14 +142,14 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
"copy" |
"copy_nonoverlapping" => {
let elem_ty = substs.type_at(0);
let elem_layout = self.layout_of(elem_ty)?;
let elem_layout = this.layout_of(elem_ty)?;
let elem_size = elem_layout.size.bytes();
let count = self.read_scalar(args[2])?.to_usize(self)?;
let count = this.read_scalar(args[2])?.to_usize(this)?;
let elem_align = elem_layout.align.abi;
// erase tags: this is a raw ptr operation
let src = self.read_scalar(args[0])?.not_undef()?;
let dest = self.read_scalar(args[1])?.not_undef()?;
self.memory_mut().copy(
let src = this.read_scalar(args[0])?.not_undef()?;
let dest = this.read_scalar(args[1])?.not_undef()?;
this.memory_mut().copy(
src,
elem_align,
dest,
@@ -167,14 +160,14 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
}
"discriminant_value" => {
let place = self.deref_operand(args[0])?;
let discr_val = self.read_discriminant(place.into())?.0;
self.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
let place = this.deref_operand(args[0])?;
let discr_val = this.read_discriminant(place.into())?.0;
this.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
}
"sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
"log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" => {
let f = self.read_scalar(args[0])?.to_f32()?;
let f = this.read_scalar(args[0])?.to_f32()?;
let f = match intrinsic_name {
"sinf32" => f.sin(),
"fabsf32" => f.abs(),
@@ -190,12 +183,12 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
"truncf32" => f.trunc(),
_ => bug!(),
};
self.write_scalar(Scalar::from_f32(f), dest)?;
this.write_scalar(Scalar::from_f32(f), dest)?;
}
"sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
"log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" => {
let f = self.read_scalar(args[0])?.to_f64()?;
let f = this.read_scalar(args[0])?.to_f64()?;
let f = match intrinsic_name {
"sinf64" => f.sin(),
"fabsf64" => f.abs(),
@@ -211,12 +204,12 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
"truncf64" => f.trunc(),
_ => bug!(),
};
self.write_scalar(Scalar::from_f64(f), dest)?;
this.write_scalar(Scalar::from_f64(f), dest)?;
}
"fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
let a = self.read_immediate(args[0])?;
let b = self.read_immediate(args[1])?;
let a = this.read_immediate(args[0])?;
let b = this.read_immediate(args[1])?;
let op = match intrinsic_name {
"fadd_fast" => mir::BinOp::Add,
"fsub_fast" => mir::BinOp::Sub,
@@ -225,19 +218,19 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
"frem_fast" => mir::BinOp::Rem,
_ => bug!(),
};
self.binop_ignore_overflow(op, a, b, dest)?;
this.binop_ignore_overflow(op, a, b, dest)?;
}
"exact_div" => {
// Performs an exact division, resulting in undefined behavior where
// `x % y != 0` or `y == 0` or `x == T::min_value() && y == -1`
let a = self.read_immediate(args[0])?;
let b = self.read_immediate(args[1])?;
let a = this.read_immediate(args[0])?;
let b = this.read_immediate(args[1])?;
// check x % y != 0
if self.binary_op_imm(mir::BinOp::Rem, a, b)?.0.to_bytes()? != 0 {
if this.binary_op_imm(mir::BinOp::Rem, a, b)?.0.to_bits(dest.layout.size)? != 0 {
return err!(ValidationFailure(format!("exact_div: {:?} cannot be divided by {:?}", a, b)));
}
self.binop_ignore_overflow(mir::BinOp::Div, a, b, dest)?;
this.binop_ignore_overflow(mir::BinOp::Div, a, b, dest)?;
},
"likely" | "unlikely" | "forget" => {}
@@ -252,21 +245,21 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
if !dest.layout.is_zst() { // nothing to do for ZST
match dest.layout.abi {
layout::Abi::Scalar(ref s) => {
let x = Scalar::from_int(0, s.value.size(self));
self.write_immediate(Immediate::Scalar(x.into()), dest)?;
let x = Scalar::from_int(0, s.value.size(this));
this.write_immediate(Immediate::Scalar(x.into()), dest)?;
}
layout::Abi::ScalarPair(ref s1, ref s2) => {
let x = Scalar::from_int(0, s1.value.size(self));
let y = Scalar::from_int(0, s2.value.size(self));
self.write_immediate(Immediate::ScalarPair(x.into(), y.into()), dest)?;
let x = Scalar::from_int(0, s1.value.size(this));
let y = Scalar::from_int(0, s2.value.size(this));
this.write_immediate(Immediate::ScalarPair(x.into(), y.into()), dest)?;
}
_ => {
// Do it in memory
let mplace = self.force_allocation(dest)?;
let mplace = this.force_allocation(dest)?;
assert!(mplace.meta.is_none());
// not a zst, must be valid pointer
let ptr = mplace.ptr.to_ptr()?;
self.memory_mut().get_mut(ptr.alloc_id)?.write_repeat(tcx, ptr, 0, dest.layout.size)?;
this.memory_mut().get_mut(ptr.alloc_id)?.write_repeat(tcx, ptr, 0, dest.layout.size)?;
}
}
}
@@ -274,87 +267,87 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
"pref_align_of" => {
let ty = substs.type_at(0);
let layout = self.layout_of(ty)?;
let layout = this.layout_of(ty)?;
let align = layout.align.pref.bytes();
let ptr_size = self.pointer_size();
let ptr_size = this.pointer_size();
let align_val = Scalar::from_uint(align as u128, ptr_size);
self.write_scalar(align_val, dest)?;
this.write_scalar(align_val, dest)?;
}
"move_val_init" => {
let ptr = self.deref_operand(args[0])?;
self.copy_op(args[1], ptr.into())?;
let ptr = this.deref_operand(args[0])?;
this.copy_op(args[1], ptr.into())?;
}
"offset" => {
let offset = self.read_scalar(args[1])?.to_isize(self)?;
let ptr = self.read_scalar(args[0])?.not_undef()?;
let result_ptr = self.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
self.write_scalar(result_ptr, dest)?;
let offset = this.read_scalar(args[1])?.to_isize(this)?;
let ptr = this.read_scalar(args[0])?.not_undef()?;
let result_ptr = this.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
this.write_scalar(result_ptr, dest)?;
}
"powf32" => {
let f = self.read_scalar(args[0])?.to_f32()?;
let f2 = self.read_scalar(args[1])?.to_f32()?;
self.write_scalar(
let f = this.read_scalar(args[0])?.to_f32()?;
let f2 = this.read_scalar(args[1])?.to_f32()?;
this.write_scalar(
Scalar::from_f32(f.powf(f2)),
dest,
)?;
}
"powf64" => {
let f = self.read_scalar(args[0])?.to_f64()?;
let f2 = self.read_scalar(args[1])?.to_f64()?;
self.write_scalar(
let f = this.read_scalar(args[0])?.to_f64()?;
let f2 = this.read_scalar(args[1])?.to_f64()?;
this.write_scalar(
Scalar::from_f64(f.powf(f2)),
dest,
)?;
}
"fmaf32" => {
let a = self.read_scalar(args[0])?.to_f32()?;
let b = self.read_scalar(args[1])?.to_f32()?;
let c = self.read_scalar(args[2])?.to_f32()?;
self.write_scalar(
let a = this.read_scalar(args[0])?.to_f32()?;
let b = this.read_scalar(args[1])?.to_f32()?;
let c = this.read_scalar(args[2])?.to_f32()?;
this.write_scalar(
Scalar::from_f32(a * b + c),
dest,
)?;
}
"fmaf64" => {
let a = self.read_scalar(args[0])?.to_f64()?;
let b = self.read_scalar(args[1])?.to_f64()?;
let c = self.read_scalar(args[2])?.to_f64()?;
self.write_scalar(
let a = this.read_scalar(args[0])?.to_f64()?;
let b = this.read_scalar(args[1])?.to_f64()?;
let c = this.read_scalar(args[2])?.to_f64()?;
this.write_scalar(
Scalar::from_f64(a * b + c),
dest,
)?;
}
"powif32" => {
let f = self.read_scalar(args[0])?.to_f32()?;
let i = self.read_scalar(args[1])?.to_i32()?;
self.write_scalar(
let f = this.read_scalar(args[0])?.to_f32()?;
let i = this.read_scalar(args[1])?.to_i32()?;
this.write_scalar(
Scalar::from_f32(f.powi(i)),
dest,
)?;
}
"powif64" => {
let f = self.read_scalar(args[0])?.to_f64()?;
let i = self.read_scalar(args[1])?.to_i32()?;
self.write_scalar(
let f = this.read_scalar(args[0])?.to_f64()?;
let i = this.read_scalar(args[1])?.to_i32()?;
this.write_scalar(
Scalar::from_f64(f.powi(i)),
dest,
)?;
}
"size_of_val" => {
let mplace = self.deref_operand(args[0])?;
let (size, _) = self.size_and_align_of_mplace(mplace)?
let mplace = this.deref_operand(args[0])?;
let (size, _) = this.size_and_align_of_mplace(mplace)?
.expect("size_of_val called on extern type");
let ptr_size = self.pointer_size();
self.write_scalar(
let ptr_size = this.pointer_size();
this.write_scalar(
Scalar::from_uint(size.bytes() as u128, ptr_size),
dest,
)?;
@@ -362,11 +355,11 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
"min_align_of_val" |
"align_of_val" => {
let mplace = self.deref_operand(args[0])?;
let (_, align) = self.size_and_align_of_mplace(mplace)?
let mplace = this.deref_operand(args[0])?;
let (_, align) = this.size_and_align_of_mplace(mplace)?
.expect("size_of_val called on extern type");
let ptr_size = self.pointer_size();
self.write_scalar(
let ptr_size = this.pointer_size();
this.write_scalar(
Scalar::from_uint(align.bytes(), ptr_size),
dest,
)?;
@@ -375,18 +368,18 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
"type_name" => {
let ty = substs.type_at(0);
let ty_name = ty.to_string();
let value = self.str_to_immediate(&ty_name)?;
self.write_immediate(value, dest)?;
let value = this.str_to_immediate(&ty_name)?;
this.write_immediate(value, dest)?;
}
"unchecked_div" => {
let l = self.read_immediate(args[0])?;
let r = self.read_immediate(args[1])?;
let rval = r.to_scalar()?.to_bytes()?;
let l = this.read_immediate(args[0])?;
let r = this.read_immediate(args[1])?;
let rval = r.to_scalar()?.to_bits(args[1].layout.size)?;
if rval == 0 {
return err!(Intrinsic(format!("Division by 0 in unchecked_div")));
}
self.binop_ignore_overflow(
this.binop_ignore_overflow(
mir::BinOp::Div,
l,
r,
@@ -395,13 +388,13 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
}
"unchecked_rem" => {
let l = self.read_immediate(args[0])?;
let r = self.read_immediate(args[1])?;
let rval = r.to_scalar()?.to_bytes()?;
let l = this.read_immediate(args[0])?;
let r = this.read_immediate(args[1])?;
let rval = r.to_scalar()?.to_bits(args[1].layout.size)?;
if rval == 0 {
return err!(Intrinsic(format!("Division by 0 in unchecked_rem")));
}
self.binop_ignore_overflow(
this.binop_ignore_overflow(
mir::BinOp::Rem,
l,
r,
@@ -420,18 +413,18 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
match dest.layout.abi {
layout::Abi::Scalar(..) => {
let x = ScalarMaybeUndef::Undef;
self.write_immediate(Immediate::Scalar(x), dest)?;
this.write_immediate(Immediate::Scalar(x), dest)?;
}
layout::Abi::ScalarPair(..) => {
let x = ScalarMaybeUndef::Undef;
self.write_immediate(Immediate::ScalarPair(x, x), dest)?;
this.write_immediate(Immediate::ScalarPair(x, x), dest)?;
}
_ => {
// Do it in memory
let mplace = self.force_allocation(dest)?;
let mplace = this.force_allocation(dest)?;
assert!(mplace.meta.is_none());
let ptr = mplace.ptr.to_ptr()?;
self.memory_mut()
this.memory_mut()
.get_mut(ptr.alloc_id)?
.mark_definedness(ptr, dest.layout.size, false)?;
}
@@ -441,15 +434,15 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
"write_bytes" => {
let ty = substs.type_at(0);
let ty_layout = self.layout_of(ty)?;
let val_byte = self.read_scalar(args[1])?.to_u8()?;
let ptr = self.read_scalar(args[0])?.not_undef()?;
let count = self.read_scalar(args[2])?.to_usize(self)?;
self.memory().check_align(ptr, ty_layout.align.abi)?;
let ty_layout = this.layout_of(ty)?;
let val_byte = this.read_scalar(args[1])?.to_u8()?;
let ptr = this.read_scalar(args[0])?.not_undef()?;
let count = this.read_scalar(args[2])?.to_usize(this)?;
this.memory().check_align(ptr, ty_layout.align.abi)?;
let byte_count = ty_layout.size * count;
if byte_count.bytes() != 0 {
let ptr = ptr.to_ptr()?;
self.memory_mut()
this.memory_mut()
.get_mut(ptr.alloc_id)?
.write_repeat(tcx, ptr, val_byte, byte_count)?;
}


@@ -43,7 +43,7 @@ pub use crate::intrinsic::EvalContextExt as IntrinsicEvalContextExt;
pub use crate::tls::{EvalContextExt as TlsEvalContextExt, TlsData};
use crate::range_map::RangeMap;
#[allow(unused_imports)] // FIXME rustc bug https://github.com/rust-lang/rust/issues/53682
pub use crate::helpers::{ScalarExt, EvalContextExt as HelpersEvalContextExt};
pub use crate::helpers::{EvalContextExt as HelpersEvalContextExt};
use crate::mono_hash_map::MonoHashMap;
pub use crate::stacked_borrows::{EvalContextExt as StackedBorEvalContextExt};
@@ -277,6 +277,21 @@ impl<'tcx> Evaluator<'tcx> {
#[allow(dead_code)] // FIXME https://github.com/rust-lang/rust/issues/47131
type MiriEvalContext<'a, 'mir, 'tcx> = EvalContext<'a, 'mir, 'tcx, Evaluator<'tcx>>;
// A little trait that's useful to be inherited by extension traits
pub trait MiriEvalContextExt<'a, 'mir, 'tcx> {
fn eval_context_ref(&self) -> &MiriEvalContext<'a, 'mir, 'tcx>;
fn eval_context_mut(&mut self) -> &mut MiriEvalContext<'a, 'mir, 'tcx>;
}
impl<'a, 'mir, 'tcx> MiriEvalContextExt<'a, 'mir, 'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
#[inline(always)]
fn eval_context_ref(&self) -> &MiriEvalContext<'a, 'mir, 'tcx> {
self
}
#[inline(always)]
fn eval_context_mut(&mut self) -> &mut MiriEvalContext<'a, 'mir, 'tcx> {
self
}
}
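This small trait is the heart of the cleanup: extension traits now take `MiriEvalContextExt` as a supertrait and put their method bodies directly into the trait as default methods, so each signature is written once instead of twice (once in the trait, once in the impl), and a single empty `impl` line wires the methods onto `MiriEvalContext`. A self-contained sketch of the pattern with toy types:

```rust
struct EvalContext { depth: usize }

// Base trait exposing the concrete context, like MiriEvalContextExt.
trait CtxExt {
    fn ctx_ref(&self) -> &EvalContext;
    fn ctx_mut(&mut self) -> &mut EvalContext;
}

impl CtxExt for EvalContext {
    fn ctx_ref(&self) -> &EvalContext { self }
    fn ctx_mut(&mut self) -> &mut EvalContext { self }
}

// Extension trait: the body lives in the trait as a default method, and
// `let this = self.ctx_mut()` plays the role of the `eval_context_mut()`
// calls threaded through the diff above.
trait FooExt: CtxExt {
    fn enter(&mut self) -> usize {
        let this = self.ctx_mut();
        this.depth += 1;
        this.depth
    }
}

// One empty impl wires the methods onto the concrete type.
impl FooExt for EvalContext {}

fn main() {
    let mut ecx = EvalContext { depth: 0 };
    assert_eq!(ecx.enter(), 1);
    assert_eq!(ecx.enter(), 2);
}
```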
impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
type MemoryKinds = MiriMemoryKind;


@@ -516,60 +516,90 @@ impl<'tcx> Stacks {
}
}
pub trait EvalContextExt<'tcx> {
fn ptr_dereference(
&self,
place: MPlaceTy<'tcx, Borrow>,
size: Size,
mutability: Option<Mutability>,
) -> EvalResult<'tcx>;
fn tag_new_allocation(
&mut self,
id: AllocId,
kind: MemoryKind<MiriMemoryKind>,
) -> Borrow;
/// Reborrow the given place, returning the newly tagged ptr to it.
impl<'a, 'mir, 'tcx> EvalContextPrivExt<'a, 'mir, 'tcx> for crate::MiriEvalContext<'a, 'mir, 'tcx> {}
trait EvalContextPrivExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a, 'mir, 'tcx> {
fn reborrow(
&mut self,
place: MPlaceTy<'tcx, Borrow>,
size: Size,
fn_barrier: bool,
new_bor: Borrow
) -> EvalResult<'tcx>;
) -> EvalResult<'tcx> {
let this = self.eval_context_mut();
let ptr = place.ptr.to_ptr()?;
let barrier = if fn_barrier { Some(this.frame().extra) } else { None };
trace!("reborrow: Creating new reference for {:?} (pointee {}): {:?}",
ptr, place.layout.ty, new_bor);
// Get the allocation. It might not be mutable, so we cannot use `get_mut`.
let alloc = this.memory().get(ptr.alloc_id)?;
alloc.check_bounds(this, ptr, size)?;
// Update the stacks.
if let Borrow::Shr(Some(_)) = new_bor {
// Reference that cares about freezing. We need a frozen-sensitive reborrow.
this.visit_freeze_sensitive(place, size, |cur_ptr, size, frozen| {
let kind = if frozen { RefKind::Frozen } else { RefKind::Raw };
alloc.extra.reborrow(cur_ptr, size, barrier, new_bor, kind)
})?;
} else {
// Just treat this as one big chunk.
let kind = if new_bor.is_unique() { RefKind::Unique } else { RefKind::Raw };
alloc.extra.reborrow(ptr, size, barrier, new_bor, kind)?;
}
Ok(())
}
/// Retag an individual pointer, returning the retagged version.
fn retag_reference(
&mut self,
ptr: ImmTy<'tcx, Borrow>,
val: ImmTy<'tcx, Borrow>,
mutbl: Mutability,
fn_barrier: bool,
two_phase: bool,
) -> EvalResult<'tcx, Immediate<Borrow>>;
) -> EvalResult<'tcx, Immediate<Borrow>> {
let this = self.eval_context_mut();
// We want a place for where the ptr *points to*, so we get one.
let place = this.ref_to_mplace(val)?;
let size = this.size_and_align_of_mplace(place)?
.map(|(size, _)| size)
.unwrap_or_else(|| place.layout.size);
if size == Size::ZERO {
// Nothing to do for ZSTs.
return Ok(*val);
}
fn retag(
&mut self,
fn_entry: bool,
two_phase: bool,
place: PlaceTy<'tcx, Borrow>
) -> EvalResult<'tcx>;
// Compute new borrow.
let time = this.machine.stacked_borrows.increment_clock();
let new_bor = match mutbl {
MutMutable => Borrow::Uniq(time),
MutImmutable => Borrow::Shr(Some(time)),
};
fn escape_to_raw(
&mut self,
place: MPlaceTy<'tcx, Borrow>,
size: Size,
) -> EvalResult<'tcx>;
// Reborrow.
this.reborrow(place, size, fn_barrier, new_bor)?;
let new_place = place.with_tag(new_bor);
// Handle two-phase borrows.
if two_phase {
assert!(mutbl == MutMutable, "two-phase shared borrows make no sense");
// We immediately share it, to allow read accesses
let two_phase_time = this.machine.stacked_borrows.increment_clock();
let two_phase_bor = Borrow::Shr(Some(two_phase_time));
this.reborrow(new_place, size, /*fn_barrier*/false, two_phase_bor)?;
}
// Return new ptr.
Ok(new_place.to_ref())
}
}
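The `two_phase` branch above immediately shares a freshly created unique borrow, which is how Stacked Borrows models two-phase borrows: in surface Rust, a mutable auto-ref may coexist with reads of the same value until the call actually begins, as in this canonical example:

```rust
fn main() {
    let mut v = vec![0, 1];
    // Desugars to Vec::push(&mut v, v.len()): the mutable auto-ref to `v`
    // is created before `v.len()` reads `v`, and is accepted only because
    // the borrow is two-phase (shared for reads until the call starts).
    v.push(v.len());
    assert_eq!(v, vec![0, 1, 2]);
}
```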
impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
impl<'a, 'mir, 'tcx> EvalContextExt<'a, 'mir, 'tcx> for crate::MiriEvalContext<'a, 'mir, 'tcx> {}
pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a, 'mir, 'tcx> {
fn tag_new_allocation(
&mut self,
id: AllocId,
kind: MemoryKind<MiriMemoryKind>,
) -> Borrow {
let this = self.eval_context_mut();
let time = match kind {
MemoryKind::Stack => {
// New unique borrow. This `Uniq` is not accessible by the program,
@@ -580,7 +610,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
// `reset` which the blog post [1] says to perform when accessing a local.
//
// [1] https://www.ralfj.de/blog/2018/08/07/stacked-borrows.html
self.machine.stacked_borrows.increment_clock()
this.machine.stacked_borrows.increment_clock()
}
_ => {
// Nothing to do for everything else
@@ -588,7 +618,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
}
};
// Make this the active borrow for this allocation
let alloc = self.memory_mut().get_mut(id).expect("This is a new allocation, it must still exist");
let alloc = this.memory_mut().get_mut(id).expect("This is a new allocation, it must still exist");
let size = Size::from_bytes(alloc.bytes.len() as u64);
alloc.extra.first_item(BorStackItem::Uniq(time), size);
Borrow::Uniq(time)
@@ -604,6 +634,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
size: Size,
mutability: Option<Mutability>,
) -> EvalResult<'tcx> {
let this = self.eval_context_ref();
trace!("ptr_dereference: Accessing {} reference for {:?} (pointee {})",
if let Some(mutability) = mutability { format!("{:?}", mutability) } else { format!("raw") },
place.ptr, place.layout.ty);
@@ -614,13 +645,13 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
}
// Get the allocation
let alloc = self.memory().get(ptr.alloc_id)?;
alloc.check_bounds(self, ptr, size)?;
let alloc = this.memory().get(ptr.alloc_id)?;
alloc.check_bounds(this, ptr, size)?;
// If we got here, we do some checking, *but* we leave the tag unchanged.
if let Borrow::Shr(Some(_)) = ptr.tag {
assert_eq!(mutability, Some(MutImmutable));
// We need a frozen-sensitive check
self.visit_freeze_sensitive(place, size, |cur_ptr, size, frozen| {
this.visit_freeze_sensitive(place, size, |cur_ptr, size, frozen| {
let kind = if frozen { RefKind::Frozen } else { RefKind::Raw };
alloc.extra.deref(cur_ptr, size, kind)
})?;
@@ -641,86 +672,18 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
place: MPlaceTy<'tcx, Borrow>,
size: Size,
) -> EvalResult<'tcx> {
self.reborrow(place, size, /*fn_barrier*/ false, Borrow::default())?;
let this = self.eval_context_mut();
this.reborrow(place, size, /*fn_barrier*/ false, Borrow::default())?;
Ok(())
}
fn reborrow(
&mut self,
place: MPlaceTy<'tcx, Borrow>,
size: Size,
fn_barrier: bool,
new_bor: Borrow
) -> EvalResult<'tcx> {
let ptr = place.ptr.to_ptr()?;
let barrier = if fn_barrier { Some(self.frame().extra) } else { None };
trace!("reborrow: Creating new reference for {:?} (pointee {}): {:?}",
ptr, place.layout.ty, new_bor);
// Get the allocation. It might not be mutable, so we cannot use `get_mut`.
let alloc = self.memory().get(ptr.alloc_id)?;
alloc.check_bounds(self, ptr, size)?;
// Update the stacks.
if let Borrow::Shr(Some(_)) = new_bor {
// Reference that cares about freezing. We need a frozen-sensitive reborrow.
self.visit_freeze_sensitive(place, size, |cur_ptr, size, frozen| {
let kind = if frozen { RefKind::Frozen } else { RefKind::Raw };
alloc.extra.reborrow(cur_ptr, size, barrier, new_bor, kind)
})?;
} else {
// Just treat this as one big chunk.
let kind = if new_bor.is_unique() { RefKind::Unique } else { RefKind::Raw };
alloc.extra.reborrow(ptr, size, barrier, new_bor, kind)?;
}
Ok(())
}
fn retag_reference(
&mut self,
val: ImmTy<'tcx, Borrow>,
mutbl: Mutability,
fn_barrier: bool,
two_phase: bool,
) -> EvalResult<'tcx, Immediate<Borrow>> {
// We want a place for where the ptr *points to*, so we get one.
let place = self.ref_to_mplace(val)?;
let size = self.size_and_align_of_mplace(place)?
.map(|(size, _)| size)
.unwrap_or_else(|| place.layout.size);
if size == Size::ZERO {
// Nothing to do for ZSTs.
return Ok(*val);
}
// Compute new borrow.
let time = self.machine.stacked_borrows.increment_clock();
let new_bor = match mutbl {
MutMutable => Borrow::Uniq(time),
MutImmutable => Borrow::Shr(Some(time)),
};
// Reborrow.
self.reborrow(place, size, fn_barrier, new_bor)?;
let new_place = place.with_tag(new_bor);
// Handle two-phase borrows.
if two_phase {
assert!(mutbl == MutMutable, "two-phase shared borrows make no sense");
// We immediately share it, to allow read accesses
let two_phase_time = self.machine.stacked_borrows.increment_clock();
let two_phase_bor = Borrow::Shr(Some(two_phase_time));
self.reborrow(new_place, size, /*fn_barrier*/false, two_phase_bor)?;
}
// Return new ptr.
Ok(new_place.to_ref())
}
fn retag(
&mut self,
fn_entry: bool,
two_phase: bool,
place: PlaceTy<'tcx, Borrow>
) -> EvalResult<'tcx> {
let this = self.eval_context_mut();
// Determine mutability and whether to add a barrier.
// Cannot use `builtin_deref` because that reports *immutable* for `Box`,
// making it useless.
@@ -740,14 +703,14 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
// avoids allocating.
if let Some((mutbl, barrier)) = qualify(place.layout.ty, fn_entry) {
// fast path
let val = self.read_immediate(self.place_to_op(place)?)?;
let val = self.retag_reference(val, mutbl, barrier, two_phase)?;
self.write_immediate(val, place)?;
let val = this.read_immediate(this.place_to_op(place)?)?;
let val = this.retag_reference(val, mutbl, barrier, two_phase)?;
this.write_immediate(val, place)?;
return Ok(());
}
let place = self.force_allocation(place)?;
let place = this.force_allocation(place)?;
let mut visitor = RetagVisitor { ecx: self, fn_entry, two_phase };
let mut visitor = RetagVisitor { ecx: this, fn_entry, two_phase };
visitor.visit_value(place)?;
// The actual visitor


@@ -34,10 +34,6 @@ impl<'tcx> Default for TlsData<'tcx> {
}
}
pub trait EvalContextExt<'tcx> {
fn run_tls_dtors(&mut self) -> EvalResult<'tcx>;
}
impl<'tcx> TlsData<'tcx> {
pub fn create_tls_key(
&mut self,
@@ -133,35 +129,37 @@ impl<'tcx> TlsData<'tcx> {
}
}
impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, 'tcx> {
impl<'a, 'mir, 'tcx> EvalContextExt<'a, 'mir, 'tcx> for crate::MiriEvalContext<'a, 'mir, 'tcx> {}
pub trait EvalContextExt<'a, 'mir, 'tcx: 'a+'mir>: crate::MiriEvalContextExt<'a, 'mir, 'tcx> {
fn run_tls_dtors(&mut self) -> EvalResult<'tcx> {
let mut dtor = self.machine.tls.fetch_tls_dtor(None, &*self.tcx);
let this = self.eval_context_mut();
let mut dtor = this.machine.tls.fetch_tls_dtor(None, &*this.tcx);
// FIXME: replace loop by some structure that works with stepping
while let Some((instance, ptr, key)) = dtor {
trace!("Running TLS dtor {:?} on {:?}", instance, ptr);
// TODO: Potentially, this has to support all the other possible instances?
// See eval_fn_call in interpret/terminator/mod.rs
let mir = self.load_mir(instance.def)?;
let ret_place = MPlaceTy::dangling(self.layout_of(self.tcx.mk_unit())?, self).into();
self.push_stack_frame(
let mir = this.load_mir(instance.def)?;
let ret_place = MPlaceTy::dangling(this.layout_of(this.tcx.mk_unit())?, this).into();
this.push_stack_frame(
instance,
mir.span,
mir,
Some(ret_place),
StackPopCleanup::None { cleanup: true },
)?;
let arg_local = self.frame().mir.args_iter().next().ok_or_else(
let arg_local = this.frame().mir.args_iter().next().ok_or_else(
|| EvalErrorKind::AbiViolation("TLS dtor does not take enough arguments.".to_owned()),
)?;
let dest = self.eval_place(&mir::Place::Local(arg_local))?;
self.write_scalar(ptr, dest)?;
let dest = this.eval_place(&mir::Place::Local(arg_local))?;
this.write_scalar(ptr, dest)?;
// step until out of stackframes
self.run()?;
this.run()?;
dtor = match self.machine.tls.fetch_tls_dtor(Some(key), &*self.tcx) {
dtor = match this.machine.tls.fetch_tls_dtor(Some(key), &*this.tcx) {
dtor @ Some(_) => dtor,
None => self.machine.tls.fetch_tls_dtor(None, &*self.tcx),
None => this.machine.tls.fetch_tls_dtor(None, &*this.tcx),
};
}
// FIXME: On a windows target, call `unsafe extern "system" fn on_tls_callback`.
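The scheduling in `run_tls_dtors` in miniature: run the destructor for one key, continue searching after that key, and fall back to a rescan from the start because a destructor may store into other slots. A toy model with hypothetical types (not Miri's `TlsData`):

```rust
// Toy TLS table: slot index is the key; taking the value marks it done.
struct Tls {
    slots: Vec<Option<String>>,
}

impl Tls {
    // Like `fetch_tls_dtor`: find the next live slot at or after `after`.
    fn fetch_dtor(&mut self, after: Option<usize>) -> Option<(usize, String)> {
        let start = after.map_or(0, |k| k + 1);
        for key in start..self.slots.len() {
            if let Some(val) = self.slots[key].take() {
                return Some((key, val));
            }
        }
        None
    }
}

fn main() {
    let mut tls = Tls {
        slots: vec![Some("a".into()), None, Some("c".into())],
    };
    let mut dtor = tls.fetch_dtor(None);
    while let Some((key, val)) = dtor {
        println!("dtor for key {} sees value {:?}", key, val);
        // Continue after this key; rescan from the start if nothing is
        // left, since a destructor may have stored into an earlier slot.
        dtor = match tls.fetch_dtor(Some(key)) {
            next @ Some(_) => next,
            None => tls.fetch_dtor(None),
        };
    }
}
```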