rust/src/machine.rs

753 lines
26 KiB
Rust
Raw Normal View History

2019-06-29 07:15:05 -05:00
//! Global machine state as well as implementation of the interpreter engine
//! `Machine` trait.
use std::borrow::Cow;
use std::cell::RefCell;
2021-05-16 04:28:01 -05:00
use std::fmt;
use std::num::NonZeroU64;
2020-03-19 17:00:02 -05:00
use std::time::Instant;
use log::trace;
use rand::rngs::StdRng;
use rand::SeedableRng;
2020-04-05 16:03:44 -05:00
use rustc_data_structures::fx::FxHashMap;
use rustc_middle::{
mir,
ty::{
self,
layout::{LayoutCx, LayoutError, LayoutOf, TyAndLayout},
2021-04-15 16:19:23 -05:00
Instance, TyCtxt,
2020-04-05 16:03:44 -05:00
},
};
2020-06-01 12:23:54 -05:00
use rustc_span::def_id::DefId;
2021-05-16 04:28:01 -05:00
use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::Size;
2021-01-11 05:35:13 -06:00
use rustc_target::spec::abi::Abi;
use crate::*;
2019-06-29 07:37:41 -05:00
// Some global facts about the emulated machine.

/// Emulated page size. FIXME: should be adjusted to the target architecture.
pub const PAGE_SIZE: u64 = 4 * 1024;
/// Not really about the "stack": this is where we start assigning integer
/// addresses to allocations.
pub const STACK_ADDR: u64 = 32 * PAGE_SIZE;
/// Arbitrarily chosen size reserved below `STACK_ADDR`.
pub const STACK_SIZE: u64 = 16 * PAGE_SIZE;
/// Number of CPUs the emulated machine reports.
pub const NUM_CPUS: u64 = 1;
/// Extra data stored with each stack frame
pub struct FrameData<'tcx> {
/// Extra data for Stacked Borrows.
pub call_id: stacked_borrows::CallId,
2019-11-19 07:51:08 -06:00
/// If this is Some(), then this is a special "catch unwind" frame (the frame of `try_fn`
/// called by `try`). When this frame is popped during unwinding a panic,
/// we stop unwinding, use the `CatchUnwindData` to handle catching.
pub catch_unwind: Option<CatchUnwindData<'tcx>>,
2021-05-29 17:09:46 -05:00
/// If `measureme` profiling is enabled, holds timing information
/// for the start of this frame. When we finish executing this frame,
/// we use this to register a completed event with `measureme`.
2021-05-30 10:04:57 -05:00
pub timing: Option<measureme::DetachedTiming>,
}
impl<'tcx> std::fmt::Debug for FrameData<'tcx> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
2021-05-30 10:13:49 -05:00
// Omitting `timing`, it does not support `Debug`.
let FrameData { call_id, catch_unwind, timing: _ } = self;
2021-05-30 10:04:57 -05:00
f.debug_struct("FrameData")
.field("call_id", call_id)
.field("catch_unwind", catch_unwind)
2021-05-30 10:04:57 -05:00
.finish()
}
}
/// Extra memory kinds known to Miri, beyond what rustc itself tracks.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MiriMemoryKind {
    /// `__rust_alloc` memory.
    Rust,
    /// `malloc` memory.
    C,
    /// Windows `HeapAlloc` memory.
    WinHeap,
    /// Memory for args, errno, and other parts of the machine-managed environment.
    /// This memory may leak.
    Machine,
    /// Memory for env vars. Separate from `Machine` because we clean it up and leak-check it.
    Env,
    /// Globals copied from `tcx`.
    /// This memory may leak.
    Global,
    /// Memory for extern statics.
    /// This memory may leak.
    ExternStatic,
    /// Memory for thread-local statics.
    /// This memory may leak.
    Tls,
}
/// Conversion into the rustc-level memory kind.
///
/// Implemented as `From` rather than `Into`: the standard library's blanket
/// `impl<T, U: From<T>> Into<U> for T` then provides `Into` for free, so all
/// existing `.into()` call sites keep working (see clippy's `from_over_into`).
impl From<MiriMemoryKind> for MemoryKind<MiriMemoryKind> {
    #[inline(always)]
    fn from(kind: MiriMemoryKind) -> MemoryKind<MiriMemoryKind> {
        MemoryKind::Machine(kind)
    }
}
impl MayLeak for MiriMemoryKind {
    #[inline(always)]
    fn may_leak(self) -> bool {
        use self::MiriMemoryKind::*;
        // Exactly the machine-managed kinds (`Machine`, `Global`, `ExternStatic`,
        // `Tls`) are permitted to leak; `Rust`, `C`, `WinHeap`, `Env` are not.
        matches!(self, Machine | Global | ExternStatic | Tls)
    }
}
impl fmt::Display for MiriMemoryKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use self::MiriMemoryKind::*;
match self {
Rust => write!(f, "Rust heap"),
C => write!(f, "C heap"),
WinHeap => write!(f, "Windows heap"),
Machine => write!(f, "machine-managed memory"),
Env => write!(f, "environment variable"),
Global => write!(f, "global (static or const)"),
ExternStatic => write!(f, "extern static"),
2021-05-16 04:28:01 -05:00
Tls => write!(f, "thread-local static"),
}
}
}
2021-07-15 13:33:08 -05:00
/// Pointer provenance (tag).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Tag {
pub alloc_id: AllocId,
2021-07-18 08:22:09 -05:00
/// Stacked Borrows tag.
2021-07-15 13:33:08 -05:00
pub sb: SbTag,
}
impl Provenance for Tag {
2021-07-18 08:22:09 -05:00
/// We use absolute addresses in the `offset` of a `Pointer<Tag>`.
2021-07-15 13:33:08 -05:00
const OFFSET_IS_ADDR: bool = true;
2021-07-18 08:22:09 -05:00
/// We cannot err on partial overwrites, it happens too often in practice (due to unions).
const ERR_ON_PARTIAL_PTR_OVERWRITE: bool = false;
2021-07-15 13:33:08 -05:00
fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let (tag, addr) = ptr.into_parts(); // address is absolute
write!(f, "0x{:x}", addr.bytes())?;
// Forward `alternate` flag to `alloc_id` printing.
if f.alternate() {
write!(f, "[{:#?}]", tag.alloc_id)?;
} else {
write!(f, "[{:?}]", tag.alloc_id)?;
}
// Print Stacked Borrows tag.
write!(f, "{:?}", tag.sb)
}
fn get_alloc_id(self) -> AllocId {
self.alloc_id
}
}
/// Extra per-allocation data tracked by Miri.
#[derive(Debug, Clone)]
pub struct AllocExtra {
    /// Stacked Borrows state; only present if Stacked Borrows is enabled.
    pub stacked_borrows: Option<stacked_borrows::AllocExtra>,
    /// Data race detection state (vector clocks); only present if the
    /// data race detector is enabled.
    pub data_race: Option<data_race::AllocExtra>,
}
/// Extra global memory data
2021-05-22 06:24:08 -05:00
#[derive(Debug)]
2020-03-08 11:54:47 -05:00
pub struct MemoryExtra {
pub stacked_borrows: Option<stacked_borrows::MemoryExtra>,
pub data_race: Option<data_race::MemoryExtra>,
pub intptrcast: intptrcast::MemoryExtra,
2021-07-15 13:33:08 -05:00
/// Mapping extern static names to their base pointer.
extern_statics: FxHashMap<Symbol, Pointer<Tag>>,
2019-07-23 14:38:53 -05:00
/// The random number generator used for resolving non-determinism.
/// Needs to be queried by ptr_to_int, hence needs interior mutability.
2019-07-23 14:38:53 -05:00
pub(crate) rng: RefCell<StdRng>,
/// An allocation ID to report when it is being allocated
2020-04-14 18:00:56 -05:00
/// (helps for debugging memory leaks and use after free bugs).
tracked_alloc_id: Option<AllocId>,
2020-04-13 10:51:22 -05:00
/// Controls whether alignment of memory accesses is being checked.
pub(crate) check_alignment: AlignmentCheck,
/// Failure rate of compare_exchange_weak, between 0.0 and 1.0
pub(crate) cmpxchg_weak_failure_rate: f64,
}
2020-03-08 11:54:47 -05:00
impl MemoryExtra {
pub fn new(config: &MiriConfig) -> Self {
let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
let stacked_borrows = if config.stacked_borrows {
2021-05-22 06:24:08 -05:00
Some(RefCell::new(stacked_borrows::GlobalState::new(
config.tracked_pointer_tag,
config.tracked_call_id,
config.tag_raw,
2021-05-22 06:24:08 -05:00
)))
} else {
None
};
2021-05-22 07:55:33 -05:00
let data_race =
if config.data_race_detector { Some(data_race::GlobalState::new()) } else { None };
MemoryExtra {
stacked_borrows,
data_race,
intptrcast: Default::default(),
2020-03-02 15:36:15 -06:00
extern_statics: FxHashMap::default(),
rng: RefCell::new(rng),
tracked_alloc_id: config.tracked_alloc_id,
check_alignment: config.check_alignment,
cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate,
}
}
2020-03-28 04:07:23 -05:00
fn add_extern_static<'tcx, 'mir>(
this: &mut MiriEvalContext<'mir, 'tcx>,
name: &str,
2021-07-15 13:33:08 -05:00
ptr: Pointer<Option<Tag>>,
2020-03-28 04:07:23 -05:00
) {
2021-07-15 13:33:08 -05:00
let ptr = ptr.into_pointer_or_addr().unwrap();
this.memory.extra.extern_statics.try_insert(Symbol::intern(name), ptr).unwrap();
2020-03-28 04:07:23 -05:00
}
/// Sets up the "extern statics" for this machine.
2020-03-08 11:54:47 -05:00
pub fn init_extern_statics<'tcx, 'mir>(
this: &mut MiriEvalContext<'mir, 'tcx>,
) -> InterpResult<'tcx> {
2020-11-11 03:29:10 -06:00
match this.tcx.sess.target.os.as_str() {
2020-03-07 14:33:27 -06:00
"linux" => {
// "__cxa_thread_atexit_impl"
// This should be all-zero, pointer-sized.
2020-04-18 10:53:54 -05:00
let layout = this.machine.layouts.usize;
2021-07-04 08:59:55 -05:00
let place = this.allocate(layout, MiriMemoryKind::ExternStatic.into())?;
this.write_scalar(Scalar::from_machine_usize(0, this), &place.into())?;
2020-03-28 04:07:23 -05:00
Self::add_extern_static(this, "__cxa_thread_atexit_impl", place.ptr);
2020-03-07 14:33:27 -06:00
// "environ"
2021-05-16 04:28:01 -05:00
Self::add_extern_static(
this,
"environ",
this.machine.env_vars.environ.unwrap().ptr,
);
2020-03-28 04:07:23 -05:00
}
"windows" => {
// "_tls_used"
// This is some obscure hack that is part of the Windows TLS story. It's a `u8`.
2020-04-18 10:53:54 -05:00
let layout = this.machine.layouts.u8;
2021-07-04 08:59:55 -05:00
let place = this.allocate(layout, MiriMemoryKind::ExternStatic.into())?;
this.write_scalar(Scalar::from_u8(0), &place.into())?;
2020-03-28 04:07:23 -05:00
Self::add_extern_static(this, "_tls_used", place.ptr);
}
2020-03-22 02:51:15 -05:00
_ => {} // No "extern statics" supported on this target
}
Ok(())
}
}
2020-04-05 16:03:44 -05:00
/// Precomputed layouts of primitive types
2020-04-18 10:53:54 -05:00
pub struct PrimitiveLayouts<'tcx> {
pub unit: TyAndLayout<'tcx>,
pub i8: TyAndLayout<'tcx>,
pub i32: TyAndLayout<'tcx>,
pub isize: TyAndLayout<'tcx>,
pub u8: TyAndLayout<'tcx>,
pub u32: TyAndLayout<'tcx>,
pub usize: TyAndLayout<'tcx>,
}
impl<'mir, 'tcx: 'mir> PrimitiveLayouts<'tcx> {
2020-04-05 16:03:44 -05:00
fn new(layout_cx: LayoutCx<'tcx, TyCtxt<'tcx>>) -> Result<Self, LayoutError<'tcx>> {
Ok(Self {
2020-04-18 10:53:54 -05:00
unit: layout_cx.layout_of(layout_cx.tcx.mk_unit())?,
i8: layout_cx.layout_of(layout_cx.tcx.types.i8)?,
2020-04-05 16:03:44 -05:00
i32: layout_cx.layout_of(layout_cx.tcx.types.i32)?,
2020-04-18 10:53:54 -05:00
isize: layout_cx.layout_of(layout_cx.tcx.types.isize)?,
u8: layout_cx.layout_of(layout_cx.tcx.types.u8)?,
2020-04-05 16:03:44 -05:00
u32: layout_cx.layout_of(layout_cx.tcx.types.u32)?,
2020-04-18 10:53:54 -05:00
usize: layout_cx.layout_of(layout_cx.tcx.types.usize)?,
2020-04-05 16:03:44 -05:00
})
}
}
/// The machine itself.
pub struct Evaluator<'mir, 'tcx> {
/// Environment variables set by `setenv`.
/// Miri does not expose env vars from the host to the emulated program.
2020-03-08 11:54:47 -05:00
pub(crate) env_vars: EnvVars<'tcx>,
/// Program arguments (`Option` because we can only initialize them after creating the ecx).
/// These are *pointers* to argc/argv because macOS.
/// We also need the full command line as one string because of Windows.
2021-07-15 13:33:08 -05:00
pub(crate) argc: Option<MemPlace<Tag>>,
pub(crate) argv: Option<MemPlace<Tag>>,
pub(crate) cmd_line: Option<MemPlace<Tag>>,
/// TLS state.
pub(crate) tls: TlsData<'tcx>,
/// What should Miri do when an op requires communicating with the host,
/// such as accessing host env vars, random number generation, and
/// file system access.
pub(crate) isolated_op: IsolatedOp,
2019-09-24 17:28:00 -05:00
/// Whether to enforce the validity invariant.
pub(crate) validate: bool,
/// Whether to enforce validity (e.g., initialization) of integers and floats.
pub(crate) enforce_number_validity: bool,
2021-05-29 12:36:06 -05:00
/// Whether to enforce [ABI](Abi) of function calls.
pub(crate) enforce_abi: bool,
pub(crate) file_handler: shims::posix::FileHandler,
pub(crate) dir_handler: shims::posix::DirHandler,
2020-03-19 17:00:02 -05:00
/// The "time anchor" for this machine's monotone clock (for `Instant` simulation).
pub(crate) time_anchor: Instant,
/// The set of threads.
2020-04-09 14:06:33 -05:00
pub(crate) threads: ThreadManager<'mir, 'tcx>,
2020-04-05 16:03:44 -05:00
/// Precomputed `TyLayout`s for primitive data types that are commonly used inside Miri.
pub(crate) layouts: PrimitiveLayouts<'tcx>,
/// Allocations that are considered roots of static memory (that may leak).
pub(crate) static_roots: Vec<AllocId>,
2021-05-29 17:09:46 -05:00
/// The `measureme` profiler used to record timing information about
/// the emulated program.
2021-05-30 10:04:57 -05:00
profiler: Option<measureme::Profiler>,
2021-05-29 17:09:46 -05:00
/// Used with `profiler` to cache the `StringId`s for event names
/// uesd with `measureme`.
2021-05-30 10:04:57 -05:00
string_cache: FxHashMap<String, measureme::StringId>,
2021-04-15 16:19:23 -05:00
/// Cache of `Instance` exported under the given `Symbol` name.
2021-06-08 07:36:57 -05:00
/// `None` means no `Instance` exported under the given name is found.
pub(crate) exported_symbols_cache: FxHashMap<Symbol, Option<Instance<'tcx>>>,
/// Whether to raise a panic in the context of the evaluated process when unsupported
/// functionality is encountered. If `false`, an error is propagated in the Miri application context
/// instead (default behavior)
pub(crate) panic_on_unsupported: bool,
}
impl<'mir, 'tcx> Evaluator<'mir, 'tcx> {
2021-05-29 17:16:12 -05:00
pub(crate) fn new(config: &MiriConfig, layout_cx: LayoutCx<'tcx, TyCtxt<'tcx>>) -> Self {
2021-05-16 04:28:01 -05:00
let layouts =
PrimitiveLayouts::new(layout_cx).expect("Couldn't get layouts of primitive types");
2021-05-30 10:04:57 -05:00
let profiler = config.measureme_out.as_ref().map(|out| {
measureme::Profiler::new(out).expect("Couldn't create `measureme` profiler")
});
Evaluator {
2019-08-13 16:17:41 -05:00
// `env_vars` could be initialized properly here if `Memory` were available before
// calling this method.
2019-08-14 10:24:35 -05:00
env_vars: EnvVars::default(),
argc: None,
argv: None,
cmd_line: None,
tls: TlsData::default(),
isolated_op: config.isolated_op,
validate: config.validate,
enforce_number_validity: config.check_number_validity,
2021-05-29 12:36:06 -05:00
enforce_abi: config.check_abi,
2019-09-24 17:28:00 -05:00
file_handler: Default::default(),
2020-01-25 12:57:15 -06:00
dir_handler: Default::default(),
2020-03-19 17:00:02 -05:00
time_anchor: Instant::now(),
2020-04-05 16:03:44 -05:00
layouts,
2020-04-19 23:03:23 -05:00
threads: ThreadManager::default(),
static_roots: Vec::new(),
profiler,
string_cache: Default::default(),
2021-04-15 16:19:23 -05:00
exported_symbols_cache: FxHashMap::default(),
panic_on_unsupported: config.panic_on_unsupported,
}
}
pub(crate) fn communicate(&self) -> bool {
self.isolated_op == IsolatedOp::Allow
}
}
2019-07-05 16:47:10 -05:00
/// A rustc InterpCx for Miri.
pub type MiriEvalContext<'mir, 'tcx> = InterpCx<'mir, 'tcx, Evaluator<'mir, 'tcx>>;
/// A little trait that's useful to be inherited by extension traits.
pub trait MiriEvalContextExt<'mir, 'tcx> {
/// Returns a shared reference to the underlying `MiriEvalContext`.
fn eval_context_ref<'a>(&'a self) -> &'a MiriEvalContext<'mir, 'tcx>;
/// Returns a mutable reference to the underlying `MiriEvalContext`.
fn eval_context_mut<'a>(&'a mut self) -> &'a mut MiriEvalContext<'mir, 'tcx>;
}
// The identity impl: a `MiriEvalContext` trivially is its own eval context.
impl<'mir, 'tcx> MiriEvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {
#[inline(always)]
fn eval_context_ref(&self) -> &MiriEvalContext<'mir, 'tcx> {
self
}
#[inline(always)]
fn eval_context_mut(&mut self) -> &mut MiriEvalContext<'mir, 'tcx> {
self
}
}
/// Machine hook implementations.
impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
2020-03-25 03:05:24 -05:00
// Associated types: wire Miri's own data into the rustc interpreter engine.
type MemoryKind = MiriMemoryKind;
type FrameExtra = FrameData<'tcx>;
2020-03-08 11:54:47 -05:00
type MemoryExtra = MemoryExtra;
type AllocExtra = AllocExtra;
type PointerTag = Tag;
type ExtraFnVal = Dlsym;
2019-12-23 05:56:23 -06:00
type MemoryMap =
MonoHashMap<AllocId, (MemoryKind<MiriMemoryKind>, Allocation<Tag, Self::AllocExtra>)>;
2020-03-25 03:05:24 -05:00
// Globals copied from `tcx` get the `Global` kind (matches the `expect` in
// `init_allocation_extra` below).
const GLOBAL_KIND: Option<MiriMemoryKind> = Some(MiriMemoryKind::Global);
2021-07-04 08:59:55 -05:00
// Report allocation failures as interpreter errors rather than aborting Miri itself.
const PANIC_ON_ALLOC_FAIL: bool = false;
2020-04-13 10:51:22 -05:00
// Alignment checking is on unless configured as `AlignmentCheck::None`.
#[inline(always)]
fn enforce_alignment(memory_extra: &MemoryExtra) -> bool {
memory_extra.check_alignment != AlignmentCheck::None
}
// With `AlignmentCheck::Int`, alignment is checked on the integer address.
#[inline(always)]
fn force_int_for_alignment_check(memory_extra: &Self::MemoryExtra) -> bool {
memory_extra.check_alignment == AlignmentCheck::Int
2020-04-13 10:51:22 -05:00
}
2019-08-05 08:49:19 -05:00
#[inline(always)]
2019-07-05 16:47:10 -05:00
fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
ecx.machine.validate
}
#[inline(always)]
fn enforce_number_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
ecx.machine.enforce_number_validity
}
2021-05-29 12:36:06 -05:00
#[inline(always)]
fn enforce_abi(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
ecx.machine.enforce_abi
}
// Function-call hooks: all delegate to Miri's shim machinery on the ecx.
#[inline(always)]
2019-12-04 16:31:39 -06:00
fn find_mir_or_eval_fn(
2019-07-05 16:47:10 -05:00
ecx: &mut InterpCx<'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
2021-01-21 20:45:39 -06:00
abi: Abi,
args: &[OpTy<'tcx, Tag>],
ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
unwind: StackPopUnwind,
) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
2021-01-21 20:45:39 -06:00
ecx.find_mir_or_eval_fn(instance, abi, args, ret, unwind)
}
#[inline(always)]
fn call_extra_fn(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
fn_val: Dlsym,
2021-01-21 20:45:39 -06:00
abi: Abi,
args: &[OpTy<'tcx, Tag>],
ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
_unwind: StackPopUnwind,
) -> InterpResult<'tcx> {
2021-01-21 20:45:39 -06:00
ecx.call_dlsym(fn_val, abi, args, ret)
}
#[inline(always)]
fn call_intrinsic(
2021-09-09 04:36:39 -05:00
ecx: &mut rustc_const_eval::interpret::InterpCx<'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, Tag>],
ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
unwind: StackPopUnwind,
) -> InterpResult<'tcx> {
ecx.call_intrinsic(instance, args, ret, unwind)
}
#[inline(always)]
fn assert_panic(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
2020-02-13 07:01:35 -06:00
msg: &mir::AssertMessage<'tcx>,
unwind: Option<mir::BasicBlock>,
) -> InterpResult<'tcx> {
2020-03-11 14:05:44 -05:00
ecx.assert_panic(msg, unwind)
}
2020-03-12 14:46:58 -05:00
// `abort` stops the whole interpreter via a machine-stop error.
#[inline(always)]
2020-12-10 12:53:45 -06:00
fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: String) -> InterpResult<'tcx, !> {
throw_machine_stop!(TerminationInfo::Abort(msg))
2020-03-12 14:46:58 -05:00
}
#[inline(always)]
fn binary_ptr_op(
2021-09-09 04:36:39 -05:00
ecx: &rustc_const_eval::interpret::InterpCx<'mir, 'tcx, Self>,
bin_op: mir::BinOp,
left: &ImmTy<'tcx, Tag>,
right: &ImmTy<'tcx, Tag>,
2020-04-02 17:05:35 -05:00
) -> InterpResult<'tcx, (Scalar<Tag>, bool, ty::Ty<'tcx>)> {
ecx.binary_ptr_op(bin_op, left, right)
}
// `Box` allocation is implemented by calling the `exchange_malloc` lang item.
fn box_alloc(
2019-07-05 16:47:10 -05:00
ecx: &mut InterpCx<'mir, 'tcx, Self>,
dest: &PlaceTy<'tcx, Tag>,
) -> InterpResult<'tcx> {
trace!("box_alloc for {:?}", dest.layout.ty);
// `dest` has type `Box<T>`; deref once to get the layout of `T`.
let layout = ecx.layout_of(dest.layout.ty.builtin_deref(false).unwrap().ty)?;
// First argument: `size`.
// (`0` is allowed here -- this is expected to be handled by the lang item).
let size = Scalar::from_machine_usize(layout.size.bytes(), ecx);
// Second argument: `align`.
let align = Scalar::from_machine_usize(layout.align.abi.bytes(), ecx);
// Call the `exchange_malloc` lang item.
let malloc = ecx.tcx.lang_items().exchange_malloc_fn().unwrap();
let malloc = ty::Instance::mono(ecx.tcx.tcx, malloc);
ecx.call_function(
malloc,
Abi::Rust,
&[size.into(), align.into()],
Some(dest),
// Don't do anything when we are done. The `statement()` function will increment
// the old stack frame's stmt counter to the next statement, which means that when
// `exchange_malloc` returns, we go on evaluating exactly where we want to be.
StackPopCleanup::None { cleanup: true },
)?;
Ok(())
}
2021-07-15 13:33:08 -05:00
fn thread_local_static_base_pointer(
2020-06-01 12:23:54 -05:00
ecx: &mut InterpCx<'mir, 'tcx, Self>,
def_id: DefId,
2021-07-15 13:33:08 -05:00
) -> InterpResult<'tcx, Pointer<Tag>> {
ecx.get_or_create_thread_local_alloc(def_id)
2020-06-01 12:23:54 -05:00
}
2021-07-15 13:33:08 -05:00
// Resolve an extern static by its `#[link_name]` (or item name) against the
// table populated in `MemoryExtra::init_extern_statics`.
fn extern_static_base_pointer(
2020-07-26 04:15:01 -05:00
memory: &Memory<'mir, 'tcx, Self>,
def_id: DefId,
2021-07-15 13:33:08 -05:00
) -> InterpResult<'tcx, Pointer<Tag>> {
2020-07-26 04:15:01 -05:00
let attrs = memory.tcx.get_attrs(def_id);
2020-08-08 03:27:23 -05:00
let link_name = match memory.tcx.sess.first_attr_value_str_by_name(&attrs, sym::link_name) {
Some(name) => name,
2020-07-26 04:15:01 -05:00
None => memory.tcx.item_name(def_id),
};
2021-07-15 13:33:08 -05:00
if let Some(&ptr) = memory.extra.extern_statics.get(&link_name) {
Ok(ptr)
} else {
2020-07-26 04:15:01 -05:00
throw_unsup_format!("`extern` static {:?} is not supported by Miri", def_id)
}
}
2019-11-29 12:50:37 -06:00
// Attach Miri's per-allocation extra state (Stacked Borrows stacks, data race
// clocks) and convert the allocation's pointers to tagged pointers.
fn init_allocation_extra<'b>(
2021-07-15 13:33:08 -05:00
mem: &Memory<'mir, 'tcx, Self>,
id: AllocId,
alloc: Cow<'b, Allocation>,
2020-03-25 03:05:24 -05:00
kind: Option<MemoryKind<Self::MemoryKind>>,
2021-07-15 13:33:08 -05:00
) -> Cow<'b, Allocation<Self::PointerTag, Self::AllocExtra>> {
if Some(id) == mem.extra.tracked_alloc_id {
register_diagnostic(NonHaltingDiagnostic::CreatedAlloc(id));
}
let kind = kind.expect("we set our STATIC_KIND so this cannot be None");
let alloc = alloc.into_owned();
2021-07-15 13:33:08 -05:00
let stacks = if let Some(stacked_borrows) = &mem.extra.stacked_borrows {
Some(Stacks::new_allocation(id, alloc.size(), stacked_borrows, kind))
2021-05-16 04:28:01 -05:00
} else {
2021-07-15 13:33:08 -05:00
None
2021-05-16 04:28:01 -05:00
};
2021-07-15 13:33:08 -05:00
let race_alloc = if let Some(data_race) = &mem.extra.data_race {
2021-05-17 17:11:05 -05:00
Some(data_race::AllocExtra::new_allocation(&data_race, alloc.size(), kind))
} else {
None
};
2021-07-15 13:33:08 -05:00
let alloc: Allocation<Tag, Self::AllocExtra> = alloc.convert_tag_add_extra(
&mem.tcx,
AllocExtra { stacked_borrows: stacks, data_race: race_alloc },
2021-07-15 13:33:08 -05:00
|ptr| Evaluator::tag_alloc_base_pointer(mem, ptr),
2019-09-05 11:17:58 -05:00
);
2021-07-15 13:33:08 -05:00
Cow::Owned(alloc)
}
// Build the base pointer of an allocation: absolute address (via intptrcast)
// plus a Stacked Borrows base tag (or `Untagged` when SB is disabled).
fn tag_alloc_base_pointer(
mem: &Memory<'mir, 'tcx, Self>,
ptr: Pointer<AllocId>,
) -> Pointer<Tag> {
let absolute_addr = intptrcast::GlobalState::rel_ptr_to_addr(&mem, ptr);
let sb_tag = if let Some(stacked_borrows) = &mem.extra.stacked_borrows {
stacked_borrows.borrow_mut().base_tag(ptr.provenance)
} else {
SbTag::Untagged
};
Pointer::new(Tag { alloc_id: ptr.provenance, sb: sb_tag }, Size::from_bytes(absolute_addr))
}
#[inline(always)]
fn ptr_from_addr(
mem: &Memory<'mir, 'tcx, Self>,
addr: u64,
) -> Pointer<Option<Self::PointerTag>> {
intptrcast::GlobalState::ptr_from_addr(addr, mem)
}
/// Convert a pointer with provenance into an allocation-offset pair,
/// or a `None` with an absolute address if that conversion is not possible.
fn ptr_get_alloc(
mem: &Memory<'mir, 'tcx, Self>,
ptr: Pointer<Self::PointerTag>,
) -> (AllocId, Size) {
let rel = intptrcast::GlobalState::abs_ptr_to_rel(mem, ptr);
(ptr.provenance.alloc_id, rel)
}
2020-04-14 18:00:56 -05:00
// Memory-access hooks: forward each access to the data race detector and/or
// Stacked Borrows, whichever is enabled (per-allocation state is `Some` iff
// the corresponding global state is `Some`, hence the `unwrap()`s).
#[inline(always)]
2021-05-17 06:50:45 -05:00
fn memory_read(
2021-05-22 06:24:08 -05:00
memory_extra: &Self::MemoryExtra,
2021-05-20 06:32:18 -05:00
alloc_extra: &AllocExtra,
2021-07-15 13:33:08 -05:00
tag: Tag,
range: AllocRange,
2020-04-14 18:00:56 -05:00
) -> InterpResult<'tcx> {
2021-05-20 06:32:18 -05:00
if let Some(data_race) = &alloc_extra.data_race {
2021-07-15 13:33:08 -05:00
data_race.read(tag.alloc_id, range, memory_extra.data_race.as_ref().unwrap())?;
2020-04-14 18:00:56 -05:00
}
2021-05-20 06:32:18 -05:00
if let Some(stacked_borrows) = &alloc_extra.stacked_borrows {
2021-07-15 13:33:08 -05:00
stacked_borrows.memory_read(
tag.alloc_id,
tag.sb,
range,
memory_extra.stacked_borrows.as_ref().unwrap(),
)
2021-05-17 06:50:45 -05:00
} else {
Ok(())
}
}
2020-05-08 06:56:10 -05:00
2021-05-17 06:50:45 -05:00
#[inline(always)]
fn memory_written(
2021-05-22 06:24:08 -05:00
memory_extra: &mut Self::MemoryExtra,
2021-05-20 06:32:18 -05:00
alloc_extra: &mut AllocExtra,
2021-07-15 13:33:08 -05:00
tag: Tag,
range: AllocRange,
2021-05-17 06:50:45 -05:00
) -> InterpResult<'tcx> {
2021-05-20 06:32:18 -05:00
if let Some(data_race) = &mut alloc_extra.data_race {
2021-07-15 13:33:08 -05:00
data_race.write(tag.alloc_id, range, memory_extra.data_race.as_mut().unwrap())?;
2021-05-17 06:50:45 -05:00
}
2021-05-20 06:32:18 -05:00
if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
2021-05-22 07:55:33 -05:00
stacked_borrows.memory_written(
2021-07-15 13:33:08 -05:00
tag.alloc_id,
tag.sb,
range,
2021-05-22 07:55:33 -05:00
memory_extra.stacked_borrows.as_mut().unwrap(),
)
2021-05-17 06:50:45 -05:00
} else {
Ok(())
}
}
#[inline(always)]
fn memory_deallocated(
memory_extra: &mut Self::MemoryExtra,
2021-05-20 06:32:18 -05:00
alloc_extra: &mut AllocExtra,
2021-07-15 13:33:08 -05:00
tag: Tag,
range: AllocRange,
2021-05-17 06:50:45 -05:00
) -> InterpResult<'tcx> {
2021-07-15 13:33:08 -05:00
if Some(tag.alloc_id) == memory_extra.tracked_alloc_id {
register_diagnostic(NonHaltingDiagnostic::FreedAlloc(tag.alloc_id));
2021-05-17 06:50:45 -05:00
}
2021-05-20 06:32:18 -05:00
if let Some(data_race) = &mut alloc_extra.data_race {
2021-07-15 13:33:08 -05:00
data_race.deallocate(tag.alloc_id, range, memory_extra.data_race.as_mut().unwrap())?;
2021-05-17 06:50:45 -05:00
}
2021-05-20 06:32:18 -05:00
if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
2021-05-22 07:55:33 -05:00
stacked_borrows.memory_deallocated(
2021-07-15 13:33:08 -05:00
tag.alloc_id,
tag.sb,
range,
2021-05-22 07:55:33 -05:00
memory_extra.stacked_borrows.as_mut().unwrap(),
)
2021-05-17 06:50:45 -05:00
} else {
Ok(())
}
2020-04-14 18:00:56 -05:00
}
// Retagging is a no-op unless Stacked Borrows is enabled.
#[inline(always)]
fn retag(
2019-07-05 16:47:10 -05:00
ecx: &mut InterpCx<'mir, 'tcx, Self>,
kind: mir::RetagKind,
place: &PlaceTy<'tcx, Tag>,
) -> InterpResult<'tcx> {
2021-05-16 04:28:01 -05:00
if ecx.memory.extra.stacked_borrows.is_some() { ecx.retag(kind, place) } else { Ok(()) }
}
#[inline(always)]
2020-04-13 09:08:12 -05:00
fn init_frame_extra(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
frame: Frame<'mir, 'tcx, Tag>,
) -> InterpResult<'tcx, Frame<'mir, 'tcx, Tag, FrameData<'tcx>>> {
2021-05-29 17:09:46 -05:00
// Start recording our event before doing anything else
let timing = if let Some(profiler) = ecx.machine.profiler.as_ref() {
let fn_name = frame.instance.to_string();
let entry = ecx.machine.string_cache.entry(fn_name.clone());
2021-05-29 17:16:12 -05:00
let name = entry.or_insert_with(|| profiler.alloc_string(&*fn_name));
Some(profiler.start_recording_interval_event_detached(
2021-05-29 17:09:46 -05:00
*name,
2021-05-30 10:04:57 -05:00
measureme::EventId::from_label(*name),
2021-05-29 17:09:46 -05:00
ecx.get_active_thread().to_u32(),
))
} else {
None
};
2021-05-29 17:09:46 -05:00
// Without Stacked Borrows, use a dummy call ID of 1.
let stacked_borrows = ecx.memory.extra.stacked_borrows.as_ref();
let call_id = stacked_borrows.map_or(NonZeroU64::new(1).unwrap(), |stacked_borrows| {
stacked_borrows.borrow_mut().new_call()
});
let extra = FrameData { call_id, catch_unwind: None, timing };
2020-04-13 09:08:12 -05:00
Ok(frame.with_extra(extra))
}
// The interpreter's stack lives in the currently active thread.
fn stack<'a>(
2021-05-16 04:28:01 -05:00
ecx: &'a InterpCx<'mir, 'tcx, Self>,
) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>] {
ecx.active_thread_stack()
}
fn stack_mut<'a>(
2021-05-16 04:28:01 -05:00
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>> {
ecx.active_thread_stack_mut()
}
2020-04-13 10:31:19 -05:00
#[inline(always)]
fn after_stack_push(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
2021-05-16 04:28:01 -05:00
if ecx.memory.extra.stacked_borrows.is_some() { ecx.retag_return_place() } else { Ok(()) }
2020-04-13 10:31:19 -05:00
}
// On pop: finish unwind handling, then flush this frame's profiling event.
#[inline(always)]
2020-04-13 09:08:12 -05:00
fn after_stack_pop(
2019-07-05 16:47:10 -05:00
ecx: &mut InterpCx<'mir, 'tcx, Self>,
mut frame: Frame<'mir, 'tcx, Tag, FrameData<'tcx>>,
2019-12-23 05:56:23 -06:00
unwinding: bool,
) -> InterpResult<'tcx, StackPopJump> {
let timing = frame.extra.timing.take();
let res = ecx.handle_stack_pop(frame.extra, unwinding);
if let Some(profiler) = ecx.machine.profiler.as_ref() {
profiler.finish_recording_interval_event(timing.unwrap());
}
res
}
}