adjust for MemoryExtra being merged into Machine
parent fc2165dd78
commit 6e1f3cd8ff
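The pattern applied throughout the diff below: state that used to live on a separate `MemoryExtra` struct (reached as `this.memory.extra.<field>`) now lives directly on the machine (`Evaluator`), reached as `this.machine.<field>`, and calls that went through `this.memory.<method>(..)` move to renamed context methods such as `allocate_ptr`, `deallocate_ptr`, and `write_bytes_ptr`. A minimal standalone sketch of the before/after shape, using hypothetical toy types rather than Miri's real ones:

```rust
// Toy model of the refactor; `Machine`, `MemoryExtra`, and the field names
// here are illustrative stand-ins, not Miri's actual definitions.

// Before: global interpreter state split off into a separate `MemoryExtra`.
mod before {
    pub struct MemoryExtra {
        pub cmpxchg_weak_failure_rate: f64,
    }
    pub struct Memory {
        pub extra: MemoryExtra,
    }
    pub struct InterpCx {
        pub memory: Memory,
    }
    pub fn rate(this: &InterpCx) -> f64 {
        // old access path: this.memory.extra.<field>
        this.memory.extra.cmpxchg_weak_failure_rate
    }
}

// After: `MemoryExtra` merged into the machine, so the indirection disappears.
mod after {
    pub struct Machine {
        pub cmpxchg_weak_failure_rate: f64,
    }
    pub struct InterpCx {
        pub machine: Machine,
    }
    pub fn rate(this: &InterpCx) -> f64 {
        // new access path: this.machine.<field>
        this.machine.cmpxchg_weak_failure_rate
    }
}

fn main() {
    let old = before::InterpCx {
        memory: before::Memory { extra: before::MemoryExtra { cmpxchg_weak_failure_rate: 0.8 } },
    };
    let new = after::InterpCx { machine: after::Machine { cmpxchg_weak_failure_rate: 0.8 } };
    assert_eq!(before::rate(&old), after::rate(&new));
}
```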
@@ -79,7 +79,6 @@
};

pub type AllocExtra = VClockAlloc;
- pub type MemoryExtra = GlobalState;

/// Valid atomic read-write operations, alias of atomic::Ordering (not non-exhaustive).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
@@ -596,9 +595,9 @@ fn atomic_compare_exchange_scalar(
let eq = this.binary_op(mir::BinOp::Eq, &old, expect_old)?;
// If the operation would succeed, but is "weak", fail some portion
// of the time, based on `rate`.
- let rate = this.memory.extra.cmpxchg_weak_failure_rate;
+ let rate = this.machine.cmpxchg_weak_failure_rate;
let cmpxchg_success = eq.to_scalar()?.to_bool()?
- && (!can_fail_spuriously || this.memory.extra.rng.get_mut().gen::<f64>() < rate);
+ && (!can_fail_spuriously || this.machine.rng.get_mut().gen::<f64>() < rate);
let res = Immediate::ScalarPair(
old.to_scalar_or_uninit(),
Scalar::from_bool(cmpxchg_success).into(),
@@ -690,7 +689,7 @@ fn validate_atomic_rmw(
/// Update the data-race detector for an atomic fence on the current thread.
fn validate_atomic_fence(&mut self, atomic: AtomicFenceOp) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
- if let Some(data_race) = &mut this.memory.extra.data_race {
+ if let Some(data_race) = &mut this.machine.data_race {
data_race.maybe_perform_sync_operation(move |index, mut clocks| {
log::trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);
@@ -725,7 +724,7 @@ pub struct VClockAlloc {
impl VClockAlloc {
/// Create a new data-race detector for newly allocated memory.
pub fn new_allocation(
- global: &MemoryExtra,
+ global: &GlobalState,
len: Size,
kind: MemoryKind<MiriMemoryKind>,
) -> VClockAlloc {
@@ -796,7 +795,7 @@ fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
#[cold]
#[inline(never)]
fn report_data_race<'tcx>(
- global: &MemoryExtra,
+ global: &GlobalState,
range: &MemoryCellClocks,
action: &str,
is_atomic: bool,
@@ -950,13 +949,13 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
#[inline]
fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriEvalContext<'mir, 'tcx>) -> R) -> R {
let this = self.eval_context_ref();
- let old = if let Some(data_race) = &this.memory.extra.data_race {
+ let old = if let Some(data_race) = &this.machine.data_race {
data_race.multi_threaded.replace(false)
} else {
false
};
let result = op(this);
- if let Some(data_race) = &this.memory.extra.data_race {
+ if let Some(data_race) = &this.machine.data_race {
data_race.multi_threaded.set(old);
}
result
@@ -971,13 +970,13 @@ fn allow_data_races_mut<R>(
op: impl FnOnce(&mut MiriEvalContext<'mir, 'tcx>) -> R,
) -> R {
let this = self.eval_context_mut();
- let old = if let Some(data_race) = &this.memory.extra.data_race {
+ let old = if let Some(data_race) = &this.machine.data_race {
data_race.multi_threaded.replace(false)
} else {
false
};
let result = op(this);
- if let Some(data_race) = &this.memory.extra.data_race {
+ if let Some(data_race) = &this.machine.data_race {
data_race.multi_threaded.set(old);
}
result
@@ -997,14 +996,13 @@ fn validate_atomic_op<A: Debug + Copy>(
) -> Result<(), DataRace>,
) -> InterpResult<'tcx> {
let this = self.eval_context_ref();
- if let Some(data_race) = &this.memory.extra.data_race {
+ if let Some(data_race) = &this.machine.data_race {
if data_race.multi_threaded.get() {
let size = place.layout.size;
- let (alloc_id, base_offset, ptr) = this.memory.ptr_get_alloc(place.ptr)?;
+ let (alloc_id, base_offset, ptr) = this.ptr_get_alloc_id(place.ptr)?;
// Load and log the atomic operation.
// Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
- let alloc_meta =
- &this.memory.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
+ let alloc_meta = &this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
log::trace!(
"Atomic op({}) with ordering {:?} on {:?} (size={})",
description,
@@ -196,7 +196,7 @@ pub fn report_error<'tcx, 'mir>(
Unsupported(_) =>
vec![(None, format!("this is likely not a bug in the program; it indicates that the program performed an operation that the interpreter does not support"))],
UndefinedBehavior(UndefinedBehaviorInfo::AlignmentCheckFailed { .. })
- if ecx.memory.extra.check_alignment == AlignmentCheck::Symbolic
+ if ecx.machine.check_alignment == AlignmentCheck::Symbolic
=>
vec![
(None, format!("this usually indicates that your program performed an invalid operation and caused Undefined Behavior")),
@@ -251,7 +251,7 @@ pub fn report_error<'tcx, 'mir>(
access.uninit_offset.bytes(),
access.uninit_offset.bytes() + access.uninit_size.bytes(),
);
- eprintln!("{:?}", ecx.memory.dump_alloc(*alloc_id));
+ eprintln!("{:?}", ecx.dump_alloc(*alloc_id));
}
_ => {}
}
src/eval.rs (16 changed lines)
@@ -153,7 +153,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
tcx: TyCtxt<'tcx>,
entry_id: DefId,
entry_type: EntryFnType,
- config: MiriConfig,
+ config: &MiriConfig,
) -> InterpResult<'tcx, (InterpCx<'mir, 'tcx, Evaluator<'mir, 'tcx>>, MPlaceTy<'tcx, Tag>)> {
let param_env = ty::ParamEnv::reveal_all();
let layout_cx = LayoutCx { tcx, param_env };
@@ -161,12 +161,10 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
tcx,
rustc_span::source_map::DUMMY_SP,
param_env,
- Evaluator::new(&config, layout_cx),
- MemoryExtra::new(&config),
+ Evaluator::new(config, layout_cx),
);
- // Complete initialization.
- EnvVars::init(&mut ecx, config.excluded_env_vars, config.forwarded_env_vars)?;
- MemoryExtra::init_extern_statics(&mut ecx)?;
+ // Some parts of initialization require a full `InterpCx`.
+ Evaluator::late_init(&mut ecx, config)?;

// Make sure we have MIR. We check MIR for some stable monomorphic function in libcore.
let sentinel = ecx.resolve_path(&["core", "ascii", "escape_default"]);
@@ -260,7 +258,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
.unwrap()
.unwrap();

- let main_ptr = ecx.memory.create_fn_alloc(FnVal::Instance(entry_instance));
+ let main_ptr = ecx.create_fn_alloc_ptr(FnVal::Instance(entry_instance));

ecx.call_function(
start_instance,
@@ -296,7 +294,7 @@ pub fn eval_entry<'tcx>(
// Copy setting before we move `config`.
let ignore_leaks = config.ignore_leaks;

- let (mut ecx, ret_place) = match create_ecx(tcx, entry_id, entry_type, config) {
+ let (mut ecx, ret_place) = match create_ecx(tcx, entry_id, entry_type, &config) {
Ok(v) => v,
Err(err) => {
err.print_backtrace();
@@ -354,7 +352,7 @@ pub fn eval_entry<'tcx>(
}
// Check for memory leaks.
info!("Additonal static roots: {:?}", ecx.machine.static_roots);
- let leaks = ecx.memory.leak_report(&ecx.machine.static_roots);
+ let leaks = ecx.leak_report(&ecx.machine.static_roots);
if leaks != 0 {
tcx.sess.err("the evaluated program leaked memory");
tcx.sess.note_without_error("pass `-Zmiri-ignore-leaks` to disable this check");
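The eval.rs change above replaces the separate `MemoryExtra::new` / `MemoryExtra::init_extern_statics` calls with a two-phase setup on the machine itself: everything that only needs the config happens in `Evaluator::new`, and everything that needs a fully built interpreter context is deferred to `Evaluator::late_init`. A minimal sketch of that two-phase pattern with placeholder types (not Miri's real signatures):

```rust
// Illustrative two-phase construction; `Config`, `Machine`, and `InterpCx`
// are placeholders, not Miri's actual types.
struct Config { seed: u64 }

struct Machine { seed: u64, env_ready: bool }

struct InterpCx { machine: Machine }

impl Machine {
    // Phase 1: everything that only needs the config.
    fn new(config: &Config) -> Machine {
        Machine { seed: config.seed, env_ready: false }
    }

    // Phase 2: everything that needs the fully constructed interpreter.
    fn late_init(ecx: &mut InterpCx, _config: &Config) {
        // e.g. set up environment variables / extern statics here
        ecx.machine.env_ready = true;
    }
}

fn main() {
    let config = Config { seed: 42 };
    let mut ecx = InterpCx { machine: Machine::new(&config) };
    Machine::late_init(&mut ecx, &config);
    assert!(ecx.machine.env_ready);
}
```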
@@ -199,11 +199,11 @@ fn gen_random(&mut self, ptr: Pointer<Option<Tag>>, len: u64) -> InterpResult<'t
getrandom::getrandom(&mut data)
.map_err(|err| err_unsup_format!("host getrandom failed: {}", err))?;
} else {
- let rng = this.memory.extra.rng.get_mut();
+ let rng = this.machine.rng.get_mut();
rng.fill_bytes(&mut data);
}

- this.memory.write_bytes(ptr, data.iter().copied())
+ this.write_bytes_ptr(ptr, data.iter().copied())
}

/// Call a function: Push the stack frame and pass the arguments.
@@ -645,7 +645,8 @@ fn read_c_str<'a>(&'a self, ptr: Pointer<Option<Tag>>) -> InterpResult<'tcx, &'a
loop {
// FIXME: We are re-getting the allocation each time around the loop.
// Would be nice if we could somehow "extend" an existing AllocRange.
- let alloc = this.memory.get(ptr.offset(len, this)?.into(), size1, Align::ONE)?.unwrap(); // not a ZST, so we will get a result
+ let alloc =
+ this.get_ptr_alloc(ptr.offset(len, this)?.into(), size1, Align::ONE)?.unwrap(); // not a ZST, so we will get a result
let byte = alloc.read_scalar(alloc_range(Size::ZERO, size1))?.to_u8()?;
if byte == 0 {
break;
@@ -655,7 +656,7 @@ fn read_c_str<'a>(&'a self, ptr: Pointer<Option<Tag>>) -> InterpResult<'tcx, &'a
}

// Step 2: get the bytes.
- this.memory.read_bytes(ptr.into(), len)
+ this.read_bytes_ptr(ptr.into(), len)
}

fn read_wide_str(&self, mut ptr: Pointer<Option<Tag>>) -> InterpResult<'tcx, Vec<u16>> {
@@ -667,7 +668,7 @@ fn read_wide_str(&self, mut ptr: Pointer<Option<Tag>>) -> InterpResult<'tcx, Vec
loop {
// FIXME: We are re-getting the allocation each time around the loop.
// Would be nice if we could somehow "extend" an existing AllocRange.
- let alloc = this.memory.get(ptr.into(), size2, align2)?.unwrap(); // not a ZST, so we will get a result
+ let alloc = this.get_ptr_alloc(ptr.into(), size2, align2)?.unwrap(); // not a ZST, so we will get a result
let wchar = alloc.read_scalar(alloc_range(Size::ZERO, size2))?.to_u16()?;
if wchar == 0 {
break;
@@ -750,8 +751,7 @@ fn check_shim<'a, const N: usize>(
/// Mark a machine allocation that was just created as immutable.
fn mark_immutable(&mut self, mplace: &MemPlace<Tag>) {
let this = self.eval_context_mut();
- this.memory
- .mark_immutable(mplace.ptr.into_pointer_or_addr().unwrap().provenance.alloc_id)
+ this.alloc_mark_immutable(mplace.ptr.into_pointer_or_addr().unwrap().provenance.alloc_id)
.unwrap();
}
}
@@ -9,10 +9,10 @@

use crate::*;

- pub type MemoryExtra = RefCell<GlobalState>;
+ pub type GlobalState = RefCell<GlobalStateInner>;

#[derive(Clone, Debug)]
- pub struct GlobalState {
+ pub struct GlobalStateInner {
/// This is used as a map between the address of each allocation and its `AllocId`.
/// It is always sorted
int_to_ptr_map: Vec<(u64, AllocId)>,
@@ -29,9 +29,9 @@ pub struct GlobalState {
strict_provenance: bool,
}

- impl GlobalState {
+ impl GlobalStateInner {
pub fn new(config: &MiriConfig) -> Self {
- GlobalState {
+ GlobalStateInner {
int_to_ptr_map: Vec::default(),
base_addr: FxHashMap::default(),
next_base_addr: STACK_ADDR,
@@ -40,13 +40,10 @@ pub fn new(config: &MiriConfig) -> Self {
}
}

- impl<'mir, 'tcx> GlobalState {
- pub fn ptr_from_addr(
- addr: u64,
- memory: &Memory<'mir, 'tcx, Evaluator<'mir, 'tcx>>,
- ) -> Pointer<Option<Tag>> {
+ impl<'mir, 'tcx> GlobalStateInner {
+ pub fn ptr_from_addr(addr: u64, ecx: &MiriEvalContext<'mir, 'tcx>) -> Pointer<Option<Tag>> {
trace!("Casting 0x{:x} to a pointer", addr);
- let global_state = memory.extra.intptrcast.borrow();
+ let global_state = ecx.machine.intptrcast.borrow();

if global_state.strict_provenance {
return Pointer::new(None, Size::from_bytes(addr));
@@ -64,7 +61,11 @@ pub fn ptr_from_addr(
let offset = addr - glb;
// If the offset exceeds the size of the allocation, don't use this `alloc_id`.
if offset
- <= memory.get_size_and_align(alloc_id, AllocCheck::MaybeDead).unwrap().0.bytes()
+ <= ecx
+ .get_alloc_size_and_align(alloc_id, AllocCheck::MaybeDead)
+ .unwrap()
+ .0
+ .bytes()
{
Some(alloc_id)
} else {
@@ -79,11 +80,8 @@ pub fn ptr_from_addr(
)
}

- fn alloc_base_addr(
- memory: &Memory<'mir, 'tcx, Evaluator<'mir, 'tcx>>,
- alloc_id: AllocId,
- ) -> u64 {
- let mut global_state = memory.extra.intptrcast.borrow_mut();
+ fn alloc_base_addr(ecx: &MiriEvalContext<'mir, 'tcx>, alloc_id: AllocId) -> u64 {
+ let mut global_state = ecx.machine.intptrcast.borrow_mut();
let global_state = &mut *global_state;

match global_state.base_addr.entry(alloc_id) {
@@ -92,12 +90,12 @@ fn alloc_base_addr(
// There is nothing wrong with a raw pointer being cast to an integer only after
// it became dangling. Hence `MaybeDead`.
let (size, align) =
- memory.get_size_and_align(alloc_id, AllocCheck::MaybeDead).unwrap();
+ ecx.get_alloc_size_and_align(alloc_id, AllocCheck::MaybeDead).unwrap();

// This allocation does not have a base address yet, pick one.
// Leave some space to the previous allocation, to give it some chance to be less aligned.
let slack = {
- let mut rng = memory.extra.rng.borrow_mut();
+ let mut rng = ecx.machine.rng.borrow_mut();
// This means that `(global_state.next_base_addr + slack) % 16` is uniformly distributed.
rng.gen_range(0..16)
};
@@ -129,27 +127,21 @@ fn alloc_base_addr(
}

/// Convert a relative (tcx) pointer to an absolute address.
- pub fn rel_ptr_to_addr(
- memory: &Memory<'mir, 'tcx, Evaluator<'mir, 'tcx>>,
- ptr: Pointer<AllocId>,
- ) -> u64 {
+ pub fn rel_ptr_to_addr(ecx: &MiriEvalContext<'mir, 'tcx>, ptr: Pointer<AllocId>) -> u64 {
let (alloc_id, offset) = ptr.into_parts(); // offset is relative
- let base_addr = GlobalState::alloc_base_addr(memory, alloc_id);
+ let base_addr = GlobalStateInner::alloc_base_addr(ecx, alloc_id);

// Add offset with the right kind of pointer-overflowing arithmetic.
- let dl = memory.data_layout();
+ let dl = ecx.data_layout();
dl.overflowing_offset(base_addr, offset.bytes()).0
}

- pub fn abs_ptr_to_rel(
- memory: &Memory<'mir, 'tcx, Evaluator<'mir, 'tcx>>,
- ptr: Pointer<Tag>,
- ) -> Size {
+ pub fn abs_ptr_to_rel(ecx: &MiriEvalContext<'mir, 'tcx>, ptr: Pointer<Tag>) -> Size {
let (tag, addr) = ptr.into_parts(); // addr is absolute
- let base_addr = GlobalState::alloc_base_addr(memory, tag.alloc_id);
+ let base_addr = GlobalStateInner::alloc_base_addr(ecx, tag.alloc_id);

// Wrapping "addr - base_addr"
- let dl = memory.data_layout();
+ let dl = ecx.data_layout();
let neg_base_addr = (base_addr as i64).wrapping_neg();
Size::from_bytes(dl.overflowing_signed_offset(addr.bytes(), neg_base_addr).0)
}
@@ -170,7 +162,7 @@ mod tests {

#[test]
fn test_align_addr() {
- assert_eq!(GlobalState::align_addr(37, 4), 40);
- assert_eq!(GlobalState::align_addr(44, 4), 44);
+ assert_eq!(GlobalStateInner::align_addr(37, 4), 40);
+ assert_eq!(GlobalStateInner::align_addr(44, 4), 44);
}
}
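In the intptrcast module above, the old `MemoryExtra = RefCell<GlobalState>` alias becomes `GlobalState = RefCell<GlobalStateInner>`: the plain struct gets the `Inner` suffix, and the public alias names the interior-mutable wrapper the machine stores. A small sketch of that naming pattern with toy fields (not the real ones):

```rust
use std::cell::RefCell;

// The machine stores `GlobalState`; code that needs the data borrows the inner value.
pub type GlobalState = RefCell<GlobalStateInner>;

#[derive(Debug, Default)]
pub struct GlobalStateInner {
    // toy stand-in for the real address-mapping fields
    next_base_addr: u64,
}

fn main() {
    let state: GlobalState = RefCell::new(GlobalStateInner { next_base_addr: 0x1000 });
    // Borrow mutably through the cell, as call sites like `alloc_base_addr` do.
    state.borrow_mut().next_base_addr += 16;
    assert_eq!(state.borrow().next_base_addr, 0x1010);
}
```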
@@ -66,8 +66,8 @@
};
pub use crate::helpers::EvalContextExt as HelpersEvalContextExt;
pub use crate::machine::{
- AllocExtra, Evaluator, FrameData, MemoryExtra, MiriEvalContext, MiriEvalContextExt,
- MiriMemoryKind, Tag, NUM_CPUS, PAGE_SIZE, STACK_ADDR, STACK_SIZE,
+ AllocExtra, Evaluator, FrameData, MiriEvalContext, MiriEvalContextExt, MiriMemoryKind, Tag,
+ NUM_CPUS, PAGE_SIZE, STACK_ADDR, STACK_SIZE,
};
pub use crate::mono_hash_map::MonoHashMap;
pub use crate::operator::EvalContextExt as OperatorEvalContextExt;
src/machine.rs (284 changed lines)
@@ -163,103 +163,6 @@ pub struct AllocExtra {
pub data_race: Option<data_race::AllocExtra>,
}

- /// Extra global memory data
- #[derive(Debug)]
- pub struct MemoryExtra {
- pub stacked_borrows: Option<stacked_borrows::MemoryExtra>,
- pub data_race: Option<data_race::MemoryExtra>,
- pub intptrcast: intptrcast::MemoryExtra,
-
- /// Mapping extern static names to their base pointer.
- extern_statics: FxHashMap<Symbol, Pointer<Tag>>,
-
- /// The random number generator used for resolving non-determinism.
- /// Needs to be queried by ptr_to_int, hence needs interior mutability.
- pub(crate) rng: RefCell<StdRng>,
-
- /// An allocation ID to report when it is being allocated
- /// (helps for debugging memory leaks and use after free bugs).
- tracked_alloc_id: Option<AllocId>,
-
- /// Controls whether alignment of memory accesses is being checked.
- pub(crate) check_alignment: AlignmentCheck,
-
- /// Failure rate of compare_exchange_weak, between 0.0 and 1.0
- pub(crate) cmpxchg_weak_failure_rate: f64,
- }
-
- impl MemoryExtra {
- pub fn new(config: &MiriConfig) -> Self {
- let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
- let stacked_borrows = if config.stacked_borrows {
- Some(RefCell::new(stacked_borrows::GlobalState::new(
- config.tracked_pointer_tag,
- config.tracked_call_id,
- config.tag_raw,
- )))
- } else {
- None
- };
- let data_race =
- if config.data_race_detector { Some(data_race::GlobalState::new()) } else { None };
- MemoryExtra {
- stacked_borrows,
- data_race,
- intptrcast: RefCell::new(intptrcast::GlobalState::new(config)),
- extern_statics: FxHashMap::default(),
- rng: RefCell::new(rng),
- tracked_alloc_id: config.tracked_alloc_id,
- check_alignment: config.check_alignment,
- cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate,
- }
- }
-
- fn add_extern_static<'tcx, 'mir>(
- this: &mut MiriEvalContext<'mir, 'tcx>,
- name: &str,
- ptr: Pointer<Option<Tag>>,
- ) {
- let ptr = ptr.into_pointer_or_addr().unwrap();
- this.memory.extra.extern_statics.try_insert(Symbol::intern(name), ptr).unwrap();
- }
-
- /// Sets up the "extern statics" for this machine.
- pub fn init_extern_statics<'tcx, 'mir>(
- this: &mut MiriEvalContext<'mir, 'tcx>,
- ) -> InterpResult<'tcx> {
- match this.tcx.sess.target.os.as_ref() {
- "linux" => {
- // "environ"
- Self::add_extern_static(
- this,
- "environ",
- this.machine.env_vars.environ.unwrap().ptr,
- );
- // A couple zero-initialized pointer-sized extern statics.
- // Most of them are for weak symbols, which we all set to null (indicating that the
- // symbol is not supported, and triggering fallback code which ends up calling a
- // syscall that we do support).
- for name in &["__cxa_thread_atexit_impl", "getrandom", "statx"] {
- let layout = this.machine.layouts.usize;
- let place = this.allocate(layout, MiriMemoryKind::ExternStatic.into())?;
- this.write_scalar(Scalar::from_machine_usize(0, this), &place.into())?;
- Self::add_extern_static(this, name, place.ptr);
- }
- }
- "windows" => {
- // "_tls_used"
- // This is some obscure hack that is part of the Windows TLS story. It's a `u8`.
- let layout = this.machine.layouts.u8;
- let place = this.allocate(layout, MiriMemoryKind::ExternStatic.into())?;
- this.write_scalar(Scalar::from_u8(0), &place.into())?;
- Self::add_extern_static(this, "_tls_used", place.ptr);
- }
- _ => {} // No "extern statics" supported on this target
- }
- Ok(())
- }
- }
-
/// Precomputed layouts of primitive types
pub struct PrimitiveLayouts<'tcx> {
pub unit: TyAndLayout<'tcx>,
@@ -293,6 +196,10 @@ fn new(layout_cx: LayoutCx<'tcx, TyCtxt<'tcx>>) -> Result<Self, LayoutError<'tcx

/// The machine itself.
pub struct Evaluator<'mir, 'tcx> {
+ pub stacked_borrows: Option<stacked_borrows::GlobalState>,
+ pub data_race: Option<data_race::GlobalState>,
+ pub intptrcast: intptrcast::GlobalState,
+
/// Environment variables set by `setenv`.
/// Miri does not expose env vars from the host to the emulated program.
pub(crate) env_vars: EnvVars<'tcx>,
@@ -357,6 +264,23 @@ pub struct Evaluator<'mir, 'tcx> {

/// Crates which are considered local for the purposes of error reporting.
pub(crate) local_crates: Vec<CrateNum>,
+
+ /// Mapping extern static names to their base pointer.
+ extern_statics: FxHashMap<Symbol, Pointer<Tag>>,
+
+ /// The random number generator used for resolving non-determinism.
+ /// Needs to be queried by ptr_to_int, hence needs interior mutability.
+ pub(crate) rng: RefCell<StdRng>,
+
+ /// An allocation ID to report when it is being allocated
+ /// (helps for debugging memory leaks and use after free bugs).
+ tracked_alloc_id: Option<AllocId>,
+
+ /// Controls whether alignment of memory accesses is being checked.
+ pub(crate) check_alignment: AlignmentCheck,
+
+ /// Failure rate of compare_exchange_weak, between 0.0 and 1.0
+ pub(crate) cmpxchg_weak_failure_rate: f64,
}

impl<'mir, 'tcx> Evaluator<'mir, 'tcx> {
@@ -367,9 +291,23 @@ pub(crate) fn new(config: &MiriConfig, layout_cx: LayoutCx<'tcx, TyCtxt<'tcx>>)
let profiler = config.measureme_out.as_ref().map(|out| {
measureme::Profiler::new(out).expect("Couldn't create `measureme` profiler")
});
+ let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
+ let stacked_borrows = if config.stacked_borrows {
+ Some(RefCell::new(stacked_borrows::GlobalStateInner::new(
+ config.tracked_pointer_tag,
+ config.tracked_call_id,
+ config.tag_raw,
+ )))
+ } else {
+ None
+ };
+ let data_race =
+ if config.data_race_detector { Some(data_race::GlobalState::new()) } else { None };
Evaluator {
- // `env_vars` could be initialized properly here if `Memory` were available before
- // calling this method.
+ stacked_borrows,
+ data_race,
+ intptrcast: RefCell::new(intptrcast::GlobalStateInner::new(config)),
+ // `env_vars` depends on a full interpreter so we cannot properly initialize it yet.
env_vars: EnvVars::default(),
argc: None,
argv: None,
@@ -391,9 +329,66 @@ pub(crate) fn new(config: &MiriConfig, layout_cx: LayoutCx<'tcx, TyCtxt<'tcx>>)
panic_on_unsupported: config.panic_on_unsupported,
backtrace_style: config.backtrace_style,
local_crates,
+ extern_statics: FxHashMap::default(),
+ rng: RefCell::new(rng),
+ tracked_alloc_id: config.tracked_alloc_id,
+ check_alignment: config.check_alignment,
+ cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate,
}
}

+ pub(crate) fn late_init(
+ this: &mut MiriEvalContext<'mir, 'tcx>,
+ config: &MiriConfig,
+ ) -> InterpResult<'tcx> {
+ EnvVars::init(this, config)?;
+ Evaluator::init_extern_statics(this)?;
+ Ok(())
+ }
+
+ fn add_extern_static(
+ this: &mut MiriEvalContext<'mir, 'tcx>,
+ name: &str,
+ ptr: Pointer<Option<Tag>>,
+ ) {
+ let ptr = ptr.into_pointer_or_addr().unwrap();
+ this.machine.extern_statics.try_insert(Symbol::intern(name), ptr).unwrap();
+ }
+
+ /// Sets up the "extern statics" for this machine.
+ fn init_extern_statics(this: &mut MiriEvalContext<'mir, 'tcx>) -> InterpResult<'tcx> {
+ match this.tcx.sess.target.os.as_ref() {
+ "linux" => {
+ // "environ"
+ Self::add_extern_static(
+ this,
+ "environ",
+ this.machine.env_vars.environ.unwrap().ptr,
+ );
+ // A couple zero-initialized pointer-sized extern statics.
+ // Most of them are for weak symbols, which we all set to null (indicating that the
+ // symbol is not supported, and triggering fallback code which ends up calling a
+ // syscall that we do support).
+ for name in &["__cxa_thread_atexit_impl", "getrandom", "statx"] {
+ let layout = this.machine.layouts.usize;
+ let place = this.allocate(layout, MiriMemoryKind::ExternStatic.into())?;
+ this.write_scalar(Scalar::from_machine_usize(0, this), &place.into())?;
+ Self::add_extern_static(this, name, place.ptr);
+ }
+ }
+ "windows" => {
+ // "_tls_used"
+ // This is some obscure hack that is part of the Windows TLS story. It's a `u8`.
+ let layout = this.machine.layouts.u8;
+ let place = this.allocate(layout, MiriMemoryKind::ExternStatic.into())?;
+ this.write_scalar(Scalar::from_u8(0), &place.into())?;
+ Self::add_extern_static(this, "_tls_used", place.ptr);
+ }
+ _ => {} // No "extern statics" supported on this target
+ }
+ Ok(())
+ }
+
pub(crate) fn communicate(&self) -> bool {
self.isolated_op == IsolatedOp::Allow
}
@@ -429,7 +424,6 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
type MemoryKind = MiriMemoryKind;

type FrameExtra = FrameData<'tcx>;
- type MemoryExtra = MemoryExtra;
type AllocExtra = AllocExtra;
type PointerTag = Tag;
type ExtraFnVal = Dlsym;
@@ -442,33 +436,33 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
const PANIC_ON_ALLOC_FAIL: bool = false;

#[inline(always)]
- fn enforce_alignment(memory_extra: &MemoryExtra) -> bool {
- memory_extra.check_alignment != AlignmentCheck::None
+ fn enforce_alignment(ecx: &MiriEvalContext<'mir, 'tcx>) -> bool {
+ ecx.machine.check_alignment != AlignmentCheck::None
}

#[inline(always)]
- fn force_int_for_alignment_check(memory_extra: &Self::MemoryExtra) -> bool {
- memory_extra.check_alignment == AlignmentCheck::Int
+ fn force_int_for_alignment_check(ecx: &MiriEvalContext<'mir, 'tcx>) -> bool {
+ ecx.machine.check_alignment == AlignmentCheck::Int
}

#[inline(always)]
- fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+ fn enforce_validity(ecx: &MiriEvalContext<'mir, 'tcx>) -> bool {
ecx.machine.validate
}

#[inline(always)]
- fn enforce_number_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+ fn enforce_number_validity(ecx: &MiriEvalContext<'mir, 'tcx>) -> bool {
ecx.machine.enforce_number_validity
}

#[inline(always)]
- fn enforce_abi(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+ fn enforce_abi(ecx: &MiriEvalContext<'mir, 'tcx>) -> bool {
ecx.machine.enforce_abi
}

#[inline(always)]
fn find_mir_or_eval_fn(
- ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ ecx: &mut MiriEvalContext<'mir, 'tcx>,
instance: ty::Instance<'tcx>,
abi: Abi,
args: &[OpTy<'tcx, Tag>],
@@ -480,7 +474,7 @@ fn find_mir_or_eval_fn(

#[inline(always)]
fn call_extra_fn(
- ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ ecx: &mut MiriEvalContext<'mir, 'tcx>,
fn_val: Dlsym,
abi: Abi,
args: &[OpTy<'tcx, Tag>],
@@ -492,7 +486,7 @@ fn call_extra_fn(

#[inline(always)]
fn call_intrinsic(
- ecx: &mut rustc_const_eval::interpret::InterpCx<'mir, 'tcx, Self>,
+ ecx: &mut MiriEvalContext<'mir, 'tcx>,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, Tag>],
ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
@@ -503,7 +497,7 @@ fn call_intrinsic(

#[inline(always)]
fn assert_panic(
- ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ ecx: &mut MiriEvalContext<'mir, 'tcx>,
msg: &mir::AssertMessage<'tcx>,
unwind: Option<mir::BasicBlock>,
) -> InterpResult<'tcx> {
@@ -511,13 +505,13 @@ fn assert_panic(
}

#[inline(always)]
- fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: String) -> InterpResult<'tcx, !> {
+ fn abort(_ecx: &mut MiriEvalContext<'mir, 'tcx>, msg: String) -> InterpResult<'tcx, !> {
throw_machine_stop!(TerminationInfo::Abort(msg))
}

#[inline(always)]
fn binary_ptr_op(
- ecx: &rustc_const_eval::interpret::InterpCx<'mir, 'tcx, Self>,
+ ecx: &MiriEvalContext<'mir, 'tcx>,
bin_op: mir::BinOp,
left: &ImmTy<'tcx, Tag>,
right: &ImmTy<'tcx, Tag>,
@@ -526,22 +520,22 @@ fn binary_ptr_op(
}

fn thread_local_static_base_pointer(
- ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ ecx: &mut MiriEvalContext<'mir, 'tcx>,
def_id: DefId,
) -> InterpResult<'tcx, Pointer<Tag>> {
ecx.get_or_create_thread_local_alloc(def_id)
}

fn extern_static_base_pointer(
- memory: &Memory<'mir, 'tcx, Self>,
+ ecx: &MiriEvalContext<'mir, 'tcx>,
def_id: DefId,
) -> InterpResult<'tcx, Pointer<Tag>> {
- let attrs = memory.tcx.get_attrs(def_id);
- let link_name = match memory.tcx.sess.first_attr_value_str_by_name(&attrs, sym::link_name) {
+ let attrs = ecx.tcx.get_attrs(def_id);
+ let link_name = match ecx.tcx.sess.first_attr_value_str_by_name(&attrs, sym::link_name) {
Some(name) => name,
- None => memory.tcx.item_name(def_id),
+ None => ecx.tcx.item_name(def_id),
};
- if let Some(&ptr) = memory.extra.extern_statics.get(&link_name) {
+ if let Some(&ptr) = ecx.machine.extern_statics.get(&link_name) {
Ok(ptr)
} else {
throw_unsup_format!("`extern` static {:?} is not supported by Miri", def_id)
@@ -549,41 +543,41 @@ fn extern_static_base_pointer(
}

fn init_allocation_extra<'b>(
- mem: &Memory<'mir, 'tcx, Self>,
+ ecx: &MiriEvalContext<'mir, 'tcx>,
id: AllocId,
alloc: Cow<'b, Allocation>,
kind: Option<MemoryKind<Self::MemoryKind>>,
) -> Cow<'b, Allocation<Self::PointerTag, Self::AllocExtra>> {
- if Some(id) == mem.extra.tracked_alloc_id {
+ if Some(id) == ecx.machine.tracked_alloc_id {
register_diagnostic(NonHaltingDiagnostic::CreatedAlloc(id));
}

let kind = kind.expect("we set our STATIC_KIND so this cannot be None");
let alloc = alloc.into_owned();
- let stacks = if let Some(stacked_borrows) = &mem.extra.stacked_borrows {
+ let stacks = if let Some(stacked_borrows) = &ecx.machine.stacked_borrows {
Some(Stacks::new_allocation(id, alloc.size(), stacked_borrows, kind))
} else {
None
};
- let race_alloc = if let Some(data_race) = &mem.extra.data_race {
+ let race_alloc = if let Some(data_race) = &ecx.machine.data_race {
Some(data_race::AllocExtra::new_allocation(&data_race, alloc.size(), kind))
} else {
None
};
let alloc: Allocation<Tag, Self::AllocExtra> = alloc.convert_tag_add_extra(
- &mem.tcx,
+ &ecx.tcx,
AllocExtra { stacked_borrows: stacks, data_race: race_alloc },
- |ptr| Evaluator::tag_alloc_base_pointer(mem, ptr),
+ |ptr| Evaluator::tag_alloc_base_pointer(ecx, ptr),
);
Cow::Owned(alloc)
}

fn tag_alloc_base_pointer(
- mem: &Memory<'mir, 'tcx, Self>,
+ ecx: &MiriEvalContext<'mir, 'tcx>,
ptr: Pointer<AllocId>,
) -> Pointer<Tag> {
- let absolute_addr = intptrcast::GlobalState::rel_ptr_to_addr(&mem, ptr);
- let sb_tag = if let Some(stacked_borrows) = &mem.extra.stacked_borrows {
+ let absolute_addr = intptrcast::GlobalStateInner::rel_ptr_to_addr(ecx, ptr);
+ let sb_tag = if let Some(stacked_borrows) = &ecx.machine.stacked_borrows {
stacked_borrows.borrow_mut().base_tag(ptr.provenance)
} else {
SbTag::Untagged
@@ -593,38 +587,38 @@ fn tag_alloc_base_pointer(

#[inline(always)]
fn ptr_from_addr(
- mem: &Memory<'mir, 'tcx, Self>,
+ ecx: &MiriEvalContext<'mir, 'tcx>,
addr: u64,
) -> Pointer<Option<Self::PointerTag>> {
- intptrcast::GlobalState::ptr_from_addr(addr, mem)
+ intptrcast::GlobalStateInner::ptr_from_addr(addr, ecx)
}

/// Convert a pointer with provenance into an allocation-offset pair,
/// or a `None` with an absolute address if that conversion is not possible.
fn ptr_get_alloc(
- mem: &Memory<'mir, 'tcx, Self>,
+ ecx: &MiriEvalContext<'mir, 'tcx>,
ptr: Pointer<Self::PointerTag>,
) -> (AllocId, Size) {
- let rel = intptrcast::GlobalState::abs_ptr_to_rel(mem, ptr);
+ let rel = intptrcast::GlobalStateInner::abs_ptr_to_rel(ecx, ptr);
(ptr.provenance.alloc_id, rel)
}

#[inline(always)]
fn memory_read(
- memory_extra: &Self::MemoryExtra,
+ machine: &Self,
alloc_extra: &AllocExtra,
tag: Tag,
range: AllocRange,
) -> InterpResult<'tcx> {
if let Some(data_race) = &alloc_extra.data_race {
- data_race.read(tag.alloc_id, range, memory_extra.data_race.as_ref().unwrap())?;
+ data_race.read(tag.alloc_id, range, machine.data_race.as_ref().unwrap())?;
}
if let Some(stacked_borrows) = &alloc_extra.stacked_borrows {
stacked_borrows.memory_read(
tag.alloc_id,
tag.sb,
range,
- memory_extra.stacked_borrows.as_ref().unwrap(),
+ machine.stacked_borrows.as_ref().unwrap(),
)
} else {
Ok(())
@@ -633,20 +627,20 @@ fn memory_read(

#[inline(always)]
fn memory_written(
- memory_extra: &mut Self::MemoryExtra,
+ machine: &mut Self,
alloc_extra: &mut AllocExtra,
tag: Tag,
range: AllocRange,
) -> InterpResult<'tcx> {
if let Some(data_race) = &mut alloc_extra.data_race {
- data_race.write(tag.alloc_id, range, memory_extra.data_race.as_mut().unwrap())?;
+ data_race.write(tag.alloc_id, range, machine.data_race.as_mut().unwrap())?;
}
if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
stacked_borrows.memory_written(
tag.alloc_id,
tag.sb,
range,
- memory_extra.stacked_borrows.as_mut().unwrap(),
+ machine.stacked_borrows.as_mut().unwrap(),
)
} else {
Ok(())
@@ -655,23 +649,23 @@ fn memory_written(

#[inline(always)]
fn memory_deallocated(
- memory_extra: &mut Self::MemoryExtra,
+ machine: &mut Self,
alloc_extra: &mut AllocExtra,
tag: Tag,
range: AllocRange,
) -> InterpResult<'tcx> {
- if Some(tag.alloc_id) == memory_extra.tracked_alloc_id {
+ if Some(tag.alloc_id) == machine.tracked_alloc_id {
register_diagnostic(NonHaltingDiagnostic::FreedAlloc(tag.alloc_id));
}
if let Some(data_race) = &mut alloc_extra.data_race {
- data_race.deallocate(tag.alloc_id, range, memory_extra.data_race.as_mut().unwrap())?;
+ data_race.deallocate(tag.alloc_id, range, machine.data_race.as_mut().unwrap())?;
}
if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
stacked_borrows.memory_deallocated(
tag.alloc_id,
tag.sb,
range,
- memory_extra.stacked_borrows.as_mut().unwrap(),
+ machine.stacked_borrows.as_mut().unwrap(),
)
} else {
Ok(())
@@ -684,7 +678,7 @@ fn retag(
kind: mir::RetagKind,
place: &PlaceTy<'tcx, Tag>,
) -> InterpResult<'tcx> {
- if ecx.memory.extra.stacked_borrows.is_some() { ecx.retag(kind, place) } else { Ok(()) }
+ if ecx.machine.stacked_borrows.is_some() { ecx.retag(kind, place) } else { Ok(()) }
}

#[inline(always)]
@@ -707,7 +701,7 @@ fn init_frame_extra(
None
};

- let stacked_borrows = ecx.memory.extra.stacked_borrows.as_ref();
+ let stacked_borrows = ecx.machine.stacked_borrows.as_ref();
let call_id = stacked_borrows.map_or(NonZeroU64::new(1).unwrap(), |stacked_borrows| {
stacked_borrows.borrow_mut().new_call()
});
@@ -730,7 +724,7 @@ fn stack_mut<'a>(

#[inline(always)]
fn after_stack_push(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
- if ecx.memory.extra.stacked_borrows.is_some() { ecx.retag_return_place() } else { Ok(()) }
+ if ecx.machine.stacked_borrows.is_some() { ecx.retag_return_place() } else { Ok(()) }
}

#[inline(always)]
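The machine.rs hunks above also change the `Machine` trait hooks: instead of a dedicated `MemoryExtra` associated type passed as `memory_extra: &Self::MemoryExtra`, hooks like `memory_read` now receive the machine itself (`machine: &Self`, or the full `MiriEvalContext`). A simplified sketch of a hook written against that shape (the trait and types are illustrative, not rustc's real `Machine` trait):

```rust
// Illustrative only: a pared-down version of the hook shape, not rustc's Machine trait.
struct AllocExtra { bytes_read: std::cell::Cell<u64> }

trait Machine: Sized {
    // The hook gets the machine directly, so global state lives on `&Self`.
    fn memory_read(machine: &Self, alloc_extra: &AllocExtra, len: u64) -> Result<(), String>;
}

struct MyMachine { track_reads: bool }

impl Machine for MyMachine {
    fn memory_read(machine: &Self, alloc_extra: &AllocExtra, len: u64) -> Result<(), String> {
        if machine.track_reads {
            alloc_extra.bytes_read.set(alloc_extra.bytes_read.get() + len);
        }
        Ok(())
    }
}

fn main() {
    let machine = MyMachine { track_reads: true };
    let extra = AllocExtra { bytes_read: std::cell::Cell::new(0) };
    MyMachine::memory_read(&machine, &extra, 8).unwrap();
    assert_eq!(extra.bytes_read.get(), 8);
}
```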
@@ -64,7 +64,7 @@ fn handle_miri_get_backtrace(
// to reconstruct the needed frame information in `handle_miri_resolve_frame`.
// Note that we never actually read or write anything from/to this pointer -
// all of the data is represented by the pointer value itself.
- let fn_ptr = this.memory.create_fn_alloc(FnVal::Instance(instance));
+ let fn_ptr = this.create_fn_alloc_ptr(FnVal::Instance(instance));
fn_ptr.wrapping_offset(Size::from_bytes(pos.0), this)
})
.collect();
@@ -125,7 +125,7 @@ fn resolve_frame_pointer(

let ptr = this.read_pointer(ptr)?;
// Take apart the pointer, we need its pieces.
- let (alloc_id, offset, ptr) = this.memory.ptr_get_alloc(ptr)?;
+ let (alloc_id, offset, ptr) = this.ptr_get_alloc_id(ptr)?;

let fn_instance =
if let Some(GlobalAlloc::Function(instance)) = this.tcx.get_global_alloc(alloc_id) {
@@ -159,7 +159,7 @@ fn handle_miri_resolve_frame(

// Reconstruct the original function pointer,
// which we pass to user code.
- let fn_ptr = this.memory.create_fn_alloc(FnVal::Instance(fn_instance));
+ let fn_ptr = this.create_fn_alloc_ptr(FnVal::Instance(fn_instance));

let num_fields = dest.layout.fields.count();

@@ -244,8 +244,8 @@ fn handle_miri_resolve_frame_names(

let (_, _, name, filename) = this.resolve_frame_pointer(ptr)?;

- this.memory.write_bytes(this.read_pointer(name_ptr)?, name.bytes())?;
- this.memory.write_bytes(this.read_pointer(filename_ptr)?, filename.bytes())?;
+ this.write_bytes_ptr(this.read_pointer(name_ptr)?, name.bytes())?;
+ this.write_bytes_ptr(this.read_pointer(filename_ptr)?, filename.bytes())?;

Ok(())
}
@@ -2,6 +2,7 @@
use std::env;
use std::ffi::{OsStr, OsString};
use std::io::ErrorKind;
+ use std::mem;

use rustc_const_eval::interpret::Pointer;
use rustc_data_structures::fx::FxHashMap;
@@ -38,20 +39,20 @@ pub struct EnvVars<'tcx> {
impl<'tcx> EnvVars<'tcx> {
pub(crate) fn init<'mir>(
ecx: &mut InterpCx<'mir, 'tcx, Evaluator<'mir, 'tcx>>,
- mut excluded_env_vars: Vec<String>,
- forwarded_env_vars: Vec<String>,
+ config: &MiriConfig,
) -> InterpResult<'tcx> {
let target_os = ecx.tcx.sess.target.os.as_ref();
// HACK: Exclude `TERM` var to avoid terminfo trying to open the termcap file.
// This is (a) very slow and (b) does not work on Windows.
+ let mut excluded_env_vars = config.excluded_env_vars.clone();
excluded_env_vars.push("TERM".to_owned());

// Skip the loop entirely if we don't want to forward anything.
- if ecx.machine.communicate() || !forwarded_env_vars.is_empty() {
+ if ecx.machine.communicate() || !config.forwarded_env_vars.is_empty() {
for (name, value) in env::vars_os() {
let forward = match ecx.machine.communicate() {
true => !excluded_env_vars.iter().any(|v| v.as_str() == &name),
- false => forwarded_env_vars.iter().any(|v| v.as_str() == &name),
+ false => config.forwarded_env_vars.iter().any(|v| v.as_str() == &name),
};
if forward {
let var_ptr = match target_os {
@@ -75,13 +76,14 @@ pub(crate) fn cleanup<'mir>(
ecx: &mut InterpCx<'mir, 'tcx, Evaluator<'mir, 'tcx>>,
) -> InterpResult<'tcx> {
// Deallocate individual env vars.
- for (_name, ptr) in ecx.machine.env_vars.map.drain() {
- ecx.memory.deallocate(ptr, None, MiriMemoryKind::Runtime.into())?;
+ let env_vars = mem::take(&mut ecx.machine.env_vars.map);
+ for (_name, ptr) in env_vars {
+ ecx.deallocate_ptr(ptr, None, MiriMemoryKind::Runtime.into())?;
}
// Deallocate environ var list.
let environ = ecx.machine.env_vars.environ.unwrap();
let old_vars_ptr = ecx.read_pointer(&environ.into())?;
- ecx.memory.deallocate(old_vars_ptr, None, MiriMemoryKind::Runtime.into())?;
+ ecx.deallocate_ptr(old_vars_ptr, None, MiriMemoryKind::Runtime.into())?;
Ok(())
}
}
@@ -199,7 +201,7 @@ fn FreeEnvironmentStringsW(
this.assert_target_os("windows", "FreeEnvironmentStringsW");

let env_block_ptr = this.read_pointer(env_block_op)?;
- let result = this.memory.deallocate(env_block_ptr, None, MiriMemoryKind::Runtime.into());
+ let result = this.deallocate_ptr(env_block_ptr, None, MiriMemoryKind::Runtime.into());
// If the function succeeds, the return value is nonzero.
Ok(result.is_ok() as i32)
}
@@ -230,7 +232,7 @@ fn setenv(
if let Some((name, value)) = new {
let var_ptr = alloc_env_var_as_c_str(&name, &value, &mut this)?;
if let Some(var) = this.machine.env_vars.map.insert(name, var_ptr) {
- this.memory.deallocate(var, None, MiriMemoryKind::Runtime.into())?;
+ this.deallocate_ptr(var, None, MiriMemoryKind::Runtime.into())?;
}
this.update_environ()?;
Ok(0) // return zero on success
@@ -267,7 +269,7 @@ fn SetEnvironmentVariableW(
} else if this.ptr_is_null(value_ptr)? {
// Delete environment variable `{name}`
if let Some(var) = this.machine.env_vars.map.remove(&name) {
- this.memory.deallocate(var, None, MiriMemoryKind::Runtime.into())?;
+ this.deallocate_ptr(var, None, MiriMemoryKind::Runtime.into())?;
this.update_environ()?;
}
Ok(1) // return non-zero on success
@@ -275,7 +277,7 @@ fn SetEnvironmentVariableW(
let value = this.read_os_str_from_wide_str(value_ptr)?;
let var_ptr = alloc_env_var_as_wide_str(&name, &value, &mut this)?;
if let Some(var) = this.machine.env_vars.map.insert(name, var_ptr) {
- this.memory.deallocate(var, None, MiriMemoryKind::Runtime.into())?;
+ this.deallocate_ptr(var, None, MiriMemoryKind::Runtime.into())?;
}
this.update_environ()?;
Ok(1) // return non-zero on success
@@ -300,7 +302,7 @@ fn unsetenv(&mut self, name_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
}
if let Some(old) = success {
if let Some(var) = old {
- this.memory.deallocate(var, None, MiriMemoryKind::Runtime.into())?;
+ this.deallocate_ptr(var, None, MiriMemoryKind::Runtime.into())?;
}
this.update_environ()?;
Ok(0)
@@ -436,7 +438,7 @@ fn update_environ(&mut self) -> InterpResult<'tcx> {
// Deallocate the old environ list, if any.
if let Some(environ) = this.machine.env_vars.environ {
let old_vars_ptr = this.read_pointer(&environ.into())?;
- this.memory.deallocate(old_vars_ptr, None, MiriMemoryKind::Runtime.into())?;
+ this.deallocate_ptr(old_vars_ptr, None, MiriMemoryKind::Runtime.into())?;
} else {
// No `environ` allocated yet, let's do that.
// This is memory backing an extern static, hence `ExternStatic`, not `Env`.
@@ -82,10 +82,10 @@ fn malloc(
Ok(Pointer::null())
} else {
let align = this.min_align(size, kind);
- let ptr = this.memory.allocate(Size::from_bytes(size), align, kind.into())?;
+ let ptr = this.allocate_ptr(Size::from_bytes(size), align, kind.into())?;
if zero_init {
// We just allocated this, the access is definitely in-bounds.
- this.memory.write_bytes(ptr.into(), iter::repeat(0u8).take(size as usize)).unwrap();
+ this.write_bytes_ptr(ptr.into(), iter::repeat(0u8).take(size as usize)).unwrap();
}
Ok(ptr.into())
}
@@ -94,7 +94,7 @@ fn malloc(
fn free(&mut self, ptr: Pointer<Option<Tag>>, kind: MiriMemoryKind) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
if !this.ptr_is_null(ptr)? {
- this.memory.deallocate(ptr, None, kind.into())?;
+ this.deallocate_ptr(ptr, None, kind.into())?;
}
Ok(())
}
@@ -112,15 +112,15 @@ fn realloc(
Ok(Pointer::null())
} else {
let new_ptr =
- this.memory.allocate(Size::from_bytes(new_size), new_align, kind.into())?;
+ this.allocate_ptr(Size::from_bytes(new_size), new_align, kind.into())?;
Ok(new_ptr.into())
}
} else {
if new_size == 0 {
- this.memory.deallocate(old_ptr, None, kind.into())?;
+ this.deallocate_ptr(old_ptr, None, kind.into())?;
Ok(Pointer::null())
} else {
- let new_ptr = this.memory.reallocate(
+ let new_ptr = this.reallocate_ptr(
old_ptr,
None,
Size::from_bytes(new_size),
@@ -373,7 +373,7 @@ fn emulate_foreign_item_by_name(
"miri_static_root" => {
let &[ref ptr] = this.check_shim(abi, Abi::Rust, link_name, args)?;
let ptr = this.read_pointer(ptr)?;
- let (alloc_id, offset, _) = this.memory.ptr_get_alloc(ptr)?;
+ let (alloc_id, offset, _) = this.ptr_get_alloc_id(ptr)?;
if offset != Size::ZERO {
throw_unsup_format!("pointer passed to miri_static_root must point to beginning of an allocated block");
}
@@ -440,7 +440,7 @@ fn emulate_foreign_item_by_name(
return this.emulate_allocator(Symbol::intern("__rg_alloc"), |this| {
Self::check_alloc_request(size, align)?;

- let ptr = this.memory.allocate(
+ let ptr = this.allocate_ptr(
Size::from_bytes(size),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into(),
@@ -457,14 +457,14 @@ fn emulate_foreign_item_by_name(
return this.emulate_allocator(Symbol::intern("__rg_alloc_zeroed"), |this| {
Self::check_alloc_request(size, align)?;

- let ptr = this.memory.allocate(
+ let ptr = this.allocate_ptr(
Size::from_bytes(size),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into(),
)?;

// We just allocated this, the access is definitely in-bounds.
- this.memory.write_bytes(ptr.into(), iter::repeat(0u8).take(usize::try_from(size).unwrap())).unwrap();
+ this.write_bytes_ptr(ptr.into(), iter::repeat(0u8).take(usize::try_from(size).unwrap())).unwrap();
this.write_pointer(ptr, dest)
});
}
@@ -476,7 +476,7 @@ fn emulate_foreign_item_by_name(

return this.emulate_allocator(Symbol::intern("__rg_dealloc"), |this| {
// No need to check old_size/align; we anyway check that they match the allocation.
- this.memory.deallocate(
+ this.deallocate_ptr(
ptr,
Some((Size::from_bytes(old_size), Align::from_bytes(align).unwrap())),
MiriMemoryKind::Rust.into(),
@@ -495,7 +495,7 @@ fn emulate_foreign_item_by_name(
Self::check_alloc_request(new_size, align)?;

let align = Align::from_bytes(align).unwrap();
- let new_ptr = this.memory.reallocate(
+ let new_ptr = this.reallocate_ptr(
ptr,
Some((Size::from_bytes(old_size), align)),
Size::from_bytes(new_size),
@@ -514,8 +514,8 @@ fn emulate_foreign_item_by_name(
let n = Size::from_bytes(this.read_scalar(n)?.to_machine_usize(this)?);

let result = {
- let left_bytes = this.memory.read_bytes(left, n)?;
- let right_bytes = this.memory.read_bytes(right, n)?;
+ let left_bytes = this.read_bytes_ptr(left, n)?;
+ let right_bytes = this.read_bytes_ptr(right, n)?;

use std::cmp::Ordering::*;
match left_bytes.cmp(right_bytes) {
@@ -533,8 +533,7 @@ fn emulate_foreign_item_by_name(
let val = this.read_scalar(val)?.to_i32()? as u8;
let num = this.read_scalar(num)?.to_machine_usize(this)?;
if let Some(idx) = this
- .memory
- .read_bytes(ptr, Size::from_bytes(num))?
+ .read_bytes_ptr(ptr, Size::from_bytes(num))?
.iter()
.rev()
.position(|&c| c == val)
@@ -551,8 +550,7 @@ fn emulate_foreign_item_by_name(
let val = this.read_scalar(val)?.to_i32()? as u8;
let num = this.read_scalar(num)?.to_machine_usize(this)?;
let idx = this
- .memory
- .read_bytes(ptr, Size::from_bytes(num))?
+ .read_bytes_ptr(ptr, Size::from_bytes(num))?
.iter()
.position(|&c| c == val);
if let Some(idx) = idx {
@@ -88,8 +88,10 @@ fn call_intrinsic(
let byte_count = ty_layout.size.checked_mul(count, this).ok_or_else(|| {
err_ub_format!("overflow computing total size of `{}`", intrinsic_name)
})?;
- this.memory
- .write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
+ this.write_bytes_ptr(
+ ptr,
+ iter::repeat(val_byte).take(byte_count.bytes() as usize),
+ )?;
}

// Floating-point operations
@@ -1087,7 +1089,7 @@ fn atomic_load(
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
- this.memory.check_ptr_access_align(
+ this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
@@ -1113,7 +1115,7 @@ fn atomic_store(
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
- this.memory.check_ptr_access_align(
+ this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
@@ -1168,7 +1170,7 @@ fn atomic_op(
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
- this.memory.check_ptr_access_align(
+ this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
@@ -1210,7 +1212,7 @@ fn atomic_exchange(
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
- this.memory.check_ptr_access_align(
+ this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
@@ -1241,7 +1243,7 @@ fn atomic_compare_exchange_impl(
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
- this.memory.check_ptr_access_align(
+ this.check_ptr_access_align(
place.ptr,
place.layout.size,
align,
@@ -69,7 +69,7 @@ fn align_offset(
let this = self.eval_context_mut();
let (dest, ret) = ret.unwrap();

- if this.memory.extra.check_alignment != AlignmentCheck::Symbolic {
+ if this.machine.check_alignment != AlignmentCheck::Symbolic {
// Just use actual implementation.
return Ok(false);
}
@@ -86,7 +86,7 @@ fn align_offset(
if let Ok(ptr) = ptr.into_pointer_or_addr() {
// Only do anything if we can identify the allocation this goes to.
let (_, cur_align) =
- this.memory.get_size_and_align(ptr.provenance.alloc_id, AllocCheck::MaybeDead)?;
+ this.get_alloc_size_and_align(ptr.provenance.alloc_id, AllocCheck::MaybeDead)?;
if cur_align.bytes() >= req_align {
// If the allocation alignment is at least the required alignment we use the
// real implementation.
@@ -108,8 +108,7 @@ fn write_os_str_to_c_str(
return Ok((false, string_length));
}
self.eval_context_mut()
- .memory
- .write_bytes(ptr, bytes.iter().copied().chain(iter::once(0u8)))?;
+ .write_bytes_ptr(ptr, bytes.iter().copied().chain(iter::once(0u8)))?;
Ok((true, string_length))
}

@@ -152,8 +151,7 @@ fn os_str_to_u16vec<'tcx>(os_str: &OsStr) -> InterpResult<'tcx, Vec<u16>> {
let size2 = Size::from_bytes(2);
let this = self.eval_context_mut();
let mut alloc = this
- .memory
- .get_mut(ptr, size2 * string_length, Align::from_bytes(2).unwrap())?
+ .get_ptr_alloc_mut(ptr, size2 * string_length, Align::from_bytes(2).unwrap())?
.unwrap(); // not a ZST, so we will get a result
for (offset, wchar) in u16_vec.into_iter().chain(iter::once(0x0000)).enumerate() {
let offset = u64::try_from(offset).unwrap();
@@ -89,7 +89,7 @@ fn handle_try(
let catch_fn = this.read_scalar(catch_fn)?.check_init()?;

// Now we make a function call, and pass `data` as first and only argument.
- let f_instance = this.memory.get_fn(try_fn)?.as_instance()?;
+ let f_instance = this.get_ptr_fn(try_fn)?.as_instance()?;
trace!("try_fn: {:?}", f_instance);
let ret_place = MPlaceTy::dangling(this.machine.layouts.unit).into();
this.call_function(
@@ -123,7 +123,7 @@ fn handle_stack_pop(
let this = self.eval_context_mut();

trace!("handle_stack_pop(extra = {:?}, unwinding = {})", extra, unwinding);
- if let Some(stacked_borrows) = &this.memory.extra.stacked_borrows {
+ if let Some(stacked_borrows) = &this.machine.stacked_borrows {
stacked_borrows.borrow_mut().end_call(extra.call_id);
}

@@ -146,7 +146,7 @@ fn handle_stack_pop(

// Push the `catch_fn` stackframe.
let f_instance =
- this.memory.get_fn(this.scalar_to_ptr(catch_unwind.catch_fn))?.as_instance()?;
+ this.get_ptr_fn(this.scalar_to_ptr(catch_unwind.catch_fn))?.as_instance()?;
trace!("catch_fn: {:?}", f_instance);
let ret_place = MPlaceTy::dangling(this.machine.layouts.unit).into();
this.call_function(
@@ -157,7 +157,7 @@ fn emulate_foreign_item_by_name(
if size == 0 {
this.write_null(&ret.into())?;
} else {
- let ptr = this.memory.allocate(
+ let ptr = this.allocate_ptr(
Size::from_bytes(size),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::C.into(),
@@ -174,7 +174,7 @@ fn emulate_foreign_item_by_name(
let symbol = this.read_pointer(symbol)?;
let symbol_name = this.read_c_str(symbol)?;
if let Some(dlsym) = Dlsym::from_str(symbol_name, &this.tcx.sess.target.os)? {
- let ptr = this.memory.create_fn_alloc(FnVal::Other(dlsym));
+ let ptr = this.create_fn_alloc_ptr(FnVal::Other(dlsym));
this.write_pointer(ptr, dest)?;
} else {
this.write_null(dest)?;
@@ -214,7 +214,7 @@ fn emulate_foreign_item_by_name(

// Extract the function type out of the signature (that seems easier than constructing it ourselves).
let dtor = if !this.ptr_is_null(dtor)? {
- Some(this.memory.get_fn(dtor)?.as_instance()?)
+ Some(this.get_ptr_fn(dtor)?.as_instance()?)
} else {
None
};
@ -34,7 +34,7 @@ fn read<'tcx>(
bytes: &mut [u8],
) -> InterpResult<'tcx, io::Result<usize>>;
fn write<'tcx>(
&mut self,
&self,
communicate_allowed: bool,
bytes: &[u8],
) -> InterpResult<'tcx, io::Result<usize>>;
@ -66,12 +66,12 @@ fn read<'tcx>(
}

fn write<'tcx>(
&mut self,
&self,
communicate_allowed: bool,
bytes: &[u8],
) -> InterpResult<'tcx, io::Result<usize>> {
assert!(communicate_allowed, "isolation should have prevented even opening a file");
Ok(self.file.write(bytes))
Ok((&mut &self.file).write(bytes))
}

fn seek<'tcx>(
@ -133,7 +133,7 @@ fn read<'tcx>(
}

fn write<'tcx>(
&mut self,
&self,
_communicate_allowed: bool,
_bytes: &[u8],
) -> InterpResult<'tcx, io::Result<usize>> {
@ -174,12 +174,12 @@ fn read<'tcx>(
}

fn write<'tcx>(
&mut self,
&self,
_communicate_allowed: bool,
bytes: &[u8],
) -> InterpResult<'tcx, io::Result<usize>> {
// We allow writing to stderr even with isolation enabled.
let result = Write::write(self, bytes);
let result = Write::write(&mut { self }, bytes);
// Stdout is buffered, flush to make sure it appears on the
// screen. This is the write() syscall of the interpreted
// program, we want it to correspond to a write() syscall on
@ -224,13 +224,13 @@ fn read<'tcx>(
}

fn write<'tcx>(
&mut self,
&self,
_communicate_allowed: bool,
bytes: &[u8],
) -> InterpResult<'tcx, io::Result<usize>> {
// We allow writing to stderr even with isolation enabled.
// No need to flush, stderr is not buffered.
Ok(Write::write(self, bytes))
Ok(Write::write(&mut { self }, bytes))
}

fn seek<'tcx>(
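For context on the `&mut self` to `&self` change in the `write` methods above: the standard library also implements `Write` for shared references such as `&File` (and for references to the standard output and error handles), so a write can go through a shared reference by taking a mutable reference to that reference, which is what `(&mut &self.file).write(bytes)` and `Write::write(&mut { self }, bytes)` rely on. A minimal, self-contained sketch of the same pattern (the scratch file name is illustrative only, not from the diff):

use std::fs::File;
use std::io::Write;

// `Write` is implemented for `&File`, so writing only needs a mutable
// reference to the `&File`, not to the `File` itself.
fn write_through_shared(file: &File, bytes: &[u8]) -> std::io::Result<usize> {
    (&mut &*file).write(bytes)
}

fn main() -> std::io::Result<()> {
    let file = File::create("scratch.txt")?; // illustrative path
    let written = write_through_shared(&file, b"hello")?;
    assert_eq!(written, 5);
    Ok(())
}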
@ -681,7 +681,7 @@ fn read(&mut self, fd: i32, buf: Pointer<Option<Tag>>, count: u64) -> InterpResu
trace!("Reading from FD {}, size {}", fd, count);

// Check that the *entire* buffer is actually valid memory.
this.memory.check_ptr_access_align(
this.check_ptr_access_align(
buf,
Size::from_bytes(count),
Align::ONE,
@ -707,7 +707,7 @@ fn read(&mut self, fd: i32, buf: Pointer<Option<Tag>>, count: u64) -> InterpResu
match result {
Ok(read_bytes) => {
// If reading to `bytes` did not fail, we write those bytes to the buffer.
this.memory.write_bytes(buf, bytes)?;
this.write_bytes_ptr(buf, bytes)?;
Ok(read_bytes)
}
Err(e) => {
@ -727,7 +727,7 @@ fn write(&mut self, fd: i32, buf: Pointer<Option<Tag>>, count: u64) -> InterpRes
// Isolation check is done via `FileDescriptor` trait.

// Check that the *entire* buffer is actually valid memory.
this.memory.check_ptr_access_align(
this.check_ptr_access_align(
buf,
Size::from_bytes(count),
Align::ONE,
@ -739,8 +739,8 @@ fn write(&mut self, fd: i32, buf: Pointer<Option<Tag>>, count: u64) -> InterpRes
let count = count.min(this.machine_isize_max() as u64).min(isize::MAX as u64);
let communicate = this.machine.communicate();

if let Some(file_descriptor) = this.machine.file_handler.handles.get_mut(&fd) {
let bytes = this.memory.read_bytes(buf, Size::from_bytes(count))?;
if let Some(file_descriptor) = this.machine.file_handler.handles.get(&fd) {
let bytes = this.read_bytes_ptr(buf, Size::from_bytes(count))?;
let result =
file_descriptor.write(communicate, &bytes)?.map(|c| i64::try_from(c).unwrap());
this.try_unwrap_io_result(result)
@ -1288,7 +1288,7 @@ fn linux_readdir64(&mut self, dirp_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, S
)?;

let name_ptr = entry.offset(Size::from_bytes(d_name_offset), this)?;
this.memory.write_bytes(name_ptr, name_bytes.iter().copied())?;
this.write_bytes_ptr(name_ptr, name_bytes.iter().copied())?;

entry
}
@ -1597,7 +1597,7 @@ fn readlink(
}
// 'readlink' truncates the resolved path if
// the provided buffer is not large enough.
this.memory.write_bytes(buf, path_bytes.iter().copied())?;
this.write_bytes_ptr(buf, path_bytes.iter().copied())?;
Ok(path_bytes.len().try_into().unwrap())
}
Err(e) => {
@ -79,7 +79,7 @@ pub fn futex<'tcx>(
// Check the pointer for alignment and validity.
// The API requires `addr` to be a 4-byte aligned pointer, and will
// use the 4 bytes at the given address as an (atomic) i32.
this.memory.check_ptr_access_align(
this.check_ptr_access_align(
this.scalar_to_ptr(addr_scalar),
Size::from_bytes(4),
Align::from_bytes(4).unwrap(),
@ -121,7 +121,7 @@ fn emulate_foreign_item_by_name(
let &[ref dtor, ref data] =
this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
let dtor = this.read_pointer(dtor)?;
let dtor = this.memory.get_fn(dtor)?.as_instance()?;
let dtor = this.get_ptr_fn(dtor)?.as_instance()?;
let data = this.read_scalar(data)?.check_init()?;
let active_thread = this.get_active_thread();
this.machine.tls.set_macos_thread_dtor(active_thread, dtor, data)?;
@ -41,7 +41,7 @@ fn pthread_create(
let old_thread_id = this.set_active_thread(new_thread_id);

// Perform the function pointer load in the new thread frame.
let instance = this.memory.get_fn(fn_ptr)?.as_instance()?;
let instance = this.get_ptr_fn(fn_ptr)?.as_instance()?;

// Note: the returned value is currently ignored (see the FIXME in
// pthread_join below) because the Rust standard library does not use
@ -122,7 +122,7 @@ fn prctl(
let mut name = this.get_active_thread_name().to_vec();
name.push(0u8);
assert!(name.len() <= 16);
this.memory.write_bytes(address, name)?;
this.write_bytes_ptr(address, name)?;
} else {
throw_unsup_format!("unsupported prctl option {}", option);
}
@ -249,7 +249,7 @@ fn schedule_windows_tls_dtors(&mut self) -> InterpResult<'tcx> {
"p_thread_callback",
])?;
let thread_callback =
this.memory.get_fn(this.scalar_to_ptr(thread_callback))?.as_instance()?;
this.get_ptr_fn(this.scalar_to_ptr(thread_callback))?.as_instance()?;

// The signature of this function is `unsafe extern "system" fn(h: c::LPVOID, dwReason: c::DWORD, pv: c::LPVOID)`.
let reason = this.eval_path_scalar(&["std", "sys", "windows", "c", "DLL_THREAD_DETACH"])?;
@ -87,7 +87,7 @@ fn emulate_foreign_item_by_name(
// stdout/stderr
use std::io::{self, Write};

let buf_cont = this.memory.read_bytes(buf, Size::from_bytes(u64::from(n)))?;
let buf_cont = this.read_bytes_ptr(buf, Size::from_bytes(u64::from(n)))?;
let res = if handle == -11 {
io::stdout().write(buf_cont)
} else {
@ -157,7 +157,7 @@ fn emulate_foreign_item_by_name(
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let system_info = this.deref_operand(system_info)?;
// Initialize with `0`.
this.memory.write_bytes(
this.write_bytes_ptr(
system_info.ptr,
iter::repeat(0u8).take(system_info.layout.size.bytes() as usize),
)?;
@ -269,7 +269,7 @@ fn emulate_foreign_item_by_name(
this.read_scalar(hModule)?.to_machine_isize(this)?;
let name = this.read_c_str(this.read_pointer(lpProcName)?)?;
if let Some(dlsym) = Dlsym::from_str(name, &this.tcx.sess.target.os)? {
let ptr = this.memory.create_fn_alloc(FnVal::Other(dlsym));
let ptr = this.create_fn_alloc_ptr(FnVal::Other(dlsym));
this.write_pointer(ptr, dest)?;
} else {
this.write_null(dest)?;
@ -93,7 +93,7 @@ pub struct Stacks {

/// Extra global state, available to the memory access hooks.
#[derive(Debug)]
pub struct GlobalState {
pub struct GlobalStateInner {
/// Next unused pointer ID (tag).
next_ptr_id: PtrId,
/// Table storing the "base" tag for each allocation.
@ -111,8 +111,8 @@ pub struct GlobalState {
/// Whether to track raw pointers.
tag_raw: bool,
}
/// Memory extra state gives us interior mutable access to the global state.
pub type MemoryExtra = RefCell<GlobalState>;
/// We need interior mutable access to the global state.
pub type GlobalState = RefCell<GlobalStateInner>;

/// Indicates which kind of access is being performed.
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@ -156,13 +156,13 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
}

/// Utilities for initialization and ID generation
impl GlobalState {
impl GlobalStateInner {
pub fn new(
tracked_pointer_tag: Option<PtrId>,
tracked_call_id: Option<CallId>,
tag_raw: bool,
) -> Self {
GlobalState {
GlobalStateInner {
next_ptr_id: NonZeroU64::new(1).unwrap(),
base_ptr_ids: FxHashMap::default(),
next_call_id: NonZeroU64::new(1).unwrap(),
@ -308,7 +308,7 @@ fn find_first_write_incompatible(&self, granting: usize) -> usize {
fn check_protector(
item: &Item,
provoking_access: Option<(SbTag, AccessKind)>,
global: &GlobalState,
global: &GlobalStateInner,
) -> InterpResult<'tcx> {
if let SbTag::Tagged(id) = item.tag {
if Some(id) == global.tracked_pointer_tag {
@ -348,7 +348,7 @@ fn access(
access: AccessKind,
tag: SbTag,
(alloc_id, range, offset): (AllocId, AllocRange, Size), // just for debug printing and error messages
global: &GlobalState,
global: &GlobalStateInner,
) -> InterpResult<'tcx> {
// Two main steps: Find granting item, remove incompatible items above.

@ -396,7 +396,7 @@ fn dealloc(
&mut self,
tag: SbTag,
dbg_ptr: Pointer<AllocId>, // just for debug printing and error messages
global: &GlobalState,
global: &GlobalStateInner,
) -> InterpResult<'tcx> {
// Step 1: Find granting item.
self.find_granting(AccessKind::Write, tag).ok_or_else(|| {
@ -425,7 +425,7 @@ fn grant(
derived_from: SbTag,
new: Item,
(alloc_id, alloc_range, offset): (AllocId, AllocRange, Size), // just for debug printing and error messages
global: &GlobalState,
global: &GlobalStateInner,
) -> InterpResult<'tcx> {
// Figure out which access `perm` corresponds to.
let access =
@ -584,10 +584,10 @@ impl Stacks {
pub fn new_allocation(
id: AllocId,
size: Size,
extra: &MemoryExtra,
state: &GlobalState,
kind: MemoryKind<MiriMemoryKind>,
) -> Self {
let mut extra = extra.borrow_mut();
let mut extra = state.borrow_mut();
let (base_tag, perm) = match kind {
// New unique borrow. This tag is not accessible by the program,
// so it will only ever be used when using the local directly (i.e.,
@ -628,7 +628,7 @@ pub fn memory_read<'tcx>(
alloc_id: AllocId,
tag: SbTag,
range: AllocRange,
extra: &MemoryExtra,
state: &GlobalState,
) -> InterpResult<'tcx> {
trace!(
"read access with tag {:?}: {:?}, size {}",
@ -636,7 +636,7 @@ pub fn memory_read<'tcx>(
Pointer::new(alloc_id, range.start),
range.size.bytes()
);
let global = &*extra.borrow();
let global = &*state.borrow();
self.for_each(range, move |offset, stack| {
stack.access(AccessKind::Read, tag, (alloc_id, range, offset), global)
})
@ -648,7 +648,7 @@ pub fn memory_written<'tcx>(
alloc_id: AllocId,
tag: SbTag,
range: AllocRange,
extra: &mut MemoryExtra,
state: &mut GlobalState,
) -> InterpResult<'tcx> {
trace!(
"write access with tag {:?}: {:?}, size {}",
@ -656,7 +656,7 @@ pub fn memory_written<'tcx>(
Pointer::new(alloc_id, range.start),
range.size.bytes()
);
let global = extra.get_mut();
let global = state.get_mut();
self.for_each_mut(range, move |offset, stack| {
stack.access(AccessKind::Write, tag, (alloc_id, range, offset), global)
})
@ -668,10 +668,10 @@ pub fn memory_deallocated<'tcx>(
alloc_id: AllocId,
tag: SbTag,
range: AllocRange,
extra: &mut MemoryExtra,
state: &mut GlobalState,
) -> InterpResult<'tcx> {
trace!("deallocation with tag {:?}: {:?}, size {}", tag, alloc_id, range.size.bytes());
let global = extra.get_mut();
let global = state.get_mut();
self.for_each_mut(range, move |offset, stack| {
stack.dealloc(tag, Pointer::new(alloc_id, offset), global)
})
@ -702,12 +702,12 @@ fn reborrow(
);
return Ok(());
}
let (alloc_id, base_offset, ptr) = this.memory.ptr_get_alloc(place.ptr)?;
let (alloc_id, base_offset, ptr) = this.ptr_get_alloc_id(place.ptr)?;
let orig_tag = ptr.provenance.sb;

// Ensure we bail out if the pointer goes out-of-bounds (see miri#1050).
let (alloc_size, _) =
this.memory.get_size_and_align(alloc_id, AllocCheck::Dereferenceable)?;
this.get_alloc_size_and_align(alloc_id, AllocCheck::Dereferenceable)?;
if base_offset + size > alloc_size {
throw_ub!(PointerOutOfBounds {
alloc_id,
@ -750,10 +750,10 @@ fn reborrow(
// We need a frozen-sensitive reborrow.
// We have to use shared references to alloc/memory_extra here since
// `visit_freeze_sensitive` needs to access the global state.
let extra = this.memory.get_alloc_extra(alloc_id)?;
let extra = this.get_alloc_extra(alloc_id)?;
let stacked_borrows =
extra.stacked_borrows.as_ref().expect("we should have Stacked Borrows data");
let global = this.memory.extra.stacked_borrows.as_ref().unwrap().borrow();
let global = this.machine.stacked_borrows.as_ref().unwrap().borrow();
this.visit_freeze_sensitive(place, size, |mut range, frozen| {
// Adjust range.
range.start += base_offset;
@ -774,7 +774,7 @@ fn reborrow(
// Here we can avoid `borrow()` calls because we have mutable references.
// Note that this asserts that the allocation is mutable -- but since we are creating a
// mutable pointer, that seems reasonable.
let (alloc_extra, memory_extra) = this.memory.get_alloc_extra_mut(alloc_id)?;
let (alloc_extra, memory_extra) = this.get_alloc_extra_mut(alloc_id)?;
let stacked_borrows =
alloc_extra.stacked_borrows.as_mut().expect("we should have Stacked Borrows data");
let global = memory_extra.stacked_borrows.as_mut().unwrap().get_mut();
@ -808,7 +808,7 @@ fn retag_reference(

// Compute new borrow.
let new_tag = {
let mem_extra = this.memory.extra.stacked_borrows.as_mut().unwrap().get_mut();
let mem_extra = this.machine.stacked_borrows.as_mut().unwrap().get_mut();
match kind {
// Give up tracking for raw pointers.
RefKind::Raw { .. } if !mem_extra.tag_raw => SbTag::Untagged,
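The `GlobalState` to `GlobalStateInner` rename above keeps the detector's mutable state behind a `RefCell`, now exposed under the old name via `pub type GlobalState = RefCell<GlobalStateInner>`. A minimal sketch of that interior-mutability pattern follows; the `next_ptr_id` field and `new_ptr_id` helper are simplified placeholders, not Miri's actual API:

use std::cell::RefCell;

#[derive(Debug, Default)]
struct GlobalStateInner {
    next_ptr_id: u64, // placeholder for the real fields
}

type GlobalState = RefCell<GlobalStateInner>;

// Hooks that only get a shared `&GlobalState` can still hand out fresh IDs,
// because the RefCell provides interior mutability.
fn new_ptr_id(state: &GlobalState) -> u64 {
    let mut inner = state.borrow_mut();
    inner.next_ptr_id += 1;
    inner.next_ptr_id
}

fn main() {
    let state: GlobalState = RefCell::new(GlobalStateInner::default());
    assert_eq!(new_ptr_id(&state), 1);
    assert_eq!(new_ptr_id(&state), 2);
}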
src/sync.rs
@ -242,7 +242,7 @@ fn mutex_lock(&mut self, id: MutexId, thread: ThreadId) {
mutex.owner = Some(thread);
}
mutex.lock_count = mutex.lock_count.checked_add(1).unwrap();
if let Some(data_race) = &this.memory.extra.data_race {
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_acquire(&mutex.data_race, thread);
}
}
@ -268,7 +268,7 @@ fn mutex_unlock(&mut self, id: MutexId, expected_owner: ThreadId) -> Option<usiz
mutex.owner = None;
// The mutex is completely unlocked. Try transfering ownership
// to another thread.
if let Some(data_race) = &this.memory.extra.data_race {
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(&mut mutex.data_race, current_owner);
}
this.mutex_dequeue_and_lock(id);
@ -328,7 +328,7 @@ fn rwlock_reader_lock(&mut self, id: RwLockId, reader: ThreadId) {
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
let count = rwlock.readers.entry(reader).or_insert(0);
*count = count.checked_add(1).expect("the reader counter overflowed");
if let Some(data_race) = &this.memory.extra.data_race {
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_acquire(&rwlock.data_race, reader);
}
}
@ -352,7 +352,7 @@ fn rwlock_reader_unlock(&mut self, id: RwLockId, reader: ThreadId) -> bool {
}
Entry::Vacant(_) => return false, // we did not even own this lock
}
if let Some(data_race) = &this.memory.extra.data_race {
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release_shared(&mut rwlock.data_race_reader, reader);
}

@ -385,7 +385,7 @@ fn rwlock_writer_lock(&mut self, id: RwLockId, writer: ThreadId) {
trace!("rwlock_writer_lock: {:?} now held by {:?}", id, writer);
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
rwlock.writer = Some(writer);
if let Some(data_race) = &this.memory.extra.data_race {
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_acquire(&rwlock.data_race, writer);
}
}
@ -405,7 +405,7 @@ fn rwlock_writer_unlock(&mut self, id: RwLockId, expected_writer: ThreadId) -> b
// Release memory to both reader and writer vector clocks
// since this writer happens-before both the union of readers once they are finished
// and the next writer
if let Some(data_race) = &this.memory.extra.data_race {
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(&mut rwlock.data_race, current_writer);
data_race.validate_lock_release(&mut rwlock.data_race_reader, current_writer);
}
@ -465,7 +465,7 @@ fn condvar_signal(&mut self, id: CondvarId) -> Option<(ThreadId, MutexId)> {
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
let condvar = &mut this.machine.threads.sync.condvars[id];
let data_race = &this.memory.extra.data_race;
let data_race = &this.machine.data_race;

// Each condvar signal happens-before the end of the condvar wake
if let Some(data_race) = data_race {
@ -498,7 +498,7 @@ fn futex_wake(&mut self, addr: u64) -> Option<ThreadId> {
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
let futex = &mut this.machine.threads.sync.futexes.get_mut(&addr)?;
let data_race = &this.memory.extra.data_race;
let data_race = &this.machine.data_race;

// Each futex-wake happens-before the end of the futex wait
if let Some(data_race) = data_race {
@ -574,7 +574,7 @@ fn get_or_create_thread_local_alloc(
let allocation = tcx.eval_static_initializer(def_id)?;
// Create a fresh allocation with this content.
let new_alloc =
this.memory.allocate_with(allocation.inner().clone(), MiriMemoryKind::Tls.into());
this.allocate_raw_ptr(allocation.inner().clone(), MiriMemoryKind::Tls.into());
this.machine.threads.set_thread_local_alloc(def_id, new_alloc);
Ok(new_alloc)
}
@ -584,7 +584,7 @@ fn get_or_create_thread_local_alloc(
fn create_thread(&mut self) -> ThreadId {
let this = self.eval_context_mut();
let id = this.machine.threads.create_thread();
if let Some(data_race) = &mut this.memory.extra.data_race {
if let Some(data_race) = &mut this.machine.data_race {
data_race.thread_created(id);
}
id
@ -599,14 +599,14 @@ fn detach_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx> {
#[inline]
fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
this.machine.threads.join_thread(joined_thread_id, this.memory.extra.data_race.as_mut())?;
this.machine.threads.join_thread(joined_thread_id, this.machine.data_race.as_mut())?;
Ok(())
}

#[inline]
fn set_active_thread(&mut self, thread_id: ThreadId) -> ThreadId {
let this = self.eval_context_mut();
if let Some(data_race) = &this.memory.extra.data_race {
if let Some(data_race) = &this.machine.data_race {
data_race.thread_set_active(thread_id);
}
this.machine.threads.set_active_thread_id(thread_id)
@ -669,7 +669,7 @@ fn active_thread_stack_mut(&mut self) -> &mut Vec<Frame<'mir, 'tcx, Tag, FrameDa
#[inline]
fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) {
let this = self.eval_context_mut();
if let Some(data_race) = &mut this.memory.extra.data_race {
if let Some(data_race) = &mut this.machine.data_race {
if let Ok(string) = String::from_utf8(new_thread_name.clone()) {
data_race.thread_set_name(this.machine.threads.active_thread, string);
}
@ -753,7 +753,7 @@ fn run_timeout_callback(&mut self) -> InterpResult<'tcx> {
#[inline]
fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
let this = self.eval_context_mut();
let data_race = &this.memory.extra.data_race;
let data_race = &this.machine.data_race;
this.machine.threads.schedule(data_race)
}

@ -764,8 +764,8 @@ fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
#[inline]
fn thread_terminated(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
for ptr in this.machine.threads.thread_terminated(this.memory.extra.data_race.as_mut()) {
this.memory.deallocate(ptr.into(), None, MiriMemoryKind::Tls.into())?;
for ptr in this.machine.threads.thread_terminated(this.machine.data_race.as_mut()) {
this.deallocate_ptr(ptr.into(), None, MiriMemoryKind::Tls.into())?;
}
Ok(())
}