From c6404f56e786255592d655d9af9742004037a75a Mon Sep 17 00:00:00 2001 From: Oliver Scherer Date: Tue, 23 Oct 2018 17:54:20 +0200 Subject: [PATCH 01/10] Duplicate mod.rs for better diff tracking --- src/librustc/mir/interpret/allocation.rs | 752 +++++++++++++++++++++++ 1 file changed, 752 insertions(+) create mode 100644 src/librustc/mir/interpret/allocation.rs diff --git a/src/librustc/mir/interpret/allocation.rs b/src/librustc/mir/interpret/allocation.rs new file mode 100644 index 00000000000..4c2b2b2d41d --- /dev/null +++ b/src/librustc/mir/interpret/allocation.rs @@ -0,0 +1,752 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! An interpreter for MIR used in CTFE and by miri + +#[macro_export] +macro_rules! err { + ($($tt:tt)*) => { Err($crate::mir::interpret::EvalErrorKind::$($tt)*.into()) }; +} + +mod error; +mod value; + +pub use self::error::{ + EvalError, EvalResult, EvalErrorKind, AssertMessage, ConstEvalErr, struct_error, + FrameInfo, ConstEvalResult, +}; + +pub use self::value::{Scalar, ConstValue}; + +use std::fmt; +use mir; +use hir::def_id::DefId; +use ty::{self, TyCtxt, Instance}; +use ty::layout::{self, Align, HasDataLayout, Size}; +use middle::region; +use std::iter; +use std::io; +use std::ops::{Deref, DerefMut}; +use std::hash::Hash; +use syntax::ast::Mutability; +use rustc_serialize::{Encoder, Decodable, Encodable}; +use rustc_data_structures::sorted_map::SortedMap; +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::sync::{Lock as Mutex, HashMapExt}; +use rustc_data_structures::tiny_list::TinyList; +use byteorder::{WriteBytesExt, ReadBytesExt, LittleEndian, BigEndian}; +use ty::codec::TyDecoder; +use std::sync::atomic::{AtomicU32, Ordering}; +use std::num::NonZeroU32; + +#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub enum Lock { + NoLock, + WriteLock(DynamicLifetime), + /// This should never be empty -- that would be a read lock held and nobody + /// there to release it... + ReadLock(Vec), +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct DynamicLifetime { + pub frame: usize, + pub region: Option, // "None" indicates "until the function ends" +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] +pub enum AccessKind { + Read, + Write, +} + +/// Uniquely identifies a specific constant or static. +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, RustcEncodable, RustcDecodable)] +pub struct GlobalId<'tcx> { + /// For a constant or static, the `Instance` of the item itself. + /// For a promoted global, the `Instance` of the function they belong to. + pub instance: ty::Instance<'tcx>, + + /// The index for promoted globals within their function's `Mir`. + pub promoted: Option, +} + +//////////////////////////////////////////////////////////////////////////////// +// Pointer arithmetic +//////////////////////////////////////////////////////////////////////////////// + +pub trait PointerArithmetic: layout::HasDataLayout { + // These are not supposed to be overridden. 
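One note on the `err!` macro defined at the top of this file before the trait continues: it forwards arbitrary tokens into an `EvalErrorKind` variant and wraps the result in `Err`. A standalone sketch of the same pattern, with a simplified enum and without the `.into()` conversion (both assumptions for illustration):

#[derive(Debug, PartialEq)]
enum EvalErrorKind {
    Overflow,
    ReadUndefBytes(u64),
}

// Forward whatever tokens the caller writes into a variant constructor.
macro_rules! err {
    ($($tt:tt)*) => { Err(EvalErrorKind::$($tt)*) };
}

fn checked(v: Option<u64>) -> Result<u64, EvalErrorKind> {
    match v {
        Some(x) => Ok(x),
        None => err!(ReadUndefBytes(0)),
    }
}

fn main() {
    assert_eq!(checked(None), Err(EvalErrorKind::ReadUndefBytes(0)));
}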
+
+    #[inline(always)]
+    fn pointer_size(self) -> Size {
+        self.data_layout().pointer_size
+    }
+
+    /// Truncate the given value to the pointer size; also return whether there was an overflow
+    fn truncate_to_ptr(self, val: u128) -> (u64, bool) {
+        let max_ptr_plus_1 = 1u128 << self.pointer_size().bits();
+        ((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1)
+    }
+
+    // Overflow checking only works properly on the range from -u64 to +u64.
+    fn overflowing_signed_offset(self, val: u64, i: i128) -> (u64, bool) {
+        // FIXME: is it possible to over/underflow here?
+        if i < 0 {
+            // trickery to ensure that i64::min_value() works fine
+            // this formula only works for true negative values, it panics for zero!
+            let n = u64::max_value() - (i as u64) + 1;
+            val.overflowing_sub(n)
+        } else {
+            self.overflowing_offset(val, i as u64)
+        }
+    }
+
+    fn overflowing_offset(self, val: u64, i: u64) -> (u64, bool) {
+        let (res, over1) = val.overflowing_add(i);
+        let (res, over2) = self.truncate_to_ptr(res as u128);
+        (res, over1 || over2)
+    }
+
+    fn signed_offset<'tcx>(self, val: u64, i: i64) -> EvalResult<'tcx, u64> {
+        let (res, over) = self.overflowing_signed_offset(val, i as i128);
+        if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) }
+    }
+
+    fn offset<'tcx>(self, val: u64, i: u64) -> EvalResult<'tcx, u64> {
+        let (res, over) = self.overflowing_offset(val, i);
+        if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) }
+    }
+
+    fn wrapping_signed_offset(self, val: u64, i: i64) -> u64 {
+        self.overflowing_signed_offset(val, i as i128).0
+    }
+}
+
+impl<T: layout::HasDataLayout> PointerArithmetic for T {}
+
+
+/// Pointer is generic over the type that represents a reference to Allocations,
+/// thus making it possible for the most convenient representation to be used in
+/// each context.
+///
+/// Defaults to the index based and loosely coupled AllocId.
+///
+/// Pointer is also generic over the `Tag` associated with each pointer,
+/// which is used to do provenance tracking during execution.
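To see the truncation-and-overflow logic above in isolation, here is a minimal standalone sketch; the hard-coded 64-bit pointer width and the free-function names are assumptions for illustration (rustc takes the width from the target's data layout):

// Pointer-width arithmetic from the trait above, specialized to an
// assumed 64-bit target.
const POINTER_BITS: u32 = 64;

// Wrap a u128 into the pointer's value range; report whether wrapping occurred.
fn truncate_to_ptr(val: u128) -> (u64, bool) {
    let max_ptr_plus_1 = 1u128 << POINTER_BITS;
    ((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1)
}

// Offset with overflow detection, mirroring `overflowing_offset`.
fn overflowing_offset(val: u64, i: u64) -> (u64, bool) {
    let (res, over1) = val.overflowing_add(i);
    let (res, over2) = truncate_to_ptr(res as u128);
    (res, over1 || over2)
}

fn main() {
    assert_eq!(overflowing_offset(u64::max_value(), 1), (0, true));
    assert_eq!(overflowing_offset(40, 2), (42, false));
}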
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] +pub struct Pointer { + pub alloc_id: Id, + pub offset: Size, + pub tag: Tag, +} + +/// Produces a `Pointer` which points to the beginning of the Allocation +impl From for Pointer { + #[inline(always)] + fn from(alloc_id: AllocId) -> Self { + Pointer::new(alloc_id, Size::ZERO) + } +} + +impl<'tcx> Pointer<()> { + #[inline(always)] + pub fn new(alloc_id: AllocId, offset: Size) -> Self { + Pointer { alloc_id, offset, tag: () } + } + + #[inline(always)] + pub fn with_default_tag(self) -> Pointer + where Tag: Default + { + Pointer::new_with_tag(self.alloc_id, self.offset, Default::default()) + } +} + +impl<'tcx, Tag> Pointer { + #[inline(always)] + pub fn new_with_tag(alloc_id: AllocId, offset: Size, tag: Tag) -> Self { + Pointer { alloc_id, offset, tag } + } + + pub fn wrapping_signed_offset(self, i: i64, cx: C) -> Self { + Pointer::new_with_tag( + self.alloc_id, + Size::from_bytes(cx.data_layout().wrapping_signed_offset(self.offset.bytes(), i)), + self.tag, + ) + } + + pub fn overflowing_signed_offset(self, i: i128, cx: C) -> (Self, bool) { + let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i); + (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over) + } + + pub fn signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { + Ok(Pointer::new_with_tag( + self.alloc_id, + Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?), + self.tag, + )) + } + + pub fn overflowing_offset(self, i: Size, cx: C) -> (Self, bool) { + let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes()); + (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over) + } + + pub fn offset(self, i: Size, cx: C) -> EvalResult<'tcx, Self> { + Ok(Pointer::new_with_tag( + self.alloc_id, + Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?), + self.tag + )) + } + + #[inline] + pub fn erase_tag(self) -> Pointer { + Pointer { alloc_id: self.alloc_id, offset: self.offset, tag: () } + } +} + + +#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd, Debug)] +pub struct AllocId(pub u64); + +impl ::rustc_serialize::UseSpecializedEncodable for AllocId {} +impl ::rustc_serialize::UseSpecializedDecodable for AllocId {} + +#[derive(RustcDecodable, RustcEncodable)] +enum AllocKind { + Alloc, + Fn, + Static, +} + +pub fn specialized_encode_alloc_id< + 'a, 'tcx, + E: Encoder, +>( + encoder: &mut E, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + alloc_id: AllocId, +) -> Result<(), E::Error> { + let alloc_type: AllocType<'tcx, &'tcx Allocation> = + tcx.alloc_map.lock().get(alloc_id).expect("no value for AllocId"); + match alloc_type { + AllocType::Memory(alloc) => { + trace!("encoding {:?} with {:#?}", alloc_id, alloc); + AllocKind::Alloc.encode(encoder)?; + alloc.encode(encoder)?; + } + AllocType::Function(fn_instance) => { + trace!("encoding {:?} with {:#?}", alloc_id, fn_instance); + AllocKind::Fn.encode(encoder)?; + fn_instance.encode(encoder)?; + } + AllocType::Static(did) => { + // referring to statics doesn't need to know about their allocations, + // just about its DefId + AllocKind::Static.encode(encoder)?; + did.encode(encoder)?; + } + } + Ok(()) +} + +// Used to avoid infinite recursion when decoding cyclic allocations. 
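The `Tag` parameter above is what makes provenance tracking possible: the tag rides along through pointer arithmetic until it is explicitly erased. A reduced stand-in (plain integers instead of `AllocId`/`Size`, and an illustrative tag value) looks like this:

#[derive(Copy, Clone, Debug, PartialEq)]
struct TaggedPtr<Tag = ()> {
    alloc_id: u64,
    offset: u64,
    tag: Tag,
}

impl<Tag> TaggedPtr<Tag> {
    // Offsetting leaves the allocation and the tag untouched.
    fn wrapping_offset(self, i: u64) -> Self {
        TaggedPtr { offset: self.offset.wrapping_add(i), ..self }
    }
    // Dropping provenance information for tag-agnostic consumers.
    fn erase_tag(self) -> TaggedPtr<()> {
        TaggedPtr { alloc_id: self.alloc_id, offset: self.offset, tag: () }
    }
}

fn main() {
    let p = TaggedPtr { alloc_id: 0, offset: 8, tag: "borrow-42" };
    let q = p.wrapping_offset(8);
    assert_eq!(q.tag, "borrow-42");       // provenance is preserved...
    assert_eq!(q.erase_tag().offset, 16); // ...until explicitly erased
}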
+type DecodingSessionId = NonZeroU32; + +#[derive(Clone)] +enum State { + Empty, + InProgressNonAlloc(TinyList), + InProgress(TinyList, AllocId), + Done(AllocId), +} + +pub struct AllocDecodingState { + // For each AllocId we keep track of which decoding state it's currently in. + decoding_state: Vec>, + // The offsets of each allocation in the data stream. + data_offsets: Vec, +} + +impl AllocDecodingState { + + pub fn new_decoding_session(&self) -> AllocDecodingSession<'_> { + static DECODER_SESSION_ID: AtomicU32 = AtomicU32::new(0); + let counter = DECODER_SESSION_ID.fetch_add(1, Ordering::SeqCst); + + // Make sure this is never zero + let session_id = DecodingSessionId::new((counter & 0x7FFFFFFF) + 1).unwrap(); + + AllocDecodingSession { + state: self, + session_id, + } + } + + pub fn new(data_offsets: Vec) -> AllocDecodingState { + let decoding_state: Vec<_> = ::std::iter::repeat(Mutex::new(State::Empty)) + .take(data_offsets.len()) + .collect(); + + AllocDecodingState { + decoding_state: decoding_state, + data_offsets, + } + } +} + +#[derive(Copy, Clone)] +pub struct AllocDecodingSession<'s> { + state: &'s AllocDecodingState, + session_id: DecodingSessionId, +} + +impl<'s> AllocDecodingSession<'s> { + + // Decodes an AllocId in a thread-safe way. + pub fn decode_alloc_id<'a, 'tcx, D>(&self, + decoder: &mut D) + -> Result + where D: TyDecoder<'a, 'tcx>, + 'tcx: 'a, + { + // Read the index of the allocation + let idx = decoder.read_u32()? as usize; + let pos = self.state.data_offsets[idx] as usize; + + // Decode the AllocKind now so that we know if we have to reserve an + // AllocId. + let (alloc_kind, pos) = decoder.with_position(pos, |decoder| { + let alloc_kind = AllocKind::decode(decoder)?; + Ok((alloc_kind, decoder.position())) + })?; + + // Check the decoding state, see if it's already decoded or if we should + // decode it here. + let alloc_id = { + let mut entry = self.state.decoding_state[idx].lock(); + + match *entry { + State::Done(alloc_id) => { + return Ok(alloc_id); + } + ref mut entry @ State::Empty => { + // We are allowed to decode + match alloc_kind { + AllocKind::Alloc => { + // If this is an allocation, we need to reserve an + // AllocId so we can decode cyclic graphs. + let alloc_id = decoder.tcx().alloc_map.lock().reserve(); + *entry = State::InProgress( + TinyList::new_single(self.session_id), + alloc_id); + Some(alloc_id) + }, + AllocKind::Fn | AllocKind::Static => { + // Fns and statics cannot be cyclic and their AllocId + // is determined later by interning + *entry = State::InProgressNonAlloc( + TinyList::new_single(self.session_id)); + None + } + } + } + State::InProgressNonAlloc(ref mut sessions) => { + if sessions.contains(&self.session_id) { + bug!("This should be unreachable") + } else { + // Start decoding concurrently + sessions.insert(self.session_id); + None + } + } + State::InProgress(ref mut sessions, alloc_id) => { + if sessions.contains(&self.session_id) { + // Don't recurse. + return Ok(alloc_id) + } else { + // Start decoding concurrently + sessions.insert(self.session_id); + Some(alloc_id) + } + } + } + }; + + // Now decode the actual data + let alloc_id = decoder.with_position(pos, |decoder| { + match alloc_kind { + AllocKind::Alloc => { + let allocation = <&'tcx Allocation as Decodable>::decode(decoder)?; + // We already have a reserved AllocId. 
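The state machine below boils down to: reserve an id before decoding the body, so that any back-edge encountered while decoding can resolve to the reserved id. A toy single-slot version, with the real decoder and payload elided (names and signatures are illustrative only):

#[derive(Clone, Copy, Debug, PartialEq)]
enum SlotState {
    Empty,
    InProgress(u64), // id reserved, body not yet available
    Done(u64),
}

fn decode_slot(slot: &mut SlotState, reserve: impl FnOnce() -> u64) -> u64 {
    match *slot {
        SlotState::Done(id) => id,       // already decoded
        SlotState::InProgress(id) => id, // cycle: reuse the reserved id
        SlotState::Empty => {
            let id = reserve();          // reserve before decoding the body
            *slot = SlotState::InProgress(id);
            // ... decode the body here, possibly re-entering decode_slot ...
            *slot = SlotState::Done(id);
            id
        }
    }
}

fn main() {
    let mut slot = SlotState::Empty;
    let id = decode_slot(&mut slot, || 7);
    assert_eq!(id, 7);
    assert_eq!(slot, SlotState::Done(7));
}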
+ let alloc_id = alloc_id.unwrap(); + trace!("decoded alloc {:?} {:#?}", alloc_id, allocation); + decoder.tcx().alloc_map.lock().set_id_same_memory(alloc_id, allocation); + Ok(alloc_id) + }, + AllocKind::Fn => { + assert!(alloc_id.is_none()); + trace!("creating fn alloc id"); + let instance = ty::Instance::decode(decoder)?; + trace!("decoded fn alloc instance: {:?}", instance); + let alloc_id = decoder.tcx().alloc_map.lock().create_fn_alloc(instance); + Ok(alloc_id) + }, + AllocKind::Static => { + assert!(alloc_id.is_none()); + trace!("creating extern static alloc id at"); + let did = DefId::decode(decoder)?; + let alloc_id = decoder.tcx().alloc_map.lock().intern_static(did); + Ok(alloc_id) + } + } + })?; + + self.state.decoding_state[idx].with_lock(|entry| { + *entry = State::Done(alloc_id); + }); + + Ok(alloc_id) + } +} + +impl fmt::Display for AllocId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +#[derive(Debug, Clone, Eq, PartialEq, Hash, RustcDecodable, RustcEncodable)] +pub enum AllocType<'tcx, M> { + /// The alloc id is used as a function pointer + Function(Instance<'tcx>), + /// The alloc id points to a "lazy" static variable that did not get computed (yet). + /// This is also used to break the cycle in recursive statics. + Static(DefId), + /// The alloc id points to memory + Memory(M) +} + +pub struct AllocMap<'tcx, M> { + /// Lets you know what an AllocId refers to + id_to_type: FxHashMap>, + + /// Used to ensure that functions and statics only get one associated AllocId + type_interner: FxHashMap, AllocId>, + + /// The AllocId to assign to the next requested id. + /// Always incremented, never gets smaller. + next_id: AllocId, +} + +impl<'tcx, M: fmt::Debug + Eq + Hash + Clone> AllocMap<'tcx, M> { + pub fn new() -> Self { + AllocMap { + id_to_type: Default::default(), + type_interner: Default::default(), + next_id: AllocId(0), + } + } + + /// obtains a new allocation ID that can be referenced but does not + /// yet have an allocation backing it. + pub fn reserve( + &mut self, + ) -> AllocId { + let next = self.next_id; + self.next_id.0 = self.next_id.0 + .checked_add(1) + .expect("You overflowed a u64 by incrementing by 1... \ + You've just earned yourself a free drink if we ever meet. \ + Seriously, how did you do that?!"); + next + } + + fn intern(&mut self, alloc_type: AllocType<'tcx, M>) -> AllocId { + if let Some(&alloc_id) = self.type_interner.get(&alloc_type) { + return alloc_id; + } + let id = self.reserve(); + debug!("creating alloc_type {:?} with id {}", alloc_type, id); + self.id_to_type.insert(id, alloc_type.clone()); + self.type_interner.insert(alloc_type, id); + id + } + + // FIXME: Check if functions have identity. If not, we should not intern these, + // but instead create a new id per use. + // Alternatively we could just make comparing function pointers an error. 
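The two maps above form a classic interner: `id_to_type` answers lookups, while `type_interner` guarantees one id per value. A minimal sketch with `String` standing in for `AllocType` (an assumption for brevity):

use std::collections::HashMap;

#[derive(Default)]
struct Interner {
    id_to_value: HashMap<u64, String>,
    value_to_id: HashMap<String, u64>,
    next_id: u64,
}

impl Interner {
    // Hand out a fresh id; ids are never reused.
    fn reserve(&mut self) -> u64 {
        let id = self.next_id;
        self.next_id = self.next_id.checked_add(1).expect("id space exhausted");
        id
    }
    fn intern(&mut self, value: String) -> u64 {
        if let Some(&id) = self.value_to_id.get(&value) {
            return id; // same value, same id
        }
        let id = self.reserve();
        self.id_to_value.insert(id, value.clone());
        self.value_to_id.insert(value, id);
        id
    }
}

fn main() {
    let mut i = Interner::default();
    let a = i.intern("static FOO".to_string());
    let b = i.intern("static FOO".to_string());
    assert_eq!(a, b); // interning is idempotent
}

Keeping both directions in separate maps trades memory for O(1) lookups each way, which is the same trade the `AllocMap` above makes.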
+ pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> AllocId { + self.intern(AllocType::Function(instance)) + } + + pub fn get(&self, id: AllocId) -> Option> { + self.id_to_type.get(&id).cloned() + } + + pub fn unwrap_memory(&self, id: AllocId) -> M { + match self.get(id) { + Some(AllocType::Memory(mem)) => mem, + _ => bug!("expected allocation id {} to point to memory", id), + } + } + + pub fn intern_static(&mut self, static_id: DefId) -> AllocId { + self.intern(AllocType::Static(static_id)) + } + + pub fn allocate(&mut self, mem: M) -> AllocId { + let id = self.reserve(); + self.set_id_memory(id, mem); + id + } + + pub fn set_id_memory(&mut self, id: AllocId, mem: M) { + if let Some(old) = self.id_to_type.insert(id, AllocType::Memory(mem)) { + bug!("tried to set allocation id {}, but it was already existing as {:#?}", id, old); + } + } + + pub fn set_id_same_memory(&mut self, id: AllocId, mem: M) { + self.id_to_type.insert_same(id, AllocType::Memory(mem)); + } +} + +#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] +pub struct Allocation { + /// The actual bytes of the allocation. + /// Note that the bytes of a pointer represent the offset of the pointer + pub bytes: Vec, + /// Maps from byte addresses to extra data for each pointer. + /// Only the first byte of a pointer is inserted into the map; i.e., + /// every entry in this map applies to `pointer_size` consecutive bytes starting + /// at the given offset. + pub relocations: Relocations, + /// Denotes undefined memory. Reading from undefined memory is forbidden in miri + pub undef_mask: UndefMask, + /// The alignment of the allocation to detect unaligned reads. + pub align: Align, + /// Whether the allocation is mutable. + /// Also used by codegen to determine if a static should be put into mutable memory, + /// which happens for `static mut` and `static` with interior mutability. + pub mutability: Mutability, + /// Extra state for the machine. + pub extra: Extra, +} + +impl Allocation { + /// Creates a read-only allocation initialized by the given bytes + pub fn from_bytes(slice: &[u8], align: Align) -> Self { + let mut undef_mask = UndefMask::new(Size::ZERO); + undef_mask.grow(Size::from_bytes(slice.len() as u64), true); + Self { + bytes: slice.to_owned(), + relocations: Relocations::new(), + undef_mask, + align, + mutability: Mutability::Immutable, + extra: Extra::default(), + } + } + + pub fn from_byte_aligned_bytes(slice: &[u8]) -> Self { + Allocation::from_bytes(slice, Align::from_bytes(1, 1).unwrap()) + } + + pub fn undef(size: Size, align: Align) -> Self { + assert_eq!(size.bytes() as usize as u64, size.bytes()); + Allocation { + bytes: vec![0; size.bytes() as usize], + relocations: Relocations::new(), + undef_mask: UndefMask::new(size), + align, + mutability: Mutability::Mutable, + extra: Extra::default(), + } + } +} + +impl<'tcx> ::serialize::UseSpecializedDecodable for &'tcx Allocation {} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct Relocations(SortedMap); + +impl Relocations { + pub fn new() -> Self { + Relocations(SortedMap::new()) + } + + // The caller must guarantee that the given relocations are already sorted + // by address and contain no duplicates. 
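The two constructors above encode an invariant worth spelling out: `from_bytes` produces fully defined, immutable memory, while `undef` produces zero-filled storage whose bytes are all marked undefined. A toy model (field types simplified, not the rustc structs):

struct Alloc {
    bytes: Vec<u8>,
    defined: Vec<bool>, // stand-in for the compact UndefMask
    mutable: bool,
}

impl Alloc {
    fn from_bytes(slice: &[u8]) -> Self {
        Alloc { bytes: slice.to_owned(), defined: vec![true; slice.len()], mutable: false }
    }
    fn undef(size: usize) -> Self {
        // The 0s are placeholders; reads must consult `defined` first.
        Alloc { bytes: vec![0; size], defined: vec![false; size], mutable: true }
    }
}

fn main() {
    let a = Alloc::from_bytes(b"hi");
    assert!(a.defined.iter().all(|&d| d));
    let b = Alloc::undef(4);
    assert!(b.defined.iter().all(|&d| !d));
}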
+ pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self { + Relocations(SortedMap::from_presorted_elements(r)) + } +} + +impl Deref for Relocations { + type Target = SortedMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for Relocations { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Methods to access integers in the target endianness +//////////////////////////////////////////////////////////////////////////////// + +pub fn write_target_uint( + endianness: layout::Endian, + mut target: &mut [u8], + data: u128, +) -> Result<(), io::Error> { + let len = target.len(); + match endianness { + layout::Endian::Little => target.write_uint128::(data, len), + layout::Endian::Big => target.write_uint128::(data, len), + } +} + +pub fn read_target_uint(endianness: layout::Endian, mut source: &[u8]) -> Result { + match endianness { + layout::Endian::Little => source.read_uint128::(source.len()), + layout::Endian::Big => source.read_uint128::(source.len()), + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Methods to faciliate working with signed integers stored in a u128 +//////////////////////////////////////////////////////////////////////////////// + +pub fn sign_extend(value: u128, size: Size) -> u128 { + let size = size.bits(); + // sign extend + let shift = 128 - size; + // shift the unsigned value to the left + // and back to the right as signed (essentially fills with FF on the left) + (((value << shift) as i128) >> shift) as u128 +} + +pub fn truncate(value: u128, size: Size) -> u128 { + let size = size.bits(); + let shift = 128 - size; + // truncate (shift left to drop out leftover values, shift right to fill with zeroes) + (value << shift) >> shift +} + +//////////////////////////////////////////////////////////////////////////////// +// Undefined byte tracking +//////////////////////////////////////////////////////////////////////////////// + +type Block = u64; +const BLOCK_SIZE: u64 = 64; + +#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] +pub struct UndefMask { + blocks: Vec, + len: Size, +} + +impl_stable_hash_for!(struct mir::interpret::UndefMask{blocks, len}); + +impl UndefMask { + pub fn new(size: Size) -> Self { + let mut m = UndefMask { + blocks: vec![], + len: Size::ZERO, + }; + m.grow(size, false); + m + } + + /// Check whether the range `start..end` (end-exclusive) is entirely defined. + /// + /// Returns `Ok(())` if it's defined. Otherwise returns the index of the byte + /// at which the first undefined access begins. 
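The `sign_extend`/`truncate` shift tricks above are easy to check on a concrete value; this standalone copy (with a plain bit count instead of `Size`) shows both directions:

// Treat the low `bits` bits of a u128 as signed, filling the high bits.
fn sign_extend(value: u128, bits: u32) -> u128 {
    let shift = 128 - bits;
    (((value << shift) as i128) >> shift) as u128
}

// Drop everything above the low `bits` bits.
fn truncate(value: u128, bits: u32) -> u128 {
    let shift = 128 - bits;
    (value << shift) >> shift
}

fn main() {
    // 0xFF as an 8-bit value is -1; sign-extending fills the high bits.
    assert_eq!(sign_extend(0xFF, 8), u128::max_value());
    // Truncating back to 8 bits recovers the original payload.
    assert_eq!(truncate(u128::max_value(), 8), 0xFF);
}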
+ #[inline] + pub fn is_range_defined(&self, start: Size, end: Size) -> Result<(), Size> { + if end > self.len { + return Err(self.len); + } + + let idx = (start.bytes()..end.bytes()) + .map(|i| Size::from_bytes(i)) + .find(|&i| !self.get(i)); + + match idx { + Some(idx) => Err(idx), + None => Ok(()) + } + } + + pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) { + let len = self.len; + if end > len { + self.grow(end - len, new_state); + } + self.set_range_inbounds(start, end, new_state); + } + + pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) { + for i in start.bytes()..end.bytes() { + self.set(Size::from_bytes(i), new_state); + } + } + + #[inline] + pub fn get(&self, i: Size) -> bool { + let (block, bit) = bit_index(i); + (self.blocks[block] & 1 << bit) != 0 + } + + #[inline] + pub fn set(&mut self, i: Size, new_state: bool) { + let (block, bit) = bit_index(i); + if new_state { + self.blocks[block] |= 1 << bit; + } else { + self.blocks[block] &= !(1 << bit); + } + } + + pub fn grow(&mut self, amount: Size, new_state: bool) { + let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len.bytes(); + if amount.bytes() > unused_trailing_bits { + let additional_blocks = amount.bytes() / BLOCK_SIZE + 1; + assert_eq!(additional_blocks as usize as u64, additional_blocks); + self.blocks.extend( + iter::repeat(0).take(additional_blocks as usize), + ); + } + let start = self.len; + self.len += amount; + self.set_range_inbounds(start, start + amount, new_state); + } +} + +#[inline] +fn bit_index(bits: Size) -> (usize, usize) { + let bits = bits.bytes(); + let a = bits / BLOCK_SIZE; + let b = bits % BLOCK_SIZE; + assert_eq!(a as usize as u64, a); + assert_eq!(b as usize as u64, b); + (a as usize, b as usize) +} From 7db04835f9ab3ab3d4b6f8a91547a2c7a4f66091 Mon Sep 17 00:00:00 2001 From: Oliver Scherer Date: Tue, 23 Oct 2018 18:05:32 +0200 Subject: [PATCH 02/10] Move `Allocation` into its own module --- src/librustc/mir/interpret/allocation.rs | 687 +---------------------- src/librustc/mir/interpret/mod.rs | 62 +- 2 files changed, 9 insertions(+), 740 deletions(-) diff --git a/src/librustc/mir/interpret/allocation.rs b/src/librustc/mir/interpret/allocation.rs index 4c2b2b2d41d..8444cf5726f 100644 --- a/src/librustc/mir/interpret/allocation.rs +++ b/src/librustc/mir/interpret/allocation.rs @@ -8,520 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! An interpreter for MIR used in CTFE and by miri +//! The virtual memory representation of the MIR interpreter -#[macro_export] -macro_rules! 
err { - ($($tt:tt)*) => { Err($crate::mir::interpret::EvalErrorKind::$($tt)*.into()) }; -} - -mod error; -mod value; - -pub use self::error::{ - EvalError, EvalResult, EvalErrorKind, AssertMessage, ConstEvalErr, struct_error, - FrameInfo, ConstEvalResult, +use super::{ + UndefMask, + Relocations, }; -pub use self::value::{Scalar, ConstValue}; - -use std::fmt; -use mir; -use hir::def_id::DefId; -use ty::{self, TyCtxt, Instance}; -use ty::layout::{self, Align, HasDataLayout, Size}; -use middle::region; -use std::iter; -use std::io; -use std::ops::{Deref, DerefMut}; -use std::hash::Hash; +use ty::layout::{Size, Align}; use syntax::ast::Mutability; -use rustc_serialize::{Encoder, Decodable, Encodable}; -use rustc_data_structures::sorted_map::SortedMap; -use rustc_data_structures::fx::FxHashMap; -use rustc_data_structures::sync::{Lock as Mutex, HashMapExt}; -use rustc_data_structures::tiny_list::TinyList; -use byteorder::{WriteBytesExt, ReadBytesExt, LittleEndian, BigEndian}; -use ty::codec::TyDecoder; -use std::sync::atomic::{AtomicU32, Ordering}; -use std::num::NonZeroU32; - -#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] -pub enum Lock { - NoLock, - WriteLock(DynamicLifetime), - /// This should never be empty -- that would be a read lock held and nobody - /// there to release it... - ReadLock(Vec), -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] -pub struct DynamicLifetime { - pub frame: usize, - pub region: Option, // "None" indicates "until the function ends" -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] -pub enum AccessKind { - Read, - Write, -} - -/// Uniquely identifies a specific constant or static. -#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, RustcEncodable, RustcDecodable)] -pub struct GlobalId<'tcx> { - /// For a constant or static, the `Instance` of the item itself. - /// For a promoted global, the `Instance` of the function they belong to. - pub instance: ty::Instance<'tcx>, - - /// The index for promoted globals within their function's `Mir`. - pub promoted: Option, -} - -//////////////////////////////////////////////////////////////////////////////// -// Pointer arithmetic -//////////////////////////////////////////////////////////////////////////////// - -pub trait PointerArithmetic: layout::HasDataLayout { - // These are not supposed to be overridden. - - #[inline(always)] - fn pointer_size(self) -> Size { - self.data_layout().pointer_size - } - - //// Trunace the given value to the pointer size; also return whether there was an overflow - fn truncate_to_ptr(self, val: u128) -> (u64, bool) { - let max_ptr_plus_1 = 1u128 << self.pointer_size().bits(); - ((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1) - } - - // Overflow checking only works properly on the range from -u64 to +u64. - fn overflowing_signed_offset(self, val: u64, i: i128) -> (u64, bool) { - // FIXME: is it possible to over/underflow here? - if i < 0 { - // trickery to ensure that i64::min_value() works fine - // this formula only works for true negative values, it panics for zero! 
- let n = u64::max_value() - (i as u64) + 1; - val.overflowing_sub(n) - } else { - self.overflowing_offset(val, i as u64) - } - } - - fn overflowing_offset(self, val: u64, i: u64) -> (u64, bool) { - let (res, over1) = val.overflowing_add(i); - let (res, over2) = self.truncate_to_ptr(res as u128); - (res, over1 || over2) - } - - fn signed_offset<'tcx>(self, val: u64, i: i64) -> EvalResult<'tcx, u64> { - let (res, over) = self.overflowing_signed_offset(val, i as i128); - if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) } - } - - fn offset<'tcx>(self, val: u64, i: u64) -> EvalResult<'tcx, u64> { - let (res, over) = self.overflowing_offset(val, i); - if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) } - } - - fn wrapping_signed_offset(self, val: u64, i: i64) -> u64 { - self.overflowing_signed_offset(val, i as i128).0 - } -} - -impl PointerArithmetic for T {} - - -/// Pointer is generic over the type that represents a reference to Allocations, -/// thus making it possible for the most convenient representation to be used in -/// each context. -/// -/// Defaults to the index based and loosely coupled AllocId. -/// -/// Pointer is also generic over the `Tag` associated with each pointer, -/// which is used to do provenance tracking during execution. -#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] -pub struct Pointer { - pub alloc_id: Id, - pub offset: Size, - pub tag: Tag, -} - -/// Produces a `Pointer` which points to the beginning of the Allocation -impl From for Pointer { - #[inline(always)] - fn from(alloc_id: AllocId) -> Self { - Pointer::new(alloc_id, Size::ZERO) - } -} - -impl<'tcx> Pointer<()> { - #[inline(always)] - pub fn new(alloc_id: AllocId, offset: Size) -> Self { - Pointer { alloc_id, offset, tag: () } - } - - #[inline(always)] - pub fn with_default_tag(self) -> Pointer - where Tag: Default - { - Pointer::new_with_tag(self.alloc_id, self.offset, Default::default()) - } -} - -impl<'tcx, Tag> Pointer { - #[inline(always)] - pub fn new_with_tag(alloc_id: AllocId, offset: Size, tag: Tag) -> Self { - Pointer { alloc_id, offset, tag } - } - - pub fn wrapping_signed_offset(self, i: i64, cx: C) -> Self { - Pointer::new_with_tag( - self.alloc_id, - Size::from_bytes(cx.data_layout().wrapping_signed_offset(self.offset.bytes(), i)), - self.tag, - ) - } - - pub fn overflowing_signed_offset(self, i: i128, cx: C) -> (Self, bool) { - let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i); - (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over) - } - - pub fn signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { - Ok(Pointer::new_with_tag( - self.alloc_id, - Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?), - self.tag, - )) - } - - pub fn overflowing_offset(self, i: Size, cx: C) -> (Self, bool) { - let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes()); - (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over) - } - - pub fn offset(self, i: Size, cx: C) -> EvalResult<'tcx, Self> { - Ok(Pointer::new_with_tag( - self.alloc_id, - Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?), - self.tag - )) - } - - #[inline] - pub fn erase_tag(self) -> Pointer { - Pointer { alloc_id: self.alloc_id, offset: self.offset, tag: () } - } -} - - -#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd, Debug)] -pub struct AllocId(pub u64); - -impl 
::rustc_serialize::UseSpecializedEncodable for AllocId {} -impl ::rustc_serialize::UseSpecializedDecodable for AllocId {} - -#[derive(RustcDecodable, RustcEncodable)] -enum AllocKind { - Alloc, - Fn, - Static, -} - -pub fn specialized_encode_alloc_id< - 'a, 'tcx, - E: Encoder, ->( - encoder: &mut E, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - alloc_id: AllocId, -) -> Result<(), E::Error> { - let alloc_type: AllocType<'tcx, &'tcx Allocation> = - tcx.alloc_map.lock().get(alloc_id).expect("no value for AllocId"); - match alloc_type { - AllocType::Memory(alloc) => { - trace!("encoding {:?} with {:#?}", alloc_id, alloc); - AllocKind::Alloc.encode(encoder)?; - alloc.encode(encoder)?; - } - AllocType::Function(fn_instance) => { - trace!("encoding {:?} with {:#?}", alloc_id, fn_instance); - AllocKind::Fn.encode(encoder)?; - fn_instance.encode(encoder)?; - } - AllocType::Static(did) => { - // referring to statics doesn't need to know about their allocations, - // just about its DefId - AllocKind::Static.encode(encoder)?; - did.encode(encoder)?; - } - } - Ok(()) -} - -// Used to avoid infinite recursion when decoding cyclic allocations. -type DecodingSessionId = NonZeroU32; - -#[derive(Clone)] -enum State { - Empty, - InProgressNonAlloc(TinyList), - InProgress(TinyList, AllocId), - Done(AllocId), -} - -pub struct AllocDecodingState { - // For each AllocId we keep track of which decoding state it's currently in. - decoding_state: Vec>, - // The offsets of each allocation in the data stream. - data_offsets: Vec, -} - -impl AllocDecodingState { - - pub fn new_decoding_session(&self) -> AllocDecodingSession<'_> { - static DECODER_SESSION_ID: AtomicU32 = AtomicU32::new(0); - let counter = DECODER_SESSION_ID.fetch_add(1, Ordering::SeqCst); - - // Make sure this is never zero - let session_id = DecodingSessionId::new((counter & 0x7FFFFFFF) + 1).unwrap(); - - AllocDecodingSession { - state: self, - session_id, - } - } - - pub fn new(data_offsets: Vec) -> AllocDecodingState { - let decoding_state: Vec<_> = ::std::iter::repeat(Mutex::new(State::Empty)) - .take(data_offsets.len()) - .collect(); - - AllocDecodingState { - decoding_state: decoding_state, - data_offsets, - } - } -} - -#[derive(Copy, Clone)] -pub struct AllocDecodingSession<'s> { - state: &'s AllocDecodingState, - session_id: DecodingSessionId, -} - -impl<'s> AllocDecodingSession<'s> { - - // Decodes an AllocId in a thread-safe way. - pub fn decode_alloc_id<'a, 'tcx, D>(&self, - decoder: &mut D) - -> Result - where D: TyDecoder<'a, 'tcx>, - 'tcx: 'a, - { - // Read the index of the allocation - let idx = decoder.read_u32()? as usize; - let pos = self.state.data_offsets[idx] as usize; - - // Decode the AllocKind now so that we know if we have to reserve an - // AllocId. - let (alloc_kind, pos) = decoder.with_position(pos, |decoder| { - let alloc_kind = AllocKind::decode(decoder)?; - Ok((alloc_kind, decoder.position())) - })?; - - // Check the decoding state, see if it's already decoded or if we should - // decode it here. - let alloc_id = { - let mut entry = self.state.decoding_state[idx].lock(); - - match *entry { - State::Done(alloc_id) => { - return Ok(alloc_id); - } - ref mut entry @ State::Empty => { - // We are allowed to decode - match alloc_kind { - AllocKind::Alloc => { - // If this is an allocation, we need to reserve an - // AllocId so we can decode cyclic graphs. 
- let alloc_id = decoder.tcx().alloc_map.lock().reserve(); - *entry = State::InProgress( - TinyList::new_single(self.session_id), - alloc_id); - Some(alloc_id) - }, - AllocKind::Fn | AllocKind::Static => { - // Fns and statics cannot be cyclic and their AllocId - // is determined later by interning - *entry = State::InProgressNonAlloc( - TinyList::new_single(self.session_id)); - None - } - } - } - State::InProgressNonAlloc(ref mut sessions) => { - if sessions.contains(&self.session_id) { - bug!("This should be unreachable") - } else { - // Start decoding concurrently - sessions.insert(self.session_id); - None - } - } - State::InProgress(ref mut sessions, alloc_id) => { - if sessions.contains(&self.session_id) { - // Don't recurse. - return Ok(alloc_id) - } else { - // Start decoding concurrently - sessions.insert(self.session_id); - Some(alloc_id) - } - } - } - }; - - // Now decode the actual data - let alloc_id = decoder.with_position(pos, |decoder| { - match alloc_kind { - AllocKind::Alloc => { - let allocation = <&'tcx Allocation as Decodable>::decode(decoder)?; - // We already have a reserved AllocId. - let alloc_id = alloc_id.unwrap(); - trace!("decoded alloc {:?} {:#?}", alloc_id, allocation); - decoder.tcx().alloc_map.lock().set_id_same_memory(alloc_id, allocation); - Ok(alloc_id) - }, - AllocKind::Fn => { - assert!(alloc_id.is_none()); - trace!("creating fn alloc id"); - let instance = ty::Instance::decode(decoder)?; - trace!("decoded fn alloc instance: {:?}", instance); - let alloc_id = decoder.tcx().alloc_map.lock().create_fn_alloc(instance); - Ok(alloc_id) - }, - AllocKind::Static => { - assert!(alloc_id.is_none()); - trace!("creating extern static alloc id at"); - let did = DefId::decode(decoder)?; - let alloc_id = decoder.tcx().alloc_map.lock().intern_static(did); - Ok(alloc_id) - } - } - })?; - - self.state.decoding_state[idx].with_lock(|entry| { - *entry = State::Done(alloc_id); - }); - - Ok(alloc_id) - } -} - -impl fmt::Display for AllocId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) - } -} - -#[derive(Debug, Clone, Eq, PartialEq, Hash, RustcDecodable, RustcEncodable)] -pub enum AllocType<'tcx, M> { - /// The alloc id is used as a function pointer - Function(Instance<'tcx>), - /// The alloc id points to a "lazy" static variable that did not get computed (yet). - /// This is also used to break the cycle in recursive statics. - Static(DefId), - /// The alloc id points to memory - Memory(M) -} - -pub struct AllocMap<'tcx, M> { - /// Lets you know what an AllocId refers to - id_to_type: FxHashMap>, - - /// Used to ensure that functions and statics only get one associated AllocId - type_interner: FxHashMap, AllocId>, - - /// The AllocId to assign to the next requested id. - /// Always incremented, never gets smaller. - next_id: AllocId, -} - -impl<'tcx, M: fmt::Debug + Eq + Hash + Clone> AllocMap<'tcx, M> { - pub fn new() -> Self { - AllocMap { - id_to_type: Default::default(), - type_interner: Default::default(), - next_id: AllocId(0), - } - } - - /// obtains a new allocation ID that can be referenced but does not - /// yet have an allocation backing it. - pub fn reserve( - &mut self, - ) -> AllocId { - let next = self.next_id; - self.next_id.0 = self.next_id.0 - .checked_add(1) - .expect("You overflowed a u64 by incrementing by 1... \ - You've just earned yourself a free drink if we ever meet. 
\ - Seriously, how did you do that?!"); - next - } - - fn intern(&mut self, alloc_type: AllocType<'tcx, M>) -> AllocId { - if let Some(&alloc_id) = self.type_interner.get(&alloc_type) { - return alloc_id; - } - let id = self.reserve(); - debug!("creating alloc_type {:?} with id {}", alloc_type, id); - self.id_to_type.insert(id, alloc_type.clone()); - self.type_interner.insert(alloc_type, id); - id - } - - // FIXME: Check if functions have identity. If not, we should not intern these, - // but instead create a new id per use. - // Alternatively we could just make comparing function pointers an error. - pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> AllocId { - self.intern(AllocType::Function(instance)) - } - - pub fn get(&self, id: AllocId) -> Option> { - self.id_to_type.get(&id).cloned() - } - - pub fn unwrap_memory(&self, id: AllocId) -> M { - match self.get(id) { - Some(AllocType::Memory(mem)) => mem, - _ => bug!("expected allocation id {} to point to memory", id), - } - } - - pub fn intern_static(&mut self, static_id: DefId) -> AllocId { - self.intern(AllocType::Static(static_id)) - } - - pub fn allocate(&mut self, mem: M) -> AllocId { - let id = self.reserve(); - self.set_id_memory(id, mem); - id - } - - pub fn set_id_memory(&mut self, id: AllocId, mem: M) { - if let Some(old) = self.id_to_type.insert(id, AllocType::Memory(mem)) { - bug!("tried to set allocation id {}, but it was already existing as {:#?}", id, old); - } - } - - pub fn set_id_same_memory(&mut self, id: AllocId, mem: M) { - self.id_to_type.insert_same(id, AllocType::Memory(mem)); - } -} #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct Allocation { @@ -578,175 +73,3 @@ pub fn undef(size: Size, align: Align) -> Self { } impl<'tcx> ::serialize::UseSpecializedDecodable for &'tcx Allocation {} - -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] -pub struct Relocations(SortedMap); - -impl Relocations { - pub fn new() -> Self { - Relocations(SortedMap::new()) - } - - // The caller must guarantee that the given relocations are already sorted - // by address and contain no duplicates. 
- pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self { - Relocations(SortedMap::from_presorted_elements(r)) - } -} - -impl Deref for Relocations { - type Target = SortedMap; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for Relocations { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -//////////////////////////////////////////////////////////////////////////////// -// Methods to access integers in the target endianness -//////////////////////////////////////////////////////////////////////////////// - -pub fn write_target_uint( - endianness: layout::Endian, - mut target: &mut [u8], - data: u128, -) -> Result<(), io::Error> { - let len = target.len(); - match endianness { - layout::Endian::Little => target.write_uint128::(data, len), - layout::Endian::Big => target.write_uint128::(data, len), - } -} - -pub fn read_target_uint(endianness: layout::Endian, mut source: &[u8]) -> Result { - match endianness { - layout::Endian::Little => source.read_uint128::(source.len()), - layout::Endian::Big => source.read_uint128::(source.len()), - } -} - -//////////////////////////////////////////////////////////////////////////////// -// Methods to faciliate working with signed integers stored in a u128 -//////////////////////////////////////////////////////////////////////////////// - -pub fn sign_extend(value: u128, size: Size) -> u128 { - let size = size.bits(); - // sign extend - let shift = 128 - size; - // shift the unsigned value to the left - // and back to the right as signed (essentially fills with FF on the left) - (((value << shift) as i128) >> shift) as u128 -} - -pub fn truncate(value: u128, size: Size) -> u128 { - let size = size.bits(); - let shift = 128 - size; - // truncate (shift left to drop out leftover values, shift right to fill with zeroes) - (value << shift) >> shift -} - -//////////////////////////////////////////////////////////////////////////////// -// Undefined byte tracking -//////////////////////////////////////////////////////////////////////////////// - -type Block = u64; -const BLOCK_SIZE: u64 = 64; - -#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] -pub struct UndefMask { - blocks: Vec, - len: Size, -} - -impl_stable_hash_for!(struct mir::interpret::UndefMask{blocks, len}); - -impl UndefMask { - pub fn new(size: Size) -> Self { - let mut m = UndefMask { - blocks: vec![], - len: Size::ZERO, - }; - m.grow(size, false); - m - } - - /// Check whether the range `start..end` (end-exclusive) is entirely defined. - /// - /// Returns `Ok(())` if it's defined. Otherwise returns the index of the byte - /// at which the first undefined access begins. 
- #[inline] - pub fn is_range_defined(&self, start: Size, end: Size) -> Result<(), Size> { - if end > self.len { - return Err(self.len); - } - - let idx = (start.bytes()..end.bytes()) - .map(|i| Size::from_bytes(i)) - .find(|&i| !self.get(i)); - - match idx { - Some(idx) => Err(idx), - None => Ok(()) - } - } - - pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) { - let len = self.len; - if end > len { - self.grow(end - len, new_state); - } - self.set_range_inbounds(start, end, new_state); - } - - pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) { - for i in start.bytes()..end.bytes() { - self.set(Size::from_bytes(i), new_state); - } - } - - #[inline] - pub fn get(&self, i: Size) -> bool { - let (block, bit) = bit_index(i); - (self.blocks[block] & 1 << bit) != 0 - } - - #[inline] - pub fn set(&mut self, i: Size, new_state: bool) { - let (block, bit) = bit_index(i); - if new_state { - self.blocks[block] |= 1 << bit; - } else { - self.blocks[block] &= !(1 << bit); - } - } - - pub fn grow(&mut self, amount: Size, new_state: bool) { - let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len.bytes(); - if amount.bytes() > unused_trailing_bits { - let additional_blocks = amount.bytes() / BLOCK_SIZE + 1; - assert_eq!(additional_blocks as usize as u64, additional_blocks); - self.blocks.extend( - iter::repeat(0).take(additional_blocks as usize), - ); - } - let start = self.len; - self.len += amount; - self.set_range_inbounds(start, start + amount, new_state); - } -} - -#[inline] -fn bit_index(bits: Size) -> (usize, usize) { - let bits = bits.bytes(); - let a = bits / BLOCK_SIZE; - let b = bits % BLOCK_SIZE; - assert_eq!(a as usize as u64, a); - assert_eq!(b as usize as u64, b); - (a as usize, b as usize) -} diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs index e2abf7970d6..238ffad0ae2 100644 --- a/src/librustc/mir/interpret/mod.rs +++ b/src/librustc/mir/interpret/mod.rs @@ -17,6 +17,7 @@ macro_rules! err { mod error; mod value; +mod allocation; pub use self::error::{ EvalError, EvalResult, EvalErrorKind, AssertMessage, ConstEvalErr, struct_error, @@ -25,17 +26,18 @@ macro_rules! err { pub use self::value::{Scalar, ConstValue}; +pub use self::allocation::Allocation; + use std::fmt; use mir; use hir::def_id::DefId; use ty::{self, TyCtxt, Instance}; -use ty::layout::{self, Align, HasDataLayout, Size}; +use ty::layout::{self, HasDataLayout, Size}; use middle::region; use std::iter; use std::io; use std::ops::{Deref, DerefMut}; use std::hash::Hash; -use syntax::ast::Mutability; use rustc_serialize::{Encoder, Decodable, Encodable}; use rustc_data_structures::sorted_map::SortedMap; use rustc_data_structures::fx::FxHashMap; @@ -528,62 +530,6 @@ pub fn set_id_same_memory(&mut self, id: AllocId, mem: M) { } } -#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] -pub struct Allocation { - /// The actual bytes of the allocation. - /// Note that the bytes of a pointer represent the offset of the pointer - pub bytes: Vec, - /// Maps from byte addresses to extra data for each pointer. - /// Only the first byte of a pointer is inserted into the map; i.e., - /// every entry in this map applies to `pointer_size` consecutive bytes starting - /// at the given offset. - pub relocations: Relocations, - /// Denotes undefined memory. Reading from undefined memory is forbidden in miri - pub undef_mask: UndefMask, - /// The alignment of the allocation to detect unaligned reads. 
- pub align: Align, - /// Whether the allocation is mutable. - /// Also used by codegen to determine if a static should be put into mutable memory, - /// which happens for `static mut` and `static` with interior mutability. - pub mutability: Mutability, - /// Extra state for the machine. - pub extra: Extra, -} - -impl Allocation { - /// Creates a read-only allocation initialized by the given bytes - pub fn from_bytes(slice: &[u8], align: Align) -> Self { - let mut undef_mask = UndefMask::new(Size::ZERO); - undef_mask.grow(Size::from_bytes(slice.len() as u64), true); - Self { - bytes: slice.to_owned(), - relocations: Relocations::new(), - undef_mask, - align, - mutability: Mutability::Immutable, - extra: Extra::default(), - } - } - - pub fn from_byte_aligned_bytes(slice: &[u8]) -> Self { - Allocation::from_bytes(slice, Align::from_bytes(1, 1).unwrap()) - } - - pub fn undef(size: Size, align: Align) -> Self { - assert_eq!(size.bytes() as usize as u64, size.bytes()); - Allocation { - bytes: vec![0; size.bytes() as usize], - relocations: Relocations::new(), - undef_mask: UndefMask::new(size), - align, - mutability: Mutability::Mutable, - extra: Extra::default(), - } - } -} - -impl<'tcx> ::serialize::UseSpecializedDecodable for &'tcx Allocation {} - #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct Relocations(SortedMap); From 48f6941acfeb96b572314243bb60a13e4db3689d Mon Sep 17 00:00:00 2001 From: Oliver Scherer Date: Tue, 23 Oct 2018 18:19:34 +0200 Subject: [PATCH 03/10] Move `ScalarMaybeUndef` back to rustc --- src/librustc/mir/interpret/mod.rs | 128 +++++++++++++++++++++++++ src/librustc_mir/interpret/operand.rs | 125 +----------------------- src/librustc_mir/interpret/snapshot.rs | 5 - 3 files changed, 129 insertions(+), 129 deletions(-) diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs index 238ffad0ae2..2821000ccad 100644 --- a/src/librustc/mir/interpret/mod.rs +++ b/src/librustc/mir/interpret/mod.rs @@ -701,3 +701,131 @@ fn bit_index(bits: Size) -> (usize, usize) { assert_eq!(b as usize as u64, b); (a as usize, b as usize) } + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] +pub enum ScalarMaybeUndef { + Scalar(Scalar), + Undef, +} + +impl From> for ScalarMaybeUndef { + #[inline(always)] + fn from(s: Scalar) -> Self { + ScalarMaybeUndef::Scalar(s) + } +} + +impl fmt::Display for ScalarMaybeUndef { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ScalarMaybeUndef::Undef => write!(f, "uninitialized bytes"), + ScalarMaybeUndef::Scalar(s) => write!(f, "{}", s), + } + } +} + +impl<'tcx> ScalarMaybeUndef<()> { + #[inline] + pub fn with_default_tag(self) -> ScalarMaybeUndef + where Tag: Default + { + match self { + ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.with_default_tag()), + ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef, + } + } +} + +impl<'tcx, Tag> ScalarMaybeUndef { + #[inline] + pub fn erase_tag(self) -> ScalarMaybeUndef + { + match self { + ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.erase_tag()), + ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef, + } + } + + #[inline] + pub fn not_undef(self) -> EvalResult<'static, Scalar> { + match self { + ScalarMaybeUndef::Scalar(scalar) => Ok(scalar), + ScalarMaybeUndef::Undef => err!(ReadUndefBytes(Size::from_bytes(0))), + } + } + + #[inline(always)] + pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { + self.not_undef()?.to_ptr() + } + + 
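Every typed accessor in this impl, above and below, funnels through `not_undef`, so a read of uninitialized bytes fails in exactly one place. A reduced sketch of that pattern (simplified error type and value representation, not the rustc API):

#[derive(Copy, Clone, Debug)]
enum MaybeUndef {
    Scalar(u128),
    Undef,
}

impl MaybeUndef {
    // The single chokepoint every typed accessor goes through.
    fn not_undef(self) -> Result<u128, &'static str> {
        match self {
            MaybeUndef::Scalar(v) => Ok(v),
            MaybeUndef::Undef => Err("read of uninitialized bytes"),
        }
    }
    fn to_bool(self) -> Result<bool, &'static str> {
        match self.not_undef()? {
            0 => Ok(false),
            1 => Ok(true),
            _ => Err("not a boolean"),
        }
    }
}

fn main() {
    assert_eq!(MaybeUndef::Scalar(1).to_bool(), Ok(true));
    assert!(MaybeUndef::Undef.to_bool().is_err());
}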
#[inline(always)] + pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> { + self.not_undef()?.to_bits(target_size) + } + + #[inline(always)] + pub fn to_bool(self) -> EvalResult<'tcx, bool> { + self.not_undef()?.to_bool() + } + + #[inline(always)] + pub fn to_char(self) -> EvalResult<'tcx, char> { + self.not_undef()?.to_char() + } + + #[inline(always)] + pub fn to_f32(self) -> EvalResult<'tcx, f32> { + self.not_undef()?.to_f32() + } + + #[inline(always)] + pub fn to_f64(self) -> EvalResult<'tcx, f64> { + self.not_undef()?.to_f64() + } + + #[inline(always)] + pub fn to_u8(self) -> EvalResult<'tcx, u8> { + self.not_undef()?.to_u8() + } + + #[inline(always)] + pub fn to_u32(self) -> EvalResult<'tcx, u32> { + self.not_undef()?.to_u32() + } + + #[inline(always)] + pub fn to_u64(self) -> EvalResult<'tcx, u64> { + self.not_undef()?.to_u64() + } + + #[inline(always)] + pub fn to_usize(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, u64> { + self.not_undef()?.to_usize(cx) + } + + #[inline(always)] + pub fn to_i8(self) -> EvalResult<'tcx, i8> { + self.not_undef()?.to_i8() + } + + #[inline(always)] + pub fn to_i32(self) -> EvalResult<'tcx, i32> { + self.not_undef()?.to_i32() + } + + #[inline(always)] + pub fn to_i64(self) -> EvalResult<'tcx, i64> { + self.not_undef()?.to_i64() + } + + #[inline(always)] + pub fn to_isize(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, i64> { + self.not_undef()?.to_isize(cx) + } +} + +impl_stable_hash_for!(enum ::mir::interpret::ScalarMaybeUndef { + Scalar(v), + Undef +}); diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs index 83a2d14b7ca..76bf2153167 100644 --- a/src/librustc_mir/interpret/operand.rs +++ b/src/librustc_mir/interpret/operand.rs @@ -23,130 +23,7 @@ EvalResult, EvalErrorKind }; use super::{EvalContext, Machine, MemPlace, MPlaceTy, MemoryKind}; - -#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] -pub enum ScalarMaybeUndef { - Scalar(Scalar), - Undef, -} - -impl From> for ScalarMaybeUndef { - #[inline(always)] - fn from(s: Scalar) -> Self { - ScalarMaybeUndef::Scalar(s) - } -} - -impl fmt::Display for ScalarMaybeUndef { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - ScalarMaybeUndef::Undef => write!(f, "uninitialized bytes"), - ScalarMaybeUndef::Scalar(s) => write!(f, "{}", s), - } - } -} - -impl<'tcx> ScalarMaybeUndef<()> { - #[inline] - pub fn with_default_tag(self) -> ScalarMaybeUndef - where Tag: Default - { - match self { - ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.with_default_tag()), - ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef, - } - } -} - -impl<'tcx, Tag> ScalarMaybeUndef { - #[inline] - pub fn erase_tag(self) -> ScalarMaybeUndef - { - match self { - ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.erase_tag()), - ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef, - } - } - - #[inline] - pub fn not_undef(self) -> EvalResult<'static, Scalar> { - match self { - ScalarMaybeUndef::Scalar(scalar) => Ok(scalar), - ScalarMaybeUndef::Undef => err!(ReadUndefBytes(Size::from_bytes(0))), - } - } - - #[inline(always)] - pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { - self.not_undef()?.to_ptr() - } - - #[inline(always)] - pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> { - self.not_undef()?.to_bits(target_size) - } - - #[inline(always)] - pub fn to_bool(self) -> EvalResult<'tcx, bool> { - self.not_undef()?.to_bool() - } - - #[inline(always)] - pub fn 
to_char(self) -> EvalResult<'tcx, char> { - self.not_undef()?.to_char() - } - - #[inline(always)] - pub fn to_f32(self) -> EvalResult<'tcx, f32> { - self.not_undef()?.to_f32() - } - - #[inline(always)] - pub fn to_f64(self) -> EvalResult<'tcx, f64> { - self.not_undef()?.to_f64() - } - - #[inline(always)] - pub fn to_u8(self) -> EvalResult<'tcx, u8> { - self.not_undef()?.to_u8() - } - - #[inline(always)] - pub fn to_u32(self) -> EvalResult<'tcx, u32> { - self.not_undef()?.to_u32() - } - - #[inline(always)] - pub fn to_u64(self) -> EvalResult<'tcx, u64> { - self.not_undef()?.to_u64() - } - - #[inline(always)] - pub fn to_usize(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, u64> { - self.not_undef()?.to_usize(cx) - } - - #[inline(always)] - pub fn to_i8(self) -> EvalResult<'tcx, i8> { - self.not_undef()?.to_i8() - } - - #[inline(always)] - pub fn to_i32(self) -> EvalResult<'tcx, i32> { - self.not_undef()?.to_i32() - } - - #[inline(always)] - pub fn to_i64(self) -> EvalResult<'tcx, i64> { - self.not_undef()?.to_i64() - } - - #[inline(always)] - pub fn to_isize(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, i64> { - self.not_undef()?.to_isize(cx) - } -} - +pub use rustc::mir::interpret::ScalarMaybeUndef; /// A `Value` represents a single immediate self-contained Rust value. /// diff --git a/src/librustc_mir/interpret/snapshot.rs b/src/librustc_mir/interpret/snapshot.rs index 0926ddb5966..4b63335ad96 100644 --- a/src/librustc_mir/interpret/snapshot.rs +++ b/src/librustc_mir/interpret/snapshot.rs @@ -195,11 +195,6 @@ fn snapshot(&self, ctx: &'a Ctx) -> Self::Item { } } -impl_stable_hash_for!(enum ::interpret::ScalarMaybeUndef { - Scalar(v), - Undef -}); - impl_snapshot_for!(enum ScalarMaybeUndef { Scalar(s), Undef, From 6def30ba6a4a3217f4571e5795ac8b3c269bbbb3 Mon Sep 17 00:00:00 2001 From: Oliver Scherer Date: Tue, 23 Oct 2018 18:32:50 +0200 Subject: [PATCH 04/10] Move the `memory_accessed` hook onto the `Extra` value --- src/librustc/mir/interpret/allocation.rs | 30 ++++++++++++++++++++++++ src/librustc/mir/interpret/mod.rs | 2 +- src/librustc_mir/interpret/machine.rs | 20 ---------------- 3 files changed, 31 insertions(+), 21 deletions(-) diff --git a/src/librustc/mir/interpret/allocation.rs b/src/librustc/mir/interpret/allocation.rs index 8444cf5726f..cf38116b0b4 100644 --- a/src/librustc/mir/interpret/allocation.rs +++ b/src/librustc/mir/interpret/allocation.rs @@ -40,6 +40,36 @@ pub struct Allocation { pub extra: Extra, } +trait AllocationExtra { + /// Hook for performing extra checks on a memory read access. + /// + /// Takes read-only access to the allocation so we can keep all the memory read + /// operations take `&self`. Use a `RefCell` in `AllocExtra` if you + /// need to mutate. + #[inline] + fn memory_read( + &self, + _ptr: Pointer, + _size: Size, + ) -> EvalResult<'tcx> { + Ok(()) + } + + /// Hook for performing extra checks on a memory write access. + /// + /// Takes read-only access to the allocation so we can keep all the memory read + /// operations take `&self`. Use a `RefCell` in `AllocExtra` if you + /// need to mutate. 
+ #[inline] + fn memory_written( + &mut self, + _ptr: Pointer, + _size: Size, + ) -> EvalResult<'tcx> { + Ok(()) + } +} + impl Allocation { /// Creates a read-only allocation initialized by the given bytes pub fn from_bytes(slice: &[u8], align: Align) -> Self { diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs index 2821000ccad..282104f7842 100644 --- a/src/librustc/mir/interpret/mod.rs +++ b/src/librustc/mir/interpret/mod.rs @@ -26,7 +26,7 @@ macro_rules! err { pub use self::value::{Scalar, ConstValue}; -pub use self::allocation::Allocation; +pub use self::allocation::{Allocation, MemoryAccess}; use std::fmt; use mir; diff --git a/src/librustc_mir/interpret/machine.rs b/src/librustc_mir/interpret/machine.rs index 27cf28ef41e..7e42fd97c56 100644 --- a/src/librustc_mir/interpret/machine.rs +++ b/src/librustc_mir/interpret/machine.rs @@ -174,26 +174,6 @@ fn box_alloc( dest: PlaceTy<'tcx, Self::PointerTag>, ) -> EvalResult<'tcx>; - /// Hook for performing extra checks on a memory read access. - #[inline] - fn memory_read( - _alloc: &Allocation, - _ptr: Pointer, - _size: Size, - ) -> EvalResult<'tcx> { - Ok(()) - } - - /// Hook for performing extra checks on a memory write access. - #[inline] - fn memory_written( - _alloc: &mut Allocation, - _ptr: Pointer, - _size: Size, - ) -> EvalResult<'tcx> { - Ok(()) - } - /// Hook for performing extra checks when memory gets deallocated. #[inline] fn memory_deallocated( From 2108b6bc9534c1991b5d13a8f8738d4013ddee08 Mon Sep 17 00:00:00 2001 From: Oliver Scherer Date: Thu, 25 Oct 2018 16:09:42 +0200 Subject: [PATCH 05/10] Move UndefMask and Relocations into `allocation.rs` --- src/librustc/mir/interpret/allocation.rs | 139 ++++++++++++++++++++++- src/librustc/mir/interpret/mod.rs | 137 +--------------------- 2 files changed, 138 insertions(+), 138 deletions(-) diff --git a/src/librustc/mir/interpret/allocation.rs b/src/librustc/mir/interpret/allocation.rs index cf38116b0b4..06108633108 100644 --- a/src/librustc/mir/interpret/allocation.rs +++ b/src/librustc/mir/interpret/allocation.rs @@ -10,13 +10,13 @@ //! The virtual memory representation of the MIR interpreter -use super::{ - UndefMask, - Relocations, -}; - use ty::layout::{Size, Align}; use syntax::ast::Mutability; +use rustc_target::abi::HasDataLayout; +use std::iter; +use mir; +use std::ops::{Deref, DerefMut}; +use rustc_data_structures::sorted_map::SortedMap; #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct Allocation { @@ -103,3 +103,132 @@ pub fn undef(size: Size, align: Align) -> Self { } impl<'tcx> ::serialize::UseSpecializedDecodable for &'tcx Allocation {} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct Relocations(SortedMap); + +impl Relocations { + pub fn new() -> Self { + Relocations(SortedMap::new()) + } + + // The caller must guarantee that the given relocations are already sorted + // by address and contain no duplicates. 
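Note the asymmetry in the hooks above: `memory_read` takes `&self`, so a machine that wants bookkeeping on reads must reach for interior mutability in its `Extra` type, exactly as the doc comment suggests. A sketch of that design under assumed, simplified signatures (usize offsets, `String` errors; not the rustc trait itself):

use std::cell::RefCell;

trait AllocExtraHooks {
    fn memory_read(&self, offset: usize, size: usize) -> Result<(), String> {
        let _ = (offset, size);
        Ok(()) // default: no extra checks
    }
}

#[derive(Default)]
struct ReadCounter {
    reads: RefCell<u64>,
}

impl AllocExtraHooks for ReadCounter {
    fn memory_read(&self, _offset: usize, _size: usize) -> Result<(), String> {
        *self.reads.borrow_mut() += 1; // interior mutability through &self
        Ok(())
    }
}

fn main() {
    let extra = ReadCounter::default();
    extra.memory_read(0, 8).unwrap();
    extra.memory_read(8, 8).unwrap();
    assert_eq!(*extra.reads.borrow(), 2);
}

Keeping reads on `&self` lets all the read paths in memory stay shared-borrow friendly, which is why the mutability is pushed into the extra data rather than into the hook signature.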
+ pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self { + Relocations(SortedMap::from_presorted_elements(r)) + } +} + +impl Deref for Relocations { + type Target = SortedMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for Relocations { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Undefined byte tracking +//////////////////////////////////////////////////////////////////////////////// + +type Block = u64; +const BLOCK_SIZE: u64 = 64; + +#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] +pub struct UndefMask { + blocks: Vec, + len: Size, +} + +impl_stable_hash_for!(struct mir::interpret::UndefMask{blocks, len}); + +impl UndefMask { + pub fn new(size: Size) -> Self { + let mut m = UndefMask { + blocks: vec![], + len: Size::ZERO, + }; + m.grow(size, false); + m + } + + /// Check whether the range `start..end` (end-exclusive) is entirely defined. + /// + /// Returns `Ok(())` if it's defined. Otherwise returns the index of the byte + /// at which the first undefined access begins. + #[inline] + pub fn is_range_defined(&self, start: Size, end: Size) -> Result<(), Size> { + if end > self.len { + return Err(self.len); + } + + let idx = (start.bytes()..end.bytes()) + .map(|i| Size::from_bytes(i)) + .find(|&i| !self.get(i)); + + match idx { + Some(idx) => Err(idx), + None => Ok(()) + } + } + + pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) { + let len = self.len; + if end > len { + self.grow(end - len, new_state); + } + self.set_range_inbounds(start, end, new_state); + } + + pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) { + for i in start.bytes()..end.bytes() { + self.set(Size::from_bytes(i), new_state); + } + } + + #[inline] + pub fn get(&self, i: Size) -> bool { + let (block, bit) = bit_index(i); + (self.blocks[block] & 1 << bit) != 0 + } + + #[inline] + pub fn set(&mut self, i: Size, new_state: bool) { + let (block, bit) = bit_index(i); + if new_state { + self.blocks[block] |= 1 << bit; + } else { + self.blocks[block] &= !(1 << bit); + } + } + + pub fn grow(&mut self, amount: Size, new_state: bool) { + let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len.bytes(); + if amount.bytes() > unused_trailing_bits { + let additional_blocks = amount.bytes() / BLOCK_SIZE + 1; + assert_eq!(additional_blocks as usize as u64, additional_blocks); + self.blocks.extend( + iter::repeat(0).take(additional_blocks as usize), + ); + } + let start = self.len; + self.len += amount; + self.set_range_inbounds(start, start + amount, new_state); + } +} + +#[inline] +fn bit_index(bits: Size) -> (usize, usize) { + let bits = bits.bytes(); + let a = bits / BLOCK_SIZE; + let b = bits % BLOCK_SIZE; + assert_eq!(a as usize as u64, a); + assert_eq!(b as usize as u64, b); + (a as usize, b as usize) +} diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs index 282104f7842..3f1e51b890e 100644 --- a/src/librustc/mir/interpret/mod.rs +++ b/src/librustc/mir/interpret/mod.rs @@ -26,7 +26,10 @@ macro_rules! err { pub use self::value::{Scalar, ConstValue}; -pub use self::allocation::{Allocation, MemoryAccess}; +pub use self::allocation::{ + Allocation, AllocationExtra, + Relocations, UndefMask, +}; use std::fmt; use mir; @@ -34,12 +37,9 @@ macro_rules! 
err { use ty::{self, TyCtxt, Instance}; use ty::layout::{self, HasDataLayout, Size}; use middle::region; -use std::iter; use std::io; -use std::ops::{Deref, DerefMut}; use std::hash::Hash; use rustc_serialize::{Encoder, Decodable, Encodable}; -use rustc_data_structures::sorted_map::SortedMap; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sync::{Lock as Mutex, HashMapExt}; use rustc_data_structures::tiny_list::TinyList; @@ -530,35 +530,6 @@ pub fn set_id_same_memory(&mut self, id: AllocId, mem: M) { } } -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] -pub struct Relocations(SortedMap); - -impl Relocations { - pub fn new() -> Self { - Relocations(SortedMap::new()) - } - - // The caller must guarantee that the given relocations are already sorted - // by address and contain no duplicates. - pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self { - Relocations(SortedMap::from_presorted_elements(r)) - } -} - -impl Deref for Relocations { - type Target = SortedMap; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for Relocations { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - //////////////////////////////////////////////////////////////////////////////// // Methods to access integers in the target endianness //////////////////////////////////////////////////////////////////////////////// @@ -602,106 +573,6 @@ pub fn truncate(value: u128, size: Size) -> u128 { (value << shift) >> shift } -//////////////////////////////////////////////////////////////////////////////// -// Undefined byte tracking -//////////////////////////////////////////////////////////////////////////////// - -type Block = u64; -const BLOCK_SIZE: u64 = 64; - -#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] -pub struct UndefMask { - blocks: Vec, - len: Size, -} - -impl_stable_hash_for!(struct mir::interpret::UndefMask{blocks, len}); - -impl UndefMask { - pub fn new(size: Size) -> Self { - let mut m = UndefMask { - blocks: vec![], - len: Size::ZERO, - }; - m.grow(size, false); - m - } - - /// Check whether the range `start..end` (end-exclusive) is entirely defined. - /// - /// Returns `Ok(())` if it's defined. Otherwise returns the index of the byte - /// at which the first undefined access begins. 
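To make that contract concrete: the check scans `start..end` and reports the first undefined byte, or the mask length when the range itself runs past the end. A self-contained miniature over a plain bool-per-byte vector (`MiniMask` is illustrative, not the real `UndefMask` API):

    struct MiniMask {
        defined: Vec<bool>,
    }

    impl MiniMask {
        /// `Ok(())` if every byte in `start..end` is defined, otherwise
        /// `Err(i)` with the first undefined index (or the mask length
        /// if the range is out of bounds).
        fn is_range_defined(&self, start: usize, end: usize) -> Result<(), usize> {
            if end > self.defined.len() {
                return Err(self.defined.len());
            }
            match (start..end).find(|&i| !self.defined[i]) {
                Some(i) => Err(i),
                None => Ok(()),
            }
        }
    }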
- #[inline] - pub fn is_range_defined(&self, start: Size, end: Size) -> Result<(), Size> { - if end > self.len { - return Err(self.len); - } - - let idx = (start.bytes()..end.bytes()) - .map(|i| Size::from_bytes(i)) - .find(|&i| !self.get(i)); - - match idx { - Some(idx) => Err(idx), - None => Ok(()) - } - } - - pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) { - let len = self.len; - if end > len { - self.grow(end - len, new_state); - } - self.set_range_inbounds(start, end, new_state); - } - - pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) { - for i in start.bytes()..end.bytes() { - self.set(Size::from_bytes(i), new_state); - } - } - - #[inline] - pub fn get(&self, i: Size) -> bool { - let (block, bit) = bit_index(i); - (self.blocks[block] & 1 << bit) != 0 - } - - #[inline] - pub fn set(&mut self, i: Size, new_state: bool) { - let (block, bit) = bit_index(i); - if new_state { - self.blocks[block] |= 1 << bit; - } else { - self.blocks[block] &= !(1 << bit); - } - } - - pub fn grow(&mut self, amount: Size, new_state: bool) { - let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len.bytes(); - if amount.bytes() > unused_trailing_bits { - let additional_blocks = amount.bytes() / BLOCK_SIZE + 1; - assert_eq!(additional_blocks as usize as u64, additional_blocks); - self.blocks.extend( - iter::repeat(0).take(additional_blocks as usize), - ); - } - let start = self.len; - self.len += amount; - self.set_range_inbounds(start, start + amount, new_state); - } -} - -#[inline] -fn bit_index(bits: Size) -> (usize, usize) { - let bits = bits.bytes(); - let a = bits / BLOCK_SIZE; - let b = bits % BLOCK_SIZE; - assert_eq!(a as usize as u64, a); - assert_eq!(b as usize as u64, b); - (a as usize, b as usize) -} - #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] pub enum ScalarMaybeUndef { Scalar(Scalar), From 99ed98b1ff42cbe883b67ef70137013345fbdea1 Mon Sep 17 00:00:00 2001 From: Oliver Scherer Date: Thu, 25 Oct 2018 16:11:35 +0200 Subject: [PATCH 06/10] Move ScalarMaybeUndef into `value.rs` --- src/librustc/mir/interpret/mod.rs | 130 +--------------------------- src/librustc/mir/interpret/value.rs | 128 +++++++++++++++++++++++++++ 2 files changed, 129 insertions(+), 129 deletions(-) diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs index 3f1e51b890e..509180a54f8 100644 --- a/src/librustc/mir/interpret/mod.rs +++ b/src/librustc/mir/interpret/mod.rs @@ -24,7 +24,7 @@ macro_rules! 
err { FrameInfo, ConstEvalResult, ErrorHandled, }; -pub use self::value::{Scalar, ConstValue}; +pub use self::value::{Scalar, ConstValue, ScalarMaybeUndef}; pub use self::allocation::{ Allocation, AllocationExtra, @@ -572,131 +572,3 @@ pub fn truncate(value: u128, size: Size) -> u128 { // truncate (shift left to drop out leftover values, shift right to fill with zeroes) (value << shift) >> shift } - -#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] -pub enum ScalarMaybeUndef { - Scalar(Scalar), - Undef, -} - -impl From> for ScalarMaybeUndef { - #[inline(always)] - fn from(s: Scalar) -> Self { - ScalarMaybeUndef::Scalar(s) - } -} - -impl fmt::Display for ScalarMaybeUndef { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - ScalarMaybeUndef::Undef => write!(f, "uninitialized bytes"), - ScalarMaybeUndef::Scalar(s) => write!(f, "{}", s), - } - } -} - -impl<'tcx> ScalarMaybeUndef<()> { - #[inline] - pub fn with_default_tag(self) -> ScalarMaybeUndef - where Tag: Default - { - match self { - ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.with_default_tag()), - ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef, - } - } -} - -impl<'tcx, Tag> ScalarMaybeUndef { - #[inline] - pub fn erase_tag(self) -> ScalarMaybeUndef - { - match self { - ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.erase_tag()), - ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef, - } - } - - #[inline] - pub fn not_undef(self) -> EvalResult<'static, Scalar> { - match self { - ScalarMaybeUndef::Scalar(scalar) => Ok(scalar), - ScalarMaybeUndef::Undef => err!(ReadUndefBytes(Size::from_bytes(0))), - } - } - - #[inline(always)] - pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { - self.not_undef()?.to_ptr() - } - - #[inline(always)] - pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> { - self.not_undef()?.to_bits(target_size) - } - - #[inline(always)] - pub fn to_bool(self) -> EvalResult<'tcx, bool> { - self.not_undef()?.to_bool() - } - - #[inline(always)] - pub fn to_char(self) -> EvalResult<'tcx, char> { - self.not_undef()?.to_char() - } - - #[inline(always)] - pub fn to_f32(self) -> EvalResult<'tcx, f32> { - self.not_undef()?.to_f32() - } - - #[inline(always)] - pub fn to_f64(self) -> EvalResult<'tcx, f64> { - self.not_undef()?.to_f64() - } - - #[inline(always)] - pub fn to_u8(self) -> EvalResult<'tcx, u8> { - self.not_undef()?.to_u8() - } - - #[inline(always)] - pub fn to_u32(self) -> EvalResult<'tcx, u32> { - self.not_undef()?.to_u32() - } - - #[inline(always)] - pub fn to_u64(self) -> EvalResult<'tcx, u64> { - self.not_undef()?.to_u64() - } - - #[inline(always)] - pub fn to_usize(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, u64> { - self.not_undef()?.to_usize(cx) - } - - #[inline(always)] - pub fn to_i8(self) -> EvalResult<'tcx, i8> { - self.not_undef()?.to_i8() - } - - #[inline(always)] - pub fn to_i32(self) -> EvalResult<'tcx, i32> { - self.not_undef()?.to_i32() - } - - #[inline(always)] - pub fn to_i64(self) -> EvalResult<'tcx, i64> { - self.not_undef()?.to_i64() - } - - #[inline(always)] - pub fn to_isize(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, i64> { - self.not_undef()?.to_isize(cx) - } -} - -impl_stable_hash_for!(enum ::mir::interpret::ScalarMaybeUndef { - Scalar(v), - Undef -}); diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs index 3f5399396ab..47c42c9431a 100644 --- a/src/librustc/mir/interpret/value.rs +++ b/src/librustc/mir/interpret/value.rs @@ 
-392,3 +392,131 @@ fn from(ptr: Pointer) -> Self { Scalar::Ptr(ptr) } } + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] +pub enum ScalarMaybeUndef { + Scalar(Scalar), + Undef, +} + +impl From> for ScalarMaybeUndef { + #[inline(always)] + fn from(s: Scalar) -> Self { + ScalarMaybeUndef::Scalar(s) + } +} + +impl fmt::Display for ScalarMaybeUndef { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ScalarMaybeUndef::Undef => write!(f, "uninitialized bytes"), + ScalarMaybeUndef::Scalar(s) => write!(f, "{}", s), + } + } +} + +impl<'tcx> ScalarMaybeUndef<()> { + #[inline] + pub fn with_default_tag(self) -> ScalarMaybeUndef + where Tag: Default + { + match self { + ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.with_default_tag()), + ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef, + } + } +} + +impl<'tcx, Tag> ScalarMaybeUndef { + #[inline] + pub fn erase_tag(self) -> ScalarMaybeUndef + { + match self { + ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.erase_tag()), + ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef, + } + } + + #[inline] + pub fn not_undef(self) -> EvalResult<'static, Scalar> { + match self { + ScalarMaybeUndef::Scalar(scalar) => Ok(scalar), + ScalarMaybeUndef::Undef => err!(ReadUndefBytes(Size::from_bytes(0))), + } + } + + #[inline(always)] + pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { + self.not_undef()?.to_ptr() + } + + #[inline(always)] + pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> { + self.not_undef()?.to_bits(target_size) + } + + #[inline(always)] + pub fn to_bool(self) -> EvalResult<'tcx, bool> { + self.not_undef()?.to_bool() + } + + #[inline(always)] + pub fn to_char(self) -> EvalResult<'tcx, char> { + self.not_undef()?.to_char() + } + + #[inline(always)] + pub fn to_f32(self) -> EvalResult<'tcx, f32> { + self.not_undef()?.to_f32() + } + + #[inline(always)] + pub fn to_f64(self) -> EvalResult<'tcx, f64> { + self.not_undef()?.to_f64() + } + + #[inline(always)] + pub fn to_u8(self) -> EvalResult<'tcx, u8> { + self.not_undef()?.to_u8() + } + + #[inline(always)] + pub fn to_u32(self) -> EvalResult<'tcx, u32> { + self.not_undef()?.to_u32() + } + + #[inline(always)] + pub fn to_u64(self) -> EvalResult<'tcx, u64> { + self.not_undef()?.to_u64() + } + + #[inline(always)] + pub fn to_usize(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, u64> { + self.not_undef()?.to_usize(cx) + } + + #[inline(always)] + pub fn to_i8(self) -> EvalResult<'tcx, i8> { + self.not_undef()?.to_i8() + } + + #[inline(always)] + pub fn to_i32(self) -> EvalResult<'tcx, i32> { + self.not_undef()?.to_i32() + } + + #[inline(always)] + pub fn to_i64(self) -> EvalResult<'tcx, i64> { + self.not_undef()?.to_i64() + } + + #[inline(always)] + pub fn to_isize(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, i64> { + self.not_undef()?.to_isize(cx) + } +} + +impl_stable_hash_for!(enum ::mir::interpret::ScalarMaybeUndef { + Scalar(v), + Undef +}); From 00e524ce340e1f0c11903d2e20e687d396f567d6 Mon Sep 17 00:00:00 2001 From: Oliver Scherer Date: Thu, 25 Oct 2018 18:23:09 +0200 Subject: [PATCH 07/10] Move `Pointer` to its own module --- src/librustc/mir/interpret/mod.rs | 151 +------------------------- src/librustc/mir/interpret/pointer.rs | 151 ++++++++++++++++++++++++++ 2 files changed, 155 insertions(+), 147 deletions(-) create mode 100644 src/librustc/mir/interpret/pointer.rs diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs index 
509180a54f8..40daf78f546 100644 --- a/src/librustc/mir/interpret/mod.rs +++ b/src/librustc/mir/interpret/mod.rs @@ -18,6 +18,7 @@ macro_rules! err { mod error; mod value; mod allocation; +mod pointer; pub use self::error::{ EvalError, EvalResult, EvalErrorKind, AssertMessage, ConstEvalErr, struct_error, @@ -31,11 +32,13 @@ macro_rules! err { Relocations, UndefMask, }; +pub use self::pointer::{Pointer, PointerArithmetic}; + use std::fmt; use mir; use hir::def_id::DefId; use ty::{self, TyCtxt, Instance}; -use ty::layout::{self, HasDataLayout, Size}; +use ty::layout::{self, Size}; use middle::region; use std::io; use std::hash::Hash; @@ -80,152 +83,6 @@ pub struct GlobalId<'tcx> { pub promoted: Option, } -//////////////////////////////////////////////////////////////////////////////// -// Pointer arithmetic -//////////////////////////////////////////////////////////////////////////////// - -pub trait PointerArithmetic: layout::HasDataLayout { - // These are not supposed to be overridden. - - #[inline(always)] - fn pointer_size(&self) -> Size { - self.data_layout().pointer_size - } - - //// Trunace the given value to the pointer size; also return whether there was an overflow - #[inline] - fn truncate_to_ptr(&self, val: u128) -> (u64, bool) { - let max_ptr_plus_1 = 1u128 << self.pointer_size().bits(); - ((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1) - } - - #[inline] - fn offset<'tcx>(&self, val: u64, i: u64) -> EvalResult<'tcx, u64> { - let (res, over) = self.overflowing_offset(val, i); - if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) } - } - - #[inline] - fn overflowing_offset(&self, val: u64, i: u64) -> (u64, bool) { - let (res, over1) = val.overflowing_add(i); - let (res, over2) = self.truncate_to_ptr(u128::from(res)); - (res, over1 || over2) - } - - #[inline] - fn signed_offset<'tcx>(&self, val: u64, i: i64) -> EvalResult<'tcx, u64> { - let (res, over) = self.overflowing_signed_offset(val, i128::from(i)); - if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) } - } - - // Overflow checking only works properly on the range from -u64 to +u64. - #[inline] - fn overflowing_signed_offset(&self, val: u64, i: i128) -> (u64, bool) { - // FIXME: is it possible to over/underflow here? - if i < 0 { - // trickery to ensure that i64::min_value() works fine - // this formula only works for true negative values, it panics for zero! - let n = u64::max_value() - (i as u64) + 1; - val.overflowing_sub(n) - } else { - self.overflowing_offset(val, i as u64) - } - } -} - -impl PointerArithmetic for T {} - - -/// Pointer is generic over the type that represents a reference to Allocations, -/// thus making it possible for the most convenient representation to be used in -/// each context. -/// -/// Defaults to the index based and loosely coupled AllocId. -/// -/// Pointer is also generic over the `Tag` associated with each pointer, -/// which is used to do provenance tracking during execution. 
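The `Tag` parameter is what lets a machine attach provenance to every pointer while CTFE instantiates it with `()` and pays nothing for it. A toy version of the shape (all names here are illustrative; the real type is re-added in pointer.rs below):

    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    struct TaggedPtr<Tag = ()> {
        alloc_id: u64,
        offset: u64,
        tag: Tag,
    }

    /// A borrow-tracking machine might carry a tag like this; plain
    /// CTFE would just use `TaggedPtr<()>`.
    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    enum Borrow {
        Shared(u64),
        Unique(u64),
    }

    fn _demo(ptr: TaggedPtr<Borrow>) -> Borrow {
        ptr.tag // provenance travels with the pointer value
    }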
-#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] -pub struct Pointer { - pub alloc_id: Id, - pub offset: Size, - pub tag: Tag, -} - -/// Produces a `Pointer` which points to the beginning of the Allocation -impl From for Pointer { - #[inline(always)] - fn from(alloc_id: AllocId) -> Self { - Pointer::new(alloc_id, Size::ZERO) - } -} - -impl<'tcx> Pointer<()> { - #[inline(always)] - pub fn new(alloc_id: AllocId, offset: Size) -> Self { - Pointer { alloc_id, offset, tag: () } - } - - #[inline(always)] - pub fn with_default_tag(self) -> Pointer - where Tag: Default - { - Pointer::new_with_tag(self.alloc_id, self.offset, Default::default()) - } -} - -impl<'tcx, Tag> Pointer { - #[inline(always)] - pub fn new_with_tag(alloc_id: AllocId, offset: Size, tag: Tag) -> Self { - Pointer { alloc_id, offset, tag } - } - - #[inline] - pub fn offset(self, i: Size, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> { - Ok(Pointer::new_with_tag( - self.alloc_id, - Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?), - self.tag - )) - } - - #[inline] - pub fn overflowing_offset(self, i: Size, cx: &impl HasDataLayout) -> (Self, bool) { - let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes()); - (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over) - } - - #[inline(always)] - pub fn wrapping_offset(self, i: Size, cx: &impl HasDataLayout) -> Self { - self.overflowing_offset(i, cx).0 - } - - #[inline] - pub fn signed_offset(self, i: i64, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> { - Ok(Pointer::new_with_tag( - self.alloc_id, - Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?), - self.tag, - )) - } - - #[inline] - pub fn overflowing_signed_offset(self, i: i128, cx: &impl HasDataLayout) -> (Self, bool) { - let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i); - (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over) - } - - #[inline(always)] - pub fn wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self { - self.overflowing_signed_offset(i128::from(i), cx).0 - } - - #[inline(always)] - pub fn erase_tag(self) -> Pointer { - Pointer { alloc_id: self.alloc_id, offset: self.offset, tag: () } - } -} - - #[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd, Debug)] pub struct AllocId(pub u64); diff --git a/src/librustc/mir/interpret/pointer.rs b/src/librustc/mir/interpret/pointer.rs new file mode 100644 index 00000000000..1001f078e28 --- /dev/null +++ b/src/librustc/mir/interpret/pointer.rs @@ -0,0 +1,151 @@ + use mir; +use ty::layout::{self, HasDataLayout, Size}; + +use super::{ + AllocId, EvalResult, +}; + +//////////////////////////////////////////////////////////////////////////////// +// Pointer arithmetic +//////////////////////////////////////////////////////////////////////////////// + +pub trait PointerArithmetic: layout::HasDataLayout { + // These are not supposed to be overridden. 
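"Not supposed to be overridden" is in fact guaranteed by the blanket impl at the end of this trait: since `impl<T: HasDataLayout> PointerArithmetic for T {}` covers every eligible type, no hand-written impl can coexist with it, so the default bodies are final. The pattern, reduced to a standalone sketch with hypothetical names:

    /// Minimal stand-in for `HasDataLayout`.
    trait HasBits {
        fn ptr_bits(&self) -> u32;
    }

    /// Extension-trait pattern: every method has a default body.
    trait PtrMath: HasBits {
        fn max_ptr(&self) -> u128 {
            // Assumes `ptr_bits() <= 64`, as on real targets.
            (1u128 << self.ptr_bits()) - 1
        }
    }

    /// The blanket impl makes the defaults effectively final: a manual
    /// `impl PtrMath for SomeType` would conflict with it.
    impl<T: HasBits> PtrMath for T {}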
+ + #[inline(always)] + fn pointer_size(&self) -> Size { + self.data_layout().pointer_size + } + + //// Trunace the given value to the pointer size; also return whether there was an overflow + #[inline] + fn truncate_to_ptr(&self, val: u128) -> (u64, bool) { + let max_ptr_plus_1 = 1u128 << self.pointer_size().bits(); + ((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1) + } + + #[inline] + fn offset<'tcx>(&self, val: u64, i: u64) -> EvalResult<'tcx, u64> { + let (res, over) = self.overflowing_offset(val, i); + if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) } + } + + #[inline] + fn overflowing_offset(&self, val: u64, i: u64) -> (u64, bool) { + let (res, over1) = val.overflowing_add(i); + let (res, over2) = self.truncate_to_ptr(u128::from(res)); + (res, over1 || over2) + } + + #[inline] + fn signed_offset<'tcx>(&self, val: u64, i: i64) -> EvalResult<'tcx, u64> { + let (res, over) = self.overflowing_signed_offset(val, i128::from(i)); + if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) } + } + + // Overflow checking only works properly on the range from -u64 to +u64. + #[inline] + fn overflowing_signed_offset(&self, val: u64, i: i128) -> (u64, bool) { + // FIXME: is it possible to over/underflow here? + if i < 0 { + // trickery to ensure that i64::min_value() works fine + // this formula only works for true negative values, it panics for zero! + let n = u64::max_value() - (i as u64) + 1; + val.overflowing_sub(n) + } else { + self.overflowing_offset(val, i as u64) + } + } +} + +impl PointerArithmetic for T {} + + +/// Pointer is generic over the type that represents a reference to Allocations, +/// thus making it possible for the most convenient representation to be used in +/// each context. +/// +/// Defaults to the index based and loosely coupled AllocId. +/// +/// Pointer is also generic over the `Tag` associated with each pointer, +/// which is used to do provenance tracking during execution. 
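The negative branch of `overflowing_signed_offset` above is worth a worked check, since negating `i64::min_value()` in 64-bit arithmetic overflows, which is why the formula avoids negating at all: the wrapping cast keeps the low 64 bits of `i`, and `max - bits + 1` recovers the magnitude. A standalone sketch:

    fn magnitude_of_negative(i: i128) -> u64 {
        // Same formula as the trait method. Valid only for strictly
        // negative `i`; as the code comment above notes, `i == 0` would
        // overflow the subtraction.
        assert!(i < 0);
        u64::max_value() - (i as u64) + 1
    }

    fn main() {
        assert_eq!(magnitude_of_negative(-5), 5);
        assert_eq!(magnitude_of_negative(i64::min_value() as i128), 1u64 << 63);
    }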
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] +pub struct Pointer { + pub alloc_id: Id, + pub offset: Size, + pub tag: Tag, +} + +/// Produces a `Pointer` which points to the beginning of the Allocation +impl From for Pointer { + #[inline(always)] + fn from(alloc_id: AllocId) -> Self { + Pointer::new(alloc_id, Size::ZERO) + } +} + +impl<'tcx> Pointer<()> { + #[inline(always)] + pub fn new(alloc_id: AllocId, offset: Size) -> Self { + Pointer { alloc_id, offset, tag: () } + } + + #[inline(always)] + pub fn with_default_tag(self) -> Pointer + where Tag: Default + { + Pointer::new_with_tag(self.alloc_id, self.offset, Default::default()) + } +} + +impl<'tcx, Tag> Pointer { + #[inline(always)] + pub fn new_with_tag(alloc_id: AllocId, offset: Size, tag: Tag) -> Self { + Pointer { alloc_id, offset, tag } + } + + #[inline] + pub fn offset(self, i: Size, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> { + Ok(Pointer::new_with_tag( + self.alloc_id, + Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?), + self.tag + )) + } + + #[inline] + pub fn overflowing_offset(self, i: Size, cx: &impl HasDataLayout) -> (Self, bool) { + let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes()); + (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over) + } + + #[inline(always)] + pub fn wrapping_offset(self, i: Size, cx: &impl HasDataLayout) -> Self { + self.overflowing_offset(i, cx).0 + } + + #[inline] + pub fn signed_offset(self, i: i64, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> { + Ok(Pointer::new_with_tag( + self.alloc_id, + Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?), + self.tag, + )) + } + + #[inline] + pub fn overflowing_signed_offset(self, i: i128, cx: &impl HasDataLayout) -> (Self, bool) { + let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i); + (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over) + } + + #[inline(always)] + pub fn wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self { + self.overflowing_signed_offset(i128::from(i), cx).0 + } + + #[inline(always)] + pub fn erase_tag(self) -> Pointer { + Pointer { alloc_id: self.alloc_id, offset: self.offset, tag: () } + } +} \ No newline at end of file From 769ee79a6978226a3ae788524b3276e71ac000d4 Mon Sep 17 00:00:00 2001 From: Oliver Scherer Date: Sun, 4 Nov 2018 15:14:54 +0100 Subject: [PATCH 08/10] Fallout --- src/librustc/mir/interpret/allocation.rs | 11 +++++++---- src/librustc_mir/interpret/machine.rs | 4 ++-- src/librustc_mir/interpret/memory.rs | 6 +++--- src/librustc_mir/interpret/place.rs | 3 ++- src/librustc_mir/interpret/validity.rs | 2 +- 5 files changed, 15 insertions(+), 11 deletions(-) diff --git a/src/librustc/mir/interpret/allocation.rs b/src/librustc/mir/interpret/allocation.rs index 06108633108..cd8064adf52 100644 --- a/src/librustc/mir/interpret/allocation.rs +++ b/src/librustc/mir/interpret/allocation.rs @@ -10,9 +10,10 @@ //! The virtual memory representation of the MIR interpreter +use super::{Pointer, EvalResult, AllocId}; + use ty::layout::{Size, Align}; use syntax::ast::Mutability; -use rustc_target::abi::HasDataLayout; use std::iter; use mir; use std::ops::{Deref, DerefMut}; @@ -40,7 +41,7 @@ pub struct Allocation { pub extra: Extra, } -trait AllocationExtra { +pub trait AllocationExtra: ::std::fmt::Debug + Default + Clone { /// Hook for performing extra checks on a memory read access. 
/// /// Takes read-only access to the allocation so we can keep all the memory read @@ -49,7 +50,7 @@ trait AllocationExtra { #[inline] fn memory_read( &self, - _ptr: Pointer, + _ptr: Pointer, _size: Size, ) -> EvalResult<'tcx> { Ok(()) @@ -63,13 +64,15 @@ fn memory_read( #[inline] fn memory_written( &mut self, - _ptr: Pointer, + _ptr: Pointer, _size: Size, ) -> EvalResult<'tcx> { Ok(()) } } +impl AllocationExtra<()> for () {} + impl Allocation { /// Creates a read-only allocation initialized by the given bytes pub fn from_bytes(slice: &[u8], align: Align) -> Self { diff --git a/src/librustc_mir/interpret/machine.rs b/src/librustc_mir/interpret/machine.rs index 7e42fd97c56..047996777ea 100644 --- a/src/librustc_mir/interpret/machine.rs +++ b/src/librustc_mir/interpret/machine.rs @@ -20,7 +20,7 @@ use rustc::ty::{self, layout::{Size, TyLayout}, query::TyCtxtAt}; use super::{ - Allocation, AllocId, EvalResult, Scalar, + Allocation, AllocId, EvalResult, Scalar, AllocationExtra, EvalContext, PlaceTy, MPlaceTy, OpTy, Pointer, MemoryKind, }; @@ -78,7 +78,7 @@ pub trait Machine<'a, 'mir, 'tcx>: Sized { type PointerTag: ::std::fmt::Debug + Default + Copy + Eq + Hash + 'static; /// Extra data stored in every allocation. - type AllocExtra: ::std::fmt::Debug + Default + Clone; + type AllocExtra: AllocationExtra; /// Memory's allocation map type MemoryMap: diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 6a109efe3c4..ef2510dec35 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -28,7 +28,7 @@ use syntax::ast::Mutability; use super::{ - Pointer, AllocId, Allocation, ConstValue, GlobalId, + Pointer, AllocId, Allocation, ConstValue, GlobalId, AllocationExtra, EvalResult, Scalar, EvalErrorKind, AllocType, PointerArithmetic, Machine, AllocMap, MayLeak, ScalarMaybeUndef, ErrorHandled, }; @@ -637,7 +637,7 @@ fn get_bytes_internal( } let alloc = self.get(ptr.alloc_id)?; - M::memory_read(alloc, ptr, size)?; + AllocationExtra::memory_read(&alloc.extra, ptr, size)?; assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes()); assert_eq!(size.bytes() as usize as u64, size.bytes()); @@ -683,7 +683,7 @@ fn get_bytes_mut( self.clear_relocations(ptr, size)?; let alloc = self.get_mut(ptr.alloc_id)?; - M::memory_written(alloc, ptr, size)?; + AllocationExtra::memory_written(&mut alloc.extra, ptr, size)?; assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes()); assert_eq!(size.bytes() as usize as u64, size.bytes()); diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs index 19430c85cf7..6f8bbf3c4a9 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -24,7 +24,7 @@ GlobalId, AllocId, Allocation, Scalar, EvalResult, Pointer, PointerArithmetic }; use super::{ - EvalContext, Machine, AllocMap, + EvalContext, Machine, AllocMap, AllocationExtra, Immediate, ImmTy, ScalarMaybeUndef, Operand, OpTy, MemoryKind }; @@ -264,6 +264,7 @@ impl<'a, 'mir, 'tcx, Tag, M> EvalContext<'a, 'mir, 'tcx, M> Tag: ::std::fmt::Debug+Default+Copy+Eq+Hash+'static, M: Machine<'a, 'mir, 'tcx, PointerTag=Tag>, M::MemoryMap: AllocMap, Allocation)>, + M::AllocExtra: AllocationExtra, { /// Take a value, which represents a (thin or fat) reference, and make it a place. /// Alignment is just based on the type. This is the inverse of `create_ref`. 
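The memory.rs hunks above show the point of the refactor: the hook is now resolved through the allocation's `extra` field rather than through the machine type. The shape, reduced to a standalone sketch (all names hypothetical):

    trait ReadHook {
        fn memory_read(&self) -> Result<(), ()> {
            Ok(()) // no-op default, like the trait in the patch
        }
    }

    /// CTFE-style "no extra data": the no-op default applies.
    impl ReadHook for () {}

    struct Alloc<Extra: ReadHook> {
        bytes: Vec<u8>,
        extra: Extra,
    }

    fn get_bytes<Extra: ReadHook>(alloc: &Alloc<Extra>) -> Result<&[u8], ()> {
        // Mirrors `AllocationExtra::memory_read(&alloc.extra, ptr, size)?`:
        // the hook travels with the allocation, not with the machine.
        ReadHook::memory_read(&alloc.extra)?;
        Ok(&alloc.bytes)
    }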
diff --git a/src/librustc_mir/interpret/validity.rs b/src/librustc_mir/interpret/validity.rs index 8fde0c9b8af..84aa5b67566 100644 --- a/src/librustc_mir/interpret/validity.rs +++ b/src/librustc_mir/interpret/validity.rs @@ -17,7 +17,7 @@ use rustc::ty; use rustc_data_structures::fx::FxHashSet; use rustc::mir::interpret::{ - Scalar, AllocType, EvalResult, EvalErrorKind + Scalar, AllocType, EvalResult, EvalErrorKind, }; use super::{ From 5d58a036de126f2d0900376be5da666de6c9f770 Mon Sep 17 00:00:00 2001 From: Oliver Scherer Date: Sun, 4 Nov 2018 16:01:50 +0100 Subject: [PATCH 09/10] Give `AllocationExtra`s access to their entire `Allocation` --- src/librustc/mir/interpret/allocation.rs | 8 ++------ src/librustc_mir/interpret/memory.rs | 4 ++-- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/src/librustc/mir/interpret/allocation.rs b/src/librustc/mir/interpret/allocation.rs index cd8064adf52..e55997099c8 100644 --- a/src/librustc/mir/interpret/allocation.rs +++ b/src/librustc/mir/interpret/allocation.rs @@ -49,7 +49,7 @@ pub trait AllocationExtra: ::std::fmt::Debug + Default + Clone { /// need to mutate. #[inline] fn memory_read( - &self, + _alloc: &Allocation, _ptr: Pointer, _size: Size, ) -> EvalResult<'tcx> { @@ -57,13 +57,9 @@ fn memory_read( } /// Hook for performing extra checks on a memory write access. - /// - /// Takes read-only access to the allocation so we can keep all the memory read - /// operations take `&self`. Use a `RefCell` in `AllocExtra` if you - /// need to mutate. #[inline] fn memory_written( - &mut self, + _alloc: &mut Allocation, _ptr: Pointer, _size: Size, ) -> EvalResult<'tcx> { diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index ef2510dec35..10bc984a447 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -637,7 +637,7 @@ fn get_bytes_internal( } let alloc = self.get(ptr.alloc_id)?; - AllocationExtra::memory_read(&alloc.extra, ptr, size)?; + AllocationExtra::memory_read(alloc, ptr, size)?; assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes()); assert_eq!(size.bytes() as usize as u64, size.bytes()); @@ -683,7 +683,7 @@ fn get_bytes_mut( self.clear_relocations(ptr, size)?; let alloc = self.get_mut(ptr.alloc_id)?; - AllocationExtra::memory_written(&mut alloc.extra, ptr, size)?; + AllocationExtra::memory_written(alloc, ptr, size)?; assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes()); assert_eq!(size.bytes() as usize as u64, size.bytes()); From 428af73e7c3b77a05a4d998e4c0fe3146b80c332 Mon Sep 17 00:00:00 2001 From: Oliver Scherer Date: Wed, 7 Nov 2018 16:19:51 +0100 Subject: [PATCH 10/10] Rebase fallout --- src/librustc/mir/interpret/pointer.rs | 4 ++-- src/librustc_mir/interpret/operand.rs | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/librustc/mir/interpret/pointer.rs b/src/librustc/mir/interpret/pointer.rs index 1001f078e28..969f2c0e837 100644 --- a/src/librustc/mir/interpret/pointer.rs +++ b/src/librustc/mir/interpret/pointer.rs @@ -1,4 +1,4 @@ - use mir; +use mir; use ty::layout::{self, HasDataLayout, Size}; use super::{ @@ -148,4 +148,4 @@ pub fn wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self { pub fn erase_tag(self) -> Pointer { Pointer { alloc_id: self.alloc_id, offset: self.offset, tag: () } } -} \ No newline at end of file +} diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs index 76bf2153167..d43ec0bd349 100644 --- 
a/src/librustc_mir/interpret/operand.rs +++ b/src/librustc_mir/interpret/operand.rs @@ -12,7 +12,6 @@ //! All high-level functions to read from memory work on operands as sources. use std::convert::TryInto; -use std::fmt; use rustc::{mir, ty}; use rustc::ty::layout::{self, Size, LayoutOf, TyLayout, HasDataLayout, IntegerExt};