CTFE: throw unsupported error when partially overwriting a pointer

Ralf Jung 2021-07-18 11:15:17 +02:00
parent e66a8c260c
commit 14de6ec8d8
4 changed files with 69 additions and 27 deletions
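The change below makes CTFE reject writes that overwrite only *part* of a pointer stored in memory. A hypothetical illustration of the kind of code affected (not from this commit; it also assumes the unstable const features that permit raw-pointer writes in const contexts):

```rust
// During const evaluation, a stored pointer is a relocation (an
// `AllocId` plus offset), not concrete bytes. Overwriting only some of
// its bytes yields a state `Allocation` cannot represent, so CTFE now
// raises an unsupported-operation error instead of mishandling it.
const PARTIAL: () = unsafe {
    let mut p = &42u8 as *const u8;
    // View the pointer's own storage as raw bytes...
    let first_byte = &mut p as *mut *const u8 as *mut u8;
    // ...and clobber just one of them: a partial pointer overwrite.
    *first_byte = 0;
};
```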

View File

@@ -12,7 +12,7 @@
use rustc_target::abi::{Align, HasDataLayout, Size};
use super::{
read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer,
read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer, Provenance,
ResourceExhaustionInfo, Scalar, ScalarMaybeUninit, UndefinedBehaviorInfo, UninitBytesAccess,
UnsupportedOpInfo,
};
@@ -53,6 +53,8 @@ pub struct Allocation<Tag = AllocId, Extra = ()> {
pub enum AllocError {
/// Encountered a pointer where we needed raw bytes.
ReadPointerAsBytes,
/// Partially overwriting a pointer.
PartialPointerOverwrite(Size),
/// Using uninitialized data where it is not allowed.
InvalidUninitBytes(Option<UninitBytesAccess>),
}
@@ -60,11 +62,13 @@ pub enum AllocError {
impl AllocError {
pub fn to_interp_error<'tcx>(self, alloc_id: AllocId) -> InterpError<'tcx> {
use AllocError::*;
match self {
AllocError::ReadPointerAsBytes => {
InterpError::Unsupported(UnsupportedOpInfo::ReadPointerAsBytes)
}
AllocError::InvalidUninitBytes(info) => InterpError::UndefinedBehavior(
ReadPointerAsBytes => InterpError::Unsupported(UnsupportedOpInfo::ReadPointerAsBytes),
PartialPointerOverwrite(offset) => InterpError::Unsupported(
UnsupportedOpInfo::PartialPointerOverwrite(Pointer::new(alloc_id, offset)),
),
InvalidUninitBytes(info) => InterpError::UndefinedBehavior(
UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))),
),
}
@@ -218,7 +222,7 @@ pub fn relocations(&self) -> &Relocations<Tag> {
}
/// Byte accessors.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
/// The last argument controls whether we error out when there are uninitialized
/// or pointer bytes. You should never call this, call `get_bytes` or
/// `get_bytes_with_uninit_and_ptr` instead,
@@ -275,30 +279,35 @@ pub fn get_bytes_with_uninit_and_ptr(
/// It is the caller's responsibility to check bounds and alignment beforehand.
/// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
/// on `InterpCx` instead.
pub fn get_bytes_mut(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> &mut [u8] {
pub fn get_bytes_mut(
&mut self,
cx: &impl HasDataLayout,
range: AllocRange,
) -> AllocResult<&mut [u8]> {
self.mark_init(range, true);
self.clear_relocations(cx, range);
self.clear_relocations(cx, range)?;
&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
}
/// A raw pointer variant of `get_bytes_mut` that avoids invalidating existing aliases into this memory.
pub fn get_bytes_mut_ptr(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> *mut [u8] {
pub fn get_bytes_mut_ptr(
&mut self,
cx: &impl HasDataLayout,
range: AllocRange,
) -> AllocResult<*mut [u8]> {
self.mark_init(range, true);
// This also clears relocations that just overlap with the written range. So writing to some
// byte can de-initialize its neighbors! See
// <https://github.com/rust-lang/rust/issues/87184> for details.
self.clear_relocations(cx, range);
self.clear_relocations(cx, range)?;
assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
let len = range.end().bytes_usize() - range.start.bytes_usize();
ptr::slice_from_raw_parts_mut(begin_ptr, len)
Ok(ptr::slice_from_raw_parts_mut(begin_ptr, len))
}
}
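The comment above notes that clearing relocations has "spooky" consequences for neighboring bytes. A hedged sketch of what an interpreter that *allows* partial overwrites (Miri-style, on an assumed 64-bit target) would observe; CTFE now errors instead:

```rust
// Hypothetical illustration of the neighbor de-initialization mentioned
// above (see issue #87184): clobbering one byte of a stored pointer
// strips its relocation and marks the *remaining* bytes uninitialized.
fn main() {
    unsafe {
        let mut p: *const i32 = &7;
        // Overwrite only the first of the pointer's 8 bytes.
        (&mut p as *mut *const i32 as *mut u8).write(0);
        // Bytes 1..8 of `p` are now uninitialized in this model, so an
        // interpreter like Miri would flag this cast, which must read
        // all 8 bytes, as a use of uninitialized memory.
        let _addr = p as usize;
    }
}
```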
/// Reading and writing.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
/// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
/// relocation. If `allow_uninit_and_ptr` is `false`, also enforces that the memory in the
/// given range contains neither relocations nor uninitialized bytes.
@@ -395,7 +404,7 @@ pub fn write_scalar(
};
let endian = cx.data_layout().endian;
let dst = self.get_bytes_mut(cx, range);
let dst = self.get_bytes_mut(cx, range)?;
write_target_uint(endian, dst, bytes).unwrap();
// See if we have to also write a relocation.
@@ -433,13 +442,16 @@ fn check_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> Alloc
/// uninitialized. This is a somewhat odd "spooky action at a distance",
/// but it allows strictly more code to run than if we would just error
/// immediately in that case.
fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) {
fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult
where
Tag: Provenance,
{
// Find the start and end of the given range and its outermost relocations.
let (first, last) = {
// Find all relocations overlapping the given range.
let relocations = self.get_relocations(cx, range);
if relocations.is_empty() {
return;
return Ok(());
}
(
@@ -450,17 +462,27 @@ fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) {
let start = range.start;
let end = range.end();
// Mark parts of the outermost relocations as uninitialized if they partially fall outside the
// given range.
// We need to handle clearing the relocations from parts of a pointer. See
// <https://github.com/rust-lang/rust/issues/87184> for details.
if first < start {
if Tag::ERR_ON_PARTIAL_PTR_OVERWRITE {
return Err(AllocError::PartialPointerOverwrite(first));
}
self.init_mask.set_range(first, start, false);
}
if last > end {
if Tag::ERR_ON_PARTIAL_PTR_OVERWRITE {
return Err(AllocError::PartialPointerOverwrite(
last - cx.data_layout().pointer_size,
));
}
self.init_mask.set_range(end, last, false);
}
// Forget all the relocations.
self.relocations.0.remove_range(first..last);
Ok(())
}
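To make the edge handling above concrete, here is a standalone sketch of the arithmetic with plain `u64` offsets in place of rustc's `Size` (the function name and the 8-byte pointer size are assumptions for illustration):

```rust
/// Sketch of `clear_relocations`' edge cases: `first..last` bounds the
/// outermost relocations overlapping the written range `start..end`.
/// Returns the offset CTFE reports, or `None` if no pointer is split.
fn partial_overwrite_error(first: u64, last: u64, start: u64, end: u64) -> Option<u64> {
    const PTR_SIZE: u64 = 8; // assumed 64-bit target
    if first < start {
        // A relocation begins before the written range: the pointer's
        // tail is clobbered. The error points at the pointer's start.
        return Some(first);
    }
    if last > end {
        // A relocation extends past the written range: the pointer's
        // head is clobbered; it starts `PTR_SIZE` bytes before `last`.
        return Some(last - PTR_SIZE);
    }
    None // all overlapping pointers are fully covered and simply dropped
}

fn main() {
    // A pointer stored at bytes 0..8, overwriting bytes 4..6:
    // the left edge triggers first, reporting offset 0.
    assert_eq!(partial_overwrite_error(0, 8, 4, 6), Some(0));
    // Overwriting bytes 0..4 clips only the pointer's head; the report
    // still lands on the pointer's start, 8 - PTR_SIZE = 0.
    assert_eq!(partial_overwrite_error(0, 8, 0, 4), Some(0));
}
```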
/// Errors if there are relocations overlapping with the edges of the

View File

@@ -404,6 +404,9 @@ pub enum UnsupportedOpInfo {
Unsupported(String),
/// Encountered a pointer where we needed raw bytes.
ReadPointerAsBytes,
/// Overwriting parts of a pointer; the resulting state cannot be represented in our
/// `Allocation` data structure.
PartialPointerOverwrite(Pointer<AllocId>),
//
// The variants below are only reachable from CTFE/const prop, miri will never emit them.
//
@@ -418,9 +421,12 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use UnsupportedOpInfo::*;
match self {
Unsupported(ref msg) => write!(f, "{}", msg),
ReadExternStatic(did) => write!(f, "cannot read from extern static ({:?})", did),
ReadPointerAsBytes => write!(f, "unable to turn pointer into raw bytes",),
ReadPointerAsBytes => write!(f, "unable to turn pointer into raw bytes"),
PartialPointerOverwrite(ptr) => {
write!(f, "unable to overwrite parts of a pointer in memory at {:?}", ptr)
}
ThreadLocalStatic(did) => write!(f, "cannot access thread local static ({:?})", did),
ReadExternStatic(did) => write!(f, "cannot read from extern static ({:?})", did),
}
}
}

View File

@@ -108,6 +108,10 @@ pub trait Provenance: Copy + fmt::Debug {
/// If `false`, ptr-to-int casts are not supported. The offset *must* be relative in that case.
const OFFSET_IS_ADDR: bool;
/// We also use this trait to control whether to abort execution when a pointer is being partially overwritten
/// (this avoids a separate trait in `allocation.rs` just for this purpose).
const ERR_ON_PARTIAL_PTR_OVERWRITE: bool;
/// Determines how a pointer should be printed.
fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result
where
@@ -123,6 +127,9 @@ impl Provenance for AllocId {
// so ptr-to-int casts are not possible (since we do not know the global physical offset).
const OFFSET_IS_ADDR: bool = false;
// For now, do not allow this, so that we keep our options open.
const ERR_ON_PARTIAL_PTR_OVERWRITE: bool = true;
fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Forward `alternate` flag to `alloc_id` printing.
if f.alternate() {

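The new associated const turns the overwrite policy into a type-level switch on the pointer tag, avoiding a separate trait. A simplified, self-contained sketch of the pattern (both tag types here are stand-ins, not rustc's):

```rust
use std::fmt;

// Simplified stand-in for the interpreter's `Provenance` trait.
trait Provenance: Copy + fmt::Debug {
    const ERR_ON_PARTIAL_PTR_OVERWRITE: bool;
}

#[derive(Copy, Clone, Debug)]
struct CtfeTag; // plays the role of `AllocId`
impl Provenance for CtfeTag {
    // Disallow for now, keeping options open.
    const ERR_ON_PARTIAL_PTR_OVERWRITE: bool = true;
}

#[derive(Copy, Clone, Debug)]
struct MachineTag; // a hypothetical Miri-style tag
impl Provenance for MachineTag {
    // A concrete machine can represent the result by de-initializing
    // the pointer's remaining bytes, so no error is needed.
    const ERR_ON_PARTIAL_PTR_OVERWRITE: bool = false;
}

// Generic code branches on the const; the dead arm compiles away.
fn on_partial_overwrite<Tag: Provenance>() -> Result<(), &'static str> {
    if Tag::ERR_ON_PARTIAL_PTR_OVERWRITE {
        Err("unable to overwrite parts of a pointer in memory")
    } else {
        Ok(()) // mark the stray bytes uninitialized and continue
    }
}

fn main() {
    assert!(on_partial_overwrite::<CtfeTag>().is_err());
    assert!(on_partial_overwrite::<MachineTag>().is_ok());
}
```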
View File

@@ -907,7 +907,7 @@ fn write_allocation_track_relocs<'tcx, Tag: Provenance, Extra>(
}
/// Reading and writing.
impl<'tcx, 'a, Tag: Copy, Extra> AllocRefMut<'a, 'tcx, Tag, Extra> {
impl<'tcx, 'a, Tag: Provenance, Extra> AllocRefMut<'a, 'tcx, Tag, Extra> {
pub fn write_scalar(
&mut self,
range: AllocRange,
@@ -928,7 +928,7 @@ pub fn write_ptr_sized(
}
}
impl<'tcx, 'a, Tag: Copy, Extra> AllocRef<'a, 'tcx, Tag, Extra> {
impl<'tcx, 'a, Tag: Provenance, Extra> AllocRef<'a, 'tcx, Tag, Extra> {
pub fn read_scalar(&self, range: AllocRange) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
Ok(self
.alloc
@@ -998,7 +998,11 @@ pub fn write_bytes(
// Side-step AllocRef and directly access the underlying bytes more efficiently.
// (We are staying inside the bounds here so all is good.)
let bytes = alloc_ref.alloc.get_bytes_mut(&alloc_ref.tcx, alloc_ref.range);
let alloc_id = alloc_ref.alloc_id;
let bytes = alloc_ref
.alloc
.get_bytes_mut(&alloc_ref.tcx, alloc_ref.range)
.map_err(move |e| e.to_interp_error(alloc_id))?;
// `zip` would stop when the first iterator ends; we want to definitely
// cover all of `bytes`.
for dest in bytes {
@@ -1072,7 +1076,10 @@ pub fn copy_repeatedly(
let (dest_alloc, extra) = self.get_raw_mut(dest_alloc_id)?;
let dest_range = alloc_range(dest_offset, size * num_copies);
M::memory_written(extra, &mut dest_alloc.extra, dest.provenance, dest_range)?;
let dest_bytes = dest_alloc.get_bytes_mut_ptr(&tcx, dest_range).as_mut_ptr();
let dest_bytes = dest_alloc
.get_bytes_mut_ptr(&tcx, dest_range)
.map_err(|e| e.to_interp_error(dest_alloc_id))?
.as_mut_ptr();
if compressed.no_bytes_init() {
// Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
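Both call sites in this file follow the same layering: `Allocation` methods can only report an offset, and the caller attaches the `AllocId` it already knows via `to_interp_error` before surfacing the failure. A minimal sketch of that two-layer design, with simplified stand-in types:

```rust
// Allocation-level error: knows the offset, but not which allocation.
#[derive(Debug, PartialEq)]
enum AllocError {
    PartialPointerOverwrite(u64),
}

// Interpreter-level error: carries the full location.
#[derive(Debug, PartialEq)]
enum InterpError {
    PartialPointerOverwrite { alloc_id: u32, offset: u64 },
}

impl AllocError {
    // Mirrors `AllocError::to_interp_error` in the diff: the caller
    // supplies the `AllocId` the allocation itself never sees.
    fn to_interp_error(self, alloc_id: u32) -> InterpError {
        match self {
            AllocError::PartialPointerOverwrite(offset) => {
                InterpError::PartialPointerOverwrite { alloc_id, offset }
            }
        }
    }
}

fn write_bytes(alloc_id: u32) -> Result<(), InterpError> {
    // Stand-in for `alloc_ref.alloc.get_bytes_mut(...)` failing.
    let res: Result<&mut [u8], AllocError> =
        Err(AllocError::PartialPointerOverwrite(0));
    let _bytes = res.map_err(|e| e.to_interp_error(alloc_id))?;
    Ok(())
}

fn main() {
    assert_eq!(
        write_bytes(27),
        Err(InterpError::PartialPointerOverwrite { alloc_id: 27, offset: 0 })
    );
}
```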