Merge branch 'master' into self-referential-generator

This commit is contained in:
Ralf Jung 2018-11-26 09:47:22 +01:00 committed by GitHub
commit 1edba2337b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
14 changed files with 109 additions and 98 deletions

View File

@ -1 +1 @@
nightly-2018-11-20
nightly-2018-11-26

View File

@ -1,4 +1,4 @@
#![feature(rustc_private, extern_crate_item_prelude)]
#![feature(rustc_private)]
extern crate miri;
extern crate getopts;
extern crate rustc;

View File

@ -1,4 +1,4 @@
#![feature(rustc_private, extern_crate_item_prelude)]
#![feature(rustc_private)]
extern crate getopts;
extern crate miri;

View File

@ -114,6 +114,8 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
None => self.tcx.item_name(def_id).as_str(),
};
let tcx = &{self.tcx.tcx};
// All these functions take raw pointers, so if we access memory directly
// (as opposed to through a place), we have to remember to erase any tag
// that might still hang around!
@ -124,7 +126,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
if size == 0 {
self.write_null(dest)?;
} else {
let align = self.tcx.data_layout.pointer_align;
let align = self.tcx.data_layout.pointer_align.abi;
let ptr = self.memory_mut().allocate(Size::from_bytes(size), align, MiriMemoryKind::C.into())?;
self.write_scalar(Scalar::Ptr(ptr.with_default_tag()), dest)?;
}
@ -153,7 +155,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
let ptr = self.memory_mut()
.allocate(
Size::from_bytes(size),
Align::from_bytes(align, align).unwrap(),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into()
)?
.with_default_tag();
@ -171,11 +173,13 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
let ptr = self.memory_mut()
.allocate(
Size::from_bytes(size),
Align::from_bytes(align, align).unwrap(),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into()
)?
.with_default_tag();
self.memory_mut().write_repeat(ptr.into(), 0, Size::from_bytes(size))?;
self.memory_mut()
.get_mut(ptr.alloc_id)?
.write_repeat(tcx, ptr, 0, Size::from_bytes(size))?;
self.write_scalar(Scalar::Ptr(ptr), dest)?;
}
"__rust_dealloc" => {
@ -190,7 +194,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
}
self.memory_mut().deallocate(
ptr,
Some((Size::from_bytes(old_size), Align::from_bytes(align, align).unwrap())),
Some((Size::from_bytes(old_size), Align::from_bytes(align).unwrap())),
MiriMemoryKind::Rust.into(),
)?;
}
@ -208,9 +212,9 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
let new_ptr = self.memory_mut().reallocate(
ptr,
Size::from_bytes(old_size),
Align::from_bytes(align, align).unwrap(),
Align::from_bytes(align).unwrap(),
Size::from_bytes(new_size),
Align::from_bytes(align, align).unwrap(),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into(),
)?;
self.write_scalar(Scalar::Ptr(new_ptr.with_default_tag()), dest)?;
@ -239,7 +243,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
"dlsym" => {
let _handle = self.read_scalar(args[0])?;
let symbol = self.read_scalar(args[1])?.to_ptr()?;
let symbol_name = self.memory().read_c_str(symbol)?;
let symbol_name = self.memory().get(symbol.alloc_id)?.read_c_str(tcx, symbol)?;
let err = format!("bad c unicode symbol: {:?}", symbol_name);
let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err);
return err!(Unimplemented(format!(
@ -346,7 +350,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
"getenv" => {
let result = {
let name_ptr = self.read_scalar(args[0])?.to_ptr()?;
let name = self.memory().read_c_str(name_ptr)?;
let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
match self.machine.env_vars.get(name) {
Some(&var) => Scalar::Ptr(var),
None => Scalar::ptr_null(&*self.tcx),
@ -360,8 +364,8 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
{
let name_ptr = self.read_scalar(args[0])?.not_undef()?;
if !name_ptr.is_null_ptr(self) {
let name = self.memory().read_c_str(name_ptr.to_ptr()?
)?.to_owned();
let name_ptr = name_ptr.to_ptr()?;
let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?.to_owned();
if !name.is_empty() && !name.contains(&b'=') {
success = Some(self.machine.env_vars.remove(&name));
}
@ -382,9 +386,10 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
{
let name_ptr = self.read_scalar(args[0])?.not_undef()?;
let value_ptr = self.read_scalar(args[1])?.to_ptr()?;
let value = self.memory().read_c_str(value_ptr)?;
let value = self.memory().get(value_ptr.alloc_id)?.read_c_str(tcx, value_ptr)?;
if !name_ptr.is_null_ptr(self) {
let name = self.memory().read_c_str(name_ptr.to_ptr()?)?;
let name_ptr = name_ptr.to_ptr()?;
let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
if !name.is_empty() && !name.contains(&b'=') {
new = Some((name.to_owned(), value.to_owned()));
}
@ -394,12 +399,15 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
// +1 for the null terminator
let value_copy = self.memory_mut().allocate(
Size::from_bytes((value.len() + 1) as u64),
Align::from_bytes(1, 1).unwrap(),
Align::from_bytes(1).unwrap(),
MiriMemoryKind::Env.into(),
)?.with_default_tag();
self.memory_mut().write_bytes(value_copy.into(), &value)?;
let trailing_zero_ptr = value_copy.offset(Size::from_bytes(value.len() as u64), self)?.into();
self.memory_mut().write_bytes(trailing_zero_ptr, &[0])?;
{
let alloc = self.memory_mut().get_mut(value_copy.alloc_id)?;
alloc.write_bytes(tcx, value_copy, &value)?;
let trailing_zero_ptr = value_copy.offset(Size::from_bytes(value.len() as u64), tcx)?;
alloc.write_bytes(tcx, trailing_zero_ptr, &[0])?;
}
if let Some(var) = self.machine.env_vars.insert(
name.to_owned(),
value_copy,
@ -444,7 +452,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
"strlen" => {
let ptr = self.read_scalar(args[0])?.to_ptr()?;
let n = self.memory().read_c_str(ptr)?.len();
let n = self.memory().get(ptr.alloc_id)?.read_c_str(tcx, ptr)?.len();
self.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?;
}
@ -469,10 +477,9 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
instance,
promoted: None,
};
let const_val = self.const_eval(cid)?;
let value = const_val.unwrap_bits(
self.tcx.tcx,
ty::ParamEnv::empty().and(self.tcx.types.i32)) as i32;
let const_val = self.const_eval_raw(cid)?;
let const_val = self.read_scalar(const_val.into())?;
let value = const_val.to_i32()?;
if value == name {
result = Some(path_value);
break;
@ -508,13 +515,15 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
let key_layout = self.layout_of(key_type)?;
// Create key and write it into the memory where key_ptr wants it
let key = self.machine.tls.create_tls_key(dtor, &*self.tcx) as u128;
let key = self.machine.tls.create_tls_key(dtor, tcx) as u128;
if key_layout.size.bits() < 128 && key >= (1u128 << key_layout.size.bits() as u128) {
return err!(OutOfTls);
}
self.memory_mut().write_scalar(
self.memory().check_align(key_ptr.into(), key_layout.align.abi)?;
self.memory_mut().get_mut(key_ptr.alloc_id)?.write_scalar(
tcx,
key_ptr,
key_layout.align,
Scalar::from_uint(key, key_layout.size).into(),
key_layout.size,
)?;
@ -611,7 +620,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
// This just creates a key; Windows does not natively support TLS dtors.
// Create key and return it
let key = self.machine.tls.create_tls_key(None, &*self.tcx) as u128;
let key = self.machine.tls.create_tls_key(None, tcx) as u128;
// Figure out how large a TLS key actually is. This is c::DWORD.
if dest.layout.size.bits() < 128 && key >= (1u128 << dest.layout.size.bits() as u128) {

View File

@ -130,9 +130,10 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
unsafe_cell_action: |place| {
trace!("unsafe_cell_action on {:?}", place.ptr);
// We need a size to go on.
let (unsafe_cell_size, _) = self.size_and_align_of_mplace(place)?
let unsafe_cell_size = self.size_and_align_of_mplace(place)?
.map(|(size, _)| size)
// for extern types, just cover what we can
.unwrap_or_else(|| place.layout.size_and_align());
.unwrap_or_else(|| place.layout.size);
// Now handle this `UnsafeCell`, unless it is empty.
if unsafe_cell_size != Size::ZERO {
unsafe_cell_action(place.ptr, unsafe_cell_size)

View File

@ -28,7 +28,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
if self.emulate_intrinsic(instance, args, dest)? {
return Ok(());
}
let tcx = &{self.tcx.tcx};
let substs = instance.substs;
// All these intrinsics take raw pointers, so if we access memory directly
@ -152,7 +152,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
let elem_layout = self.layout_of(elem_ty)?;
let elem_size = elem_layout.size.bytes();
let count = self.read_scalar(args[2])?.to_usize(self)?;
let elem_align = elem_layout.align;
let elem_align = elem_layout.align.abi;
// erase tags: this is a raw ptr operation
let src = self.read_scalar(args[0])?.not_undef()?;
let dest = self.read_scalar(args[1])?.not_undef()?;
@ -248,6 +248,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
// FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
// However, this only affects direct calls of the intrinsic; calls to the stable
// functions wrapping them do get their validation.
// FIXME: should we check that the destination pointer is aligned even for ZSTs?
if !dest.layout.is_zst() { // nothing to do for ZST
match dest.layout.abi {
layout::Abi::Scalar(ref s) => {
@ -263,7 +264,9 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
// Do it in memory
let mplace = self.force_allocation(dest)?;
assert!(mplace.meta.is_none());
self.memory_mut().write_repeat(mplace.ptr, 0, dest.layout.size)?;
// not a zst, must be valid pointer
let ptr = mplace.ptr.to_ptr()?;
self.memory_mut().get_mut(ptr.alloc_id)?.write_repeat(tcx, ptr, 0, dest.layout.size)?;
}
}
}
@ -272,7 +275,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
"pref_align_of" => {
let ty = substs.type_at(0);
let layout = self.layout_of(ty)?;
let align = layout.align.pref();
let align = layout.align.pref.bytes();
let ptr_size = self.pointer_size();
let align_val = Scalar::from_uint(align as u128, ptr_size);
self.write_scalar(align_val, dest)?;
@ -364,7 +367,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
.expect("size_of_val called on extern type");
let ptr_size = self.pointer_size();
self.write_scalar(
Scalar::from_uint(align.abi(), ptr_size),
Scalar::from_uint(align.bytes(), ptr_size),
dest,
)?;
}
@ -412,6 +415,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
// FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
// However, this only affects direct calls of the intrinsic; calls to the stable
// functions wrapping them do get their validation.
// FIXME: should we check alignment for ZSTs?
if !dest.layout.is_zst() { // nothing to do for ZST
match dest.layout.abi {
layout::Abi::Scalar(..) => {
@ -426,7 +430,10 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
// Do it in memory
let mplace = self.force_allocation(dest)?;
assert!(mplace.meta.is_none());
self.memory_mut().mark_definedness(mplace.ptr.to_ptr()?, dest.layout.size, false)?;
let ptr = mplace.ptr.to_ptr()?;
self.memory_mut()
.get_mut(ptr.alloc_id)?
.mark_definedness(ptr, dest.layout.size, false)?;
}
}
}
@ -438,8 +445,14 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
let val_byte = self.read_scalar(args[1])?.to_u8()?;
let ptr = self.read_scalar(args[0])?.not_undef()?;
let count = self.read_scalar(args[2])?.to_usize(self)?;
self.memory().check_align(ptr, ty_layout.align)?;
self.memory_mut().write_repeat(ptr, val_byte, ty_layout.size * count)?;
self.memory().check_align(ptr, ty_layout.align.abi)?;
let byte_count = ty_layout.size * count;
if byte_count.bytes() != 0 {
let ptr = ptr.to_ptr()?;
self.memory_mut()
.get_mut(ptr.alloc_id)?
.write_repeat(tcx, ptr, val_byte, byte_count)?;
}
}
name => return err!(Unimplemented(format!("unimplemented intrinsic: {}", name))),

View File

@ -1,4 +1,4 @@
#![feature(rustc_private, extern_crate_item_prelude)]
#![feature(rustc_private)]
#![cfg_attr(feature = "cargo-clippy", allow(cast_lossless))]
@ -397,7 +397,7 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
// Second argument: align
let arg = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?;
let align = layout.align.abi();
let align = layout.align.abi.bytes();
ecx.write_scalar(Scalar::from_uint(align, arg.layout.size), arg)?;
// No more arguments
@ -419,7 +419,7 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
"__cxa_thread_atexit_impl" => {
// This should be all-zero, pointer-sized
let data = vec![0; tcx.data_layout.pointer_size.bytes() as usize];
Allocation::from_bytes(&data[..], tcx.data_layout.pointer_align)
Allocation::from_bytes(&data[..], tcx.data_layout.pointer_align.abi)
}
_ => return err!(Unimplemented(
format!("can't access foreign static: {}", link_name),
@ -458,9 +458,9 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
place: MPlaceTy<'tcx, Borrow>,
mutability: Option<hir::Mutability>,
) -> EvalResult<'tcx, Scalar<Borrow>> {
let (size, _) = ecx.size_and_align_of_mplace(place)?
let size = ecx.size_and_align_of_mplace(place)?.map(|(size, _)| size)
// for extern types, just cover what we can
.unwrap_or_else(|| place.layout.size_and_align());
.unwrap_or_else(|| place.layout.size);
if !ecx.tcx.sess.opts.debugging_opts.mir_emit_retag ||
!Self::enforce_validity(ecx) || size == Size::ZERO
{
@ -498,9 +498,9 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
// This is deliberately NOT `deref_operand` as we do not want `tag_dereference`
// to be called! That would kill the original tag if we got a raw ptr.
let place = ecx.ref_to_mplace(ecx.read_immediate(ptr)?)?;
let (size, _) = ecx.size_and_align_of_mplace(place)?
let size = ecx.size_and_align_of_mplace(place)?.map(|(size, _)| size)
// for extern types, just cover what we can
.unwrap_or_else(|| place.layout.size_and_align());
.unwrap_or_else(|| place.layout.size);
if !ecx.tcx.sess.opts.debugging_opts.mir_emit_retag ||
!ecx.machine.validate || size == Size::ZERO
{

View File

@ -142,10 +142,12 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
// allocations sit right next to each other. The C/C++ standards are
// somewhat fuzzy about this case, so I think for now this check is
// "good enough".
// We require liveness, as dead allocations can of course overlap.
self.memory().check_bounds_ptr(left, InboundsCheck::Live)?;
self.memory().check_bounds_ptr(right, InboundsCheck::Live)?;
// Two live in-bounds pointers, we can compare across allocations
// Dead allocations in miri cannot overlap with live allocations, but
// on real hardware this can easily happen. Thus for comparisons we require
// both pointers to be live.
self.memory().get(left.alloc_id)?.check_bounds_ptr(left)?;
self.memory().get(right.alloc_id)?.check_bounds_ptr(right)?;
// Two in-bounds pointers, we can compare across allocations
left == right
}
}
@ -158,7 +160,8 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
// Case I: Comparing with NULL
if bits == 0 {
// Test if the ptr is in-bounds. Then it cannot be NULL.
if self.memory().check_bounds_ptr(ptr, InboundsCheck::MaybeDead).is_ok() {
// Even dangling pointers cannot be NULL.
if self.memory().check_bounds_ptr_maybe_dead(ptr).is_ok() {
return Ok(false);
}
}
@ -166,12 +169,12 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
let (alloc_size, alloc_align) = self.memory().get_size_and_align(ptr.alloc_id);
// Case II: Alignment gives it away
if ptr.offset.bytes() % alloc_align.abi() == 0 {
if ptr.offset.bytes() % alloc_align.bytes() == 0 {
// The offset maintains the allocation alignment, so we know `base+offset`
// is aligned by `alloc_align`.
// FIXME: We could be even more general, e.g. offset 2 into a 4-aligned
// allocation cannot equal 3.
if bits % alloc_align.abi() != 0 {
if bits % alloc_align.bytes() != 0 {
// The integer is *not* aligned. So they cannot be equal.
return Ok(false);
}
@ -226,7 +229,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
map_to_primval(left.overflowing_offset(Size::from_bytes(right as u64), self)),
BitAnd if !signed => {
let ptr_base_align = self.memory().get(left.alloc_id)?.align.abi();
let ptr_base_align = self.memory().get(left.alloc_id)?.align.bytes();
let base_mask = {
// FIXME: Use interpret::truncate, once that takes a Size instead of a Layout
let shift = 128 - self.memory().pointer_size().bits();
@ -259,7 +262,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
Rem if !signed => {
// Doing modulo a divisor of the alignment is allowed.
// (Intuition: Modulo a divisor leaks less information.)
let ptr_base_align = self.memory().get(left.alloc_id)?.align.abi();
let ptr_base_align = self.memory().get(left.alloc_id)?.align.bytes();
let right = right as u64;
let ptr_size = self.memory().pointer_size().bytes() as u8;
if right == 1 {
@ -298,9 +301,10 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
if let Scalar::Ptr(ptr) = ptr {
// Both old and new pointer must be in-bounds of a *live* allocation.
// (Of the same allocation, but that part is trivial with our representation.)
self.memory().check_bounds_ptr(ptr, InboundsCheck::Live)?;
let alloc = self.memory().get(ptr.alloc_id)?;
alloc.check_bounds_ptr(ptr)?;
let ptr = ptr.signed_offset(offset, self)?;
self.memory().check_bounds_ptr(ptr, InboundsCheck::Live)?;
alloc.check_bounds_ptr(ptr)?;
Ok(Scalar::Ptr(ptr))
} else {
// An integer pointer. They can only be offset by 0, and we pretend there

View File

@ -5,7 +5,7 @@ use rustc::hir::{Mutability, MutMutable, MutImmutable};
use crate::{
EvalResult, EvalErrorKind, MiriEvalContext, HelpersEvalContextExt, Evaluator, MutValueVisitor,
MemoryKind, MiriMemoryKind, RangeMap, AllocId, Allocation, AllocationExtra, InboundsCheck,
MemoryKind, MiriMemoryKind, RangeMap, AllocId, Allocation, AllocationExtra,
Pointer, MemPlace, Scalar, Immediate, ImmTy, PlaceTy, MPlaceTy,
};
@ -151,6 +151,10 @@ impl<'tcx> Stack {
/// Returns the index of the item we matched, `None` if it was the frozen one.
/// `kind` indicates which kind of reference is being dereferenced.
fn deref(&self, bor: Borrow, kind: RefKind) -> Result<Option<usize>, String> {
// Exclude unique ref with frozen tag.
if let (RefKind::Unique, Borrow::Shr(Some(_))) = (kind, bor) {
return Err(format!("Encountered mutable reference with frozen tag ({:?})", bor));
}
// Checks related to freezing
match bor {
Borrow::Shr(Some(bor_t)) if kind == RefKind::Frozen => {
@ -490,41 +494,14 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
if let Some(mutability) = mutability { format!("{:?}", mutability) } else { format!("raw") },
place.ptr, place.layout.ty);
let ptr = place.ptr.to_ptr()?;
// In principle we should not have to do anything here. However, with transmutes involved,
// it can happen that the tag of `ptr` does not actually match `mutability`, and we
// should adjust for that.
// Notably, the compiler can introduce such transmutes by optimizing away `&[mut]*`.
// That can transmute a raw ptr to a (shared/mut) ref, and a mut ref to a shared one.
match (mutability, ptr.tag) {
(None, _) => {
// No further validation on raw accesses.
return Ok(());
}
(Some(MutMutable), Borrow::Uniq(_)) |
(Some(MutImmutable), Borrow::Shr(_)) => {
// Expected combinations. Nothing to do.
}
(Some(MutMutable), Borrow::Shr(None)) => {
// Raw transmuted to mut ref. This is something real unsafe code does.
// We cannot reborrow here because we do not want to mutate state on a deref.
}
(Some(MutImmutable), Borrow::Uniq(_)) => {
// A mut got transmuted to shr. Can happen even from compiler transformations:
// `&*x` gets optimized to `x` even when `x` is a `&mut`.
}
(Some(MutMutable), Borrow::Shr(Some(_))) => {
// This is just invalid: A shr got transmuted to a mut.
// If we ever allow this, we have to consider what we do when we turn a
// `Raw`-tagged `&mut` into a raw pointer pointing to a frozen location.
// We probably do not want to allow that, but we have to allow
// turning a `Raw`-tagged `&` into a raw ptr to a frozen location.
return err!(MachineError(format!("Encountered mutable reference with frozen tag {:?}", ptr.tag)))
}
if mutability.is_none() {
// No further checks on raw derefs -- only the access itself will be checked.
return Ok(());
}
// Get the allocation
self.memory().check_bounds(ptr, size, InboundsCheck::Live)?;
let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
let alloc = self.memory().get(ptr.alloc_id)?;
alloc.check_bounds(self, ptr, size)?;
// If we got here, we do some checking, *but* we leave the tag unchanged.
if let Borrow::Shr(Some(_)) = ptr.tag {
assert_eq!(mutability, Some(MutImmutable));
@ -566,8 +543,8 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
ptr, place.layout.ty, new_bor);
// Get the allocation. It might not be mutable, so we cannot use `get_mut`.
self.memory().check_bounds(ptr, size, InboundsCheck::Live)?;
let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
let alloc = self.memory().get(ptr.alloc_id)?;
alloc.check_bounds(self, ptr, size)?;
// Update the stacks.
if let Borrow::Shr(Some(_)) = new_bor {
// Reference that cares about freezing. We need a frozen-sensitive reborrow.

View File

@ -25,5 +25,5 @@ fn unknown_code_2() { unsafe {
} }
fn main() {
assert_eq!(demo_mut_advanced_unique(Box::new(0)), 5);
demo_mut_advanced_unique(Box::new(0));
}

View File

@ -25,5 +25,5 @@ fn unknown_code_2() { unsafe {
} }
fn main() {
assert_eq!(demo_mut_advanced_unique(&mut 0), 5);
demo_mut_advanced_unique(&mut 0);
}

View File

@ -1,6 +1,3 @@
// FIXME still considering whether we are okay with this not being an error
// ignore-test
static X: usize = 5;
#[allow(mutable_transmutes)]

View File

@ -10,7 +10,7 @@
//ignore-windows: Uses POSIX APIs
#![feature(libc, extern_crate_item_prelude)]
#![feature(libc)]
#![allow(unused_extern_crates)] // rustc bug https://github.com/rust-lang/rust/issues/56098
extern crate libc;

View File

@ -7,6 +7,7 @@ fn main() {
mut_shr_raw();
mut_raw_then_mut_shr();
mut_raw_mut();
partially_invalidate_mut();
}
// Deref a raw ptr to access a field of a large struct, where the field
@ -97,3 +98,12 @@ fn mut_raw_mut() {
}
assert_eq!(x, 4);
}
fn partially_invalidate_mut() {
let data = &mut (0u8, 0u8);
let reborrow = &mut *data as *mut (u8, u8);
let shard = unsafe { &mut (*reborrow).0 };
data.1 += 1; // the deref overlaps with `shard`, but that is okay; the access does not overlap.
*shard += 1; // so we can still use `shard`.
assert_eq!(*data, (1, 1));
}