Merge remote-tracking branch 'origin/master' into rustup

This commit is contained in:
Ralf Jung 2019-07-06 09:29:17 +02:00
commit b324cbf282
26 changed files with 367 additions and 222 deletions

1
.gitattributes vendored Normal file
View File

@ -0,0 +1 @@
* text=auto eol=lf

View File

@ -279,6 +279,8 @@ Several `-Z` flags are relevant for Miri:
* `-Zalways-encode-mir` makes rustc dump MIR even for completely monomorphic
functions. This is needed so that Miri can execute such functions, so Miri
sets this flag by default.
* `-Zmir-emit-retag` controls whether `Retag` statements are emitted. Miri
enables this by default because it is needed for validation.
Moreover, Miri recognizes some environment variables:
@ -327,6 +329,7 @@ Miri has already found a number of bugs in the Rust standard library and beyond,
Definite bugs found:
* [`Debug for vec_deque::Iter` accessing uninitialized memory](https://github.com/rust-lang/rust/issues/53566)
* [`Vec::into_iter` doing an unaligned ZST read](https://github.com/rust-lang/rust/pull/53804)
* [`From<&[T]> for Rc` creating a not sufficiently aligned reference](https://github.com/rust-lang/rust/issues/54908)
* [`BTreeMap` creating a shared reference pointing to a too small allocation](https://github.com/rust-lang/rust/issues/54957)
* [`Vec::append` creating a dangling reference](https://github.com/rust-lang/rust/pull/61082)

View File

@ -1 +1 @@
7e08576e4276a97b523c25bfd196d419c39c7b87
24a9bcbb7cb0d8bdc11b8252a9c13f7562c7e4ca

View File

@ -10,7 +10,7 @@
use rustc::mir;
use crate::{
InterpResult, InterpError, InterpretCx, StackPopCleanup, struct_error,
InterpResult, InterpError, InterpCx, StackPopCleanup, struct_error,
Scalar, Tag, Pointer,
MemoryExtra, MiriMemoryKind, Evaluator, TlsEvalContextExt,
};
@ -30,14 +30,23 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
tcx: TyCtxt<'tcx>,
main_id: DefId,
config: MiriConfig,
) -> InterpResult<'tcx, InterpretCx<'mir, 'tcx, Evaluator<'tcx>>> {
let mut ecx = InterpretCx::new(
) -> InterpResult<'tcx, InterpCx<'mir, 'tcx, Evaluator<'tcx>>> {
// FIXME(https://github.com/rust-lang/miri/pull/803): no validation on Windows.
let target_os = tcx.sess.target.target.target_os.to_lowercase();
let validate = if target_os == "windows" {
false
} else {
config.validate
};
let mut ecx = InterpCx::new(
tcx.at(syntax::source_map::DUMMY_SP),
ty::ParamEnv::reveal_all(),
Evaluator::new(config.validate),
MemoryExtra::with_rng(config.seed.map(StdRng::seed_from_u64)),
Evaluator::new(),
MemoryExtra::new(config.seed.map(StdRng::seed_from_u64), validate),
);
let main_instance = ty::Instance::mono(ecx.tcx.tcx, main_id);
let main_mir = ecx.load_mir(main_instance.def)?;

View File

@ -43,6 +43,28 @@ fn resolve_path(&self, path: &[&str]) -> InterpResult<'tcx, ty::Instance<'tcx>>
})
}
/// Store a zero of the appropriate size into `dest`.
fn write_null(&mut self, dest: PlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
    let zero = Scalar::from_int(0, dest.layout.size);
    self.eval_context_mut().write_scalar(zero, dest)
}
/// Check whether the given scalar compares equal to a pointer-sized 0.
fn is_null(&self, val: Scalar<Tag>) -> InterpResult<'tcx, bool> {
    let ecx = self.eval_context_ref();
    let zero = Scalar::from_int(0, ecx.memory().pointer_size());
    ecx.ptr_eq(val, zero)
}
/// Turn a `Scalar` into an `Option<Scalar>`, mapping NULL to `None`
/// and any non-null value to `Some(val)`.
fn test_null(&self, val: Scalar<Tag>) -> InterpResult<'tcx, Option<Scalar<Tag>>> {
    let ecx = self.eval_context_ref();
    match ecx.is_null(val)? {
        true => Ok(None),
        false => Ok(Some(val)),
    }
}
/// Visits the memory covered by `place`, sensitive to freezing: the 3rd parameter
/// will be true if this is frozen, false if this is in an `UnsafeCell`.
fn visit_freeze_sensitive(
@ -58,6 +80,7 @@ fn visit_freeze_sensitive(
.map(|(size, _)| size)
.unwrap_or_else(|| place.layout.size)
);
assert!(size.bytes() > 0);
// Store how far we proceeded into the place so far. Everything to the left of
// this offset has already been handled, in the sense that the frozen parts
// have had `action` called on them.

View File

@ -4,7 +4,8 @@
use rand::Rng;
use rustc_mir::interpret::{AllocId, Pointer, InterpResult, Memory, AllocCheck};
use rustc::ty::layout::HasDataLayout;
use rustc_mir::interpret::{AllocId, Pointer, InterpResult, Memory, AllocCheck, PointerArithmetic};
use rustc_target::abi::Size;
use crate::{Evaluator, Tag, STACK_ADDR};
@ -75,7 +76,9 @@ pub fn ptr_to_int(
let mut global_state = memory.extra.intptrcast.borrow_mut();
let global_state = &mut *global_state;
let (size, align) = memory.get_size_and_align(ptr.alloc_id, AllocCheck::Live)?;
// There is nothing wrong with a raw pointer being cast to an integer only after
// it became dangling. Hence `MaybeDead`.
let (size, align) = memory.get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)?;
let base_addr = match global_state.base_addr.entry(ptr.alloc_id) {
Entry::Occupied(entry) => *entry.get(),
@ -107,7 +110,9 @@ pub fn ptr_to_int(
};
debug_assert_eq!(base_addr % align.bytes(), 0); // sanity check
Ok(base_addr + ptr.offset.bytes())
// Add offset with the right kind of pointer-overflowing arithmetic.
let dl = memory.data_layout();
Ok(dl.overflowing_offset(base_addr, ptr.offset.bytes()).0)
}
/// Shifts `addr` to make it aligned with `align` by rounding `addr` to the smallest multiple

View File

@ -15,7 +15,6 @@
mod shims;
mod operator;
mod helpers;
mod tls;
mod range_map;
mod mono_hash_map;
mod stacked_borrows;
@ -28,10 +27,11 @@
// Resolve ambiguity.
pub use rustc_mir::interpret::{self, AllocMap, PlaceTy};
pub use crate::shims::{EvalContextExt as ShimsEvalContextExt};
pub use crate::shims::foreign_items::EvalContextExt as ForeignItemsEvalContextExt;
pub use crate::shims::intrinsics::EvalContextExt as IntrinsicsEvalContextExt;
pub use crate::shims::tls::{EvalContextExt as TlsEvalContextExt, TlsData};
pub use crate::operator::EvalContextExt as OperatorEvalContextExt;
pub use crate::tls::{EvalContextExt as TlsEvalContextExt, TlsData};
pub use crate::range_map::RangeMap;
pub use crate::helpers::{EvalContextExt as HelpersEvalContextExt};
pub use crate::mono_hash_map::MonoHashMap;

View File

@ -28,6 +28,8 @@ pub enum MiriMemoryKind {
Rust,
/// `malloc` memory.
C,
/// Windows `HeapAlloc` memory.
WinHeap,
/// Part of env var emulation.
Env,
/// Statics.
@ -44,7 +46,8 @@ fn into(self) -> MemoryKind<MiriMemoryKind> {
/// Extra per-allocation data
#[derive(Debug, Clone)]
pub struct AllocExtra {
pub stacked_borrows: stacked_borrows::AllocExtra,
/// Stacked Borrows state is only added if validation is enabled.
pub stacked_borrows: Option<stacked_borrows::AllocExtra>,
}
/// Extra global memory data
@ -52,17 +55,22 @@ pub struct AllocExtra {
pub struct MemoryExtra {
pub stacked_borrows: stacked_borrows::MemoryExtra,
pub intptrcast: intptrcast::MemoryExtra,
/// The random number generator to use if Miri is running in non-deterministic mode,
/// and to enable intptrcast.
pub(crate) rng: Option<RefCell<StdRng>>
pub(crate) rng: Option<RefCell<StdRng>>,
/// Whether to enforce the validity invariant.
pub(crate) validate: bool,
}
impl MemoryExtra {
pub fn with_rng(rng: Option<StdRng>) -> Self {
pub fn new(rng: Option<StdRng>, validate: bool) -> Self {
MemoryExtra {
stacked_borrows: Default::default(),
intptrcast: Default::default(),
rng: rng.map(RefCell::new),
validate,
}
}
}
@ -85,13 +93,10 @@ pub struct Evaluator<'tcx> {
/// TLS state.
pub(crate) tls: TlsData<'tcx>,
/// Whether to enforce the validity invariant.
pub(crate) validate: bool,
}
impl<'tcx> Evaluator<'tcx> {
pub(crate) fn new(validate: bool) -> Self {
pub(crate) fn new() -> Self {
Evaluator {
env_vars: HashMap::default(),
argc: None,
@ -99,13 +104,12 @@ pub(crate) fn new(validate: bool) -> Self {
cmd_line: None,
last_error: 0,
tls: TlsData::default(),
validate,
}
}
}
/// A rustc InterpretCx for Miri.
pub type MiriEvalContext<'mir, 'tcx> = InterpretCx<'mir, 'tcx, Evaluator<'tcx>>;
/// A rustc InterpCx for Miri.
pub type MiriEvalContext<'mir, 'tcx> = InterpCx<'mir, 'tcx, Evaluator<'tcx>>;
/// A little trait that's useful to be inherited by extension traits.
pub trait MiriEvalContextExt<'mir, 'tcx> {
@ -137,14 +141,14 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> {
const STATIC_KIND: Option<MiriMemoryKind> = Some(MiriMemoryKind::Static);
#[inline(always)]
fn enforce_validity(ecx: &InterpretCx<'mir, 'tcx, Self>) -> bool {
ecx.machine.validate
fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
ecx.memory().extra.validate
}
/// Returns `Ok()` when the function was handled; fail otherwise.
#[inline(always)]
fn find_fn(
ecx: &mut InterpretCx<'mir, 'tcx, Self>,
ecx: &mut InterpCx<'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, Tag>],
dest: Option<PlaceTy<'tcx, Tag>>,
@ -155,7 +159,7 @@ fn find_fn(
#[inline(always)]
fn call_intrinsic(
ecx: &mut rustc_mir::interpret::InterpretCx<'mir, 'tcx, Self>,
ecx: &mut rustc_mir::interpret::InterpCx<'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, Tag>],
dest: PlaceTy<'tcx, Tag>,
@ -165,7 +169,7 @@ fn call_intrinsic(
#[inline(always)]
fn ptr_op(
ecx: &rustc_mir::interpret::InterpretCx<'mir, 'tcx, Self>,
ecx: &rustc_mir::interpret::InterpCx<'mir, 'tcx, Self>,
bin_op: mir::BinOp,
left: ImmTy<'tcx, Tag>,
right: ImmTy<'tcx, Tag>,
@ -174,7 +178,7 @@ fn ptr_op(
}
fn box_alloc(
ecx: &mut InterpretCx<'mir, 'tcx, Self>,
ecx: &mut InterpCx<'mir, 'tcx, Self>,
dest: PlaceTy<'tcx, Tag>,
) -> InterpResult<'tcx> {
trace!("box_alloc for {:?}", dest.layout.ty);
@ -240,7 +244,7 @@ fn find_foreign_static(
}
#[inline(always)]
fn before_terminator(_ecx: &mut InterpretCx<'mir, 'tcx, Self>) -> InterpResult<'tcx>
fn before_terminator(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx>
{
// We are not interested in detecting loops.
Ok(())
@ -254,12 +258,17 @@ fn tag_allocation<'b>(
) -> (Cow<'b, Allocation<Self::PointerTag, Self::AllocExtra>>, Self::PointerTag) {
let kind = kind.expect("we set our STATIC_KIND so this cannot be None");
let alloc = alloc.into_owned();
let (stacks, base_tag) = Stacks::new_allocation(
id,
Size::from_bytes(alloc.bytes.len() as u64),
Rc::clone(&memory.extra.stacked_borrows),
kind,
);
let (stacks, base_tag) = if !memory.extra.validate {
(None, Tag::Untagged)
} else {
let (stacks, base_tag) = Stacks::new_allocation(
id,
Size::from_bytes(alloc.bytes.len() as u64),
Rc::clone(&memory.extra.stacked_borrows),
kind,
);
(Some(stacks), base_tag)
};
if kind != MiriMemoryKind::Static.into() {
assert!(alloc.relocations.is_empty(), "Only statics can come initialized with inner pointers");
// Now we can rely on the inner pointers being static, too.
@ -271,7 +280,14 @@ fn tag_allocation<'b>(
alloc.relocations.iter()
// The allocations in the relocations (pointers stored *inside* this allocation)
// all get the base pointer tag.
.map(|&(offset, ((), alloc))| (offset, (memory_extra.static_base_ptr(alloc), alloc)))
.map(|&(offset, ((), alloc))| {
let tag = if !memory.extra.validate {
Tag::Untagged
} else {
memory_extra.static_base_ptr(alloc)
};
(offset, (tag, alloc))
})
.collect()
),
undef_mask: alloc.undef_mask,
@ -289,21 +305,21 @@ fn tag_static_base_pointer(
id: AllocId,
memory: &Memory<'mir, 'tcx, Self>,
) -> Self::PointerTag {
memory.extra.stacked_borrows.borrow_mut().static_base_ptr(id)
if !memory.extra.validate {
Tag::Untagged
} else {
memory.extra.stacked_borrows.borrow_mut().static_base_ptr(id)
}
}
#[inline(always)]
fn retag(
ecx: &mut InterpretCx<'mir, 'tcx, Self>,
ecx: &mut InterpCx<'mir, 'tcx, Self>,
kind: mir::RetagKind,
place: PlaceTy<'tcx, Tag>,
) -> InterpResult<'tcx> {
if !ecx.tcx.sess.opts.debugging_opts.mir_emit_retag || !Self::enforce_validity(ecx) {
// No tracking, or no retagging. The latter is possible because a dependency of ours
// might be called with different flags than we are, so there are `Retag`
// statements but we do not want to execute them.
// Also, honor the whitelist in `enforce_validity` because otherwise we might retag
// uninitialized data.
if !Self::enforce_validity(ecx) {
// No tracking.
Ok(())
} else {
ecx.retag(kind, place)
@ -312,14 +328,14 @@ fn retag(
#[inline(always)]
fn stack_push(
ecx: &mut InterpretCx<'mir, 'tcx, Self>,
ecx: &mut InterpCx<'mir, 'tcx, Self>,
) -> InterpResult<'tcx, stacked_borrows::CallId> {
Ok(ecx.memory().extra.stacked_borrows.borrow_mut().new_call())
}
#[inline(always)]
fn stack_pop(
ecx: &mut InterpretCx<'mir, 'tcx, Self>,
ecx: &mut InterpCx<'mir, 'tcx, Self>,
extra: stacked_borrows::CallId,
) -> InterpResult<'tcx> {
Ok(ecx.memory().extra.stacked_borrows.borrow_mut().end_call(extra))
@ -357,7 +373,11 @@ fn memory_read<'tcx>(
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx> {
alloc.extra.stacked_borrows.memory_read(ptr, size)
if let Some(ref stacked_borrows) = alloc.extra.stacked_borrows {
stacked_borrows.memory_read(ptr, size)
} else {
Ok(())
}
}
#[inline(always)]
@ -366,7 +386,11 @@ fn memory_written<'tcx>(
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx> {
alloc.extra.stacked_borrows.memory_written(ptr, size)
if let Some(ref mut stacked_borrows) = alloc.extra.stacked_borrows {
stacked_borrows.memory_written(ptr, size)
} else {
Ok(())
}
}
#[inline(always)]
@ -375,7 +399,11 @@ fn memory_deallocated<'tcx>(
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx> {
alloc.extra.stacked_borrows.memory_deallocated(ptr, size)
if let Some(ref mut stacked_borrows) = alloc.extra.stacked_borrows {
stacked_borrows.memory_deallocated(ptr, size)
} else {
Ok(())
}
}
}
@ -384,7 +412,7 @@ impl MayLeak for MiriMemoryKind {
fn may_leak(self) -> bool {
use self::MiriMemoryKind::*;
match self {
Rust | C => false,
Rust | C | WinHeap => false,
Env | Static => true,
}
}

View File

@ -56,10 +56,12 @@ fn ptr_op(
trace!("ptr_op: {:?} {:?} {:?}", *left, bin_op, *right);
// If intptrcast is enabled and the operation is not an offset
// we can force the cast from pointers to integer addresses and
// then dispatch to rustc binary operation method
if self.memory().extra.rng.is_some() && bin_op != Offset {
// If intptrcast is enabled, treat everything of integer *type* as integer *value*.
if self.memory().extra.rng.is_some() && left.layout.ty.is_integral() {
// This is actually an integer operation, so dispatch back to the core engine.
// TODO: Once intptrcast is the default, librustc_mir should never even call us
// for integer types.
assert!(right.layout.ty.is_integral());
let l_bits = self.force_bits(left.imm.to_scalar()?, left.layout.size)?;
let r_bits = self.force_bits(right.imm.to_scalar()?, right.layout.size)?;
@ -186,6 +188,13 @@ fn ptr_eq(
right: Scalar<Tag>,
) -> InterpResult<'tcx, bool> {
let size = self.pointer_size();
if self.memory().extra.rng.is_some() {
// Just compare the integers.
// TODO: Do we really want to *always* do that, even when comparing two live in-bounds pointers?
let left = self.force_bits(left, size)?;
let right = self.force_bits(right, size)?;
return Ok(left == right);
}
Ok(match (left, right) {
(Scalar::Raw { .. }, Scalar::Raw { .. }) =>
left.to_bits(size)? == right.to_bits(size)?,
@ -206,7 +215,7 @@ fn ptr_eq(
// on real hardware this can easily happen. Thus for comparisons we require
// both pointers to be live.
if self.pointer_inbounds(left).is_ok() && self.pointer_inbounds(right).is_ok() {
// Two in-bounds pointers in different allocations are different.
// Two in-bounds (and hence live) pointers in different allocations are different.
false
} else {
return err!(InvalidPointerMath);
@ -303,7 +312,9 @@ fn map_to_primval((res, over): (Pointer<Tag>, bool)) -> (Scalar<Tag>, bool) {
map_to_primval(left.overflowing_offset(Size::from_bytes(right as u64), self)),
BitAnd if !signed => {
let ptr_base_align = self.memory().get(left.alloc_id)?.align.bytes();
let ptr_base_align = self.memory().get_size_and_align(left.alloc_id, AllocCheck::MaybeDead)
.expect("alloc info with MaybeDead cannot fail")
.1.bytes();
let base_mask = {
// FIXME: use `interpret::truncate`, once that takes a `Size` instead of a `Layout`.
let shift = 128 - self.memory().pointer_size().bits();
@ -337,7 +348,9 @@ fn map_to_primval((res, over): (Pointer<Tag>, bool)) -> (Scalar<Tag>, bool) {
Rem if !signed => {
// Doing modulo a divisor of the alignment is allowed.
// (Intuition: modulo a divisor leaks less information.)
let ptr_base_align = self.memory().get(left.alloc_id)?.align.bytes();
let ptr_base_align = self.memory().get_size_and_align(left.alloc_id, AllocCheck::MaybeDead)
.expect("alloc info with MaybeDead cannot fail")
.1.bytes();
let right = right as u64;
let ptr_size = self.memory().pointer_size();
if right == 1 {
@ -384,21 +397,24 @@ fn pointer_offset_inbounds(
.checked_mul(pointee_size)
.ok_or_else(|| InterpError::Overflow(mir::BinOp::Mul))?;
// Now let's see what kind of pointer this is.
if let Scalar::Ptr(ptr) = ptr {
// Both old and new pointer must be in-bounds of a *live* allocation.
// (Of the same allocation, but that part is trivial with our representation.)
self.pointer_inbounds(ptr)?;
let ptr = ptr.signed_offset(offset, self)?;
self.pointer_inbounds(ptr)?;
Ok(Scalar::Ptr(ptr))
} else {
// An integer pointer. They can only be offset by 0, and we pretend there
// is a little zero-sized allocation here.
if offset == 0 {
Ok(ptr)
} else {
err!(InvalidPointerMath)
let ptr = if offset == 0 {
match ptr {
Scalar::Ptr(ptr) => ptr,
Scalar::Raw { .. } => {
// Offset 0 on an integer. We accept that, pretending there is
// a little zero-sized allocation here.
return Ok(ptr);
}
}
}
} else {
// Offset > 0. We *require* a pointer.
self.force_ptr(ptr)?
};
// Both old and new pointer must be in-bounds of a *live* allocation.
// (Of the same allocation, but that part is trivial with our representation.)
self.pointer_inbounds(ptr)?;
let ptr = ptr.signed_offset(offset, self)?;
self.pointer_inbounds(ptr)?;
Ok(Scalar::Ptr(ptr))
}
}

View File

@ -1,4 +1,3 @@
use rustc::ty;
use rustc::ty::layout::{Align, LayoutOf, Size};
use rustc::hir::def_id::DefId;
use rustc::mir;
@ -11,48 +10,8 @@
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
fn find_fn(
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, Tag>],
dest: Option<PlaceTy<'tcx, Tag>>,
ret: Option<mir::BasicBlock>,
) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
let this = self.eval_context_mut();
trace!("eval_fn_call: {:#?}, {:?}", instance, dest.map(|place| *place));
// First, run the common hooks also supported by CTFE.
if this.hook_fn(instance, args, dest)? {
this.goto_block(ret)?;
return Ok(None);
}
// There are some more lang items we want to hook that CTFE does not hook (yet).
if this.tcx.lang_items().align_offset_fn() == Some(instance.def.def_id()) {
// FIXME: return a real value in case the target allocation has an
// alignment bigger than the one requested.
let n = u128::max_value();
let dest = dest.unwrap();
let n = this.truncate(n, dest.layout);
this.write_scalar(Scalar::from_uint(n, dest.layout.size), dest)?;
this.goto_block(ret)?;
return Ok(None);
}
// Try to see if we can do something about foreign items.
if this.tcx.is_foreign_item(instance.def_id()) {
// An external function that we cannot find MIR for, but we can still run enough
// of them to make miri viable.
this.emulate_foreign_item(instance.def_id(), args, dest, ret)?;
// `goto_block` already handled.
return Ok(None);
}
// Otherwise, load the MIR.
Ok(Some(this.load_mir(instance.def)?))
}
/// Returns the minimum alignment for the target architecture.
fn min_align(&self) -> Align {
/// Returns the minimum alignment for the target architecture for allocations of the given size.
fn min_align(&self, size: u64, kind: MiriMemoryKind) -> Align {
let this = self.eval_context_ref();
// List taken from `libstd/sys_common/alloc.rs`.
let min_align = match this.tcx.tcx.sess.target.target.arch.as_str() {
@ -60,21 +19,39 @@ fn min_align(&self) -> Align {
"x86_64" | "aarch64" | "mips64" | "s390x" | "sparc64" => 16,
arch => bug!("Unsupported target architecture: {}", arch),
};
Align::from_bytes(min_align).unwrap()
// Windows always aligns, even small allocations.
// Source: <https://support.microsoft.com/en-us/help/286470/how-to-use-pageheap-exe-in-windows-xp-windows-2000-and-windows-server>
// But jemalloc does not, so for the C heap we only align if the allocation is sufficiently big.
if kind == MiriMemoryKind::WinHeap || size >= min_align {
return Align::from_bytes(min_align).unwrap();
}
// We have `size < min_align`. Round `size` *down* to the next power of two and use that.
// Round `x` down to the largest power of two that does not exceed it.
fn prev_power_of_two(x: u64) -> u64 {
    if x == 0 {
        // Degenerate input: keep 0 (same as `0.next_power_of_two() / 2`).
        0
    } else {
        // The previous power of two is exactly the highest set bit of `x`.
        1u64 << (63 - x.leading_zeros())
    }
}
Align::from_bytes(prev_power_of_two(size)).unwrap()
}
fn malloc(
&mut self,
size: u64,
zero_init: bool,
kind: MiriMemoryKind,
) -> Scalar<Tag> {
let this = self.eval_context_mut();
let tcx = &{this.tcx.tcx};
if size == 0 {
Scalar::from_int(0, this.pointer_size())
} else {
let align = this.min_align();
let ptr = this.memory_mut().allocate(Size::from_bytes(size), align, MiriMemoryKind::C.into());
let align = this.min_align(size, kind);
let ptr = this.memory_mut().allocate(Size::from_bytes(size), align, kind.into());
if zero_init {
// We just allocated this, the access cannot fail
this.memory_mut()
@ -88,13 +65,15 @@ fn malloc(
fn free(
&mut self,
ptr: Scalar<Tag>,
kind: MiriMemoryKind,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
if !ptr.is_null_ptr(this) {
if !this.is_null(ptr)? {
let ptr = this.force_ptr(ptr)?;
this.memory_mut().deallocate(
ptr.to_ptr()?,
ptr,
None,
MiriMemoryKind::C.into(),
kind.into(),
)?;
}
Ok(())
@ -104,39 +83,38 @@ fn realloc(
&mut self,
old_ptr: Scalar<Tag>,
new_size: u64,
kind: MiriMemoryKind,
) -> InterpResult<'tcx, Scalar<Tag>> {
let this = self.eval_context_mut();
let align = this.min_align();
if old_ptr.is_null_ptr(this) {
let new_align = this.min_align(new_size, kind);
if this.is_null(old_ptr)? {
if new_size == 0 {
Ok(Scalar::from_int(0, this.pointer_size()))
} else {
let new_ptr = this.memory_mut().allocate(
Size::from_bytes(new_size),
align,
MiriMemoryKind::C.into()
new_align,
kind.into()
);
Ok(Scalar::Ptr(new_ptr))
}
} else {
let old_ptr = old_ptr.to_ptr()?;
let old_ptr = this.force_ptr(old_ptr)?;
let memory = this.memory_mut();
let old_size = Size::from_bytes(memory.get(old_ptr.alloc_id)?.bytes.len() as u64);
if new_size == 0 {
memory.deallocate(
old_ptr,
Some((old_size, align)),
MiriMemoryKind::C.into(),
None,
kind.into(),
)?;
Ok(Scalar::from_int(0, this.pointer_size()))
} else {
let new_ptr = memory.reallocate(
old_ptr,
old_size,
align,
None,
Size::from_bytes(new_size),
align,
MiriMemoryKind::C.into(),
new_align,
kind.into(),
)?;
Ok(Scalar::Ptr(new_ptr))
}
@ -185,14 +163,14 @@ fn emulate_foreign_item(
match link_name {
"malloc" => {
let size = this.read_scalar(args[0])?.to_usize(this)?;
let res = this.malloc(size, /*zero_init:*/ false);
let res = this.malloc(size, /*zero_init:*/ false, MiriMemoryKind::C);
this.write_scalar(res, dest)?;
}
"calloc" => {
let items = this.read_scalar(args[0])?.to_usize(this)?;
let len = this.read_scalar(args[1])?.to_usize(this)?;
let size = items.checked_mul(len).ok_or_else(|| InterpError::Overflow(mir::BinOp::Mul))?;
let res = this.malloc(size, /*zero_init:*/ true);
let res = this.malloc(size, /*zero_init:*/ true, MiriMemoryKind::C);
this.write_scalar(res, dest)?;
}
"posix_memalign" => {
@ -227,12 +205,12 @@ fn emulate_foreign_item(
}
"free" => {
let ptr = this.read_scalar(args[0])?.not_undef()?;
this.free(ptr)?;
this.free(ptr, MiriMemoryKind::C)?;
}
"realloc" => {
let old_ptr = this.read_scalar(args[0])?.not_undef()?;
let new_size = this.read_scalar(args[1])?.to_usize(this)?;
let res = this.realloc(old_ptr, new_size)?;
let res = this.realloc(old_ptr, new_size, MiriMemoryKind::C)?;
this.write_scalar(res, dest)?;
}
@ -268,13 +246,14 @@ fn emulate_foreign_item(
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into()
);
// We just allocated this, the access cannot fail
this.memory_mut()
.get_mut(ptr.alloc_id)?
.write_repeat(tcx, ptr, 0, Size::from_bytes(size))?;
.get_mut(ptr.alloc_id).unwrap()
.write_repeat(tcx, ptr, 0, Size::from_bytes(size)).unwrap();
this.write_scalar(Scalar::Ptr(ptr), dest)?;
}
"__rust_dealloc" => {
let ptr = this.read_scalar(args[0])?.to_ptr()?;
let ptr = this.read_scalar(args[0])?.not_undef()?;
let old_size = this.read_scalar(args[1])?.to_usize(this)?;
let align = this.read_scalar(args[2])?.to_usize(this)?;
if old_size == 0 {
@ -283,6 +262,7 @@ fn emulate_foreign_item(
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
let ptr = this.force_ptr(ptr)?;
this.memory_mut().deallocate(
ptr,
Some((Size::from_bytes(old_size), Align::from_bytes(align).unwrap())),
@ -300,12 +280,12 @@ fn emulate_foreign_item(
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
let align = Align::from_bytes(align).unwrap();
let new_ptr = this.memory_mut().reallocate(
ptr,
Size::from_bytes(old_size),
Align::from_bytes(align).unwrap(),
Some((Size::from_bytes(old_size), align)),
Size::from_bytes(new_size),
Align::from_bytes(align).unwrap(),
align,
MiriMemoryKind::Rust.into(),
)?;
this.write_scalar(Scalar::Ptr(new_ptr), dest)?;
@ -365,7 +345,7 @@ fn emulate_foreign_item(
trace!("__rust_maybe_catch_panic: {:?}", f_instance);
// Now we make a function call.
// TODO: consider making this reusable? `InterpretCx::step` does something similar
// TODO: consider making this reusable? `InterpCx::step` does something similar
// for the TLS destructors, and of course `eval_main`.
let mir = this.load_mir(f_instance.def)?;
let ret_place = MPlaceTy::dangling(this.layout_of(this.tcx.mk_unit())?, this).into();
@ -467,7 +447,7 @@ fn emulate_foreign_item(
let mut success = None;
{
let name_ptr = this.read_scalar(args[0])?.not_undef()?;
if !name_ptr.is_null_ptr(this) {
if !this.is_null(name_ptr)? {
let name_ptr = name_ptr.to_ptr()?;
let name = this
.memory()
@ -495,7 +475,7 @@ fn emulate_foreign_item(
let name_ptr = this.read_scalar(args[0])?.not_undef()?;
let value_ptr = this.read_scalar(args[1])?.to_ptr()?;
let value = this.memory().get(value_ptr.alloc_id)?.read_c_str(tcx, value_ptr)?;
if !name_ptr.is_null_ptr(this) {
if !this.is_null(name_ptr)? {
let name_ptr = name_ptr.to_ptr()?;
let name = this.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
if !name.is_empty() && !name.contains(&b'=') {
@ -510,15 +490,15 @@ fn emulate_foreign_item(
Align::from_bytes(1).unwrap(),
MiriMemoryKind::Env.into(),
);
{
let alloc = this.memory_mut().get_mut(value_copy.alloc_id)?;
alloc.write_bytes(tcx, value_copy, &value)?;
let trailing_zero_ptr = value_copy.offset(
Size::from_bytes(value.len() as u64),
tcx,
)?;
alloc.write_bytes(tcx, trailing_zero_ptr, &[0])?;
}
// We just allocated these, so the write cannot fail.
let alloc = this.memory_mut().get_mut(value_copy.alloc_id).unwrap();
alloc.write_bytes(tcx, value_copy, &value).unwrap();
let trailing_zero_ptr = value_copy.offset(
Size::from_bytes(value.len() as u64),
tcx,
).unwrap();
alloc.write_bytes(tcx, trailing_zero_ptr, &[0]).unwrap();
if let Some(var) = this.machine.env_vars.insert(
name.to_owned(),
value_copy,
@ -678,14 +658,9 @@ fn emulate_foreign_item(
let key_ptr = this.read_scalar(args[0])?.not_undef()?;
// Extract the function type out of the signature (that seems easier than constructing it ourselves).
let dtor = match this.read_scalar(args[1])?.not_undef()? {
Scalar::Ptr(dtor_ptr) => Some(this.memory().get_fn(dtor_ptr)?),
Scalar::Raw { data: 0, size } => {
// NULL pointer
assert_eq!(size as u64, this.memory().pointer_size().bytes());
None
},
Scalar::Raw { .. } => return err!(ReadBytesAsPointer),
let dtor = match this.test_null(this.read_scalar(args[1])?.not_undef()?)? {
Some(dtor_ptr) => Some(this.memory().get_fn(dtor_ptr.to_ptr()?)?),
None => None,
};
// Figure out how large a pthread TLS key actually is.
@ -697,7 +672,7 @@ fn emulate_foreign_item(
let key_layout = this.layout_of(key_type)?;
// Create key and write it into the memory where `key_ptr` wants it.
let key = this.machine.tls.create_tls_key(dtor, tcx) as u128;
let key = this.machine.tls.create_tls_key(dtor) as u128;
if key_layout.size.bits() < 128 && key >= (1u128 << key_layout.size.bits() as u128) {
return err!(OutOfTls);
}
@ -722,13 +697,13 @@ fn emulate_foreign_item(
}
"pthread_getspecific" => {
let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
let ptr = this.machine.tls.load_tls(key)?;
let ptr = this.machine.tls.load_tls(key, tcx)?;
this.write_scalar(ptr, dest)?;
}
"pthread_setspecific" => {
let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
let new_ptr = this.read_scalar(args[1])?.not_undef()?;
this.machine.tls.store_tls(key, new_ptr)?;
this.machine.tls.store_tls(key, this.test_null(new_ptr)?)?;
// Return success (`0`).
this.write_null(dest)?;
@ -808,14 +783,14 @@ fn emulate_foreign_item(
let flags = this.read_scalar(args[1])?.to_u32()?;
let size = this.read_scalar(args[2])?.to_usize(this)?;
let zero_init = (flags & 0x00000008) != 0; // HEAP_ZERO_MEMORY
let res = this.malloc(size, zero_init);
let res = this.malloc(size, zero_init, MiriMemoryKind::WinHeap);
this.write_scalar(res, dest)?;
}
"HeapFree" => {
let _handle = this.read_scalar(args[0])?.to_isize(this)?;
let _flags = this.read_scalar(args[1])?.to_u32()?;
let ptr = this.read_scalar(args[2])?.not_undef()?;
this.free(ptr)?;
this.free(ptr, MiriMemoryKind::WinHeap)?;
this.write_scalar(Scalar::from_int(1, Size::from_bytes(4)), dest)?;
}
"HeapReAlloc" => {
@ -823,7 +798,7 @@ fn emulate_foreign_item(
let _flags = this.read_scalar(args[1])?.to_u32()?;
let ptr = this.read_scalar(args[2])?.not_undef()?;
let size = this.read_scalar(args[3])?.to_usize(this)?;
let res = this.realloc(ptr, size)?;
let res = this.realloc(ptr, size, MiriMemoryKind::WinHeap)?;
this.write_scalar(res, dest)?;
}
@ -855,7 +830,14 @@ fn emulate_foreign_item(
},
"GetSystemInfo" => {
let system_info = this.deref_operand(args[0])?;
let system_info_ptr = system_info.ptr.to_ptr()?;
let (system_info_ptr, align) = system_info.to_scalar_ptr_align();
let system_info_ptr = this.memory()
.check_ptr_access(
system_info_ptr,
system_info.layout.size,
align,
)?
.expect("cannot be a ZST");
// Initialize with `0`.
this.memory_mut().get_mut(system_info_ptr.alloc_id)?
.write_repeat(tcx, system_info_ptr, 0, system_info.layout.size)?;
@ -875,7 +857,7 @@ fn emulate_foreign_item(
// This just creates a key; Windows does not natively support TLS destructors.
// Create key and return it.
let key = this.machine.tls.create_tls_key(None, tcx) as u128;
let key = this.machine.tls.create_tls_key(None) as u128;
// Figure out how large a TLS key actually is. This is `c::DWORD`.
if dest.layout.size.bits() < 128
@ -886,13 +868,13 @@ fn emulate_foreign_item(
}
"TlsGetValue" => {
let key = this.read_scalar(args[0])?.to_u32()? as u128;
let ptr = this.machine.tls.load_tls(key)?;
let ptr = this.machine.tls.load_tls(key, tcx)?;
this.write_scalar(ptr, dest)?;
}
"TlsSetValue" => {
let key = this.read_scalar(args[0])?.to_u32()? as u128;
let new_ptr = this.read_scalar(args[1])?.not_undef()?;
this.machine.tls.store_tls(key, new_ptr)?;
this.machine.tls.store_tls(key, this.test_null(new_ptr)?)?;
// Return success (`1`).
this.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
@ -969,10 +951,6 @@ fn emulate_foreign_item(
Ok(())
}
fn write_null(&mut self, dest: PlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
self.eval_context_mut().write_scalar(Scalar::from_int(0, dest.layout.size), dest)
}
/// Evaluates the scalar at the specified path. Returns Some(val)
/// if the path could be resolved, and None otherwise
fn eval_path_scalar(&mut self, path: &[&str]) -> InterpResult<'tcx, Option<ScalarMaybeUndef<Tag>>> {

View File

@ -1,2 +1,50 @@
pub mod foreign_items;
pub mod intrinsics;
pub mod tls;
use rustc::{ty, mir};
use crate::*;
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
fn find_fn(
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, Tag>],
dest: Option<PlaceTy<'tcx, Tag>>,
ret: Option<mir::BasicBlock>,
) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
let this = self.eval_context_mut();
trace!("eval_fn_call: {:#?}, {:?}", instance, dest.map(|place| *place));
// First, run the common hooks also supported by CTFE.
if this.hook_fn(instance, args, dest)? {
this.goto_block(ret)?;
return Ok(None);
}
// There are some more lang items we want to hook that CTFE does not hook (yet).
if this.tcx.lang_items().align_offset_fn() == Some(instance.def.def_id()) {
// FIXME: return a real value in case the target allocation has an
// alignment bigger than the one requested.
let n = u128::max_value();
let dest = dest.unwrap();
let n = this.truncate(n, dest.layout);
this.write_scalar(Scalar::from_uint(n, dest.layout.size), dest)?;
this.goto_block(ret)?;
return Ok(None);
}
// Try to see if we can do something about foreign items.
if this.tcx.is_foreign_item(instance.def_id()) {
// An external function that we cannot find MIR for, but we can still run enough
// of them to make miri viable.
this.emulate_foreign_item(instance.def_id(), args, dest, ret)?;
// `goto_block` already handled.
return Ok(None);
}
// Otherwise, load the MIR.
Ok(Some(this.load_mir(instance.def)?))
}
}

View File

@ -8,13 +8,17 @@
use crate::{
InterpResult, InterpError, StackPopCleanup,
MPlaceTy, Scalar, Tag,
HelpersEvalContextExt,
};
pub type TlsKey = u128;
#[derive(Copy, Clone, Debug)]
pub struct TlsEntry<'tcx> {
pub(crate) data: Scalar<Tag>, // Will eventually become a map from thread IDs to `Scalar`s, if we ever support more than one thread.
/// The data for this key. None is used to represent NULL.
/// (We normalize this early to avoid having to do a NULL-ptr-test each time we access the data.)
/// Will eventually become a map from thread IDs to `Scalar`s, if we ever support more than one thread.
pub(crate) data: Option<Scalar<Tag>>,
pub(crate) dtor: Option<ty::Instance<'tcx>>,
}
@ -40,14 +44,13 @@ impl<'tcx> TlsData<'tcx> {
pub fn create_tls_key(
&mut self,
dtor: Option<ty::Instance<'tcx>>,
cx: &impl HasDataLayout,
) -> TlsKey {
let new_key = self.next_key;
self.next_key += 1;
self.keys.insert(
new_key,
TlsEntry {
data: Scalar::ptr_null(cx).into(),
data: None,
dtor,
},
);
@ -65,17 +68,21 @@ pub fn delete_tls_key(&mut self, key: TlsKey) -> InterpResult<'tcx> {
}
}
pub fn load_tls(&mut self, key: TlsKey) -> InterpResult<'tcx, Scalar<Tag>> {
pub fn load_tls(
&mut self,
key: TlsKey,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Scalar<Tag>> {
match self.keys.get(&key) {
Some(&TlsEntry { data, .. }) => {
trace!("TLS key {} loaded: {:?}", key, data);
Ok(data)
Ok(data.unwrap_or_else(|| Scalar::ptr_null(cx).into()))
}
None => err!(TlsOutOfBounds),
}
}
pub fn store_tls(&mut self, key: TlsKey, new_data: Scalar<Tag>) -> InterpResult<'tcx> {
pub fn store_tls(&mut self, key: TlsKey, new_data: Option<Scalar<Tag>>) -> InterpResult<'tcx> {
match self.keys.get_mut(&key) {
Some(&mut TlsEntry { ref mut data, .. }) => {
trace!("TLS key {} stored: {:?}", key, new_data);
@ -107,7 +114,6 @@ pub fn store_tls(&mut self, key: TlsKey, new_data: Scalar<Tag>) -> InterpResult<
fn fetch_tls_dtor(
&mut self,
key: Option<TlsKey>,
cx: &impl HasDataLayout,
) -> Option<(ty::Instance<'tcx>, Scalar<Tag>, TlsKey)> {
use std::collections::Bound::*;
@ -119,10 +125,10 @@ fn fetch_tls_dtor(
for (&key, &mut TlsEntry { ref mut data, dtor }) in
thread_local.range_mut((start, Unbounded))
{
if !data.is_null_ptr(cx) {
if let Some(data_scalar) = *data {
if let Some(dtor) = dtor {
let ret = Some((dtor, *data, key));
*data = Scalar::ptr_null(cx);
let ret = Some((dtor, data_scalar, key));
*data = None;
return ret;
}
}
@ -135,10 +141,11 @@ impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tc
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
fn run_tls_dtors(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let mut dtor = this.machine.tls.fetch_tls_dtor(None, &*this.tcx);
let mut dtor = this.machine.tls.fetch_tls_dtor(None);
// FIXME: replace loop by some structure that works with stepping
while let Some((instance, ptr, key)) = dtor {
trace!("Running TLS dtor {:?} on {:?}", instance, ptr);
assert!(!this.is_null(ptr).unwrap(), "Data can't be NULL when dtor is called!");
// TODO: Potentially, this has to support all the other possible instances?
// See eval_fn_call in interpret/terminator/mod.rs
let mir = this.load_mir(instance.def)?;
@ -159,9 +166,9 @@ fn run_tls_dtors(&mut self) -> InterpResult<'tcx> {
// step until out of stackframes
this.run()?;
dtor = match this.machine.tls.fetch_tls_dtor(Some(key), &*this.tcx) {
dtor = match this.machine.tls.fetch_tls_dtor(Some(key)) {
dtor @ Some(_) => dtor,
None => this.machine.tls.fetch_tls_dtor(None, &*this.tcx),
None => this.machine.tls.fetch_tls_dtor(None),
};
}
// FIXME: On a windows target, call `unsafe extern "system" fn on_tls_callback`.

View File

@ -541,6 +541,7 @@ fn reborrow(
// Get the allocation. It might not be mutable, so we cannot use `get_mut`.
let alloc = this.memory().get(ptr.alloc_id)?;
let stacked_borrows = alloc.extra.stacked_borrows.as_ref().expect("we should have Stacked Borrows data");
// Update the stacks.
// Make sure that raw pointers and mutable shared references are reborrowed "weak":
// There could be existing unique pointers reborrowed from them that should remain valid!
@ -556,14 +557,14 @@ fn reborrow(
// We are only ever `SharedReadOnly` inside the frozen bits.
let perm = if frozen { Permission::SharedReadOnly } else { Permission::SharedReadWrite };
let item = Item { perm, tag: new_tag, protector };
alloc.extra.stacked_borrows.for_each(cur_ptr, size, |stack, global| {
stacked_borrows.for_each(cur_ptr, size, |stack, global| {
stack.grant(cur_ptr.tag, item, global)
})
});
}
};
let item = Item { perm, tag: new_tag, protector };
alloc.extra.stacked_borrows.for_each(ptr, size, |stack, global| {
stacked_borrows.for_each(ptr, size, |stack, global| {
stack.grant(ptr.tag, item, global)
})
}

View File

@ -52,8 +52,9 @@ def test_cargo_miri_run():
)
def test_cargo_miri_test():
# FIXME: enable validation again, once that no longer conflicts with intptrcast
test("cargo miri test",
cargo_miri("test") + ["--", "-Zmiri-seed=feed"],
cargo_miri("test") + ["--", "-Zmiri-seed=feed", "-Zmiri-disable-validation"],
"test.stdout.ref", "test.stderr.ref"
)
test("cargo miri test (with filter)",

View File

@ -1,4 +1,4 @@
// error-pattern: invalid arithmetic on pointers
// error-pattern: tried to interpret some bytes as a pointer
fn main() {
// Can't offset an integer pointer by non-zero offset.

View File

@ -69,7 +69,7 @@ fn compile_fail(path: &str, target: &str, opt: bool) {
run_tests("compile-fail", path, target, flags);
}
fn miri_pass(path: &str, target: &str, opt: bool) {
fn miri_pass(path: &str, target: &str, opt: bool, noseed: bool) {
let opt_str = if opt { " with optimizations" } else { "" };
eprintln!("{}", format!(
"## Running run-pass tests in {} against miri for target {}{}",
@ -81,6 +81,9 @@ fn miri_pass(path: &str, target: &str, opt: bool) {
let mut flags = Vec::new();
if opt {
flags.push("-Zmir-opt-level=3".to_owned());
} else if !noseed {
// Run with intptrcast. Avoid test matrix explosion by doing either this or opt-level=3.
flags.push("-Zmiri-seed=".to_owned());
}
run_tests("ui", path, target, flags);
@ -104,11 +107,14 @@ fn get_target() -> String {
}
fn run_pass_miri(opt: bool) {
miri_pass("tests/run-pass", &get_target(), opt);
miri_pass("tests/run-pass", &get_target(), opt, false);
miri_pass("tests/run-pass-noseed", &get_target(), opt, true);
}
fn compile_fail_miri(opt: bool) {
compile_fail("tests/compile-fail", &get_target(), opt);
if !cfg!(windows) { // FIXME re-enable on Windows
compile_fail("tests/compile-fail", &get_target(), opt);
}
}
fn test_runner(_tests: &[&()]) {

View File

@ -41,7 +41,7 @@ fn check_alloc<T: Alloc>(mut allocator: T) { unsafe {
}
} }
fn check_overalign_requests<T: Alloc>(mut allocator: T) {
fn check_align_requests<T: Alloc>(mut allocator: T) {
for &size in &[2, 8, 64] { // size less than and bigger than alignment
for &align in &[4, 8, 16, 32] { // Be sure to cover less than and bigger than `MIN_ALIGN` for all architectures
let iterations = 32;
@ -88,8 +88,8 @@ fn box_to_global() {
fn main() {
check_alloc(System);
check_alloc(Global);
check_overalign_requests(System);
check_overalign_requests(Global);
check_align_requests(System);
check_align_requests(Global);
global_to_box();
box_to_global();
}

View File

@ -0,0 +1,26 @@
// compile-flags: -Zmiri-seed=0000000000000000
// This returns a miri pointer at type usize, if the argument is a proper pointer
fn transmute_ptr_to_int<T>(x: *const T) -> usize {
unsafe { std::mem::transmute(x) }
}
fn main() {
// Some casting-to-int with arithmetic.
let x = &42 as *const i32 as usize;
let y = x * 2;
assert_eq!(y, x + x);
let z = y as u8 as usize;
assert_eq!(z, y % 256);
// Pointer string formatting! We can't check the output as it changes when libstd changes,
// but we can make sure Miri does not error.
format!("{:?}", &mut 13 as *mut _);
// Check that intptrcast is triggered for explicit casts and that it is consistent with
// transmuting.
let a: *const i32 = &42;
let b = transmute_ptr_to_int(a) as u8;
let c = a as usize as u8;
assert_eq!(b, c);
}

View File

@ -1,4 +1,5 @@
//ignore-windows: Uses POSIX APIs
//compile-flags: -Zmiri-seed=
#![feature(rustc_private)]
@ -7,6 +8,14 @@
extern crate libc;
fn main() {
// Test that small allocations sometimes *are* not very aligned.
let saw_unaligned = (0..64).any(|_| unsafe {
let p = libc::malloc(3);
libc::free(p);
(p as usize) % 4 != 0 // find any that this is *not* 4-aligned
});
assert!(saw_unaligned);
unsafe {
// Use calloc for initialized memory
let p1 = libc::calloc(20, 1);

View File

@ -1,3 +1,4 @@
// FIXME move this to run-pass, it should work with intptrcast.
use std::mem;
use std::ptr;

View File

@ -1,3 +1,5 @@
// FIXME move this to run-pass, it should work with intptrcast.
fn f() -> i32 { 42 }
fn main() {

View File

@ -1,14 +0,0 @@
// compile-flags: -Zmiri-seed=0000000000000000
fn main() {
// Some casting-to-int with arithmetic.
let x = &42 as *const i32 as usize;
let y = x * 2;
assert_eq!(y, x + x);
let z = y as u8 as usize;
assert_eq!(z, y % 256);
// Pointer string formatting! We can't check the output as it changes when libstd changes,
// but we can make sure Miri does not error.
format!("{:?}", &mut 13 as *mut _);
}

View File

@ -1,6 +0,0 @@
// compile-flags: -Zmiri-seed=
fn main() {
println!("Hello {}", 13);
println!("{:0<width$}", "hello", width = 10);
}

View File

@ -1,2 +0,0 @@
Hello 13
hello00000

View File

@ -1,3 +1,6 @@
// Validation disallows this becuase the reference is never cast to a raw pointer.
// compile-flags: -Zmiri-disable-validation
fn main() {
// If we are careful, we can exploit data layout...
let raw = unsafe {