rust/src/operator.rs

use rustc::ty::Ty;
use rustc::mir;
use crate::*;
pub trait EvalContextExt<'tcx> {
fn ptr_op(
&self,
bin_op: mir::BinOp,
left: ImmTy<'tcx, Tag>,
right: ImmTy<'tcx, Tag>,
) -> InterpResult<'tcx, (Scalar<Tag>, bool)>;
fn ptr_int_arithmetic(
&self,
bin_op: mir::BinOp,
left: Pointer<Tag>,
right: u128,
signed: bool,
) -> InterpResult<'tcx, (Scalar<Tag>, bool)>;
fn ptr_eq(
&self,
left: Scalar<Tag>,
right: Scalar<Tag>,
) -> InterpResult<'tcx, bool>;
fn pointer_offset_inbounds(
&self,
ptr: Scalar<Tag>,
pointee_ty: Ty<'tcx>,
offset: i64,
) -> InterpResult<'tcx, Scalar<Tag>>;
}
impl<'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'mir, 'tcx> {
fn ptr_op(
&self,
bin_op: mir::BinOp,
left: ImmTy<'tcx, Tag>,
right: ImmTy<'tcx, Tag>,
) -> InterpResult<'tcx, (Scalar<Tag>, bool)> {
use rustc::mir::BinOp::*;
trace!("ptr_op: {:?} {:?} {:?}", *left, bin_op, *right);
// Operations that support fat pointers
match bin_op {
Eq | Ne => {
let eq = match (*left, *right) {
(Immediate::Scalar(left), Immediate::Scalar(right)) =>
self.ptr_eq(left.not_undef()?, right.not_undef()?)?,
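// Fat pointers (e.g. `&[T]` or `&dyn Trait`) are `ScalarPair`s: a data pointer plus
// a length or vtable pointer. They compare equal only if both components are equal.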
(Immediate::ScalarPair(left1, left2), Immediate::ScalarPair(right1, right2)) =>
self.ptr_eq(left1.not_undef()?, right1.not_undef()?)? &&
self.ptr_eq(left2.not_undef()?, right2.not_undef()?)?,
_ => bug!("Type system should not allow comparing Scalar with ScalarPair"),
};
return Ok((Scalar::from_bool(if bin_op == Eq { eq } else { !eq }), false));
}
_ => {},
}
// Now we expect no more fat pointers.
let left_layout = left.layout;
let left = left.to_scalar()?;
let right_layout = right.layout;
let right = right.to_scalar()?;
debug_assert!(left.is_ptr() || right.is_ptr() || bin_op == Offset);
match bin_op {
Offset => {
let pointee_ty = left_layout.ty
.builtin_deref(true)
.expect("Offset called on non-ptr type")
.ty;
let ptr = self.pointer_offset_inbounds(
left,
pointee_ty,
right.to_isize(self)?,
)?;
Ok((ptr, false))
}
// These need both operands to be pointers, and they fail if the operands are not in the same allocation
Lt | Le | Gt | Ge | Sub if left.is_ptr() && right.is_ptr() => {
let left = left.to_ptr().expect("we checked is_ptr");
let right = right.to_ptr().expect("we checked is_ptr");
if left.alloc_id == right.alloc_id {
let res = match bin_op {
Lt => left.offset < right.offset,
Le => left.offset <= right.offset,
Gt => left.offset > right.offset,
Ge => left.offset >= right.offset,
Sub => {
// subtract the offsets
let left_offset = Scalar::from_uint(left.offset.bytes(), self.memory().pointer_size());
let right_offset = Scalar::from_uint(right.offset.bytes(), self.memory().pointer_size());
let layout = self.layout_of(self.tcx.types.usize)?;
return self.binary_op(
Sub,
ImmTy::from_scalar(left_offset, layout),
ImmTy::from_scalar(right_offset, layout),
)
}
_ => bug!("We already established it has to be one of these operators."),
};
Ok((Scalar::from_bool(res), false))
} else {
// Both are pointers, but from different allocations.
err!(InvalidPointerMath)
}
}
// These work if the left operand is a pointer, and the right an integer
Add | BitAnd | Sub | Rem if left.is_ptr() && right.is_bits() => {
// Cast to i128 is fine as we checked the kind to be ptr-sized
self.ptr_int_arithmetic(
bin_op,
left.to_ptr().expect("we checked is_ptr"),
right.to_bits(self.memory().pointer_size()).expect("we checked is_bits"),
right_layout.abi.is_signed(),
)
}
// Commutative operators also work if the integer is on the left
Add | BitAnd if left.is_bits() && right.is_ptr() => {
// This is a commutative operation, just swap the operands
self.ptr_int_arithmetic(
bin_op,
right.to_ptr().expect("we checked is_ptr"),
left.to_bits(self.memory().pointer_size()).expect("we checked is_bits"),
left_layout.abi.is_signed(),
)
}
// Nothing else works
_ => err!(InvalidPointerMath),
}
}
fn ptr_eq(
&self,
left: Scalar<Tag>,
right: Scalar<Tag>,
) -> InterpResult<'tcx, bool> {
let size = self.pointer_size();
Ok(match (left, right) {
(Scalar::Raw { .. }, Scalar::Raw { .. }) =>
left.to_bits(size)? == right.to_bits(size)?,
(Scalar::Ptr(left), Scalar::Ptr(right)) => {
// Comparison illegal if one of them is out-of-bounds, *unless* they
// are in the same allocation.
if left.alloc_id == right.alloc_id {
left.offset == right.offset
} else {
// This accepts one-past-the-end. Thus, there is still technically
// some non-determinism that we do not fully rule out when two
// allocations sit right next to each other. The C/C++ standards are
// somewhat fuzzy about this case, so pragmatically speaking I think
// for now this check is "good enough".
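// (For example, if one allocation ends exactly where another begins, a one-past-the-end
// pointer into the first and the base pointer of the second have the same address on
// real hardware, yet this check would report them as unequal.)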
// FIXME: Once we support intptrcast, we could try to fix these holes.
// Dead allocations in Miri cannot overlap with live allocations, but
// on real hardware this can easily happen. Thus for comparisons we require
// both pointers to be live.
self.memory().check_bounds_ptr(left, InboundsCheck::Live, CheckInAllocMsg::InboundsTest)?;
self.memory().check_bounds_ptr(right, InboundsCheck::Live, CheckInAllocMsg::InboundsTest)?;
// Two in-bounds pointers, we can compare across allocations.
left == right
}
}
// Comparing ptr and integer.
(Scalar::Ptr(ptr), Scalar::Raw { data, size }) |
(Scalar::Raw { data, size }, Scalar::Ptr(ptr)) => {
assert_eq!(size as u64, self.pointer_size().bytes());
let bits = data as u64;
// Case I: Comparing real pointers with "small" integers.
// Really we should only do this for NULL, but pragmatically speaking on non-bare-metal systems,
// an allocation will never be at the very bottom of the address space.
// Such comparisons can arise when comparing empty slices, which sometimes are "fake"
// integer pointers (okay because the slice is empty) and sometimes point into a
// real allocation.
// The most common source of such integer pointers is `NonNull::dangling()`, whose
// address equals the type's alignment. `i128` might have an alignment of 16 bytes, but few types have
// alignment 32 or higher, hence the limit of 32.
// FIXME: Once we support intptrcast, we could try to fix these holes.
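// For example, `NonNull::<u64>::dangling()` has address `mem::align_of::<u64>()`,
// i.e. 8 on typical 64-bit targets, which is below the limit of 32.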
if bits < 32 {
// Test if the ptr is in-bounds of some allocation (live or dead); then it cannot
// be NULL. Even pointers into dead allocations cannot be NULL.
if self.memory().check_bounds_ptr(ptr, InboundsCheck::MaybeDead, CheckInAllocMsg::NullPointerTest).is_ok() {
return Ok(false);
}
}
let (alloc_size, alloc_align) = self.memory()
.get_size_and_align(ptr.alloc_id, InboundsCheck::MaybeDead)
.expect("determining size+align of dead ptr cannot fail");
// Case II: Alignment gives it away
if ptr.offset.bytes() % alloc_align.bytes() == 0 {
// The offset maintains the allocation alignment, so we know `base+offset`
// is aligned by `alloc_align`.
// FIXME: We could be even more general, e.g., offset 2 into a 4-aligned
// allocation cannot equal 3.
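// Example: for a 16-aligned allocation with `ptr.offset == 32`, the pointer's absolute
// address is a multiple of 16, so it cannot equal an integer such as 42 that is not.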
if bits % alloc_align.bytes() != 0 {
// The integer is *not* aligned. So they cannot be equal.
return Ok(false);
}
}
// Case III: The integer is too big, and the allocation goes on a bit
// without wrapping around the address space.
{
// Compute the highest address at which this allocation could live.
// Subtract one more, because it must be possible to add the size
// to the base address without overflowing; that is, the very last address
// of the address space is never dereferenceable (but it can be in-bounds, i.e.,
// one-past-the-end).
let max_base_addr =
((1u128 << self.pointer_size().bits())
- u128::from(alloc_size.bytes())
- 1
) as u64;
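// Example: with 16-bit pointers and a 100-byte allocation, the base address can be
// at most 0x10000 - 100 - 1 = 0xFF9B; any integer above `0xFF9B + ptr.offset`
// can therefore never equal this pointer.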
if let Some(max_addr) = max_base_addr.checked_add(ptr.offset.bytes()) {
if bits > max_addr {
// The integer is too big, this cannot possibly be equal.
return Ok(false)
}
}
}
// None of the supported cases.
return err!(InvalidPointerMath);
}
})
}
fn ptr_int_arithmetic(
&self,
bin_op: mir::BinOp,
left: Pointer<Tag>,
right: u128,
signed: bool,
) -> InterpResult<'tcx, (Scalar<Tag>, bool)> {
use rustc::mir::BinOp::*;
fn map_to_primval((res, over): (Pointer<Tag>, bool)) -> (Scalar<Tag>, bool) {
(Scalar::Ptr(res), over)
}
Ok(match bin_op {
Sub =>
// The only way this can overflow is by underflowing, so the signedness of the right
// operand does not matter.
map_to_primval(left.overflowing_signed_offset(-(right as i128), self)),
Add if signed =>
map_to_primval(left.overflowing_signed_offset(right as i128, self)),
Add if !signed =>
map_to_primval(left.overflowing_offset(Size::from_bytes(right as u64), self)),
BitAnd if !signed => {
let ptr_base_align = self.memory().get(left.alloc_id)?.align.bytes();
let base_mask = {
// FIXME: use `interpret::truncate`, once that takes a `Size` instead of a `Layout`.
let shift = 128 - self.memory().pointer_size().bits();
let value = !(ptr_base_align as u128 - 1);
// Truncate (shift left to drop the leftover high bits, shift right to fill with zeroes).
(value << shift) >> shift
};
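// For example, on a 64-bit target with an 8-aligned allocation, `base_mask` is
// `!(8 - 1)` truncated to 64 bits, i.e. `0xffff_ffff_ffff_fff8`.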
let ptr_size = self.memory().pointer_size();
trace!("ptr BitAnd, align {}, operand {:#010x}, base_mask {:#010x}",
ptr_base_align, right, base_mask);
if right & base_mask == base_mask {
// Case 1: the base address bits are all preserved, i.e., right is all-1 there.
let offset = (left.offset.bytes() as u128 & right) as u64;
(
Scalar::Ptr(Pointer::new_with_tag(
left.alloc_id,
Size::from_bytes(offset),
left.tag,
)),
false,
)
} else if right & base_mask == 0 {
// Case 2: the base address bits are all taken away, i.e., right is all-0 there.
let v = Scalar::from_uint((left.offset.bytes() as u128) & right, ptr_size);
(v, false)
} else {
return err!(ReadPointerAsBytes);
}
}
Rem if !signed => {
// Doing modulo a divisor of the alignment is allowed.
// (Intuition: modulo a divisor leaks less information.)
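// Example: if the base address is a multiple of 8 and `right == 4`, then 4 divides 8,
// so `(base + offset) % 4 == offset % 4` and the base address cancels out.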
let ptr_base_align = self.memory().get(left.alloc_id)?.align.bytes();
let right = right as u64;
let ptr_size = self.memory().pointer_size();
if right == 1 {
// Modulo 1 is always 0.
(Scalar::from_uint(0u32, ptr_size), false)
} else if ptr_base_align % right == 0 {
// The base address would be cancelled out by the modulo operation, so we can
// just take the modulo of the offset.
(
Scalar::from_uint((left.offset.bytes() % right) as u128, ptr_size),
false,
)
} else {
return err!(ReadPointerAsBytes);
}
}
_ => {
let msg = format!(
"unimplemented binary op on pointer {:?}: {:?}, {:?} ({})",
bin_op,
left,
right,
if signed { "signed" } else { "unsigned" }
);
return err!(Unimplemented(msg));
}
})
}
/// Raises an error if the offset moves the pointer outside of its allocation.
/// We consider ZSTs their own huge allocation that doesn't overlap with anything (and nothing
/// moves in there because the size is 0). We also consider the NULL pointer its own separate
/// allocation, and all the remaining integer pointers their own allocation.
fn pointer_offset_inbounds(
&self,
ptr: Scalar<Tag>,
pointee_ty: Ty<'tcx>,
offset: i64,
) -> InterpResult<'tcx, Scalar<Tag>> {
// FIXME: assuming here that type size is less than `i64::max_value()`.
let pointee_size = self.layout_of(pointee_ty)?.size.bytes() as i64;
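// `offset` counts elements of `pointee_ty`; convert it to a byte offset, erroring on overflow.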
let offset = offset
.checked_mul(pointee_size)
.ok_or_else(|| InterpError::Overflow(mir::BinOp::Mul))?;
// Now let's see what kind of pointer this is.
if let Scalar::Ptr(ptr) = ptr {
// Both old and new pointer must be in-bounds of a *live* allocation.
// (Of the same allocation, but that part is trivial with our representation.)
self.memory().check_bounds_ptr(ptr, InboundsCheck::Live, CheckInAllocMsg::InboundsTest)?;
let ptr = ptr.signed_offset(offset, self)?;
self.memory().check_bounds_ptr(ptr, InboundsCheck::Live, CheckInAllocMsg::InboundsTest)?;
Ok(Scalar::Ptr(ptr))
} else {
// An integer pointer. It can only be offset by 0, and we pretend there
// is a little zero-sized allocation here.
if offset == 0 {
Ok(ptr)
} else {
err!(InvalidPointerMath)
}
}
}
}