rust/src/shims/intrinsics.rs

use std::iter;

use rustc_apfloat::Float;
use rustc::mir;
use rustc::mir::interpret::{InterpResult, PointerArithmetic};
use rustc::ty::layout::{self, LayoutOf, Size, Align};
use rustc::ty;
use syntax::source_map::Span;

use crate::{
    PlaceTy, OpTy, Immediate, Scalar, Tag,
    OperatorEvalContextExt
};

impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    fn call_intrinsic(
        &mut self,
        span: Span,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Tag>],
        dest: PlaceTy<'tcx, Tag>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if this.emulate_intrinsic(span, instance, args, dest)? {
            return Ok(());
        }
        let tcx = &{this.tcx.tcx};
        let substs = instance.substs;

        // All these intrinsics take raw pointers, so if we access memory directly
        // (as opposed to through a place), we have to remember to erase any tag
        // that might still hang around!

        let intrinsic_name = &*tcx.item_name(instance.def_id()).as_str();
        match intrinsic_name {
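            // `arith_offset` is wrapping pointer arithmetic: the offset is scaled by the pointee
            // size with a wrapping multiplication and applied without any inbounds requirement.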
            "arith_offset" => {
                let offset = this.read_scalar(args[1])?.to_isize(this)?;
                let ptr = this.read_scalar(args[0])?.not_undef()?;
                let pointee_ty = substs.type_at(0);
                let pointee_size = this.layout_of(pointee_ty)?.size.bytes() as i64;
                let offset = offset.overflowing_mul(pointee_size).0;
                let result_ptr = ptr.ptr_wrapping_signed_offset(offset, this);
                this.write_scalar(result_ptr, dest)?;
            }
            "assume" => {
                let cond = this.read_scalar(args[0])?.to_bool()?;
                if !cond {
                    throw_ub_format!("`assume` intrinsic called with `false`");
                }
            }
            "volatile_load" => {
                let place = this.deref_operand(args[0])?;
                this.copy_op(place.into(), dest)?;
            }
            "volatile_store" => {
                let place = this.deref_operand(args[0])?;
                this.copy_op(args[1], place.into())?;
            }
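            // Atomic loads and stores: Miri is single-threaded, so these are ordinary memory
            // accesses, but the size-based alignment requirement of atomics is still enforced.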
            "atomic_load" |
            "atomic_load_relaxed" |
            "atomic_load_acq" => {
                let place = this.deref_operand(args[0])?;
                let val = this.read_scalar(place.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be 8-aligned).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
                this.write_scalar(val, dest)?;
            }
            "atomic_store" |
            "atomic_store_relaxed" |
            "atomic_store_rel" => {
                let place = this.deref_operand(args[0])?;
                let val = this.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be 8-aligned).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
                this.write_scalar(val, place.into())?;
            }
            "atomic_fence_acq" |
            "atomic_fence_rel" |
            "atomic_fence_acqrel" |
            "atomic_fence" => {
                // we are inherently single-threaded and single-cored, this is a no-op
            }
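            // Atomic exchange: return the old value and unconditionally write the new one.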
            _ if intrinsic_name.starts_with("atomic_xchg") => {
                let place = this.deref_operand(args[0])?;
                let new = this.read_scalar(args[1])?;
                let old = this.read_scalar(place.into())?;
                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be 8-aligned).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
                this.write_scalar(old, dest)?; // old value is returned
                this.write_scalar(new, place.into())?;
            }
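            // Atomic compare-exchange: the destination receives a pair of the old value and a
            // success flag; the new value is only written if the old value matched the expected one.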
            _ if intrinsic_name.starts_with("atomic_cxchg") => {
                let place = this.deref_operand(args[0])?;
                let expect_old = this.read_immediate(args[1])?; // read as immediate for the sake of `binary_op()`
                let new = this.read_scalar(args[2])?;
                let old = this.read_immediate(place.into())?; // read as immediate for the sake of `binary_op()`
                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be 8-aligned).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
                // binary_op will bail if either of them is not a scalar
                let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
                let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
                this.write_immediate(res, dest)?; // old value is returned
                // update ptr depending on comparison
                if eq.to_bool()? {
                    this.write_scalar(new, place.into())?;
                }
            }
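            // Atomic read-modify-write: fetch the old value, apply the binary operation selected
            // by the intrinsic name (negating the result for `nand`), and store the result back.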
"atomic_or" |
"atomic_or_acq" |
"atomic_or_rel" |
"atomic_or_acqrel" |
"atomic_or_relaxed" |
"atomic_xor" |
"atomic_xor_acq" |
"atomic_xor_rel" |
"atomic_xor_acqrel" |
"atomic_xor_relaxed" |
"atomic_and" |
"atomic_and_acq" |
"atomic_and_rel" |
"atomic_and_acqrel" |
"atomic_and_relaxed" |
2019-02-06 11:38:40 +01:00
"atomic_nand" |
"atomic_nand_acq" |
"atomic_nand_rel" |
"atomic_nand_acqrel" |
"atomic_nand_relaxed" |
"atomic_xadd" |
"atomic_xadd_acq" |
"atomic_xadd_rel" |
"atomic_xadd_acqrel" |
"atomic_xadd_relaxed" |
"atomic_xsub" |
"atomic_xsub_acq" |
"atomic_xsub_rel" |
"atomic_xsub_acqrel" |
"atomic_xsub_relaxed" => {
let place = this.deref_operand(args[0])?;
if !place.layout.ty.is_integral() {
2019-08-03 20:31:33 +02:00
bug!("Atomic arithmetic operations only work on integer types");
}
let rhs = this.read_immediate(args[1])?;
let old = this.read_immediate(place.into())?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
2019-10-18 11:11:50 +09:00
this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
this.write_immediate(*old, dest)?; // old value is returned
2019-02-06 11:38:40 +01:00
let (op, neg) = match intrinsic_name.split('_').nth(1).unwrap() {
"or" => (mir::BinOp::BitOr, false),
"xor" => (mir::BinOp::BitXor, false),
"and" => (mir::BinOp::BitAnd, false),
"xadd" => (mir::BinOp::Add, false),
"xsub" => (mir::BinOp::Sub, false),
"nand" => (mir::BinOp::BitAnd, true),
2017-03-14 13:05:51 +01:00
_ => bug!(),
2016-11-03 17:32:06 +01:00
};
2018-10-12 09:07:56 +02:00
// Atomics wrap around on overflow.
2019-08-10 21:19:25 +02:00
let val = this.binary_op(op, old, rhs)?;
2019-02-06 11:38:40 +01:00
let val = if neg {
2019-08-10 21:19:25 +02:00
this.unary_op(mir::UnOp::Not, val)?
2019-02-06 11:38:40 +01:00
} else {
val
};
2019-08-10 21:19:25 +02:00
this.write_immediate(*val, place.into())?;
}
            "breakpoint" => unimplemented!(), // halt miri
"copy" |
"copy_nonoverlapping" => {
2017-03-29 09:10:05 +02:00
let elem_ty = substs.type_at(0);
let elem_layout = this.layout_of(elem_ty)?;
2017-12-06 15:03:24 +01:00
let elem_size = elem_layout.size.bytes();
let count = this.read_scalar(args[2])?.to_usize(this)?;
2018-11-23 09:46:51 +01:00
let elem_align = elem_layout.align.abi;
let size = Size::from_bytes(count * elem_size);
let src = this.read_scalar(args[0])?.not_undef()?;
2019-10-18 11:11:50 +09:00
let src = this.memory.check_ptr_access(src, size, elem_align)?;
let dest = this.read_scalar(args[1])?.not_undef()?;
2019-10-18 11:11:50 +09:00
let dest = this.memory.check_ptr_access(dest, size, elem_align)?;
if let (Some(src), Some(dest)) = (src, dest) {
2019-10-18 11:11:50 +09:00
this.memory.copy(
src,
dest,
size,
intrinsic_name.ends_with("_nonoverlapping"),
)?;
}
2016-09-20 16:05:30 +02:00
}
"discriminant_value" => {
let place = this.deref_operand(args[0])?;
let discr_val = this.read_discriminant(place.into())?.0;
this.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
2016-09-20 16:05:30 +02:00
}
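            // Host-float math functions: the operand is reassembled from its raw bits, the
            // operation selected by intrinsic name is evaluated on the host, and the result
            // bits are written back.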
"sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
"log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" | "roundf32" => {
2019-06-09 00:12:57 +02:00
// FIXME: Using host floats.
let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
2019-09-05 18:17:58 +02:00
let f = match intrinsic_name {
2017-03-14 12:35:38 +01:00
"sinf32" => f.sin(),
"fabsf32" => f.abs(),
"cosf32" => f.cos(),
"sqrtf32" => f.sqrt(),
"expf32" => f.exp(),
"exp2f32" => f.exp2(),
"logf32" => f.ln(),
"log10f32" => f.log10(),
"log2f32" => f.log2(),
"floorf32" => f.floor(),
"ceilf32" => f.ceil(),
"truncf32" => f.trunc(),
"roundf32" => f.round(),
2017-03-14 12:35:38 +01:00
_ => bug!(),
};
2019-06-09 00:12:57 +02:00
this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
2016-09-20 16:05:30 +02:00
}
"sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
"log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" | "roundf64" => {
2019-06-09 00:12:57 +02:00
// FIXME: Using host floats.
let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
2019-09-05 18:17:58 +02:00
let f = match intrinsic_name {
2017-03-14 12:35:38 +01:00
"sinf64" => f.sin(),
"fabsf64" => f.abs(),
"cosf64" => f.cos(),
"sqrtf64" => f.sqrt(),
"expf64" => f.exp(),
"exp2f64" => f.exp2(),
"logf64" => f.ln(),
"log10f64" => f.log10(),
"log2f64" => f.log2(),
"floorf64" => f.floor(),
"ceilf64" => f.ceil(),
"truncf64" => f.trunc(),
"roundf64" => f.round(),
2017-03-14 12:35:38 +01:00
_ => bug!(),
};
2019-06-09 00:12:57 +02:00
this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
2016-09-20 16:05:30 +02:00
}
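            // The `*_fast` float intrinsics are evaluated like the ordinary operations; the
            // fast-math license to assume finite, non-NaN operands is not exploited here.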
"fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
let a = this.read_immediate(args[0])?;
let b = this.read_immediate(args[1])?;
2019-09-05 18:17:58 +02:00
let op = match intrinsic_name {
"fadd_fast" => mir::BinOp::Add,
"fsub_fast" => mir::BinOp::Sub,
"fmul_fast" => mir::BinOp::Mul,
"fdiv_fast" => mir::BinOp::Div,
"frem_fast" => mir::BinOp::Rem,
_ => bug!(),
};
this.binop_ignore_overflow(op, a, b, dest)?;
2016-09-20 16:05:30 +02:00
}
"minnumf32" | "maxnumf32" => {
let a = this.read_scalar(args[0])?.to_f32()?;
let b = this.read_scalar(args[1])?.to_f32()?;
2019-09-05 18:17:58 +02:00
let res = if intrinsic_name.starts_with("min") {
a.min(b)
} else {
a.max(b)
};
this.write_scalar(Scalar::from_f32(res), dest)?;
}
"minnumf64" | "maxnumf64" => {
let a = this.read_scalar(args[0])?.to_f64()?;
let b = this.read_scalar(args[1])?.to_f64()?;
2019-09-05 18:17:58 +02:00
let res = if intrinsic_name.starts_with("min") {
a.min(b)
} else {
a.max(b)
};
this.write_scalar(Scalar::from_f64(res), dest)?;
}
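            // `exact_div` defers to the shared helper, which checks that the division leaves
            // no remainder.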
            "exact_div" =>
                this.exact_div(
                    this.read_immediate(args[0])?,
                    this.read_immediate(args[1])?,
                    dest,
                )?,

            "forget" => {}

            "likely" | "unlikely" => {
                // These just return their argument
                let b = this.read_immediate(args[0])?;
                this.write_immediate(*b, dest)?;
            }
            "init" => {
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation with 0s and then copy that over.
                // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
                // However, this only affects direct calls of the intrinsic; calls to the stable
                // functions wrapping them do get their validation.
                // FIXME: should we check that the destination pointer is aligned even for ZSTs?
                if !dest.layout.is_zst() {
                    match dest.layout.abi {
                        layout::Abi::Scalar(ref s) => {
                            let x = Scalar::from_int(0, s.value.size(this));
                            this.write_scalar(x, dest)?;
                        }
                        layout::Abi::ScalarPair(ref s1, ref s2) => {
                            let x = Scalar::from_int(0, s1.value.size(this));
                            let y = Scalar::from_int(0, s2.value.size(this));
                            this.write_immediate(Immediate::ScalarPair(x.into(), y.into()), dest)?;
                        }
                        _ => {
                            // Do it in memory
                            let mplace = this.force_allocation(dest)?;
                            mplace.meta.unwrap_none(); // must be sized
                            this.memory.write_bytes(mplace.ptr, iter::repeat(0u8).take(dest.layout.size.bytes() as usize))?;
                        }
                    }
                }
            }
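            // `pref_align_of` returns the preferred (not the minimal ABI) alignment of the type,
            // as a pointer-sized integer.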
            "pref_align_of" => {
                let ty = substs.type_at(0);
                let layout = this.layout_of(ty)?;
                let align = layout.align.pref.bytes();
                let ptr_size = this.pointer_size();
                let align_val = Scalar::from_uint(align as u128, ptr_size);
                this.write_scalar(align_val, dest)?;
            }
            "move_val_init" => {
                let place = this.deref_operand(args[0])?;
                this.copy_op(args[1], place.into())?;
            }
            "offset" => {
                let offset = this.read_scalar(args[1])?.to_isize(this)?;
                let ptr = this.read_scalar(args[0])?.not_undef()?;
                let result_ptr = this.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
                this.write_scalar(result_ptr, dest)?;
            }
            "panic_if_uninhabited" => {
                let ty = substs.type_at(0);
                let layout = this.layout_of(ty)?;
                if layout.abi.is_uninhabited() {
                    // FIXME: This should throw a panic in the interpreted program instead.
                    throw_unsup_format!("Trying to instantiate uninhabited type {}", ty)
                }
            }
            "powf32" => {
                // FIXME: Using host floats.
                let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
                let f2 = f32::from_bits(this.read_scalar(args[1])?.to_u32()?);
                this.write_scalar(
                    Scalar::from_u32(f.powf(f2).to_bits()),
                    dest,
                )?;
            }
            "powf64" => {
                // FIXME: Using host floats.
                let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
                let f2 = f64::from_bits(this.read_scalar(args[1])?.to_u64()?);
                this.write_scalar(
                    Scalar::from_u64(f.powf(f2).to_bits()),
                    dest,
                )?;
            }
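            // Fused multiply-add is computed with the soft-float implementation (`rustc_apfloat`),
            // so the result does not depend on host floating-point behavior.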
            "fmaf32" => {
                let a = this.read_scalar(args[0])?.to_f32()?;
                let b = this.read_scalar(args[1])?.to_f32()?;
                let c = this.read_scalar(args[2])?.to_f32()?;
                let res = a.mul_add(b, c).value;
                this.write_scalar(
                    Scalar::from_f32(res),
                    dest,
                )?;
            }
            "fmaf64" => {
                let a = this.read_scalar(args[0])?.to_f64()?;
                let b = this.read_scalar(args[1])?.to_f64()?;
                let c = this.read_scalar(args[2])?.to_f64()?;
                let res = a.mul_add(b, c).value;
                this.write_scalar(
                    Scalar::from_f64(res),
                    dest,
                )?;
            }
            "powif32" => {
                // FIXME: Using host floats.
                let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
                let i = this.read_scalar(args[1])?.to_i32()?;
                this.write_scalar(
                    Scalar::from_u32(f.powi(i).to_bits()),
                    dest,
                )?;
            }
            "powif64" => {
                // FIXME: Using host floats.
                let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
                let i = this.read_scalar(args[1])?.to_i32()?;
                this.write_scalar(
                    Scalar::from_u64(f.powi(i).to_bits()),
                    dest,
                )?;
            }
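            // `size_of_val` and `align_of_val` compute the dynamic size/alignment of the
            // pointed-to value (relevant for slices and trait objects); extern types have no
            // known layout, hence the `expect`.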
"size_of_val" => {
let mplace = this.deref_operand(args[0])?;
let (size, _) = this.size_and_align_of_mplace(mplace)?
.expect("size_of_val called on extern type");
let ptr_size = this.pointer_size();
this.write_scalar(
2018-08-07 15:22:11 +02:00
Scalar::from_uint(size.bytes() as u128, ptr_size),
dest,
)?;
2016-09-20 16:05:30 +02:00
}
2016-11-03 17:32:06 +01:00
"min_align_of_val" |
"align_of_val" => {
let mplace = this.deref_operand(args[0])?;
let (_, align) = this.size_and_align_of_mplace(mplace)?
.expect("size_of_val called on extern type");
let ptr_size = this.pointer_size();
this.write_scalar(
2018-11-23 09:46:51 +01:00
Scalar::from_uint(align.bytes(), ptr_size),
dest,
)?;
2016-11-03 17:32:06 +01:00
}
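            // For `unchecked_div`/`unchecked_rem`, a zero divisor is undefined behavior, so it
            // is reported before evaluating the operation with overflow ignored.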
"unchecked_div" => {
let l = this.read_immediate(args[0])?;
let r = this.read_immediate(args[1])?;
2018-12-11 14:32:59 +01:00
let rval = r.to_scalar()?.to_bits(args[1].layout.size)?;
if rval == 0 {
2019-08-03 20:31:33 +02:00
throw_ub_format!("Division by 0 in unchecked_div");
}
this.binop_ignore_overflow(
mir::BinOp::Div,
l,
r,
dest,
)?;
}
"unchecked_rem" => {
let l = this.read_immediate(args[0])?;
let r = this.read_immediate(args[1])?;
2018-12-11 14:32:59 +01:00
let rval = r.to_scalar()?.to_bits(args[1].layout.size)?;
if rval == 0 {
2019-08-03 20:31:33 +02:00
throw_ub_format!("Division by 0 in unchecked_rem");
}
this.binop_ignore_overflow(
mir::BinOp::Rem,
l,
r,
dest,
)?;
}
"unchecked_add" | "unchecked_sub" | "unchecked_mul" => {
let l = this.read_immediate(args[0])?;
let r = this.read_immediate(args[1])?;
2019-09-05 18:17:58 +02:00
let op = match intrinsic_name {
"unchecked_add" => mir::BinOp::Add,
"unchecked_sub" => mir::BinOp::Sub,
"unchecked_mul" => mir::BinOp::Mul,
_ => bug!(),
};
2019-08-10 21:19:25 +02:00
let (res, overflowed, _ty) = this.overflowing_binary_op(op, l, r)?;
if overflowed {
2019-09-05 18:17:58 +02:00
throw_ub_format!("Overflowing arithmetic in {}", intrinsic_name);
}
this.write_scalar(res, dest)?;
}
"uninit" => {
// Check fast path: we don't want to force an allocation in case the destination is a simple value,
// but we also do not want to create a new allocation with 0s and then copy that over.
// FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
// However, this only affects direct calls of the intrinsic; calls to the stable
// functions wrapping them do get their validation.
// FIXME: should we check alignment for ZSTs?
use crate::ScalarMaybeUndef;
if !dest.layout.is_zst() {
match dest.layout.abi {
layout::Abi::Scalar(..) => {
let x = ScalarMaybeUndef::Undef;
this.write_immediate(Immediate::Scalar(x), dest)?;
}
layout::Abi::ScalarPair(..) => {
let x = ScalarMaybeUndef::Undef;
this.write_immediate(Immediate::ScalarPair(x, x), dest)?;
}
_ => {
// Do it in memory
let mplace = this.force_allocation(dest)?;
mplace.meta.unwrap_none();
let ptr = mplace.ptr.to_ptr()?;
2019-10-19 12:28:39 +02:00
// We know the return place is in-bounds
2019-10-18 11:11:50 +09:00
this.memory
.get_mut(ptr.alloc_id)?
.mark_definedness(ptr, dest.layout.size, false);
}
}
}
}
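            // `write_bytes` fills `count * size_of::<T>()` bytes starting at `ptr` with the
            // given byte value.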
            "write_bytes" => {
                let ty = substs.type_at(0);
                let ty_layout = this.layout_of(ty)?;
                let val_byte = this.read_scalar(args[1])?.to_u8()?;
                let ptr = this.read_scalar(args[0])?.not_undef()?;
                let count = this.read_scalar(args[2])?.to_usize(this)?;
                let byte_count = ty_layout.size * count;
                this.memory.write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
            }

            name => throw_unsup_format!("unimplemented intrinsic: {}", name),
        }

        Ok(())
    }
}