Implement some intrinsics

This commit is contained in:
hkalbasi 2023-03-20 21:48:01 +03:30
parent 8e73ea5253
commit 3303a6eff5
7 changed files with 521 additions and 145 deletions

View File

@ -11,6 +11,8 @@
ConstEvalError,
};
mod intrinsics;
fn simplify(e: ConstEvalError) -> ConstEvalError {
match e {
ConstEvalError::MirEvalError(MirEvalError::InFunction(_, e)) => {
@ -82,6 +84,49 @@ fn bit_op() {
check_number(r#"const GOAL: i8 = 1 << 8"#, 0);
}
#[test]
fn casts() {
check_number(r#"const GOAL: usize = 12 as *const i32 as usize"#, 12);
check_number(
r#"
//- minicore: coerce_unsized, index, slice
const GOAL: i32 = {
let a = [10, 20, 3, 15];
let x: &[i32] = &a;
let y: *const [i32] = x;
let z = y as *const i32;
unsafe { *z }
};
"#,
10,
);
check_number(
r#"
//- minicore: coerce_unsized, index, slice
const GOAL: i16 = {
let a = &mut 5;
let z = a as *mut _;
unsafe { *z }
};
"#,
5,
);
check_number(
r#"
//- minicore: coerce_unsized, index, slice
const GOAL: usize = {
let a = [10, 20, 3, 15];
let x: &[i32] = &a;
let y: *const [i32] = x;
let z = y as *const [u8]; // slice fat pointer casts don't touch metadata
let w = unsafe { &*z };
w.len()
};
"#,
4,
);
}
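Aside on the third case above: slice-to-slice raw pointer casts copy the metadata word verbatim, so the length stays the element count of the original slice, not a byte count. The same behavior can be checked against rustc with a standalone snippet (not part of the commit):

fn main() {
    let a = [10i32, 20, 3, 15];
    let y: *const [i32] = &a[..];
    let z = y as *const [u8];
    // len() reads the metadata word, which the cast copied unchanged:
    // 4 elements, even though the array occupies 16 bytes.
    assert_eq!(unsafe { &*z }.len(), 4);
}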
#[test]
fn locals() {
check_number(
@ -279,20 +324,6 @@ const fn add(x: usize, y: usize) -> usize {
);
}
#[test]
fn intrinsics() {
check_number(
r#"
extern "rust-intrinsic" {
pub fn size_of<T>() -> usize;
}
const GOAL: usize = size_of::<i32>();
"#,
4,
);
}
#[test]
fn trait_basic() {
check_number(
@ -1353,6 +1384,17 @@ fn array_and_index() {
check_number(
r#"
//- minicore: coerce_unsized, index, slice
const GOAL: usize = {
let a = [1, 2, 3];
let x: &[i32] = &a;
let y = &*x;
y.len()
};"#,
3,
);
check_number(
r#"
//- minicore: coerce_unsized, index, slice
const GOAL: usize = [1, 2, 3, 4, 5].len();"#,
5,
);

View File

@ -0,0 +1,162 @@
use super::*;
#[test]
fn size_of() {
check_number(
r#"
extern "rust-intrinsic" {
pub fn size_of<T>() -> usize;
}
const GOAL: usize = size_of::<i32>();
"#,
4,
);
}
#[test]
fn transmute() {
check_number(
r#"
extern "rust-intrinsic" {
pub fn transmute<T, U>(e: T) -> U;
}
const GOAL: i32 = transmute((1i16, 1i16));
"#,
0x00010001,
);
}
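The expected value follows from layout: (1i16, 1i16) is the bytes 01 00 01 00 on a little-endian target, which read back as an i32 is 0x00010001. The std wrapper over the same intrinsic agrees (standalone check, assumes little-endian):

fn main() {
    let v: i32 = unsafe { std::mem::transmute((1i16, 1i16)) };
    assert_eq!(v, 0x00010001);
}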
#[test]
fn const_eval_select() {
check_number(
r#"
extern "rust-intrinsic" {
pub fn const_eval_select<ARG, F, G, RET>(arg: ARG, called_in_const: F, called_at_rt: G) -> RET
where
G: FnOnce<ARG, Output = RET>,
F: FnOnce<ARG, Output = RET>;
}
const fn in_const(x: i32, y: i32) -> i32 {
x + y
}
fn in_rt(x: i32, y: i32) -> i32 {
x + y
}
const GOAL: i32 = const_eval_select((2, 3), in_const, in_rt);
"#,
5,
);
}
#[test]
fn wrapping_add() {
check_number(
r#"
extern "rust-intrinsic" {
pub fn wrapping_add<T>(a: T, b: T) -> T;
}
const GOAL: u8 = wrapping_add(10, 250);
"#,
4,
);
}
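The expected 4 is plain modular arithmetic: (10 + 250) mod 256. The stable method backed by this intrinsic shows the same result:

fn main() {
    assert_eq!(10u8.wrapping_add(250), 4); // 260 wraps past u8::MAX to 4
}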
#[test]
fn offset() {
check_number(
r#"
//- minicore: coerce_unsized, index, slice
extern "rust-intrinsic" {
pub fn offset<T>(dst: *const T, offset: isize) -> *const T;
}
const GOAL: u8 = unsafe {
let ar: &[(u8, u8, u8)] = &[
(10, 11, 12),
(20, 21, 22),
(30, 31, 32),
(40, 41, 42),
(50, 51, 52),
];
let ar: *const [(u8, u8, u8)] = ar;
let ar = ar as *const (u8, u8, u8);
let element = offset(ar, 2);
element.1
};
"#,
31,
);
}
#[test]
fn arith_offset() {
check_number(
r#"
//- minicore: coerce_unsized, index, slice
extern "rust-intrinsic" {
pub fn arith_offset<T>(dst: *const T, offset: isize) -> *const T;
}
const GOAL: u8 = unsafe {
let ar: &[(u8, u8, u8)] = &[
(10, 11, 12),
(20, 21, 22),
(30, 31, 32),
(40, 41, 42),
(50, 51, 52),
];
let ar: *const [(u8, u8, u8)] = ar;
let ar = ar as *const (u8, u8, u8);
let element = arith_offset(arith_offset(ar, 102), -100);
element.1
};
"#,
31,
);
}
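Both intrinsics scale the offset by size_of::<T>(); the difference is that offset requires the result to stay in bounds, while arith_offset wraps, which is why the +102/-100 detour above is legal. Their stable counterparts show the same arithmetic (standalone sketch, not the commit's code):

fn main() {
    let ar = [(10u8, 11u8, 12u8), (20, 21, 22), (30, 31, 32), (40, 41, 42), (50, 51, 52)];
    let p = ar.as_ptr();
    let a = unsafe { p.add(2) }; // like offset(p, 2): +2 * size_of::<(u8, u8, u8)>() bytes
    let b = p.wrapping_offset(102).wrapping_offset(-100); // like arith_offset: may leave bounds
    assert_eq!(a, b);
    assert_eq!(unsafe { (*a).1 }, 31);
}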
#[test]
fn copy_nonoverlapping() {
check_number(
r#"
extern "rust-intrinsic" {
pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
}
const GOAL: u8 = unsafe {
let mut x = 2;
let y = 5;
copy_nonoverlapping(&y, &mut x, 1);
x
};
"#,
5,
);
}
#[test]
fn copy() {
check_number(
r#"
//- minicore: coerce_unsized, index, slice
extern "rust-intrinsic" {
pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
}
const GOAL: i32 = unsafe {
let mut x = [1i32, 2, 3, 4, 5];
let y = (&mut x as *mut _) as *mut i32;
let z = (y as usize + 4) as *const i32;
copy(z, y, 4);
x[0] + x[1] + x[2] + x[3] + x[4]
};
"#,
19,
);
}
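Why this test expects 19: z points 4 bytes (one i32) past y, so the copy shifts x[1..5] into x[0..4], yielding [2, 3, 4, 5, 5]. std::ptr::copy, the memmove-style wrapper over this intrinsic, reproduces it:

fn main() {
    let mut x = [1i32, 2, 3, 4, 5];
    let y = x.as_mut_ptr();
    let z = unsafe { y.add(1) } as *const i32;
    unsafe { std::ptr::copy(z, y, 4) }; // overlapping ranges are fine for `copy`
    assert_eq!(x, [2, 3, 4, 5, 5]);
    assert_eq!(x.iter().sum::<i32>(), 19);
}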

View File

@ -630,8 +630,15 @@ fn infer_expr_inner(&mut self, tgt_expr: ExprId, expected: &Expectation) -> Ty {
Expr::Cast { expr, type_ref } => {
let cast_ty = self.make_ty(type_ref);
// FIXME: propagate the "castable to" expectation
let _inner_ty = self.infer_expr_no_expect(*expr);
// FIXME check the cast...
let inner_ty = self.infer_expr_no_expect(*expr);
match (inner_ty.kind(Interner), cast_ty.kind(Interner)) {
(TyKind::Ref(_, _, inner), TyKind::Raw(_, cast)) => {
// FIXME: record invalid cast diagnostic in case of mismatch
self.unify(inner, cast);
}
// FIXME check the other kinds of cast...
_ => (),
}
cast_ty
}
Expr::Ref { expr, rawness, mutability } => {

View File

@ -96,11 +96,18 @@ enum Address {
use Address::*;
#[derive(Debug, Clone, Copy)]
struct Interval {
addr: Address,
size: usize,
}
#[derive(Debug, Clone)]
struct IntervalAndTy {
interval: Interval,
ty: Ty,
}
impl Interval {
fn new(addr: Address, size: usize) -> Self {
Self { addr, size }
@ -110,11 +117,37 @@ fn get<'a>(&self, memory: &'a Evaluator<'a>) -> Result<&'a [u8]> {
memory.read_memory(self.addr, self.size)
}
fn write_from_bytes(&self, memory: &mut Evaluator<'_>, bytes: &[u8]) -> Result<()> {
memory.write_memory(self.addr, bytes)
}
fn write_from_interval(&self, memory: &mut Evaluator<'_>, interval: Interval) -> Result<()> {
// FIXME: this could be more efficient
let bytes = &interval.get(memory)?.to_vec();
memory.write_memory(self.addr, bytes)
}
fn slice(self, range: Range<usize>) -> Interval {
Interval { addr: self.addr.offset(range.start), size: range.len() }
}
}
impl IntervalAndTy {
fn get<'a>(&self, memory: &'a Evaluator<'a>) -> Result<&'a [u8]> {
memory.read_memory(self.interval.addr, self.interval.size)
}
fn new(
addr: Address,
ty: Ty,
evaluator: &Evaluator<'_>,
locals: &Locals<'_>,
) -> Result<IntervalAndTy> {
let size = evaluator.size_of_sized(&ty, locals, "type of interval")?;
Ok(IntervalAndTy { interval: Interval { addr, size }, ty })
}
}
enum IntervalOrOwned {
Owned(Vec<u8>),
Borrowed(Interval),
@ -135,7 +168,7 @@ fn from_bytes(x: &[u8]) -> Result<Self> {
fn from_usize(x: usize) -> Self {
if x > usize::MAX / 2 {
Stack(usize::MAX - x)
Stack(x - usize::MAX / 2)
} else {
Heap(x)
}
@ -147,7 +180,7 @@ fn to_bytes(&self) -> Vec<u8> {
fn to_usize(&self) -> usize {
let as_num = match self {
Stack(x) => usize::MAX - *x,
Stack(x) => *x + usize::MAX / 2,
Heap(x) => *x,
};
as_num
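The point of switching from usize::MAX - x to x + usize::MAX / 2 is that the encoding becomes order-preserving: byte arithmetic performed on an encoded stack address (for example by the offset intrinsic) now lands on the right decoded offset. A standalone sketch of the scheme (illustrative names, not the evaluator's actual types):

const HALF: usize = usize::MAX / 2;

#[derive(Debug, Clone, Copy, PartialEq)]
enum Address {
    Stack(usize),
    Heap(usize),
}

fn from_usize(x: usize) -> Address {
    if x > HALF { Address::Stack(x - HALF) } else { Address::Heap(x) }
}

fn to_usize(a: Address) -> usize {
    match a {
        Address::Stack(x) => x + HALF,
        Address::Heap(x) => x,
    }
}

fn main() {
    // +2 bytes on the encoded address is +2 on the decoded stack offset;
    // the old `usize::MAX - x` encoding would have moved it backwards.
    assert_eq!(from_usize(to_usize(Address::Stack(40)) + 2), Address::Stack(42));
}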
@ -174,7 +207,7 @@ pub enum MirEvalError {
/// Means that the code had undefined behavior. We don't try to actively detect UB, but when it
/// is detected, this error type is used.
UndefinedBehavior(&'static str),
Panic,
Panic(String),
MirLowerError(FunctionId, MirLowerError),
TypeIsUnsized(Ty, &'static str),
NotSupported(String),
@ -197,7 +230,7 @@ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Self::UndefinedBehavior(arg0) => {
f.debug_tuple("UndefinedBehavior").field(arg0).finish()
}
Self::Panic => write!(f, "Panic"),
Self::Panic(msg) => write!(f, "Panic with message:\n{msg:?}"),
Self::TargetDataLayoutNotAvailable => write!(f, "TargetDataLayoutNotAvailable"),
Self::TypeIsUnsized(ty, it) => write!(f, "{ty:?} is unsized. {it} should be sized."),
Self::ExecutionLimitExceeded => write!(f, "execution limit exceeded"),
@ -289,7 +322,19 @@ pub fn new<'a>(
}
fn place_addr(&self, p: &Place, locals: &Locals<'_>) -> Result<Address> {
Ok(self.place_addr_and_ty(p, locals)?.0)
Ok(self.place_addr_and_ty_and_metadata(p, locals)?.0)
}
fn place_interval(&self, p: &Place, locals: &Locals<'_>) -> Result<Interval> {
let place_addr_and_ty = self.place_addr_and_ty_and_metadata(p, locals)?;
Ok(Interval {
addr: place_addr_and_ty.0,
size: self.size_of_sized(
&place_addr_and_ty.1,
locals,
"Type of place that we need its interval",
)?,
})
}
fn ptr_size(&self) -> usize {
@ -299,10 +344,15 @@ fn ptr_size(&self) -> usize {
}
}
fn place_addr_and_ty<'a>(&'a self, p: &Place, locals: &'a Locals<'a>) -> Result<(Address, Ty)> {
fn place_addr_and_ty_and_metadata<'a>(
&'a self,
p: &Place,
locals: &'a Locals<'a>,
) -> Result<(Address, Ty, Option<Interval>)> {
let mut addr = locals.ptr[p.local];
let mut ty: Ty =
self.ty_filler(&locals.body.locals[p.local].ty, locals.subst, locals.body.owner)?;
let mut metadata = None; // locals are always sized
for proj in &p.projection {
match proj {
ProjectionElem::Deref => {
@ -314,12 +364,18 @@ fn place_addr_and_ty<'a>(&'a self, p: &Place, locals: &'a Locals<'a>) -> Result<
))
}
};
metadata = if self.size_of(&ty, locals)?.is_none() {
Some(Interval { addr: addr.offset(self.ptr_size()), size: self.ptr_size() })
} else {
None
};
let x = from_bytes!(usize, self.read_memory(addr, self.ptr_size())?);
addr = Address::from_usize(x);
}
ProjectionElem::Index(op) => {
let offset =
from_bytes!(usize, self.read_memory(locals.ptr[*op], self.ptr_size())?);
metadata = None; // Result of index is always sized
match &ty.data(Interner).kind {
TyKind::Ref(_, _, inner) => match &inner.data(Interner).kind {
TyKind::Slice(inner) => {
@ -357,6 +413,7 @@ fn place_addr_and_ty<'a>(&'a self, p: &Place, locals: &'a Locals<'a>) -> Result<
.clone();
let offset = layout.fields.offset(f).bytes_usize();
addr = addr.offset(offset);
metadata = None; // tuple field is always sized
}
_ => return Err(MirEvalError::TypeError("Only tuple has tuple fields")),
},
@ -386,6 +443,8 @@ fn place_addr_and_ty<'a>(&'a self, p: &Place, locals: &'a Locals<'a>) -> Result<
.offset(u32::from(f.local_id.into_raw()) as usize)
.bytes_usize();
addr = addr.offset(offset);
// FIXME: support structs with unsized fields
metadata = None;
}
_ => return Err(MirEvalError::TypeError("Only adt has fields")),
},
@ -396,7 +455,7 @@ fn place_addr_and_ty<'a>(&'a self, p: &Place, locals: &'a Locals<'a>) -> Result<
ProjectionElem::OpaqueCast(_) => not_supported!("opaque cast"),
}
}
Ok((addr, ty))
Ok((addr, ty, metadata))
}
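For context on the metadata handling above: when the pointee is unsized, the pointer is two words wide, the data pointer followed by the metadata (a length, for slices), which is exactly the extra ptr-sized interval the Deref arm records. Checked against rustc under the usual fat-pointer layout (a reasonable but not guaranteed assumption):

fn main() {
    let a = [1i32, 2, 3];
    let fat: *const [i32] = &a[..];
    assert_eq!(
        std::mem::size_of::<*const [i32]>(),
        2 * std::mem::size_of::<usize>()
    );
    assert_eq!(fat as *const i32 as usize, a.as_ptr() as usize); // thin half: data pointer
    assert_eq!(unsafe { &*fat }.len(), 3); // metadata word: element count
}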
fn layout(&self, ty: &Ty) -> Result<Layout> {
@ -411,16 +470,23 @@ fn layout_adt(&self, adt: AdtId, subst: Substitution) -> Result<Layout> {
}
fn place_ty<'a>(&'a self, p: &Place, locals: &'a Locals<'a>) -> Result<Ty> {
Ok(self.place_addr_and_ty(p, locals)?.1)
Ok(self.place_addr_and_ty_and_metadata(p, locals)?.1)
}
fn operand_ty<'a>(&'a self, o: &'a Operand, locals: &'a Locals<'a>) -> Result<Ty> {
fn operand_ty(&self, o: &Operand, locals: &Locals<'_>) -> Result<Ty> {
Ok(match o {
Operand::Copy(p) | Operand::Move(p) => self.place_ty(p, locals)?,
Operand::Constant(c) => c.data(Interner).ty.clone(),
})
}
fn operand_ty_and_eval(&mut self, o: &Operand, locals: &Locals<'_>) -> Result<IntervalAndTy> {
Ok(IntervalAndTy {
interval: self.eval_operand(o, locals)?,
ty: self.operand_ty(o, locals)?,
})
}
fn interpret_mir(
&mut self,
body: &MirBody,
@ -498,14 +564,19 @@ fn interpret_mir(
cleanup: _,
from_hir_call: _,
} => {
let destination = self.place_interval(destination, &locals)?;
let fn_ty = self.operand_ty(func, &locals)?;
let args = args
.iter()
.map(|x| self.operand_ty_and_eval(x, &locals))
.collect::<Result<Vec<_>>>()?;
match &fn_ty.data(Interner).kind {
TyKind::Function(_) => {
let bytes = self.eval_operand(func, &locals)?;
self.exec_fn_pointer(bytes, destination, args, &locals)?;
self.exec_fn_pointer(bytes, destination, &args, &locals)?;
}
TyKind::FnDef(def, generic_args) => {
self.exec_fn_def(*def, generic_args, destination, args, &locals)?;
self.exec_fn_def(*def, generic_args, destination, &args, &locals)?;
}
x => not_supported!("unknown function type {x:?}"),
}
@ -545,8 +616,12 @@ fn eval_rvalue<'a>(
Ok(match r {
Rvalue::Use(x) => Borrowed(self.eval_operand(x, locals)?),
Rvalue::Ref(_, p) => {
let addr = self.place_addr(p, locals)?;
Owned(addr.to_bytes())
let (addr, _, metadata) = self.place_addr_and_ty_and_metadata(p, locals)?;
let mut r = addr.to_bytes();
if let Some(metadata) = metadata {
r.extend(metadata.get(self)?);
}
Owned(r)
}
Rvalue::Len(_) => not_supported!("rvalue len"),
Rvalue::UnaryOp(op, val) => {
@ -624,8 +699,12 @@ fn eval_rvalue<'a>(
let r = match op {
BinOp::Add => l128.overflowing_add(r128).0,
BinOp::Mul => l128.overflowing_mul(r128).0,
BinOp::Div => l128.checked_div(r128).ok_or(MirEvalError::Panic)?,
BinOp::Rem => l128.checked_rem(r128).ok_or(MirEvalError::Panic)?,
BinOp::Div => l128.checked_div(r128).ok_or_else(|| {
MirEvalError::Panic(format!("Overflow or division by zero in {op:?}"))
})?,
BinOp::Rem => l128.checked_rem(r128).ok_or_else(|| {
MirEvalError::Panic(format!("Overflow or division by zero in {op:?}"))
})?,
BinOp::Sub => l128.overflowing_sub(r128).0,
BinOp::BitAnd => l128 & r128,
BinOp::BitOr => l128 | r128,
@ -635,16 +714,16 @@ fn eval_rvalue<'a>(
let r = r.to_le_bytes();
for &k in &r[lc.len()..] {
if k != 0 && (k != 255 || !is_signed) {
return Err(MirEvalError::Panic);
return Err(MirEvalError::Panic(format!("Overflow in {op:?}")));
}
}
Owned(r[0..lc.len()].into())
}
BinOp::Shl | BinOp::Shr => {
let shift_amount = if r128 < 0 {
return Err(MirEvalError::Panic);
return Err(MirEvalError::Panic(format!("Overflow in {op:?}")));
} else if r128 > 128 {
return Err(MirEvalError::Panic);
return Err(MirEvalError::Panic(format!("Overflow in {op:?}")));
} else {
r128 as u8
};
@ -720,47 +799,54 @@ fn eval_rvalue<'a>(
}
Rvalue::ShallowInitBox(_, _) => not_supported!("shallow init box"),
Rvalue::CopyForDeref(_) => not_supported!("copy for deref"),
Rvalue::Aggregate(kind, values) => match kind {
AggregateKind::Array(_) => {
let mut r = vec![];
for x in values {
let value = self.eval_operand(x, locals)?.get(&self)?;
r.extend(value);
Rvalue::Aggregate(kind, values) => {
let values = values
.iter()
.map(|x| self.eval_operand(x, locals))
.collect::<Result<Vec<_>>>()?;
match kind {
AggregateKind::Array(_) => {
let mut r = vec![];
for x in values {
let value = x.get(&self)?;
r.extend(value);
}
Owned(r)
}
AggregateKind::Tuple(ty) => {
let layout = self.layout(&ty)?;
Owned(self.make_by_layout(
layout.size.bytes_usize(),
&layout,
None,
values.iter().copied(),
)?)
}
AggregateKind::Union(x, f) => {
let layout = self.layout_adt((*x).into(), Substitution::empty(Interner))?;
let offset = layout
.fields
.offset(u32::from(f.local_id.into_raw()) as usize)
.bytes_usize();
let op = values[0].get(&self)?;
let mut result = vec![0; layout.size.bytes_usize()];
result[offset..offset + op.len()].copy_from_slice(op);
Owned(result)
}
AggregateKind::Adt(x, subst) => {
let subst = self.subst_filler(subst, locals);
let (size, variant_layout, tag) =
self.layout_of_variant(*x, subst, locals)?;
Owned(self.make_by_layout(
size,
&variant_layout,
tag,
values.iter().copied(),
)?)
}
Owned(r)
}
AggregateKind::Tuple(ty) => {
let layout = self.layout(&ty)?;
Owned(self.make_by_layout(
layout.size.bytes_usize(),
&layout,
None,
values,
locals,
)?)
}
AggregateKind::Union(x, f) => {
let layout = self.layout_adt((*x).into(), Substitution::empty(Interner))?;
let offset = layout
.fields
.offset(u32::from(f.local_id.into_raw()) as usize)
.bytes_usize();
let op = self.eval_operand(&values[0], locals)?.get(&self)?;
let mut result = vec![0; layout.size.bytes_usize()];
result[offset..offset + op.len()].copy_from_slice(op);
Owned(result)
}
AggregateKind::Adt(x, subst) => {
let subst = self.subst_filler(subst, locals);
let (size, variant_layout, tag) = self.layout_of_variant(*x, subst, locals)?;
Owned(self.make_by_layout(size, &variant_layout, tag, values, locals)?)
}
},
}
Rvalue::Cast(kind, operand, target_ty) => match kind {
CastKind::PointerExposeAddress => not_supported!("exposing pointer address"),
CastKind::PointerFromExposedAddress => {
not_supported!("creating pointer from exposed address")
}
CastKind::Pointer(cast) => match cast {
PointerCast::ReifyFnPointer => {
let current_ty = self.operand_ty(operand, locals)?;
@ -818,7 +904,9 @@ fn eval_rvalue<'a>(
x => not_supported!("pointer cast {x:?}"),
},
CastKind::DynStar => not_supported!("dyn star cast"),
CastKind::IntToInt => {
CastKind::IntToInt
| CastKind::PointerExposeAddress
| CastKind::PointerFromExposedAddress => {
// FIXME: handle signed cast
let current = pad16(self.eval_operand(operand, locals)?.get(&self)?, false);
let dest_size =
@ -828,7 +916,12 @@ fn eval_rvalue<'a>(
CastKind::FloatToInt => not_supported!("float to int cast"),
CastKind::FloatToFloat => not_supported!("float to float cast"),
CastKind::IntToFloat => not_supported!("int to float cast"),
CastKind::PtrToPtr => not_supported!("ptr to ptr cast"),
CastKind::PtrToPtr => {
let current = pad16(self.eval_operand(operand, locals)?.get(&self)?, false);
let dest_size =
self.size_of_sized(target_ty, locals, "destination of ptr to ptr cast")?;
Owned(current[0..dest_size].to_vec())
}
CastKind::FnPtrToPtr => not_supported!("fn ptr to ptr cast"),
},
})
@ -895,16 +988,15 @@ fn make_by_layout(
size: usize, // Not necessarily equal to variant_layout.size
variant_layout: &Layout,
tag: Option<(usize, usize, i128)>,
values: &[Operand],
locals: &Locals<'_>,
values: impl Iterator<Item = Interval>,
) -> Result<Vec<u8>> {
let mut result = vec![0; size];
if let Some((offset, size, value)) = tag {
result[offset..offset + size].copy_from_slice(&value.to_le_bytes()[0..size]);
}
for (i, op) in values.iter().enumerate() {
for (i, op) in values.enumerate() {
let offset = variant_layout.fields.offset(i).bytes_usize();
let op = self.eval_operand(op, locals)?.get(&self)?;
let op = op.get(&self)?;
result[offset..offset + op.len()].copy_from_slice(op);
}
Ok(result)
@ -1196,28 +1288,89 @@ fn patch_addresses(
}
fn exec_intrinsic(
&self,
&mut self,
as_str: &str,
mut arg_bytes: impl Iterator<Item = Vec<u8>>,
args: &[IntervalAndTy],
generic_args: Substitution,
destination: Interval,
locals: &Locals<'_>,
) -> Result<Vec<u8>> {
) -> Result<()> {
match as_str {
"size_of" => {
let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)) else {
return Err(MirEvalError::TypeError("size_of generic arg is not provided"));
};
let size = self.size_of(ty, locals)?;
match size {
Some(x) => Ok(x.to_le_bytes().to_vec()),
None => return Err(MirEvalError::TypeError("size_of arg is unsized")),
}
let size = self.size_of_sized(ty, locals, "size_of arg")?;
destination.write_from_bytes(self, &size.to_le_bytes()[0..destination.size])
}
"wrapping_add" => {
let [lhs, rhs] = args else {
return Err(MirEvalError::TypeError("const_eval_select args are not provided"));
};
let lhs = u128::from_le_bytes(pad16(lhs.get(self)?, false));
let rhs = u128::from_le_bytes(pad16(rhs.get(self)?, false));
let ans = lhs.wrapping_add(rhs);
destination.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size])
}
"copy" | "copy_nonoverlapping" => {
let [src, dst, count] = args else {
return Err(MirEvalError::TypeError("copy_nonoverlapping args are not provided"));
};
let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)) else {
return Err(MirEvalError::TypeError("copy_nonoverlapping generic arg is not provided"));
};
let src = Address::from_bytes(src.get(self)?)?;
let dst = Address::from_bytes(dst.get(self)?)?;
let count = from_bytes!(usize, count.get(self)?);
let size = self.size_of_sized(ty, locals, "copy_nonoverlapping ptr type")?;
let size = count * size;
let src = Interval { addr: src, size };
let dst = Interval { addr: dst, size };
dst.write_from_interval(self, src)
}
"offset" | "arith_offset" => {
let [ptr, offset] = args else {
return Err(MirEvalError::TypeError("offset args are not provided"));
};
let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)) else {
return Err(MirEvalError::TypeError("offset generic arg is not provided"));
};
let ptr = u128::from_le_bytes(pad16(ptr.get(self)?, false));
let offset = u128::from_le_bytes(pad16(offset.get(self)?, false));
let size = self.size_of_sized(ty, locals, "offset ptr type")? as u128;
let ans = ptr + offset * size;
destination.write_from_bytes(self, &ans.to_le_bytes()[0..destination.size])
}
"assert_inhabited" | "assert_zero_valid" | "assert_uninit_valid" => {
// FIXME: We should actually implement these checks
Ok(())
}
"forget" => {
// We don't call any drop glue yet, so there is nothing here
Ok(())
}
"transmute" => {
let Some(arg) = arg_bytes.next() else {
let [arg] = args else {
return Err(MirEvalError::TypeError("trasmute arg is not provided"));
};
Ok(arg)
destination.write_from_interval(self, arg.interval)
}
"const_eval_select" => {
let [tuple, const_fn, _] = args else {
return Err(MirEvalError::TypeError("const_eval_select args are not provided"));
};
let mut args = vec![const_fn.clone()];
let TyKind::Tuple(_, fields) = tuple.ty.kind(Interner) else {
return Err(MirEvalError::TypeError("const_eval_select arg[0] is not a tuple"));
};
let layout = self.layout(&tuple.ty)?;
for (i, field) in fields.iter(Interner).enumerate() {
let field = field.assert_ty_ref(Interner).clone();
let offset = layout.fields.offset(i).bytes_usize();
let addr = tuple.interval.addr.offset(offset);
args.push(IntervalAndTy::new(addr, field, self, locals)?);
}
self.exec_fn_trait(&args, destination, locals)
}
_ => not_supported!("unknown intrinsic {as_str}"),
}
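The const_eval_select arm above turns the packed tuple argument into one (address, type) pair per field by walking the tuple layout's field offsets. A hypothetical standalone illustration of that unpacking, where repr(C) pins the offsets that the layout query would otherwise provide:

#[repr(C)]
struct Args(i32, i32); // stand-in for the (2, 3) tuple in the test

fn main() {
    let args = Args(2, 3);
    let base = &args as *const Args as *const u8;
    let x = unsafe { *(base as *const i32) };        // field 0 at offset 0
    let y = unsafe { *(base.add(4) as *const i32) }; // field 1 at offset 4
    assert_eq!(x + y, 5); // what in_const(2, 3) returns in the test
}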
@ -1226,8 +1379,8 @@ fn exec_intrinsic(
fn exec_fn_pointer(
&mut self,
bytes: Interval,
destination: &Place,
args: &[Operand],
destination: Interval,
args: &[IntervalAndTy],
locals: &Locals<'_>,
) -> Result<()> {
let id = from_bytes!(usize, bytes.get(self)?);
@ -1244,38 +1397,41 @@ fn exec_fn_def(
&mut self,
def: FnDefId,
generic_args: &Substitution,
destination: &Place,
args: &[Operand],
destination: Interval,
args: &[IntervalAndTy],
locals: &Locals<'_>,
) -> Result<()> {
let def: CallableDefId = from_chalk(self.db, def);
let generic_args = self.subst_filler(generic_args, &locals);
match def {
CallableDefId::FunctionId(def) => {
let dest_addr = self.place_addr(destination, &locals)?;
if let Some(x) = self.detect_fn_trait(def) {
self.exec_fn_trait(x, &args, destination, locals)?;
if let Some(_) = self.detect_fn_trait(def) {
self.exec_fn_trait(&args, destination, locals)?;
return Ok(());
}
let arg_bytes = args
.iter()
.map(|x| Ok(self.eval_operand(x, &locals)?.get(&self)?.to_owned()))
.collect::<Result<Vec<_>>>()?;
self.exec_fn_with_args(def, arg_bytes, generic_args, locals, dest_addr)?;
self.exec_fn_with_args(def, args, generic_args, locals, destination)?;
}
CallableDefId::StructId(id) => {
let (size, variant_layout, tag) =
self.layout_of_variant(id.into(), generic_args.clone(), &locals)?;
let result = self.make_by_layout(size, &variant_layout, tag, args, &locals)?;
let dest_addr = self.place_addr(destination, &locals)?;
self.write_memory(dest_addr, &result)?;
let result = self.make_by_layout(
size,
&variant_layout,
tag,
args.iter().map(|x| x.interval),
)?;
destination.write_from_bytes(self, &result)?;
}
CallableDefId::EnumVariantId(id) => {
let (size, variant_layout, tag) =
self.layout_of_variant(id.into(), generic_args.clone(), &locals)?;
let result = self.make_by_layout(size, &variant_layout, tag, args, &locals)?;
let dest_addr = self.place_addr(destination, &locals)?;
self.write_memory(dest_addr, &result)?;
let result = self.make_by_layout(
size,
&variant_layout,
tag,
args.iter().map(|x| x.interval),
)?;
destination.write_from_bytes(self, &result)?;
}
}
Ok(())
@ -1284,10 +1440,10 @@ fn exec_fn_def(
fn exec_fn_with_args(
&mut self,
def: FunctionId,
arg_bytes: Vec<Vec<u8>>,
args: &[IntervalAndTy],
generic_args: Substitution,
locals: &Locals<'_>,
dest_addr: Address,
destination: Interval,
) -> Result<()> {
let function_data = self.db.function_data(def);
let is_intrinsic = match &function_data.abi {
@ -1301,14 +1457,18 @@ fn exec_fn_with_args(
_ => false,
},
};
let result = if is_intrinsic {
self.exec_intrinsic(
if is_intrinsic {
return self.exec_intrinsic(
function_data.name.as_text().unwrap_or_default().as_str(),
arg_bytes.iter().cloned(),
args,
generic_args,
destination,
&locals,
)?
} else if let Some(x) = self.detect_lang_function(def) {
);
}
let arg_bytes =
args.iter().map(|x| Ok(x.get(&self)?.to_owned())).collect::<Result<Vec<_>>>()?;
let result = if let Some(x) = self.detect_lang_function(def) {
self.exec_lang_item(x, &arg_bytes)?
} else {
if let Some(self_ty_idx) =
@ -1321,9 +1481,12 @@ fn exec_fn_with_args(
let ty = self
.vtable_map
.ty_of_bytes(&arg_bytes[0][self.ptr_size()..self.ptr_size() * 2])?;
let mut args_for_target = args.to_vec();
args_for_target[0] = IntervalAndTy {
interval: args_for_target[0].interval.slice(0..self.ptr_size()),
ty: ty.clone(),
};
let ty = GenericArgData::Ty(ty.clone()).intern(Interner);
let mut args_for_target = arg_bytes;
args_for_target[0] = args_for_target[0][0..self.ptr_size()].to_vec();
let generics_for_target = Substitution::from_iter(
Interner,
generic_args.iter(Interner).enumerate().map(|(i, x)| {
@ -1336,10 +1499,10 @@ fn exec_fn_with_args(
);
return self.exec_fn_with_args(
def,
args_for_target,
&args_for_target,
generics_for_target,
locals,
dest_addr,
destination,
);
}
let (imp, generic_args) =
@ -1351,20 +1514,19 @@ fn exec_fn_with_args(
self.interpret_mir(&mir_body, arg_bytes.iter().cloned(), generic_args)
.map_err(|e| MirEvalError::InFunction(imp, Box::new(e)))?
};
self.write_memory(dest_addr, &result)?;
destination.write_from_bytes(self, &result)?;
Ok(())
}
fn exec_fn_trait(
&mut self,
ft: FnTrait,
args: &[Operand],
destination: &Place,
args: &[IntervalAndTy],
destination: Interval,
locals: &Locals<'_>,
) -> Result<()> {
let func = args.get(0).ok_or(MirEvalError::TypeError("fn trait with no arg"))?;
let mut func_ty = self.operand_ty(func, locals)?;
let mut func_data = self.eval_operand(func, locals)?;
let mut func_ty = func.ty.clone();
let mut func_data = func.interval;
while let TyKind::Ref(_, _, z) = func_ty.kind(Interner) {
func_ty = z.clone();
if matches!(func_ty.kind(Interner), TyKind::Dyn(_)) {
@ -1383,7 +1545,7 @@ fn exec_fn_trait(
TyKind::Function(_) => {
self.exec_fn_pointer(func_data, destination, &args[1..], locals)?;
}
x => not_supported!("Call {ft:?} trait methods with type {x:?}"),
x => not_supported!("Call FnTrait methods with type {x:?}"),
}
Ok(())
}
@ -1392,7 +1554,10 @@ fn exec_lang_item(&self, x: LangItem, args: &[Vec<u8>]) -> Result<Vec<u8>> {
use LangItem::*;
let mut args = args.iter();
match x {
PanicFmt | BeginPanic => Err(MirEvalError::Panic),
// FIXME: we want to extract the panic message from the arguments, but that
// currently wouldn't help, since the expansion of panic-related macros
// is a dummy.
PanicFmt | BeginPanic => Err(MirEvalError::Panic("<format-args>".to_string())),
SliceLen => {
let arg = args
.next()

View File

@ -1285,6 +1285,11 @@ fn cast_kind(source_ty: &Ty, target_ty: &Ty) -> Result<CastKind> {
(_, chalk_ir::Scalar::Float(_)) => CastKind::IntToFloat,
(_, _) => CastKind::IntToInt,
},
(TyKind::Scalar(_), TyKind::Raw(..)) => CastKind::PointerFromExposedAddress,
(TyKind::Raw(..), TyKind::Scalar(_)) => CastKind::PointerExposeAddress,
(TyKind::Raw(..) | TyKind::Ref(..), TyKind::Raw(..) | TyKind::Ref(..)) => {
CastKind::PtrToPtr
}
// Enum to int casts
(TyKind::Scalar(_), TyKind::Adt(..)) | (TyKind::Adt(..), TyKind::Scalar(_)) => {
CastKind::IntToInt
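The three new arms classify casts like the following (illustrative snippet, not part of the commit):

fn main() {
    let x = 0u32;
    let p: *const u32 = &x;
    let addr = p as usize;      // Raw -> Scalar: PointerExposeAddress
    let q = addr as *const u32; // Scalar -> Raw: PointerFromExposedAddress
    let b = p as *const u8;     // Raw/Ref -> Raw: PtrToPtr
    assert_eq!(q as usize, b as usize);
}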

View File

@ -2696,6 +2696,21 @@ fn f() {
)
}
#[test]
fn infer_ref_to_raw_cast() {
check_types(
r#"
struct S;
fn f() {
let s = &mut S;
let s = s as *mut _;
//^ *mut S
}
"#,
);
}
#[test]
fn infer_missing_type() {
check_types(
@ -3258,25 +3273,6 @@ fn f<T>(t: Ark<T>) {
);
}
// FIXME
#[test]
fn castable_to2() {
check_infer(
r#"
fn func() {
let x = &0u32 as *const _;
}
"#,
expect![[r#"
10..44 '{ ...t _; }': ()
20..21 'x': *const {unknown}
24..29 '&0u32': &u32
24..41 '&0u32 ...onst _': *const {unknown}
25..29 '0u32': u32
"#]],
);
}
#[test]
fn issue_14275() {
// FIXME: evaluate const generic

View File

@ -409,7 +409,6 @@ pub(super) fn definition(
}
match it.eval(db) {
Ok(()) => Some("pass".into()),
Err(MirEvalError::Panic) => Some("fail".into()),
Err(MirEvalError::MirLowerError(f, e)) => {
let name = &db.function_data(f).name;
Some(format!("error: fail to lower {name} due {e:?}"))