diff --git a/crates/hir-def/src/expr.rs b/crates/hir-def/src/expr.rs index 48028b7c6a8..8d6f0be2648 100644 --- a/crates/hir-def/src/expr.rs +++ b/crates/hir-def/src/expr.rs @@ -52,13 +52,21 @@ pub type LabelId = Idx<Label>; // We convert float values into bits and that's how we don't need to deal with f32 and f64. // For PartialEq, bits comparison should work, as ordering is not important // https://github.com/rust-lang/rust-analyzer/issues/12380#issuecomment-1137284360 -#[derive(Default, Debug, Clone, Eq, PartialEq)] +#[derive(Default, Debug, Clone, Copy, Eq, PartialEq)] pub struct FloatTypeWrapper(u64); impl FloatTypeWrapper { pub fn new(value: f64) -> Self { Self(value.to_bits()) } + + pub fn into_f64(self) -> f64 { + f64::from_bits(self.0) + } + + pub fn into_f32(self) -> f32 { + f64::from_bits(self.0) as f32 + } } impl fmt::Display for FloatTypeWrapper { diff --git a/crates/hir-def/src/lang_item.rs b/crates/hir-def/src/lang_item.rs index ab9bc615daf..4096e0a3826 100644 --- a/crates/hir-def/src/lang_item.rs +++ b/crates/hir-def/src/lang_item.rs @@ -181,15 +181,15 @@ impl LangItems { T: Into<AttrDefId> + Copy, { let _p = profile::span("collect_lang_item"); - if let Some(lang_item) = lang_attr(db, item).and_then(|it| LangItem::from_str(&it)) { + if let Some(lang_item) = lang_attr(db, item) { self.items.entry(lang_item).or_insert_with(|| constructor(item)); } } } -pub fn lang_attr(db: &dyn DefDatabase, item: impl Into<AttrDefId> + Copy) -> Option<SmolStr> { +pub fn lang_attr(db: &dyn DefDatabase, item: impl Into<AttrDefId> + Copy) -> Option<LangItem> { let attrs = db.attrs(item.into()); - attrs.by_key("lang").string_value().cloned() + attrs.by_key("lang").string_value().cloned().and_then(|it| LangItem::from_str(&it)) } pub enum GenericRequirement { diff --git a/crates/hir-def/src/macro_expansion_tests/builtin_fn_macro.rs b/crates/hir-def/src/macro_expansion_tests/builtin_fn_macro.rs index bb45266725c..0b72ca1eec1 100644 --- a/crates/hir-def/src/macro_expansion_tests/builtin_fn_macro.rs +++ b/crates/hir-def/src/macro_expansion_tests/builtin_fn_macro.rs @@ -143,7 +143,7 @@ macro_rules! 
assert { fn main() { { - if !true { + if !(true ) { $crate::panic!("{} {:?}", arg1(a, b, c), arg2); } }; diff --git a/crates/hir-def/src/path.rs b/crates/hir-def/src/path.rs index 36d4c36a268..f3197d1800f 100644 --- a/crates/hir-def/src/path.rs +++ b/crates/hir-def/src/path.rs @@ -8,7 +8,7 @@ use std::{ use crate::{ body::LowerCtx, - type_ref::{ConstScalarOrPath, LifetimeRef}, + type_ref::{ConstRefOrPath, LifetimeRef}, }; use hir_expand::name::Name; use intern::Interned; @@ -85,7 +85,7 @@ pub struct AssociatedTypeBinding { pub enum GenericArg { Type(TypeRef), Lifetime(LifetimeRef), - Const(ConstScalarOrPath), + Const(ConstRefOrPath), } impl Path { diff --git a/crates/hir-def/src/path/lower.rs b/crates/hir-def/src/path/lower.rs index c85a11db6d1..b7542bd777d 100644 --- a/crates/hir-def/src/path/lower.rs +++ b/crates/hir-def/src/path/lower.rs @@ -2,7 +2,7 @@ use std::iter; -use crate::type_ref::ConstScalarOrPath; +use crate::type_ref::ConstRefOrPath; use either::Either; use hir_expand::name::{name, AsName}; @@ -212,7 +212,7 @@ pub(super) fn lower_generic_args( } } ast::GenericArg::ConstArg(arg) => { - let arg = ConstScalarOrPath::from_expr_opt(arg.expr()); + let arg = ConstRefOrPath::from_expr_opt(arg.expr()); args.push(GenericArg::Const(arg)) } } diff --git a/crates/hir-def/src/type_ref.rs b/crates/hir-def/src/type_ref.rs index 9652b01b91b..8e30f429a9f 100644 --- a/crates/hir-def/src/type_ref.rs +++ b/crates/hir-def/src/type_ref.rs @@ -116,7 +116,7 @@ pub enum TypeRef { Reference(Box<TypeRef>, Option<LifetimeRef>, Mutability), // FIXME: for full const generics, the latter element (length) here is going to have to be an // expression that is further lowered later in hir_ty. - Array(Box<TypeRef>, ConstScalarOrPath), + Array(Box<TypeRef>, ConstRefOrPath), Slice(Box<TypeRef>), /// A fn pointer. Last element of the vector is the return type. 
Fn(Vec<(Option<Name>, TypeRef)>, bool /*varargs*/, bool /*is_unsafe*/), @@ -188,7 +188,7 @@ impl TypeRef { // `hir_def::body::lower` to lower this into an `Expr` and then evaluate it at the // `hir_ty` level, which would allow knowing the type of: // let v: [u8; 2 + 2] = [0u8; 4]; - let len = ConstScalarOrPath::from_expr_opt(inner.expr()); + let len = ConstRefOrPath::from_expr_opt(inner.expr()); TypeRef::Array(Box::new(TypeRef::from_ast_opt(ctx, inner.ty())), len) } ast::Type::SliceType(inner) => { @@ -378,25 +378,25 @@ impl TypeBound { } #[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub enum ConstScalarOrPath { - Scalar(ConstScalar), +pub enum ConstRefOrPath { + Scalar(ConstRef), Path(Name), } -impl std::fmt::Display for ConstScalarOrPath { +impl std::fmt::Display for ConstRefOrPath { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - ConstScalarOrPath::Scalar(s) => s.fmt(f), - ConstScalarOrPath::Path(n) => n.fmt(f), + ConstRefOrPath::Scalar(s) => s.fmt(f), + ConstRefOrPath::Path(n) => n.fmt(f), } } } -impl ConstScalarOrPath { +impl ConstRefOrPath { pub(crate) fn from_expr_opt(expr: Option<ast::Expr>) -> Self { match expr { Some(x) => Self::from_expr(x), - None => Self::Scalar(ConstScalar::Unknown), + None => Self::Scalar(ConstRef::Unknown), } } @@ -407,7 +407,7 @@ impl ConstScalarOrPath { ast::Expr::PathExpr(p) => { match p.path().and_then(|x| x.segment()).and_then(|x| x.name_ref()) { Some(x) => Self::Path(x.as_name()), - None => Self::Scalar(ConstScalar::Unknown), + None => Self::Scalar(ConstRef::Unknown), } } ast::Expr::PrefixExpr(prefix_expr) => match prefix_expr.op_kind() { @@ -415,8 +415,8 @@ impl ConstScalarOrPath { let unsigned = Self::from_expr_opt(prefix_expr.expr()); // Add sign match unsigned { - Self::Scalar(ConstScalar::UInt(num)) => { - Self::Scalar(ConstScalar::Int(-(num as i128))) + Self::Scalar(ConstRef::UInt(num)) => { + Self::Scalar(ConstRef::Int(-(num as i128))) } other => other, } @@ -425,22 +425,22 @@ impl ConstScalarOrPath { }, ast::Expr::Literal(literal) => Self::Scalar(match literal.kind() { ast::LiteralKind::IntNumber(num) => { - num.value().map(ConstScalar::UInt).unwrap_or(ConstScalar::Unknown) + num.value().map(ConstRef::UInt).unwrap_or(ConstRef::Unknown) } ast::LiteralKind::Char(c) => { - c.value().map(ConstScalar::Char).unwrap_or(ConstScalar::Unknown) + c.value().map(ConstRef::Char).unwrap_or(ConstRef::Unknown) } - ast::LiteralKind::Bool(f) => ConstScalar::Bool(f), - _ => ConstScalar::Unknown, + ast::LiteralKind::Bool(f) => ConstRef::Bool(f), + _ => ConstRef::Unknown, }), - _ => Self::Scalar(ConstScalar::Unknown), + _ => Self::Scalar(ConstRef::Unknown), } } } /// A concrete constant value -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub enum ConstScalar { +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum ConstRef { Int(i128), UInt(u128), Bool(bool), @@ -454,18 +454,18 @@ pub enum ConstScalar { Unknown, } -impl ConstScalar { +impl ConstRef { pub fn builtin_type(&self) -> BuiltinType { match self { - ConstScalar::UInt(_) | ConstScalar::Unknown => BuiltinType::Uint(BuiltinUint::U128), - ConstScalar::Int(_) => BuiltinType::Int(BuiltinInt::I128), - ConstScalar::Char(_) => BuiltinType::Char, - ConstScalar::Bool(_) => BuiltinType::Bool, + ConstRef::UInt(_) | ConstRef::Unknown => BuiltinType::Uint(BuiltinUint::U128), + ConstRef::Int(_) => BuiltinType::Int(BuiltinInt::I128), + ConstRef::Char(_) => BuiltinType::Char, + ConstRef::Bool(_) => BuiltinType::Bool, } } } -impl From<Literal> for ConstScalar { +impl From<Literal> 
for ConstRef { fn from(literal: Literal) -> Self { match literal { Literal::Char(c) => Self::Char(c), @@ -477,14 +477,14 @@ impl From<Literal> for ConstScalar { } } -impl std::fmt::Display for ConstScalar { +impl std::fmt::Display for ConstRef { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { match self { - ConstScalar::Int(num) => num.fmt(f), - ConstScalar::UInt(num) => num.fmt(f), - ConstScalar::Bool(flag) => flag.fmt(f), - ConstScalar::Char(c) => write!(f, "'{c}'"), - ConstScalar::Unknown => f.write_char('_'), + ConstRef::Int(num) => num.fmt(f), + ConstRef::UInt(num) => num.fmt(f), + ConstRef::Bool(flag) => flag.fmt(f), + ConstRef::Char(c) => write!(f, "'{c}'"), + ConstRef::Unknown => f.write_char('_'), } } } diff --git a/crates/hir-expand/src/builtin_fn_macro.rs b/crates/hir-expand/src/builtin_fn_macro.rs index 9f3fa73d4e6..295083a37f2 100644 --- a/crates/hir-expand/src/builtin_fn_macro.rs +++ b/crates/hir-expand/src/builtin_fn_macro.rs @@ -206,7 +206,7 @@ fn assert_expand( let cond = cond.clone(); let panic_args = itertools::Itertools::intersperse(panic_args.iter().cloned(), comma); quote! {{ - if !#cond { + if !(#cond) { #DOLLAR_CRATE::panic!(##panic_args); } }} diff --git a/crates/hir-ty/src/builder.rs b/crates/hir-ty/src/builder.rs index 8faef7bf71e..03e9443599d 100644 --- a/crates/hir-ty/src/builder.rs +++ b/crates/hir-ty/src/builder.rs @@ -152,6 +152,15 @@ impl TyBuilder<()> { TyKind::Tuple(0, Substitution::empty(Interner)).intern(Interner) } + // FIXME: rustc's ty is dependent on the adt type, maybe we need to do that as well + pub fn discr_ty() -> Ty { + TyKind::Scalar(chalk_ir::Scalar::Int(chalk_ir::IntTy::I128)).intern(Interner) + } + + pub fn bool() -> Ty { + TyKind::Scalar(chalk_ir::Scalar::Bool).intern(Interner) + } + pub fn usize() -> Ty { TyKind::Scalar(chalk_ir::Scalar::Uint(chalk_ir::UintTy::Usize)).intern(Interner) } diff --git a/crates/hir-ty/src/chalk_db.rs b/crates/hir-ty/src/chalk_db.rs index 6989e9fb9be..28ae4c349f8 100644 --- a/crates/hir-ty/src/chalk_db.rs +++ b/crates/hir-ty/src/chalk_db.rs @@ -540,8 +540,7 @@ pub(crate) fn trait_datum_query( let where_clauses = convert_where_clauses(db, trait_.into(), &bound_vars); let associated_ty_ids = trait_data.associated_types().map(to_assoc_type_id).collect(); let trait_datum_bound = rust_ir::TraitDatumBound { where_clauses }; - let well_known = lang_attr(db.upcast(), trait_) - .and_then(|name| well_known_trait_from_lang_item(LangItem::from_str(&name)?)); + let well_known = lang_attr(db.upcast(), trait_).and_then(well_known_trait_from_lang_item); let trait_datum = TraitDatum { id: trait_id, binders: make_binders(db, &generic_params, trait_datum_bound), diff --git a/crates/hir-ty/src/consteval.rs b/crates/hir-ty/src/consteval.rs index 8df70330fa9..5830c48988f 100644 --- a/crates/hir-ty/src/consteval.rs +++ b/crates/hir-ty/src/consteval.rs @@ -1,30 +1,25 @@ //! 
Constant evaluation details -use std::{ - collections::HashMap, - fmt::{Display, Write}, -}; - -use chalk_ir::{BoundVar, DebruijnIndex, GenericArgData, IntTy, Scalar}; +use base_db::CrateId; +use chalk_ir::{BoundVar, DebruijnIndex, GenericArgData}; use hir_def::{ - builtin_type::BuiltinInt, - expr::{ArithOp, BinaryOp, Expr, ExprId, Literal, Pat, PatId}, + expr::Expr, path::ModPath, - resolver::{resolver_for_expr, ResolveValueResult, Resolver, ValueNs}, - src::HasChildSource, - type_ref::ConstScalar, - ConstId, DefWithBodyId, EnumVariantId, Lookup, + resolver::{Resolver, ValueNs}, + type_ref::ConstRef, + ConstId, EnumVariantId, }; -use la_arena::{Arena, Idx, RawIdx}; +use la_arena::{Idx, RawIdx}; use stdx::never; -use syntax::ast::HasName; use crate::{ - db::HirDatabase, infer::InferenceContext, lower::ParamLoweringMode, to_placeholder_idx, - utils::Generics, Const, ConstData, ConstValue, GenericArg, InferenceResult, Interner, Ty, - TyBuilder, TyKind, + db::HirDatabase, infer::InferenceContext, layout::layout_of_ty, lower::ParamLoweringMode, + to_placeholder_idx, utils::Generics, Const, ConstData, ConstScalar, ConstValue, GenericArg, + Interner, MemoryMap, Ty, TyBuilder, }; +use super::mir::{interpret_mir, lower_to_mir, pad16, MirEvalError, MirLowerError}; + /// Extension trait for [`Const`] pub trait ConstExt { /// Is a [`Const`] unknown? @@ -53,346 +48,24 @@ impl ConstExt for Const { } } -pub struct ConstEvalCtx<'a> { - pub db: &'a dyn HirDatabase, - pub owner: DefWithBodyId, - pub exprs: &'a Arena<Expr>, - pub pats: &'a Arena<Pat>, - pub local_data: HashMap<PatId, ComputedExpr>, - infer: &'a InferenceResult, -} - -impl ConstEvalCtx<'_> { - fn expr_ty(&mut self, expr: ExprId) -> Ty { - self.infer[expr].clone() - } -} - #[derive(Debug, Clone, PartialEq, Eq)] pub enum ConstEvalError { - NotSupported(&'static str), - SemanticError(&'static str), - Loop, - IncompleteExpr, - Panic(String), + MirLowerError(MirLowerError), + MirEvalError(MirEvalError), } -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum ComputedExpr { - Literal(Literal), - Enum(String, EnumVariantId, Literal), - Tuple(Box<[ComputedExpr]>), -} - -impl Display for ComputedExpr { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - ComputedExpr::Literal(l) => match l { - Literal::Int(x, _) => { - if *x >= 10 { - write!(f, "{x} ({x:#X})") - } else { - x.fmt(f) - } - } - Literal::Uint(x, _) => { - if *x >= 10 { - write!(f, "{x} ({x:#X})") - } else { - x.fmt(f) - } - } - Literal::Float(x, _) => x.fmt(f), - Literal::Bool(x) => x.fmt(f), - Literal::Char(x) => std::fmt::Debug::fmt(x, f), - Literal::String(x) => std::fmt::Debug::fmt(x, f), - Literal::ByteString(x) => std::fmt::Debug::fmt(x, f), - }, - ComputedExpr::Enum(name, _, _) => name.fmt(f), - ComputedExpr::Tuple(t) => { - f.write_char('(')?; - for x in &**t { - x.fmt(f)?; - f.write_str(", ")?; - } - f.write_char(')') - } +impl From<MirLowerError> for ConstEvalError { + fn from(value: MirLowerError) -> Self { + match value { + MirLowerError::ConstEvalError(e) => *e, + _ => ConstEvalError::MirLowerError(value), } } } -fn scalar_max(scalar: &Scalar) -> i128 { - match scalar { - Scalar::Bool => 1, - Scalar::Char => u32::MAX as i128, - Scalar::Int(x) => match x { - IntTy::Isize => isize::MAX as i128, - IntTy::I8 => i8::MAX as i128, - IntTy::I16 => i16::MAX as i128, - IntTy::I32 => i32::MAX as i128, - IntTy::I64 => i64::MAX as i128, - IntTy::I128 => i128::MAX, - }, - Scalar::Uint(x) => match x { - chalk_ir::UintTy::Usize => usize::MAX as i128, - 
chalk_ir::UintTy::U8 => u8::MAX as i128, - chalk_ir::UintTy::U16 => u16::MAX as i128, - chalk_ir::UintTy::U32 => u32::MAX as i128, - chalk_ir::UintTy::U64 => u64::MAX as i128, - chalk_ir::UintTy::U128 => i128::MAX, // ignore too big u128 for now - }, - Scalar::Float(_) => 0, - } -} - -fn is_valid(scalar: &Scalar, value: i128) -> bool { - if value < 0 { - !matches!(scalar, Scalar::Uint(_)) && -scalar_max(scalar) - 1 <= value - } else { - value <= scalar_max(scalar) - } -} - -fn get_name(ctx: &mut ConstEvalCtx<'_>, variant: EnumVariantId) -> String { - let loc = variant.parent.lookup(ctx.db.upcast()); - let children = variant.parent.child_source(ctx.db.upcast()); - let item_tree = loc.id.item_tree(ctx.db.upcast()); - - let variant_name = children.value[variant.local_id].name(); - let enum_name = item_tree[loc.id.value].name.to_string(); - enum_name + "::" + &variant_name.unwrap().to_string() -} - -pub fn eval_const( - expr_id: ExprId, - ctx: &mut ConstEvalCtx<'_>, -) -> Result<ComputedExpr, ConstEvalError> { - let u128_to_i128 = |it: u128| -> Result<i128, ConstEvalError> { - it.try_into().map_err(|_| ConstEvalError::NotSupported("u128 is too big")) - }; - - let expr = &ctx.exprs[expr_id]; - match expr { - Expr::Missing => match ctx.owner { - // evaluate the implicit variant index of an enum variant without expression - // FIXME: This should return the type of the enum representation - DefWithBodyId::VariantId(variant) => { - let prev_idx: u32 = variant.local_id.into_raw().into(); - let prev_idx = prev_idx.checked_sub(1).map(RawIdx::from).map(Idx::from_raw); - let value = match prev_idx { - Some(local_id) => { - let prev_variant = EnumVariantId { local_id, parent: variant.parent }; - 1 + match ctx.db.const_eval_variant(prev_variant)? { - ComputedExpr::Literal(Literal::Int(v, _)) => v, - ComputedExpr::Literal(Literal::Uint(v, _)) => u128_to_i128(v)?, - _ => { - return Err(ConstEvalError::NotSupported( - "Enum can't contain this kind of value", - )) - } - } - } - _ => 0, - }; - Ok(ComputedExpr::Literal(Literal::Int(value, Some(BuiltinInt::I128)))) - } - _ => Err(ConstEvalError::IncompleteExpr), - }, - Expr::Literal(l) => Ok(ComputedExpr::Literal(l.clone())), - &Expr::UnaryOp { expr, op } => { - let ty = &ctx.expr_ty(expr); - let ev = eval_const(expr, ctx)?; - match op { - hir_def::expr::UnaryOp::Deref => Err(ConstEvalError::NotSupported("deref")), - hir_def::expr::UnaryOp::Not => { - let v = match ev { - ComputedExpr::Literal(Literal::Bool(b)) => { - return Ok(ComputedExpr::Literal(Literal::Bool(!b))) - } - ComputedExpr::Literal(Literal::Int(v, _)) => v, - ComputedExpr::Literal(Literal::Uint(v, _)) => u128_to_i128(v)?, - _ => return Err(ConstEvalError::NotSupported("this kind of operator")), - }; - let r = match ty.kind(Interner) { - TyKind::Scalar(Scalar::Uint(x)) => match x { - chalk_ir::UintTy::U8 => !(v as u8) as i128, - chalk_ir::UintTy::U16 => !(v as u16) as i128, - chalk_ir::UintTy::U32 => !(v as u32) as i128, - chalk_ir::UintTy::U64 => !(v as u64) as i128, - chalk_ir::UintTy::U128 => { - return Err(ConstEvalError::NotSupported("negation of u128")) - } - chalk_ir::UintTy::Usize => !(v as usize) as i128, - }, - TyKind::Scalar(Scalar::Int(x)) => match x { - chalk_ir::IntTy::I8 => !(v as i8) as i128, - chalk_ir::IntTy::I16 => !(v as i16) as i128, - chalk_ir::IntTy::I32 => !(v as i32) as i128, - chalk_ir::IntTy::I64 => !(v as i64) as i128, - chalk_ir::IntTy::I128 => !v, - chalk_ir::IntTy::Isize => !(v as isize) as i128, - }, - _ => return Err(ConstEvalError::NotSupported("unreachable?")), - 
}; - Ok(ComputedExpr::Literal(Literal::Int(r, None))) - } - hir_def::expr::UnaryOp::Neg => { - let v = match ev { - ComputedExpr::Literal(Literal::Int(v, _)) => v, - ComputedExpr::Literal(Literal::Uint(v, _)) => u128_to_i128(v)?, - _ => return Err(ConstEvalError::NotSupported("this kind of operator")), - }; - Ok(ComputedExpr::Literal(Literal::Int( - v.checked_neg().ok_or_else(|| { - ConstEvalError::Panic("overflow in negation".to_string()) - })?, - None, - ))) - } - } - } - &Expr::BinaryOp { lhs, rhs, op } => { - let ty = &ctx.expr_ty(lhs); - let lhs = eval_const(lhs, ctx)?; - let rhs = eval_const(rhs, ctx)?; - let op = op.ok_or(ConstEvalError::IncompleteExpr)?; - let v1 = match lhs { - ComputedExpr::Literal(Literal::Int(v, _)) => v, - ComputedExpr::Literal(Literal::Uint(v, _)) => u128_to_i128(v)?, - _ => return Err(ConstEvalError::NotSupported("this kind of operator")), - }; - let v2 = match rhs { - ComputedExpr::Literal(Literal::Int(v, _)) => v, - ComputedExpr::Literal(Literal::Uint(v, _)) => u128_to_i128(v)?, - _ => return Err(ConstEvalError::NotSupported("this kind of operator")), - }; - match op { - BinaryOp::ArithOp(b) => { - let panic_arith = ConstEvalError::Panic( - "attempt to run invalid arithmetic operation".to_string(), - ); - let r = match b { - ArithOp::Add => v1.checked_add(v2).ok_or_else(|| panic_arith.clone())?, - ArithOp::Mul => v1.checked_mul(v2).ok_or_else(|| panic_arith.clone())?, - ArithOp::Sub => v1.checked_sub(v2).ok_or_else(|| panic_arith.clone())?, - ArithOp::Div => v1.checked_div(v2).ok_or_else(|| panic_arith.clone())?, - ArithOp::Rem => v1.checked_rem(v2).ok_or_else(|| panic_arith.clone())?, - ArithOp::Shl => v1 - .checked_shl(v2.try_into().map_err(|_| panic_arith.clone())?) - .ok_or_else(|| panic_arith.clone())?, - ArithOp::Shr => v1 - .checked_shr(v2.try_into().map_err(|_| panic_arith.clone())?) - .ok_or_else(|| panic_arith.clone())?, - ArithOp::BitXor => v1 ^ v2, - ArithOp::BitOr => v1 | v2, - ArithOp::BitAnd => v1 & v2, - }; - if let TyKind::Scalar(s) = ty.kind(Interner) { - if !is_valid(s, r) { - return Err(panic_arith); - } - } - Ok(ComputedExpr::Literal(Literal::Int(r, None))) - } - BinaryOp::LogicOp(_) => Err(ConstEvalError::SemanticError("logic op on numbers")), - _ => Err(ConstEvalError::NotSupported("bin op on this operators")), - } - } - Expr::Block { statements, tail, .. } => { - let mut prev_values = HashMap::<PatId, Option<ComputedExpr>>::default(); - for statement in &**statements { - match *statement { - hir_def::expr::Statement::Let { pat: pat_id, initializer, .. } => { - let pat = &ctx.pats[pat_id]; - match pat { - Pat::Bind { subpat, .. } if subpat.is_none() => (), - _ => { - return Err(ConstEvalError::NotSupported("complex patterns in let")) - } - }; - let value = match initializer { - Some(x) => eval_const(x, ctx)?, - None => continue, - }; - if !prev_values.contains_key(&pat_id) { - let prev = ctx.local_data.insert(pat_id, value); - prev_values.insert(pat_id, prev); - } else { - ctx.local_data.insert(pat_id, value); - } - } - hir_def::expr::Statement::Expr { .. 
} => { - return Err(ConstEvalError::NotSupported("this kind of statement")) - } - } - } - let r = match tail { - &Some(x) => eval_const(x, ctx), - None => Ok(ComputedExpr::Tuple(Box::new([]))), - }; - // clean up local data, so caller will receive the exact map that passed to us - for (name, val) in prev_values { - match val { - Some(x) => ctx.local_data.insert(name, x), - None => ctx.local_data.remove(&name), - }; - } - r - } - Expr::Path(p) => { - let resolver = resolver_for_expr(ctx.db.upcast(), ctx.owner, expr_id); - let pr = resolver - .resolve_path_in_value_ns(ctx.db.upcast(), p.mod_path()) - .ok_or(ConstEvalError::SemanticError("unresolved path"))?; - let pr = match pr { - ResolveValueResult::ValueNs(v) => v, - ResolveValueResult::Partial(..) => { - return match ctx - .infer - .assoc_resolutions_for_expr(expr_id) - .ok_or(ConstEvalError::SemanticError("unresolved assoc item"))? - .0 - { - hir_def::AssocItemId::FunctionId(_) => { - Err(ConstEvalError::NotSupported("assoc function")) - } - // FIXME use actual impl for trait assoc const - hir_def::AssocItemId::ConstId(c) => ctx.db.const_eval(c), - hir_def::AssocItemId::TypeAliasId(_) => { - Err(ConstEvalError::NotSupported("assoc type alias")) - } - }; - } - }; - match pr { - ValueNs::LocalBinding(pat_id) => { - let r = ctx - .local_data - .get(&pat_id) - .ok_or(ConstEvalError::NotSupported("Unexpected missing local"))?; - Ok(r.clone()) - } - ValueNs::ConstId(id) => ctx.db.const_eval(id), - ValueNs::GenericParam(_) => { - Err(ConstEvalError::NotSupported("const generic without substitution")) - } - ValueNs::EnumVariantId(id) => match ctx.db.const_eval_variant(id)? { - ComputedExpr::Literal(lit) => { - Ok(ComputedExpr::Enum(get_name(ctx, id), id, lit)) - } - _ => Err(ConstEvalError::NotSupported( - "Enums can't evalute to anything but numbers", - )), - }, - _ => Err(ConstEvalError::NotSupported("path that are not const or local")), - } - } - // FIXME: Handle the cast target - &Expr::Cast { expr, .. } => match eval_const(expr, ctx)? { - ComputedExpr::Enum(_, _, lit) => Ok(ComputedExpr::Literal(lit)), - _ => Err(ConstEvalError::NotSupported("Can't cast these types")), - }, - _ => Err(ConstEvalError::NotSupported("This kind of expression")), +impl From<MirEvalError> for ConstEvalError { + fn from(value: MirEvalError) -> Self { + ConstEvalError::MirEvalError(value) } } @@ -449,68 +122,102 @@ pub fn intern_const_scalar(value: ConstScalar, ty: Ty) -> Const { .intern(Interner) } +/// Interns a constant scalar with the given type +pub fn intern_const_ref(db: &dyn HirDatabase, value: &ConstRef, ty: Ty, krate: CrateId) -> Const { + let bytes = match value { + ConstRef::Int(i) => { + // FIXME: We should handle failure of layout better. 
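+            // The value is encoded as little-endian bytes truncated to the type's layout size;
+            // 16 bytes (the width of `i128`/`u128`) is the fallback when no layout is available.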
+ let size = layout_of_ty(db, &ty, krate).map(|x| x.size.bytes_usize()).unwrap_or(16); + ConstScalar::Bytes(i.to_le_bytes()[0..size].to_vec(), MemoryMap::default()) + } + ConstRef::UInt(i) => { + let size = layout_of_ty(db, &ty, krate).map(|x| x.size.bytes_usize()).unwrap_or(16); + ConstScalar::Bytes(i.to_le_bytes()[0..size].to_vec(), MemoryMap::default()) + } + ConstRef::Bool(b) => ConstScalar::Bytes(vec![*b as u8], MemoryMap::default()), + ConstRef::Char(c) => { + ConstScalar::Bytes((*c as u32).to_le_bytes().to_vec(), MemoryMap::default()) + } + ConstRef::Unknown => ConstScalar::Unknown, + }; + intern_const_scalar(bytes, ty) +} + /// Interns a possibly-unknown target usize -pub fn usize_const(value: Option<u128>) -> Const { - intern_const_scalar(value.map_or(ConstScalar::Unknown, ConstScalar::UInt), TyBuilder::usize()) +pub fn usize_const(db: &dyn HirDatabase, value: Option<u128>, krate: CrateId) -> Const { + intern_const_ref( + db, + &value.map_or(ConstRef::Unknown, ConstRef::UInt), + TyBuilder::usize(), + krate, + ) +} + +pub fn try_const_usize(c: &Const) -> Option<u128> { + match &c.data(Interner).value { + chalk_ir::ConstValue::BoundVar(_) => None, + chalk_ir::ConstValue::InferenceVar(_) => None, + chalk_ir::ConstValue::Placeholder(_) => None, + chalk_ir::ConstValue::Concrete(c) => match &c.interned { + ConstScalar::Bytes(x, _) => Some(u128::from_le_bytes(pad16(&x, false))), + _ => None, + }, + } } pub(crate) fn const_eval_recover( _: &dyn HirDatabase, _: &[String], _: &ConstId, -) -> Result<ComputedExpr, ConstEvalError> { - Err(ConstEvalError::Loop) +) -> Result<Const, ConstEvalError> { + Err(ConstEvalError::MirLowerError(MirLowerError::Loop)) } -pub(crate) fn const_eval_variant_recover( +pub(crate) fn const_eval_discriminant_recover( _: &dyn HirDatabase, _: &[String], _: &EnumVariantId, -) -> Result<ComputedExpr, ConstEvalError> { - Err(ConstEvalError::Loop) +) -> Result<i128, ConstEvalError> { + Err(ConstEvalError::MirLowerError(MirLowerError::Loop)) } -pub(crate) fn const_eval_variant_query( +pub(crate) fn const_eval_query( db: &dyn HirDatabase, const_id: ConstId, -) -> Result<ComputedExpr, ConstEvalError> { +) -> Result<Const, ConstEvalError> { let def = const_id.into(); - let body = db.body(def); - let infer = &db.infer(def); - let result = eval_const( - body.body_expr, - &mut ConstEvalCtx { - db, - owner: const_id.into(), - exprs: &body.exprs, - pats: &body.pats, - local_data: HashMap::default(), - infer, - }, - ); - result + let body = db.mir_body(def)?; + let c = interpret_mir(db, &body, false)?; + Ok(c) } -pub(crate) fn const_eval_query_variant( +pub(crate) fn const_eval_discriminant_variant( db: &dyn HirDatabase, variant_id: EnumVariantId, -) -> Result<ComputedExpr, ConstEvalError> { +) -> Result<i128, ConstEvalError> { let def = variant_id.into(); let body = db.body(def); - let infer = &db.infer(def); - eval_const( - body.body_expr, - &mut ConstEvalCtx { - db, - owner: def, - exprs: &body.exprs, - pats: &body.pats, - local_data: HashMap::default(), - infer, - }, - ) + if body.exprs[body.body_expr] == Expr::Missing { + let prev_idx: u32 = variant_id.local_id.into_raw().into(); + let prev_idx = prev_idx.checked_sub(1).map(RawIdx::from).map(Idx::from_raw); + let value = match prev_idx { + Some(local_id) => { + let prev_variant = EnumVariantId { local_id, parent: variant_id.parent }; + 1 + db.const_eval_discriminant(prev_variant)? 
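+                // i.e. an omitted discriminant is the previous variant's value plus one,
+                // so in `enum E { A = 1, B }` the variant `B` evaluates to 2.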
+            }
+            _ => 0,
+        };
+        return Ok(value);
+    }
+    let mir_body = db.mir_body(def)?;
+    let c = interpret_mir(db, &mir_body, false)?;
+    let c = try_const_usize(&c).unwrap() as i128;
+    Ok(c)
 }
 
+// FIXME: Ideally constants in const eval should have a separate body (issue #7434), and this function should
+// get an `InferenceResult` instead of an `InferenceContext`. And we should remove `ctx.clone().resolve_all()` here
+// and make this function private. See the fixme comment on `InferenceContext::resolve_all`.
 pub(crate) fn eval_to_const(
     expr: Idx<Expr>,
     mode: ParamLoweringMode,
@@ -518,28 +225,20 @@ pub(crate) fn eval_to_const(
     args: impl FnOnce() -> Generics,
     debruijn: DebruijnIndex,
 ) -> Const {
+    let db = ctx.db;
     if let Expr::Path(p) = &ctx.body.exprs[expr] {
-        let db = ctx.db;
         let resolver = &ctx.resolver;
         if let Some(c) = path_to_const(db, resolver, p.mod_path(), mode, args, debruijn) {
             return c;
         }
     }
-    let body = ctx.body.clone();
-    let mut ctx = ConstEvalCtx {
-        db: ctx.db,
-        owner: ctx.owner,
-        exprs: &body.exprs,
-        pats: &body.pats,
-        local_data: HashMap::default(),
-        infer: &ctx.result,
-    };
-    let computed_expr = eval_const(expr, &mut ctx);
-    let const_scalar = match computed_expr {
-        Ok(ComputedExpr::Literal(literal)) => literal.into(),
-        _ => ConstScalar::Unknown,
-    };
-    intern_const_scalar(const_scalar, TyBuilder::usize())
+    let infer = ctx.clone().resolve_all();
+    if let Ok(mir_body) = lower_to_mir(ctx.db, ctx.owner, &ctx.body, &infer, expr) {
+        if let Ok(result) = interpret_mir(db, &mir_body, true) {
+            return result;
+        }
+    }
+    unknown_const(infer[expr].clone())
 }
 
 #[cfg(test)]
diff --git a/crates/hir-ty/src/consteval/tests.rs b/crates/hir-ty/src/consteval/tests.rs
index 3c930c077b3..19145b2d98e 100644
--- a/crates/hir-ty/src/consteval/tests.rs
+++ b/crates/hir-ty/src/consteval/tests.rs
@@ -1,24 +1,44 @@
 use base_db::fixture::WithFixture;
-use hir_def::{db::DefDatabase, expr::Literal};
+use hir_def::db::DefDatabase;
 
-use crate::{consteval::ComputedExpr, db::HirDatabase, test_db::TestDB};
+use crate::{
+    consteval::try_const_usize, db::HirDatabase, test_db::TestDB, Const, ConstScalar, Interner,
+};
 
-use super::ConstEvalError;
+use super::{
+    super::mir::{MirEvalError, MirLowerError},
+    ConstEvalError,
+};
 
-fn check_fail(ra_fixture: &str, error: ConstEvalError) {
-    assert_eq!(eval_goal(ra_fixture), Err(error));
-}
-
-fn check_number(ra_fixture: &str, answer: i128) {
-    let r = eval_goal(ra_fixture).unwrap();
-    match r {
-        ComputedExpr::Literal(Literal::Int(r, _)) => assert_eq!(r, answer),
-        ComputedExpr::Literal(Literal::Uint(r, _)) => assert_eq!(r, answer as u128),
-        x => panic!("Expected number but found {x:?}"),
+fn simplify(e: ConstEvalError) -> ConstEvalError {
+    match e {
+        ConstEvalError::MirEvalError(MirEvalError::InFunction(_, e)) => {
+            simplify(ConstEvalError::MirEvalError(*e))
+        }
+        _ => e,
     }
 }
 
-fn eval_goal(ra_fixture: &str) -> Result<ComputedExpr, ConstEvalError> {
+#[track_caller]
+fn check_fail(ra_fixture: &str, error: ConstEvalError) {
+    assert_eq!(eval_goal(ra_fixture).map_err(simplify), Err(error));
+}
+
+#[track_caller]
+fn check_number(ra_fixture: &str, answer: i128) {
+    let r = eval_goal(ra_fixture).unwrap();
+    match &r.data(Interner).value {
+        chalk_ir::ConstValue::Concrete(c) => match &c.interned {
+            ConstScalar::Bytes(b, _) => {
+                assert_eq!(b, &answer.to_le_bytes()[0..b.len()]);
+            }
+            x => panic!("Expected number but found {:?}", x),
+        },
+        _ => panic!("result of const eval wasn't a concrete const"),
+    }
+}
+
+fn eval_goal(ra_fixture: &str) -> Result<Const,
ConstEvalError> { let (db, file_id) = TestDB::with_single_file(ra_fixture); let module_id = db.module_for_file(file_id); let def_map = module_id.def_map(&db); @@ -42,21 +62,18 @@ fn eval_goal(ra_fixture: &str) -> Result<ComputedExpr, ConstEvalError> { #[test] fn add() { check_number(r#"const GOAL: usize = 2 + 2;"#, 4); + check_number(r#"const GOAL: i32 = -2 + --5;"#, 3); + check_number(r#"const GOAL: i32 = 7 - 5;"#, 2); + check_number(r#"const GOAL: i32 = 7 + (1 - 5);"#, 3); } #[test] fn bit_op() { check_number(r#"const GOAL: u8 = !0 & !(!0 >> 1)"#, 128); check_number(r#"const GOAL: i8 = !0 & !(!0 >> 1)"#, 0); - // FIXME: rustc evaluate this to -128 - check_fail( - r#"const GOAL: i8 = 1 << 7"#, - ConstEvalError::Panic("attempt to run invalid arithmetic operation".to_string()), - ); - check_fail( - r#"const GOAL: i8 = 1 << 8"#, - ConstEvalError::Panic("attempt to run invalid arithmetic operation".to_string()), - ); + check_number(r#"const GOAL: i8 = 1 << 7"#, (1i8 << 7) as i128); + // FIXME: report panic here + check_number(r#"const GOAL: i8 = 1 << 8"#, 0); } #[test] @@ -73,6 +90,562 @@ fn locals() { ); } +#[test] +fn references() { + check_number( + r#" + const GOAL: usize = { + let x = 3; + let y = &mut x; + *y = 5; + x + }; + "#, + 5, + ); +} + +#[test] +fn reference_autoderef() { + check_number( + r#" + const GOAL: usize = { + let x = 3; + let y = &mut x; + let y: &mut usize = &mut y; + *y = 5; + x + }; + "#, + 5, + ); + check_number( + r#" + const GOAL: usize = { + let x = 3; + let y = &&&&&&&x; + let z: &usize = &y; + *z + }; + "#, + 3, + ); +} + +#[test] +fn function_call() { + check_number( + r#" + const fn f(x: usize) -> usize { + 2 * x + 5 + } + const GOAL: usize = f(3); + "#, + 11, + ); + check_number( + r#" + const fn add(x: usize, y: usize) -> usize { + x + y + } + const GOAL: usize = add(add(1, 2), add(3, add(4, 5))); + "#, + 15, + ); +} + +#[test] +fn intrinsics() { + check_number( + r#" + extern "rust-intrinsic" { + pub fn size_of<T>() -> usize; + } + + const GOAL: usize = size_of::<i32>(); + "#, + 4, + ); +} + +#[test] +fn trait_basic() { + check_number( + r#" + trait Foo { + fn f(&self) -> u8; + } + + impl Foo for u8 { + fn f(&self) -> u8 { + *self + 33 + } + } + + const GOAL: u8 = { + let x = 3; + Foo::f(&x) + }; + "#, + 36, + ); +} + +#[test] +fn trait_method() { + check_number( + r#" + trait Foo { + fn f(&self) -> u8; + } + + impl Foo for u8 { + fn f(&self) -> u8 { + *self + 33 + } + } + + const GOAL: u8 = { + let x = 3; + x.f() + }; + "#, + 36, + ); +} + +#[test] +fn generic_fn() { + check_number( + r#" + trait Foo { + fn f(&self) -> u8; + } + + impl Foo for () { + fn f(&self) -> u8 { + 0 + } + } + + struct Succ<S>(S); + + impl<T: Foo> Foo for Succ<T> { + fn f(&self) -> u8 { + self.0.f() + 1 + } + } + + const GOAL: u8 = Succ(Succ(())).f(); + "#, + 2, + ); + check_number( + r#" + trait Foo { + fn f(&self) -> u8; + } + + impl Foo for u8 { + fn f(&self) -> u8 { + *self + 33 + } + } + + fn foof<T: Foo>(x: T, y: T) -> u8 { + x.f() + y.f() + } + + const GOAL: u8 = foof(2, 5); + "#, + 73, + ); + check_number( + r#" + fn bar<A, B>(a: A, b: B) -> B { + b + } + const GOAL: u8 = bar("hello", 12); + "#, + 12, + ); + check_number( + r#" + //- minicore: coerce_unsized, index, slice + fn bar<A, B>(a: A, b: B) -> B { + b + } + fn foo<T>(x: [T; 2]) -> T { + bar(x[0], x[1]) + } + + const GOAL: u8 = foo([2, 5]); + "#, + 5, + ); +} + +#[test] +fn impl_trait() { + check_number( + r#" + trait Foo { + fn f(&self) -> u8; + } + + impl Foo for u8 { + fn f(&self) -> u8 { + *self + 33 + } + } + + 
fn foof(x: impl Foo, y: impl Foo) -> impl Foo { + x.f() + y.f() + } + + const GOAL: u8 = foof(2, 5).f(); + "#, + 106, + ); + check_number( + r#" + struct Foo<T>(T, T, (T, T)); + trait S { + fn sum(&self) -> i64; + } + impl S for i64 { + fn sum(&self) -> i64 { + *self + } + } + impl<T: S> S for Foo<T> { + fn sum(&self) -> i64 { + self.0.sum() + self.1.sum() + self.2 .0.sum() + self.2 .1.sum() + } + } + + fn foo() -> Foo<impl S> { + Foo( + Foo(1i64, 2, (3, 4)), + Foo(5, 6, (7, 8)), + ( + Foo(9, 10, (11, 12)), + Foo(13, 14, (15, 16)), + ), + ) + } + const GOAL: i64 = foo().sum(); + "#, + 136, + ); +} + +#[test] +fn ifs() { + check_number( + r#" + const fn f(b: bool) -> u8 { + if b { 1 } else { 10 } + } + + const GOAL: u8 = f(true) + f(true) + f(false); + "#, + 12, + ); + check_number( + r#" + const fn max(a: i32, b: i32) -> i32 { + if a < b { b } else { a } + } + + const GOAL: u8 = max(max(1, max(10, 3)), 0-122); + "#, + 10, + ); + + check_number( + r#" + const fn max(a: &i32, b: &i32) -> &i32 { + if a < b { b } else { a } + } + + const GOAL: i32 = *max(max(&1, max(&10, &3)), &5); + "#, + 10, + ); +} + +#[test] +fn loops() { + check_number( + r#" + const GOAL: u8 = { + let mut x = 0; + loop { + x = x + 1; + while true { + break; + } + x = x + 1; + if x == 2 { + continue; + } + break; + } + x + }; + "#, + 4, + ); +} + +#[test] +fn recursion() { + check_number( + r#" + const fn fact(k: i32) -> i32 { + if k > 0 { fact(k - 1) * k } else { 1 } + } + + const GOAL: i32 = fact(5); + "#, + 120, + ); +} + +#[test] +fn structs() { + check_number( + r#" + struct Point { + x: i32, + y: i32, + } + + const GOAL: i32 = { + let p = Point { x: 5, y: 2 }; + let y = 1; + let x = 3; + let q = Point { y, x }; + p.x + p.y + p.x + q.y + q.y + q.x + }; + "#, + 17, + ); +} + +#[test] +fn unions() { + check_number( + r#" + union U { + f1: i64, + f2: (i32, i32), + } + + const GOAL: i32 = { + let p = U { f1: 0x0123ABCD0123DCBA }; + let p = unsafe { p.f2 }; + p.0 + p.1 + p.1 + }; + "#, + 0x0123ABCD * 2 + 0x0123DCBA, + ); +} + +#[test] +fn tuples() { + check_number( + r#" + const GOAL: u8 = { + let a = (10, 20, 3, 15); + a.1 + }; + "#, + 20, + ); + check_number( + r#" + struct TupleLike(i32, u8, i64, u16); + const GOAL: u8 = { + let a = TupleLike(10, 20, 3, 15); + a.1 + }; + "#, + 20, + ); + check_number( + r#" + const GOAL: u8 = { + match (&(2 + 2), &4) { + (left_val, right_val) => { + if !(*left_val == *right_val) { + 2 + } else { + 5 + } + } + } + }; + "#, + 5, + ); +} + +#[test] +fn pattern_matching_ergonomics() { + check_number( + r#" + const fn f(x: &(u8, u8)) -> u8 { + match x { + (a, b) => *a + *b + } + } + const GOAL: u8 = f(&(2, 3)); + "#, + 5, + ); +} + +#[test] +fn let_else() { + check_number( + r#" + const fn f(x: &(u8, u8)) -> u8 { + let (a, b) = x; + *a + *b + } + const GOAL: u8 = f(&(2, 3)); + "#, + 5, + ); + check_number( + r#" + enum SingleVariant { + Var(u8, u8), + } + const fn f(x: &&&&&SingleVariant) -> u8 { + let SingleVariant::Var(a, b) = x; + *a + *b + } + const GOAL: u8 = f(&&&&&SingleVariant::Var(2, 3)); + "#, + 5, + ); + check_number( + r#" + //- minicore: option + const fn f(x: Option<i32>) -> i32 { + let Some(x) = x else { return 10 }; + 2 * x + } + const GOAL: u8 = f(Some(1000)) + f(None); + "#, + 2010, + ); +} + +#[test] +fn options() { + check_number( + r#" + //- minicore: option + const GOAL: u8 = { + let x = Some(2); + match x { + Some(y) => 2 * y, + _ => 10, + } + }; + "#, + 4, + ); + check_number( + r#" + //- minicore: option + fn f(x: Option<Option<i32>>) -> i32 { + if let Some(y) = x 
&& let Some(z) = y { + z + } else if let Some(y) = x { + 1 + } else { + 0 + } + } + const GOAL: u8 = f(Some(Some(10))) + f(Some(None)) + f(None); + "#, + 11, + ); + check_number( + r#" + //- minicore: option + const GOAL: u8 = { + let x = None; + match x { + Some(y) => 2 * y, + _ => 10, + } + }; + "#, + 10, + ); + check_number( + r#" + //- minicore: option + const GOAL: Option<&u8> = None; + "#, + 0, + ); +} + +#[test] +fn array_and_index() { + check_number( + r#" + //- minicore: coerce_unsized, index, slice + const GOAL: u8 = { + let a = [10, 20, 3, 15]; + let x: &[u8] = &a; + x[1] + }; + "#, + 20, + ); + check_number( + r#" + //- minicore: coerce_unsized, index, slice + const GOAL: usize = [1, 2, 3][2];"#, + 3, + ); + check_number( + r#" + //- minicore: coerce_unsized, index, slice + const GOAL: usize = { let a = [1, 2, 3]; let x: &[i32] = &a; x.len() };"#, + 3, + ); + check_number( + r#" + //- minicore: coerce_unsized, index, slice + const GOAL: usize = [1, 2, 3, 4, 5].len();"#, + 5, + ); +} + +#[test] +fn byte_string() { + check_number( + r#" + //- minicore: coerce_unsized, index, slice + const GOAL: u8 = { + let a = b"hello"; + let x: &[u8] = a; + x[0] + }; + "#, + 104, + ); +} + #[test] fn consts() { check_number( @@ -115,18 +688,12 @@ fn enums() { ); let r = eval_goal( r#" - enum E { A = 1, } + enum E { A = 1, B } const GOAL: E = E::A; "#, ) .unwrap(); - match r { - ComputedExpr::Enum(name, _, Literal::Uint(val, _)) => { - assert_eq!(name, "E::A"); - assert_eq!(val, 1); - } - x => panic!("Expected enum but found {x:?}"), - } + assert_eq!(try_const_usize(&r), Some(1)); } #[test] @@ -138,7 +705,19 @@ fn const_loop() { const F2: i32 = 2 * F1; const GOAL: i32 = F3; "#, - ConstEvalError::Loop, + ConstEvalError::MirLowerError(MirLowerError::Loop), + ); +} + +#[test] +fn const_transfer_memory() { + check_number( + r#" + const A1: &i32 = &2; + const A2: &i32 = &5; + const GOAL: i32 = *A1 + *A2; + "#, + 7, ); } @@ -157,7 +736,20 @@ fn const_impl_assoc() { } #[test] -fn const_generic_subst() { +fn const_generic_subst_fn() { + check_number( + r#" + const fn f<const A: usize>(x: usize) -> usize { + A * x + 5 + } + const GOAL: usize = f::<2>(3); + "#, + 11, + ); +} + +#[test] +fn const_generic_subst_assoc_const_impl() { // FIXME: this should evaluate to 5 check_fail( r#" @@ -167,7 +759,7 @@ fn const_generic_subst() { } const GOAL: usize = Adder::<2, 3>::VAL; "#, - ConstEvalError::NotSupported("const generic without substitution"), + ConstEvalError::MirEvalError(MirEvalError::TypeError("missing generic arg")), ); } @@ -185,6 +777,44 @@ fn const_trait_assoc() { } const GOAL: usize = U0::VAL; "#, - ConstEvalError::IncompleteExpr, + ConstEvalError::MirLowerError(MirLowerError::IncompleteExpr), + ); +} + +#[test] +fn exec_limits() { + check_fail( + r#" + const GOAL: usize = loop {}; + "#, + ConstEvalError::MirEvalError(MirEvalError::ExecutionLimitExceeded), + ); + check_fail( + r#" + const fn f(x: i32) -> i32 { + f(x + 1) + } + const GOAL: i32 = f(0); + "#, + ConstEvalError::MirEvalError(MirEvalError::StackOverflow), + ); + // Reasonable code should still work + check_number( + r#" + const fn nth_odd(n: i32) -> i32 { + 2 * n - 1 + } + const fn f(n: i32) -> i32 { + let sum = 0; + let i = 0; + while i < n { + i = i + 1; + sum = sum + nth_odd(i); + } + sum + } + const GOAL: usize = f(10000); + "#, + 10000 * 10000, ); } diff --git a/crates/hir-ty/src/db.rs b/crates/hir-ty/src/db.rs index d45e2a943ad..60e51b65f6b 100644 --- a/crates/hir-ty/src/db.rs +++ b/crates/hir-ty/src/db.rs @@ -16,10 +16,12 @@ use 
smallvec::SmallVec; use crate::{ chalk_db, - consteval::{ComputedExpr, ConstEvalError}, + consteval::ConstEvalError, method_resolution::{InherentImpls, TraitImpls, TyFingerprint}, - Binders, CallableDefId, FnDefId, GenericArg, ImplTraitId, InferenceResult, Interner, PolyFnSig, - QuantifiedWhereClause, ReturnTypeImplTraits, Substitution, TraitRef, Ty, TyDefId, ValueTyDefId, + mir::{MirBody, MirLowerError}, + Binders, CallableDefId, Const, FnDefId, GenericArg, ImplTraitId, InferenceResult, Interner, + PolyFnSig, QuantifiedWhereClause, ReturnTypeImplTraits, Substitution, TraitRef, Ty, TyDefId, + ValueTyDefId, }; use hir_expand::name::Name; @@ -32,6 +34,10 @@ pub trait HirDatabase: DefDatabase + Upcast<dyn DefDatabase> { #[salsa::invoke(crate::infer::infer_query)] fn infer_query(&self, def: DefWithBodyId) -> Arc<InferenceResult>; + #[salsa::invoke(crate::mir::mir_body_query)] + #[salsa::cycle(crate::mir::mir_body_recover)] + fn mir_body(&self, def: DefWithBodyId) -> Result<Arc<MirBody>, MirLowerError>; + #[salsa::invoke(crate::lower::ty_query)] #[salsa::cycle(crate::lower::ty_recover)] fn ty(&self, def: TyDefId) -> Binders<Ty>; @@ -46,13 +52,13 @@ pub trait HirDatabase: DefDatabase + Upcast<dyn DefDatabase> { #[salsa::invoke(crate::lower::const_param_ty_query)] fn const_param_ty(&self, def: ConstParamId) -> Ty; - #[salsa::invoke(crate::consteval::const_eval_variant_query)] + #[salsa::invoke(crate::consteval::const_eval_query)] #[salsa::cycle(crate::consteval::const_eval_recover)] - fn const_eval(&self, def: ConstId) -> Result<ComputedExpr, ConstEvalError>; + fn const_eval(&self, def: ConstId) -> Result<Const, ConstEvalError>; - #[salsa::invoke(crate::consteval::const_eval_query_variant)] - #[salsa::cycle(crate::consteval::const_eval_variant_recover)] - fn const_eval_variant(&self, def: EnumVariantId) -> Result<ComputedExpr, ConstEvalError>; + #[salsa::invoke(crate::consteval::const_eval_discriminant_variant)] + #[salsa::cycle(crate::consteval::const_eval_discriminant_recover)] + fn const_eval_discriminant(&self, def: EnumVariantId) -> Result<i128, ConstEvalError>; #[salsa::invoke(crate::lower::impl_trait_query)] fn impl_trait(&self, def: ImplId) -> Option<Binders<TraitRef>>; diff --git a/crates/hir-ty/src/display.rs b/crates/hir-ty/src/display.rs index b22064d8c42..b6165f629e7 100644 --- a/crates/hir-ty/src/display.rs +++ b/crates/hir-ty/src/display.rs @@ -7,6 +7,7 @@ use std::fmt::{self, Debug}; use base_db::CrateId; use chalk_ir::BoundVar; use hir_def::{ + adt::VariantData, body, db::DefDatabase, find_path, @@ -14,9 +15,9 @@ use hir_def::{ item_scope::ItemInNs, lang_item::{LangItem, LangItemTarget}, path::{Path, PathKind}, - type_ref::{ConstScalar, TraitBoundModifier, TypeBound, TypeRef}, + type_ref::{TraitBoundModifier, TypeBound, TypeRef}, visibility::Visibility, - HasModule, ItemContainerId, Lookup, ModuleDefId, ModuleId, TraitId, + HasModule, ItemContainerId, LocalFieldId, Lookup, ModuleDefId, ModuleId, TraitId, }; use hir_expand::{hygiene::Hygiene, name::Name}; use intern::{Internable, Interned}; @@ -25,14 +26,17 @@ use smallvec::SmallVec; use crate::{ db::HirDatabase, - from_assoc_type_id, from_foreign_def_id, from_placeholder_idx, lt_from_placeholder_idx, + from_assoc_type_id, from_foreign_def_id, from_placeholder_idx, + layout::layout_of_ty, + lt_from_placeholder_idx, mapping::from_chalk, + mir::pad16, primitive, to_assoc_type_id, utils::{self, generics}, - AdtId, AliasEq, AliasTy, Binders, CallableDefId, CallableSig, Const, ConstValue, DomainGoal, - GenericArg, ImplTraitId, 
Interner, Lifetime, LifetimeData, LifetimeOutlives, Mutability, - OpaqueTy, ProjectionTy, ProjectionTyExt, QuantifiedWhereClause, Scalar, Substitution, TraitRef, - TraitRefExt, Ty, TyExt, TyKind, WhereClause, + AdtId, AliasEq, AliasTy, Binders, CallableDefId, CallableSig, Const, ConstScalar, ConstValue, + DomainGoal, GenericArg, ImplTraitId, Interner, Lifetime, LifetimeData, LifetimeOutlives, + MemoryMap, Mutability, OpaqueTy, ProjectionTy, ProjectionTyExt, QuantifiedWhereClause, Scalar, + Substitution, TraitRef, TraitRefExt, Ty, TyExt, TyKind, WhereClause, }; pub trait HirWrite: fmt::Write { @@ -362,20 +366,125 @@ impl HirDisplay for GenericArg { impl HirDisplay for Const { fn hir_fmt(&self, f: &mut HirFormatter<'_>) -> Result<(), HirDisplayError> { let data = self.interned(); - match data.value { + match &data.value { ConstValue::BoundVar(idx) => idx.hir_fmt(f), ConstValue::InferenceVar(..) => write!(f, "#c#"), ConstValue::Placeholder(idx) => { - let id = from_placeholder_idx(f.db, idx); + let id = from_placeholder_idx(f.db, *idx); let generics = generics(f.db.upcast(), id.parent); let param_data = &generics.params.type_or_consts[id.local_id]; write!(f, "{}", param_data.name().unwrap()) } - ConstValue::Concrete(c) => write!(f, "{}", c.interned), + ConstValue::Concrete(c) => match &c.interned { + ConstScalar::Bytes(b, m) => render_const_scalar(f, &b, m, &data.ty), + ConstScalar::Unknown => f.write_char('_'), + }, } } } +fn render_const_scalar( + f: &mut HirFormatter<'_>, + b: &[u8], + memory_map: &MemoryMap, + ty: &Ty, +) -> Result<(), HirDisplayError> { + match ty.kind(Interner) { + chalk_ir::TyKind::Scalar(s) => match s { + Scalar::Bool => write!(f, "{}", if b[0] == 0 { false } else { true }), + Scalar::Char => { + let x = u128::from_le_bytes(pad16(b, false)) as u32; + let Ok(c) = char::try_from(x) else { + return f.write_str("<unicode-error>"); + }; + write!(f, "{c:?}") + } + Scalar::Int(_) => { + let x = i128::from_le_bytes(pad16(b, true)); + write!(f, "{x}") + } + Scalar::Uint(_) => { + let x = u128::from_le_bytes(pad16(b, false)); + write!(f, "{x}") + } + Scalar::Float(fl) => match fl { + chalk_ir::FloatTy::F32 => { + let x = f32::from_le_bytes(b.try_into().unwrap()); + write!(f, "{x:?}") + } + chalk_ir::FloatTy::F64 => { + let x = f64::from_le_bytes(b.try_into().unwrap()); + write!(f, "{x:?}") + } + }, + }, + chalk_ir::TyKind::Ref(_, _, t) => match t.kind(Interner) { + chalk_ir::TyKind::Str => { + let addr = usize::from_le_bytes(b[0..b.len() / 2].try_into().unwrap()); + let bytes = memory_map.0.get(&addr).map(|x| &**x).unwrap_or(&[]); + let s = std::str::from_utf8(bytes).unwrap_or("<utf8-error>"); + write!(f, "{s:?}") + } + _ => f.write_str("<error>"), + }, + chalk_ir::TyKind::Adt(adt, subst) => match adt.0 { + hir_def::AdtId::StructId(s) => { + let data = f.db.struct_data(s); + let Ok(layout) = f.db.layout_of_adt(adt.0, subst.clone()) else { + return f.write_str("<layout-error>"); + }; + match data.variant_data.as_ref() { + VariantData::Record(fields) | VariantData::Tuple(fields) => { + let field_types = f.db.field_types(s.into()); + let krate = adt.0.module(f.db.upcast()).krate(); + let render_field = |f: &mut HirFormatter<'_>, id: LocalFieldId| { + let offset = layout + .fields + .offset(u32::from(id.into_raw()) as usize) + .bytes_usize(); + let ty = field_types[id].clone().substitute(Interner, subst); + let Ok(layout) = layout_of_ty(f.db, &ty, krate) else { + return f.write_str("<layout-error>"); + }; + let size = layout.size.bytes_usize(); + render_const_scalar(f, 
&b[offset..offset + size], memory_map, &ty)
+                    };
+                    let mut it = fields.iter();
+                    if matches!(data.variant_data.as_ref(), VariantData::Record(_)) {
+                        write!(f, "{} {{", data.name)?;
+                        if let Some((id, data)) = it.next() {
+                            write!(f, " {}: ", data.name)?;
+                            render_field(f, id)?;
+                        }
+                        for (id, data) in it {
+                            write!(f, ", {}: ", data.name)?;
+                            render_field(f, id)?;
+                        }
+                        write!(f, " }}")?;
+                    } else {
+                        let mut it = it.map(|x| x.0);
+                        write!(f, "{}(", data.name)?;
+                        if let Some(id) = it.next() {
+                            render_field(f, id)?;
+                        }
+                        for id in it {
+                            write!(f, ", ")?;
+                            render_field(f, id)?;
+                        }
+                        write!(f, ")")?;
+                    }
+                    return Ok(());
+                }
+                VariantData::Unit => write!(f, "{}", data.name),
+            }
+        }
+        hir_def::AdtId::UnionId(u) => write!(f, "{}", f.db.union_data(u).name),
+        hir_def::AdtId::EnumId(_) => f.write_str("<enum-not-supported>"),
+        },
+        _ => f.write_str("<error>"),
+    }
+}
+
 impl HirDisplay for BoundVar {
     fn hir_fmt(&self, f: &mut HirFormatter<'_>) -> Result<(), HirDisplayError> {
         write!(f, "?{}.{}", self.debruijn.depth(), self.index)
@@ -614,8 +723,9 @@ impl HirDisplay for Ty {
                     {
                         return true;
                     }
-                    if let Some(ConstValue::Concrete(c)) =
-                        parameter.constant(Interner).map(|x| x.data(Interner).value)
+                    if let Some(ConstValue::Concrete(c)) = parameter
+                        .constant(Interner)
+                        .map(|x| &x.data(Interner).value)
                     {
                         if c.interned == ConstScalar::Unknown {
                             return true;
diff --git a/crates/hir-ty/src/infer.rs b/crates/hir-ty/src/infer.rs
index 512ba63dfca..c77a5e07375 100644
--- a/crates/hir-ty/src/infer.rs
+++ b/crates/hir-ty/src/infer.rs
@@ -512,7 +512,11 @@ impl<'a> InferenceContext<'a> {
         }
     }
 
-    fn resolve_all(self) -> InferenceResult {
+    // FIXME: This function should be private in this module. It is currently only used in consteval, since we need
+    // an `InferenceResult` in the middle of inference. See the fixme comment in `consteval::eval_to_const`. If you
+    // used this function for another workaround, mention it here. If you really need this function and believe that
+    // there is no problem in it being `pub(crate)`, remove this comment.
+    pub(crate) fn resolve_all(self) -> InferenceResult {
         let InferenceContext { mut table, mut result, .. } = self;
 
         table.fallback_if_possible();
@@ -681,11 +685,9 @@ impl<'a> InferenceContext<'a> {
     /// Replaces ConstScalar::Unknown by a new type var, so we can maybe still infer it.
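+    /// (For example, an unknown array length becomes a fresh inference variable that can still
+    /// unify with a concrete length later.)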
fn insert_const_vars_shallow(&mut self, c: Const) -> Const { let data = c.data(Interner); - match data.value { + match &data.value { ConstValue::Concrete(cc) => match cc.interned { - hir_def::type_ref::ConstScalar::Unknown => { - self.table.new_const_var(data.ty.clone()) - } + crate::ConstScalar::Unknown => self.table.new_const_var(data.ty.clone()), _ => c, }, _ => c, diff --git a/crates/hir-ty/src/infer/expr.rs b/crates/hir-ty/src/infer/expr.rs index 175fded8cca..e169cbef497 100644 --- a/crates/hir-ty/src/infer/expr.rs +++ b/crates/hir-ty/src/infer/expr.rs @@ -822,7 +822,11 @@ impl<'a> InferenceContext<'a> { let cur_elem_ty = self.infer_expr_inner(expr, &expected); coerce.coerce(self, Some(expr), &cur_elem_ty); } - consteval::usize_const(Some(elements.len() as u128)) + consteval::usize_const( + self.db, + Some(elements.len() as u128), + self.resolver.krate(), + ) } &Array::Repeat { initializer, repeat } => { self.infer_expr_coerce(initializer, &Expectation::has_type(elem_ty)); @@ -843,7 +847,7 @@ impl<'a> InferenceContext<'a> { DebruijnIndex::INNERMOST, ) } else { - consteval::usize_const(None) + consteval::usize_const(self.db, None, self.resolver.krate()) } } }; @@ -859,7 +863,11 @@ impl<'a> InferenceContext<'a> { Literal::ByteString(bs) => { let byte_type = TyKind::Scalar(Scalar::Uint(UintTy::U8)).intern(Interner); - let len = consteval::usize_const(Some(bs.len() as u128)); + let len = consteval::usize_const( + self.db, + Some(bs.len() as u128), + self.resolver.krate(), + ); let array_type = TyKind::Array(byte_type, len).intern(Interner); TyKind::Ref(Mutability::Not, static_lifetime(), array_type).intern(Interner) @@ -982,8 +990,11 @@ impl<'a> InferenceContext<'a> { // type and length). This should not be just an error type, // because we are to compute the unifiability of this type and // `rhs_ty` in the end of this function to issue type mismatches. - _ => TyKind::Array(self.err_ty(), crate::consteval::usize_const(None)) - .intern(Interner), + _ => TyKind::Array( + self.err_ty(), + crate::consteval::usize_const(self.db, None, self.resolver.krate()), + ) + .intern(Interner), } } Expr::RecordLit { path, fields, .. 
} => { diff --git a/crates/hir-ty/src/infer/pat.rs b/crates/hir-ty/src/infer/pat.rs index f154dac8e87..2c935733c06 100644 --- a/crates/hir-ty/src/infer/pat.rs +++ b/crates/hir-ty/src/infer/pat.rs @@ -6,17 +6,15 @@ use chalk_ir::Mutability; use hir_def::{ expr::{BindingAnnotation, Expr, Literal, Pat, PatId}, path::Path, - type_ref::ConstScalar, }; use hir_expand::name::Name; use crate::{ - consteval::intern_const_scalar, + consteval::{try_const_usize, usize_const}, infer::{BindingMode, Expectation, InferenceContext, TypeMismatch}, lower::lower_to_chalk_mutability, primitive::UintTy, - static_lifetime, ConcreteConst, ConstValue, Interner, Scalar, Substitution, Ty, TyBuilder, - TyExt, TyKind, + static_lifetime, Interner, Scalar, Substitution, Ty, TyBuilder, TyExt, TyKind, }; use super::PatLike; @@ -264,18 +262,13 @@ impl<'a> InferenceContext<'a> { if let &Some(slice_pat_id) = slice { let rest_pat_ty = match expected.kind(Interner) { TyKind::Array(_, length) => { - let len = match length.data(Interner).value { - ConstValue::Concrete(ConcreteConst { - interned: ConstScalar::UInt(len), - }) => len.checked_sub((prefix.len() + suffix.len()) as u128), - _ => None, - }; + let len = try_const_usize(length); + let len = len.and_then(|len| { + len.checked_sub((prefix.len() + suffix.len()) as u128) + }); TyKind::Array( elem_ty.clone(), - intern_const_scalar( - len.map_or(ConstScalar::Unknown, |len| ConstScalar::UInt(len)), - TyBuilder::usize(), - ), + usize_const(self.db, len, self.resolver.krate()), ) } _ => TyKind::Slice(elem_ty.clone()), diff --git a/crates/hir-ty/src/infer/unify.rs b/crates/hir-ty/src/infer/unify.rs index 46ed3533c8c..504f0743aa9 100644 --- a/crates/hir-ty/src/infer/unify.rs +++ b/crates/hir-ty/src/infer/unify.rs @@ -704,14 +704,13 @@ impl<'a> fmt::Debug for InferenceTable<'a> { mod resolve { use super::InferenceTable; use crate::{ - ConcreteConst, Const, ConstData, ConstValue, DebruijnIndex, GenericArg, InferenceVar, - Interner, Lifetime, Ty, TyVariableKind, VariableKind, + ConcreteConst, Const, ConstData, ConstScalar, ConstValue, DebruijnIndex, GenericArg, + InferenceVar, Interner, Lifetime, Ty, TyVariableKind, VariableKind, }; use chalk_ir::{ cast::Cast, fold::{TypeFoldable, TypeFolder}, }; - use hir_def::type_ref::ConstScalar; #[derive(chalk_derive::FallibleTypeFolder)] #[has_interner(Interner)] diff --git a/crates/hir-ty/src/inhabitedness.rs b/crates/hir-ty/src/inhabitedness.rs index 0c547192ac0..36af78153d4 100644 --- a/crates/hir-ty/src/inhabitedness.rs +++ b/crates/hir-ty/src/inhabitedness.rs @@ -6,12 +6,12 @@ use chalk_ir::{ DebruijnIndex, }; use hir_def::{ - adt::VariantData, attr::Attrs, type_ref::ConstScalar, visibility::Visibility, AdtId, - EnumVariantId, HasModule, Lookup, ModuleId, VariantId, + adt::VariantData, attr::Attrs, visibility::Visibility, AdtId, EnumVariantId, HasModule, Lookup, + ModuleId, VariantId, }; use crate::{ - db::HirDatabase, Binders, ConcreteConst, Const, ConstValue, Interner, Substitution, Ty, TyKind, + consteval::try_const_usize, db::HirDatabase, Binders, Interner, Substitution, Ty, TyKind, }; /// Checks whether a type is visibly uninhabited from a particular module. @@ -69,7 +69,7 @@ impl TypeVisitor<Interner> for UninhabitedFrom<'_> { TyKind::Adt(adt, subst) => self.visit_adt(adt.0, subst), TyKind::Never => BREAK_VISIBLY_UNINHABITED, TyKind::Tuple(..) 
=> ty.super_visit_with(self, outer_binder), - TyKind::Array(item_ty, len) => match try_usize_const(len) { + TyKind::Array(item_ty, len) => match try_const_usize(len) { Some(0) | None => CONTINUE_OPAQUELY_INHABITED, Some(1..) => item_ty.super_visit_with(self, outer_binder), }, @@ -160,14 +160,3 @@ impl UninhabitedFrom<'_> { } } } - -fn try_usize_const(c: &Const) -> Option<u128> { - let data = &c.data(Interner); - if data.ty.kind(Interner) != &TyKind::Scalar(chalk_ir::Scalar::Uint(chalk_ir::UintTy::Usize)) { - return None; - } - match data.value { - ConstValue::Concrete(ConcreteConst { interned: ConstScalar::UInt(value) }) => Some(value), - _ => None, - } -} diff --git a/crates/hir-ty/src/interner.rs b/crates/hir-ty/src/interner.rs index 7bf73560cbe..aea7e9762fd 100644 --- a/crates/hir-ty/src/interner.rs +++ b/crates/hir-ty/src/interner.rs @@ -1,10 +1,10 @@ //! Implementation of the Chalk `Interner` trait, which allows customizing the //! representation of the various objects Chalk deals with (types, goals etc.). -use crate::{chalk_db, tls, GenericArg}; +use crate::{chalk_db, tls, ConstScalar, GenericArg}; use base_db::salsa::InternId; use chalk_ir::{Goal, GoalData}; -use hir_def::{type_ref::ConstScalar, TypeAliasId}; +use hir_def::TypeAliasId; use intern::{impl_internable, Interned}; use smallvec::SmallVec; use std::{fmt, sync::Arc}; diff --git a/crates/hir-ty/src/layout.rs b/crates/hir-ty/src/layout.rs index c82c274524a..a321c211bf4 100644 --- a/crates/hir-ty/src/layout.rs +++ b/crates/hir-ty/src/layout.rs @@ -11,7 +11,7 @@ use hir_def::{ }; use stdx::never; -use crate::{db::HirDatabase, Interner, Substitution, Ty}; +use crate::{consteval::try_const_usize, db::HirDatabase, Interner, Substitution, Ty}; use self::adt::struct_variant_idx; pub use self::{ @@ -122,17 +122,9 @@ pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty, krate: CrateId) -> Result<Lay cx.univariant(dl, &fields, &ReprOptions::default(), kind).ok_or(LayoutError::Unknown)? } TyKind::Array(element, count) => { - let count = match count.data(Interner).value { - chalk_ir::ConstValue::Concrete(c) => match c.interned { - hir_def::type_ref::ConstScalar::Int(x) => x as u64, - hir_def::type_ref::ConstScalar::UInt(x) => x as u64, - hir_def::type_ref::ConstScalar::Unknown => { - user_error!("unknown const generic parameter") - } - _ => user_error!("mismatched type of const generic parameter"), - }, - _ => return Err(LayoutError::HasPlaceholder), - }; + let count = try_const_usize(&count).ok_or(LayoutError::UserError( + "mismatched type of const generic parameter".to_string(), + ))? as u64; let element = layout_of_ty(db, element, krate)?; let size = element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow)?; diff --git a/crates/hir-ty/src/layout/adt.rs b/crates/hir-ty/src/layout/adt.rs index cb7968c1446..b22d0fe8ded 100644 --- a/crates/hir-ty/src/layout/adt.rs +++ b/crates/hir-ty/src/layout/adt.rs @@ -76,17 +76,8 @@ pub fn layout_of_adt_query( |min, max| Integer::repr_discr(&dl, &repr, min, max).unwrap_or((Integer::I8, false)), variants.iter_enumerated().filter_map(|(id, _)| { let AdtId::EnumId(e) = def else { return None }; - let d = match db - .const_eval_variant(EnumVariantId { parent: e, local_id: id.0 }) - .ok()? 
- { - crate::consteval::ComputedExpr::Literal(l) => match l { - hir_def::expr::Literal::Int(i, _) => i, - hir_def::expr::Literal::Uint(i, _) => i as i128, - _ => return None, - }, - _ => return None, - }; + let d = + db.const_eval_discriminant(EnumVariantId { parent: e, local_id: id.0 }).ok()?; Some((id, d)) }), // FIXME: The current code for niche-filling relies on variant indices diff --git a/crates/hir-ty/src/layout/tests.rs b/crates/hir-ty/src/layout/tests.rs index 067bdc960da..546044fc13a 100644 --- a/crates/hir-ty/src/layout/tests.rs +++ b/crates/hir-ty/src/layout/tests.rs @@ -82,8 +82,8 @@ fn eval_expr(ra_fixture: &str, minicore: &str) -> Result<Layout, LayoutError> { #[track_caller] fn check_size_and_align(ra_fixture: &str, minicore: &str, size: u64, align: u64) { let l = eval_goal(ra_fixture, minicore).unwrap(); - assert_eq!(l.size.bytes(), size); - assert_eq!(l.align.abi.bytes(), align); + assert_eq!(l.size.bytes(), size, "size mismatch"); + assert_eq!(l.align.abi.bytes(), align, "align mismatch"); } #[track_caller] @@ -300,4 +300,9 @@ fn enums_with_discriminants() { C, // implicitly becomes 256, so we need two bytes } } + size_and_align! { + enum Goal { + A = 1, // This one is (perhaps surprisingly) zero sized. + } + } } diff --git a/crates/hir-ty/src/lib.rs b/crates/hir-ty/src/lib.rs index 6056043dcd9..9c63d67ab19 100644 --- a/crates/hir-ty/src/lib.rs +++ b/crates/hir-ty/src/lib.rs @@ -13,6 +13,7 @@ mod builder; mod chalk_db; mod chalk_ext; pub mod consteval; +pub mod mir; mod infer; mod inhabitedness; mod interner; @@ -34,7 +35,7 @@ mod tests; #[cfg(test)] mod test_db; -use std::sync::Arc; +use std::{collections::HashMap, hash::Hash, sync::Arc}; use chalk_ir::{ fold::{Shift, TypeFoldable}, @@ -46,6 +47,7 @@ use either::Either; use hir_def::{expr::ExprId, type_ref::Rawness, TypeOrConstParamId}; use hir_expand::name; use la_arena::{Arena, Idx}; +use mir::MirEvalError; use rustc_hash::FxHashSet; use traits::FnTrait; use utils::Generics; @@ -145,6 +147,49 @@ pub type ConstrainedSubst = chalk_ir::ConstrainedSubst<Interner>; pub type Guidance = chalk_solve::Guidance<Interner>; pub type WhereClause = chalk_ir::WhereClause<Interner>; +/// A constant can have references to other things. The memory map's job is to hold +/// the necessary bits of memory of the const eval session to keep the constant +/// meaningful. +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct MemoryMap(pub HashMap<usize, Vec<u8>>); + +impl MemoryMap { + fn insert(&mut self, addr: usize, x: Vec<u8>) { + self.0.insert(addr, x); + } + + /// This function converts each address via a function `f`, which gets the byte intervals and assigns an address + /// to them. It is useful when you want to load a constant with a memory map into a new memory. You can pass an + /// allocator function as `f` and it will return a mapping of old addresses to new addresses.
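To make the relocation contract concrete, here is a minimal standalone sketch of the same shape, with simplified names and a `String` error type standing in for `MirEvalError`; only `transform_addresses` mirrors the API above, everything else is illustrative:

use std::collections::HashMap;

// Simplified stand-in for MemoryMap; the real type wraps the same HashMap.
struct Map(HashMap<usize, Vec<u8>>);

impl Map {
    // Relocate every interval with `f`, returning an old -> new address mapping.
    fn transform_addresses(
        &self,
        mut f: impl FnMut(&[u8]) -> Result<usize, String>,
    ) -> Result<HashMap<usize, usize>, String> {
        self.0.iter().map(|(addr, bytes)| Ok((*addr, f(bytes)?))).collect()
    }
}

fn main() -> Result<(), String> {
    let mut m = Map(HashMap::new());
    m.0.insert(0x1000, b"hello".to_vec());
    m.0.insert(0x2000, b"world".to_vec());

    // A bump allocator: copy each interval into `heap` and hand back its new address.
    let mut heap: Vec<u8> = vec![];
    let relocation = m.transform_addresses(|bytes| {
        let new_addr = heap.len();
        heap.extend_from_slice(bytes);
        Ok(new_addr)
    })?;

    // Pointers inside the constant can now be rewritten via `relocation[&old_addr]`.
    assert_eq!(heap.len(), 10);
    assert_eq!(relocation.len(), 2);
    Ok(())
}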
+ fn transform_addresses( + &self, + mut f: impl FnMut(&[u8]) -> Result<usize, MirEvalError>, + ) -> Result<HashMap<usize, usize>, MirEvalError> { + self.0.iter().map(|x| Ok((*x.0, f(x.1)?))).collect() + } +} + +/// A concrete constant value +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ConstScalar { + Bytes(Vec<u8>, MemoryMap), + /// Case of an unknown value that rustc might know but we don't + // FIXME: this is a hack to get around chalk not being able to represent unevaluatable + // constants + // https://github.com/rust-lang/rust-analyzer/pull/8813#issuecomment-840679177 + // https://rust-lang.zulipchat.com/#narrow/stream/144729-wg-traits/topic/Handling.20non.20evaluatable.20constants'.20equality/near/238386348 + Unknown, +} + +impl Hash for ConstScalar { + fn hash<H: std::hash::Hasher>(&self, state: &mut H) { + core::mem::discriminant(self).hash(state); + if let ConstScalar::Bytes(b, _) = self { + b.hash(state) + } + } +} + /// Return the index of a parameter in the generic type parameter list by its id. pub fn param_idx(db: &dyn HirDatabase, id: TypeOrConstParamId) -> Option<usize> { generics(db.upcast(), id.parent).param_idx(id) diff --git a/crates/hir-ty/src/lower.rs b/crates/hir-ty/src/lower.rs index 140ed7f5919..296c6c5b21f 100644 --- a/crates/hir-ty/src/lower.rs +++ b/crates/hir-ty/src/lower.rs @@ -27,9 +27,7 @@ use hir_def::{ lang_item::{lang_attr, LangItem}, path::{GenericArg, ModPath, Path, PathKind, PathSegment, PathSegments}, resolver::{HasResolver, Resolver, TypeNs}, - type_ref::{ - ConstScalarOrPath, TraitBoundModifier, TraitRef as HirTraitRef, TypeBound, TypeRef, - }, + type_ref::{ConstRefOrPath, TraitBoundModifier, TraitRef as HirTraitRef, TypeBound, TypeRef}, AdtId, AssocItemId, ConstId, ConstParamId, EnumId, EnumVariantId, FunctionId, GenericDefId, HasModule, ImplId, ItemContainerId, LocalFieldId, Lookup, ModuleDefId, StaticId, StructId, TraitId, TypeAliasId, TypeOrConstParamId, TypeParamId, UnionId, VariantId, @@ -44,7 +42,7 @@ use syntax::ast; use crate::{ all_super_traits, - consteval::{intern_const_scalar, path_to_const, unknown_const, unknown_const_as_generic}, + consteval::{intern_const_ref, path_to_const, unknown_const, unknown_const_as_generic}, db::HirDatabase, make_binders, mapping::{from_chalk_trait_id, ToChalk}, @@ -968,7 +966,7 @@ impl<'a> TyLoweringContext<'a> { // - `Destruct` impls are built-in in 1.62 (current nightlies as of 08-04-2022), so until // the builtin impls are supported by Chalk, we ignore them here.
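The manual `Hash` impl above mixes in only the enum discriminant plus the byte payload, because `MemoryMap` wraps a `HashMap` and so has no `Hash` impl of its own; this stays consistent with `Eq`, since equal values still hash equally. A self-contained sketch of the same pattern (local stand-in types, not this PR's):

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Stand-in for ConstScalar: the second field models MemoryMap, the part
// of the payload we choose not to hash.
#[derive(PartialEq, Eq)]
enum Scalar {
    Bytes(Vec<u8>, Vec<(usize, Vec<u8>)>),
    Unknown,
}

impl Hash for Scalar {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash which variant this is...
        core::mem::discriminant(self).hash(state);
        // ...plus only the hashable part of the payload. Hashing a subset
        // of what Eq compares is fine: a == b still implies equal hashes.
        if let Scalar::Bytes(b, _) = self {
            b.hash(state);
        }
    }
}

fn main() {
    let mut h1 = DefaultHasher::new();
    let mut h2 = DefaultHasher::new();
    Scalar::Bytes(vec![1, 2], vec![]).hash(&mut h1);
    Scalar::Bytes(vec![1, 2], vec![]).hash(&mut h2);
    assert_eq!(h1.finish(), h2.finish());
}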
if let Some(lang) = lang_attr(self.db.upcast(), tr.hir_trait_id()) { - if lang == "drop" || lang == "destruct" { + if matches!(lang, LangItem::Drop | LangItem::Destruct) { return false; } } @@ -1919,7 +1917,7 @@ pub(crate) fn generic_arg_to_chalk<'a, T>( arg: &'a GenericArg, this: &mut T, for_type: impl FnOnce(&mut T, &TypeRef) -> Ty + 'a, - for_const: impl FnOnce(&mut T, &ConstScalarOrPath, Ty) -> Const + 'a, + for_const: impl FnOnce(&mut T, &ConstRefOrPath, Ty) -> Const + 'a, ) -> Option<crate::GenericArg> { let kind = match kind_id { Either::Left(_) => ParamKind::Type, @@ -1947,7 +1945,7 @@ pub(crate) fn generic_arg_to_chalk<'a, T>( let p = p.mod_path(); if p.kind == PathKind::Plain { if let [n] = p.segments() { - let c = ConstScalarOrPath::Path(n.clone()); + let c = ConstRefOrPath::Path(n.clone()); return Some( GenericArgData::Const(for_const(this, &c, c_ty)).intern(Interner), ); @@ -1964,14 +1962,14 @@ pub(crate) fn const_or_path_to_chalk( db: &dyn HirDatabase, resolver: &Resolver, expected_ty: Ty, - value: &ConstScalarOrPath, + value: &ConstRefOrPath, mode: ParamLoweringMode, args: impl FnOnce() -> Generics, debruijn: DebruijnIndex, ) -> Const { match value { - ConstScalarOrPath::Scalar(s) => intern_const_scalar(*s, expected_ty), - ConstScalarOrPath::Path(n) => { + ConstRefOrPath::Scalar(s) => intern_const_ref(db, s, expected_ty, resolver.krate()), + ConstRefOrPath::Path(n) => { let path = ModPath::from_segments(PathKind::Plain, Some(n.clone())); path_to_const(db, resolver, &path, mode, args, debruijn) .unwrap_or_else(|| unknown_const(expected_ty)) diff --git a/crates/hir-ty/src/method_resolution.rs b/crates/hir-ty/src/method_resolution.rs index 1a5ee97fd04..8dd34bc3882 100644 --- a/crates/hir-ty/src/method_resolution.rs +++ b/crates/hir-ty/src/method_resolution.rs @@ -660,10 +660,10 @@ pub fn lookup_impl_const( env: Arc<TraitEnvironment>, const_id: ConstId, subs: Substitution, -) -> ConstId { +) -> (ConstId, Substitution) { let trait_id = match const_id.lookup(db.upcast()).container { ItemContainerId::TraitId(id) => id, - _ => return const_id, + _ => return (const_id, subs), }; let substitution = Substitution::from_iter(Interner, subs.iter(Interner)); let trait_ref = TraitRef { trait_id: to_chalk_trait_id(trait_id), substitution }; @@ -671,12 +671,14 @@ pub fn lookup_impl_const( let const_data = db.const_data(const_id); let name = match const_data.name.as_ref() { Some(name) => name, - None => return const_id, + None => return (const_id, subs), }; lookup_impl_assoc_item_for_trait_ref(trait_ref, db, env, name) - .and_then(|assoc| if let AssocItemId::ConstId(id) = assoc { Some(id) } else { None }) - .unwrap_or(const_id) + .and_then( + |assoc| if let (AssocItemId::ConstId(id), s) = assoc { Some((id, s)) } else { None }, + ) + .unwrap_or((const_id, subs)) } /// Looks up the impl method that actually runs for the trait method `func`. 
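Why `lookup_impl_const` above (and `lookup_impl_method` below) now also return the impl's `Substitution`: for a generic impl, resolving to the impl item alone is not enough, because the item's value still mentions the impl's own generic parameters. A hypothetical example of the situation in plain Rust (nothing here is this PR's API):

trait Bound {
    const SIZE: usize;
}

// A generic impl: the associated const's value depends on `T`, so
// resolving `<Vec<u64> as Bound>::SIZE` to this impl is only useful
// together with the substitution `T = u64`.
impl<T> Bound for Vec<T> {
    const SIZE: usize = std::mem::size_of::<T>();
}

fn main() {
    assert_eq!(<Vec<u64> as Bound>::SIZE, 8);
    assert_eq!(<Vec<u8> as Bound>::SIZE, 1);
}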
@@ -687,10 +689,10 @@ pub fn lookup_impl_method( env: Arc<TraitEnvironment>, func: FunctionId, fn_subst: Substitution, -) -> FunctionId { +) -> (FunctionId, Substitution) { let trait_id = match func.lookup(db.upcast()).container { ItemContainerId::TraitId(id) => id, - _ => return func, + _ => return (func, fn_subst), }; let trait_params = db.generic_params(trait_id.into()).type_or_consts.len(); let fn_params = fn_subst.len(Interner) - trait_params; @@ -701,8 +703,14 @@ pub fn lookup_impl_method( let name = &db.function_data(func).name; lookup_impl_assoc_item_for_trait_ref(trait_ref, db, env, name) - .and_then(|assoc| if let AssocItemId::FunctionId(id) = assoc { Some(id) } else { None }) - .unwrap_or(func) + .and_then(|assoc| { + if let (AssocItemId::FunctionId(id), subst) = assoc { + Some((id, subst)) + } else { + None + } + }) + .unwrap_or((func, fn_subst)) } fn lookup_impl_assoc_item_for_trait_ref( @@ -710,7 +718,7 @@ fn lookup_impl_assoc_item_for_trait_ref( db: &dyn HirDatabase, env: Arc<TraitEnvironment>, name: &Name, -) -> Option<AssocItemId> { +) -> Option<(AssocItemId, Substitution)> { let self_ty = trait_ref.self_type_parameter(Interner); let self_ty_fp = TyFingerprint::for_trait_impl(&self_ty)?; let impls = db.trait_impls_in_deps(env.krate); @@ -718,8 +726,8 @@ fn lookup_impl_assoc_item_for_trait_ref( let table = InferenceTable::new(db, env); - let impl_data = find_matching_impl(impls, table, trait_ref)?; - impl_data.items.iter().find_map(|&it| match it { + let (impl_data, impl_subst) = find_matching_impl(impls, table, trait_ref)?; + let item = impl_data.items.iter().find_map(|&it| match it { AssocItemId::FunctionId(f) => { (db.function_data(f).name == *name).then_some(AssocItemId::FunctionId(f)) } @@ -730,14 +738,15 @@ fn lookup_impl_assoc_item_for_trait_ref( .map(|n| n == name) .and_then(|result| if result { Some(AssocItemId::ConstId(c)) } else { None }), AssocItemId::TypeAliasId(_) => None, - }) + })?; + Some((item, impl_subst)) } fn find_matching_impl( mut impls: impl Iterator<Item = ImplId>, mut table: InferenceTable<'_>, actual_trait_ref: TraitRef, -) -> Option<Arc<ImplData>> { +) -> Option<(Arc<ImplData>, Substitution)> { let db = table.db; loop { let impl_ = impls.next()?; @@ -758,7 +767,7 @@ fn find_matching_impl( .into_iter() .map(|b| b.cast(Interner)); let goal = crate::Goal::all(Interner, wcs); - table.try_obligation(goal).map(|_| impl_data) + table.try_obligation(goal).map(|_| (impl_data, table.resolve_completely(impl_substs))) }); if r.is_some() { break r; diff --git a/crates/hir-ty/src/mir.rs b/crates/hir-ty/src/mir.rs new file mode 100644 index 00000000000..00fa6b79522 --- /dev/null +++ b/crates/hir-ty/src/mir.rs @@ -0,0 +1,811 @@ +//! 
MIR definitions and implementation + +use std::iter; + +use crate::{ + infer::PointerCast, Const, ConstScalar, InferenceResult, Interner, MemoryMap, Substitution, Ty, +}; +use chalk_ir::Mutability; +use hir_def::{ + expr::{Expr, Ordering}, + DefWithBodyId, FieldId, UnionId, VariantId, +}; +use la_arena::{Arena, Idx, RawIdx}; + +mod eval; +mod lower; + +pub use eval::{interpret_mir, pad16, Evaluator, MirEvalError}; +pub use lower::{lower_to_mir, mir_body_query, mir_body_recover, MirLowerError}; +use smallvec::{smallvec, SmallVec}; + +use super::consteval::{intern_const_scalar, try_const_usize}; + +pub type BasicBlockId = Idx<BasicBlock>; +pub type LocalId = Idx<Local>; + +fn return_slot() -> LocalId { + LocalId::from_raw(RawIdx::from(0)) +} + +#[derive(Debug, PartialEq, Eq)] +pub struct Local { + pub mutability: Mutability, + //pub local_info: Option<Box<LocalInfo>>, + //pub internal: bool, + //pub is_block_tail: Option<BlockTailInfo>, + pub ty: Ty, + //pub user_ty: Option<Box<UserTypeProjections>>, + //pub source_info: SourceInfo, +} + +/// An operand in MIR represents a "value" in Rust, the definition of which is undecided and part of +/// the memory model. One proposal for a definition of values can be found [on UCG][value-def]. +/// +/// [value-def]: https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/value-domain.md +/// +/// The most common way to create values is via loading a place. Loading a place is an operation +/// which reads the memory of the place and converts it to a value. This is a fundamentally *typed* +/// operation. The nature of the value produced depends on the type of the conversion. Furthermore, +/// there may be other effects: if the type has a validity constraint loading the place might be UB +/// if the validity constraint is not met. +/// +/// **Needs clarification:** Ralf proposes that loading a place not have side-effects. +/// This is what is implemented in miri today. Are these the semantics we want for MIR? Is this +/// something we can even decide without knowing more about Rust's memory model? +/// +/// **Needs clarification:** Is loading a place that has its variant index set well-formed? Miri +/// currently implements it, but it seems like this may be something to check against in the +/// validator. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Operand { + /// Creates a value by loading the given place. + /// + /// Before drop elaboration, the type of the place must be `Copy`. After drop elaboration there + /// is no such requirement. + Copy(Place), + + /// Creates a value by performing a load of the place, just like the `Copy` operand. + /// + /// This *may* additionally overwrite the place with `uninit` bytes, depending on how we decide + /// in [UCG#188]. You should not emit MIR that may attempt a subsequent second load of this + /// place without first re-initializing it. + /// + /// [UCG#188]: https://github.com/rust-lang/unsafe-code-guidelines/issues/188 + Move(Place), + /// Constants are already semantically values, and remain unchanged.
+ Constant(Const), +} + +impl Operand { + fn from_concrete_const(data: Vec<u8>, memory_map: MemoryMap, ty: Ty) -> Self { + Operand::Constant(intern_const_scalar(ConstScalar::Bytes(data, memory_map), ty)) + } + + fn from_bytes(data: Vec<u8>, ty: Ty) -> Self { + Operand::from_concrete_const(data, MemoryMap::default(), ty) + } +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum ProjectionElem<V, T> { + Deref, + Field(FieldId), + TupleField(usize), + Index(V), + ConstantIndex { offset: u64, min_length: u64, from_end: bool }, + Subslice { from: u64, to: u64, from_end: bool }, + //Downcast(Option<Symbol>, VariantIdx), + OpaqueCast(T), +} + +type PlaceElem = ProjectionElem<LocalId, Ty>; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Place { + pub local: LocalId, + pub projection: Vec<PlaceElem>, +} + +impl From<LocalId> for Place { + fn from(local: LocalId) -> Self { + Self { local, projection: vec![] } + } +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum AggregateKind { + /// The type is of the element + Array(Ty), + /// The type is of the tuple + Tuple(Ty), + Adt(VariantId, Substitution), + Union(UnionId, FieldId), + //Closure(LocalDefId, SubstsRef), + //Generator(LocalDefId, SubstsRef, Movability), +} + +#[derive(Debug, Clone, Hash, PartialEq, Eq)] +pub struct SwitchTargets { + /// Possible values. The locations to branch to in each case + /// are found in the corresponding indices from the `targets` vector. + values: SmallVec<[u128; 1]>, + + /// Possible branch sites. The last element of this vector is used + /// for the otherwise branch, so targets.len() == values.len() + 1 + /// should hold. + // + // This invariant is quite non-obvious and also could be improved. + // One way to make this invariant is to have something like this instead: + // + // branches: Vec<(ConstInt, BasicBlock)>, + // otherwise: Option<BasicBlock> // exhaustive if None + // + // However we’ve decided to keep this as-is until we figure a case + // where some other approach seems to be strictly better than other. + targets: SmallVec<[BasicBlockId; 2]>, +} + +impl SwitchTargets { + /// Creates switch targets from an iterator of values and target blocks. + /// + /// The iterator may be empty, in which case the `SwitchInt` instruction is equivalent to + /// `goto otherwise;`. + pub fn new( + targets: impl Iterator<Item = (u128, BasicBlockId)>, + otherwise: BasicBlockId, + ) -> Self { + let (values, mut targets): (SmallVec<_>, SmallVec<_>) = targets.unzip(); + targets.push(otherwise); + Self { values, targets } + } + + /// Builds a switch targets definition that jumps to `then` if the tested value equals `value`, + /// and to `else_` if not. + pub fn static_if(value: u128, then: BasicBlockId, else_: BasicBlockId) -> Self { + Self { values: smallvec![value], targets: smallvec![then, else_] } + } + + /// Returns the fallback target that is jumped to when none of the values match the operand. + pub fn otherwise(&self) -> BasicBlockId { + *self.targets.last().unwrap() + } + + /// Returns an iterator over the switch targets. + /// + /// The iterator will yield tuples containing the value and corresponding target to jump to, not + /// including the `otherwise` fallback target. + /// + /// Note that this may yield 0 elements. Only the `otherwise` branch is mandatory. + pub fn iter(&self) -> impl Iterator<Item = (u128, BasicBlockId)> + '_ { + iter::zip(&self.values, &self.targets).map(|(x, y)| (*x, *y)) + } + + /// Finds the `BasicBlock` to which this `SwitchInt` will branch given the + /// specific value. 
This cannot fail, as it'll return the `otherwise` + /// branch if there's not a specific match for the value. + pub fn target_for_value(&self, value: u128) -> BasicBlockId { + self.iter().find_map(|(v, t)| (v == value).then_some(t)).unwrap_or_else(|| self.otherwise()) + } +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Terminator { + /// Block has one successor; we continue execution there. + Goto { target: BasicBlockId }, + + /// Switches based on the computed value. + /// + /// First, evaluates the `discr` operand. The type of the operand must be a signed or unsigned + /// integer, char, or bool, and must match the given type. Then, if the list of switch targets + /// contains the computed value, continues execution at the associated basic block. Otherwise, + /// continues execution at the "otherwise" basic block. + /// + /// Target values may not appear more than once. + SwitchInt { + /// The discriminant value being tested. + discr: Operand, + + targets: SwitchTargets, + }, + + /// Indicates that the landing pad is finished and that the process should continue unwinding. + /// + /// Like a return, this marks the end of this invocation of the function. + /// + /// Only permitted in cleanup blocks. `Resume` is not permitted with `-C unwind=abort` after + /// deaggregation runs. + Resume, + + /// Indicates that the landing pad is finished and that the process should abort. + /// + /// Used to prevent unwinding for foreign items or with `-C unwind=abort`. Only permitted in + /// cleanup blocks. + Abort, + + /// Returns from the function. + /// + /// Like function calls, the exact semantics of returns in Rust are unclear. Returning very + /// likely at least assigns the value currently in the return place (`_0`) to the place + /// specified in the associated `Call` terminator in the calling function, as if assigned via + /// `dest = move _0`. It might additionally do other things, like have side-effects in the + /// aliasing model. + /// + /// If the body is a generator body, this has slightly different semantics; it instead causes a + /// `GeneratorState::Returned(_0)` to be created (as if by an `Aggregate` rvalue) and assigned + /// to the return place. + Return, + + /// Indicates a terminator that can never be reached. + /// + /// Executing this terminator is UB. + Unreachable, + + /// The behavior of this statement differs significantly before and after drop elaboration. + /// After drop elaboration, `Drop` executes the drop glue for the specified place, after which + /// it continues execution/unwinds at the given basic blocks. It is possible that executing drop + /// glue is special - this would be part of Rust's memory model. (**FIXME**: do we have an + /// issue tracking if drop glue has any interesting semantics in addition to those of a function + /// call?) + /// + /// `Drop` before drop elaboration is a *conditional* execution of the drop glue. Specifically, the + /// `Drop` will be executed if... + /// + /// **Needs clarification**: End of that sentence. This in effect should document the exact + /// behavior of drop elaboration. The following sounds vaguely right, but I'm not quite sure: + /// + /// > The drop glue is executed if, among all statements executed within this `Body`, an assignment to + /// > the place or one of its "parents" occurred more recently than a move out of it. This does not + /// > consider indirect assignments. + Drop { place: Place, target: BasicBlockId, unwind: Option<BasicBlockId> }, + + /// Drops the place and assigns a new value to it.
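A small usage sketch of the `SwitchTargets` API defined above, as a standalone mirror with plain `Vec`s instead of `SmallVec` and arena ids (illustrative only):

type BlockId = usize;

struct SwitchTargets {
    values: Vec<u128>,
    targets: Vec<BlockId>, // last entry is the `otherwise` block
}

impl SwitchTargets {
    fn new(targets: impl Iterator<Item = (u128, BlockId)>, otherwise: BlockId) -> Self {
        let (values, mut targets): (Vec<_>, Vec<_>) = targets.unzip();
        targets.push(otherwise);
        Self { values, targets }
    }

    fn otherwise(&self) -> BlockId {
        *self.targets.last().unwrap()
    }

    // Branch target for `value`, falling back to `otherwise` on no match.
    fn target_for_value(&self, value: u128) -> BlockId {
        self.values
            .iter()
            .position(|&v| v == value)
            .map(|i| self.targets[i])
            .unwrap_or_else(|| self.otherwise())
    }
}

fn main() {
    // Models `match x { 0 => bb1, 1 => bb2, _ => bb3 }`.
    let t = SwitchTargets::new([(0, 1), (1, 2)].into_iter(), 3);
    assert_eq!(t.target_for_value(0), 1);
    assert_eq!(t.target_for_value(7), 3); // falls back to `otherwise`
}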
+ /// + /// This first performs the exact same operation as the pre drop-elaboration `Drop` terminator; + /// it then additionally assigns the `value` to the `place` as if by an assignment statement. + /// This assignment occurs both in the unwind and the regular code paths. The semantics are best + /// explained by the elaboration: + /// + /// ```ignore (MIR) + /// BB0 { + /// DropAndReplace(P <- V, goto BB1, unwind BB2) + /// } + /// ``` + /// + /// becomes + /// + /// ```ignore (MIR) + /// BB0 { + /// Drop(P, goto BB1, unwind BB2) + /// } + /// BB1 { + /// // P is now uninitialized + /// P <- V + /// } + /// BB2 { + /// // P is now uninitialized -- its dtor panicked + /// P <- V + /// } + /// ``` + /// + /// Disallowed after drop elaboration. + DropAndReplace { + place: Place, + value: Operand, + target: BasicBlockId, + unwind: Option<BasicBlockId>, + }, + + /// Roughly speaking, evaluates the `func` operand and the arguments, and starts execution of + /// the referred to function. The operand types must match the argument types of the function. + /// The return place type must match the return type. The type of the `func` operand must be + /// callable, meaning either a function pointer, a function type, or a closure type. + /// + /// **Needs clarification**: The exact semantics of this. Current backends rely on `move` + /// operands not aliasing the return place. It is unclear how this is justified in MIR, see + /// [#71117]. + /// + /// [#71117]: https://github.com/rust-lang/rust/issues/71117 + Call { + /// The function that’s being called. + func: Operand, + /// Arguments the function is called with. + /// These are owned by the callee, which is free to modify them. + /// This allows the memory occupied by "by-value" arguments to be + /// reused across function calls without duplicating the contents. + args: Vec<Operand>, + /// Where the returned value will be written + destination: Place, + /// Where to go after this call returns. If none, the call necessarily diverges. + target: Option<BasicBlockId>, + /// Cleanups to be done if the call unwinds. + cleanup: Option<BasicBlockId>, + /// `true` if this is from a call in HIR rather than from an overloaded + /// operator. True for overloaded function call. + from_hir_call: bool, + // This `Span` is the span of the function, without the dot and receiver + // (e.g. `foo(a, b)` in `x.foo(a, b)` + //fn_span: Span, + }, + + /// Evaluates the operand, which must have type `bool`. If it is not equal to `expected`, + /// initiates a panic. Initiating a panic corresponds to a `Call` terminator with some + /// unspecified constant as the function to call, all the operands stored in the `AssertMessage` + /// as parameters, and `None` for the destination. Keep in mind that the `cleanup` path is not + /// necessarily executed even in the case of a panic, for example in `-C panic=abort`. If the + /// assertion does not fail, execution continues at the specified basic block. + Assert { + cond: Operand, + expected: bool, + //msg: AssertMessage, + target: BasicBlockId, + cleanup: Option<BasicBlockId>, + }, + + /// Marks a suspend point. + /// + /// Like `Return` terminators in generator bodies, this computes `value` and then a + /// `GeneratorState::Yielded(value)` as if by `Aggregate` rvalue. That value is then assigned to + /// the return place of the function calling this one, and execution continues in the calling + /// function. 
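To make the `Call` terminator's dataflow concrete, here is a toy interpreter step under heavily simplified assumptions (u64 locals, native Rust functions standing in for MIR bodies); only the args/destination/target plumbing mirrors the definition above:

type BlockId = usize;
type LocalId = usize;

enum Terminator {
    Call {
        func: fn(&[u64]) -> u64, // stand-in for an `Operand` evaluating to a function
        args: Vec<LocalId>,
        destination: LocalId,     // where the returned value will be written
        target: Option<BlockId>,  // where to go after the call returns
    },
    Return,
}

// One step: evaluate the arguments, run the callee, write the result into
// `destination`, and continue at `target` (a `None` target diverges).
fn step(t: &Terminator, locals: &mut Vec<u64>) -> Option<BlockId> {
    match t {
        Terminator::Call { func, args, destination, target } => {
            let argv: Vec<u64> = args.iter().map(|&l| locals[l]).collect();
            locals[*destination] = func(&argv);
            *target
        }
        Terminator::Return => None,
    }
}

fn main() {
    let mut locals = vec![0, 2, 3]; // _0 = return slot, _1 = 2, _2 = 3
    let call = Terminator::Call {
        func: |a| a[0] + a[1],
        args: vec![1, 2],
        destination: 0,
        target: Some(1),
    };
    assert_eq!(step(&call, &mut locals), Some(1));
    assert_eq!(locals[0], 5);
}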
When next invoked with the same first argument, execution of this function + /// continues at the `resume` basic block, with the second argument written to the `resume_arg` + /// place. If the generator is dropped before then, the `drop` basic block is invoked. + /// + /// Not permitted in bodies that are not generator bodies, or after generator lowering. + /// + /// **Needs clarification**: What about the evaluation order of the `resume_arg` and `value`? + Yield { + /// The value to return. + value: Operand, + /// Where to resume to. + resume: BasicBlockId, + /// The place to store the resume argument in. + resume_arg: Place, + /// Cleanup to be done if the generator is dropped at this suspend point. + drop: Option<BasicBlockId>, + }, + + /// Indicates the end of dropping a generator. + /// + /// Semantically just a `return` (from the generators drop glue). Only permitted in the same situations + /// as `yield`. + /// + /// **Needs clarification**: Is that even correct? The generator drop code is always confusing + /// to me, because it's not even really in the current body. + /// + /// **Needs clarification**: Are there type system constraints on these terminators? Should + /// there be a "block type" like `cleanup` blocks for them? + GeneratorDrop, + + /// A block where control flow only ever takes one real path, but borrowck needs to be more + /// conservative. + /// + /// At runtime this is semantically just a goto. + /// + /// Disallowed after drop elaboration. + FalseEdge { + /// The target normal control flow will take. + real_target: BasicBlockId, + /// A block control flow could conceptually jump to, but won't in + /// practice. + imaginary_target: BasicBlockId, + }, + + /// A terminator for blocks that only take one path in reality, but where we reserve the right + /// to unwind in borrowck, even if it won't happen in practice. This can arise in infinite loops + /// with no function calls for example. + /// + /// At runtime this is semantically just a goto. + /// + /// Disallowed after drop elaboration. + FalseUnwind { + /// The target normal control flow will take. + real_target: BasicBlockId, + /// The imaginary cleanup block link. This particular path will never be taken + /// in practice, but in order to avoid fragility we want to always + /// consider it in borrowck. We don't want to accept programs which + /// pass borrowck only when `panic=abort` or some assertions are disabled + /// due to release vs. debug mode builds. This needs to be an `Option` because + /// of the `remove_noop_landing_pads` and `abort_unwinding_calls` passes. + unwind: Option<BasicBlockId>, + }, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum BorrowKind { + /// Data must be immutable and is aliasable. + Shared, + + /// The immediately borrowed place must be immutable, but projections from + /// it don't need to be. For example, a shallow borrow of `a.b` doesn't + /// conflict with a mutable borrow of `a.b.c`. + /// + /// This is used when lowering matches: when matching on a place we want to + /// ensure that place have the same value from the start of the match until + /// an arm is selected. This prevents this code from compiling: + /// ```compile_fail,E0510 + /// let mut x = &Some(0); + /// match *x { + /// None => (), + /// Some(_) if { x = &None; false } => (), + /// Some(_) => (), + /// } + /// ``` + /// This can't be a shared borrow because mutably borrowing (*x as Some).0 + /// should not prevent `if let None = x { ... 
}`, for example, because the + /// mutating `(*x as Some).0` can't affect the discriminant of `x`. + /// We can also report errors with this kind of borrow differently. + Shallow, + + /// Data must be immutable but not aliasable. This kind of borrow + /// cannot currently be expressed by the user and is used only in + /// implicit closure bindings. It is needed when the closure is + /// borrowing or mutating a mutable referent, e.g.: + /// ``` + /// let mut z = 3; + /// let x: &mut isize = &mut z; + /// let y = || *x += 5; + /// ``` + /// If we were to try to translate this closure into a more explicit + /// form, we'd encounter an error with the code as written: + /// ```compile_fail,E0594 + /// struct Env<'a> { x: &'a &'a mut isize } + /// let mut z = 3; + /// let x: &mut isize = &mut z; + /// let y = (&mut Env { x: &x }, fn_ptr); // Closure is pair of env and fn + /// fn fn_ptr(env: &mut Env) { **env.x += 5; } + /// ``` + /// This is then illegal because you cannot mutate an `&mut` found + /// in an aliasable location. To solve, you'd have to translate with + /// an `&mut` borrow: + /// ```compile_fail,E0596 + /// struct Env<'a> { x: &'a mut &'a mut isize } + /// let mut z = 3; + /// let x: &mut isize = &mut z; + /// let y = (&mut Env { x: &mut x }, fn_ptr); // changed from &x to &mut x + /// fn fn_ptr(env: &mut Env) { **env.x += 5; } + /// ``` + /// Now the assignment to `**env.x` is legal, but creating a + /// mutable pointer to `x` is not because `x` is not mutable. We + /// could fix this by declaring `x` as `let mut x`. This is ok in + /// user code, if awkward, but extra weird for closures, since the + /// borrow is hidden. + /// + /// So we introduce a "unique imm" borrow -- the referent is + /// immutable, but not aliasable. This solves the problem. For + /// simplicity, we don't give users the way to express this + /// borrow, it's just used when translating closures. + Unique, + + /// Data is mutable and not aliasable. + Mut { + /// `true` if this borrow arose from method-call auto-ref + /// (i.e., `adjustment::Adjust::Borrow`). + allow_two_phase_borrow: bool, + }, +} + +impl BorrowKind { + fn from_hir(m: hir_def::type_ref::Mutability) -> Self { + match m { + hir_def::type_ref::Mutability::Shared => BorrowKind::Shared, + hir_def::type_ref::Mutability::Mut => BorrowKind::Mut { allow_two_phase_borrow: false }, + } + } + + fn from_chalk(m: Mutability) -> Self { + match m { + Mutability::Not => BorrowKind::Shared, + Mutability::Mut => BorrowKind::Mut { allow_two_phase_borrow: false }, + } + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum UnOp { + /// The `!` operator for logical inversion + Not, + /// The `-` operator for negation + Neg, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum BinOp { + /// The `+` operator (addition) + Add, + /// The `-` operator (subtraction) + Sub, + /// The `*` operator (multiplication) + Mul, + /// The `/` operator (division) + /// + /// Division by zero is UB, because the compiler should have inserted checks + /// prior to this. + Div, + /// The `%` operator (modulus) + /// + /// Using zero as the modulus (second operand) is UB, because the compiler + /// should have inserted checks prior to this. + Rem, + /// The `^` operator (bitwise xor) + BitXor, + /// The `&` operator (bitwise and) + BitAnd, + /// The `|` operator (bitwise or) + BitOr, + /// The `<<` operator (shift left) + /// + /// The offset is truncated to the size of the first operand before shifting. 
+ Shl, + /// The `>>` operator (shift right) + /// + /// The offset is truncated to the size of the first operand before shifting. + Shr, + /// The `==` operator (equality) + Eq, + /// The `<` operator (less than) + Lt, + /// The `<=` operator (less than or equal to) + Le, + /// The `!=` operator (not equal to) + Ne, + /// The `>=` operator (greater than or equal to) + Ge, + /// The `>` operator (greater than) + Gt, + /// The `ptr.offset` operator + Offset, +} + +impl From<hir_def::expr::ArithOp> for BinOp { + fn from(value: hir_def::expr::ArithOp) -> Self { + match value { + hir_def::expr::ArithOp::Add => BinOp::Add, + hir_def::expr::ArithOp::Mul => BinOp::Mul, + hir_def::expr::ArithOp::Sub => BinOp::Sub, + hir_def::expr::ArithOp::Div => BinOp::Div, + hir_def::expr::ArithOp::Rem => BinOp::Rem, + hir_def::expr::ArithOp::Shl => BinOp::Shl, + hir_def::expr::ArithOp::Shr => BinOp::Shr, + hir_def::expr::ArithOp::BitXor => BinOp::BitXor, + hir_def::expr::ArithOp::BitOr => BinOp::BitOr, + hir_def::expr::ArithOp::BitAnd => BinOp::BitAnd, + } + } +} + +impl From<hir_def::expr::CmpOp> for BinOp { + fn from(value: hir_def::expr::CmpOp) -> Self { + match value { + hir_def::expr::CmpOp::Eq { negated: false } => BinOp::Eq, + hir_def::expr::CmpOp::Eq { negated: true } => BinOp::Ne, + hir_def::expr::CmpOp::Ord { ordering: Ordering::Greater, strict: false } => BinOp::Ge, + hir_def::expr::CmpOp::Ord { ordering: Ordering::Greater, strict: true } => BinOp::Gt, + hir_def::expr::CmpOp::Ord { ordering: Ordering::Less, strict: false } => BinOp::Le, + hir_def::expr::CmpOp::Ord { ordering: Ordering::Less, strict: true } => BinOp::Lt, + } + } +} + +impl From<Operand> for Rvalue { + fn from(x: Operand) -> Self { + Self::Use(x) + } +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum CastKind { + /// An exposing pointer to address cast. A cast between a pointer and an integer type, or + /// between a function pointer and an integer type. + /// See the docs on `expose_addr` for more details. + PointerExposeAddress, + /// An address-to-pointer cast that picks up an exposed provenance. + /// See the docs on `from_exposed_addr` for more details. + PointerFromExposedAddress, + /// All sorts of pointer-to-pointer casts. Note that reference-to-raw-ptr casts are + /// translated into `&raw mut/const *r`, i.e., they are not actually casts. + Pointer(PointerCast), + /// Cast into a dyn* object. + DynStar, + IntToInt, + FloatToInt, + FloatToFloat, + IntToFloat, + PtrToPtr, + FnPtrToPtr, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Rvalue { + /// Yields the operand unchanged + Use(Operand), + + /// Creates an array where each element is the value of the operand. + /// + /// This is the cause of a bug in the case where the repetition count is zero because the value + /// is not dropped, see [#74836]. + /// + /// Corresponds to source code like `[x; 32]`. + /// + /// [#74836]: https://github.com/rust-lang/rust/issues/74836 + //Repeat(Operand, ty::Const), + + /// Creates a reference of the indicated kind to the place. + /// + /// There is not much to document here, because besides the obvious parts the semantics of this + /// are essentially entirely a part of the aliasing model. There are many UCG issues discussing + /// exactly what the behavior of this operation should be. + /// + /// `Shallow` borrows are disallowed after drop lowering. + Ref(BorrowKind, Place), + + /// Creates a pointer/reference to the given thread local. 
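The `Shl`/`Shr` docs above say the shift amount is truncated to the width of the left operand rather than rejected; Rust's `wrapping_shl`/`wrapping_shr` expose exactly that truncating behavior, which makes the rule easy to check:

fn main() {
    // MIR `Shl` truncates the RHS to the bit width of the LHS:
    // shifting a u8 by 9 behaves like shifting by 9 % 8 == 1.
    assert_eq!(1u8.wrapping_shl(9), 2);
    assert_eq!(0x80u8.wrapping_shr(9), 0x40);
    // The checked surface-language operation would instead report overflow:
    assert_eq!(1u8.checked_shl(9), None);
}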
+ /// + /// The yielded type is a `*mut T` if the static is mutable, otherwise if the static is extern a + /// `*const T`, and if neither of those apply a `&T`. + /// + /// **Note:** This is a runtime operation that actually executes code and is in this sense more + /// like a function call. Also, eliminating dead stores of this rvalue causes `fn main() {}` to + /// SIGILL for some reason that I (JakobDegen) never got a chance to look into. + /// + /// **Needs clarification**: Are there weird additional semantics here related to the runtime + /// nature of this operation? + //ThreadLocalRef(DefId), + + /// Creates a pointer with the indicated mutability to the place. + /// + /// This is generated by pointer casts like `&v as *const _` or raw address of expressions like + /// `&raw v` or `addr_of!(v)`. + /// + /// Like with references, the semantics of this operation are heavily dependent on the aliasing + /// model. + //AddressOf(Mutability, Place), + + /// Yields the length of the place, as a `usize`. + /// + /// If the type of the place is an array, this is the array length. For slices (`[T]`, not + /// `&[T]`) this accesses the place's metadata to determine the length. This rvalue is + /// ill-formed for places of other types. + Len(Place), + + /// Performs essentially all of the casts that can be performed via `as`. + /// + /// This allows for casts from/to a variety of types. + /// + /// **FIXME**: Document exactly which `CastKind`s allow which types of casts. Figure out why + /// `ArrayToPointer` and `MutToConstPointer` are special. + Cast(CastKind, Operand, Ty), + + /// * `Offset` has the same semantics as [`offset`](pointer::offset), except that the second + /// parameter may be a `usize` as well. + /// * The comparison operations accept `bool`s, `char`s, signed or unsigned integers, floats, + /// raw pointers, or function pointers and return a `bool`. The types of the operands must be + /// matching, up to the usual caveat of the lifetimes in function pointers. + /// * Left and right shift operations accept signed or unsigned integers not necessarily of the + /// same type and return a value of the same type as their LHS. Like in Rust, the RHS is + /// truncated as needed. + /// * The `Bit*` operations accept signed integers, unsigned integers, or bools with matching + /// types and return a value of that type. + /// * The remaining operations accept signed integers, unsigned integers, or floats with + /// matching types and return a value of that type. + //BinaryOp(BinOp, Box<(Operand, Operand)>), + + /// Same as `BinaryOp`, but yields `(T, bool)` with a `bool` indicating an error condition. + /// + /// When overflow checking is disabled and we are generating run-time code, the error condition + /// is false. Otherwise, and always during CTFE, the error condition is determined as described + /// below. + /// + /// For addition, subtraction, and multiplication on integers the error condition is set when + /// the infinite precision result would be unequal to the actual result. + /// + /// For shift operations on integers the error condition is set when the value of right-hand + /// side is greater than or equal to the number of bits in the type of the left-hand side, or + /// when the value of right-hand side is negative. + /// + /// Other combinations of types and operators are unsupported. + CheckedBinaryOp(BinOp, Operand, Operand), + + /// Computes a value as described by the operation. + //NullaryOp(NullOp, Ty), + + /// Exactly like `BinaryOp`, but less operands. 
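`CheckedBinaryOp` above yields `(T, bool)`, where the flag signals that the infinite-precision result differs from the truncated one; the standard `overflowing_*` family computes exactly this pair, for example:

fn main() {
    // (result, overflow-flag) pairs, as a `CheckedBinaryOp` would produce them.
    assert_eq!(i8::MAX.overflowing_add(1), (i8::MIN, true)); // 128 doesn't fit in i8
    assert_eq!(100i8.overflowing_add(27), (127, false));
    // For shifts, the flag is set when the RHS >= the bit width:
    assert_eq!(1u8.overflowing_shl(8), (1, true));
}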
+ /// + /// Also does two's-complement arithmetic. Negation requires a signed integer or a float; + /// bitwise not requires a signed integer, unsigned integer, or bool. Both operation kinds + /// return a value with the same type as their operand. + UnaryOp(UnOp, Operand), + + /// Computes the discriminant of the place, returning it as an integer of type + /// [`discriminant_ty`]. Returns zero for types without discriminant. + /// + /// The validity requirements for the underlying value are undecided for this rvalue, see + /// [#91095]. Note too that the value of the discriminant is not the same thing as the + /// variant index; use [`discriminant_for_variant`] to convert. + /// + /// [`discriminant_ty`]: crate::ty::Ty::discriminant_ty + /// [#91095]: https://github.com/rust-lang/rust/issues/91095 + /// [`discriminant_for_variant`]: crate::ty::Ty::discriminant_for_variant + Discriminant(Place), + + /// Creates an aggregate value, like a tuple or struct. + /// + /// This is needed because dataflow analysis needs to distinguish + /// `dest = Foo { x: ..., y: ... }` from `dest.x = ...; dest.y = ...;` in the case that `Foo` + /// has a destructor. + /// + /// Disallowed after deaggregation for all aggregate kinds except `Array` and `Generator`. After + /// generator lowering, `Generator` aggregate kinds are disallowed too. + Aggregate(AggregateKind, Vec<Operand>), + + /// Transmutes a `*mut u8` into shallow-initialized `Box<T>`. + /// + /// This is different from a normal transmute because dataflow analysis will treat the box as + /// initialized but its content as uninitialized. Like other pointer casts, this in general + /// affects alias analysis. + ShallowInitBox(Operand, Ty), + + /// A CopyForDeref is equivalent to a read from a place at the + /// codegen level, but is treated specially by drop elaboration. When such a read happens, it + /// is guaranteed (via nature of the mir_opt `Derefer` in rustc_mir_transform/src/deref_separator) + /// that the only use of the returned value is a deref operation, immediately + /// followed by one or more projections. Drop elaboration treats this rvalue as if the + /// read never happened and just projects further. This allows simplifying various MIR + /// optimizations and codegen backends that previously had to handle deref operations anywhere + /// in a place. + CopyForDeref(Place), +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Statement { + Assign(Place, Rvalue), + //FakeRead(Box<(FakeReadCause, Place)>), + //SetDiscriminant { + // place: Box<Place>, + // variant_index: VariantIdx, + //}, + Deinit(Place), + StorageLive(LocalId), + StorageDead(LocalId), + //Retag(RetagKind, Box<Place>), + //AscribeUserType(Place, UserTypeProjection, Variance), + //Intrinsic(Box<NonDivergingIntrinsic>), + Nop, +} + +#[derive(Debug, Default, PartialEq, Eq)] +pub struct BasicBlock { + /// List of statements in this block. + pub statements: Vec<Statement>, + + /// Terminator for this block. + /// + /// N.B., this should generally ONLY be `None` during construction. + /// Therefore, you should generally access it via the + /// `terminator()` or `terminator_mut()` methods. The only + /// exception is that certain passes, such as `simplify_cfg`, swap + /// out the terminator temporarily with `None` while they continue + /// to recurse over the set of basic blocks. + pub terminator: Option<Terminator>, + + /// If true, this block lies on an unwind path. 
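A minimal sketch of the shape a lowered body takes with these types: two blocks, where the entry block assigns the return slot and jumps to a block that returns. Plain indices stand in for the arena ids, and the statements are simplified; none of this is the PR's lowering code:

enum Statement {
    Assign(usize, u64), // local <- constant, a tiny stand-in for `Assign(Place, Rvalue)`
    Nop,
}

enum Terminator {
    Goto { target: usize },
    Return,
}

struct BasicBlock {
    statements: Vec<Statement>,
    terminator: Option<Terminator>, // `None` only mid-construction, as documented above
}

fn main() {
    // bb0: _0 = 42; goto bb1;   bb1: return;
    let body = vec![
        BasicBlock {
            statements: vec![Statement::Assign(0, 42), Statement::Nop],
            terminator: Some(Terminator::Goto { target: 1 }),
        },
        BasicBlock { statements: vec![], terminator: Some(Terminator::Return) },
    ];

    let mut locals = vec![0u64; 1]; // _0 is the return slot
    let mut block = 0;
    loop {
        for s in &body[block].statements {
            if let Statement::Assign(l, v) = s {
                locals[*l] = *v;
            }
        }
        match body[block].terminator.as_ref().expect("constructed body has terminators") {
            Terminator::Goto { target } => block = *target,
            Terminator::Return => break,
        }
    }
    assert_eq!(locals[0], 42); // the value left in the return slot
}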
This is used + /// during codegen where distinct kinds of basic blocks may be + /// generated (particularly for MSVC cleanup). Unwind blocks must + /// only branch to other unwind blocks. + pub is_cleanup: bool, +} + +#[derive(Debug, PartialEq, Eq)] +pub struct MirBody { + pub basic_blocks: Arena<BasicBlock>, + pub locals: Arena<Local>, + pub start_block: BasicBlockId, + pub owner: DefWithBodyId, + pub arg_count: usize, +} + +impl MirBody {} + +fn const_as_usize(c: &Const) -> usize { + try_const_usize(c).unwrap() as usize +} diff --git a/crates/hir-ty/src/mir/eval.rs b/crates/hir-ty/src/mir/eval.rs new file mode 100644 index 00000000000..1ec32010a19 --- /dev/null +++ b/crates/hir-ty/src/mir/eval.rs @@ -0,0 +1,1245 @@ +//! This module provides a MIR interpreter, which is used in const eval. + +use std::{borrow::Cow, collections::HashMap, iter}; + +use base_db::CrateId; +use chalk_ir::{ + fold::{FallibleTypeFolder, TypeFoldable, TypeSuperFoldable}, + DebruijnIndex, TyKind, +}; +use hir_def::{ + builtin_type::BuiltinType, + lang_item::{lang_attr, LangItem}, + layout::{Layout, LayoutError, RustcEnumVariantIdx, TagEncoding, Variants}, + AdtId, DefWithBodyId, EnumVariantId, FunctionId, HasModule, Lookup, VariantId, +}; +use intern::Interned; +use la_arena::ArenaMap; + +use crate::{ + consteval::{intern_const_scalar, ConstEvalError}, + db::HirDatabase, + from_placeholder_idx, + infer::{normalize, PointerCast}, + layout::layout_of_ty, + mapping::from_chalk, + method_resolution::lookup_impl_method, + CallableDefId, Const, ConstScalar, Interner, MemoryMap, Substitution, Ty, TyBuilder, TyExt, +}; + +use super::{ + const_as_usize, return_slot, AggregateKind, BinOp, CastKind, LocalId, MirBody, MirLowerError, + Operand, Place, ProjectionElem, Rvalue, Statement, Terminator, UnOp, +}; + +pub struct Evaluator<'a> { + db: &'a dyn HirDatabase, + stack: Vec<u8>, + heap: Vec<u8>, + crate_id: CrateId, + // FIXME: This is a workaround, see the comment on `interpret_mir` + assert_placeholder_ty_is_unused: bool, + /// A general limit on execution, to prevent non-terminating programs from breaking the r-a main process + execution_limit: usize, + /// An additional limit on stack depth, to prevent stack overflow + stack_depth_limit: usize, +} + +#[derive(Debug, Clone, Copy)] +enum Address { + Stack(usize), + Heap(usize), +} + +use Address::*; + +struct Interval { + addr: Address, + size: usize, +} + +impl Interval { + fn new(addr: Address, size: usize) -> Self { + Self { addr, size } + } + + fn get<'a>(&self, memory: &'a Evaluator<'a>) -> Result<&'a [u8]> { + memory.read_memory(self.addr, self.size) + } +} + +enum IntervalOrOwned { + Owned(Vec<u8>), + Borrowed(Interval), +} +impl IntervalOrOwned { + pub(crate) fn to_vec(self, memory: &Evaluator<'_>) -> Result<Vec<u8>> { + Ok(match self { + IntervalOrOwned::Owned(o) => o, + IntervalOrOwned::Borrowed(b) => b.get(memory)?.to_vec(), + }) + } +} + +macro_rules!
from_bytes { + ($ty:tt, $value:expr) => { + ($ty::from_le_bytes(match ($value).try_into() { + Ok(x) => x, + Err(_) => return Err(MirEvalError::TypeError("mismatched size")), + })) + }; +} + +impl Address { + fn from_bytes(x: &[u8]) -> Result<Self> { + Ok(Address::from_usize(from_bytes!(usize, x))) + } + + fn from_usize(x: usize) -> Self { + if x > usize::MAX / 2 { + Stack(usize::MAX - x) + } else { + Heap(x) + } + } + + fn to_bytes(&self) -> Vec<u8> { + usize::to_le_bytes(self.to_usize()).to_vec() + } + + fn to_usize(&self) -> usize { + let as_num = match self { + Stack(x) => usize::MAX - *x, + Heap(x) => *x, + }; + as_num + } + + fn map(&self, f: impl FnOnce(usize) -> usize) -> Address { + match self { + Stack(x) => Stack(f(*x)), + Heap(x) => Heap(f(*x)), + } + } + + fn offset(&self, offset: usize) -> Address { + self.map(|x| x + offset) + } +} + +#[derive(Clone, PartialEq, Eq)] +pub enum MirEvalError { + ConstEvalError(Box<ConstEvalError>), + LayoutError(LayoutError, Ty), + /// Means that code had type errors (or mismatched args) and we shouldn't generate mir in first place. + TypeError(&'static str), + /// Means that code had undefined behavior. We don't try to actively detect UB, but if it was detected + /// then use this type of error. + UndefinedBehavior(&'static str), + Panic, + MirLowerError(FunctionId, MirLowerError), + TypeIsUnsized(Ty, &'static str), + NotSupported(String), + InvalidConst(Const), + InFunction(FunctionId, Box<MirEvalError>), + ExecutionLimitExceeded, + StackOverflow, + TargetDataLayoutNotAvailable, +} + +impl std::fmt::Debug for MirEvalError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::ConstEvalError(arg0) => f.debug_tuple("ConstEvalError").field(arg0).finish(), + Self::LayoutError(arg0, arg1) => { + f.debug_tuple("LayoutError").field(arg0).field(arg1).finish() + } + Self::TypeError(arg0) => f.debug_tuple("TypeError").field(arg0).finish(), + Self::UndefinedBehavior(arg0) => { + f.debug_tuple("UndefinedBehavior").field(arg0).finish() + } + Self::Panic => write!(f, "Panic"), + Self::TargetDataLayoutNotAvailable => write!(f, "TargetDataLayoutNotAvailable"), + Self::TypeIsUnsized(ty, it) => write!(f, "{ty:?} is unsized. {it} should be sized."), + Self::ExecutionLimitExceeded => write!(f, "execution limit exceeded"), + Self::StackOverflow => write!(f, "stack overflow"), + Self::MirLowerError(arg0, arg1) => { + f.debug_tuple("MirLowerError").field(arg0).field(arg1).finish() + } + Self::NotSupported(arg0) => f.debug_tuple("NotSupported").field(arg0).finish(), + Self::InvalidConst(arg0) => { + let data = &arg0.data(Interner); + f.debug_struct("InvalidConst").field("ty", &data.ty).field("value", &arg0).finish() + } + Self::InFunction(func, e) => { + let mut e = &**e; + let mut stack = vec![*func]; + while let Self::InFunction(f, next_e) = e { + e = &next_e; + stack.push(*f); + } + f.debug_struct("WithStack").field("error", e).field("stack", &stack).finish() + } + } + } +} + +macro_rules! not_supported { + ($x: expr) => { + return Err(MirEvalError::NotSupported(format!($x))) + }; +} + +impl From<ConstEvalError> for MirEvalError { + fn from(value: ConstEvalError) -> Self { + match value { + _ => MirEvalError::ConstEvalError(Box::new(value)), + } + } +} + +type Result<T> = std::result::Result<T, MirEvalError>; + +struct Locals<'a> { + ptr: &'a ArenaMap<LocalId, Address>, + body: &'a MirBody, + subst: &'a Substitution, +} + +pub fn interpret_mir( + db: &dyn HirDatabase, + body: &MirBody, + // FIXME: This is workaround. 
Ideally, const generics should have a separate body (issue #7434), but now + // they share their body with their parent, so in MIR lowering we have locals of the parent body, which + // might have placeholders. With this argument, we (wrongly) assume that every placeholder type has + // a zero size, hoping that they are all outside of our current body. Even without a fix for #7434, we can + // (and probably should) do better here, for example by excluding bindings outside of the target expression. + assert_placeholder_ty_is_unused: bool, +) -> Result<Const> { + let ty = body.locals[return_slot()].ty.clone(); + let mut evaluator = + Evaluator::new(db, body.owner.module(db.upcast()).krate(), assert_placeholder_ty_is_unused); + let bytes = evaluator.interpret_mir_with_no_arg(&body)?; + let memory_map = evaluator.create_memory_map( + &bytes, + &ty, + &Locals { ptr: &ArenaMap::new(), body: &body, subst: &Substitution::empty(Interner) }, + )?; + return Ok(intern_const_scalar(ConstScalar::Bytes(bytes, memory_map), ty)); +} + +impl Evaluator<'_> { + pub fn new<'a>( + db: &'a dyn HirDatabase, + crate_id: CrateId, + assert_placeholder_ty_is_unused: bool, + ) -> Evaluator<'a> { + Evaluator { + stack: vec![0], + heap: vec![0], + db, + crate_id, + assert_placeholder_ty_is_unused, + stack_depth_limit: 100, + execution_limit: 100_000, + } + } + + fn place_addr(&self, p: &Place, locals: &Locals<'_>) -> Result<Address> { + Ok(self.place_addr_and_ty(p, locals)?.0) + } + + fn ptr_size(&self) -> usize { + match self.db.target_data_layout(self.crate_id) { + Some(x) => x.pointer_size.bytes_usize(), + None => 8, + } + } + + fn place_addr_and_ty<'a>(&'a self, p: &Place, locals: &'a Locals<'a>) -> Result<(Address, Ty)> { + let mut addr = locals.ptr[p.local]; + let mut ty: Ty = + self.ty_filler(&locals.body.locals[p.local].ty, locals.subst, locals.body.owner)?; + for proj in &p.projection { + match proj { + ProjectionElem::Deref => { + match &ty.data(Interner).kind { + TyKind::Ref(_, _, inner) => { + ty = inner.clone(); + } + _ => not_supported!("dereferencing smart pointers"), + } + let x = from_bytes!(usize, self.read_memory(addr, self.ptr_size())?); + addr = Address::from_usize(x); + } + ProjectionElem::Index(op) => { + let offset = + from_bytes!(usize, self.read_memory(locals.ptr[*op], self.ptr_size())?); + match &ty.data(Interner).kind { + TyKind::Ref(_, _, inner) => match &inner.data(Interner).kind { + TyKind::Slice(inner) => { + ty = inner.clone(); + let ty_size = self.size_of_sized( + &ty, + locals, + "slice inner type should be sized", + )?; + let value = self.read_memory(addr, self.ptr_size() * 2)?; + addr = Address::from_bytes(&value[0..8])?.offset(ty_size * offset); + } + x => not_supported!("MIR index for ref type {x:?}"), + }, + TyKind::Array(inner, _) | TyKind::Slice(inner) => { + ty = inner.clone(); + let ty_size = self.size_of_sized( + &ty, + locals, + "array inner type should be sized", + )?; + addr = addr.offset(ty_size * offset); + } + x => not_supported!("MIR index for type {x:?}"), + } + } + &ProjectionElem::TupleField(f) => match &ty.data(Interner).kind { + TyKind::Tuple(_, subst) => { + let layout = self.layout(&ty)?; + ty = subst + .as_slice(Interner) + .get(f) + .ok_or(MirEvalError::TypeError("not enough tuple fields"))? 
+ .assert_ty_ref(Interner) + .clone(); + let offset = layout.fields.offset(f).bytes_usize(); + addr = addr.offset(offset); + } + _ => return Err(MirEvalError::TypeError("Only tuple has tuple fields")), + }, + ProjectionElem::Field(f) => match &ty.data(Interner).kind { + TyKind::Adt(adt, subst) => { + let layout = self.layout_adt(adt.0, subst.clone())?; + let variant_layout = match &layout.variants { + Variants::Single { .. } => &layout, + Variants::Multiple { variants, .. } => { + &variants[match f.parent { + hir_def::VariantId::EnumVariantId(x) => { + RustcEnumVariantIdx(x.local_id) + } + _ => { + return Err(MirEvalError::TypeError( + "Multivariant layout only happens for enums", + )) + } + }] + } + }; + ty = self.db.field_types(f.parent)[f.local_id] + .clone() + .substitute(Interner, subst); + let offset = variant_layout + .fields + .offset(u32::from(f.local_id.into_raw()) as usize) + .bytes_usize(); + addr = addr.offset(offset); + } + _ => return Err(MirEvalError::TypeError("Only adt has fields")), + }, + ProjectionElem::ConstantIndex { .. } => { + not_supported!("constant index") + } + ProjectionElem::Subslice { .. } => not_supported!("subslice"), + ProjectionElem::OpaqueCast(_) => not_supported!("opaque cast"), + } + } + Ok((addr, ty)) + } + + fn layout(&self, ty: &Ty) -> Result<Layout> { + layout_of_ty(self.db, ty, self.crate_id) + .map_err(|e| MirEvalError::LayoutError(e, ty.clone())) + } + + fn layout_adt(&self, adt: AdtId, subst: Substitution) -> Result<Layout> { + self.db.layout_of_adt(adt, subst.clone()).map_err(|e| { + MirEvalError::LayoutError(e, TyKind::Adt(chalk_ir::AdtId(adt), subst).intern(Interner)) + }) + } + + fn place_ty<'a>(&'a self, p: &Place, locals: &'a Locals<'a>) -> Result<Ty> { + Ok(self.place_addr_and_ty(p, locals)?.1) + } + + fn operand_ty<'a>(&'a self, o: &'a Operand, locals: &'a Locals<'a>) -> Result<Ty> { + Ok(match o { + Operand::Copy(p) | Operand::Move(p) => self.place_ty(p, locals)?, + Operand::Constant(c) => c.data(Interner).ty.clone(), + }) + } + + fn interpret_mir( + &mut self, + body: &MirBody, + args: impl Iterator<Item = Vec<u8>>, + subst: Substitution, + ) -> Result<Vec<u8>> { + if let Some(x) = self.stack_depth_limit.checked_sub(1) { + self.stack_depth_limit = x; + } else { + return Err(MirEvalError::StackOverflow); + } + let mut current_block_idx = body.start_block; + let mut locals = Locals { ptr: &ArenaMap::new(), body: &body, subst: &subst }; + let (locals_ptr, stack_size) = { + let mut stack_ptr = self.stack.len(); + let addr = body + .locals + .iter() + .map(|(id, x)| { + let size = self.size_of_sized(&x.ty, &locals, "no unsized local")?; + let my_ptr = stack_ptr; + stack_ptr += size; + Ok((id, Stack(my_ptr))) + }) + .collect::<Result<ArenaMap<LocalId, _>>>()?; + let stack_size = stack_ptr - self.stack.len(); + (addr, stack_size) + }; + locals.ptr = &locals_ptr; + self.stack.extend(iter::repeat(0).take(stack_size)); + let mut remain_args = body.arg_count; + for ((_, addr), value) in locals_ptr.iter().skip(1).zip(args) { + self.write_memory(*addr, &value)?; + if remain_args == 0 { + return Err(MirEvalError::TypeError("more arguments provided")); + } + remain_args -= 1; + } + if remain_args > 0 { + return Err(MirEvalError::TypeError("not enough arguments provided")); + } + loop { + let current_block = &body.basic_blocks[current_block_idx]; + if let Some(x) = self.execution_limit.checked_sub(1) { + self.execution_limit = x; + } else { + return Err(MirEvalError::ExecutionLimitExceeded); + } + for statement in &current_block.statements { + match
statement { + Statement::Assign(l, r) => { + let addr = self.place_addr(l, &locals)?; + let result = self.eval_rvalue(r, &locals)?.to_vec(&self)?; + self.write_memory(addr, &result)?; + } + Statement::Deinit(_) => not_supported!("de-init statement"), + Statement::StorageLive(_) => not_supported!("storage-live statement"), + Statement::StorageDead(_) => not_supported!("storage-dead statement"), + Statement::Nop => (), + } + } + let Some(terminator) = current_block.terminator.as_ref() else { + not_supported!("block without terminator"); + }; + match terminator { + Terminator::Goto { target } => { + current_block_idx = *target; + } + Terminator::Call { + func, + args, + destination, + target, + cleanup: _, + from_hir_call: _, + } => { + let fn_ty = self.operand_ty(func, &locals)?; + match &fn_ty.data(Interner).kind { + TyKind::FnDef(def, generic_args) => { + let def: CallableDefId = from_chalk(self.db, *def); + let generic_args = self.subst_filler(generic_args, &locals); + match def { + CallableDefId::FunctionId(def) => { + let arg_bytes = args + .iter() + .map(|x| { + Ok(self + .eval_operand(x, &locals)? + .get(&self)? + .to_owned()) + }) + .collect::<Result<Vec<_>>>()? + .into_iter(); + let function_data = self.db.function_data(def); + let is_intrinsic = match &function_data.abi { + Some(abi) => *abi == Interned::new_str("rust-intrinsic"), + None => match def.lookup(self.db.upcast()).container { + hir_def::ItemContainerId::ExternBlockId(block) => { + let id = block.lookup(self.db.upcast()).id; + id.item_tree(self.db.upcast())[id.value] + .abi + .as_deref() + == Some("rust-intrinsic") + } + _ => false, + }, + }; + let result = if is_intrinsic { + self.exec_intrinsic( + function_data + .name + .as_text() + .unwrap_or_default() + .as_str(), + arg_bytes, + generic_args, + &locals, + )? + } else if let Some(x) = self.detect_lang_function(def) { + self.exec_lang_item(x, arg_bytes)? + } else { + let trait_env = { + let Some(d) = body.owner.as_generic_def_id() else { + not_supported!("trait resolving in non generic def id"); + }; + self.db.trait_environment(d) + }; + let (imp, generic_args) = lookup_impl_method( + self.db, + trait_env, + def, + generic_args.clone(), + ); + let generic_args = + self.subst_filler(&generic_args, &locals); + let def = imp.into(); + let mir_body = self + .db + .mir_body(def) + .map_err(|e| MirEvalError::MirLowerError(imp, e))?; + self.interpret_mir(&mir_body, arg_bytes, generic_args) + .map_err(|e| { + MirEvalError::InFunction(imp, Box::new(e)) + })? 
+ }; + let dest_addr = self.place_addr(destination, &locals)?; + self.write_memory(dest_addr, &result)?; + } + CallableDefId::StructId(id) => { + let (size, variant_layout, tag) = self.layout_of_variant( + id.into(), + generic_args.clone(), + &locals, + )?; + let result = self.make_by_layout( + size, + &variant_layout, + tag, + args, + &locals, + )?; + let dest_addr = self.place_addr(destination, &locals)?; + self.write_memory(dest_addr, &result)?; + } + CallableDefId::EnumVariantId(id) => { + let (size, variant_layout, tag) = self.layout_of_variant( + id.into(), + generic_args.clone(), + &locals, + )?; + let result = self.make_by_layout( + size, + &variant_layout, + tag, + args, + &locals, + )?; + let dest_addr = self.place_addr(destination, &locals)?; + self.write_memory(dest_addr, &result)?; + } + } + current_block_idx = + target.expect("broken mir, function without target"); + } + _ => not_supported!("unknown function type"), + } + } + Terminator::SwitchInt { discr, targets } => { + let val = u128::from_le_bytes(pad16( + self.eval_operand(discr, &locals)?.get(&self)?, + false, + )); + current_block_idx = targets.target_for_value(val); + } + Terminator::Return => { + let ty = body.locals[return_slot()].ty.clone(); + self.stack_depth_limit += 1; + return Ok(self + .read_memory( + locals.ptr[return_slot()], + self.size_of_sized(&ty, &locals, "return type")?, + )? + .to_owned()); + } + Terminator::Unreachable => { + return Err(MirEvalError::UndefinedBehavior("unreachable executed")) + } + _ => not_supported!("unknown terminator"), + } + } + } + + fn eval_rvalue<'a>( + &'a mut self, + r: &'a Rvalue, + locals: &'a Locals<'a>, + ) -> Result<IntervalOrOwned> { + use IntervalOrOwned::*; + Ok(match r { + Rvalue::Use(x) => Borrowed(self.eval_operand(x, locals)?), + Rvalue::Ref(_, p) => { + let addr = self.place_addr(p, locals)?; + Owned(addr.to_bytes()) + } + Rvalue::Len(_) => not_supported!("rvalue len"), + Rvalue::UnaryOp(op, val) => { + let mut c = self.eval_operand(val, locals)?.get(&self)?; + let mut ty = self.operand_ty(val, locals)?; + while let TyKind::Ref(_, _, z) = ty.kind(Interner) { + ty = z.clone(); + let size = self.size_of_sized(&ty, locals, "operand of unary op")?; + c = self.read_memory(Address::from_bytes(c)?, size)?; + } + let mut c = c.to_vec(); + if ty.as_builtin() == Some(BuiltinType::Bool) { + c[0] = 1 - c[0]; + } else { + match op { + UnOp::Not => c.iter_mut().for_each(|x| *x = !*x), + UnOp::Neg => { + c.iter_mut().for_each(|x| *x = !*x); + for k in c.iter_mut() { + let o; + (*k, o) = k.overflowing_add(1); + if !o { + break; + } + } + } + } + } + Owned(c) + } + Rvalue::CheckedBinaryOp(op, lhs, rhs) => { + let lc = self.eval_operand(lhs, locals)?; + let rc = self.eval_operand(rhs, locals)?; + let mut lc = lc.get(&self)?; + let mut rc = rc.get(&self)?; + let mut ty = self.operand_ty(lhs, locals)?; + while let TyKind::Ref(_, _, z) = ty.kind(Interner) { + ty = z.clone(); + let size = self.size_of_sized(&ty, locals, "operand of binary op")?; + lc = self.read_memory(Address::from_bytes(lc)?, size)?; + rc = self.read_memory(Address::from_bytes(rc)?, size)?; + } + let is_signed = matches!(ty.as_builtin(), Some(BuiltinType::Int(_))); + let l128 = i128::from_le_bytes(pad16(lc, is_signed)); + let r128 = i128::from_le_bytes(pad16(rc, is_signed)); + match op { + BinOp::Ge | BinOp::Gt | BinOp::Le | BinOp::Lt | BinOp::Eq | BinOp::Ne => { + let r = match op { + BinOp::Ge => l128 >= r128, + BinOp::Gt => l128 > r128, + BinOp::Le => l128 <= r128, + BinOp::Lt => l128 < r128, + BinOp::Eq => 
l128 == r128, + BinOp::Ne => l128 != r128, + _ => unreachable!(), + }; + let r = r as u8; + Owned(vec![r]) + } + BinOp::BitAnd + | BinOp::BitOr + | BinOp::BitXor + | BinOp::Add + | BinOp::Mul + | BinOp::Div + | BinOp::Rem + | BinOp::Sub => { + let r = match op { + BinOp::Add => l128.overflowing_add(r128).0, + BinOp::Mul => l128.overflowing_mul(r128).0, + BinOp::Div => l128.checked_div(r128).ok_or(MirEvalError::Panic)?, + BinOp::Rem => l128.checked_rem(r128).ok_or(MirEvalError::Panic)?, + BinOp::Sub => l128.overflowing_sub(r128).0, + BinOp::BitAnd => l128 & r128, + BinOp::BitOr => l128 | r128, + BinOp::BitXor => l128 ^ r128, + _ => unreachable!(), + }; + let r = r.to_le_bytes(); + for &k in &r[lc.len()..] { + if k != 0 && (k != 255 || !is_signed) { + return Err(MirEvalError::Panic); + } + } + Owned(r[0..lc.len()].into()) + } + BinOp::Shl | BinOp::Shr => { + let shift_amount = if r128 < 0 { + return Err(MirEvalError::Panic); + } else if r128 >= 128 { + return Err(MirEvalError::Panic); + } else { + r128 as u8 + }; + let r = match op { + BinOp::Shl => l128 << shift_amount, + BinOp::Shr => l128 >> shift_amount, + _ => unreachable!(), + }; + Owned(r.to_le_bytes()[0..lc.len()].into()) + } + BinOp::Offset => not_supported!("offset binop"), + } + } + Rvalue::Discriminant(p) => { + let ty = self.place_ty(p, locals)?; + let bytes = self.eval_place(p, locals)?.get(&self)?; + let layout = self.layout(&ty)?; + match layout.variants { + Variants::Single { .. } => Owned(0u128.to_le_bytes().to_vec()), + Variants::Multiple { tag, tag_encoding, .. } => { + let Some(target_data_layout) = self.db.target_data_layout(self.crate_id) else { + not_supported!("missing target data layout"); + }; + let size = tag.size(&*target_data_layout).bytes_usize(); + let offset = layout.fields.offset(0).bytes_usize(); // The only field on enum variants is the tag field + match tag_encoding { + TagEncoding::Direct => { + let tag = &bytes[offset..offset + size]; + Owned(pad16(tag, false).to_vec()) + } + TagEncoding::Niche { untagged_variant, niche_start, .. } => { + let tag = &bytes[offset..offset + size]; + let candidate_discriminant = i128::from_le_bytes(pad16(tag, false)) + .wrapping_sub(niche_start as i128); + let enum_id = match ty.kind(Interner) { + TyKind::Adt(e, _) => match e.0 { + AdtId::EnumId(e) => e, + _ => not_supported!("Non-enum with multi-variant layout"), + }, + _ => not_supported!("Non-ADT with multi-variant layout"), + }; + let enum_data = self.db.enum_data(enum_id); + let result = 'b: { + for (local_id, _) in enum_data.variants.iter() { + if candidate_discriminant + == self.db.const_eval_discriminant(EnumVariantId { + parent: enum_id, + local_id, + })? + { + break 'b candidate_discriminant; + } + } + self.db.const_eval_discriminant(EnumVariantId { + parent: enum_id, + local_id: untagged_variant.0, + })? + }; + Owned(result.to_le_bytes().to_vec()) + } + } + } + } + } + Rvalue::ShallowInitBox(_, _) => not_supported!("shallow init box"), + Rvalue::CopyForDeref(_) => not_supported!("copy for deref"), + Rvalue::Aggregate(kind, values) => match kind { + AggregateKind::Array(_) => { + let mut r = vec![]; + for x in values { + let value = self.eval_operand(x, locals)?.get(&self)?; + r.extend(value); + } + Owned(r) + } + AggregateKind::Tuple(ty) => { + let layout = self.layout(&ty)?; + Owned(self.make_by_layout( + layout.size.bytes_usize(), + &layout, + None, + values, + locals, + )?) 
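+ // e.g. a `(u8, u16)` aggregate (an illustrative type, not from this change) is written field by field at `layout.fields.offset(i)`; the offsets come from the layout query rather than declaration order, since `repr(Rust)` may reorder fields.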
+ } + AggregateKind::Union(x, f) => { + let layout = self.layout_adt((*x).into(), Substitution::empty(Interner))?; + let offset = layout + .fields + .offset(u32::from(f.local_id.into_raw()) as usize) + .bytes_usize(); + let op = self.eval_operand(&values[0], locals)?.get(&self)?; + let mut result = vec![0; layout.size.bytes_usize()]; + result[offset..offset + op.len()].copy_from_slice(op); + Owned(result) + } + AggregateKind::Adt(x, subst) => { + let (size, variant_layout, tag) = + self.layout_of_variant(*x, subst.clone(), locals)?; + Owned(self.make_by_layout(size, &variant_layout, tag, values, locals)?) + } + }, + Rvalue::Cast(kind, operand, target_ty) => match kind { + CastKind::PointerExposeAddress => not_supported!("exposing pointer address"), + CastKind::PointerFromExposedAddress => { + not_supported!("creating pointer from exposed address") + } + CastKind::Pointer(cast) => match cast { + PointerCast::Unsize => { + let current_ty = self.operand_ty(operand, locals)?; + match &target_ty.data(Interner).kind { + TyKind::Raw(_, ty) | TyKind::Ref(_, _, ty) => { + match &ty.data(Interner).kind { + TyKind::Slice(_) => match &current_ty.data(Interner).kind { + TyKind::Raw(_, ty) | TyKind::Ref(_, _, ty) => { + match &ty.data(Interner).kind { + TyKind::Array(_, size) => { + let addr = self + .eval_operand(operand, locals)? + .get(&self)?; + let len = const_as_usize(size); + let mut r = Vec::with_capacity(16); + r.extend(addr.iter().copied()); + r.extend(len.to_le_bytes().into_iter()); + Owned(r) + } + _ => { + not_supported!("slice unsizing from non-arrays") + } + } + } + _ => not_supported!("slice unsizing from non-pointers"), + }, + TyKind::Dyn(_) => not_supported!("dyn pointer unsize cast"), + _ => not_supported!("unknown unsized cast"), + } + } + _ => not_supported!("unsized cast on unknown pointer type"), + } + } + x => not_supported!("pointer cast {x:?}"), + }, + CastKind::DynStar => not_supported!("dyn star cast"), + CastKind::IntToInt => { + // FIXME: handle signed cast + let current = pad16(self.eval_operand(operand, locals)?.get(&self)?, false); + let dest_size = + self.size_of_sized(target_ty, locals, "destination of int to int cast")?; + Owned(current[0..dest_size].to_vec()) + } + CastKind::FloatToInt => not_supported!("float to int cast"), + CastKind::FloatToFloat => not_supported!("float to float cast"), + CastKind::IntToFloat => not_supported!("int to float cast"), + CastKind::PtrToPtr => not_supported!("ptr to ptr cast"), + CastKind::FnPtrToPtr => not_supported!("fn ptr to ptr cast"), + }, + }) + } + + fn layout_of_variant( + &mut self, + x: VariantId, + subst: Substitution, + locals: &Locals<'_>, + ) -> Result<(usize, Layout, Option<(usize, usize, i128)>)> { + let adt = x.adt_id(); + if let DefWithBodyId::VariantId(f) = locals.body.owner { + if let VariantId::EnumVariantId(x) = x { + if AdtId::from(f.parent) == adt { + // Computing the exact size of enums requires resolving the enum discriminants. In order to prevent loops (and + // infinite-sized type errors) we use a dummy layout + let i = self.db.const_eval_discriminant(x)?; + return Ok((16, self.layout(&TyBuilder::unit())?, Some((0, 16, i)))); + } + } + } + let layout = self.layout_adt(adt, subst)?; + Ok(match layout.variants { + Variants::Single { .. } => (layout.size.bytes_usize(), layout, None), + Variants::Multiple { variants, tag, tag_encoding, .. 
} => { + let cx = self + .db + .target_data_layout(self.crate_id) + .ok_or(MirEvalError::TargetDataLayoutNotAvailable)?; + let enum_variant_id = match x { + VariantId::EnumVariantId(x) => x, + _ => not_supported!("multi-variant layout for non-enums"), + }; + let rustc_enum_variant_idx = RustcEnumVariantIdx(enum_variant_id.local_id); + let mut discriminant = self.db.const_eval_discriminant(enum_variant_id)?; + let variant_layout = variants[rustc_enum_variant_idx].clone(); + let have_tag = match tag_encoding { + TagEncoding::Direct => true, + TagEncoding::Niche { untagged_variant, niche_variants: _, niche_start } => { + discriminant = discriminant.wrapping_add(niche_start as i128); + untagged_variant != rustc_enum_variant_idx + } + }; + ( + layout.size.bytes_usize(), + variant_layout, + if have_tag { + Some(( + layout.fields.offset(0).bytes_usize(), + tag.size(&*cx).bytes_usize(), + discriminant, + )) + } else { + None + }, + ) + } + }) + } + + fn make_by_layout( + &mut self, + size: usize, // Not necessarily equal to variant_layout.size + variant_layout: &Layout, + tag: Option<(usize, usize, i128)>, + values: &Vec<Operand>, + locals: &Locals<'_>, + ) -> Result<Vec<u8>> { + let mut result = vec![0; size]; + if let Some((offset, size, value)) = tag { + result[offset..offset + size].copy_from_slice(&value.to_le_bytes()[0..size]); + } + for (i, op) in values.iter().enumerate() { + let offset = variant_layout.fields.offset(i).bytes_usize(); + let op = self.eval_operand(op, locals)?.get(&self)?; + result[offset..offset + op.len()].copy_from_slice(op); + } + Ok(result) + } + + fn eval_operand(&mut self, x: &Operand, locals: &Locals<'_>) -> Result<Interval> { + Ok(match x { + Operand::Copy(p) | Operand::Move(p) => self.eval_place(p, locals)?, + Operand::Constant(konst) => { + let data = &konst.data(Interner); + match &data.value { + chalk_ir::ConstValue::BoundVar(b) => { + let c = locals + .subst + .as_slice(Interner) + .get(b.index) + .ok_or(MirEvalError::TypeError("missing generic arg"))? + .assert_const_ref(Interner); + self.eval_operand(&Operand::Constant(c.clone()), locals)? 
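+ // e.g. inside a hypothetical `fn f<const N: usize>()` monomorphized with `N = 2`, the bound var's index selects the substitution slot holding the concrete `2`, which is then evaluated as an ordinary constant.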
+ } + chalk_ir::ConstValue::InferenceVar(_) => { + not_supported!("inference var constant") + } + chalk_ir::ConstValue::Placeholder(_) => not_supported!("placeholder constant"), + chalk_ir::ConstValue::Concrete(c) => match &c.interned { + ConstScalar::Bytes(v, memory_map) => { + let mut v: Cow<'_, [u8]> = Cow::Borrowed(v); + let patch_map = memory_map.transform_addresses(|b| { + let addr = self.heap_allocate(b.len()); + self.write_memory(addr, b)?; + Ok(addr.to_usize()) + })?; + let size = self.size_of(&data.ty, locals)?.unwrap_or(v.len()); + if size != v.len() { + // Handle self-referential enums, whose size is dummied to 16 bytes in `size_of` + if size == 16 && v.len() < 16 { + v = Cow::Owned(pad16(&v, false).to_vec()); + } else if size < 16 && v.len() == 16 { + v = Cow::Owned(v[0..size].to_vec()); + } else { + return Err(MirEvalError::InvalidConst(konst.clone())); + } + } + let addr = self.heap_allocate(size); + self.write_memory(addr, &v)?; + self.patch_addresses(&patch_map, addr, &data.ty, locals)?; + Interval::new(addr, size) + } + ConstScalar::Unknown => not_supported!("evaluating unknown const"), + }, + } + } + }) + } + + fn eval_place(&mut self, p: &Place, locals: &Locals<'_>) -> Result<Interval> { + let addr = self.place_addr(p, locals)?; + Ok(Interval::new( + addr, + self.size_of_sized(&self.place_ty(p, locals)?, locals, "type of this place")?, + )) + } + + fn read_memory(&self, addr: Address, size: usize) -> Result<&[u8]> { + let (mem, pos) = match addr { + Stack(x) => (&self.stack, x), + Heap(x) => (&self.heap, x), + }; + mem.get(pos..pos + size).ok_or(MirEvalError::UndefinedBehavior("out-of-bounds memory read")) + } + + fn write_memory(&mut self, addr: Address, r: &[u8]) -> Result<()> { + let (mem, pos) = match addr { + Stack(x) => (&mut self.stack, x), + Heap(x) => (&mut self.heap, x), + }; + mem.get_mut(pos..pos + r.len()) + .ok_or(MirEvalError::UndefinedBehavior("out-of-bounds memory write"))? + .copy_from_slice(r); + Ok(()) + } + + fn size_of(&self, ty: &Ty, locals: &Locals<'_>) -> Result<Option<usize>> { + if let DefWithBodyId::VariantId(f) = locals.body.owner { + if let Some((adt, _)) = ty.as_adt() { + if AdtId::from(f.parent) == adt { + // Computing the exact size of enums requires resolving the enum discriminants. In order to prevent loops (and + // infinite-sized type errors) we use a dummy size + return Ok(Some(16)); + } + } + } + let ty = &self.ty_filler(ty, locals.subst, locals.body.owner)?; + let layout = self.layout(ty); + if self.assert_placeholder_ty_is_unused { + if matches!(layout, Err(MirEvalError::LayoutError(LayoutError::HasPlaceholder, _))) { + return Ok(Some(0)); + } + } + let layout = layout?; + Ok(layout.is_sized().then(|| layout.size.bytes_usize())) + } + + /// A version of `self.size_of` which returns an error if the type is unsized. The `what` argument should + /// be something that completes this: `error: type {ty} was unsized. {what} should be sized` + fn size_of_sized(&self, ty: &Ty, locals: &Locals<'_>, what: &'static str) -> Result<usize> { + match self.size_of(ty, locals)? 
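+ // e.g. calling this with `what = "return type"` on an unsized type like `str` would read: `type str was unsized. return type should be sized`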
{ + Some(x) => Ok(x), + None => Err(MirEvalError::TypeIsUnsized(ty.clone(), what)), + } + } + + /// Uses `ty_filler` to fill an entire subst + fn subst_filler(&self, subst: &Substitution, locals: &Locals<'_>) -> Substitution { + Substitution::from_iter( + Interner, + subst.iter(Interner).map(|x| match x.data(Interner) { + chalk_ir::GenericArgData::Ty(ty) => { + let Ok(ty) = self.ty_filler(ty, locals.subst, locals.body.owner) else { + return x.clone(); + }; + chalk_ir::GenericArgData::Ty(ty).intern(Interner) + } + _ => x.clone(), + }), + ) + } + + /// This function substitutes placeholders of the body with the provided subst, effectively playing + /// the role of monomorphization. In addition to placeholders, it substitutes opaque types (return + /// position impl traits) with their underlying type. + fn ty_filler(&self, ty: &Ty, subst: &Substitution, owner: DefWithBodyId) -> Result<Ty> { + struct Filler<'a> { + db: &'a dyn HirDatabase, + subst: &'a Substitution, + skip_params: usize, + } + impl FallibleTypeFolder<Interner> for Filler<'_> { + type Error = MirEvalError; + + fn as_dyn(&mut self) -> &mut dyn FallibleTypeFolder<Interner, Error = Self::Error> { + self + } + + fn interner(&self) -> Interner { + Interner + } + + fn try_fold_ty( + &mut self, + ty: Ty, + outer_binder: DebruijnIndex, + ) -> std::result::Result<Ty, Self::Error> { + match ty.kind(Interner) { + TyKind::OpaqueType(id, subst) => { + let impl_trait_id = self.db.lookup_intern_impl_trait_id((*id).into()); + match impl_trait_id { + crate::ImplTraitId::ReturnTypeImplTrait(func, idx) => { + let infer = self.db.infer(func.into()); + let filler = &mut Filler { db: self.db, subst, skip_params: 0 }; + filler.try_fold_ty(infer.type_of_rpit[idx].clone(), outer_binder) + } + crate::ImplTraitId::AsyncBlockTypeImplTrait(_, _) => { + not_supported!("async block impl trait"); + } + } + } + _ => ty.try_super_fold_with(self.as_dyn(), outer_binder), + } + } + + fn try_fold_free_placeholder_ty( + &mut self, + idx: chalk_ir::PlaceholderIndex, + _outer_binder: DebruijnIndex, + ) -> std::result::Result<Ty, Self::Error> { + let x = from_placeholder_idx(self.db, idx); + Ok(self + .subst + .as_slice(Interner) + .get((u32::from(x.local_id.into_raw()) as usize) + self.skip_params) + .and_then(|x| x.ty(Interner)) + .ok_or(MirEvalError::TypeError("Generic arg not provided"))? 
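+ // e.g. monomorphizing a hypothetical `fn id<T>(x: T) -> T` with subst `[i32]`: the placeholder for `T` maps to slot 0 of the substitution and folds to `i32`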
+ .clone()) + } + } + let filler = &mut Filler { db: self.db, subst, skip_params: 0 }; + Ok(normalize(self.db, owner, ty.clone().try_fold_with(filler, DebruijnIndex::INNERMOST)?)) + } + + fn heap_allocate(&mut self, s: usize) -> Address { + let pos = self.heap.len(); + self.heap.extend(iter::repeat(0).take(s)); + Address::Heap(pos) + } + + pub fn interpret_mir_with_no_arg(&mut self, body: &MirBody) -> Result<Vec<u8>> { + self.interpret_mir(&body, vec![].into_iter(), Substitution::empty(Interner)) + } + + fn detect_lang_function(&self, def: FunctionId) -> Option<LangItem> { + lang_attr(self.db.upcast(), def) + } + + fn create_memory_map(&self, bytes: &[u8], ty: &Ty, locals: &Locals<'_>) -> Result<MemoryMap> { + // FIXME: support indirect references + let mut mm = MemoryMap::default(); + match ty.kind(Interner) { + TyKind::Ref(_, _, t) => { + let size = self.size_of(t, locals)?; + match size { + Some(size) => { + let addr_usize = from_bytes!(usize, bytes); + mm.insert( + addr_usize, + self.read_memory(Address::from_usize(addr_usize), size)?.to_vec(), + ) + } + None => { + let element_size = match t.kind(Interner) { + TyKind::Str => 1, + TyKind::Slice(t) => { + self.size_of_sized(t, locals, "slice inner type")? + } + _ => return Ok(mm), // FIXME: support other kinds of unsized types + }; + let (addr, meta) = bytes.split_at(bytes.len() / 2); + let size = element_size * from_bytes!(usize, meta); + let addr = Address::from_bytes(addr)?; + mm.insert(addr.to_usize(), self.read_memory(addr, size)?.to_vec()); + } + } + } + _ => (), + } + Ok(mm) + } + + fn patch_addresses( + &mut self, + patch_map: &HashMap<usize, usize>, + addr: Address, + ty: &Ty, + locals: &Locals<'_>, + ) -> Result<()> { + // FIXME: support indirect references + let my_size = self.size_of_sized(ty, locals, "value to patch address")?; + match ty.kind(Interner) { + TyKind::Ref(_, _, t) => { + let size = self.size_of(t, locals)?; + match size { + Some(_) => { + let current = from_bytes!(usize, self.read_memory(addr, my_size)?); + if let Some(x) = patch_map.get(&current) { + self.write_memory(addr, &x.to_le_bytes())?; + } + } + None => { + let current = from_bytes!(usize, self.read_memory(addr, my_size / 2)?); + if let Some(x) = patch_map.get(&current) { + self.write_memory(addr, &x.to_le_bytes())?; + } + } + } + } + _ => (), + } + Ok(()) + } + + fn exec_intrinsic( + &self, + as_str: &str, + _arg_bytes: impl Iterator<Item = Vec<u8>>, + generic_args: Substitution, + locals: &Locals<'_>, + ) -> Result<Vec<u8>> { + match as_str { + "size_of" => { + let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|x| x.ty(Interner)) else { + return Err(MirEvalError::TypeError("size_of generic arg is not provided")); + }; + let size = self.size_of(ty, locals)?; + match size { + Some(x) => Ok(x.to_le_bytes().to_vec()), + None => return Err(MirEvalError::TypeError("size_of arg is unsized")), + } + } + _ => not_supported!("unknown intrinsic {as_str}"), + } + } + + pub(crate) fn exec_lang_item( + &self, + x: LangItem, + mut args: std::vec::IntoIter<Vec<u8>>, + ) -> Result<Vec<u8>> { + use LangItem::*; + match x { + PanicFmt | BeginPanic => Err(MirEvalError::Panic), + SliceLen => { + let arg = args + .next() + .ok_or(MirEvalError::TypeError("argument of <[T]>::len() is not provided"))?; + let ptr_size = arg.len() / 2; + Ok(arg[ptr_size..].into()) + } + x => not_supported!("Executing lang item {x:?}"), + } + } +} + +pub fn pad16(x: &[u8], is_signed: bool) -> [u8; 16] { + let is_negative = is_signed && x.last().unwrap_or(&0) > &127; + let fill_with = if 
is_negative { 255 } else { 0 }; + x.iter() + .copied() + .chain(iter::repeat(fill_with)) + .take(16) + .collect::<Vec<u8>>() + .try_into() + .expect("iterator take is not working") +} diff --git a/crates/hir-ty/src/mir/lower.rs b/crates/hir-ty/src/mir/lower.rs new file mode 100644 index 00000000000..1fa21e230c4 --- /dev/null +++ b/crates/hir-ty/src/mir/lower.rs @@ -0,0 +1,1209 @@ +//! This module generates a polymorphic MIR from a hir body + +use std::{iter, mem, sync::Arc}; + +use chalk_ir::{BoundVar, ConstData, DebruijnIndex, TyKind}; +use hir_def::{ + body::Body, + expr::{ + Array, BindingAnnotation, ExprId, LabelId, Literal, MatchArm, Pat, PatId, RecordLitField, + }, + layout::LayoutError, + resolver::{resolver_for_expr, ResolveValueResult, ValueNs}, + DefWithBodyId, EnumVariantId, HasModule, +}; +use la_arena::ArenaMap; + +use crate::{ + consteval::ConstEvalError, db::HirDatabase, layout::layout_of_ty, mapping::ToChalk, + utils::generics, Adjust, AutoBorrow, CallableDefId, TyBuilder, TyExt, +}; + +use super::*; + +#[derive(Debug, Clone, Copy)] +struct LoopBlocks { + begin: BasicBlockId, + end: BasicBlockId, +} + +struct MirLowerCtx<'a> { + result: MirBody, + owner: DefWithBodyId, + binding_locals: ArenaMap<PatId, LocalId>, + current_loop_blocks: Option<LoopBlocks>, + discr_temp: Option<Place>, + db: &'a dyn HirDatabase, + body: &'a Body, + infer: &'a InferenceResult, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum MirLowerError { + ConstEvalError(Box<ConstEvalError>), + LayoutError(LayoutError), + IncompleteExpr, + UnresolvedName, + MissingFunctionDefinition, + TypeError(&'static str), + NotSupported(String), + ContinueWithoutLoop, + BreakWithoutLoop, + Loop, +} + +macro_rules! not_supported { + ($x: expr) => { + return Err(MirLowerError::NotSupported(format!($x))) + }; +} + +impl From<ConstEvalError> for MirLowerError { + fn from(value: ConstEvalError) -> Self { + match value { + ConstEvalError::MirLowerError(e) => e, + _ => MirLowerError::ConstEvalError(Box::new(value)), + } + } +} + +impl From<LayoutError> for MirLowerError { + fn from(value: LayoutError) -> Self { + MirLowerError::LayoutError(value) + } +} + +type Result<T> = std::result::Result<T, MirLowerError>; + +impl MirLowerCtx<'_> { + fn temp(&mut self, ty: Ty) -> Result<LocalId> { + if matches!(ty.kind(Interner), TyKind::Slice(_) | TyKind::Dyn(_)) { + not_supported!("unsized temporaries"); + } + Ok(self.result.locals.alloc(Local { mutability: Mutability::Not, ty })) + } + + fn lower_expr_as_place(&self, expr_id: ExprId) -> Option<Place> { + let adjustments = self.infer.expr_adjustments.get(&expr_id); + let mut r = self.lower_expr_as_place_without_adjust(expr_id)?; + for adjustment in adjustments.iter().flat_map(|x| x.iter()) { + match adjustment.kind { + Adjust::NeverToAny => return Some(r), + Adjust::Deref(None) => { + r.projection.push(ProjectionElem::Deref); + } + Adjust::Deref(Some(_)) => return None, + Adjust::Borrow(_) => return None, + Adjust::Pointer(_) => return None, + } + } + Some(r) + } + + fn lower_expr_as_place_without_adjust(&self, expr_id: ExprId) -> Option<Place> { + match &self.body.exprs[expr_id] { + Expr::Path(p) => { + let resolver = resolver_for_expr(self.db.upcast(), self.owner, expr_id); + let pr = resolver.resolve_path_in_value_ns(self.db.upcast(), p.mod_path())?; + let pr = match pr { + ResolveValueResult::ValueNs(v) => v, + ResolveValueResult::Partial(..) 
=> return None, + }; + match pr { + ValueNs::LocalBinding(pat_id) => Some(self.binding_locals[pat_id].into()), + _ => None, + } + } + Expr::UnaryOp { expr, op } => match op { + hir_def::expr::UnaryOp::Deref => { + let mut r = self.lower_expr_as_place(*expr)?; + r.projection.push(ProjectionElem::Deref); + Some(r) + } + _ => None, + }, + _ => None, + } + } + + fn lower_expr_to_some_operand( + &mut self, + expr_id: ExprId, + current: BasicBlockId, + ) -> Result<(Operand, BasicBlockId)> { + if !self.has_adjustments(expr_id) { + match &self.body.exprs[expr_id] { + Expr::Literal(l) => { + let ty = self.expr_ty(expr_id); + return Ok((self.lower_literal_to_operand(ty, l)?, current)); + } + _ => (), + } + } + let (p, current) = self.lower_expr_to_some_place(expr_id, current)?; + Ok((Operand::Copy(p), current)) + } + + fn lower_expr_to_some_place( + &mut self, + expr_id: ExprId, + prev_block: BasicBlockId, + ) -> Result<(Place, BasicBlockId)> { + if let Some(p) = self.lower_expr_as_place(expr_id) { + return Ok((p, prev_block)); + } + let mut ty = self.expr_ty(expr_id); + if let Some(x) = self.infer.expr_adjustments.get(&expr_id) { + if let Some(x) = x.last() { + ty = x.target.clone(); + } + } + let place = self.temp(ty)?; + Ok((place.into(), self.lower_expr_to_place(expr_id, place.into(), prev_block)?)) + } + + fn lower_expr_to_place( + &mut self, + expr_id: ExprId, + place: Place, + prev_block: BasicBlockId, + ) -> Result<BasicBlockId> { + if let Some(x) = self.infer.expr_adjustments.get(&expr_id) { + if x.len() > 0 { + let tmp = self.temp(self.expr_ty(expr_id))?; + let current = + self.lower_expr_to_place_without_adjust(expr_id, tmp.into(), prev_block)?; + let mut r = Place::from(tmp); + for adjustment in x { + match &adjustment.kind { + Adjust::NeverToAny => (), + Adjust::Deref(None) => { + r.projection.push(ProjectionElem::Deref); + } + Adjust::Deref(Some(_)) => not_supported!("overloaded dereference"), + Adjust::Borrow(AutoBorrow::Ref(m) | AutoBorrow::RawPtr(m)) => { + let tmp = self.temp(adjustment.target.clone())?; + self.push_assignment( + current, + tmp.into(), + Rvalue::Ref(BorrowKind::from_chalk(*m), r), + ); + r = tmp.into(); + } + Adjust::Pointer(cast) => { + let target = &adjustment.target; + let tmp = self.temp(target.clone())?; + self.push_assignment( + current, + tmp.into(), + Rvalue::Cast( + CastKind::Pointer(cast.clone()), + Operand::Copy(r).into(), + target.clone(), + ), + ); + r = tmp.into(); + } + } + } + self.push_assignment(current, place, Operand::Copy(r).into()); + return Ok(current); + } + } + self.lower_expr_to_place_without_adjust(expr_id, place, prev_block) + } + + fn lower_expr_to_place_without_adjust( + &mut self, + expr_id: ExprId, + place: Place, + mut current: BasicBlockId, + ) -> Result<BasicBlockId> { + match &self.body.exprs[expr_id] { + Expr::Missing => Err(MirLowerError::IncompleteExpr), + Expr::Path(p) => { + let resolver = resolver_for_expr(self.db.upcast(), self.owner, expr_id); + let pr = resolver + .resolve_path_in_value_ns(self.db.upcast(), p.mod_path()) + .ok_or(MirLowerError::UnresolvedName)?; + let pr = match pr { + ResolveValueResult::ValueNs(v) => v, + ResolveValueResult::Partial(..) => { + return match self + .infer + .assoc_resolutions_for_expr(expr_id) + .ok_or(MirLowerError::UnresolvedName)? + .0 + //.ok_or(ConstEvalError::SemanticError("unresolved assoc item"))? 
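+ // e.g. for a path like `i32::MAX` only a prefix resolves in the value namespace, so the associated const is taken from the recorded inference result instead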
+ { + hir_def::AssocItemId::ConstId(c) => self.lower_const(c, current, place), + _ => return Err(MirLowerError::UnresolvedName), + }; + } + }; + match pr { + ValueNs::LocalBinding(pat_id) => { + self.push_assignment( + current, + place, + Operand::Copy(self.binding_locals[pat_id].into()).into(), + ); + Ok(current) + } + ValueNs::ConstId(const_id) => self.lower_const(const_id, current, place), + ValueNs::EnumVariantId(variant_id) => { + let ty = self.infer.type_of_expr[expr_id].clone(); + self.lower_enum_variant(variant_id, current, place, ty, vec![]) + } + ValueNs::GenericParam(p) => { + let Some(def) = self.owner.as_generic_def_id() else { + not_supported!("owner without generic def id"); + }; + let gen = generics(self.db.upcast(), def); + let ty = self.expr_ty(expr_id); + self.push_assignment( + current, + place, + Operand::Constant( + ConstData { + ty, + value: chalk_ir::ConstValue::BoundVar(BoundVar::new( + DebruijnIndex::INNERMOST, + gen.param_idx(p.into()).ok_or(MirLowerError::TypeError( + "failed to lower const generic param", + ))?, + )), + } + .intern(Interner), + ) + .into(), + ); + Ok(current) + } + ValueNs::StructId(_) => { + // It's probably a unit struct or a zero-sized function, so no action is needed. + Ok(current) + } + x => { + not_supported!("unknown name {x:?} in value namespace"); + } + } + } + Expr::If { condition, then_branch, else_branch } => { + let (discr, current) = self.lower_expr_to_some_operand(*condition, current)?; + let start_of_then = self.new_basic_block(); + let end = self.new_basic_block(); + let end_of_then = + self.lower_expr_to_place(*then_branch, place.clone(), start_of_then)?; + self.set_goto(end_of_then, end); + let mut start_of_else = end; + if let Some(else_branch) = else_branch { + start_of_else = self.new_basic_block(); + let end_of_else = + self.lower_expr_to_place(*else_branch, place, start_of_else)?; + self.set_goto(end_of_else, end); + } + self.set_terminator( + current, + Terminator::SwitchInt { + discr, + targets: SwitchTargets::static_if(1, start_of_then, start_of_else), + }, + ); + Ok(end) + } + Expr::Let { pat, expr } => { + let (cond_place, current) = self.lower_expr_to_some_place(*expr, current)?; + let result = self.new_basic_block(); + let (then_target, else_target) = self.pattern_match( + current, + None, + cond_place, + self.expr_ty(*expr), + *pat, + BindingAnnotation::Unannotated, + )?; + self.write_bytes_to_place(then_target, place.clone(), vec![1], TyBuilder::bool())?; + self.set_goto(then_target, result); + if let Some(else_target) = else_target { + self.write_bytes_to_place(else_target, place, vec![0], TyBuilder::bool())?; + self.set_goto(else_target, result); + } + Ok(result) + } + Expr::Block { id: _, statements, tail, label } => { + if label.is_some() { + not_supported!("block with label"); + } + for statement in statements.iter() { + match statement { + hir_def::expr::Statement::Let { + pat, + initializer, + else_branch, + type_ref: _, + } => match initializer { + Some(expr_id) => { + let else_block; + let init_place; + (init_place, current) = + self.lower_expr_to_some_place(*expr_id, current)?; + (current, else_block) = self.pattern_match( + current, + None, + init_place, + self.expr_ty(*expr_id), + *pat, + BindingAnnotation::Unannotated, + )?; + match (else_block, else_branch) { + (None, _) => (), + (Some(else_block), None) => { + self.set_terminator(else_block, Terminator::Unreachable); + } + (Some(else_block), Some(else_branch)) => { + let (_, b) = self + .lower_expr_to_some_place(*else_branch, else_block)?; + 
self.set_terminator(b, Terminator::Unreachable); + } + } + } + None => continue, + }, + hir_def::expr::Statement::Expr { expr, has_semi: _ } => { + let ty = self.expr_ty(*expr); + let temp = self.temp(ty)?; + current = self.lower_expr_to_place(*expr, temp.into(), current)?; + } + } + } + match tail { + Some(tail) => self.lower_expr_to_place(*tail, place, current), + None => Ok(current), + } + } + Expr::Loop { body, label } => self.lower_loop(current, *label, |this, begin, _| { + let (_, block) = this.lower_expr_to_some_place(*body, begin)?; + this.set_goto(block, begin); + Ok(()) + }), + Expr::While { condition, body, label } => { + self.lower_loop(current, *label, |this, begin, end| { + let (discr, to_switch) = this.lower_expr_to_some_operand(*condition, begin)?; + let after_cond = this.new_basic_block(); + this.set_terminator( + to_switch, + Terminator::SwitchInt { + discr, + targets: SwitchTargets::static_if(1, after_cond, end), + }, + ); + let (_, block) = this.lower_expr_to_some_place(*body, after_cond)?; + this.set_goto(block, begin); + Ok(()) + }) + } + Expr::For { .. } => not_supported!("for loop"), + Expr::Call { callee, args, .. } => { + let callee_ty = self.expr_ty(*callee); + match &callee_ty.data(Interner).kind { + chalk_ir::TyKind::FnDef(..) => { + let func = Operand::from_bytes(vec![], callee_ty.clone()); + self.lower_call(func, args.iter().copied(), place, current) + } + TyKind::Scalar(_) + | TyKind::Tuple(_, _) + | TyKind::Array(_, _) + | TyKind::Adt(_, _) + | TyKind::Str + | TyKind::Foreign(_) + | TyKind::Slice(_) => { + return Err(MirLowerError::TypeError("function call on data type")) + } + TyKind::Error => return Err(MirLowerError::MissingFunctionDefinition), + TyKind::AssociatedType(_, _) + | TyKind::Raw(_, _) + | TyKind::Ref(_, _, _) + | TyKind::OpaqueType(_, _) + | TyKind::Never + | TyKind::Closure(_, _) + | TyKind::Generator(_, _) + | TyKind::GeneratorWitness(_, _) + | TyKind::Placeholder(_) + | TyKind::Dyn(_) + | TyKind::Alias(_) + | TyKind::Function(_) + | TyKind::BoundVar(_) + | TyKind::InferenceVar(_, _) => not_supported!("dynamic function call"), + } + } + Expr::MethodCall { receiver, args, .. 
} => { + let (func_id, generic_args) = + self.infer.method_resolution(expr_id).ok_or(MirLowerError::UnresolvedName)?; + let ty = chalk_ir::TyKind::FnDef( + CallableDefId::FunctionId(func_id).to_chalk(self.db), + generic_args, + ) + .intern(Interner); + let func = Operand::from_bytes(vec![], ty); + self.lower_call( + func, + iter::once(*receiver).chain(args.iter().copied()), + place, + current, + ) + } + Expr::Match { expr, arms } => { + let (cond_place, mut current) = self.lower_expr_to_some_place(*expr, current)?; + let cond_ty = self.expr_ty(*expr); + let end = self.new_basic_block(); + for MatchArm { pat, guard, expr } in arms.iter() { + if guard.is_some() { + not_supported!("pattern matching with guard"); + } + let (then, otherwise) = self.pattern_match( + current, + None, + cond_place.clone(), + cond_ty.clone(), + *pat, + BindingAnnotation::Unannotated, + )?; + let block = self.lower_expr_to_place(*expr, place.clone(), then)?; + self.set_goto(block, end); + match otherwise { + Some(o) => current = o, + None => { + // The current pattern was irrefutable, so there is no need to generate code + // for the rest of the patterns + break; + } + } + } + if self.is_unterminated(current) { + self.set_terminator(current, Terminator::Unreachable); + } + Ok(end) + } + Expr::Continue { label } => match label { + Some(_) => not_supported!("continue with label"), + None => { + let loop_data = + self.current_loop_blocks.ok_or(MirLowerError::ContinueWithoutLoop)?; + self.set_goto(current, loop_data.begin); + let otherwise = self.new_basic_block(); + Ok(otherwise) + } + }, + Expr::Break { expr, label } => { + if expr.is_some() { + not_supported!("break with value"); + } + match label { + Some(_) => not_supported!("break with label"), + None => { + let loop_data = + self.current_loop_blocks.ok_or(MirLowerError::BreakWithoutLoop)?; + self.set_goto(current, loop_data.end); + Ok(self.new_basic_block()) + } + } + } + Expr::Return { expr } => { + if let Some(expr) = expr { + current = self.lower_expr_to_place(*expr, return_slot().into(), current)?; + } + self.set_terminator(current, Terminator::Return); + Ok(self.new_basic_block()) + } + Expr::Yield { .. } => not_supported!("yield"), + Expr::RecordLit { fields, .. 
} => { + let variant_id = self + .infer + .variant_resolution_for_expr(expr_id) + .ok_or(MirLowerError::UnresolvedName)?; + let subst = match self.expr_ty(expr_id).kind(Interner) { + TyKind::Adt(_, s) => s.clone(), + _ => not_supported!("Non-ADT record literal"), + }; + let variant_data = variant_id.variant_data(self.db.upcast()); + match variant_id { + VariantId::EnumVariantId(_) | VariantId::StructId(_) => { + let mut operands = vec![None; variant_data.fields().len()]; + for RecordLitField { name, expr } in fields.iter() { + let field_id = + variant_data.field(name).ok_or(MirLowerError::UnresolvedName)?; + let op; + (op, current) = self.lower_expr_to_some_operand(*expr, current)?; + operands[u32::from(field_id.into_raw()) as usize] = Some(op); + } + self.push_assignment( + current, + place, + Rvalue::Aggregate( + AggregateKind::Adt(variant_id, subst), + operands.into_iter().collect::<Option<_>>().ok_or( + MirLowerError::TypeError("missing field in record literal"), + )?, + ), + ); + Ok(current) + } + VariantId::UnionId(union_id) => { + let [RecordLitField { name, expr }] = fields.as_ref() else { + not_supported!("Union record literal with more than one field"); + }; + let local_id = + variant_data.field(name).ok_or(MirLowerError::UnresolvedName)?; + let mut place = place; + place + .projection + .push(PlaceElem::Field(FieldId { parent: union_id.into(), local_id })); + self.lower_expr_to_place(*expr, place, current) + } + } + } + Expr::Field { expr, name } => { + let (mut current_place, current) = self.lower_expr_to_some_place(*expr, current)?; + if let TyKind::Tuple(..) = self.expr_ty(*expr).kind(Interner) { + let index = name + .as_tuple_index() + .ok_or(MirLowerError::TypeError("named field on tuple"))?; + current_place.projection.push(ProjectionElem::TupleField(index)) + } else { + let field = self + .infer + .field_resolution(expr_id) + .ok_or(MirLowerError::UnresolvedName)?; + current_place.projection.push(ProjectionElem::Field(field)); + } + self.push_assignment(current, place, Operand::Copy(current_place).into()); + Ok(current) + } + Expr::Await { .. } => not_supported!("await"), + Expr::Try { .. } => not_supported!("? operator"), + Expr::Yeet { .. } => not_supported!("yeet"), + Expr::TryBlock { .. } => not_supported!("try block"), + Expr::Async { .. } => not_supported!("async block"), + Expr::Const { .. } => not_supported!("anonymous const block"), + Expr::Cast { expr, type_ref: _ } => { + let (x, current) = self.lower_expr_to_some_operand(*expr, current)?; + let source_ty = self.infer[*expr].clone(); + let target_ty = self.infer[expr_id].clone(); + self.push_assignment( + current, + place, + Rvalue::Cast(cast_kind(&source_ty, &target_ty)?, x, target_ty), + ); + Ok(current) + } + Expr::Ref { expr, rawness: _, mutability } => { + let p; + (p, current) = self.lower_expr_to_some_place(*expr, current)?; + let bk = BorrowKind::from_hir(*mutability); + self.push_assignment(current, place, Rvalue::Ref(bk, p)); + Ok(current) + } + Expr::Box { .. 
} => not_supported!("box expression"), + Expr::UnaryOp { expr, op } => match op { + hir_def::expr::UnaryOp::Deref => { + let (mut tmp, current) = self.lower_expr_to_some_place(*expr, current)?; + tmp.projection.push(ProjectionElem::Deref); + self.push_assignment(current, place, Operand::Copy(tmp).into()); + Ok(current) + } + hir_def::expr::UnaryOp::Not => { + let (op, current) = self.lower_expr_to_some_operand(*expr, current)?; + self.push_assignment(current, place, Rvalue::UnaryOp(UnOp::Not, op)); + Ok(current) + } + hir_def::expr::UnaryOp::Neg => { + let (op, current) = self.lower_expr_to_some_operand(*expr, current)?; + self.push_assignment(current, place, Rvalue::UnaryOp(UnOp::Neg, op)); + Ok(current) + } + }, + Expr::BinaryOp { lhs, rhs, op } => { + let op = op.ok_or(MirLowerError::IncompleteExpr)?; + if let hir_def::expr::BinaryOp::Assignment { op } = op { + if op.is_some() { + not_supported!("assignment with arith op (like +=)"); + } + let Some(lhs_place) = self.lower_expr_as_place(*lhs) else { + not_supported!("assignment to complex place"); + }; + let rhs_op; + (rhs_op, current) = self.lower_expr_to_some_operand(*rhs, current)?; + self.push_assignment(current, lhs_place, rhs_op.into()); + return Ok(current); + } + let lhs_op; + (lhs_op, current) = self.lower_expr_to_some_operand(*lhs, current)?; + let rhs_op; + (rhs_op, current) = self.lower_expr_to_some_operand(*rhs, current)?; + self.push_assignment( + current, + place, + Rvalue::CheckedBinaryOp( + match op { + hir_def::expr::BinaryOp::LogicOp(op) => match op { + hir_def::expr::LogicOp::And => BinOp::BitAnd, // FIXME: make these short circuit + hir_def::expr::LogicOp::Or => BinOp::BitOr, + }, + hir_def::expr::BinaryOp::ArithOp(op) => BinOp::from(op), + hir_def::expr::BinaryOp::CmpOp(op) => BinOp::from(op), + hir_def::expr::BinaryOp::Assignment { .. } => unreachable!(), // handled above + }, + lhs_op, + rhs_op, + ), + ); + Ok(current) + } + Expr::Range { .. } => not_supported!("range"), + Expr::Index { base, index } => { + let mut p_base; + (p_base, current) = self.lower_expr_to_some_place(*base, current)?; + let l_index = self.temp(self.expr_ty(*index))?; + current = self.lower_expr_to_place(*index, l_index.into(), current)?; + p_base.projection.push(ProjectionElem::Index(l_index)); + self.push_assignment(current, place, Operand::Copy(p_base).into()); + Ok(current) + } + Expr::Closure { .. } => not_supported!("closure"), + Expr::Tuple { exprs, is_assignee_expr: _ } => { + let r = Rvalue::Aggregate( + AggregateKind::Tuple(self.expr_ty(expr_id)), + exprs + .iter() + .map(|x| { + let o; + (o, current) = self.lower_expr_to_some_operand(*x, current)?; + Ok(o) + }) + .collect::<Result<_>>()?, + ); + self.push_assignment(current, place, r); + Ok(current) + } + Expr::Unsafe { body } => self.lower_expr_to_place(*body, place, current), + Expr::Array(l) => match l { + Array::ElementList { elements, .. } => { + let elem_ty = match &self.expr_ty(expr_id).data(Interner).kind { + TyKind::Array(ty, _) => ty.clone(), + _ => { + return Err(MirLowerError::TypeError( + "Array expression with non array type", + )) + } + }; + let r = Rvalue::Aggregate( + AggregateKind::Array(elem_ty), + elements + .iter() + .map(|x| { + let o; + (o, current) = self.lower_expr_to_some_operand(*x, current)?; + Ok(o) + }) + .collect::<Result<_>>()?, + ); + self.push_assignment(current, place, r); + Ok(current) + } + Array::Repeat { .. 
} => not_supported!("array repeat"), + }, + Expr::Literal(l) => { + let ty = self.expr_ty(expr_id); + let op = self.lower_literal_to_operand(ty, l)?; + self.push_assignment(current, place, op.into()); + Ok(current) + } + Expr::Underscore => not_supported!("underscore"), + } + } + + fn lower_literal_to_operand(&mut self, ty: Ty, l: &Literal) -> Result<Operand> { + let size = layout_of_ty(self.db, &ty, self.owner.module(self.db.upcast()).krate())? + .size + .bytes_usize(); + let bytes = match l { + hir_def::expr::Literal::String(b) => { + let b = b.as_bytes(); + let mut data = vec![]; + data.extend(0usize.to_le_bytes()); + data.extend(b.len().to_le_bytes()); + let mut mm = MemoryMap::default(); + mm.insert(0, b.to_vec()); + return Ok(Operand::from_concrete_const(data, mm, ty)); + } + hir_def::expr::Literal::ByteString(b) => { + let mut data = vec![]; + data.extend(0usize.to_le_bytes()); + data.extend(b.len().to_le_bytes()); + let mut mm = MemoryMap::default(); + mm.insert(0, b.to_vec()); + return Ok(Operand::from_concrete_const(data, mm, ty)); + } + hir_def::expr::Literal::Char(c) => u32::from(*c).to_le_bytes().into(), + hir_def::expr::Literal::Bool(b) => vec![*b as u8], + hir_def::expr::Literal::Int(x, _) => x.to_le_bytes()[0..size].into(), + hir_def::expr::Literal::Uint(x, _) => x.to_le_bytes()[0..size].into(), + hir_def::expr::Literal::Float(f, _) => match size { + 8 => f.into_f64().to_le_bytes().into(), + 4 => f.into_f32().to_le_bytes().into(), + _ => { + return Err(MirLowerError::TypeError("float with size other than 4 or 8 bytes")) + } + }, + }; + Ok(Operand::from_concrete_const(bytes, MemoryMap::default(), ty)) + } + + fn new_basic_block(&mut self) -> BasicBlockId { + self.result.basic_blocks.alloc(BasicBlock::default()) + } + + fn lower_const( + &mut self, + const_id: hir_def::ConstId, + prev_block: BasicBlockId, + place: Place, + ) -> Result<BasicBlockId> { + let c = self.db.const_eval(const_id)?; + self.write_const_to_place(c, prev_block, place) + } + + fn write_const_to_place( + &mut self, + c: Const, + prev_block: BasicBlockId, + place: Place, + ) -> Result<BasicBlockId> { + self.push_assignment(prev_block, place, Operand::Constant(c).into()); + Ok(prev_block) + } + + fn write_bytes_to_place( + &mut self, + prev_block: BasicBlockId, + place: Place, + cv: Vec<u8>, + ty: Ty, + ) -> Result<BasicBlockId> { + self.push_assignment(prev_block, place, Operand::from_bytes(cv, ty).into()); + Ok(prev_block) + } + + fn lower_enum_variant( + &mut self, + variant_id: EnumVariantId, + prev_block: BasicBlockId, + place: Place, + ty: Ty, + fields: Vec<Operand>, + ) -> Result<BasicBlockId> { + let subst = match ty.kind(Interner) { + TyKind::Adt(_, subst) => subst.clone(), + _ => not_supported!("Non ADT enum"), + }; + self.push_assignment( + prev_block, + place, + Rvalue::Aggregate(AggregateKind::Adt(variant_id.into(), subst), fields), + ); + Ok(prev_block) + } + + fn lower_call( + &mut self, + func: Operand, + args: impl Iterator<Item = ExprId>, + place: Place, + mut current: BasicBlockId, + ) -> Result<BasicBlockId> { + let args = args + .map(|arg| { + let temp; + (temp, current) = self.lower_expr_to_some_operand(arg, current)?; + Ok(temp) + }) + .collect::<Result<Vec<_>>>()?; + let b = self.result.basic_blocks.alloc(BasicBlock { + statements: vec![], + terminator: None, + is_cleanup: false, + }); + self.set_terminator( + current, + Terminator::Call { + func, + args, + destination: place, + target: Some(b), + cleanup: None, + from_hir_call: true, + }, + ); + Ok(b) + } + + fn is_unterminated(&mut 
self, source: BasicBlockId) -> bool { + self.result.basic_blocks[source].terminator.is_none() + } + + fn set_terminator(&mut self, source: BasicBlockId, terminator: Terminator) { + self.result.basic_blocks[source].terminator = Some(terminator); + } + + fn set_goto(&mut self, source: BasicBlockId, target: BasicBlockId) { + self.set_terminator(source, Terminator::Goto { target }); + } + + fn expr_ty(&self, e: ExprId) -> Ty { + self.infer[e].clone() + } + + fn push_assignment(&mut self, block: BasicBlockId, place: Place, rvalue: Rvalue) { + self.result.basic_blocks[block].statements.push(Statement::Assign(place, rvalue)); + } + + /// It takes a `current` unterminated block, appends some statements and possibly a terminator to it to check whether + /// the pattern matches and to write bindings, and returns two unterminated blocks, one for the matched path (which + /// can be the `current` block) and one for the mismatched path. If the input pattern is irrefutable, the + /// mismatched path block is `None`. + /// + /// By default, it will create a new block for the mismatched path. If you already have one, you can provide it with the + /// `current_else` argument to save an unnecessary jump. If `current_else` isn't `None`, the resulting mismatched path + /// won't be `None` either. Note that this function will add jumps to the beginning of the `current_else` block, + /// so it should be an empty block. + fn pattern_match( + &mut self, + mut current: BasicBlockId, + mut current_else: Option<BasicBlockId>, + mut cond_place: Place, + mut cond_ty: Ty, + pattern: PatId, + mut binding_mode: BindingAnnotation, + ) -> Result<(BasicBlockId, Option<BasicBlockId>)> { + Ok(match &self.body.pats[pattern] { + Pat::Missing => return Err(MirLowerError::IncompleteExpr), + Pat::Wild => (current, current_else), + Pat::Tuple { args, ellipsis } => { + pattern_matching_dereference(&mut cond_ty, &mut binding_mode, &mut cond_place); + let subst = match cond_ty.kind(Interner) { + TyKind::Tuple(_, s) => s, + _ => { + return Err(MirLowerError::TypeError( + "non-tuple type matched with tuple pattern", + )) + } + }; + self.pattern_match_tuple_like( + current, + current_else, + args.iter().enumerate().map(|(i, x)| { + ( + PlaceElem::TupleField(i), + *x, + subst.at(Interner, i).assert_ty_ref(Interner).clone(), + ) + }), + *ellipsis, + &cond_place, + binding_mode, + )? + } + Pat::Or(_) => not_supported!("or pattern"), + Pat::Record { .. } => not_supported!("record pattern"), + Pat::Range { .. } => not_supported!("range pattern"), + Pat::Slice { .. 
} => not_supported!("slice pattern"), + Pat::Path(_) => not_supported!("path pattern"), + Pat::Lit(l) => { + let then_target = self.new_basic_block(); + let else_target = current_else.unwrap_or_else(|| self.new_basic_block()); + match &self.body.exprs[*l] { + Expr::Literal(l) => match l { + hir_def::expr::Literal::Int(x, _) => { + self.set_terminator( + current, + Terminator::SwitchInt { + discr: Operand::Copy(cond_place), + targets: SwitchTargets::static_if( + *x as u128, + then_target, + else_target, + ), + }, + ); + } + hir_def::expr::Literal::Uint(x, _) => { + self.set_terminator( + current, + Terminator::SwitchInt { + discr: Operand::Copy(cond_place), + targets: SwitchTargets::static_if(*x, then_target, else_target), + }, + ); + } + _ => not_supported!("non int path literal"), + }, + _ => not_supported!("expression path literal"), + } + (then_target, Some(else_target)) + } + Pat::Bind { mode, name: _, subpat } => { + let target_place = self.binding_locals[pattern]; + if let Some(subpat) = subpat { + (current, current_else) = self.pattern_match( + current, + current_else, + cond_place.clone(), + cond_ty, + *subpat, + binding_mode, + )? + } + if matches!(mode, BindingAnnotation::Ref | BindingAnnotation::RefMut) { + binding_mode = *mode; + } + self.push_assignment( + current, + target_place.into(), + match binding_mode { + BindingAnnotation::Unannotated | BindingAnnotation::Mutable => { + Operand::Copy(cond_place).into() + } + BindingAnnotation::Ref => Rvalue::Ref(BorrowKind::Shared, cond_place), + BindingAnnotation::RefMut => Rvalue::Ref( + BorrowKind::Mut { allow_two_phase_borrow: false }, + cond_place, + ), + }, + ); + (current, current_else) + } + Pat::TupleStruct { path: _, args, ellipsis } => { + let Some(variant) = self.infer.variant_resolution_for_pat(pattern) else { + not_supported!("unresolved variant"); + }; + pattern_matching_dereference(&mut cond_ty, &mut binding_mode, &mut cond_place); + let subst = match cond_ty.kind(Interner) { + TyKind::Adt(_, s) => s, + _ => { + return Err(MirLowerError::TypeError( + "non adt type matched with tuple struct", + )) + } + }; + let fields_type = self.db.field_types(variant); + match variant { + VariantId::EnumVariantId(v) => { + let e = self.db.const_eval_discriminant(v)? as u128; + let next = self.new_basic_block(); + let tmp = self.discr_temp_place(); + self.push_assignment( + current, + tmp.clone(), + Rvalue::Discriminant(cond_place.clone()), + ); + let else_target = current_else.unwrap_or_else(|| self.new_basic_block()); + self.set_terminator( + current, + Terminator::SwitchInt { + discr: Operand::Copy(tmp), + targets: SwitchTargets::static_if(e, next, else_target), + }, + ); + let enum_data = self.db.enum_data(v.parent); + let fields = + enum_data.variants[v.local_id].variant_data.fields().iter().map( + |(x, _)| { + ( + PlaceElem::Field(FieldId { parent: v.into(), local_id: x }), + fields_type[x].clone().substitute(Interner, subst), + ) + }, + ); + self.pattern_match_tuple_like( + next, + Some(else_target), + args.iter().zip(fields).map(|(x, y)| (y.0, *x, y.1)), + *ellipsis, + &cond_place, + binding_mode, + )? 
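+ // e.g. matching a pattern like `Some(x)` emits a switch on `Rvalue::Discriminant` of the scrutinee against the discriminant of `Some`, and only the matching edge goes on to bind `x` from field 0.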
+ } + VariantId::StructId(s) => { + let struct_data = self.db.struct_data(s); + let fields = struct_data.variant_data.fields().iter().map(|(x, _)| { + ( + PlaceElem::Field(FieldId { parent: s.into(), local_id: x }), + fields_type[x].clone().substitute(Interner, subst), + ) + }); + self.pattern_match_tuple_like( + current, + current_else, + args.iter().zip(fields).map(|(x, y)| (y.0, *x, y.1)), + *ellipsis, + &cond_place, + binding_mode, + )? + } + VariantId::UnionId(_) => { + return Err(MirLowerError::TypeError("pattern matching on union")) + } + } + } + Pat::Ref { .. } => not_supported!("& pattern"), + Pat::Box { .. } => not_supported!("box pattern"), + Pat::ConstBlock(_) => not_supported!("const block pattern"), + }) + } + + fn pattern_match_tuple_like( + &mut self, + mut current: BasicBlockId, + mut current_else: Option<BasicBlockId>, + args: impl Iterator<Item = (PlaceElem, PatId, Ty)>, + ellipsis: Option<usize>, + cond_place: &Place, + binding_mode: BindingAnnotation, + ) -> Result<(BasicBlockId, Option<BasicBlockId>)> { + if ellipsis.is_some() { + not_supported!("tuple like pattern with ellipsis"); + } + for (proj, arg, ty) in args { + let mut cond_place = cond_place.clone(); + cond_place.projection.push(proj); + (current, current_else) = + self.pattern_match(current, current_else, cond_place, ty, arg, binding_mode)?; + } + Ok((current, current_else)) + } + + fn discr_temp_place(&mut self) -> Place { + match &self.discr_temp { + Some(x) => x.clone(), + None => { + let tmp: Place = + self.temp(TyBuilder::discr_ty()).expect("discr_ty is never unsized").into(); + self.discr_temp = Some(tmp.clone()); + tmp + } + } + } + + fn lower_loop( + &mut self, + prev_block: BasicBlockId, + label: Option<LabelId>, + f: impl FnOnce(&mut MirLowerCtx<'_>, BasicBlockId, BasicBlockId) -> Result<()>, + ) -> Result<BasicBlockId> { + if label.is_some() { + not_supported!("loop with label"); + } + let begin = self.new_basic_block(); + let end = self.new_basic_block(); + let prev = mem::replace(&mut self.current_loop_blocks, Some(LoopBlocks { begin, end })); + self.set_goto(prev_block, begin); + f(self, begin, end)?; + self.current_loop_blocks = prev; + Ok(end) + } + + fn has_adjustments(&self, expr_id: ExprId) -> bool { + !self.infer.expr_adjustments.get(&expr_id).map(|x| x.is_empty()).unwrap_or(true) + } +} + +fn pattern_matching_dereference( + cond_ty: &mut Ty, + binding_mode: &mut BindingAnnotation, + cond_place: &mut Place, +) { + while let Some((ty, _, mu)) = cond_ty.as_reference() { + if mu == Mutability::Mut && *binding_mode != BindingAnnotation::Ref { + *binding_mode = BindingAnnotation::RefMut; + } else { + *binding_mode = BindingAnnotation::Ref; + } + *cond_ty = ty.clone(); + cond_place.projection.push(ProjectionElem::Deref); + } +} + +fn cast_kind(source_ty: &Ty, target_ty: &Ty) -> Result<CastKind> { + Ok(match (source_ty.kind(Interner), target_ty.kind(Interner)) { + (TyKind::Scalar(s), TyKind::Scalar(t)) => match (s, t) { + (chalk_ir::Scalar::Float(_), chalk_ir::Scalar::Float(_)) => CastKind::FloatToFloat, + (chalk_ir::Scalar::Float(_), _) => CastKind::FloatToInt, + (_, chalk_ir::Scalar::Float(_)) => CastKind::IntToFloat, + (_, _) => CastKind::IntToInt, + }, + // Enum to int casts + (TyKind::Scalar(_), TyKind::Adt(..)) | (TyKind::Adt(..), TyKind::Scalar(_)) => { + CastKind::IntToInt + } + (a, b) => not_supported!("Unknown cast between {a:?} and {b:?}"), + }) +} + +pub fn mir_body_query(db: &dyn HirDatabase, def: DefWithBodyId) -> Result<Arc<MirBody>> { + let body = db.body(def); + let infer = 
db.infer(def); + Ok(Arc::new(lower_to_mir(db, def, &body, &infer, body.body_expr)?)) +} + +pub fn mir_body_recover( + _db: &dyn HirDatabase, + _cycle: &[String], + _def: &DefWithBodyId, +) -> Result<Arc<MirBody>> { + Err(MirLowerError::Loop) +} + +pub fn lower_to_mir( + db: &dyn HirDatabase, + owner: DefWithBodyId, + body: &Body, + infer: &InferenceResult, + // FIXME: root_expr should always be the body.body_expr, but since `X` in `[(); X]` doesn't have its own specific body yet, we + // need to take this input explicitly. + root_expr: ExprId, +) -> Result<MirBody> { + let mut basic_blocks = Arena::new(); + let start_block = + basic_blocks.alloc(BasicBlock { statements: vec![], terminator: None, is_cleanup: false }); + let mut locals = Arena::new(); + // 0 is return local + locals.alloc(Local { mutability: Mutability::Mut, ty: infer[root_expr].clone() }); + let mut create_local_of_path = |p: PatId| { + // FIXME: mutability is broken + locals.alloc(Local { mutability: Mutability::Not, ty: infer[p].clone() }) + }; + // 1 to param_len is for params + let mut binding_locals: ArenaMap<PatId, LocalId> = + body.params.iter().map(|&x| (x, create_local_of_path(x))).collect(); + // and then the rest of the bindings + for (pat_id, _) in body.pats.iter() { + if !binding_locals.contains_idx(pat_id) { + binding_locals.insert(pat_id, create_local_of_path(pat_id)); + } + } + let mir = MirBody { basic_blocks, locals, start_block, owner, arg_count: body.params.len() }; + let mut ctx = MirLowerCtx { + result: mir, + db, + infer, + body, + binding_locals, + owner, + current_loop_blocks: None, + discr_temp: None, + }; + let b = ctx.lower_expr_to_place(root_expr, return_slot().into(), start_block)?; + ctx.result.basic_blocks[b].terminator = Some(Terminator::Return); + Ok(ctx.result) +} diff --git a/crates/hir/src/lib.rs b/crates/hir/src/lib.rs index 6206a541c1e..eca37149f0a 100644 --- a/crates/hir/src/lib.rs +++ b/crates/hir/src/lib.rs @@ -50,7 +50,6 @@ use hir_def::{ per_ns::PerNs, resolver::{HasResolver, Resolver}, src::HasSource as _, - type_ref::ConstScalar, AdtId, AssocItemId, AssocItemLoc, AttrDefId, ConstId, ConstParamId, DefWithBodyId, EnumId, EnumVariantId, FunctionId, GenericDefId, HasModule, ImplId, ItemContainerId, LifetimeParamId, LocalEnumVariantId, LocalFieldId, Lookup, MacroExpander, MacroId, ModuleId, StaticId, StructId, @@ -59,16 +58,16 @@ use hir_expand::{name::name, MacroCallKind}; use hir_ty::{ all_super_traits, autoderef, - consteval::{unknown_const_as_generic, ComputedExpr, ConstEvalError, ConstExt}, + consteval::{try_const_usize, unknown_const_as_generic, ConstEvalError, ConstExt}, diagnostics::BodyValidationDiagnostic, layout::layout_of_ty, method_resolution::{self, TyFingerprint}, + mir::interpret_mir, primitive::UintTy, traits::FnTrait, AliasTy, CallableDefId, CallableSig, Canonical, CanonicalVarKinds, Cast, ClosureId, - ConcreteConst, ConstValue, GenericArgData, Interner, ParamKind, QuantifiedWhereClause, Scalar, - Substitution, TraitEnvironment, TraitRefExt, Ty, TyBuilder, TyDefId, TyExt, TyKind, - WhereClause, + GenericArgData, Interner, ParamKind, QuantifiedWhereClause, Scalar, Substitution, + TraitEnvironment, TraitRefExt, Ty, TyBuilder, TyDefId, TyExt, TyKind, WhereClause, }; use itertools::Itertools; use nameres::diagnostics::DefDiagnosticKind; @@ -130,6 +129,7 @@ pub use { }, hir_ty::{ display::{HirDisplay, HirDisplayError, HirWrite}, + mir::MirEvalError, PointerCast, Safety, }, }; @@ -1092,8 +1092,8 @@ impl Variant { self.source(db)?.value.expr() } - pub fn eval(self, 
-        db.const_eval_variant(self.into())
+    pub fn eval(self, db: &dyn HirDatabase) -> Result<i128, ConstEvalError> {
+        db.const_eval_discriminant(self.into())
     }
 }
 
@@ -1639,6 +1639,14 @@ impl Function {
         let def_map = db.crate_def_map(loc.krate(db).into());
         def_map.fn_as_proc_macro(self.id).map(|id| Macro { id: id.into() })
     }
+
+    pub fn eval(self, db: &dyn HirDatabase) -> Result<(), MirEvalError> {
+        let body = db
+            .mir_body(self.id.into())
+            .map_err(|e| MirEvalError::MirLowerError(self.id.into(), e))?;
+        interpret_mir(db, &body, false)?;
+        Ok(())
+    }
 }
 
 // Note: logically, this belongs to `hir_ty`, but we are not using it there yet.
@@ -1781,7 +1789,7 @@ impl Const {
         Type::new_with_resolver_inner(db, &resolver, ty)
     }
 
-    pub fn eval(self, db: &dyn HirDatabase) -> Result<ComputedExpr, ConstEvalError> {
+    pub fn eval(self, db: &dyn HirDatabase) -> Result<hir_ty::Const, ConstEvalError> {
         db.const_eval(self.id)
     }
 }
@@ -3268,12 +3276,7 @@ impl Type {
 
     pub fn as_array(&self, _db: &dyn HirDatabase) -> Option<(Type, usize)> {
         if let TyKind::Array(ty, len) = &self.ty.kind(Interner) {
-            match len.data(Interner).value {
-                ConstValue::Concrete(ConcreteConst { interned: ConstScalar::UInt(len) }) => {
-                    Some((self.derived(ty.clone()), len as usize))
-                }
-                _ => None,
-            }
+            try_const_usize(len).map(|x| (self.derived(ty.clone()), x as usize))
         } else {
             None
         }
diff --git a/crates/hir/src/source_analyzer.rs b/crates/hir/src/source_analyzer.rs
index ef5434771d9..61e58cb1c4a 100644
--- a/crates/hir/src/source_analyzer.rs
+++ b/crates/hir/src/source_analyzer.rs
@@ -791,7 +791,7 @@ impl SourceAnalyzer {
             || Arc::new(hir_ty::TraitEnvironment::empty(krate)),
             |d| db.trait_environment(d),
         );
-        method_resolution::lookup_impl_method(db, env, func, substs)
+        method_resolution::lookup_impl_method(db, env, func, substs).0
     }
 
     fn resolve_impl_const_or_trait_def(
@@ -809,7 +809,7 @@ impl SourceAnalyzer {
             || Arc::new(hir_ty::TraitEnvironment::empty(krate)),
             |d| db.trait_environment(d),
         );
-        method_resolution::lookup_impl_const(db, env, const_id, subs)
+        method_resolution::lookup_impl_const(db, env, const_id, subs).0
     }
 
     fn lang_trait_fn(
diff --git a/crates/ide-assists/src/handlers/add_explicit_type.rs b/crates/ide-assists/src/handlers/add_explicit_type.rs
index 0057f439f1a..785ae3d09c6 100644
--- a/crates/ide-assists/src/handlers/add_explicit_type.rs
+++ b/crates/ide-assists/src/handlers/add_explicit_type.rs
@@ -211,10 +211,8 @@ fn main() {
     check_assist_not_applicable(
         add_explicit_type,
         r#"
-//- minicore: option
-
 fn main() {
-    let $0l = [0.0; Some(2).unwrap()];
+    let $0l = [0.0; unresolved_function(5)];
 }
 "#,
     );
diff --git a/crates/ide/src/hover.rs b/crates/ide/src/hover.rs
index 5f2c61f5b5f..64b2221bdea 100644
--- a/crates/ide/src/hover.rs
+++ b/crates/ide/src/hover.rs
@@ -30,6 +30,7 @@ pub struct HoverConfig {
     pub documentation: bool,
     pub keywords: bool,
     pub format: HoverDocFormat,
+    pub interpret_tests: bool,
 }
 
 #[derive(Clone, Debug, PartialEq, Eq)]
diff --git a/crates/ide/src/hover/render.rs b/crates/ide/src/hover/render.rs
index 22611cfb892..749c224c462 100644
--- a/crates/ide/src/hover/render.rs
+++ b/crates/ide/src/hover/render.rs
@@ -3,7 +3,8 @@ use std::fmt::Display;
 
 use either::Either;
 use hir::{
-    Adt, AsAssocItem, AttributeTemplate, HasAttrs, HasSource, HirDisplay, Semantics, TypeInfo,
+    db::DefDatabase, Adt, AsAssocItem, AttributeTemplate, HasAttrs, HasSource, HirDisplay,
+    MirEvalError, Semantics, TypeInfo,
 };
 use ide_db::{
     base_db::SourceDatabase,
@@ -402,7 +403,20 @@ pub(super) fn definition(
             ))
         }),
         Definition::Module(it) => label_and_docs(db, it),
-        Definition::Function(it) => label_and_docs(db, it),
+        Definition::Function(it) => label_and_layout_info_and_docs(db, it, |_| {
+            if !config.interpret_tests {
+                return None;
+            }
+            match it.eval(db) {
+                Ok(()) => Some("pass".into()),
+                Err(MirEvalError::Panic) => Some("fail".into()),
+                Err(MirEvalError::MirLowerError(f, e)) => {
+                    let name = &db.function_data(f).name;
+                    Some(format!("error: failed to lower {name} due to {e:?}"))
+                }
+                Err(e) => Some(format!("error: {e:?}")),
+            }
+        }),
         Definition::Adt(it) => label_and_layout_info_and_docs(db, it, |&it| {
             let layout = it.layout(db).ok()?;
             Some(format!("size = {}, align = {}", layout.size.bytes(), layout.align.abi.bytes()))
@@ -410,7 +424,7 @@ pub(super) fn definition(
         Definition::Variant(it) => label_value_and_docs(db, it, |&it| {
             if !it.parent_enum(db).is_data_carrying(db) {
                 match it.eval(db) {
-                    Ok(x) => Some(format!("{x}")),
+                    Ok(x) => Some(if x >= 10 { format!("{x} ({x:#X})") } else { format!("{x}") }),
                     Err(_) => it.value(db).map(|x| format!("{x:?}")),
                 }
             } else {
@@ -420,7 +434,7 @@ pub(super) fn definition(
         Definition::Const(it) => label_value_and_docs(db, it, |it| {
            let body = it.eval(db);
             match body {
-                Ok(x) => Some(format!("{x}")),
+                Ok(x) => Some(format!("{}", x.display(db))),
                 Err(_) => {
                     let source = it.source(db)?;
                     let mut body = source.value.body()?.syntax().clone();
diff --git a/crates/ide/src/hover/tests.rs b/crates/ide/src/hover/tests.rs
index bd7ce2f1d0d..d4eb314a381 100644
--- a/crates/ide/src/hover/tests.rs
+++ b/crates/ide/src/hover/tests.rs
@@ -4,16 +4,19 @@ use syntax::TextRange;
 
 use crate::{fixture, HoverConfig, HoverDocFormat};
 
+const HOVER_BASE_CONFIG: HoverConfig = HoverConfig {
+    links_in_hover: false,
+    documentation: true,
+    format: HoverDocFormat::Markdown,
+    keywords: true,
+    interpret_tests: false,
+};
+
 fn check_hover_no_result(ra_fixture: &str) {
     let (analysis, position) = fixture::position(ra_fixture);
     let hover = analysis
         .hover(
-            &HoverConfig {
-                links_in_hover: true,
-                documentation: true,
-                keywords: true,
-                format: HoverDocFormat::Markdown,
-            },
+            &HoverConfig { links_in_hover: true, ..HOVER_BASE_CONFIG },
             FileRange { file_id: position.file_id, range: TextRange::empty(position.offset) },
         )
         .unwrap();
@@ -25,12 +28,7 @@ fn check(ra_fixture: &str, expect: Expect) {
     let (analysis, position) = fixture::position(ra_fixture);
     let hover = analysis
         .hover(
-            &HoverConfig {
-                links_in_hover: true,
-                documentation: true,
-                keywords: true,
-                format: HoverDocFormat::Markdown,
-            },
+            &HoverConfig { links_in_hover: true, ..HOVER_BASE_CONFIG },
             FileRange { file_id: position.file_id, range: TextRange::empty(position.offset) },
         )
         .unwrap()
@@ -47,12 +45,7 @@ fn check_hover_no_links(ra_fixture: &str, expect: Expect) {
     let (analysis, position) = fixture::position(ra_fixture);
     let hover = analysis
         .hover(
-            &HoverConfig {
-                links_in_hover: false,
-                documentation: true,
-                keywords: true,
-                format: HoverDocFormat::Markdown,
-            },
+            &HOVER_BASE_CONFIG,
             FileRange { file_id: position.file_id, range: TextRange::empty(position.offset) },
         )
         .unwrap()
@@ -71,9 +64,8 @@ fn check_hover_no_markdown(ra_fixture: &str, expect: Expect) {
         .hover(
             &HoverConfig {
                 links_in_hover: true,
-                documentation: true,
-                keywords: true,
                 format: HoverDocFormat::PlainText,
+                ..HOVER_BASE_CONFIG
             },
             FileRange { file_id: position.file_id, range: TextRange::empty(position.offset) },
         )
@@ -91,12 +83,7 @@ fn check_actions(ra_fixture: &str, expect: Expect) {
     let (analysis, file_id, position) = fixture::range_or_position(ra_fixture);
     let hover = analysis
         .hover(
-            &HoverConfig {
-                links_in_hover: true,
-                documentation: true,
-                keywords: true,
-                format: HoverDocFormat::Markdown,
-            },
+            &HoverConfig { links_in_hover: true, ..HOVER_BASE_CONFIG },
             FileRange { file_id, range: position.range_or_empty() },
         )
         .unwrap()
@@ -106,34 +93,13 @@ fn check_actions(ra_fixture: &str, expect: Expect) {
 
 fn check_hover_range(ra_fixture: &str, expect: Expect) {
     let (analysis, range) = fixture::range(ra_fixture);
-    let hover = analysis
-        .hover(
-            &HoverConfig {
-                links_in_hover: false,
-                documentation: true,
-                keywords: true,
-                format: HoverDocFormat::Markdown,
-            },
-            range,
-        )
-        .unwrap()
-        .unwrap();
+    let hover = analysis.hover(&HOVER_BASE_CONFIG, range).unwrap().unwrap();
     expect.assert_eq(hover.info.markup.as_str())
 }
 
 fn check_hover_range_no_results(ra_fixture: &str) {
     let (analysis, range) = fixture::range(ra_fixture);
-    let hover = analysis
-        .hover(
-            &HoverConfig {
-                links_in_hover: false,
-                documentation: true,
-                keywords: true,
-                format: HoverDocFormat::Markdown,
-            },
-            range,
-        )
-        .unwrap();
+    let hover = analysis.hover(&HOVER_BASE_CONFIG, range).unwrap();
     assert!(hover.is_none());
 }
 
@@ -490,7 +456,6 @@ fn hover_field_offset() {
     // Hovering over the field when instantiating
     check(
         r#"
-//- /main.rs target_data_layout:e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128
 struct Foo { fiel$0d_a: u8, field_b: i32, field_c: i16 }
 "#,
         expect![[r#"
@@ -512,7 +477,6 @@ fn hover_shows_struct_field_info() {
     // Hovering over the field when instantiating
     check(
         r#"
-//- /main.rs target_data_layout:e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128
 struct Foo { field_a: u32 }
 
 fn main() {
@@ -535,7 +499,6 @@ fn main() {
     // Hovering over the field in the definition
     check(
         r#"
-//- /main.rs target_data_layout:e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128
 struct Foo { field_a$0: u32 }
 
 fn main() {
@@ -568,7 +531,7 @@ fn hover_const_static() {
             ```
 
             ```rust
-            const foo: u32 = 123 (0x7B)
+            const foo: u32 = 123
             ```
         "#]],
     );
@@ -1467,8 +1430,6 @@ fn my() {}
 fn test_hover_struct_doc_comment() {
     check(
         r#"
-//- /main.rs target_data_layout:e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128
-
 /// This is an example
 /// multiline doc
 ///
@@ -1527,7 +1488,7 @@ fn foo() { let bar = Ba$0r; }
             ```
 
             ```rust
-            struct Bar
+            struct Bar // size = 0, align = 1
             ```
 
             ---
@@ -1556,7 +1517,7 @@ fn foo() { let bar = Ba$0r; }
             ```
 
             ```rust
-            struct Bar
+            struct Bar // size = 0, align = 1
             ```
 
             ---
@@ -1584,7 +1545,7 @@ pub struct B$0ar
             ```
 
             ```rust
-            pub struct Bar
+            pub struct Bar // size = 0, align = 1
             ```
 
             ---
@@ -1611,7 +1572,7 @@ pub struct B$0ar
             ```
 
             ```rust
-            pub struct Bar
+            pub struct Bar // size = 0, align = 1
             ```
 
             ---
@@ -2913,8 +2874,6 @@ fn main() { let foo_test = name_with_dashes::wrapper::Thing::new$0(); }
 fn hover_field_pat_shorthand_ref_match_ergonomics() {
     check(
         r#"
-//- /main.rs target_data_layout:e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128
-
 struct S {
     f: i32,
 }
@@ -3506,8 +3465,8 @@ impl<const LEN: usize> Foo<LEN$0> {}
 }
 
 #[test]
-fn hover_const_eval_variant() {
-    // show hex for <10
+fn hover_const_eval_discriminant() {
+    // Don't show hex for <10
     check(
         r#"
 #[repr(u8)]
@@ -3532,7 +3491,7 @@ enum E {
             This is a doc
         "#]],
     );
-    // show hex for >10
+    // Show hex for >10
     check(
         r#"
 #[repr(u8)]
@@ -3656,7 +3615,7 @@ trait T {
 }
 impl T for i32 {
     const AA: A = A {
-        i: 2
+        i: 2 + 3
     }
 }
 fn main() {
@@ -3671,9 +3630,7 @@ fn main() {
             ```
 
             ```rust
-            const AA: A = A {
-                i: 2
-            }
+            const AA: A = A { i: 5 }
             ```
         "#]],
     );
@@ -3792,7 +3749,7 @@ const FOO$0: usize = 1 << 3;
             This is a doc
         "#]],
     );
-    // show hex for >10
+    // FIXME: show hex for >10
     check(
         r#"
 /// This is a doc
@@ -3806,7 +3763,7 @@ const FOO$0: usize = (1 << 3) + (1 << 2);
             ```
 
             ```rust
-            const FOO: usize = 12 (0xC)
+            const FOO: usize = 12
             ```
 
             ---
@@ -3937,7 +3894,7 @@ const FOO$0: u8 = b'a';
             ```
 
             ```rust
-            const FOO: u8 = 97 (0x61)
+            const FOO: u8 = 97
             ```
 
             ---
@@ -3959,7 +3916,7 @@ const FOO$0: u8 = b'\x61';
             ```
 
             ```rust
-            const FOO: u8 = 97 (0x61)
+            const FOO: u8 = 97
             ```
 
             ---
@@ -4354,8 +4311,6 @@ fn main() {
 fn hover_intra_doc_links() {
     check(
         r#"
-//- /main.rs target_data_layout:e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128
-
 pub mod theitem {
     /// This is the item. Cool!
     pub struct TheItem;
@@ -4496,7 +4451,7 @@ trait A where
 fn string_shadowed_with_inner_items() {
     check(
         r#"
-//- /main.rs crate:main deps:alloc target_data_layout:e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128
+//- /main.rs crate:main deps:alloc
 /// Custom `String` type.
 struct String;
 
@@ -5191,7 +5146,7 @@ foo_macro!(
             ```
 
             ```rust
-            pub struct Foo
+            pub struct Foo // size = 0, align = 1
             ```
 
             ---
@@ -5205,8 +5160,6 @@ foo_macro!(
 fn hover_intra_in_attr() {
     check(
         r#"
-//- /main.rs target_data_layout:e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128
-
 #[doc = "Doc comment for [`Foo$0`]"]
 pub struct Foo(i32);
 "#,
@@ -5295,7 +5248,7 @@ pub struct Type;
             ```
 
             ```rust
-            const KONST: dep::Type = $crate::Type
+            const KONST: dep::Type = Type
             ```
         "#]],
     );
@@ -5327,8 +5280,6 @@ enum Enum {
 fn hover_record_variant_field() {
     check(
         r#"
-//- /main.rs target_data_layout:e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128
-
 enum Enum {
     RecordV { field$0: u32 }
 }
diff --git a/crates/ide/src/inlay_hints/discriminant.rs b/crates/ide/src/inlay_hints/discriminant.rs
index 5dd51ad11f4..67eaa553ada 100644
--- a/crates/ide/src/inlay_hints/discriminant.rs
+++ b/crates/ide/src/inlay_hints/discriminant.rs
@@ -59,8 +59,14 @@ fn variant_hints(
         },
         kind: InlayKind::Discriminant,
         label: InlayHintLabel::simple(
-            match &d {
-                Ok(v) => format!("{}", v),
+            match d {
+                Ok(x) => {
+                    if x >= 10 {
+                        format!("{x} ({x:#X})")
+                    } else {
+                        format!("{x}")
+                    }
+                }
                 Err(_) => "?".into(),
             },
             Some(InlayTooltip::String(match &d {
diff --git a/crates/ide/src/static_index.rs b/crates/ide/src/static_index.rs
index 3f7f6885f61..c97691b14a5 100644
--- a/crates/ide/src/static_index.rs
+++ b/crates/ide/src/static_index.rs
@@ -139,6 +139,7 @@ impl StaticIndex<'_> {
             documentation: true,
             keywords: true,
             format: crate::HoverDocFormat::Markdown,
+            interpret_tests: false,
         };
         let tokens = tokens.filter(|token| {
             matches!(
diff --git a/crates/rust-analyzer/src/config.rs b/crates/rust-analyzer/src/config.rs
index 48d3fd0e2b9..9c832462be5 100644
--- a/crates/rust-analyzer/src/config.rs
+++ b/crates/rust-analyzer/src/config.rs
@@ -366,6 +366,8 @@ config_data! {
         inlayHints_typeHints_hideClosureInitialization: bool = "false",
         /// Whether to hide inlay type hints for constructors.
         inlayHints_typeHints_hideNamedConstructor: bool = "false",
+        /// Enables the experimental support for interpreting tests.
+        interpret_tests: bool = "false",
         /// Join lines merges consecutive declaration and initialization of an assignment.
         joinLines_joinAssignments: bool = "true",
@@ -1444,6 +1446,7 @@ impl Config {
                 }
             },
             keywords: self.data.hover_documentation_keywords_enable,
+            interpret_tests: self.data.interpret_tests,
         }
     }
 
diff --git a/crates/test-utils/src/fixture.rs b/crates/test-utils/src/fixture.rs
index d1afd0039aa..cd1235fa6dc 100644
--- a/crates/test-utils/src/fixture.rs
+++ b/crates/test-utils/src/fixture.rs
@@ -180,7 +180,9 @@ impl Fixture {
         let mut cfg_key_values = Vec::new();
         let mut env = FxHashMap::default();
         let mut introduce_new_source_root = None;
-        let mut target_data_layout = None;
+        let mut target_data_layout = Some(
+            "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128".to_string(),
+        );
         for component in components[1..].iter() {
             let (key, value) =
                 component.split_once(':').unwrap_or_else(|| panic!("invalid meta line: {meta:?}"));
diff --git a/crates/test-utils/src/minicore.rs b/crates/test-utils/src/minicore.rs
index 3b033e1aae5..7b48e42489c 100644
--- a/crates/test-utils/src/minicore.rs
+++ b/crates/test-utils/src/minicore.rs
@@ -510,6 +510,7 @@ pub mod fmt {
 pub mod slice {
     #[lang = "slice"]
     impl<T> [T] {
+        #[lang = "slice_len_fn"]
         pub fn len(&self) -> usize {
             loop {}
         }
diff --git a/docs/user/generated_config.adoc b/docs/user/generated_config.adoc
index ed7df3eac3d..1bc498c42ce 100644
--- a/docs/user/generated_config.adoc
+++ b/docs/user/generated_config.adoc
@@ -537,6 +537,11 @@ Only applies to closures with blocks, same as `#rust-analyzer.inlayHints.closure
 --
 Whether to hide inlay type hints for constructors.
 --
+[[rust-analyzer.interpret.tests]]rust-analyzer.interpret.tests (default: `false`)::
++
+--
+Enables the experimental support for interpreting tests.
+--
 [[rust-analyzer.joinLines.joinAssignments]]rust-analyzer.joinLines.joinAssignments (default: `true`)::
 +
 --
diff --git a/editors/code/package.json b/editors/code/package.json
index 631f31c17ea..effe4e279c7 100644
--- a/editors/code/package.json
+++ b/editors/code/package.json
@@ -1110,6 +1110,11 @@
                     "default": false,
                     "type": "boolean"
                 },
+                "rust-analyzer.interpret.tests": {
+                    "markdownDescription": "Enables the experimental support for interpreting tests.",
+                    "default": false,
+                    "type": "boolean"
+                },
                 "rust-analyzer.joinLines.joinAssignments": {
                     "markdownDescription": "Join lines merges consecutive declaration and initialization of an assignment.",
                     "default": true,