diff --git a/compiler/rustc_ast/src/util/classify.rs b/compiler/rustc_ast/src/util/classify.rs index 4b2544ac47e..541b95ea971 100644 --- a/compiler/rustc_ast/src/util/classify.rs +++ b/compiler/rustc_ast/src/util/classify.rs @@ -1,7 +1,8 @@ //! Routines the parser and pretty-printer use to classify AST nodes. use crate::ast::ExprKind::*; -use crate::{ast, token::Delimiter}; +use crate::ast::{self, MatchKind}; +use crate::token::Delimiter; /// This classification determines whether various syntactic positions break out /// of parsing the current expression (true) or continue parsing more of the @@ -81,6 +82,82 @@ pub fn expr_requires_semi_to_be_stmt(e: &ast::Expr) -> bool { } } +/// Returns whether the leftmost token of the given expression is the label of a +/// labeled loop or block, such as in `'inner: loop { break 'inner 1 } + 1`. +/// +/// Such expressions are not allowed as the value of an unlabeled break. +/// +/// ```ignore (illustrative) +/// 'outer: { +/// break 'inner: loop { break 'inner 1 } + 1; // invalid syntax +/// +/// break 'outer 'inner: loop { break 'inner 1 } + 1; // okay +/// +/// break ('inner: loop { break 'inner 1 } + 1); // okay +/// +/// break ('inner: loop { break 'inner 1 }) + 1; // okay +/// } +/// ``` +pub fn leading_labeled_expr(mut expr: &ast::Expr) -> bool { + loop { + match &expr.kind { + Block(_, label) | ForLoop { label, .. } | Loop(_, label, _) | While(_, _, label) => { + return label.is_some(); + } + + Assign(e, _, _) + | AssignOp(_, e, _) + | Await(e, _) + | Binary(_, e, _) + | Call(e, _) + | Cast(e, _) + | Field(e, _) + | Index(e, _, _) + | Match(e, _, MatchKind::Postfix) + | Range(Some(e), _, _) + | Try(e) => { + expr = e; + } + MethodCall(method_call) => { + expr = &method_call.receiver; + } + + AddrOf(..) + | Array(..) + | Become(..) + | Break(..) + | Closure(..) + | ConstBlock(..) + | Continue(..) + | FormatArgs(..) + | Gen(..) + | If(..) + | IncludedBytes(..) + | InlineAsm(..) + | Let(..) + | Lit(..) + | MacCall(..) + | Match(_, _, MatchKind::Prefix) + | OffsetOf(..) + | Paren(..) + | Path(..) + | Range(None, _, _) + | Repeat(..) + | Ret(..) + | Struct(..) + | TryBlock(..) + | Tup(..) + | Type(..) + | Unary(..) + | Underscore + | Yeet(..) + | Yield(..) + | Err(..) + | Dummy => return false, + } + } +} + pub enum TrailingBrace<'a> { /// Trailing brace in a macro call, like the one in `x as *const brace! {}`. /// We will suggest changing the macro call to a different delimiter. 
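The grammar corner handled by `leading_labeled_expr` is easiest to see in a standalone program. Here is a minimal sketch (illustrative only, not compiler code; the labels are arbitrary): an unlabeled `break` may not take a value whose leftmost token is a loop label, so the labeled loop has to be parenthesized (or the `break` itself labeled, as the doc comment above shows).

```rust
fn main() {
    // Without the parentheses around the labeled loop this is a syntax
    // error, which is exactly the case `leading_labeled_expr` detects so
    // the pretty-printer can reinsert the parentheses.
    let v = loop {
        break ('inner: loop { break 'inner 1 }) + 1;
    };
    assert_eq!(v, 2);
}
```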
diff --git a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs index 3d1f43a3766..5b13858f839 100644 --- a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs +++ b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs @@ -5,6 +5,7 @@ use itertools::{Itertools, Position}; use rustc_ast::ptr::P; use rustc_ast::token; +use rustc_ast::util::classify; use rustc_ast::util::literal::escape_byte_str_symbol; use rustc_ast::util::parser::{self, AssocOp, Fixity}; use rustc_ast::{self as ast, BlockCheckMode}; @@ -610,9 +611,12 @@ pub(super) fn print_expr_outer_attr_style( } if let Some(expr) = opt_expr { self.space(); - self.print_expr_maybe_paren( + self.print_expr_cond_paren( expr, - parser::PREC_JUMP, + // Parenthesize if required by precedence, or in the + // case of `break 'inner: loop { break 'inner 1 } + 1` + expr.precedence().order() < parser::PREC_JUMP + || (opt_label.is_none() && classify::leading_labeled_expr(expr)), fixup.subsequent_subexpression(), ); } diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index a6a3f0f9646..d034f9b5256 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -226,7 +226,8 @@ fn store( // when passed by value, making it smaller. // - On some ABIs, the Rust layout { u16, u16, u16 } may be padded up to 8 bytes // when passed by value, making it larger. - let copy_bytes = cmp::min(scratch_size.bytes(), self.layout.size.bytes()); + let copy_bytes = + cmp::min(cast.unaligned_size(bx).bytes(), self.layout.size.bytes()); // Allocate some scratch space... let llscratch = bx.alloca(scratch_size, scratch_align); bx.lifetime_start(llscratch, scratch_size); diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index b1c22faf1ae..0976be7ff8d 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -403,7 +403,7 @@ fn codegen_switchint_terminator( // // Why only in unoptimized builds? // - In unoptimized builds LLVM uses FastISel which does not support switches, so it - // must fall back to the to the slower SelectionDAG isel. Therefore, using `br` gives + // must fall back to the slower SelectionDAG isel. Therefore, using `br` gives // significant compile time speedups for unoptimized builds. // - In optimized builds the above doesn't hold, and using `br` sometimes results in // worse generated code because LLVM can no longer tell that the value being switched @@ -1521,7 +1521,7 @@ fn codegen_argument( // when passed by value, making it smaller. // - On some ABIs, the Rust layout { u16, u16, u16 } may be padded up to 8 bytes // when passed by value, making it larger. - let copy_bytes = cmp::min(scratch_size.bytes(), arg.layout.size.bytes()); + let copy_bytes = cmp::min(cast.unaligned_size(bx).bytes(), arg.layout.size.bytes()); // Allocate some scratch space... 
let llscratch = bx.alloca(scratch_size, scratch_align); bx.lifetime_start(llscratch, scratch_size); diff --git a/compiler/rustc_codegen_ssa/src/mir/locals.rs b/compiler/rustc_codegen_ssa/src/mir/locals.rs index a6c873e195e..5190021c005 100644 --- a/compiler/rustc_codegen_ssa/src/mir/locals.rs +++ b/compiler/rustc_codegen_ssa/src/mir/locals.rs @@ -47,7 +47,7 @@ pub(super) fn initialize_locals(&mut self, values: Vec let expected_ty = self.monomorphize(self.mir.local_decls[local].ty); if expected_ty != op.layout.ty { warn!( - "Unexpected initial operand type: expected {expected_ty:?}, found {:?}.\ + "Unexpected initial operand type:\nexpected {expected_ty:?},\nfound {:?}.\n\ See ., op.layout.ty ); diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs index e8da9842882..61f57c9030a 100644 --- a/compiler/rustc_codegen_ssa/src/mir/mod.rs +++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs @@ -230,10 +230,20 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( let layout = start_bx.layout_of(fx.monomorphize(decl.ty)); assert!(!layout.ty.has_erasable_regions()); - if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() { - debug!("alloc: {:?} (return place) -> place", local); - let llretptr = start_bx.get_param(0); - return LocalRef::Place(PlaceRef::new_sized(llretptr, layout)); + if local == mir::RETURN_PLACE { + match fx.fn_abi.ret.mode { + PassMode::Indirect { .. } => { + debug!("alloc: {:?} (return place) -> place", local); + let llretptr = start_bx.get_param(0); + return LocalRef::Place(PlaceRef::new_sized(llretptr, layout)); + } + PassMode::Cast { ref cast, .. } => { + debug!("alloc: {:?} (return place) -> place", local); + let size = cast.size(&start_bx); + return LocalRef::Place(PlaceRef::alloca_size(&mut start_bx, size, layout)); + } + _ => {} + }; } if memory_locals.contains(local) { diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs index 449fd9ae0db..97d5bb83128 100644 --- a/compiler/rustc_codegen_ssa/src/mir/place.rs +++ b/compiler/rustc_codegen_ssa/src/mir/place.rs @@ -108,9 +108,17 @@ pub fn new_sized_aligned( pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>( bx: &mut Bx, layout: TyAndLayout<'tcx>, + ) -> Self { + Self::alloca_size(bx, layout.size, layout) + } + + pub fn alloca_size<Bx: BuilderMethods<'a, 'tcx, Value = V>>( + bx: &mut Bx, + size: Size, + layout: TyAndLayout<'tcx>, ) -> Self { assert!(layout.is_sized(), "tried to statically allocate unsized place"); - PlaceValue::alloca(bx, layout.size, layout.align.abi).with_type(layout) + PlaceValue::alloca(bx, size, layout.align.abi).with_type(layout) } /// Returns a place for an indirect reference to an unsized place. diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs index b3a139d553a..181c7115386 100644 --- a/compiler/rustc_const_eval/src/interpret/discriminant.rs +++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs @@ -245,7 +245,7 @@ pub(crate) fn tag_for_variant( // The tag of a `Single` enum is like the tag of the niched // variant: there's no tag as the discriminant is encoded // entirely implicitly. If `write_discriminant` ever hits this - // case, we do a "validation read" to ensure the the right + // case, we do a "validation read" to ensure the right // discriminant is encoded implicitly, so any attempt to write // the wrong discriminant for a `Single` enum will reliably // result in UB.
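Both `copy_bytes` changes and the new `alloca_size` return-place path rest on the same distinction: a cast target's meaningful bytes (`unaligned_size`) versus its alignment-padded size (`size`, defined later in this patch as `unaligned_size` rounded up). A toy sketch of that arithmetic, with a hypothetical `Size` stand-in rather than the `rustc_target` type:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct Size(u64);

impl Size {
    // Round up to `align` bytes; `align` must be a power of two.
    fn align_to(self, align: u64) -> Size {
        Size((self.0 + align - 1) & !(align - 1))
    }
}

fn main() {
    // A cast target with 10 meaningful bytes and 8-byte alignment:
    // the aligned size is 16, but only 10 bytes carry data.
    let unaligned = Size(10);
    assert_eq!(unaligned.align_to(8), Size(16));

    // If the Rust-side layout is only 8 bytes, copying the padded 16
    // bytes would overrun it, hence `min(cast_bytes, layout_bytes)`.
    let rust_layout = Size(8);
    assert_eq!(unaligned.0.min(rust_layout.0), 8);
}
```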
diff --git a/compiler/rustc_hir_typeck/src/method/suggest.rs b/compiler/rustc_hir_typeck/src/method/suggest.rs index e310730bf9e..dba3edbc1d7 100644 --- a/compiler/rustc_hir_typeck/src/method/suggest.rs +++ b/compiler/rustc_hir_typeck/src/method/suggest.rs @@ -499,7 +499,7 @@ fn check_and_add_sugg_binding(&mut self, binding: LetStmt) -> bool { } } - // If the shadowed binding has an an itializer expression, + // If the shadowed binding has an initializer expression, // use the initializer expression'ty to try to find the method again. // For example like: `let mut x = Vec::new();`, // `Vec::new()` is the itializer expression. @@ -968,7 +968,7 @@ fn report_no_match_method_error( } // Make sure that, if any traits other than the found ones were involved, - // we don't don't report an unimplemented trait. + // we don't report an unimplemented trait. // We don't want to say that `iter::Cloned` is not an iterator, just // because of some non-Clone item being iterated over. for (predicate, _parent_pred, _cause) in unsatisfied_predicates { @@ -2129,7 +2129,7 @@ fn suggest_associated_call_syntax( let target_ty = self .autoderef(sugg_span, rcvr_ty) .find(|(rcvr_ty, _)| { - DeepRejectCtxt { treat_obligation_params: TreatParams::AsCandidateKey } + DeepRejectCtxt::new(self.tcx, TreatParams::ForLookup) .types_may_unify(*rcvr_ty, impl_ty) }) .map_or(impl_ty, |(ty, _)| ty) diff --git a/compiler/rustc_middle/src/ty/closure.rs b/compiler/rustc_middle/src/ty/closure.rs index bade0d56415..bdd9a6bab2b 100644 --- a/compiler/rustc_middle/src/ty/closure.rs +++ b/compiler/rustc_middle/src/ty/closure.rs @@ -237,7 +237,7 @@ pub fn closure_captures(self, def_id: LocalDefId) -> &'tcx [&'tcx ty::CapturedPl /// Eg: 1. `foo.x` which is represented using `projections=[Field(x)]` is an ancestor of /// `foo.x.y` which is represented using `projections=[Field(x), Field(y)]`. /// Note both `foo.x` and `foo.x.y` start off of the same root variable `foo`. -/// 2. Since we only look at the projections here function will return `bar.x` as an a valid +/// 2. Since we only look at the projections here, the function will return `bar.x` as a valid /// ancestor of `foo.x.y`. It's the caller's responsibility to ensure that both projections /// list are being applied to the same root variable. pub fn is_ancestor_or_same_capture( diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs index 9225ae6300f..0771b0aa725 100644 --- a/compiler/rustc_middle/src/ty/context.rs +++ b/compiler/rustc_middle/src/ty/context.rs @@ -373,17 +373,6 @@ fn associated_type_def_ids(self, def_id: DefId) -> impl IntoIterator - fn args_may_unify_deep( - self, - obligation_args: ty::GenericArgsRef<'tcx>, - impl_args: ty::GenericArgsRef<'tcx>, - ) -> bool { - ty::fast_reject::DeepRejectCtxt { - treat_obligation_params: ty::fast_reject::TreatParams::ForLookup, - } - .args_may_unify(obligation_args, impl_args) - } - // This implementation is a bit different from `TyCtxt::for_each_relevant_impl`, // since we want to skip over blanket impls for non-rigid aliases, and also we // only want to consider types that *actually* unify with float/int vars.
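The `DeepRejectCtxt::new(...)` call sites above (and the uplifted implementation later in this diff) all answer the same question: could an obligation and an impl header possibly unify, without spinning up an inference context? A toy model of that idea (a hypothetical mini type language, not the rustc API; the real `TreatParams` semantics are documented in `fast_reject.rs` below). The key property is one-sided conservatism: `false` must prove unification is impossible, while `true` merely defers to the full check.

```rust
#[derive(PartialEq)]
enum Ty {
    Bool,
    Int,
    Param,          // a generic parameter
    Ref(Box<Ty>),   // &T
    Tuple(Vec<Ty>),
}

#[derive(Clone, Copy)]
enum TreatParams {
    AsCandidateKey, // obligation params unify with anything
    ForLookup,      // obligation params behave like placeholders
}

// Conservative check: `false` means "provably cannot unify";
// `true` means "might unify, run the real (expensive) check".
fn types_may_unify(obl: &Ty, imp: &Ty, mode: TreatParams) -> bool {
    match (obl, imp) {
        // An impl-side parameter can be instantiated to anything.
        (_, Ty::Param) => true,
        (Ty::Param, _) => matches!(mode, TreatParams::AsCandidateKey),
        (Ty::Ref(o), Ty::Ref(i)) => types_may_unify(o, i, mode),
        (Ty::Tuple(o), Ty::Tuple(i)) => {
            o.len() == i.len()
                && o.iter().zip(i).all(|(o, i)| types_may_unify(o, i, mode))
        }
        // Rigid types: structural equality.
        (o, i) => o == i,
    }
}

fn main() {
    let obl = Ty::Tuple(vec![Ty::Bool, Ty::Ref(Box::new(Ty::Int))]);
    let imp = Ty::Tuple(vec![Ty::Bool, Ty::Ref(Box::new(Ty::Param))]);
    assert!(types_may_unify(&obl, &imp, TreatParams::ForLookup));
    // A `Bool` obligation can never match an `Int` impl: fast-reject it.
    assert!(!types_may_unify(&Ty::Bool, &Ty::Int, TreatParams::ForLookup));
    // In coherence mode, obligation-side params act like infer vars.
    assert!(types_may_unify(&Ty::Param, &Ty::Int, TreatParams::AsCandidateKey));
}
```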
diff --git a/compiler/rustc_middle/src/ty/fast_reject.rs b/compiler/rustc_middle/src/ty/fast_reject.rs index 923667e609b..0413cfa5a63 100644 --- a/compiler/rustc_middle/src/ty/fast_reject.rs +++ b/compiler/rustc_middle/src/ty/fast_reject.rs @@ -1,369 +1,9 @@ -use crate::mir::Mutability; -use crate::ty::GenericArgKind; -use crate::ty::{self, GenericArgsRef, Ty, TyCtxt, TypeVisitableExt}; use rustc_hir::def_id::DefId; -use rustc_macros::{HashStable, TyDecodable, TyEncodable}; -use std::fmt::Debug; -use std::hash::Hash; -use std::iter; -/// See `simplify_type`. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)] -pub enum SimplifiedType { - Bool, - Char, - Int(ty::IntTy), - Uint(ty::UintTy), - Float(ty::FloatTy), - Adt(DefId), - Foreign(DefId), - Str, - Array, - Slice, - Ref(Mutability), - Ptr(Mutability), - Never, - Tuple(usize), - /// A trait object, all of whose components are markers - /// (e.g., `dyn Send + Sync`). - MarkerTraitObject, - Trait(DefId), - Closure(DefId), - Coroutine(DefId), - CoroutineWitness(DefId), - Function(usize), - Placeholder, - Error, -} +use super::TyCtxt; -/// Generic parameters are pretty much just bound variables, e.g. -/// the type of `fn foo<'a, T>(x: &'a T) -> u32 { ... }` can be thought of as -/// `for<'a, T> fn(&'a T) -> u32`. -/// -/// Typecheck of `foo` has to succeed for all possible generic arguments, so -/// during typeck, we have to treat its generic parameters as if they -/// were placeholders. -/// -/// But when calling `foo` we only have to provide a specific generic argument. -/// In that case the generic parameters are instantiated with inference variables. -/// As we use `simplify_type` before that instantiation happens, we just treat -/// generic parameters as if they were inference variables in that case. -#[derive(PartialEq, Eq, Debug, Clone, Copy)] -pub enum TreatParams { - /// Treat parameters as infer vars. This is the correct mode for caching - /// an impl's type for lookup. - AsCandidateKey, - /// Treat parameters as placeholders in the given environment. This is the - /// correct mode for *lookup*, as during candidate selection. - /// - /// This also treats projections with inference variables as infer vars - /// since they could be further normalized. - ForLookup, -} +pub use rustc_type_ir::fast_reject::*; -/// Tries to simplify a type by only returning the outermost injective¹ layer, if one exists. -/// -/// **This function should only be used if you need to store or retrieve the type from some -/// hashmap. If you want to quickly decide whether two types may unify, use the [DeepRejectCtxt] -/// instead.** -/// -/// The idea is to get something simple that we can use to quickly decide if two types could unify, -/// for example during method lookup. If this function returns `Some(x)` it can only unify with -/// types for which this method returns either `Some(x)` as well or `None`. -/// -/// A special case here are parameters and projections, which are only injective -/// if they are treated as placeholders. -/// -/// For example when storing impls based on their simplified self type, we treat -/// generic parameters as if they were inference variables. We must not simplify them here, -/// as they can unify with any other type. -/// -/// With projections we have to be even more careful, as treating them as placeholders -/// is only correct if they are fully normalized. -/// -/// ¹ meaning that if the outermost layers are different, then the whole types are also different. 
-pub fn simplify_type<'tcx>( - tcx: TyCtxt<'tcx>, - ty: Ty<'tcx>, - treat_params: TreatParams, -) -> Option<SimplifiedType> { - match *ty.kind() { - ty::Bool => Some(SimplifiedType::Bool), - ty::Char => Some(SimplifiedType::Char), - ty::Int(int_type) => Some(SimplifiedType::Int(int_type)), - ty::Uint(uint_type) => Some(SimplifiedType::Uint(uint_type)), - ty::Float(float_type) => Some(SimplifiedType::Float(float_type)), - ty::Adt(def, _) => Some(SimplifiedType::Adt(def.did())), - ty::Str => Some(SimplifiedType::Str), - ty::Array(..) => Some(SimplifiedType::Array), - ty::Slice(..) => Some(SimplifiedType::Slice), - ty::Pat(ty, ..) => simplify_type(tcx, ty, treat_params), - ty::RawPtr(_, mutbl) => Some(SimplifiedType::Ptr(mutbl)), - ty::Dynamic(trait_info, ..) => match trait_info.principal_def_id() { - Some(principal_def_id) if !tcx.trait_is_auto(principal_def_id) => { - Some(SimplifiedType::Trait(principal_def_id)) - } - _ => Some(SimplifiedType::MarkerTraitObject), - }, - ty::Ref(_, _, mutbl) => Some(SimplifiedType::Ref(mutbl)), - ty::FnDef(def_id, _) | ty::Closure(def_id, _) | ty::CoroutineClosure(def_id, _) => { - Some(SimplifiedType::Closure(def_id)) - } - ty::Coroutine(def_id, _) => Some(SimplifiedType::Coroutine(def_id)), - ty::CoroutineWitness(def_id, _) => Some(SimplifiedType::CoroutineWitness(def_id)), - ty::Never => Some(SimplifiedType::Never), - ty::Tuple(tys) => Some(SimplifiedType::Tuple(tys.len())), - ty::FnPtr(f) => Some(SimplifiedType::Function(f.skip_binder().inputs().len())), - ty::Placeholder(..) => Some(SimplifiedType::Placeholder), - ty::Param(_) => match treat_params { - TreatParams::ForLookup => Some(SimplifiedType::Placeholder), - TreatParams::AsCandidateKey => None, - }, - ty::Alias(..) => match treat_params { - // When treating `ty::Param` as a placeholder, projections also - // don't unify with anything else as long as they are fully normalized. - // FIXME(-Znext-solver): Can remove this `if` and always simplify to `Placeholder` - // when the new solver is enabled by default. - TreatParams::ForLookup if !ty.has_non_region_infer() => { - Some(SimplifiedType::Placeholder) - } - TreatParams::ForLookup | TreatParams::AsCandidateKey => None, - }, - ty::Foreign(def_id) => Some(SimplifiedType::Foreign(def_id)), - ty::Error(_) => Some(SimplifiedType::Error), - ty::Bound(..) | ty::Infer(_) => None, - } -} +pub type DeepRejectCtxt<'tcx> = rustc_type_ir::fast_reject::DeepRejectCtxt<TyCtxt<'tcx>>; -impl SimplifiedType { - pub fn def(self) -> Option<DefId> { - match self { - SimplifiedType::Adt(d) - | SimplifiedType::Foreign(d) - | SimplifiedType::Trait(d) - | SimplifiedType::Closure(d) - | SimplifiedType::Coroutine(d) - | SimplifiedType::CoroutineWitness(d) => Some(d), - _ => None, - } - } -} - -/// Given generic arguments from an obligation and an impl, -/// could these two be unified after replacing parameters in the -/// the impl with inference variables. -/// -/// For obligations, parameters won't be replaced by inference -/// variables and only unify with themselves. We treat them -/// the same way we treat placeholders. -/// -/// We also use this function during coherence. For coherence the -/// impls only have to overlap for some value, so we treat parameters -/// on both sides like inference variables. This behavior is toggled -/// using the `treat_obligation_params` field.
-#[derive(Debug, Clone, Copy)] -pub struct DeepRejectCtxt { - pub treat_obligation_params: TreatParams, -} - -impl DeepRejectCtxt { - pub fn args_may_unify<'tcx>( - self, - obligation_args: GenericArgsRef<'tcx>, - impl_args: GenericArgsRef<'tcx>, - ) -> bool { - iter::zip(obligation_args, impl_args).all(|(obl, imp)| { - match (obl.unpack(), imp.unpack()) { - // We don't fast reject based on regions. - (GenericArgKind::Lifetime(_), GenericArgKind::Lifetime(_)) => true, - (GenericArgKind::Type(obl), GenericArgKind::Type(imp)) => { - self.types_may_unify(obl, imp) - } - (GenericArgKind::Const(obl), GenericArgKind::Const(imp)) => { - self.consts_may_unify(obl, imp) - } - _ => bug!("kind mismatch: {obl} {imp}"), - } - }) - } - - pub fn types_may_unify<'tcx>(self, obligation_ty: Ty<'tcx>, impl_ty: Ty<'tcx>) -> bool { - match impl_ty.kind() { - // Start by checking whether the type in the impl may unify with - // pretty much everything. Just return `true` in that case. - ty::Param(_) | ty::Error(_) | ty::Alias(..) => return true, - // These types only unify with inference variables or their own - // variant. - ty::Bool - | ty::Char - | ty::Int(_) - | ty::Uint(_) - | ty::Float(_) - | ty::Adt(..) - | ty::Str - | ty::Array(..) - | ty::Slice(..) - | ty::RawPtr(..) - | ty::Dynamic(..) - | ty::Pat(..) - | ty::Ref(..) - | ty::Never - | ty::Tuple(..) - | ty::FnPtr(..) - | ty::Foreign(..) => debug_assert!(impl_ty.is_known_rigid()), - ty::FnDef(..) - | ty::Closure(..) - | ty::CoroutineClosure(..) - | ty::Coroutine(..) - | ty::CoroutineWitness(..) - | ty::Placeholder(..) - | ty::Bound(..) - | ty::Infer(_) => bug!("unexpected impl_ty: {impl_ty}"), - } - - let k = impl_ty.kind(); - match *obligation_ty.kind() { - // Purely rigid types, use structural equivalence. - ty::Bool - | ty::Char - | ty::Int(_) - | ty::Uint(_) - | ty::Float(_) - | ty::Str - | ty::Never - | ty::Foreign(_) => obligation_ty == impl_ty, - ty::Ref(_, obl_ty, obl_mutbl) => match k { - &ty::Ref(_, impl_ty, impl_mutbl) => { - obl_mutbl == impl_mutbl && self.types_may_unify(obl_ty, impl_ty) - } - _ => false, - }, - ty::Adt(obl_def, obl_args) => match k { - &ty::Adt(impl_def, impl_args) => { - obl_def == impl_def && self.args_may_unify(obl_args, impl_args) - } - _ => false, - }, - ty::Pat(obl_ty, _) => { - // FIXME(pattern_types): take pattern into account - matches!(k, &ty::Pat(impl_ty, _) if self.types_may_unify(obl_ty, impl_ty)) - } - ty::Slice(obl_ty) => { - matches!(k, &ty::Slice(impl_ty) if self.types_may_unify(obl_ty, impl_ty)) - } - ty::Array(obl_ty, obl_len) => match k { - &ty::Array(impl_ty, impl_len) => { - self.types_may_unify(obl_ty, impl_ty) - && self.consts_may_unify(obl_len, impl_len) - } - _ => false, - }, - ty::Tuple(obl) => match k { - &ty::Tuple(imp) => { - obl.len() == imp.len() - && iter::zip(obl, imp).all(|(obl, imp)| self.types_may_unify(obl, imp)) - } - _ => false, - }, - ty::RawPtr(obl_ty, obl_mutbl) => match *k { - ty::RawPtr(imp_ty, imp_mutbl) => { - obl_mutbl == imp_mutbl && self.types_may_unify(obl_ty, imp_ty) - } - _ => false, - }, - ty::Dynamic(obl_preds, ..) => { - // Ideally we would walk the existential predicates here or at least - // compare their length. But considering that the relevant `Relate` impl - // actually sorts and deduplicates these, that doesn't work. - matches!(k, ty::Dynamic(impl_preds, ..) 
if - obl_preds.principal_def_id() == impl_preds.principal_def_id() - ) - } - ty::FnPtr(obl_sig) => match k { - ty::FnPtr(impl_sig) => { - let ty::FnSig { inputs_and_output, c_variadic, safety, abi } = - obl_sig.skip_binder(); - let impl_sig = impl_sig.skip_binder(); - - abi == impl_sig.abi - && c_variadic == impl_sig.c_variadic - && safety == impl_sig.safety - && inputs_and_output.len() == impl_sig.inputs_and_output.len() - && iter::zip(inputs_and_output, impl_sig.inputs_and_output) - .all(|(obl, imp)| self.types_may_unify(obl, imp)) - } - _ => false, - }, - - // Impls cannot contain these types as these cannot be named directly. - ty::FnDef(..) | ty::Closure(..) | ty::CoroutineClosure(..) | ty::Coroutine(..) => false, - - // Placeholder types don't unify with anything on their own - ty::Placeholder(..) | ty::Bound(..) => false, - - // Depending on the value of `treat_obligation_params`, we either - // treat generic parameters like placeholders or like inference variables. - ty::Param(_) => match self.treat_obligation_params { - TreatParams::ForLookup => false, - TreatParams::AsCandidateKey => true, - }, - - ty::Infer(ty::IntVar(_)) => impl_ty.is_integral(), - - ty::Infer(ty::FloatVar(_)) => impl_ty.is_floating_point(), - - ty::Infer(_) => true, - - // As we're walking the whole type, it may encounter projections - // inside of binders and what not, so we're just going to assume that - // projections can unify with other stuff. - // - // Looking forward to lazy normalization this is the safer strategy anyways. - ty::Alias(..) => true, - - ty::Error(_) => true, - - ty::CoroutineWitness(..) => { - bug!("unexpected obligation type: {:?}", obligation_ty) - } - } - } - - pub fn consts_may_unify(self, obligation_ct: ty::Const<'_>, impl_ct: ty::Const<'_>) -> bool { - let impl_val = match impl_ct.kind() { - ty::ConstKind::Expr(_) - | ty::ConstKind::Param(_) - | ty::ConstKind::Unevaluated(_) - | ty::ConstKind::Error(_) => { - return true; - } - ty::ConstKind::Value(_, impl_val) => impl_val, - ty::ConstKind::Infer(_) | ty::ConstKind::Bound(..) | ty::ConstKind::Placeholder(_) => { - bug!("unexpected impl arg: {:?}", impl_ct) - } - }; - - match obligation_ct.kind() { - ty::ConstKind::Param(_) => match self.treat_obligation_params { - TreatParams::ForLookup => false, - TreatParams::AsCandidateKey => true, - }, - - // Placeholder consts don't unify with anything on their own - ty::ConstKind::Placeholder(_) => false, - - // As we don't necessarily eagerly evaluate constants, - // they might unify with any value. - ty::ConstKind::Expr(_) | ty::ConstKind::Unevaluated(_) | ty::ConstKind::Error(_) => { - true - } - ty::ConstKind::Value(_, obl_val) => obl_val == impl_val, - - ty::ConstKind::Infer(_) => true, - - ty::ConstKind::Bound(..) 
=> { bug!("unexpected obl const: {:?}", obligation_ct) } } } +pub type SimplifiedType = rustc_type_ir::fast_reject::SimplifiedType<DefId>; diff --git a/compiler/rustc_middle/src/ty/impls_ty.rs b/compiler/rustc_middle/src/ty/impls_ty.rs index efcf428c213..9be7370a1c2 100644 --- a/compiler/rustc_middle/src/ty/impls_ty.rs +++ b/compiler/rustc_middle/src/ty/impls_ty.rs @@ -4,7 +4,6 @@ use crate::middle::region; use crate::mir; use crate::ty; -use crate::ty::fast_reject::SimplifiedType; use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::stable_hasher::HashingControls; @@ -57,18 +56,6 @@ fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> Fingerprint { } } -impl<'a> ToStableHashKey<StableHashingContext<'a>> for SimplifiedType { - type KeyType = Fingerprint; - - #[inline] - fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> Fingerprint { - let mut hasher = StableHasher::new(); - let mut hcx: StableHashingContext<'a> = hcx.clone(); - self.hash_stable(&mut hcx, &mut hasher); - hasher.finish() - } -} - impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for ty::GenericArg<'tcx> { fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { self.unpack().hash_stable(hcx, hasher); } diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs index 19700353f59..d2f32cafb9d 100644 --- a/compiler/rustc_middle/src/ty/print/pretty.rs +++ b/compiler/rustc_middle/src/ty/print/pretty.rs @@ -1710,22 +1710,24 @@ fn pretty_print_const_scalar_int( ty::Bool if int == ScalarInt::FALSE => p!("false"), ty::Bool if int == ScalarInt::TRUE => p!("true"), // Float - ty::Float(ty::FloatTy::F16) => { - let val = Half::try_from(int).unwrap(); - p!(write("{}{}f16", val, if val.is_finite() { "" } else { "_" })) - } - ty::Float(ty::FloatTy::F32) => { - let val = Single::try_from(int).unwrap(); - p!(write("{}{}f32", val, if val.is_finite() { "" } else { "_" })) - } - ty::Float(ty::FloatTy::F64) => { - let val = Double::try_from(int).unwrap(); - p!(write("{}{}f64", val, if val.is_finite() { "" } else { "_" })) - } - ty::Float(ty::FloatTy::F128) => { - let val = Quad::try_from(int).unwrap(); - p!(write("{}{}f128", val, if val.is_finite() { "" } else { "_" })) - } + ty::Float(fty) => match fty { + ty::FloatTy::F16 => { + let val = Half::try_from(int).unwrap(); + p!(write("{}{}f16", val, if val.is_finite() { "" } else { "_" })) + } + ty::FloatTy::F32 => { + let val = Single::try_from(int).unwrap(); + p!(write("{}{}f32", val, if val.is_finite() { "" } else { "_" })) + } + ty::FloatTy::F64 => { + let val = Double::try_from(int).unwrap(); + p!(write("{}{}f64", val, if val.is_finite() { "" } else { "_" })) + } + ty::FloatTy::F128 => { + let val = Quad::try_from(int).unwrap(); + p!(write("{}{}f128", val, if val.is_finite() { "" } else { "_" })) + } + }, // Int ty::Uint(_) | ty::Int(_) => { let int = diff --git a/compiler/rustc_mir_build/src/build/expr/as_place.rs b/compiler/rustc_mir_build/src/build/expr/as_place.rs index 4b62afa61bb..91a3b53cc79 100644 --- a/compiler/rustc_mir_build/src/build/expr/as_place.rs +++ b/compiler/rustc_mir_build/src/build/expr/as_place.rs @@ -130,7 +130,7 @@ fn convert_to_hir_projections_and_truncate_for_capture( /// Eg: 1. `foo.x` which is represented using `projections=[Field(x)]` is an ancestor of /// `foo.x.y` which is represented using `projections=[Field(x), Field(y)]`. /// Note both `foo.x` and `foo.x.y` start off of the same root variable `foo`. -/// 2. 
Since we only look at the projections here function will return `bar.x` as an a valid +/// 2. Since we only look at the projections here, the function will return `bar.x` as a valid /// ancestor of `foo.x.y`. It's the caller's responsibility to ensure that both projections /// list are being applied to the same root variable. fn is_ancestor_or_same_capture( diff --git a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs index 8c6c9e10cdf..5745dc0969c 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs @@ -138,7 +138,7 @@ fn to_pat(&mut self, cv: mir::Const<'tcx>) -> Box<Pat<'tcx>> { // lints, but no errors), double-check that all types in the const implement // `PartialEq`. Even if we have a valtree, we may have found something // in there with non-structural-equality, meaning we match using `PartialEq` - // and we hence have to check that that impl exists. + // and we hence have to check if that impl exists. // This is all messy but not worth cleaning up: at some point we'll emit // a hard error when we don't have a valtree or when we find something in // the valtree that is not structural; then this can all be made a lot simpler. diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index 3dbdeb615cf..2b7d9be6d35 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -8,7 +8,7 @@ //! `Value` is interned as a `VnIndex`, which allows us to cheaply compute identical values. //! //! From those assignments, we construct a mapping `VnIndex -> Vec<(Local, Location)>` of available -//! values, the locals in which they are stored, and a the assignment location. +//! values, the locals in which they are stored, and the assignment location. //! //! In a second pass, we traverse all (non SSA) assignments `x = rvalue` and operands. For each //! one, we compute the `VnIndex` of the rvalue. If this `VnIndex` is associated to a constant, we diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs index f7056702cb4..5d253d7384d 100644 --- a/compiler/rustc_mir_transform/src/lib.rs +++ b/compiler/rustc_mir_transform/src/lib.rs @@ -519,7 +519,7 @@ fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { &add_subtyping_projections::Subtyper, // calling this after reveal_all ensures that we don't deal with opaque types &elaborate_drops::ElaborateDrops, // This will remove extraneous landing pads which are no longer - // necessary as well as well as forcing any call in a non-unwinding + // necessary as well as forcing any call in a non-unwinding // function calling a possibly-unwinding function to abort the process. &abort_unwinding_calls::AbortUnwindingCalls, // AddMovesForPackedDrops needs to run after drop diff --git a/compiler/rustc_mir_transform/src/promote_consts.rs b/compiler/rustc_mir_transform/src/promote_consts.rs index 3f4d2b65ff2..736647fb64b 100644 --- a/compiler/rustc_mir_transform/src/promote_consts.rs +++ b/compiler/rustc_mir_transform/src/promote_consts.rs @@ -816,7 +816,7 @@ fn promote_temp(&mut self, temp: Local) -> Local { mut func, mut args, call_source: desugar, fn_span, .. } => { // This promoted involves a function call, so it may fail to evaluate. - // Let's make sure it is added to `required_consts` so that that failure cannot get lost.
+ // Let's make sure it is added to `required_consts` so that failure cannot get lost. self.add_to_required = true; self.visit_operand(&mut func, loc); diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs index 25577e88e28..6835a39cf36 100644 --- a/compiler/rustc_mir_transform/src/shim.rs +++ b/compiler/rustc_mir_transform/src/shim.rs @@ -1,18 +1,17 @@ use rustc_hir as hir; use rustc_hir::def_id::DefId; use rustc_hir::lang_items::LangItem; +use rustc_index::{Idx, IndexVec}; use rustc_middle::mir::*; use rustc_middle::query::Providers; use rustc_middle::ty::GenericArgs; use rustc_middle::ty::{self, CoroutineArgs, CoroutineArgsExt, EarlyBinder, Ty, TyCtxt}; use rustc_middle::{bug, span_bug}; -use rustc_target::abi::{FieldIdx, VariantIdx, FIRST_VARIANT}; - -use rustc_index::{Idx, IndexVec}; - use rustc_span::{source_map::Spanned, Span, DUMMY_SP}; +use rustc_target::abi::{FieldIdx, VariantIdx, FIRST_VARIANT}; use rustc_target::spec::abi::Abi; +use std::assert_matches::assert_matches; use std::fmt; use std::iter; @@ -1020,21 +1019,19 @@ fn build_construct_coroutine_by_move_shim<'tcx>( receiver_by_ref: bool, ) -> Body<'tcx> { let mut self_ty = tcx.type_of(coroutine_closure_def_id).instantiate_identity(); + let mut self_local: Place<'tcx> = Local::from_usize(1).into(); let ty::CoroutineClosure(_, args) = *self_ty.kind() else { bug!(); }; - // We use `&mut Self` here because we only need to emit an ABI-compatible shim body, - // rather than match the signature exactly (which might take `&self` instead). + // We use `&Self` here because we only need to emit an ABI-compatible shim body, + // rather than match the signature exactly (which might take `&mut self` instead). // - // The self type here is a coroutine-closure, not a coroutine, and we never read from - // it because it never has any captures, because this is only true in the Fn/FnMut - // implementation, not the AsyncFn/AsyncFnMut implementation, which is implemented only - // if the coroutine-closure has no captures. + // We adjust the `self_local` to be a deref since we want to copy fields out of + // a reference to the closure. if receiver_by_ref { - // Triple-check that there's no captures here. - assert_eq!(args.as_coroutine_closure().tupled_upvars_ty(), tcx.types.unit); - self_ty = Ty::new_mut_ref(tcx, tcx.lifetimes.re_erased, self_ty); + self_local = tcx.mk_place_deref(self_local); + self_ty = Ty::new_imm_ref(tcx, tcx.lifetimes.re_erased, self_ty); } let poly_sig = args.as_coroutine_closure().coroutine_closure_sig().map_bound(|sig| { @@ -1067,11 +1064,27 @@ fn build_construct_coroutine_by_move_shim<'tcx>( fields.push(Operand::Move(Local::from_usize(idx + 1).into())); } for (idx, ty) in args.as_coroutine_closure().upvar_tys().iter().enumerate() { - fields.push(Operand::Move(tcx.mk_place_field( - Local::from_usize(1).into(), - FieldIdx::from_usize(idx), - ty, - ))); + if receiver_by_ref { + // The only situation where this is possible is when we capture immutable references, + // since those don't need to be reborrowed with the closure's env lifetime. Since + // references are always `Copy`, just emit a copy.
+ assert_matches!( + ty.kind(), + ty::Ref(_, _, hir::Mutability::Not), + "field should be captured by immutable ref if we have an `Fn` instance" + ); + fields.push(Operand::Copy(tcx.mk_place_field( + self_local, + FieldIdx::from_usize(idx), + ty, + ))); + } else { + fields.push(Operand::Move(tcx.mk_place_field( + self_local, + FieldIdx::from_usize(idx), + ty, + ))); + } } let source_info = SourceInfo::outermost(span); diff --git a/compiler/rustc_next_trait_solver/src/solve/normalizes_to/mod.rs b/compiler/rustc_next_trait_solver/src/solve/normalizes_to/mod.rs index 9275bcc8e97..a83bd689a80 100644 --- a/compiler/rustc_next_trait_solver/src/solve/normalizes_to/mod.rs +++ b/compiler/rustc_next_trait_solver/src/solve/normalizes_to/mod.rs @@ -3,6 +3,7 @@ mod opaque_types; mod weak_types; +use rustc_type_ir::fast_reject::{DeepRejectCtxt, TreatParams}; use rustc_type_ir::inherent::*; use rustc_type_ir::lang_items::TraitSolverLangItem; use rustc_type_ir::Upcast as _; @@ -144,7 +145,7 @@ fn consider_impl_candidate( let goal_trait_ref = goal.predicate.alias.trait_ref(cx); let impl_trait_ref = cx.impl_trait_ref(impl_def_id); - if !ecx.cx().args_may_unify_deep( + if !DeepRejectCtxt::new(ecx.cx(), TreatParams::ForLookup).args_may_unify( goal.predicate.alias.trait_ref(cx).args, impl_trait_ref.skip_binder().args, ) { diff --git a/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs b/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs index f5832f7e5b4..b2a59eece0d 100644 --- a/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs +++ b/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs @@ -2,6 +2,7 @@ use rustc_ast_ir::Movability; use rustc_type_ir::data_structures::IndexSet; +use rustc_type_ir::fast_reject::{DeepRejectCtxt, TreatParams}; use rustc_type_ir::inherent::*; use rustc_type_ir::lang_items::TraitSolverLangItem; use rustc_type_ir::visit::TypeVisitableExt as _; @@ -46,7 +47,8 @@ fn consider_impl_candidate( let cx = ecx.cx(); let impl_trait_ref = cx.impl_trait_ref(impl_def_id); - if !cx.args_may_unify_deep(goal.predicate.trait_ref.args, impl_trait_ref.skip_binder().args) + if !DeepRejectCtxt::new(ecx.cx(), TreatParams::ForLookup) + .args_may_unify(goal.predicate.trait_ref.args, impl_trait_ref.skip_binder().args) { return Err(NoSolution); } diff --git a/compiler/rustc_passes/src/dead.rs b/compiler/rustc_passes/src/dead.rs index bbd586386dd..6ea0ed339a6 100644 --- a/compiler/rustc_passes/src/dead.rs +++ b/compiler/rustc_passes/src/dead.rs @@ -102,7 +102,7 @@ fn ty_ref_to_pub_struct(tcx: TyCtxt<'_>, ty: &hir::Ty<'_>) -> Publicness { Publicness::new(true, true) } -/// Determine if a work from the worklist is coming from the a `#[allow]` +/// Determine if a work item from the worklist is coming from a `#[allow]` /// or a `#[expect]` of `dead_code` #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] enum ComesFromAllowExpect { diff --git a/compiler/rustc_resolve/src/diagnostics.rs b/compiler/rustc_resolve/src/diagnostics.rs index 263daa11ec3..50a4e03d233 100644 --- a/compiler/rustc_resolve/src/diagnostics.rs +++ b/compiler/rustc_resolve/src/diagnostics.rs @@ -1987,10 +1987,20 @@ pub(crate) fn report_path_resolution_error( candidates .sort_by_cached_key(|c| (c.path.segments.len(), pprust::path_to_string(&c.path))); if let Some(candidate) = candidates.get(0) { + let path = { + // remove the possible common prefix of the path + let start_index = (0..failed_segment_idx) + .find(|&i| path[i].ident != candidate.path.segments[i].ident) + .unwrap_or_default(); + let segments = 
(start_index..=failed_segment_idx) + .map(|s| candidate.path.segments[s].clone()) + .collect(); + Path { segments, span: Span::default(), tokens: None } + }; ( String::from("unresolved import"), Some(( - vec![(ident.span, pprust::path_to_string(&candidate.path))], + vec![(ident.span, pprust::path_to_string(&path))], String::from("a similar path exists"), Applicability::MaybeIncorrect, )), diff --git a/compiler/rustc_serialize/src/opaque.rs b/compiler/rustc_serialize/src/opaque.rs index 1dcb69920d7..d27dfd88824 100644 --- a/compiler/rustc_serialize/src/opaque.rs +++ b/compiler/rustc_serialize/src/opaque.rs @@ -155,7 +155,7 @@ fn write_all(&mut self, buf: &[u8]) { if std::intrinsics::unlikely(self.buffered > flush_threshold) { self.flush(); } - // SAFETY: We checked above that that N < self.buffer_empty().len(), + // SAFETY: We checked above that N < self.buffer_empty().len(), // and if isn't, flush ensures that our empty buffer is now BUF_SIZE. // We produce a post-mono error if N > BUF_SIZE. let buf = unsafe { self.buffer_empty().first_chunk_mut::<N>().unwrap_unchecked() }; diff --git a/compiler/rustc_symbol_mangling/src/legacy.rs b/compiler/rustc_symbol_mangling/src/legacy.rs index 9edd2ff9b1a..5aa46cc0dea 100644 --- a/compiler/rustc_symbol_mangling/src/legacy.rs +++ b/compiler/rustc_symbol_mangling/src/legacy.rs @@ -85,9 +85,13 @@ pub(super) fn mangle<'tcx>( } // FIXME(async_closures): This shouldn't be needed when we fix // `Instance::ty`/`Instance::def_id`. - ty::InstanceKind::ConstructCoroutineInClosureShim { .. } - | ty::InstanceKind::CoroutineKindShim { .. } => { - printer.write_str("{{fn-once-shim}}").unwrap(); + ty::InstanceKind::ConstructCoroutineInClosureShim { receiver_by_ref, .. } => { + printer + .write_str(if receiver_by_ref { "{{by-move-shim}}" } else { "{{by-ref-shim}}" }) + .unwrap(); + } + ty::InstanceKind::CoroutineKindShim { .. } => { + printer.write_str("{{by-move-body-shim}}").unwrap(); } _ => {} } diff --git a/compiler/rustc_symbol_mangling/src/v0.rs b/compiler/rustc_symbol_mangling/src/v0.rs index 42c4fa83d1b..5f8029af020 100644 --- a/compiler/rustc_symbol_mangling/src/v0.rs +++ b/compiler/rustc_symbol_mangling/src/v0.rs @@ -49,8 +49,15 @@ pub(super) fn mangle<'tcx>( ty::InstanceKind::ReifyShim(_, Some(ReifyReason::FnPtr)) => Some("reify_fnptr"), ty::InstanceKind::ReifyShim(_, Some(ReifyReason::Vtable)) => Some("reify_vtable"), - ty::InstanceKind::ConstructCoroutineInClosureShim { .. } - | ty::InstanceKind::CoroutineKindShim { .. } => Some("fn_once"), + // FIXME(async_closures): This shouldn't be needed when we fix + // `Instance::ty`/`Instance::def_id`. + ty::InstanceKind::ConstructCoroutineInClosureShim { receiver_by_ref: true, .. } => { + Some("by_move") + } + ty::InstanceKind::ConstructCoroutineInClosureShim { receiver_by_ref: false, .. } => { + Some("by_ref") + } + ty::InstanceKind::CoroutineKindShim { .. } => Some("by_move_body"), _ => None, }; diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs index 5713542c17d..8058130f441 100644 --- a/compiler/rustc_target/src/abi/call/mod.rs +++ b/compiler/rustc_target/src/abi/call/mod.rs @@ -339,7 +339,9 @@ pub fn pair(a: Reg, b: Reg) -> CastTarget { } } - pub fn size<C: HasDataLayout>(&self, _cx: &C) -> Size { + /// When you only access the range containing valid data, you can use this unaligned size; + /// otherwise, use the safer `size` method.
+ pub fn unaligned_size<C: HasDataLayout>(&self, _cx: &C) -> Size { // Prefix arguments are passed in specific designated registers let prefix_size = self .prefix @@ -353,6 +355,10 @@ pub fn size(&self, _cx: &C) -> Size { prefix_size + rest_size } + pub fn size<C: HasDataLayout>(&self, cx: &C) -> Size { + self.unaligned_size(cx).align_to(self.align(cx)) + } + pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align { self.prefix .iter() diff --git a/compiler/rustc_trait_selection/src/traits/coherence.rs b/compiler/rustc_trait_selection/src/traits/coherence.rs index a4177d8a93f..9f0d84e7d45 100644 --- a/compiler/rustc_trait_selection/src/traits/coherence.rs +++ b/compiler/rustc_trait_selection/src/traits/coherence.rs @@ -121,7 +121,7 @@ pub fn overlapping_impls( // Before doing expensive operations like entering an inference context, do // a quick check via fast_reject to tell if the impl headers could possibly // unify. - let drcx = DeepRejectCtxt { treat_obligation_params: TreatParams::AsCandidateKey }; + let drcx = DeepRejectCtxt::new(tcx, TreatParams::AsCandidateKey); let impl1_ref = tcx.impl_trait_ref(impl1_def_id); let impl2_ref = tcx.impl_trait_ref(impl2_def_id); let may_overlap = match (impl1_ref, impl2_ref) { diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs index ccf86dbb1d0..a0a8e5963f1 100644 --- a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs +++ b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs @@ -4114,7 +4114,7 @@ fn point_at_chain( expr = binding_expr; } if let hir::Node::Param(param) = parent { - // ...and it is a an fn argument. + // ...and it is an fn argument. let prev_ty = self.resolve_vars_if_possible( typeck_results .node_type_opt(param.hir_id) diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/type_err_ctxt_ext.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/type_err_ctxt_ext.rs index adf1076a7c9..3e316b78454 100644 --- a/compiler/rustc_trait_selection/src/traits/error_reporting/type_err_ctxt_ext.rs +++ b/compiler/rustc_trait_selection/src/traits/error_reporting/type_err_ctxt_ext.rs @@ -1296,7 +1296,7 @@ fn visit_expr(&mut self, ex: &'v hir::Expr<'v>) -> Self::Result { expr = binding_expr; } if let hir::Node::Param(_param) = parent { - // ...and it is a an fn argument. + // ...and it is an fn argument. break; } } diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs index e36a9ca8bd1..4c3d833b0f9 100644 --- a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs +++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs @@ -571,7 +571,7 @@ fn assemble_candidates_from_impls( return; } - let drcx = DeepRejectCtxt { treat_obligation_params: TreatParams::ForLookup }; + let drcx = DeepRejectCtxt::new(self.tcx(), TreatParams::ForLookup); let obligation_args = obligation.predicate.skip_binder().trait_ref.args; self.tcx().for_each_relevant_impl( obligation.predicate.def_id(), diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs index f1dd94839fe..f078cfe1b25 100644 --- a/compiler/rustc_ty_utils/src/abi.rs +++ b/compiler/rustc_ty_utils/src/abi.rs @@ -127,9 +127,9 @@ fn fn_sig_for_fn_abi<'tcx>( coroutine_kind = ty::ClosureKind::FnOnce; // Implementations of `FnMut` and `Fn` for coroutine-closures - // still take their receiver by (mut) ref.
+ // still take their receiver by ref. if receiver_by_ref { - Ty::new_mut_ref(tcx, tcx.lifetimes.re_erased, coroutine_ty) + Ty::new_imm_ref(tcx, tcx.lifetimes.re_erased, coroutine_ty) } else { coroutine_ty } diff --git a/compiler/rustc_type_ir/src/fast_reject.rs b/compiler/rustc_type_ir/src/fast_reject.rs new file mode 100644 index 00000000000..0810fa5c558 --- /dev/null +++ b/compiler/rustc_type_ir/src/fast_reject.rs @@ -0,0 +1,397 @@ +use std::fmt::Debug; +use std::hash::Hash; +use std::iter; +use std::marker::PhantomData; + +use rustc_ast_ir::Mutability; +#[cfg(feature = "nightly")] +use rustc_data_structures::fingerprint::Fingerprint; +#[cfg(feature = "nightly")] +use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey}; +#[cfg(feature = "nightly")] +use rustc_macros::{HashStable_NoContext, TyDecodable, TyEncodable}; + +use crate::inherent::*; +use crate::visit::TypeVisitableExt as _; +use crate::{self as ty, Interner}; + +/// See `simplify_type`. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "nightly", derive(TyEncodable, TyDecodable, HashStable_NoContext))] +pub enum SimplifiedType<DefId> { + Bool, + Char, + Int(ty::IntTy), + Uint(ty::UintTy), + Float(ty::FloatTy), + Adt(DefId), + Foreign(DefId), + Str, + Array, + Slice, + Ref(Mutability), + Ptr(Mutability), + Never, + Tuple(usize), + /// A trait object, all of whose components are markers + /// (e.g., `dyn Send + Sync`). + MarkerTraitObject, + Trait(DefId), + Closure(DefId), + Coroutine(DefId), + CoroutineWitness(DefId), + Function(usize), + Placeholder, + Error, +} + +#[cfg(feature = "nightly")] +impl<HCX: Clone, DefId: HashStable<HCX>> ToStableHashKey<HCX> for SimplifiedType<DefId> { + type KeyType = Fingerprint; + + #[inline] + fn to_stable_hash_key(&self, hcx: &HCX) -> Fingerprint { + let mut hasher = StableHasher::new(); + let mut hcx: HCX = hcx.clone(); + self.hash_stable(&mut hcx, &mut hasher); + hasher.finish() + } +} + +/// Generic parameters are pretty much just bound variables, e.g. +/// the type of `fn foo<'a, T>(x: &'a T) -> u32 { ... }` can be thought of as +/// `for<'a, T> fn(&'a T) -> u32`. +/// +/// Typecheck of `foo` has to succeed for all possible generic arguments, so +/// during typeck, we have to treat its generic parameters as if they +/// were placeholders. +/// +/// But when calling `foo` we only have to provide a specific generic argument. +/// In that case the generic parameters are instantiated with inference variables. +/// As we use `simplify_type` before that instantiation happens, we just treat +/// generic parameters as if they were inference variables in that case. +#[derive(PartialEq, Eq, Debug, Clone, Copy)] +pub enum TreatParams { + /// Treat parameters as infer vars. This is the correct mode for caching + /// an impl's type for lookup. + AsCandidateKey, + /// Treat parameters as placeholders in the given environment. This is the + /// correct mode for *lookup*, as during candidate selection. + /// + /// This also treats projections with inference variables as infer vars + /// since they could be further normalized. + ForLookup, +} + +/// Tries to simplify a type by only returning the outermost injective¹ layer, if one exists. +/// +/// **This function should only be used if you need to store or retrieve the type from some +/// hashmap. If you want to quickly decide whether two types may unify, use the [DeepRejectCtxt] +/// instead.** +/// +/// The idea is to get something simple that we can use to quickly decide if two types could unify, +/// for example during method lookup. 
If this function returns `Some(x)` it can only unify with +/// types for which this method returns either `Some(x)` as well or `None`. +/// +/// A special case here are parameters and projections, which are only injective +/// if they are treated as placeholders. +/// +/// For example when storing impls based on their simplified self type, we treat +/// generic parameters as if they were inference variables. We must not simplify them here, +/// as they can unify with any other type. +/// +/// With projections we have to be even more careful, as treating them as placeholders +/// is only correct if they are fully normalized. +/// +/// ¹ meaning that if the outermost layers are different, then the whole types are also different. +pub fn simplify_type<I: Interner>( + tcx: I, + ty: I::Ty, + treat_params: TreatParams, +) -> Option<SimplifiedType<I::DefId>> { + match ty.kind() { + ty::Bool => Some(SimplifiedType::Bool), + ty::Char => Some(SimplifiedType::Char), + ty::Int(int_type) => Some(SimplifiedType::Int(int_type)), + ty::Uint(uint_type) => Some(SimplifiedType::Uint(uint_type)), + ty::Float(float_type) => Some(SimplifiedType::Float(float_type)), + ty::Adt(def, _) => Some(SimplifiedType::Adt(def.def_id())), + ty::Str => Some(SimplifiedType::Str), + ty::Array(..) => Some(SimplifiedType::Array), + ty::Slice(..) => Some(SimplifiedType::Slice), + ty::Pat(ty, ..) => simplify_type(tcx, ty, treat_params), + ty::RawPtr(_, mutbl) => Some(SimplifiedType::Ptr(mutbl)), + ty::Dynamic(trait_info, ..) => match trait_info.principal_def_id() { + Some(principal_def_id) if !tcx.trait_is_auto(principal_def_id) => { + Some(SimplifiedType::Trait(principal_def_id)) + } + _ => Some(SimplifiedType::MarkerTraitObject), + }, + ty::Ref(_, _, mutbl) => Some(SimplifiedType::Ref(mutbl)), + ty::FnDef(def_id, _) | ty::Closure(def_id, _) | ty::CoroutineClosure(def_id, _) => { + Some(SimplifiedType::Closure(def_id)) + } + ty::Coroutine(def_id, _) => Some(SimplifiedType::Coroutine(def_id)), + ty::CoroutineWitness(def_id, _) => Some(SimplifiedType::CoroutineWitness(def_id)), + ty::Never => Some(SimplifiedType::Never), + ty::Tuple(tys) => Some(SimplifiedType::Tuple(tys.len())), + ty::FnPtr(f) => Some(SimplifiedType::Function(f.skip_binder().inputs().len())), + ty::Placeholder(..) => Some(SimplifiedType::Placeholder), + ty::Param(_) => match treat_params { + TreatParams::ForLookup => Some(SimplifiedType::Placeholder), + TreatParams::AsCandidateKey => None, + }, + ty::Alias(..) => match treat_params { + // When treating `ty::Param` as a placeholder, projections also + // don't unify with anything else as long as they are fully normalized. + // FIXME(-Znext-solver): Can remove this `if` and always simplify to `Placeholder` + // when the new solver is enabled by default. + TreatParams::ForLookup if !ty.has_non_region_infer() => { + Some(SimplifiedType::Placeholder) + } + TreatParams::ForLookup | TreatParams::AsCandidateKey => None, + }, + ty::Foreign(def_id) => Some(SimplifiedType::Foreign(def_id)), + ty::Error(_) => Some(SimplifiedType::Error), + ty::Bound(..) | ty::Infer(_) => None, + } +} + +impl<DefId> SimplifiedType<DefId> { + pub fn def(self) -> Option<DefId> { + match self { + SimplifiedType::Adt(d) + | SimplifiedType::Foreign(d) + | SimplifiedType::Trait(d) + | SimplifiedType::Closure(d) + | SimplifiedType::Coroutine(d) + | SimplifiedType::CoroutineWitness(d) => Some(d), + _ => None, + } + } +} + +/// Given generic arguments from an obligation and an impl, +/// could these two be unified after replacing parameters in +/// the impl with inference variables. 
+/// +/// For obligations, parameters won't be replaced by inference +/// variables and only unify with themselves. We treat them +/// the same way we treat placeholders. +/// +/// We also use this function during coherence. For coherence the +/// impls only have to overlap for some value, so we treat parameters +/// on both sides like inference variables. This behavior is toggled +/// using the `treat_obligation_params` field. +#[derive(Debug, Clone, Copy)] +pub struct DeepRejectCtxt<I: Interner> { + treat_obligation_params: TreatParams, + _interner: PhantomData<I>, +} + +impl<I: Interner> DeepRejectCtxt<I> { + pub fn new(_interner: I, treat_obligation_params: TreatParams) -> Self { + DeepRejectCtxt { treat_obligation_params, _interner: PhantomData } + } + + pub fn args_may_unify( + self, + obligation_args: I::GenericArgs, + impl_args: I::GenericArgs, + ) -> bool { + iter::zip(obligation_args.iter(), impl_args.iter()).all(|(obl, imp)| { + match (obl.kind(), imp.kind()) { + // We don't fast reject based on regions. + (ty::GenericArgKind::Lifetime(_), ty::GenericArgKind::Lifetime(_)) => true, + (ty::GenericArgKind::Type(obl), ty::GenericArgKind::Type(imp)) => { + self.types_may_unify(obl, imp) + } + (ty::GenericArgKind::Const(obl), ty::GenericArgKind::Const(imp)) => { + self.consts_may_unify(obl, imp) + } + _ => panic!("kind mismatch: {obl:?} {imp:?}"), + } + }) + } + + pub fn types_may_unify(self, obligation_ty: I::Ty, impl_ty: I::Ty) -> bool { + match impl_ty.kind() { + // Start by checking whether the type in the impl may unify with + // pretty much everything. Just return `true` in that case. + ty::Param(_) | ty::Error(_) | ty::Alias(..) => return true, + // These types only unify with inference variables or their own + // variant. + ty::Bool + | ty::Char + | ty::Int(_) + | ty::Uint(_) + | ty::Float(_) + | ty::Adt(..) + | ty::Str + | ty::Array(..) + | ty::Slice(..) + | ty::RawPtr(..) + | ty::Dynamic(..) + | ty::Pat(..) + | ty::Ref(..) + | ty::Never + | ty::Tuple(..) + | ty::FnPtr(..) + | ty::Foreign(..) => debug_assert!(impl_ty.is_known_rigid()), + ty::FnDef(..) + | ty::Closure(..) + | ty::CoroutineClosure(..) + | ty::Coroutine(..) + | ty::CoroutineWitness(..) + | ty::Placeholder(..) + | ty::Bound(..) + | ty::Infer(_) => panic!("unexpected impl_ty: {impl_ty:?}"), + } + + let k = impl_ty.kind(); + match obligation_ty.kind() { + // Purely rigid types, use structural equivalence. 
+ ty::Bool + | ty::Char + | ty::Int(_) + | ty::Uint(_) + | ty::Float(_) + | ty::Str + | ty::Never + | ty::Foreign(_) => obligation_ty == impl_ty, + ty::Ref(_, obl_ty, obl_mutbl) => match k { + ty::Ref(_, impl_ty, impl_mutbl) => { + obl_mutbl == impl_mutbl && self.types_may_unify(obl_ty, impl_ty) + } + _ => false, + }, + ty::Adt(obl_def, obl_args) => match k { + ty::Adt(impl_def, impl_args) => { + obl_def == impl_def && self.args_may_unify(obl_args, impl_args) + } + _ => false, + }, + ty::Pat(obl_ty, _) => { + // FIXME(pattern_types): take pattern into account + matches!(k, ty::Pat(impl_ty, _) if self.types_may_unify(obl_ty, impl_ty)) + } + ty::Slice(obl_ty) => { + matches!(k, ty::Slice(impl_ty) if self.types_may_unify(obl_ty, impl_ty)) + } + ty::Array(obl_ty, obl_len) => match k { + ty::Array(impl_ty, impl_len) => { + self.types_may_unify(obl_ty, impl_ty) + && self.consts_may_unify(obl_len, impl_len) + } + _ => false, + }, + ty::Tuple(obl) => match k { + ty::Tuple(imp) => { + obl.len() == imp.len() + && iter::zip(obl.iter(), imp.iter()) + .all(|(obl, imp)| self.types_may_unify(obl, imp)) + } + _ => false, + }, + ty::RawPtr(obl_ty, obl_mutbl) => match k { + ty::RawPtr(imp_ty, imp_mutbl) => { + obl_mutbl == imp_mutbl && self.types_may_unify(obl_ty, imp_ty) + } + _ => false, + }, + ty::Dynamic(obl_preds, ..) => { + // Ideally we would walk the existential predicates here or at least + // compare their length. But considering that the relevant `Relate` impl + // actually sorts and deduplicates these, that doesn't work. + matches!(k, ty::Dynamic(impl_preds, ..) if + obl_preds.principal_def_id() == impl_preds.principal_def_id() + ) + } + ty::FnPtr(obl_sig) => match k { + ty::FnPtr(impl_sig) => { + let ty::FnSig { inputs_and_output, c_variadic, safety, abi } = + obl_sig.skip_binder(); + let impl_sig = impl_sig.skip_binder(); + + abi == impl_sig.abi + && c_variadic == impl_sig.c_variadic + && safety == impl_sig.safety + && inputs_and_output.len() == impl_sig.inputs_and_output.len() + && iter::zip(inputs_and_output.iter(), impl_sig.inputs_and_output.iter()) + .all(|(obl, imp)| self.types_may_unify(obl, imp)) + } + _ => false, + }, + + // Impls cannot contain these types as these cannot be named directly. + ty::FnDef(..) | ty::Closure(..) | ty::CoroutineClosure(..) | ty::Coroutine(..) => false, + + // Placeholder types don't unify with anything on their own + ty::Placeholder(..) | ty::Bound(..) => false, + + // Depending on the value of `treat_obligation_params`, we either + // treat generic parameters like placeholders or like inference variables. + ty::Param(_) => match self.treat_obligation_params { + TreatParams::ForLookup => false, + TreatParams::AsCandidateKey => true, + }, + + ty::Infer(ty::IntVar(_)) => impl_ty.is_integral(), + + ty::Infer(ty::FloatVar(_)) => impl_ty.is_floating_point(), + + ty::Infer(_) => true, + + // As we're walking the whole type, it may encounter projections + // inside of binders and what not, so we're just going to assume that + // projections can unify with other stuff. + // + // Looking forward to lazy normalization this is the safer strategy anyways. + ty::Alias(..) => true, + + ty::Error(_) => true, + + ty::CoroutineWitness(..) 
=> {
+                panic!("unexpected obligation type: {:?}", obligation_ty)
+            }
+        }
+    }
+
+    pub fn consts_may_unify(self, obligation_ct: I::Const, impl_ct: I::Const) -> bool {
+        let impl_val = match impl_ct.kind() {
+            ty::ConstKind::Expr(_)
+            | ty::ConstKind::Param(_)
+            | ty::ConstKind::Unevaluated(_)
+            | ty::ConstKind::Error(_) => {
+                return true;
+            }
+            ty::ConstKind::Value(_, impl_val) => impl_val,
+            ty::ConstKind::Infer(_) | ty::ConstKind::Bound(..) | ty::ConstKind::Placeholder(_) => {
+                panic!("unexpected impl arg: {:?}", impl_ct)
+            }
+        };
+
+        match obligation_ct.kind() {
+            ty::ConstKind::Param(_) => match self.treat_obligation_params {
+                TreatParams::ForLookup => false,
+                TreatParams::AsCandidateKey => true,
+            },
+
+            // Placeholder consts don't unify with anything on their own
+            ty::ConstKind::Placeholder(_) => false,
+
+            // As we don't necessarily eagerly evaluate constants,
+            // they might unify with any value.
+            ty::ConstKind::Expr(_) | ty::ConstKind::Unevaluated(_) | ty::ConstKind::Error(_) => {
+                true
+            }
+            ty::ConstKind::Value(_, obl_val) => obl_val == impl_val,
+
+            ty::ConstKind::Infer(_) => true,
+
+            ty::ConstKind::Bound(..) => {
+                panic!("unexpected obl const: {:?}", obligation_ct)
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_type_ir/src/inherent.rs b/compiler/rustc_type_ir/src/inherent.rs
index a4e1a97d505..ffe16964ae5 100644
--- a/compiler/rustc_type_ir/src/inherent.rs
+++ b/compiler/rustc_type_ir/src/inherent.rs
@@ -120,6 +120,14 @@ fn is_ty_var(self) -> bool {
         matches!(self.kind(), ty::Infer(ty::TyVar(_)))
     }
 
+    fn is_floating_point(self) -> bool {
+        matches!(self.kind(), ty::Float(_) | ty::Infer(ty::FloatVar(_)))
+    }
+
+    fn is_integral(self) -> bool {
+        matches!(self.kind(), ty::Infer(ty::IntVar(_)) | ty::Int(_) | ty::Uint(_))
+    }
+
     fn is_fn_ptr(self) -> bool {
         matches!(self.kind(), ty::FnPtr(_))
     }
diff --git a/compiler/rustc_type_ir/src/interner.rs b/compiler/rustc_type_ir/src/interner.rs
index 6665158c7cd..eaa3ab7ce43 100644
--- a/compiler/rustc_type_ir/src/interner.rs
+++ b/compiler/rustc_type_ir/src/interner.rs
@@ -222,13 +222,6 @@ fn explicit_super_predicates_of(
 
     fn associated_type_def_ids(self, def_id: Self::DefId) -> impl IntoIterator<Item = Self::DefId>;
 
-    // FIXME: move `fast_reject` into `rustc_type_ir`.
-    fn args_may_unify_deep(
-        self,
-        obligation_args: Self::GenericArgs,
-        impl_args: Self::GenericArgs,
-    ) -> bool;
-
     fn for_each_relevant_impl(
         self,
         trait_def_id: Self::DefId,
diff --git a/compiler/rustc_type_ir/src/lib.rs b/compiler/rustc_type_ir/src/lib.rs
index d7442e7c89c..1b5529cd8db 100644
--- a/compiler/rustc_type_ir/src/lib.rs
+++ b/compiler/rustc_type_ir/src/lib.rs
@@ -21,6 +21,7 @@ pub mod codec;
 pub mod data_structures;
 pub mod error;
+pub mod fast_reject;
 pub mod fold;
 pub mod inherent;
 pub mod ir_print;
diff --git a/library/core/src/clone.rs b/library/core/src/clone.rs
index d7ce65f6c53..939b2be6dfa 100644
--- a/library/core/src/clone.rs
+++ b/library/core/src/clone.rs
@@ -230,7 +230,7 @@ pub struct AssertParamIsCopy<T: Copy + ?Sized> {
 pub unsafe trait CloneToUninit {
     /// Performs copy-assignment from `self` to `dst`.
     ///
-    /// This is analogous to to `std::ptr::write(dst, self.clone())`,
+    /// This is analogous to `std::ptr::write(dst, self.clone())`,
     /// except that `self` may be a dynamically-sized type ([`!Sized`](Sized)).
     ///
     /// Before this function is called, `dst` may point to uninitialized memory.
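The `DeepRejectCtxt` moved into `rustc_type_ir` above is a deliberately one-sided filter: answering `true` too often merely costs a slower full candidate check later, while answering `false` for a pair that could unify would wrongly skip an impl. A self-contained sketch of that contract on a toy type language (the `Ty` enum and function below are invented for illustration and are not rustc's API):

```rust
// A standalone sketch of the fast-reject contract above, on a toy type
// language. It may answer `true` for types that end up not unifying, but it
// must never answer `false` for types that could unify.
#[derive(PartialEq)]
enum Ty {
    Bool,
    Int,
    Param,             // impl-side: unifies with anything; obligation-side: like a placeholder
    Infer,             // inference variable
    Ref(Box<Ty>),
    Adt(u32, Vec<Ty>), // nominal type: def-id plus generic arguments
}

fn types_may_unify(obligation: &Ty, imp: &Ty) -> bool {
    match (obligation, imp) {
        // Impl-side params and obligation-side inference vars match anything.
        (_, Ty::Param) | (Ty::Infer, _) => true,
        // Obligation-side params act like placeholders (`TreatParams::ForLookup`).
        (Ty::Param, _) => false,
        // Rigid constructors must match structurally; recurse into arguments.
        (Ty::Ref(a), Ty::Ref(b)) => types_may_unify(a, b),
        (Ty::Adt(d1, args1), Ty::Adt(d2, args2)) => {
            d1 == d2
                && args1.len() == args2.len()
                && args1.iter().zip(args2).all(|(a, b)| types_may_unify(a, b))
        }
        // Remaining rigid scalars: structural equality.
        (a, b) => a == b,
    }
}

fn main() {
    // Obligation `Vec<_>` against an impl for `Vec<T>`: may unify.
    assert!(types_may_unify(&Ty::Adt(0, vec![Ty::Infer]), &Ty::Adt(0, vec![Ty::Param])));
    // Obligation `bool` against an impl for `&int`: rejected without a full probe.
    assert!(!types_may_unify(&Ty::Bool, &Ty::Ref(Box::new(Ty::Int))));
}
```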
diff --git a/library/core/src/iter/adapters/mod.rs b/library/core/src/iter/adapters/mod.rs
index 05a5f268905..1bde4488cc9 100644
--- a/library/core/src/iter/adapters/mod.rs
+++ b/library/core/src/iter/adapters/mod.rs
@@ -159,7 +159,7 @@ pub(crate) struct GenericShunt<'a, I, R> {
     residual: &'a mut Option<R>,
 }
 
-/// Process the given iterator as if it yielded a the item's `Try::Output`
+/// Process the given iterator as if it yielded the item's `Try::Output`
 /// type instead. Any `Try::Residual`s encountered will stop the inner iterator
 /// and be propagated back to the overall result.
 pub(crate) fn try_process<I, T, R, F, U>(iter: I, mut f: F) -> ChangeOutputType<I::Item, U>
diff --git a/library/core/src/num/dec2flt/lemire.rs b/library/core/src/num/dec2flt/lemire.rs
index 3bc052df7a6..01642e1b111 100644
--- a/library/core/src/num/dec2flt/lemire.rs
+++ b/library/core/src/num/dec2flt/lemire.rs
@@ -157,7 +157,7 @@ fn compute_product_approx(q: i64, w: u64, precision: usize) -> (u64, u64) {
     // Need to do a second multiplication to get better precision
     // for the lower product. This will always be exact
     // where q is < 55, since 5^55 < 2^128. If this wraps,
-    // then we need to need to round up the hi product.
+    // then we need to round up the hi product.
     let (_, second_hi) = full_multiplication(w, hi5);
     first_lo = first_lo.wrapping_add(second_hi);
     if second_hi > first_lo {
diff --git a/library/std/src/io/error/repr_bitpacked.rs b/library/std/src/io/error/repr_bitpacked.rs
index c9d3934ad70..a5cefe2292b 100644
--- a/library/std/src/io/error/repr_bitpacked.rs
+++ b/library/std/src/io/error/repr_bitpacked.rs
@@ -28,7 +28,7 @@
 //!
 //! # Layout
 //! Tagged values are 64 bits, with the 2 least significant bits used for the
-//! tag. This means there are there are 4 "variants":
+//! tag. This means there are 4 "variants":
 //!
 //! - **Tag 0b00**: The first variant is equivalent to
 //!   `ErrorData::SimpleMessage`, and holds a `&'static SimpleMessage` directly.
diff --git a/library/std/src/sys/pal/unix/args.rs b/library/std/src/sys/pal/unix/args.rs
index db2ec73148e..e2ec838b740 100644
--- a/library/std/src/sys/pal/unix/args.rs
+++ b/library/std/src/sys/pal/unix/args.rs
@@ -183,7 +183,7 @@ pub fn argc_argv() -> (isize, *const *const c_char) {
 // Use `_NSGetArgc` and `_NSGetArgv` on Apple platforms.
 //
 // Even though these have underscores in their names, they've been available
-// since since the first versions of both macOS and iOS, and are declared in
+// since the first versions of both macOS and iOS, and are declared in
 // the header `crt_externs.h`.
 //
 // NOTE: This header was added to the iOS 13.0 SDK, which has been the source
diff --git a/library/std/src/sys/pal/windows/alloc.rs b/library/std/src/sys/pal/windows/alloc.rs
index 681d1a5efe9..24c237b5eb0 100644
--- a/library/std/src/sys/pal/windows/alloc.rs
+++ b/library/std/src/sys/pal/windows/alloc.rs
@@ -190,7 +190,7 @@ unsafe fn allocate(layout: Layout, zeroed: bool) -> *mut u8 {
     // it, it is safe to write a header directly before it.
     unsafe { ptr::write((aligned as *mut Header).sub(1), Header(ptr)) };
 
-    // SAFETY: The returned pointer does not point to the to the start of an allocated block,
+    // SAFETY: The returned pointer does not point to the start of an allocated block,
     // but there is a header readable directly before it containing the location of the start
     // of the block.
aligned diff --git a/src/doc/book b/src/doc/book index 45c1a6d69ed..f1e49bf7a8e 160000 --- a/src/doc/book +++ b/src/doc/book @@ -1 +1 @@ -Subproject commit 45c1a6d69edfd1fc91fb7504cb73958dbd09441e +Subproject commit f1e49bf7a8ea6c31ce016a52b8a4f6e1ffcfbc64 diff --git a/src/doc/edition-guide b/src/doc/edition-guide index cb58c430b4e..941db8b3df4 160000 --- a/src/doc/edition-guide +++ b/src/doc/edition-guide @@ -1 +1 @@ -Subproject commit cb58c430b4e8054c2cb81d2d4434092c482a93d8 +Subproject commit 941db8b3df45fd46cd87b50a5c86714b91dcde9c diff --git a/src/doc/reference b/src/doc/reference index 0b805c65804..1ae3deebc3a 160000 --- a/src/doc/reference +++ b/src/doc/reference @@ -1 +1 @@ -Subproject commit 0b805c65804019b0ac8f2fe3117afad82a6069b8 +Subproject commit 1ae3deebc3ac16e276b6558e01420f8e605def08 diff --git a/src/doc/rust-by-example b/src/doc/rust-by-example index b1d97bd6113..658c6c27cb9 160000 --- a/src/doc/rust-by-example +++ b/src/doc/rust-by-example @@ -1 +1 @@ -Subproject commit b1d97bd6113aba732b2091ce093c76f2d05bb8a0 +Subproject commit 658c6c27cb975b92227936024816986c2d3716fb diff --git a/src/doc/rustc-dev-guide b/src/doc/rustc-dev-guide index aec82168dd3..d6e3a32a557 160000 --- a/src/doc/rustc-dev-guide +++ b/src/doc/rustc-dev-guide @@ -1 +1 @@ -Subproject commit aec82168dd3121289a194b381f56076fc789a4d2 +Subproject commit d6e3a32a557db5902e714604def8015d6bb7e0f7 diff --git a/src/librustdoc/html/render/write_shared.rs b/src/librustdoc/html/render/write_shared.rs index c806bf1cc66..8fd56eae37f 100644 --- a/src/librustdoc/html/render/write_shared.rs +++ b/src/librustdoc/html/render/write_shared.rs @@ -507,8 +507,7 @@ fn visit_item(&mut self, it: &Item) { // Be aware of `tests/rustdoc/type-alias/deeply-nested-112515.rs` which might regress. let Some(impl_did) = impl_item_id.as_def_id() else { continue }; let for_ty = self.cx.tcx().type_of(impl_did).skip_binder(); - let reject_cx = - DeepRejectCtxt { treat_obligation_params: TreatParams::AsCandidateKey }; + let reject_cx = DeepRejectCtxt::new(self.cx.tcx(), TreatParams::AsCandidateKey); if !reject_cx.types_may_unify(aliased_ty, for_ty) { continue; } diff --git a/src/tools/clippy/clippy_lints/src/empty_with_brackets.rs b/src/tools/clippy/clippy_lints/src/empty_with_brackets.rs index 745599b0e57..743ec5b9ea7 100644 --- a/src/tools/clippy/clippy_lints/src/empty_with_brackets.rs +++ b/src/tools/clippy/clippy_lints/src/empty_with_brackets.rs @@ -16,7 +16,7 @@ /// and it may be desirable to do so consistently for style. /// /// However, removing the brackets also introduces a public constant named after the struct, - /// so this is not just a syntactic simplification but an an API change, and adding them back + /// so this is not just a syntactic simplification but an API change, and adding them back /// is a *breaking* API change. /// /// ### Example @@ -44,7 +44,7 @@ /// and it may be desirable to do so consistently for style. /// /// However, removing the brackets also introduces a public constant named after the variant, - /// so this is not just a syntactic simplification but an an API change, and adding them back + /// so this is not just a syntactic simplification but an API change, and adding them back /// is a *breaking* API change. 
 ///
 /// ### Example
diff --git a/src/tools/clippy/clippy_lints/src/non_copy_const.rs b/src/tools/clippy/clippy_lints/src/non_copy_const.rs
index 5cb8e7bfab2..964d199bfcb 100644
--- a/src/tools/clippy/clippy_lints/src/non_copy_const.rs
+++ b/src/tools/clippy/clippy_lints/src/non_copy_const.rs
@@ -258,7 +258,7 @@ fn is_value_unfrozen_raw(
         // e.g. implementing `has_frozen_variant` described above, and not running this function
         // when the type doesn't have any frozen variants would be the 'correct' way for the 2nd
         // case (that actually removes another suboptimal behavior (I won't say 'false positive') where,
-        // similar to 2., but with the a frozen variant) (e.g. borrowing
+        // similar to 2., but with a frozen variant) (e.g. borrowing
        // `borrow_interior_mutable_const::enums::AssocConsts::TO_BE_FROZEN_VARIANT`).
        // I chose this way because unfrozen enums as assoc consts are rare (or, hopefully, none).
        matches!(err, ErrorHandled::TooGeneric(..))
diff --git a/src/tools/miri/tests/pass/async-closure.rs b/src/tools/miri/tests/pass/async-closure.rs
index 2f7ec2b9e6f..721af578883 100644
--- a/src/tools/miri/tests/pass/async-closure.rs
+++ b/src/tools/miri/tests/pass/async-closure.rs
@@ -1,7 +1,8 @@
 #![feature(async_closure, noop_waker, async_fn_traits)]
+#![allow(unused)]
 
 use std::future::Future;
-use std::ops::{AsyncFnMut, AsyncFnOnce};
+use std::ops::{AsyncFn, AsyncFnMut, AsyncFnOnce};
 use std::pin::pin;
 use std::task::*;
 
@@ -17,6 +18,10 @@ pub fn block_on<T>(fut: impl Future<Output = T>) -> T {
     }
 }
 
+async fn call(f: &mut impl AsyncFn(i32)) {
+    f(0).await;
+}
+
 async fn call_mut(f: &mut impl AsyncFnMut(i32)) {
     f(0).await;
 }
 
@@ -26,10 +31,10 @@ async fn call_once(f: impl AsyncFnOnce(i32)) {
 }
 
 async fn call_normal<F: Future<Output = ()>>(f: &impl Fn(i32) -> F) {
-    f(0).await;
+    f(1).await;
 }
 
-async fn call_normal_once<F: Future<Output = ()>>(f: impl FnOnce(i32) -> F) {
+async fn call_normal_mut<F: Future<Output = ()>>(f: &mut impl FnMut(i32) -> F) {
     f(1).await;
 }
 
@@ -39,14 +44,16 @@ pub fn main() {
     block_on(async {
         let b = 2i32;
         let mut async_closure = async move |a: i32| {
             println!("{a} {b}");
         };
+        call(&mut async_closure).await;
         call_mut(&mut async_closure).await;
         call_once(async_closure).await;
 
-        // No-capture closures implement `Fn`.
- let async_closure = async move |a: i32| { - println!("{a}"); + let b = 2i32; + let mut async_closure = async |a: i32| { + println!("{a} {b}"); }; call_normal(&async_closure).await; - call_normal_once(async_closure).await; + call_normal_mut(&mut async_closure).await; + call_once(async_closure).await; }); } diff --git a/src/tools/miri/tests/pass/async-closure.stdout b/src/tools/miri/tests/pass/async-closure.stdout index 7baae1aa94f..217944c84a2 100644 --- a/src/tools/miri/tests/pass/async-closure.stdout +++ b/src/tools/miri/tests/pass/async-closure.stdout @@ -1,4 +1,6 @@ 0 2 +0 2 +1 2 +1 2 +1 2 1 2 -0 -1 diff --git a/tests/codegen/cast-target-abi.rs b/tests/codegen/cast-target-abi.rs index c6a8b7bbf37..34e52d38bbe 100644 --- a/tests/codegen/cast-target-abi.rs +++ b/tests/codegen/cast-target-abi.rs @@ -1,6 +1,7 @@ // ignore-tidy-linelength -//@ revisions:aarch64 loongarch64 powerpc64 sparc64 -//@ compile-flags: -O -C no-prepopulate-passes +//@ revisions:aarch64 loongarch64 powerpc64 sparc64 x86_64 +// FIXME: Add `-Cllvm-args=--lint-abort-on-error` after LLVM 19 +//@ compile-flags: -O -C no-prepopulate-passes -C passes=lint //@[aarch64] compile-flags: --target aarch64-unknown-linux-gnu //@[aarch64] needs-llvm-components: arm @@ -10,6 +11,8 @@ //@[powerpc64] needs-llvm-components: powerpc //@[sparc64] compile-flags: --target sparc64-unknown-linux-gnu //@[sparc64] needs-llvm-components: sparc +//@[x86_64] compile-flags: --target x86_64-unknown-linux-gnu +//@[x86_64] needs-llvm-components: x86 // Tests that arguments with `PassMode::Cast` are handled correctly. @@ -60,30 +63,261 @@ pub struct DoubleFloat { g: f32, } -extern "C" { - fn receives_twou16s(x: TwoU16s); - fn returns_twou16s() -> TwoU16s; - - fn receives_fiveu16s(x: FiveU16s); - fn returns_fiveu16s() -> FiveU16s; - - fn receives_doubledouble(x: DoubleDouble); - fn returns_doubledouble() -> DoubleDouble; - - // These functions cause an ICE in sparc64 ABI code (https://github.com/rust-lang/rust/issues/122620) - #[cfg(not(target_arch = "sparc64"))] - fn receives_doublefloat(x: DoubleFloat); - #[cfg(not(target_arch = "sparc64"))] - fn returns_doublefloat() -> DoubleFloat; +// On x86_64, this struct will be passed as `{ i64, i32 }`. +// The load and store instructions will access 16 bytes, so we should allocate 16 bytes. 
+#[repr(C)] +pub struct Three32s { + a: u32, + b: u32, + c: u32, } -// CHECK-LABEL: @call_twou16s +// CHECK-LABEL: @receives_twou16s +// aarch64-SAME: ([[ABI_TYPE:i64]] {{.*}}[[ABI_VALUE:%.+]]) +// loongarch64-SAME: ([[ABI_TYPE:i64]] {{.*}}[[ABI_VALUE:%.+]]) +// powerpc64-SAME: ([[ABI_TYPE:i32]] {{.*}}[[ABI_VALUE:%.+]]) +// sparc64-SAME: ([[ABI_TYPE:i64]] {{.*}}[[ABI_VALUE:%.+]]) +// x86_64-SAME: ([[ABI_TYPE:i32]] {{.*}}[[ABI_VALUE:%.+]]) #[no_mangle] -pub unsafe fn call_twou16s() { +#[inline(never)] +pub extern "C" fn receives_twou16s(x: TwoU16s) { // aarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]] // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]] // powerpc64: [[ABI_ALLOCA:%.+]] = alloca [4 x i8], align [[ABI_ALIGN:4]] // sparc64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]] + // x86_64: [[ABI_ALLOCA:%.+]] = alloca [4 x i8], align [[ABI_ALIGN:4]] + + // CHECK: [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]] + + // CHECK: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + + // CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 4, i1 false) +} + +// CHECK-LABEL: @returns_twou16s +// powerpc64-SAME: sret([4 x i8]) align [[RUST_ALIGN:2]] {{.*}}[[RET_PTR:%.*]]) +#[no_mangle] +#[inline(never)] +pub extern "C" fn returns_twou16s() -> TwoU16s { + // powerpc returns this struct via sret pointer, it doesn't use the cast ABI. + // The other targets copy the cast ABI type to an alloca. + + // aarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:2]] + // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:2]] + // sparc64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:2]] + // x86_64: [[ABI_ALLOCA:%.+]] = alloca [4 x i8], align [[ABI_ALIGN:2]] + + // aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i64]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i64]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i64]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i32]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + + // aarch64: ret [[ABI_TYPE]] [[ABI_VALUE]] + // loongarch64: ret [[ABI_TYPE]] [[ABI_VALUE]] + // sparc64: ret [[ABI_TYPE]] [[ABI_VALUE]] + // x86_64: ret [[ABI_TYPE]] [[ABI_VALUE]] + TwoU16s { a: 0, b: 1 } +} + +// CHECK-LABEL: @receives_fiveu16s +// aarch64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) +// loongarch64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) +// powerpc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) +// sparc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) +// x86_64-SAME: ([[ABI_TYPE:{ i64, i16 }]] {{.*}}[[ABI_VALUE:%.+]]) +#[no_mangle] +#[inline(never)] +pub extern "C" fn receives_fiveu16s(x: FiveU16s) { + // CHECK: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + + // CHECK: [[RUST_ALLOCA:%.+]] = alloca [10 x i8], align [[RUST_ALIGN:2]] + + // CHECK: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + + // CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 10, i1 false) +} + +// CHECK-LABEL: @returns_fiveu16s +// powerpc64-SAME: sret([10 x i8]) align [[RUST_ALIGN:2]] {{.*}}[[RET_PTR:%.*]]) +#[no_mangle] +#[inline(never)] +pub extern "C" fn returns_fiveu16s() -> FiveU16s { + // powerpc 
returns this struct via sret pointer, it doesn't use the cast ABI. + // The other targets copy the cast ABI type to an alloca. + + // aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:2]] + // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:2]] + // sparc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:2]] + // x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:2]] + + // aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i16 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + + // aarch64: ret [[ABI_TYPE]] [[ABI_VALUE]] + // loongarch64: ret [[ABI_TYPE]] [[ABI_VALUE]] + // sparc64: ret [[ABI_TYPE]] [[ABI_VALUE]] + // x86_64: ret [[ABI_TYPE]] [[ABI_VALUE]] + FiveU16s { a: 0, b: 1, c: 2, d: 3, e: 4 } +} + +// CHECK-LABEL: @receives_doubledouble +// aarch64-SAME: ([[ABI_TYPE:\[2 x double\]]] {{.*}}[[ABI_VALUE:%.+]]) +// loongarch64-SAME: ([[ABI_TYPE:{ double, double }]] {{.*}}[[ABI_VALUE:%.+]]) +// powerpc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) +// sparc64-SAME: ([[ABI_TYPE:{ double, double }]] {{.*}}[[ABI_VALUE:%.+]]) +// x86_64-SAME: ([[ABI_TYPE:{ double, double }]] {{.*}}[[ABI_VALUE:%.+]]) +#[no_mangle] +#[inline(never)] +pub extern "C" fn receives_doubledouble(x: DoubleDouble) { + // CHECK: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + + // CHECK: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]] + + // CHECK: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + + // CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false) +} + +// CHECK-LABEL: @returns_doubledouble +// powerpc64-SAME: sret([16 x i8]) align [[RUST_ALIGN:8]] {{.*}}[[RET_PTR:%.*]]) +#[no_mangle] +#[inline(never)] +pub extern "C" fn returns_doubledouble() -> DoubleDouble { + // powerpc returns this struct via sret pointer, it doesn't use the cast ABI. + // The other targets copy the cast ABI type to an alloca. + + // aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + // sparc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + // x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + + // aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x double\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + + // aarch64: ret [[ABI_TYPE]] [[ABI_VALUE]] + // loongarch64: ret [[ABI_TYPE]] [[ABI_VALUE]] + // sparc64: ret [[ABI_TYPE]] [[ABI_VALUE]] + // x86_64: ret [[ABI_TYPE]] [[ABI_VALUE]] + DoubleDouble { f: 0., g: 1. 
} +} + +// CHECK-LABEL: @receives_three32s +// aarch64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) +// loongarch64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) +// powerpc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) +// sparc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) +// x86_64-SAME: ([[ABI_TYPE:{ i64, i32 }]] {{.*}}[[ABI_VALUE:%.+]]) +#[no_mangle] +#[inline(never)] +pub extern "C" fn receives_three32s(x: Three32s) { + // CHECK: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + + // CHECK: [[RUST_ALLOCA:%.+]] = alloca [12 x i8], align [[RUST_ALIGN:4]] + + // CHECK: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + + // CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 12, i1 false) +} + +// CHECK-LABEL: @returns_three32s +// powerpc64-SAME: sret([12 x i8]) align [[RUST_ALIGN:4]] {{.*}}[[RET_PTR:%.*]]) +#[no_mangle] +#[inline(never)] +pub extern "C" fn returns_three32s() -> Three32s { + // powerpc returns this struct via sret pointer, it doesn't use the cast ABI. + // The other targets copy the cast ABI type to an alloca. + + // aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:4]] + // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:4]] + // sparc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:4]] + // x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:4]] + + // aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i32 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + + // aarch64: ret [[ABI_TYPE]] [[ABI_VALUE]] + // loongarch64: ret [[ABI_TYPE]] [[ABI_VALUE]] + // sparc64: ret [[ABI_TYPE]] [[ABI_VALUE]] + // x86_64: ret [[ABI_TYPE]] [[ABI_VALUE]] + Three32s { a: 0, b: 0, c: 0 } +} + +// These functions cause an ICE in sparc64 ABI code (https://github.com/rust-lang/rust/issues/122620) +#[cfg(not(target_arch = "sparc64"))] +// aarch64-LABEL: @receives_doublefloat +// loongarch64-LABEL: @receives_doublefloat +// powerpc64-LABEL: @receives_doublefloat +// x86_64-LABEL: @receives_doublefloat + +// aarch64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) +// loongarch64-SAME: ([[ABI_TYPE:{ double, float }]] {{.*}}[[ABI_VALUE:%.+]]) +// powerpc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) +// x86_64-SAME: ([[ABI_TYPE:{ double, double }]] {{.*}}[[ABI_VALUE:%.+]]) +#[no_mangle] +#[inline(never)] +pub extern "C" fn receives_doublefloat(x: DoubleFloat) { + // aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + // powerpc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + // x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + + // aarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]] + // loongarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]] + // powerpc64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]] + // x86_64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]] + + // aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align 
[[ABI_ALIGN]] + // loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // powerpc64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // x86_64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + + // aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false) + // loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 12, i1 false) + // powerpc64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false) + // x86_64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false) +} + +#[cfg(not(target_arch = "sparc64"))] +// aarch64-LABEL: @returns_doublefloat +// loongarch64-LABEL: @returns_doublefloat +// powerpc64-LABEL: @returns_doublefloat +// x86_64-LABEL: @returns_doublefloat + +// powerpc64-SAME: sret([16 x i8]) align [[RUST_ALIGN:8]] {{.*}}[[RET_PTR:%.*]]) +#[no_mangle] +#[inline(never)] +pub extern "C" fn returns_doublefloat() -> DoubleFloat { + // powerpc returns this struct via sret pointer, it doesn't use the cast ABI. + // The other targets copy the cast ABI type to an alloca. + + // aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + // x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + + // aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, float }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + + // aarch64: ret [[ABI_TYPE]] [[ABI_VALUE]] + // loongarch64: ret [[ABI_TYPE]] [[ABI_VALUE]] + // x86_64: ret [[ABI_TYPE]] [[ABI_VALUE]] + DoubleFloat { f: 0., g: 0. } +} + +// CHECK-LABEL: @call_twou16s +#[no_mangle] +pub fn call_twou16s() { + // aarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]] + // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]] + // powerpc64: [[ABI_ALLOCA:%.+]] = alloca [4 x i8], align [[ABI_ALIGN:4]] + // sparc64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]] + // x86_64: [[ABI_ALLOCA:%.+]] = alloca [4 x i8], align [[ABI_ALIGN:4]] // CHECK: [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]] @@ -93,6 +327,7 @@ pub unsafe fn call_twou16s() { // loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i64]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i32]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i64]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:i32]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // CHECK: call void @receives_twou16s([[ABI_TYPE]] [[ABI_VALUE]]) let x = TwoU16s { a: 1, b: 2 }; @@ -101,7 +336,7 @@ pub unsafe fn call_twou16s() { // CHECK-LABEL: @return_twou16s #[no_mangle] -pub unsafe fn return_twou16s() -> TwoU16s { +pub fn return_twou16s() -> TwoU16s { // powerpc returns this struct via sret pointer, it doesn't use the cast ABI. 
// powerpc64: [[RETVAL:%.+]] = alloca [4 x i8], align 2 @@ -112,34 +347,45 @@ pub unsafe fn return_twou16s() -> TwoU16s { // aarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]] // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]] // sparc64: [[ABI_ALLOCA:%.+]] = alloca [8 x i8], align [[ABI_ALIGN:8]] + // x86_64: [[ABI_ALLOCA:%.+]] = alloca [4 x i8], align [[ABI_ALIGN:4]] // aarch64: [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]] // loongarch64: [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]] // sparc64: [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]] + // x86_64: [[RUST_ALLOCA:%.+]] = alloca [4 x i8], align [[RUST_ALIGN:2]] // aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:i64]] @returns_twou16s() // loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:i64]] @returns_twou16s() // sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:i64]] @returns_twou16s() + // x86_64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:i32]] @returns_twou16s() // aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // sparc64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // x86_64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 4, i1 false) // loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 4, i1 false) // sparc64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 4, i1 false) + // x86_64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 4, i1 false) returns_twou16s() } // CHECK-LABEL: @call_fiveu16s #[no_mangle] -pub unsafe fn call_fiveu16s() { +pub fn call_fiveu16s() { // CHECK: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] // CHECK: [[RUST_ALLOCA:%.+]] = alloca [10 x i8], align 2 // CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 10, i1 false) - // CHECK: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + + // aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i16 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // CHECK: call void @receives_fiveu16s([[ABI_TYPE]] [[ABI_VALUE]]) let x = FiveU16s { a: 1, b: 2, c: 3, d: 4, e: 5 }; receives_fiveu16s(x); @@ -148,7 +394,7 @@ pub unsafe fn call_fiveu16s() { // CHECK-LABEL: @return_fiveu16s // CHECK-SAME: (ptr {{.+}} sret([10 x i8]) align [[RUST_ALIGN:2]] dereferenceable(10) [[RET_PTR:%.+]]) #[no_mangle] -pub unsafe fn return_fiveu16s() -> FiveU16s { +pub fn return_fiveu16s() -> FiveU16s { // powerpc returns this struct via sret pointer, it doesn't use the cast ABI. 
// powerpc64: call void @returns_fiveu16s(ptr {{.+}} [[RET_PTR]]) @@ -158,24 +404,28 @@ pub unsafe fn return_fiveu16s() -> FiveU16s { // aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] // sparc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + // x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] // aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_fiveu16s() // loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_fiveu16s() // sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_fiveu16s() + // x86_64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ i64, i16 }]] @returns_fiveu16s() // aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // sparc64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // x86_64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RET_PTR]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 10, i1 false) // loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RET_PTR]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 10, i1 false) // sparc64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RET_PTR]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 10, i1 false) + // x86_64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RET_PTR]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 10, i1 false) returns_fiveu16s() } // CHECK-LABEL: @call_doubledouble #[no_mangle] -pub unsafe fn call_doubledouble() { +pub fn call_doubledouble() { // CHECK: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] // CHECK: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]] @@ -186,6 +436,7 @@ pub unsafe fn call_doubledouble() { // loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // CHECK: call void @receives_doubledouble([[ABI_TYPE]] [[ABI_VALUE]]) let x = DoubleDouble { f: 1., g: 2. }; @@ -194,7 +445,7 @@ pub unsafe fn call_doubledouble() { // CHECK-LABEL: @return_doubledouble #[no_mangle] -pub unsafe fn return_doubledouble() -> DoubleDouble { +pub fn return_doubledouble() -> DoubleDouble { // powerpc returns this struct via sret pointer, it doesn't use the cast ABI. 
// powerpc64: [[RETVAL:%.+]] = alloca [16 x i8], align 8 @@ -205,22 +456,27 @@ pub unsafe fn return_doubledouble() -> DoubleDouble { // aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] // sparc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + // x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] // aarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]] // loongarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]] // sparc64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]] + // x86_64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]] // aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x double\]]] @returns_doubledouble() // loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ double, double }]] @returns_doubledouble() // sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ double, double }]] @returns_doubledouble() + // x86_64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ double, double }]] @returns_doubledouble() // aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // sparc64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // x86_64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false) // loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false) // sparc64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false) + // x86_64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false) returns_doubledouble() } @@ -229,27 +485,33 @@ pub unsafe fn return_doubledouble() -> DoubleDouble { // aarch64-LABEL: @call_doublefloat // loongarch64-LABEL: @call_doublefloat // powerpc64-LABEL: @call_doublefloat +// x86_64-LABEL: @call_doublefloat #[no_mangle] -pub unsafe fn call_doublefloat() { +pub fn call_doublefloat() { // aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] - // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [12 x i8], align [[ABI_ALIGN:8]] + // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] // powerpc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + // x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] // aarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]] // loongarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]] // powerpc64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]] + // x86_64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]] // aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 16, i1 false) // loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 12, i1 false) // powerpc64: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 16, i1 false) + // x86_64: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] 
[[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 16, i1 false) // aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, float }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ double, double }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // aarch64: call void @receives_doublefloat([[ABI_TYPE]] {{(inreg )?}}[[ABI_VALUE]]) // loongarch64: call void @receives_doublefloat([[ABI_TYPE]] {{(inreg )?}}[[ABI_VALUE]]) // powerpc64: call void @receives_doublefloat([[ABI_TYPE]] {{(inreg )?}}[[ABI_VALUE]]) + // x86_64: call void @receives_doublefloat([[ABI_TYPE]] {{(inreg )?}}[[ABI_VALUE]]) let x = DoubleFloat { f: 1., g: 2. }; receives_doublefloat(x); } @@ -259,8 +521,9 @@ pub unsafe fn call_doublefloat() { // aarch64-LABEL: @return_doublefloat // loongarch64-LABEL: @return_doublefloat // powerpc64-LABEL: @return_doublefloat +// x86_64-LABEL: @return_doublefloat #[no_mangle] -pub unsafe fn return_doublefloat() -> DoubleFloat { +pub fn return_doublefloat() -> DoubleFloat { // powerpc returns this struct via sret pointer, it doesn't use the cast ABI. // powerpc64: [[RETVAL:%.+]] = alloca [16 x i8], align 8 @@ -269,18 +532,72 @@ pub unsafe fn return_doublefloat() -> DoubleFloat { // The other targets copy the cast ABI type to an alloca. // aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] - // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [12 x i8], align [[ABI_ALIGN:8]] + // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + // x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] // aarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]] // loongarch64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]] + // x86_64: [[RUST_ALLOCA:%.+]] = alloca [16 x i8], align [[RUST_ALIGN:8]] // aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_doublefloat() // loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ double, float }]] @returns_doublefloat() + // x86_64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ double, double }]] @returns_doublefloat() // aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // x86_64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false) // loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 12, i1 false) + // x86_64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 16, i1 false) returns_doublefloat() } + +// CHECK-LABEL: @call_three32s +#[no_mangle] +pub fn call_three32s() { + // CHECK: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + // CHECK: [[RUST_ALLOCA:%.+]] = alloca [12 x i8], align [[RUST_ALIGN:4]] + // CHECK: call void @llvm.memcpy.{{.+}}(ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], ptr align [[RUST_ALIGN]] [[RUST_ALLOCA]], i64 12, i1 false) + + // aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // loongarch64: [[ABI_VALUE:%.+]] = load 
[[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i32 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + + // CHECK: call void @receives_three32s([[ABI_TYPE]] [[ABI_VALUE]]) + let x = Three32s { a: 1, b: 2, c: 3 }; + receives_three32s(x); +} + +// Regression test for #75839 +// CHECK-LABEL: @return_three32s( +// CHECK-SAME: sret([12 x i8]) align [[RUST_ALIGN:4]] {{.*}}[[RUST_RETVAL:%.*]]) +#[no_mangle] +pub fn return_three32s() -> Three32s { + // powerpc returns this struct via sret pointer, it doesn't use the cast ABI. + + // powerpc64: call void @returns_three32s(ptr {{.+}} [[RUST_RETVAL]]) + + // aarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + // loongarch64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + // sparc64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + // x86_64: [[ABI_ALLOCA:%.+]] = alloca [16 x i8], align [[ABI_ALIGN:8]] + + // aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_three32s() + // loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_three32s() + // sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_three32s() + // x86_64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ i64, i32 }]] @returns_three32s() + + // aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // loongarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // sparc64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // x86_64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + + // aarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_RETVAL]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 12, i1 false) + // loongarch64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_RETVAL]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 12, i1 false) + // sparc64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_RETVAL]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 12, i1 false) + // x86_64: call void @llvm.memcpy.{{.+}}(ptr align [[RUST_ALIGN]] [[RUST_RETVAL]], ptr align [[ABI_ALIGN]] [[ABI_ALLOCA]], i64 12, i1 false) + returns_three32s() +} diff --git a/tests/codegen/cffi/ffi-out-of-bounds-loads.rs b/tests/codegen/cffi/ffi-out-of-bounds-loads.rs index 614d5d94f62..a4b7c0caa6d 100644 --- a/tests/codegen/cffi/ffi-out-of-bounds-loads.rs +++ b/tests/codegen/cffi/ffi-out-of-bounds-loads.rs @@ -1,5 +1,5 @@ //@ revisions: linux apple -//@ compile-flags: -C opt-level=0 -C no-prepopulate-passes +//@ compile-flags: -C opt-level=0 -C no-prepopulate-passes -C passes=lint //@[linux] compile-flags: --target x86_64-unknown-linux-gnu //@[linux] needs-llvm-components: x86 @@ -36,7 +36,7 @@ struct S { pub fn test() { let s = S { f1: 1, f2: 2, f3: 3 }; unsafe { - // CHECK: [[ALLOCA:%.+]] = alloca [12 x i8], align 8 + // CHECK: [[ALLOCA:%.+]] = alloca [16 x i8], align 8 // CHECK: [[LOAD:%.+]] = load { i64, i32 }, ptr [[ALLOCA]], align 8 // CHECK: call void @foo({ i64, i32 } [[LOAD]]) foo(s); diff --git a/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#0}-{closure#0}.coroutine_by_move.0.panic-abort.mir 
b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#0}-{closure#0}.coroutine_by_move.0.panic-abort.mir index 06028487d01..1c34955a8d9 100644 --- a/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#0}-{closure#0}.coroutine_by_move.0.panic-abort.mir +++ b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#0}-{closure#0}.coroutine_by_move.0.panic-abort.mir @@ -1,6 +1,6 @@ // MIR for `main::{closure#0}::{closure#0}::{closure#0}` 0 coroutine_by_move -fn main::{closure#0}::{closure#0}::{closure#0}(_1: {async closure body@$DIR/async_closure_shims.rs:42:53: 45:10}, _2: ResumeTy) -> () +fn main::{closure#0}::{closure#0}::{closure#0}(_1: {async closure body@$DIR/async_closure_shims.rs:53:53: 56:10}, _2: ResumeTy) -> () yields () { debug _task_context => _2; diff --git a/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#0}-{closure#0}.coroutine_by_move.0.panic-unwind.mir b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#0}-{closure#0}.coroutine_by_move.0.panic-unwind.mir index 06028487d01..1c34955a8d9 100644 --- a/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#0}-{closure#0}.coroutine_by_move.0.panic-unwind.mir +++ b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#0}-{closure#0}.coroutine_by_move.0.panic-unwind.mir @@ -1,6 +1,6 @@ // MIR for `main::{closure#0}::{closure#0}::{closure#0}` 0 coroutine_by_move -fn main::{closure#0}::{closure#0}::{closure#0}(_1: {async closure body@$DIR/async_closure_shims.rs:42:53: 45:10}, _2: ResumeTy) -> () +fn main::{closure#0}::{closure#0}::{closure#0}(_1: {async closure body@$DIR/async_closure_shims.rs:53:53: 56:10}, _2: ResumeTy) -> () yields () { debug _task_context => _2; diff --git a/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#0}.coroutine_closure_by_move.0.panic-abort.mir b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#0}.coroutine_closure_by_move.0.panic-abort.mir index 93447b1388d..a984845fd2c 100644 --- a/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#0}.coroutine_closure_by_move.0.panic-abort.mir +++ b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#0}.coroutine_closure_by_move.0.panic-abort.mir @@ -1,10 +1,10 @@ // MIR for `main::{closure#0}::{closure#0}` 0 coroutine_closure_by_move -fn main::{closure#0}::{closure#0}(_1: {async closure@$DIR/async_closure_shims.rs:42:33: 42:52}, _2: i32) -> {async closure body@$DIR/async_closure_shims.rs:42:53: 45:10} { - let mut _0: {async closure body@$DIR/async_closure_shims.rs:42:53: 45:10}; +fn main::{closure#0}::{closure#0}(_1: {async closure@$DIR/async_closure_shims.rs:53:33: 53:52}, _2: i32) -> {async closure body@$DIR/async_closure_shims.rs:53:53: 56:10} { + let mut _0: {async closure body@$DIR/async_closure_shims.rs:53:53: 56:10}; bb0: { - _0 = {coroutine@$DIR/async_closure_shims.rs:42:53: 45:10 (#0)} { a: move _2, b: move (_1.0: i32) }; + _0 = {coroutine@$DIR/async_closure_shims.rs:53:53: 56:10 (#0)} { a: move _2, b: move (_1.0: i32) }; return; } } diff --git a/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#0}.coroutine_closure_by_move.0.panic-unwind.mir b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#0}.coroutine_closure_by_move.0.panic-unwind.mir index 93447b1388d..a984845fd2c 100644 --- a/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#0}.coroutine_closure_by_move.0.panic-unwind.mir +++ b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#0}.coroutine_closure_by_move.0.panic-unwind.mir @@ -1,10 +1,10 @@ // MIR for 
`main::{closure#0}::{closure#0}` 0 coroutine_closure_by_move -fn main::{closure#0}::{closure#0}(_1: {async closure@$DIR/async_closure_shims.rs:42:33: 42:52}, _2: i32) -> {async closure body@$DIR/async_closure_shims.rs:42:53: 45:10} { - let mut _0: {async closure body@$DIR/async_closure_shims.rs:42:53: 45:10}; +fn main::{closure#0}::{closure#0}(_1: {async closure@$DIR/async_closure_shims.rs:53:33: 53:52}, _2: i32) -> {async closure body@$DIR/async_closure_shims.rs:53:53: 56:10} { + let mut _0: {async closure body@$DIR/async_closure_shims.rs:53:53: 56:10}; bb0: { - _0 = {coroutine@$DIR/async_closure_shims.rs:42:53: 45:10 (#0)} { a: move _2, b: move (_1.0: i32) }; + _0 = {coroutine@$DIR/async_closure_shims.rs:53:53: 56:10 (#0)} { a: move _2, b: move (_1.0: i32) }; return; } } diff --git a/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}-{closure#0}.coroutine_by_move.0.panic-abort.mir b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}-{closure#0}.coroutine_by_move.0.panic-abort.mir new file mode 100644 index 00000000000..516908144a6 --- /dev/null +++ b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}-{closure#0}.coroutine_by_move.0.panic-abort.mir @@ -0,0 +1,47 @@ +// MIR for `main::{closure#0}::{closure#1}::{closure#0}` 0 coroutine_by_move + +fn main::{closure#0}::{closure#1}::{closure#0}(_1: {async closure body@$DIR/async_closure_shims.rs:62:48: 65:10}, _2: ResumeTy) -> () +yields () + { + debug _task_context => _2; + debug a => (_1.0: i32); + debug b => (*(_1.1: &i32)); + let mut _0: (); + let _3: i32; + scope 1 { + debug a => _3; + let _4: &i32; + scope 2 { + debug a => _4; + let _5: &i32; + scope 3 { + debug b => _5; + } + } + } + + bb0: { + StorageLive(_3); + _3 = (_1.0: i32); + FakeRead(ForLet(None), _3); + StorageLive(_4); + _4 = &_3; + FakeRead(ForLet(None), _4); + StorageLive(_5); + _5 = &(*(_1.1: &i32)); + FakeRead(ForLet(None), _5); + _0 = const (); + StorageDead(_5); + StorageDead(_4); + StorageDead(_3); + drop(_1) -> [return: bb1, unwind: bb2]; + } + + bb1: { + return; + } + + bb2 (cleanup): { + resume; + } +} diff --git a/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}-{closure#0}.coroutine_by_move.0.panic-unwind.mir b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}-{closure#0}.coroutine_by_move.0.panic-unwind.mir new file mode 100644 index 00000000000..516908144a6 --- /dev/null +++ b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}-{closure#0}.coroutine_by_move.0.panic-unwind.mir @@ -0,0 +1,47 @@ +// MIR for `main::{closure#0}::{closure#1}::{closure#0}` 0 coroutine_by_move + +fn main::{closure#0}::{closure#1}::{closure#0}(_1: {async closure body@$DIR/async_closure_shims.rs:62:48: 65:10}, _2: ResumeTy) -> () +yields () + { + debug _task_context => _2; + debug a => (_1.0: i32); + debug b => (*(_1.1: &i32)); + let mut _0: (); + let _3: i32; + scope 1 { + debug a => _3; + let _4: &i32; + scope 2 { + debug a => _4; + let _5: &i32; + scope 3 { + debug b => _5; + } + } + } + + bb0: { + StorageLive(_3); + _3 = (_1.0: i32); + FakeRead(ForLet(None), _3); + StorageLive(_4); + _4 = &_3; + FakeRead(ForLet(None), _4); + StorageLive(_5); + _5 = &(*(_1.1: &i32)); + FakeRead(ForLet(None), _5); + _0 = const (); + StorageDead(_5); + StorageDead(_4); + StorageDead(_3); + drop(_1) -> [return: bb1, unwind: bb2]; + } + + bb1: { + return; + } + + bb2 (cleanup): { + resume; + } +} diff --git a/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}.coroutine_closure_by_move.0.panic-abort.mir 
b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}.coroutine_closure_by_move.0.panic-abort.mir new file mode 100644 index 00000000000..aab9f7b03b9 --- /dev/null +++ b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}.coroutine_closure_by_move.0.panic-abort.mir @@ -0,0 +1,10 @@ +// MIR for `main::{closure#0}::{closure#1}` 0 coroutine_closure_by_move + +fn main::{closure#0}::{closure#1}(_1: {async closure@$DIR/async_closure_shims.rs:62:33: 62:47}, _2: i32) -> {async closure body@$DIR/async_closure_shims.rs:62:48: 65:10} { + let mut _0: {async closure body@$DIR/async_closure_shims.rs:62:48: 65:10}; + + bb0: { + _0 = {coroutine@$DIR/async_closure_shims.rs:62:48: 65:10 (#0)} { a: move _2, b: move (_1.0: &i32) }; + return; + } +} diff --git a/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}.coroutine_closure_by_move.0.panic-unwind.mir b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}.coroutine_closure_by_move.0.panic-unwind.mir new file mode 100644 index 00000000000..aab9f7b03b9 --- /dev/null +++ b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}.coroutine_closure_by_move.0.panic-unwind.mir @@ -0,0 +1,10 @@ +// MIR for `main::{closure#0}::{closure#1}` 0 coroutine_closure_by_move + +fn main::{closure#0}::{closure#1}(_1: {async closure@$DIR/async_closure_shims.rs:62:33: 62:47}, _2: i32) -> {async closure body@$DIR/async_closure_shims.rs:62:48: 65:10} { + let mut _0: {async closure body@$DIR/async_closure_shims.rs:62:48: 65:10}; + + bb0: { + _0 = {coroutine@$DIR/async_closure_shims.rs:62:48: 65:10 (#0)} { a: move _2, b: move (_1.0: &i32) }; + return; + } +} diff --git a/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}.coroutine_closure_by_ref.0.panic-abort.mir b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}.coroutine_closure_by_ref.0.panic-abort.mir index cab7bdb7e3c..ba20c28cd01 100644 --- a/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}.coroutine_closure_by_ref.0.panic-abort.mir +++ b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}.coroutine_closure_by_ref.0.panic-abort.mir @@ -1,10 +1,10 @@ // MIR for `main::{closure#0}::{closure#1}` 0 coroutine_closure_by_ref -fn main::{closure#0}::{closure#1}(_1: &mut {async closure@$DIR/async_closure_shims.rs:49:29: 49:48}, _2: i32) -> {async closure body@$DIR/async_closure_shims.rs:49:49: 51:10} { - let mut _0: {async closure body@$DIR/async_closure_shims.rs:49:49: 51:10}; +fn main::{closure#0}::{closure#1}(_1: &{async closure@$DIR/async_closure_shims.rs:62:33: 62:47}, _2: i32) -> {async closure body@$DIR/async_closure_shims.rs:62:48: 65:10} { + let mut _0: {async closure body@$DIR/async_closure_shims.rs:62:48: 65:10}; bb0: { - _0 = {coroutine@$DIR/async_closure_shims.rs:49:49: 51:10 (#0)} { a: move _2 }; + _0 = {coroutine@$DIR/async_closure_shims.rs:62:48: 65:10 (#0)} { a: move _2, b: ((*_1).0: &i32) }; return; } } diff --git a/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}.coroutine_closure_by_ref.0.panic-unwind.mir b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}.coroutine_closure_by_ref.0.panic-unwind.mir index cab7bdb7e3c..ba20c28cd01 100644 --- a/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}.coroutine_closure_by_ref.0.panic-unwind.mir +++ b/tests/mir-opt/async_closure_shims.main-{closure#0}-{closure#1}.coroutine_closure_by_ref.0.panic-unwind.mir @@ -1,10 +1,10 @@ // MIR for `main::{closure#0}::{closure#1}` 0 coroutine_closure_by_ref -fn main::{closure#0}::{closure#1}(_1: 
&mut {async closure@$DIR/async_closure_shims.rs:49:29: 49:48}, _2: i32) -> {async closure body@$DIR/async_closure_shims.rs:49:49: 51:10} {
-    let mut _0: {async closure body@$DIR/async_closure_shims.rs:49:49: 51:10};
+fn main::{closure#0}::{closure#1}(_1: &{async closure@$DIR/async_closure_shims.rs:62:33: 62:47}, _2: i32) -> {async closure body@$DIR/async_closure_shims.rs:62:48: 65:10} {
+    let mut _0: {async closure body@$DIR/async_closure_shims.rs:62:48: 65:10};
 
     bb0: {
-        _0 = {coroutine@$DIR/async_closure_shims.rs:49:49: 51:10 (#0)} { a: move _2 };
+        _0 = {coroutine@$DIR/async_closure_shims.rs:62:48: 65:10 (#0)} { a: move _2, b: ((*_1).0: &i32) };
         return;
     }
 }
diff --git a/tests/mir-opt/async_closure_shims.rs b/tests/mir-opt/async_closure_shims.rs
index 7d226df6866..57c55ef055c 100644
--- a/tests/mir-opt/async_closure_shims.rs
+++ b/tests/mir-opt/async_closure_shims.rs
@@ -3,9 +3,10 @@
 // EMIT_MIR_FOR_EACH_PANIC_STRATEGY
 
 #![feature(async_closure, noop_waker, async_fn_traits)]
+#![allow(unused)]
 
 use std::future::Future;
-use std::ops::{AsyncFnMut, AsyncFnOnce};
+use std::ops::{AsyncFn, AsyncFnMut, AsyncFnOnce};
 use std::pin::pin;
 use std::task::*;
 
@@ -21,6 +22,10 @@ pub fn block_on<T>(fut: impl Future<Output = T>) -> T {
     }
 }
 
+async fn call(f: &mut impl AsyncFn(i32)) {
+    f(0).await;
+}
+
 async fn call_mut(f: &mut impl AsyncFnMut(i32)) {
     f(0).await;
 }
 
@@ -33,9 +38,15 @@ async fn call_normal<F: Future<Output = ()>>(f: &impl Fn(i32) -> F) {
     f(1).await;
 }
 
+async fn call_normal_mut<F: Future<Output = ()>>(f: &mut impl FnMut(i32) -> F) {
+    f(1).await;
+}
+
 // EMIT_MIR async_closure_shims.main-{closure#0}-{closure#0}.coroutine_closure_by_move.0.mir
 // EMIT_MIR async_closure_shims.main-{closure#0}-{closure#0}-{closure#0}.coroutine_by_move.0.mir
 // EMIT_MIR async_closure_shims.main-{closure#0}-{closure#1}.coroutine_closure_by_ref.0.mir
+// EMIT_MIR async_closure_shims.main-{closure#0}-{closure#1}.coroutine_closure_by_move.0.mir
+// EMIT_MIR async_closure_shims.main-{closure#0}-{closure#1}-{closure#0}.coroutine_by_move.0.mir
 pub fn main() {
     block_on(async {
         let b = 2i32;
@@ -43,12 +54,17 @@ pub fn main() {
             let a = &a;
             let b = &b;
         };
+        call(&mut async_closure).await;
         call_mut(&mut async_closure).await;
         call_once(async_closure).await;
 
-        let async_closure = async move |a: i32| {
+        let b = 2i32;
+        let mut async_closure = async |a: i32| {
             let a = &a;
+            let b = &b;
         };
         call_normal(&async_closure).await;
+        call_normal_mut(&mut async_closure).await;
+        call_once(async_closure).await;
     });
 }
diff --git a/tests/ui/async-await/async-closures/constrained-but-no-upvars-yet.rs b/tests/ui/async-await/async-closures/constrained-but-no-upvars-yet.rs
index a43906d01e5..3b222d00bae 100644
--- a/tests/ui/async-await/async-closures/constrained-but-no-upvars-yet.rs
+++ b/tests/ui/async-await/async-closures/constrained-but-no-upvars-yet.rs
@@ -1,5 +1,5 @@
 //@ edition: 2021
-//@ check-pass
+//@ build-pass
 //@ revisions: current next
 //@ ignore-compare-mode-next-solver (explicit revisions)
 //@[next] compile-flags: -Znext-solver
diff --git a/tests/ui/async-await/async-closures/force-move-due-to-actually-fnonce.rs b/tests/ui/async-await/async-closures/force-move-due-to-actually-fnonce.rs
index ce49f55e3e3..7244a29673b 100644
--- a/tests/ui/async-await/async-closures/force-move-due-to-actually-fnonce.rs
+++ b/tests/ui/async-await/async-closures/force-move-due-to-actually-fnonce.rs
@@ -1,6 +1,6 @@
 //@ aux-build:block-on.rs
 //@ edition:2021
-//@ check-pass
+//@ build-pass
 
 #![feature(async_closure)]
 
diff --git a/tests/ui/async-await/async-closures/force-move-due-to-inferred-kind.rs
diff --git a/tests/ui/async-await/async-closures/constrained-but-no-upvars-yet.rs b/tests/ui/async-await/async-closures/constrained-but-no-upvars-yet.rs index a43906d01e5..3b222d00bae 100644 --- a/tests/ui/async-await/async-closures/constrained-but-no-upvars-yet.rs +++ b/tests/ui/async-await/async-closures/constrained-but-no-upvars-yet.rs @@ -1,5 +1,5 @@ //@ edition: 2021 -//@ check-pass +//@ build-pass //@ revisions: current next //@ ignore-compare-mode-next-solver (explicit revisions) //@[next] compile-flags: -Znext-solver diff --git a/tests/ui/async-await/async-closures/force-move-due-to-actually-fnonce.rs b/tests/ui/async-await/async-closures/force-move-due-to-actually-fnonce.rs index ce49f55e3e3..7244a29673b 100644 --- a/tests/ui/async-await/async-closures/force-move-due-to-actually-fnonce.rs +++ b/tests/ui/async-await/async-closures/force-move-due-to-actually-fnonce.rs @@ -1,6 +1,6 @@ //@ aux-build:block-on.rs //@ edition:2021 -//@ check-pass +//@ build-pass #![feature(async_closure)] diff --git a/tests/ui/async-await/async-closures/force-move-due-to-inferred-kind.rs b/tests/ui/async-await/async-closures/force-move-due-to-inferred-kind.rs index 803c990ef93..7ce210a33c3 100644 --- a/tests/ui/async-await/async-closures/force-move-due-to-inferred-kind.rs +++ b/tests/ui/async-await/async-closures/force-move-due-to-inferred-kind.rs @@ -1,6 +1,6 @@ //@ aux-build:block-on.rs //@ edition:2021 -//@ check-pass +//@ build-pass #![feature(async_closure)] diff --git a/tests/ui/async-await/async-closures/implements-fnmut.rs b/tests/ui/async-await/async-closures/implements-fnmut.rs index 1ed326cd061..8e780ce9889 100644 --- a/tests/ui/async-await/async-closures/implements-fnmut.rs +++ b/tests/ui/async-await/async-closures/implements-fnmut.rs @@ -1,4 +1,4 @@ -//@ check-pass +//@ build-pass //@ edition: 2021 // Demonstrates that an async closure may implement `FnMut` (not just `async FnMut`!) @@ -9,9 +9,13 @@ #![feature(async_closure)] -fn main() {} +fn main() { + hello(&Ty); +} -fn needs_fn_mut<T>(x: impl FnMut() -> T) {} +fn needs_fn_mut<T>(mut x: impl FnMut() -> T) { + x(); +} fn hello(x: &Ty) { needs_fn_mut(async || { x.hello(); }); diff --git a/tests/ui/async-await/async-closures/signature-deduction.rs b/tests/ui/async-await/async-closures/signature-deduction.rs index 031dab10296..856f3963ee6 100644 --- a/tests/ui/async-await/async-closures/signature-deduction.rs +++ b/tests/ui/async-await/async-closures/signature-deduction.rs @@ -1,4 +1,4 @@ -//@ check-pass +//@ build-pass //@ edition: 2021 #![feature(async_closure)] diff --git a/tests/ui/closures/2229_closure_analysis/migrations/precise_no_migrations.rs b/tests/ui/closures/2229_closure_analysis/migrations/precise_no_migrations.rs index f46ec4b927a..4da87038577 100644 --- a/tests/ui/closures/2229_closure_analysis/migrations/precise_no_migrations.rs +++ b/tests/ui/closures/2229_closure_analysis/migrations/precise_no_migrations.rs @@ -48,7 +48,7 @@ fn drop(&mut self) { } // If a path isn't directly captured but requires Drop, then this tests that migrations aren't -// needed if the a parent to that path is captured. +// needed if the parent to that path is captured. fn test_precise_analysis_parent_captured_1() { let t = ConstainsDropField(Foo(10), Foo(20)); @@ -60,7 +60,7 @@ fn test_precise_analysis_parent_captured_1() { } // If a path isn't directly captured but requires Drop, then this tests that migrations aren't -// needed if the a parent to that path is captured. +// needed if the parent to that path is captured.
fn test_precise_analysis_parent_captured_2() { let t = ContainsAndImplsDrop(Foo(10)); diff --git a/tests/ui/const-generics/adt_const_params/nested_bad_const_param_ty.rs b/tests/ui/const-generics/adt_const_params/nested_bad_const_param_ty.rs new file mode 100644 index 00000000000..9f05c53eef0 --- /dev/null +++ b/tests/ui/const-generics/adt_const_params/nested_bad_const_param_ty.rs @@ -0,0 +1,18 @@ +#![feature(adt_const_params)] +#![allow(incomplete_features)] + +use std::marker::ConstParamTy; + +#[derive(ConstParamTy)] +//~^ the trait `ConstParamTy` cannot be implemented for this ty +struct Foo([*const u8; 1]); + +#[derive(ConstParamTy)] +//~^ the trait `ConstParamTy` cannot be implemented for this ty +struct Foo2([*mut u8; 1]); + +#[derive(ConstParamTy)] +//~^ the trait `ConstParamTy` cannot be implemented for this ty +struct Foo3([fn(); 1]); + +fn main() {} diff --git a/tests/ui/const-generics/adt_const_params/nested_bad_const_param_ty.stderr b/tests/ui/const-generics/adt_const_params/nested_bad_const_param_ty.stderr new file mode 100644 index 00000000000..9e772e8d55d --- /dev/null +++ b/tests/ui/const-generics/adt_const_params/nested_bad_const_param_ty.stderr @@ -0,0 +1,51 @@ +error[E0204]: the trait `ConstParamTy` cannot be implemented for this type + --> $DIR/nested_bad_const_param_ty.rs:6:10 + | +LL | #[derive(ConstParamTy)] + | ^^^^^^^^^^^^ +LL | +LL | struct Foo([*const u8; 1]); + | -------------- this field does not implement `ConstParamTy` + | +note: the `ConstParamTy` impl for `[*const u8; 1]` requires that `*const u8: ConstParamTy` + --> $DIR/nested_bad_const_param_ty.rs:8:12 + | +LL | struct Foo([*const u8; 1]); + | ^^^^^^^^^^^^^^ + = note: this error originates in the derive macro `ConstParamTy` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0204]: the trait `ConstParamTy` cannot be implemented for this type + --> $DIR/nested_bad_const_param_ty.rs:10:10 + | +LL | #[derive(ConstParamTy)] + | ^^^^^^^^^^^^ +LL | +LL | struct Foo2([*mut u8; 1]); + | ------------ this field does not implement `ConstParamTy` + | +note: the `ConstParamTy` impl for `[*mut u8; 1]` requires that `*mut u8: ConstParamTy` + --> $DIR/nested_bad_const_param_ty.rs:12:13 + | +LL | struct Foo2([*mut u8; 1]); + | ^^^^^^^^^^^^ + = note: this error originates in the derive macro `ConstParamTy` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0204]: the trait `ConstParamTy` cannot be implemented for this type + --> $DIR/nested_bad_const_param_ty.rs:14:10 + | +LL | #[derive(ConstParamTy)] + | ^^^^^^^^^^^^ +LL | +LL | struct Foo3([fn(); 1]); + | --------- this field does not implement `ConstParamTy` + | +note: the `ConstParamTy` impl for `[fn(); 1]` requires that `fn(): ConstParamTy` + --> $DIR/nested_bad_const_param_ty.rs:16:13 + | +LL | struct Foo3([fn(); 1]); + | ^^^^^^^^^ + = note: this error originates in the derive macro `ConstParamTy` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: aborting due to 3 previous errors + +For more information about this error, try `rustc --explain E0204`. 
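All three derives in the new test fail for the same reason: `[T; N]: ConstParamTy` holds only when `T: ConstParamTy`, and raw pointers and fn pointers never satisfy it. For contrast, here is a sketch of a nesting that does go through; `FooOk` and `take` are illustrative names, and the `PartialEq`/`Eq` derives are needed because `ConstParamTy` has `Eq` as a supertrait:

```rust
#![feature(adt_const_params)]
#![allow(incomplete_features)]

use std::marker::ConstParamTy;

// `u8` implements `ConstParamTy`, so `[u8; 1]` does too and the derive
// succeeds, unlike the `[*const u8; 1]`, `[*mut u8; 1]`, and `[fn(); 1]`
// fields rejected above.
#[derive(ConstParamTy, PartialEq, Eq)]
struct FooOk([u8; 1]);

// The wrapper can then appear as a const generic parameter.
fn take<const F: FooOk>() {}

fn main() {
    take::<{ FooOk([42; 1]) }>();
}
```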
diff --git a/tests/ui/imports/suggest-import-issue-120074.rs b/tests/ui/imports/suggest-import-issue-120074.rs new file mode 100644 index 00000000000..a798e9eeeb8 --- /dev/null +++ b/tests/ui/imports/suggest-import-issue-120074.rs @@ -0,0 +1,11 @@ +pub mod foo { + pub mod bar { + pub fn do_the_thing() -> usize { + 42 + } + } +} + +fn main() { + println!("Hello, {}!", crate::bar::do_the_thing); //~ ERROR failed to resolve: unresolved import +} diff --git a/tests/ui/imports/suggest-import-issue-120074.stderr b/tests/ui/imports/suggest-import-issue-120074.stderr new file mode 100644 index 00000000000..c1dff93bbdb --- /dev/null +++ b/tests/ui/imports/suggest-import-issue-120074.stderr @@ -0,0 +1,23 @@ +error[E0433]: failed to resolve: unresolved import + --> $DIR/suggest-import-issue-120074.rs:10:35 + | +LL | println!("Hello, {}!", crate::bar::do_the_thing); + | ^^^ unresolved import + | +help: a similar path exists + | +LL | println!("Hello, {}!", crate::foo::bar::do_the_thing); + | ~~~~~~~~ +help: consider importing this module + | +LL + use foo::bar; + | +help: if you import `bar`, refer to it directly + | +LL - println!("Hello, {}!", crate::bar::do_the_thing); +LL + println!("Hello, {}!", bar::do_the_thing); + | + +error: aborting due to 1 previous error + +For more information about this error, try `rustc --explain E0433`. diff --git a/tests/ui/traits/associated_type_bound/116464-invalid-assoc-type-suggestion-in-trait-impl.rs b/tests/ui/traits/associated_type_bound/116464-invalid-assoc-type-suggestion-in-trait-impl.rs index 445ea2de610..52b488101a8 100644 --- a/tests/ui/traits/associated_type_bound/116464-invalid-assoc-type-suggestion-in-trait-impl.rs +++ b/tests/ui/traits/associated_type_bound/116464-invalid-assoc-type-suggestion-in-trait-impl.rs @@ -11,7 +11,7 @@ impl<T> Trait<T> for i32 { type Assoc = String; } -// Should not not trigger suggestion here... +// Should not trigger suggestion here... impl<T, S> Trait<T, S> for () {} //~^ ERROR trait takes 1 generic argument but 2 generic arguments were supplied diff --git a/tests/ui/unpretty/expanded-interpolation.rs b/tests/ui/unpretty/expanded-interpolation.rs index 8f0e21ce870..1dc72c67f51 100644 --- a/tests/ui/unpretty/expanded-interpolation.rs +++ b/tests/ui/unpretty/expanded-interpolation.rs @@ -18,6 +18,26 @@ macro_rules! stmt { ($stmt:stmt) => { $stmt }; } +fn break_labeled_loop() { + let no_paren = 'outer: loop { + break 'outer expr!('inner: loop { break 'inner 1; } + 1); + }; + + let paren_around_break_value = 'outer: loop { + break expr!('inner: loop { break 'inner 1; } + 1); + }; + + macro_rules! breaking { + ($value:expr) => { + break $value + }; + } + + let paren_around_break_value = loop { + breaking!('inner: loop { break 'inner 1; } + 1); + }; +} + fn if_let() { macro_rules! if_let { ($pat:pat, $expr:expr) => { if let $pat = $expr {} }; } diff --git a/tests/ui/unpretty/expanded-interpolation.stdout b/tests/ui/unpretty/expanded-interpolation.stdout index 73322b50f2d..556e57dbd92 100644 --- a/tests/ui/unpretty/expanded-interpolation.stdout +++ b/tests/ui/unpretty/expanded-interpolation.stdout @@ -20,6 +20,19 @@ macro_rules! expr { ($expr:expr) => { $expr }; } macro_rules! stmt { ($stmt:stmt) => { $stmt }; } +fn break_labeled_loop() { + let no_paren = + 'outer: loop { break 'outer 'inner: loop { break 'inner 1; } + 1; }; + + let paren_around_break_value = + 'outer: loop { break ('inner: loop { break 'inner 1; } + 1); }; + + macro_rules!
breaking { ($value:expr) => { break $value }; } + + let paren_around_break_value = + loop { break ('inner: loop { break 'inner 1; } + 1); }; +} + fn if_let() { macro_rules! if_let { ($pat:pat, $expr:expr) => { if let $pat = $expr {} };
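The `no_paren` and `paren_around_break_value` expansions above pin down the rule being tested: a `break` that carries its own label may be followed directly by a labeled expression, but an unlabeled `break` must have such a value parenthesized, or the leading label would parse as the break's target. A standalone illustration (plain stable Rust, nothing assumed beyond the snippet itself):

```rust
fn main() {
    let v = loop {
        // With parentheses, the labeled loop is unambiguously the break value.
        // Without them, `break 'inner: loop { ... } + 1` fails to parse,
        // because `'inner` would be read as the label that `break` exits.
        break ('inner: loop { break 'inner 1 } + 1);
    };
    // The inner loop yields 1, plus 1 from the trailing addition.
    assert_eq!(v, 2);
}
```

These ui files are `-Zunpretty=expanded` snapshots, so the `.stdout` records exactly how the pretty-printer renders the macro-expanded source, parentheses included.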