Auto merge of #123385 - matthiaskrgr:rollup-v69vjbn, r=matthiaskrgr

Rollup of 8 pull requests

Successful merges:

 - #123198 (Add fn const BuildHasherDefault::new)
 - #123226 (De-LLVM the unchecked shifts [MCP#693])
 - #123302 (Make sure to insert `Sized` bound first into clauses list)
 - #123348 (rustdoc: add a couple of regression tests)
 - #123362 (Check that nested statics in thread locals are duplicated per thread.)
 - #123368 (CFI: Support non-general coroutines)
 - #123375 (rustdoc: synthetic auto trait impls: accept unresolved region vars for now)
 - #123378 (Update sysinfo to 0.30.8)

Failed merges:

 - #123349 (Fix capture analysis for by-move closure bodies)

r? `@ghost`
`@rustbot` modify labels: rollup

commit 88c2f4f5f5
@ -5370,9 +5370,9 @@ dependencies = [

[[package]]
name = "sysinfo"
version = "0.30.7"
version = "0.30.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c385888ef380a852a16209afc8cfad22795dd8873d69c9a14d2e2088f118d18"
checksum = "4b1a378e48fb3ce3a5cf04359c456c9c98ff689bcf1c1bc6e6a31f247686f275"
dependencies = [
"cfg-if",
"core-foundation-sys",
@ -5,7 +5,7 @@ use crate::back::write::{
compute_per_cgu_lto_type, start_async_codegen, submit_codegened_module_to_llvm,
submit_post_lto_module_to_llvm, submit_pre_lto_module_to_llvm, ComputedLtoType, OngoingCodegen,
};
use crate::common::{IntPredicate, RealPredicate, TypeKind};
use crate::common::{self, IntPredicate, RealPredicate, TypeKind};
use crate::errors;
use crate::meth;
use crate::mir;
@ -33,7 +33,7 @@ use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, MonoItem};
use rustc_middle::query::Providers;
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::config::{self, CrateType, EntryFnType, OutputType};
use rustc_session::config::{self, CrateType, EntryFnType, OptLevel, OutputType};
use rustc_session::Session;
use rustc_span::symbol::sym;
use rustc_span::Symbol;
@ -300,14 +300,35 @@ pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
}
}

pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
/// Returns `rhs` sufficiently masked, truncated, and/or extended so that
/// it can be used to shift `lhs`.
///
/// Shifts in MIR are all allowed to have mismatched LHS & RHS types.
/// The shift methods in `BuilderMethods`, however, are fully homogeneous
/// (both parameters and the return type are all the same type).
///
/// If `is_unchecked` is false, this masks the RHS to ensure it stays in-bounds,
/// as the `BuilderMethods` shifts are UB for out-of-bounds shift amounts.
/// For 32- and 64-bit types, this matches the semantics
/// of Java. (See related discussion on #1877 and #10183.)
///
/// If `is_unchecked` is true, this does no masking, and adds sufficient `assume`
/// calls or operation flags to preserve as much freedom to optimize as possible.
pub fn build_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
lhs: Bx::Value,
rhs: Bx::Value,
mut rhs: Bx::Value,
is_unchecked: bool,
) -> Bx::Value {
// Shifts may have any size int on the rhs
let mut rhs_llty = bx.cx().val_ty(rhs);
let mut lhs_llty = bx.cx().val_ty(lhs);

let mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, false);
if !is_unchecked {
rhs = bx.and(rhs, mask);
}

if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
rhs_llty = bx.cx().element_type(rhs_llty)
}
@ -317,6 +338,12 @@ pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let rhs_sz = bx.cx().int_width(rhs_llty);
let lhs_sz = bx.cx().int_width(lhs_llty);
if lhs_sz < rhs_sz {
if is_unchecked && bx.sess().opts.optimize != OptLevel::No {
// FIXME: Use `trunc nuw` once that's available
let inrange = bx.icmp(IntPredicate::IntULE, rhs, mask);
bx.assume(inrange);
}

bx.trunc(rhs, lhs_llty)
} else if lhs_sz > rhs_sz {
// We zero-extend even if the RHS is signed. So e.g. `(x: i32) << -1i8` will zero-extend the
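The doc comment above describes two strategies for the shift RHS: mask it into range, or `assume` it is already in range. As a minimal, self-contained illustration (plain Rust, not taken from this diff; the helper name is invented), the masked strategy amounts to reducing the shift amount modulo the LHS bit width:

```rust
/// Masked left shift for `u32`, mirroring what `shift_mask_val` + `bx.and` produce:
/// the shift amount is reduced to `0..=31`, so the operation is never UB.
fn masked_shl_u32(lhs: u32, rhs: u32) -> u32 {
    let mask = u32::BITS - 1; // 31 for a 32-bit left-hand side
    lhs << (rhs & mask)
}

fn main() {
    // 33 & 31 == 1, so this shifts by 1 -- the Java-like semantics the comment mentions.
    assert_eq!(masked_shl_u32(1, 33), 2);
}
```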
@ -3,10 +3,9 @@
use rustc_hir::LangItem;
use rustc_middle::mir;
use rustc_middle::ty::Instance;
use rustc_middle::ty::{self, layout::TyAndLayout, Ty, TyCtxt};
use rustc_middle::ty::{self, layout::TyAndLayout, TyCtxt};
use rustc_span::Span;

use crate::base;
use crate::traits::*;

#[derive(Copy, Clone)]
@ -128,44 +127,6 @@ pub fn build_langcall<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
(bx.fn_abi_of_instance(instance, ty::List::empty()), bx.get_fn_addr(instance), instance)
}

// To avoid UB from LLVM, these two functions mask RHS with an
// appropriate mask unconditionally (i.e., the fallback behavior for
// all shifts). For 32- and 64-bit types, this matches the semantics
// of Java. (See related discussion on #1877 and #10183.)

pub fn build_masked_lshift<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
lhs: Bx::Value,
rhs: Bx::Value,
) -> Bx::Value {
let rhs = base::cast_shift_expr_rhs(bx, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bx, rhs);
bx.shl(lhs, rhs)
}

pub fn build_masked_rshift<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
lhs_t: Ty<'tcx>,
lhs: Bx::Value,
rhs: Bx::Value,
) -> Bx::Value {
let rhs = base::cast_shift_expr_rhs(bx, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bx, rhs);
let is_signed = lhs_t.is_signed();
if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
}

fn shift_mask_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
rhs: Bx::Value,
) -> Bx::Value {
let rhs_llty = bx.val_ty(rhs);
let shift_val = shift_mask_val(bx, rhs_llty, rhs_llty, false);
bx.and(rhs, shift_val)
}

pub fn shift_mask_val<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
llty: Bx::Type,
@ -3,7 +3,7 @@ use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate};
use crate::common::IntPredicate;
use crate::traits::*;
use crate::MemFlags;

@ -861,14 +861,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.inbounds_gep(llty, lhs, &[rhs])
}
}
mir::BinOp::Shl => common::build_masked_lshift(bx, lhs, rhs),
mir::BinOp::ShlUnchecked => {
let rhs = base::cast_shift_expr_rhs(bx, lhs, rhs);
mir::BinOp::Shl | mir::BinOp::ShlUnchecked => {
let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShlUnchecked);
bx.shl(lhs, rhs)
}
mir::BinOp::Shr => common::build_masked_rshift(bx, input_ty, lhs, rhs),
mir::BinOp::ShrUnchecked => {
let rhs = base::cast_shift_expr_rhs(bx, lhs, rhs);
mir::BinOp::Shr | mir::BinOp::ShrUnchecked => {
let rhs = base::build_shift_expr_rhs(bx, lhs, rhs, op == mir::BinOp::ShrUnchecked);
if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
}
mir::BinOp::Ne
@ -222,6 +222,7 @@ const_eval_mut_deref =

const_eval_mutable_ptr_in_final = encountered mutable pointer in final value of {const_eval_intern_kind}

const_eval_nested_static_in_thread_local = #[thread_local] does not support implicit nested statics, please create explicit static items and refer to them instead
const_eval_non_const_fmt_macro_call =
cannot call non-const formatting macro in {const_eval_const_context}s

@ -25,6 +25,13 @@ pub(crate) struct DanglingPtrInFinal {
pub kind: InternKind,
}

#[derive(Diagnostic)]
#[diag(const_eval_nested_static_in_thread_local)]
pub(crate) struct NestedStaticInThreadLocal {
#[primary_span]
pub span: Span,
}

#[derive(LintDiagnostic)]
#[diag(const_eval_mutable_ptr_in_final)]
pub(crate) struct MutablePtrInFinal {
@ -28,7 +28,7 @@ use rustc_span::sym;

use super::{AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy};
use crate::const_eval;
use crate::errors::{DanglingPtrInFinal, MutablePtrInFinal};
use crate::errors::{DanglingPtrInFinal, MutablePtrInFinal, NestedStaticInThreadLocal};

pub trait CompileTimeMachine<'mir, 'tcx: 'mir, T> = Machine<
'mir,
@ -108,6 +108,10 @@ fn intern_as_new_static<'tcx>(
);
tcx.set_nested_alloc_id_static(alloc_id, feed.def_id());

if tcx.is_thread_local_static(static_id.into()) {
tcx.dcx().emit_err(NestedStaticInThreadLocal { span: tcx.def_span(static_id) });
}

// These do not inherit the codegen attrs of the parent static allocation, since
// it doesn't make sense for them to inherit their `#[no_mangle]` and `#[link_name = ..]`
// and the like.
@ -54,14 +54,20 @@ impl<'tcx> Bounds<'tcx> {
span: Span,
polarity: ty::PredicatePolarity,
) {
self.clauses.push((
let clause = (
trait_ref
.map_bound(|trait_ref| {
ty::ClauseKind::Trait(ty::TraitPredicate { trait_ref, polarity })
})
.to_predicate(tcx),
span,
));
);
// FIXME(-Znext-solver): We can likely remove this hack once the new trait solver lands.
if tcx.lang_items().sized_trait() == Some(trait_ref.def_id()) {
self.clauses.insert(0, clause);
} else {
self.clauses.push(clause);
}
}

pub fn push_projection_bound(
@ -459,9 +459,8 @@ pub fn check_intrinsic_type(
sym::unchecked_div | sym::unchecked_rem | sym::exact_div => {
(1, 0, vec![param(0), param(0)], param(0))
}
sym::unchecked_shl | sym::unchecked_shr | sym::rotate_left | sym::rotate_right => {
(1, 0, vec![param(0), param(0)], param(0))
}
sym::unchecked_shl | sym::unchecked_shr => (2, 0, vec![param(0), param(1)], param(0)),
sym::rotate_left | sym::rotate_right => (1, 0, vec![param(0), param(0)], param(0)),
sym::unchecked_add | sym::unchecked_sub | sym::unchecked_mul => {
(1, 0, vec![param(0), param(0)], param(0))
}
@ -1218,22 +1218,35 @@ pub fn typeid_for_instance<'tcx>(
let trait_id = tcx.fn_trait_kind_to_def_id(closure_args.kind()).unwrap();
let tuple_args =
tcx.instantiate_bound_regions_with_erased(closure_args.sig()).inputs()[0];
(trait_id, tuple_args)
(trait_id, Some(tuple_args))
}
ty::Coroutine(..) => (
tcx.require_lang_item(LangItem::Coroutine, None),
instance.args.as_coroutine().resume_ty(),
),
ty::Coroutine(..) => match tcx.coroutine_kind(instance.def_id()).unwrap() {
hir::CoroutineKind::Coroutine(..) => (
tcx.require_lang_item(LangItem::Coroutine, None),
Some(instance.args.as_coroutine().resume_ty()),
),
hir::CoroutineKind::Desugared(desugaring, _) => {
let lang_item = match desugaring {
hir::CoroutineDesugaring::Async => LangItem::Future,
hir::CoroutineDesugaring::AsyncGen => LangItem::AsyncIterator,
hir::CoroutineDesugaring::Gen => LangItem::Iterator,
};
(tcx.require_lang_item(lang_item, None), None)
}
},
ty::CoroutineClosure(..) => (
tcx.require_lang_item(LangItem::FnOnce, None),
tcx.instantiate_bound_regions_with_erased(
instance.args.as_coroutine_closure().coroutine_closure_sig(),
)
.tupled_inputs_ty,
Some(
tcx.instantiate_bound_regions_with_erased(
instance.args.as_coroutine_closure().coroutine_closure_sig(),
)
.tupled_inputs_ty,
),
),
x => bug!("Unexpected type kind for closure-like: {x:?}"),
};
let trait_ref = ty::TraitRef::new(tcx, trait_id, [closure_ty, inputs]);
let concrete_args = tcx.mk_args_trait(closure_ty, inputs.map(Into::into));
let trait_ref = ty::TraitRef::new(tcx, trait_id, concrete_args);
let invoke_ty = trait_object_ty(tcx, ty::Binder::dummy(trait_ref));
let abstract_args = tcx.mk_args_trait(invoke_ty, trait_ref.args.into_iter().skip(1));
// There should be exactly one method on this trait, and it should be the one we're
@ -752,6 +752,18 @@ pub trait BuildHasher {
#[stable(since = "1.7.0", feature = "build_hasher")]
pub struct BuildHasherDefault<H>(marker::PhantomData<fn() -> H>);

impl<H> BuildHasherDefault<H> {
/// Creates a new BuildHasherDefault for Hasher `H`.
#[unstable(
feature = "build_hasher_default_const_new",
issue = "123197",
reason = "recently added"
)]
pub const fn new() -> Self {
BuildHasherDefault(marker::PhantomData)
}
}

#[stable(since = "1.9.0", feature = "core_impl_debug")]
impl<H> fmt::Debug for BuildHasherDefault<H> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@ -778,7 +790,7 @@ impl<H> Clone for BuildHasherDefault<H> {
#[stable(since = "1.7.0", feature = "build_hasher")]
impl<H> Default for BuildHasherDefault<H> {
fn default() -> BuildHasherDefault<H> {
BuildHasherDefault(marker::PhantomData)
Self::new()
}
}
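As a usage sketch for the new constructor (not part of the diff; nightly-only while `build_hasher_default_const_new` is unstable, and the `HASHER` constant name is invented), the point of making `new` a `const fn` is that a `BuildHasherDefault` can now be produced in a constant context:

```rust
#![feature(build_hasher_default_const_new)]

use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::BuildHasherDefault;

// Evaluated at compile time thanks to the new `const fn new`.
const HASHER: BuildHasherDefault<DefaultHasher> = BuildHasherDefault::new();

fn main() {
    // A fresh copy of the const is moved into the map.
    let mut map: HashMap<&str, u32, _> = HashMap::with_hasher(HASHER);
    map.insert("answer", 42);
    assert_eq!(map["answer"], 42);
}
```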
@ -2236,18 +2236,20 @@ extern "rust-intrinsic" {
/// Safe wrappers for this intrinsic are available on the integer
/// primitives via the `checked_shl` method. For example,
/// [`u32::checked_shl`]
#[cfg(not(bootstrap))]
#[rustc_const_stable(feature = "const_int_unchecked", since = "1.40.0")]
#[rustc_nounwind]
pub fn unchecked_shl<T: Copy>(x: T, y: T) -> T;
pub fn unchecked_shl<T: Copy, U: Copy>(x: T, y: U) -> T;
/// Performs an unchecked right shift, resulting in undefined behavior when
/// `y < 0` or `y >= N`, where N is the width of T in bits.
///
/// Safe wrappers for this intrinsic are available on the integer
/// primitives via the `checked_shr` method. For example,
/// [`u32::checked_shr`]
#[cfg(not(bootstrap))]
#[rustc_const_stable(feature = "const_int_unchecked", since = "1.40.0")]
#[rustc_nounwind]
pub fn unchecked_shr<T: Copy>(x: T, y: T) -> T;
pub fn unchecked_shr<T: Copy, U: Copy>(x: T, y: U) -> T;

/// Returns the result of an unchecked addition, resulting in
/// undefined behavior when `x + y > T::MAX` or `x + y < T::MIN`.
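For context (a hedged sketch, not part of the diff): with the widened signatures above, callers on nightly can pass a shift amount whose type differs from the shifted value, which is what the codegen tests later in this commit exercise:

```rust
#![feature(core_intrinsics)]

fn main() {
    let x: u64 = 1;
    let amount: u8 = 3;
    // SAFETY: 3 < 64, so the shift amount is in range for `u64`.
    let y = unsafe { std::intrinsics::unchecked_shl(x, amount) };
    assert_eq!(y, 8);
}
```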
@ -1227,10 +1227,18 @@ macro_rules! int_impl {
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
// SAFETY: the caller must uphold the safety contract for
// `unchecked_shl`.
// Any legal shift amount is losslessly representable in the self type.
unsafe { intrinsics::unchecked_shl(self, conv_rhs_for_unchecked_shift!($SelfT, rhs)) }
#[cfg(bootstrap)]
{
// For bootstrapping, just use built-in primitive shift.
// panicking is a legal manifestation of UB
self << rhs
}
#[cfg(not(bootstrap))]
{
// SAFETY: the caller must uphold the safety contract for
// `unchecked_shl`.
unsafe { intrinsics::unchecked_shl(self, rhs) }
}
}

/// Checked shift right. Computes `self >> rhs`, returning `None` if `rhs` is
@ -1310,10 +1318,18 @@ macro_rules! int_impl {
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
// SAFETY: the caller must uphold the safety contract for
// `unchecked_shr`.
// Any legal shift amount is losslessly representable in the self type.
unsafe { intrinsics::unchecked_shr(self, conv_rhs_for_unchecked_shift!($SelfT, rhs)) }
#[cfg(bootstrap)]
{
// For bootstrapping, just use built-in primitive shift.
// panicking is a legal manifestation of UB
self >> rhs
}
#[cfg(not(bootstrap))]
{
// SAFETY: the caller must uphold the safety contract for
// `unchecked_shr`.
unsafe { intrinsics::unchecked_shr(self, rhs) }
}
}

/// Checked absolute value. Computes `self.abs()`, returning `None` if
@ -285,17 +285,6 @@ macro_rules! widening_impl {
};
}

macro_rules! conv_rhs_for_unchecked_shift {
($SelfT:ty, $x:expr) => {{
// If the `as` cast will truncate, ensure we still tell the backend
// that the pre-truncation value was also small.
if <$SelfT>::BITS < 32 {
intrinsics::assume($x <= (<$SelfT>::MAX as u32));
}
$x as $SelfT
}};
}

impl i8 {
int_impl! {
Self = i8,
@ -1286,10 +1286,18 @@ macro_rules! uint_impl {
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
// SAFETY: the caller must uphold the safety contract for
// `unchecked_shl`.
// Any legal shift amount is losslessly representable in the self type.
unsafe { intrinsics::unchecked_shl(self, conv_rhs_for_unchecked_shift!($SelfT, rhs)) }
#[cfg(bootstrap)]
{
// For bootstrapping, just use built-in primitive shift.
// panicking is a legal manifestation of UB
self << rhs
}
#[cfg(not(bootstrap))]
{
// SAFETY: the caller must uphold the safety contract for
// `unchecked_shl`.
unsafe { intrinsics::unchecked_shl(self, rhs) }
}
}

/// Checked shift right. Computes `self >> rhs`, returning `None`
@ -1369,10 +1377,18 @@ macro_rules! uint_impl {
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
// SAFETY: the caller must uphold the safety contract for
// `unchecked_shr`.
// Any legal shift amount is losslessly representable in the self type.
unsafe { intrinsics::unchecked_shr(self, conv_rhs_for_unchecked_shift!($SelfT, rhs)) }
#[cfg(bootstrap)]
{
// For bootstrapping, just use built-in primitive shift.
// panicking is a legal manifestation of UB
self >> rhs
}
#[cfg(not(bootstrap))]
{
// SAFETY: the caller must uphold the safety contract for
// `unchecked_shr`.
unsafe { intrinsics::unchecked_shr(self, rhs) }
}
}

/// Checked exponentiation. Computes `self.pow(exp)`, returning `None` if
@ -1781,9 +1781,19 @@ pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usiz
// FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <=
// 1, where the method versions of these operations are not inlined.
use intrinsics::{
assume, cttz_nonzero, exact_div, mul_with_overflow, unchecked_rem, unchecked_shl,
unchecked_shr, unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
assume, cttz_nonzero, exact_div, mul_with_overflow, unchecked_rem, unchecked_sub,
wrapping_add, wrapping_mul, wrapping_sub,
};
#[cfg(bootstrap)]
const unsafe fn unchecked_shl(value: usize, shift: usize) -> usize {
value << shift
}
#[cfg(bootstrap)]
const unsafe fn unchecked_shr(value: usize, shift: usize) -> usize {
value >> shift
}
#[cfg(not(bootstrap))]
use intrinsics::{unchecked_shl, unchecked_shr};

/// Calculate multiplicative modular inverse of `x` modulo `m`.
///
@ -181,8 +181,14 @@ fn clean_param_env<'tcx>(
})
.map(|pred| {
tcx.fold_regions(pred, |r, _| match *r {
ty::ReVar(vid) => vid_to_region[&vid],
// FIXME: Don't `unwrap_or`, I think we should panic if we encounter an infer var that
// we can't map to a concrete region. However, `AutoTraitFinder` *does* leak those kinds
// of `ReVar`s for some reason at the time of writing. See `rustdoc-ui/` tests.
// This is in dire need of an investigation into `AutoTraitFinder`.
ty::ReVar(vid) => vid_to_region.get(&vid).copied().unwrap_or(r),
ty::ReEarlyParam(_) | ty::ReStatic | ty::ReBound(..) | ty::ReError(_) => r,
// FIXME(#120606): `AutoTraitFinder` can actually leak placeholder regions which feels
// incorrect. Needs investigation.
ty::ReLateParam(_) | ty::RePlaceholder(_) | ty::ReErased => {
bug!("unexpected region kind: {r:?}")
}
@ -2,6 +2,7 @@

#![crate_type = "lib"]
#![feature(unchecked_shifts)]
#![feature(core_intrinsics)]

// CHECK-LABEL: @unchecked_shl_unsigned_same
#[no_mangle]
@ -19,7 +20,7 @@ pub unsafe fn unchecked_shl_unsigned_smaller(a: u16, b: u32) -> u16 {
// This uses -DAG to avoid failing on irrelevant reorderings,
// like emitting the truncation earlier.

// CHECK-DAG: %[[INRANGE:.+]] = icmp ult i32 %b, 65536
// CHECK-DAG: %[[INRANGE:.+]] = icmp ult i32 %b, 16
// CHECK-DAG: tail call void @llvm.assume(i1 %[[INRANGE]])
// CHECK-DAG: %[[TRUNC:.+]] = trunc i32 %b to i16
// CHECK-DAG: shl i16 %a, %[[TRUNC]]
@ -51,7 +52,7 @@ pub unsafe fn unchecked_shr_signed_smaller(a: i16, b: u32) -> i16 {
// This uses -DAG to avoid failing on irrelevant reorderings,
// like emitting the truncation earlier.

// CHECK-DAG: %[[INRANGE:.+]] = icmp ult i32 %b, 32768
// CHECK-DAG: %[[INRANGE:.+]] = icmp ult i32 %b, 16
// CHECK-DAG: tail call void @llvm.assume(i1 %[[INRANGE]])
// CHECK-DAG: %[[TRUNC:.+]] = trunc i32 %b to i16
// CHECK-DAG: ashr i16 %a, %[[TRUNC]]
@ -66,3 +67,47 @@ pub unsafe fn unchecked_shr_signed_bigger(a: i64, b: u32) -> i64 {
// CHECK: ashr i64 %a, %[[EXT]]
a.unchecked_shr(b)
}

// CHECK-LABEL: @unchecked_shr_u128_i8
#[no_mangle]
pub unsafe fn unchecked_shr_u128_i8(a: u128, b: i8) -> u128 {
// CHECK-NOT: assume
// CHECK: %[[EXT:.+]] = zext{{( nneg)?}} i8 %b to i128
// CHECK: lshr i128 %a, %[[EXT]]
std::intrinsics::unchecked_shr(a, b)
}

// CHECK-LABEL: @unchecked_shl_i128_u8
#[no_mangle]
pub unsafe fn unchecked_shl_i128_u8(a: i128, b: u8) -> i128 {
// CHECK-NOT: assume
// CHECK: %[[EXT:.+]] = zext{{( nneg)?}} i8 %b to i128
// CHECK: shl i128 %a, %[[EXT]]
std::intrinsics::unchecked_shl(a, b)
}

// CHECK-LABEL: @unchecked_shl_u8_i128
#[no_mangle]
pub unsafe fn unchecked_shl_u8_i128(a: u8, b: i128) -> u8 {
// This uses -DAG to avoid failing on irrelevant reorderings,
// like emitting the truncation earlier.

// CHECK-DAG: %[[INRANGE:.+]] = icmp ult i128 %b, 8
// CHECK-DAG: tail call void @llvm.assume(i1 %[[INRANGE]])
// CHECK-DAG: %[[TRUNC:.+]] = trunc i128 %b to i8
// CHECK-DAG: shl i8 %a, %[[TRUNC]]
std::intrinsics::unchecked_shl(a, b)
}

// CHECK-LABEL: @unchecked_shr_i8_u128
#[no_mangle]
pub unsafe fn unchecked_shr_i8_u128(a: i8, b: u128) -> i8 {
// This uses -DAG to avoid failing on irrelevant reorderings,
// like emitting the truncation earlier.

// CHECK-DAG: %[[INRANGE:.+]] = icmp ult i128 %b, 8
// CHECK-DAG: tail call void @llvm.assume(i1 %[[INRANGE]])
// CHECK-DAG: %[[TRUNC:.+]] = trunc i128 %b to i8
// CHECK-DAG: ashr i8 %a, %[[TRUNC]]
std::intrinsics::unchecked_shr(a, b)
}
@ -217,7 +217,7 @@ pub fn second_trait_bound<T: Eq + Clone>() {}
pub fn second_builtin_bound<T: Send >() {}

#[cfg(not(any(cfail1,cfail4)))]
#[rustc_clean(cfg = "cfail2", except = "opt_hir_owner_nodes, predicates_of")]
#[rustc_clean(cfg = "cfail2", except = "opt_hir_owner_nodes")]
#[rustc_clean(cfg = "cfail3")]
#[rustc_clean(cfg = "cfail5", except = "opt_hir_owner_nodes, predicates_of")]
#[rustc_clean(cfg = "cfail6")]
@ -4,6 +4,9 @@

//@ compile-flags: -Zmir-opt-level=2 -Zinline-mir

// These used to be more interesting when the library had to fix the RHS type.
// After MCP#693, though, that's the backend's problem, not something in MIR.

// EMIT_MIR unchecked_shifts.unchecked_shl_unsigned_smaller.Inline.diff
// EMIT_MIR unchecked_shifts.unchecked_shl_unsigned_smaller.PreCodegen.after.mir
pub unsafe fn unchecked_shl_unsigned_smaller(a: u16, b: u32) -> u16 {
@ -12,22 +15,6 @@ pub unsafe fn unchecked_shl_unsigned_smaller(a: u16, b: u32) -> u16 {
a.unchecked_shl(b)
}

// EMIT_MIR unchecked_shifts.unchecked_shr_signed_smaller.Inline.diff
// EMIT_MIR unchecked_shifts.unchecked_shr_signed_smaller.PreCodegen.after.mir
pub unsafe fn unchecked_shr_signed_smaller(a: i16, b: u32) -> i16 {
// CHECK-LABEL: fn unchecked_shr_signed_smaller(
// CHECK: (inlined core::num::<impl i16>::unchecked_shr)
a.unchecked_shr(b)
}

// EMIT_MIR unchecked_shifts.unchecked_shl_unsigned_bigger.Inline.diff
// EMIT_MIR unchecked_shifts.unchecked_shl_unsigned_bigger.PreCodegen.after.mir
pub unsafe fn unchecked_shl_unsigned_bigger(a: u64, b: u32) -> u64 {
// CHECK-LABEL: fn unchecked_shl_unsigned_bigger(
// CHECK: (inlined core::num::<impl u64>::unchecked_shl)
a.unchecked_shl(b)
}

// EMIT_MIR unchecked_shifts.unchecked_shr_signed_bigger.Inline.diff
// EMIT_MIR unchecked_shifts.unchecked_shr_signed_bigger.PreCodegen.after.mir
pub unsafe fn unchecked_shr_signed_bigger(a: i64, b: u32) -> i64 {
@ -1,36 +0,0 @@
- // MIR for `unchecked_shl_unsigned_bigger` before Inline
+ // MIR for `unchecked_shl_unsigned_bigger` after Inline

fn unchecked_shl_unsigned_bigger(_1: u64, _2: u32) -> u64 {
debug a => _1;
debug b => _2;
let mut _0: u64;
let mut _3: u64;
let mut _4: u32;
+ scope 1 (inlined core::num::<impl u64>::unchecked_shl) {
+ debug self => _3;
+ debug rhs => _4;
+ let mut _5: u64;
+ scope 2 {
+ }
+ }

bb0: {
StorageLive(_3);
_3 = _1;
StorageLive(_4);
_4 = _2;
- _0 = core::num::<impl u64>::unchecked_shl(move _3, move _4) -> [return: bb1, unwind unreachable];
- }
-
- bb1: {
+ StorageLive(_5);
+ _5 = _4 as u64 (IntToInt);
+ _0 = ShlUnchecked(_3, move _5);
+ StorageDead(_5);
StorageDead(_4);
StorageDead(_3);
return;
}
}
@ -1,36 +0,0 @@
- // MIR for `unchecked_shl_unsigned_bigger` before Inline
+ // MIR for `unchecked_shl_unsigned_bigger` after Inline

fn unchecked_shl_unsigned_bigger(_1: u64, _2: u32) -> u64 {
debug a => _1;
debug b => _2;
let mut _0: u64;
let mut _3: u64;
let mut _4: u32;
+ scope 1 (inlined core::num::<impl u64>::unchecked_shl) {
+ debug self => _3;
+ debug rhs => _4;
+ let mut _5: u64;
+ scope 2 {
+ }
+ }

bb0: {
StorageLive(_3);
_3 = _1;
StorageLive(_4);
_4 = _2;
- _0 = core::num::<impl u64>::unchecked_shl(move _3, move _4) -> [return: bb1, unwind continue];
- }
-
- bb1: {
+ StorageLive(_5);
+ _5 = _4 as u64 (IntToInt);
+ _0 = ShlUnchecked(_3, move _5);
+ StorageDead(_5);
StorageDead(_4);
StorageDead(_3);
return;
}
}
@ -1,22 +0,0 @@
// MIR for `unchecked_shl_unsigned_bigger` after PreCodegen

fn unchecked_shl_unsigned_bigger(_1: u64, _2: u32) -> u64 {
debug a => _1;
debug b => _2;
let mut _0: u64;
scope 1 (inlined core::num::<impl u64>::unchecked_shl) {
debug self => _1;
debug rhs => _2;
let mut _3: u64;
scope 2 {
}
}

bb0: {
StorageLive(_3);
_3 = _2 as u64 (IntToInt);
_0 = ShlUnchecked(_1, move _3);
StorageDead(_3);
return;
}
}
@ -1,22 +0,0 @@
// MIR for `unchecked_shl_unsigned_bigger` after PreCodegen

fn unchecked_shl_unsigned_bigger(_1: u64, _2: u32) -> u64 {
debug a => _1;
debug b => _2;
let mut _0: u64;
scope 1 (inlined core::num::<impl u64>::unchecked_shl) {
debug self => _1;
debug rhs => _2;
let mut _3: u64;
scope 2 {
}
}

bb0: {
StorageLive(_3);
_3 = _2 as u64 (IntToInt);
_0 = ShlUnchecked(_1, move _3);
StorageDead(_3);
return;
}
}
@ -10,8 +10,6 @@
+ scope 1 (inlined core::num::<impl u16>::unchecked_shl) {
+ debug self => _3;
+ debug rhs => _4;
+ let mut _5: u16;
+ let mut _6: bool;
+ scope 2 {
+ }
+ }
@ -25,14 +23,7 @@
- }
-
- bb1: {
+ StorageLive(_5);
+ StorageLive(_6);
+ _6 = Le(_4, const 65535_u32);
+ assume(move _6);
+ StorageDead(_6);
+ _5 = _4 as u16 (IntToInt);
+ _0 = ShlUnchecked(_3, move _5);
+ StorageDead(_5);
+ _0 = ShlUnchecked(_3, _4);
StorageDead(_4);
StorageDead(_3);
return;
@ -10,8 +10,6 @@
+ scope 1 (inlined core::num::<impl u16>::unchecked_shl) {
+ debug self => _3;
+ debug rhs => _4;
+ let mut _5: u16;
+ let mut _6: bool;
+ scope 2 {
+ }
+ }
@ -25,14 +23,7 @@
- }
-
- bb1: {
+ StorageLive(_5);
+ StorageLive(_6);
+ _6 = Le(_4, const 65535_u32);
+ assume(move _6);
+ StorageDead(_6);
+ _5 = _4 as u16 (IntToInt);
+ _0 = ShlUnchecked(_3, move _5);
+ StorageDead(_5);
+ _0 = ShlUnchecked(_3, _4);
StorageDead(_4);
StorageDead(_3);
return;
@ -7,21 +7,12 @@ fn unchecked_shl_unsigned_smaller(_1: u16, _2: u32) -> u16 {
scope 1 (inlined core::num::<impl u16>::unchecked_shl) {
debug self => _1;
debug rhs => _2;
let mut _3: bool;
let mut _4: u16;
scope 2 {
}
}

bb0: {
StorageLive(_4);
StorageLive(_3);
_3 = Le(_2, const 65535_u32);
assume(move _3);
StorageDead(_3);
_4 = _2 as u16 (IntToInt);
_0 = ShlUnchecked(_1, move _4);
StorageDead(_4);
_0 = ShlUnchecked(_1, _2);
return;
}
}
@ -7,21 +7,12 @@ fn unchecked_shl_unsigned_smaller(_1: u16, _2: u32) -> u16 {
scope 1 (inlined core::num::<impl u16>::unchecked_shl) {
debug self => _1;
debug rhs => _2;
let mut _3: bool;
let mut _4: u16;
scope 2 {
}
}

bb0: {
StorageLive(_4);
StorageLive(_3);
_3 = Le(_2, const 65535_u32);
assume(move _3);
StorageDead(_3);
_4 = _2 as u16 (IntToInt);
_0 = ShlUnchecked(_1, move _4);
StorageDead(_4);
_0 = ShlUnchecked(_1, _2);
return;
}
}
@ -10,7 +10,6 @@
+ scope 1 (inlined core::num::<impl i64>::unchecked_shr) {
+ debug self => _3;
+ debug rhs => _4;
+ let mut _5: i64;
+ scope 2 {
+ }
+ }
@ -24,10 +23,7 @@
- }
-
- bb1: {
+ StorageLive(_5);
+ _5 = _4 as i64 (IntToInt);
+ _0 = ShrUnchecked(_3, move _5);
+ StorageDead(_5);
+ _0 = ShrUnchecked(_3, _4);
StorageDead(_4);
StorageDead(_3);
return;
@ -10,7 +10,6 @@
+ scope 1 (inlined core::num::<impl i64>::unchecked_shr) {
+ debug self => _3;
+ debug rhs => _4;
+ let mut _5: i64;
+ scope 2 {
+ }
+ }
@ -24,10 +23,7 @@
- }
-
- bb1: {
+ StorageLive(_5);
+ _5 = _4 as i64 (IntToInt);
+ _0 = ShrUnchecked(_3, move _5);
+ StorageDead(_5);
+ _0 = ShrUnchecked(_3, _4);
StorageDead(_4);
StorageDead(_3);
return;
@ -7,16 +7,12 @@ fn unchecked_shr_signed_bigger(_1: i64, _2: u32) -> i64 {
scope 1 (inlined core::num::<impl i64>::unchecked_shr) {
debug self => _1;
debug rhs => _2;
let mut _3: i64;
scope 2 {
}
}

bb0: {
StorageLive(_3);
_3 = _2 as i64 (IntToInt);
_0 = ShrUnchecked(_1, move _3);
StorageDead(_3);
_0 = ShrUnchecked(_1, _2);
return;
}
}
@ -7,16 +7,12 @@ fn unchecked_shr_signed_bigger(_1: i64, _2: u32) -> i64 {
scope 1 (inlined core::num::<impl i64>::unchecked_shr) {
debug self => _1;
debug rhs => _2;
let mut _3: i64;
scope 2 {
}
}

bb0: {
StorageLive(_3);
_3 = _2 as i64 (IntToInt);
_0 = ShrUnchecked(_1, move _3);
StorageDead(_3);
_0 = ShrUnchecked(_1, _2);
return;
}
}
@ -1,41 +0,0 @@
- // MIR for `unchecked_shr_signed_smaller` before Inline
+ // MIR for `unchecked_shr_signed_smaller` after Inline

fn unchecked_shr_signed_smaller(_1: i16, _2: u32) -> i16 {
debug a => _1;
debug b => _2;
let mut _0: i16;
let mut _3: i16;
let mut _4: u32;
+ scope 1 (inlined core::num::<impl i16>::unchecked_shr) {
+ debug self => _3;
+ debug rhs => _4;
+ let mut _5: i16;
+ let mut _6: bool;
+ scope 2 {
+ }
+ }

bb0: {
StorageLive(_3);
_3 = _1;
StorageLive(_4);
_4 = _2;
- _0 = core::num::<impl i16>::unchecked_shr(move _3, move _4) -> [return: bb1, unwind unreachable];
- }
-
- bb1: {
+ StorageLive(_5);
+ StorageLive(_6);
+ _6 = Le(_4, const 32767_u32);
+ assume(move _6);
+ StorageDead(_6);
+ _5 = _4 as i16 (IntToInt);
+ _0 = ShrUnchecked(_3, move _5);
+ StorageDead(_5);
StorageDead(_4);
StorageDead(_3);
return;
}
}
@ -1,41 +0,0 @@
- // MIR for `unchecked_shr_signed_smaller` before Inline
+ // MIR for `unchecked_shr_signed_smaller` after Inline

fn unchecked_shr_signed_smaller(_1: i16, _2: u32) -> i16 {
debug a => _1;
debug b => _2;
let mut _0: i16;
let mut _3: i16;
let mut _4: u32;
+ scope 1 (inlined core::num::<impl i16>::unchecked_shr) {
+ debug self => _3;
+ debug rhs => _4;
+ let mut _5: i16;
+ let mut _6: bool;
+ scope 2 {
+ }
+ }

bb0: {
StorageLive(_3);
_3 = _1;
StorageLive(_4);
_4 = _2;
- _0 = core::num::<impl i16>::unchecked_shr(move _3, move _4) -> [return: bb1, unwind continue];
- }
-
- bb1: {
+ StorageLive(_5);
+ StorageLive(_6);
+ _6 = Le(_4, const 32767_u32);
+ assume(move _6);
+ StorageDead(_6);
+ _5 = _4 as i16 (IntToInt);
+ _0 = ShrUnchecked(_3, move _5);
+ StorageDead(_5);
StorageDead(_4);
StorageDead(_3);
return;
}
}
@ -1,27 +0,0 @@
// MIR for `unchecked_shr_signed_smaller` after PreCodegen

fn unchecked_shr_signed_smaller(_1: i16, _2: u32) -> i16 {
debug a => _1;
debug b => _2;
let mut _0: i16;
scope 1 (inlined core::num::<impl i16>::unchecked_shr) {
debug self => _1;
debug rhs => _2;
let mut _3: bool;
let mut _4: i16;
scope 2 {
}
}

bb0: {
StorageLive(_4);
StorageLive(_3);
_3 = Le(_2, const 32767_u32);
assume(move _3);
StorageDead(_3);
_4 = _2 as i16 (IntToInt);
_0 = ShrUnchecked(_1, move _4);
StorageDead(_4);
return;
}
}
@ -1,27 +0,0 @@
// MIR for `unchecked_shr_signed_smaller` after PreCodegen

fn unchecked_shr_signed_smaller(_1: i16, _2: u32) -> i16 {
debug a => _1;
debug b => _2;
let mut _0: i16;
scope 1 (inlined core::num::<impl i16>::unchecked_shr) {
debug self => _1;
debug rhs => _2;
let mut _3: bool;
let mut _4: i16;
scope 2 {
}
}

bb0: {
StorageLive(_4);
StorageLive(_3);
_3 = Le(_2, const 32767_u32);
assume(move _3);
StorageDead(_3);
_4 = _2 as i16 (IntToInt);
_0 = ShrUnchecked(_1, move _4);
StorageDead(_4);
return;
}
}
@ -16,7 +16,7 @@ pub fn wrapping(a: i32, b: i32) {
}

// EMIT_MIR lower_intrinsics.unchecked.LowerIntrinsics.diff
pub unsafe fn unchecked(a: i32, b: i32) {
pub unsafe fn unchecked(a: i32, b: i32, c: u32) {
// CHECK-LABEL: fn unchecked(
// CHECK: {{_.*}} = AddUnchecked(
// CHECK: {{_.*}} = SubUnchecked(
@ -25,6 +25,8 @@ pub unsafe fn unchecked(a: i32, b: i32) {
// CHECK: {{_.*}} = Rem(
// CHECK: {{_.*}} = ShlUnchecked(
// CHECK: {{_.*}} = ShrUnchecked(
// CHECK: {{_.*}} = ShlUnchecked(
// CHECK: {{_.*}} = ShrUnchecked(
let _a = core::intrinsics::unchecked_add(a, b);
let _b = core::intrinsics::unchecked_sub(a, b);
let _c = core::intrinsics::unchecked_mul(a, b);
@ -32,6 +34,8 @@ pub unsafe fn unchecked(a: i32, b: i32) {
let _y = core::intrinsics::unchecked_rem(a, b);
let _i = core::intrinsics::unchecked_shl(a, b);
let _j = core::intrinsics::unchecked_shr(a, b);
let _k = core::intrinsics::unchecked_shl(a, c);
let _l = core::intrinsics::unchecked_shr(a, c);
}

// EMIT_MIR lower_intrinsics.size_of.LowerIntrinsics.diff
@ -1,45 +1,58 @@
- // MIR for `unchecked` before LowerIntrinsics
+ // MIR for `unchecked` after LowerIntrinsics

fn unchecked(_1: i32, _2: i32) -> () {
fn unchecked(_1: i32, _2: i32, _3: u32) -> () {
debug a => _1;
debug b => _2;
debug c => _3;
let mut _0: ();
let _3: i32;
let mut _4: i32;
let _4: i32;
let mut _5: i32;
let mut _7: i32;
let mut _6: i32;
let mut _8: i32;
let mut _10: i32;
let mut _9: i32;
let mut _11: i32;
let mut _13: i32;
let mut _12: i32;
let mut _14: i32;
let mut _16: i32;
let mut _15: i32;
let mut _17: i32;
let mut _19: i32;
let mut _18: i32;
let mut _20: i32;
let mut _22: i32;
let mut _21: i32;
let mut _23: i32;
let mut _24: i32;
let mut _26: i32;
let mut _27: u32;
let mut _29: i32;
let mut _30: u32;
scope 1 {
debug _a => _3;
let _6: i32;
debug _a => _4;
let _7: i32;
scope 2 {
debug _b => _6;
let _9: i32;
debug _b => _7;
let _10: i32;
scope 3 {
debug _c => _9;
let _12: i32;
debug _c => _10;
let _13: i32;
scope 4 {
debug _x => _12;
let _15: i32;
debug _x => _13;
let _16: i32;
scope 5 {
debug _y => _15;
let _18: i32;
debug _y => _16;
let _19: i32;
scope 6 {
debug _i => _18;
let _21: i32;
debug _i => _19;
let _22: i32;
scope 7 {
debug _j => _21;
debug _j => _22;
let _25: i32;
scope 8 {
debug _k => _25;
let _28: i32;
scope 9 {
debug _l => _28;
}
}
}
}
}
@ -49,105 +62,133 @@
}

bb0: {
StorageLive(_3);
StorageLive(_4);
_4 = _1;
StorageLive(_5);
_5 = _2;
- _3 = unchecked_add::<i32>(move _4, move _5) -> [return: bb1, unwind unreachable];
+ _3 = AddUnchecked(move _4, move _5);
_5 = _1;
StorageLive(_6);
_6 = _2;
- _4 = unchecked_add::<i32>(move _5, move _6) -> [return: bb1, unwind unreachable];
+ _4 = AddUnchecked(move _5, move _6);
+ goto -> bb1;
}

bb1: {
StorageDead(_6);
StorageDead(_5);
StorageDead(_4);
StorageLive(_6);
StorageLive(_7);
_7 = _1;
StorageLive(_8);
_8 = _2;
- _6 = unchecked_sub::<i32>(move _7, move _8) -> [return: bb2, unwind unreachable];
+ _6 = SubUnchecked(move _7, move _8);
_8 = _1;
StorageLive(_9);
_9 = _2;
- _7 = unchecked_sub::<i32>(move _8, move _9) -> [return: bb2, unwind unreachable];
+ _7 = SubUnchecked(move _8, move _9);
+ goto -> bb2;
}

bb2: {
StorageDead(_9);
StorageDead(_8);
StorageDead(_7);
StorageLive(_9);
StorageLive(_10);
_10 = _1;
StorageLive(_11);
_11 = _2;
- _9 = unchecked_mul::<i32>(move _10, move _11) -> [return: bb3, unwind unreachable];
+ _9 = MulUnchecked(move _10, move _11);
_11 = _1;
StorageLive(_12);
_12 = _2;
- _10 = unchecked_mul::<i32>(move _11, move _12) -> [return: bb3, unwind unreachable];
+ _10 = MulUnchecked(move _11, move _12);
+ goto -> bb3;
}

bb3: {
StorageDead(_12);
StorageDead(_11);
StorageDead(_10);
StorageLive(_12);
StorageLive(_13);
_13 = _1;
StorageLive(_14);
_14 = _2;
- _12 = unchecked_div::<i32>(move _13, move _14) -> [return: bb4, unwind unreachable];
+ _12 = Div(move _13, move _14);
_14 = _1;
StorageLive(_15);
_15 = _2;
- _13 = unchecked_div::<i32>(move _14, move _15) -> [return: bb4, unwind unreachable];
+ _13 = Div(move _14, move _15);
+ goto -> bb4;
}

bb4: {
StorageDead(_15);
StorageDead(_14);
StorageDead(_13);
StorageLive(_15);
StorageLive(_16);
_16 = _1;
StorageLive(_17);
_17 = _2;
- _15 = unchecked_rem::<i32>(move _16, move _17) -> [return: bb5, unwind unreachable];
+ _15 = Rem(move _16, move _17);
_17 = _1;
StorageLive(_18);
_18 = _2;
- _16 = unchecked_rem::<i32>(move _17, move _18) -> [return: bb5, unwind unreachable];
+ _16 = Rem(move _17, move _18);
+ goto -> bb5;
}

bb5: {
StorageDead(_18);
StorageDead(_17);
StorageDead(_16);
StorageLive(_18);
StorageLive(_19);
_19 = _1;
StorageLive(_20);
_20 = _2;
- _18 = unchecked_shl::<i32>(move _19, move _20) -> [return: bb6, unwind unreachable];
+ _18 = ShlUnchecked(move _19, move _20);
_20 = _1;
StorageLive(_21);
_21 = _2;
- _19 = unchecked_shl::<i32, i32>(move _20, move _21) -> [return: bb6, unwind unreachable];
+ _19 = ShlUnchecked(move _20, move _21);
+ goto -> bb6;
}

bb6: {
StorageDead(_21);
StorageDead(_20);
StorageDead(_19);
StorageLive(_21);
StorageLive(_22);
_22 = _1;
StorageLive(_23);
_23 = _2;
- _21 = unchecked_shr::<i32>(move _22, move _23) -> [return: bb7, unwind unreachable];
+ _21 = ShrUnchecked(move _22, move _23);
_23 = _1;
StorageLive(_24);
_24 = _2;
- _22 = unchecked_shr::<i32, i32>(move _23, move _24) -> [return: bb7, unwind unreachable];
+ _22 = ShrUnchecked(move _23, move _24);
+ goto -> bb7;
}

bb7: {
StorageDead(_24);
StorageDead(_23);
StorageDead(_22);
StorageLive(_25);
StorageLive(_26);
_26 = _1;
StorageLive(_27);
_27 = _3;
- _25 = unchecked_shl::<i32, u32>(move _26, move _27) -> [return: bb8, unwind unreachable];
+ _25 = ShlUnchecked(move _26, move _27);
+ goto -> bb8;
}

bb8: {
StorageDead(_27);
StorageDead(_26);
StorageLive(_28);
StorageLive(_29);
_29 = _1;
StorageLive(_30);
_30 = _3;
- _28 = unchecked_shr::<i32, u32>(move _29, move _30) -> [return: bb9, unwind unreachable];
+ _28 = ShrUnchecked(move _29, move _30);
+ goto -> bb9;
}

bb9: {
StorageDead(_30);
StorageDead(_29);
_0 = const ();
StorageDead(_21);
StorageDead(_18);
StorageDead(_15);
StorageDead(_12);
StorageDead(_9);
StorageDead(_6);
StorageDead(_3);
StorageDead(_28);
StorageDead(_25);
StorageDead(_22);
StorageDead(_19);
StorageDead(_16);
StorageDead(_13);
StorageDead(_10);
StorageDead(_7);
StorageDead(_4);
return;
}
}
@ -1,45 +1,58 @@
- // MIR for `unchecked` before LowerIntrinsics
+ // MIR for `unchecked` after LowerIntrinsics

fn unchecked(_1: i32, _2: i32) -> () {
fn unchecked(_1: i32, _2: i32, _3: u32) -> () {
debug a => _1;
debug b => _2;
debug c => _3;
let mut _0: ();
let _3: i32;
let mut _4: i32;
let _4: i32;
let mut _5: i32;
let mut _7: i32;
let mut _6: i32;
let mut _8: i32;
let mut _10: i32;
let mut _9: i32;
let mut _11: i32;
let mut _13: i32;
let mut _12: i32;
let mut _14: i32;
let mut _16: i32;
let mut _15: i32;
let mut _17: i32;
let mut _19: i32;
let mut _18: i32;
let mut _20: i32;
let mut _22: i32;
let mut _21: i32;
let mut _23: i32;
let mut _24: i32;
let mut _26: i32;
let mut _27: u32;
let mut _29: i32;
let mut _30: u32;
scope 1 {
debug _a => _3;
let _6: i32;
debug _a => _4;
let _7: i32;
scope 2 {
debug _b => _6;
let _9: i32;
debug _b => _7;
let _10: i32;
scope 3 {
debug _c => _9;
let _12: i32;
debug _c => _10;
let _13: i32;
scope 4 {
debug _x => _12;
let _15: i32;
debug _x => _13;
let _16: i32;
scope 5 {
debug _y => _15;
let _18: i32;
debug _y => _16;
let _19: i32;
scope 6 {
debug _i => _18;
let _21: i32;
debug _i => _19;
let _22: i32;
scope 7 {
debug _j => _21;
debug _j => _22;
let _25: i32;
scope 8 {
debug _k => _25;
let _28: i32;
scope 9 {
debug _l => _28;
}
}
}
}
}
@ -49,105 +62,133 @@
}

bb0: {
StorageLive(_3);
StorageLive(_4);
_4 = _1;
StorageLive(_5);
_5 = _2;
- _3 = unchecked_add::<i32>(move _4, move _5) -> [return: bb1, unwind unreachable];
+ _3 = AddUnchecked(move _4, move _5);
_5 = _1;
StorageLive(_6);
_6 = _2;
- _4 = unchecked_add::<i32>(move _5, move _6) -> [return: bb1, unwind unreachable];
+ _4 = AddUnchecked(move _5, move _6);
+ goto -> bb1;
}

bb1: {
StorageDead(_6);
StorageDead(_5);
StorageDead(_4);
StorageLive(_6);
StorageLive(_7);
_7 = _1;
StorageLive(_8);
_8 = _2;
- _6 = unchecked_sub::<i32>(move _7, move _8) -> [return: bb2, unwind unreachable];
+ _6 = SubUnchecked(move _7, move _8);
_8 = _1;
StorageLive(_9);
_9 = _2;
- _7 = unchecked_sub::<i32>(move _8, move _9) -> [return: bb2, unwind unreachable];
+ _7 = SubUnchecked(move _8, move _9);
+ goto -> bb2;
}

bb2: {
StorageDead(_9);
StorageDead(_8);
StorageDead(_7);
StorageLive(_9);
StorageLive(_10);
_10 = _1;
StorageLive(_11);
_11 = _2;
- _9 = unchecked_mul::<i32>(move _10, move _11) -> [return: bb3, unwind unreachable];
+ _9 = MulUnchecked(move _10, move _11);
_11 = _1;
StorageLive(_12);
_12 = _2;
- _10 = unchecked_mul::<i32>(move _11, move _12) -> [return: bb3, unwind unreachable];
+ _10 = MulUnchecked(move _11, move _12);
+ goto -> bb3;
}

bb3: {
StorageDead(_12);
StorageDead(_11);
StorageDead(_10);
StorageLive(_12);
StorageLive(_13);
_13 = _1;
StorageLive(_14);
_14 = _2;
- _12 = unchecked_div::<i32>(move _13, move _14) -> [return: bb4, unwind unreachable];
+ _12 = Div(move _13, move _14);
_14 = _1;
StorageLive(_15);
_15 = _2;
- _13 = unchecked_div::<i32>(move _14, move _15) -> [return: bb4, unwind unreachable];
+ _13 = Div(move _14, move _15);
+ goto -> bb4;
}

bb4: {
StorageDead(_15);
StorageDead(_14);
StorageDead(_13);
StorageLive(_15);
StorageLive(_16);
_16 = _1;
StorageLive(_17);
_17 = _2;
- _15 = unchecked_rem::<i32>(move _16, move _17) -> [return: bb5, unwind unreachable];
+ _15 = Rem(move _16, move _17);
_17 = _1;
StorageLive(_18);
_18 = _2;
- _16 = unchecked_rem::<i32>(move _17, move _18) -> [return: bb5, unwind unreachable];
+ _16 = Rem(move _17, move _18);
+ goto -> bb5;
}

bb5: {
StorageDead(_18);
StorageDead(_17);
StorageDead(_16);
StorageLive(_18);
StorageLive(_19);
_19 = _1;
StorageLive(_20);
_20 = _2;
- _18 = unchecked_shl::<i32>(move _19, move _20) -> [return: bb6, unwind unreachable];
+ _18 = ShlUnchecked(move _19, move _20);
_20 = _1;
StorageLive(_21);
_21 = _2;
- _19 = unchecked_shl::<i32, i32>(move _20, move _21) -> [return: bb6, unwind unreachable];
+ _19 = ShlUnchecked(move _20, move _21);
+ goto -> bb6;
}

bb6: {
StorageDead(_21);
StorageDead(_20);
StorageDead(_19);
StorageLive(_21);
StorageLive(_22);
_22 = _1;
StorageLive(_23);
_23 = _2;
- _21 = unchecked_shr::<i32>(move _22, move _23) -> [return: bb7, unwind unreachable];
+ _21 = ShrUnchecked(move _22, move _23);
_23 = _1;
StorageLive(_24);
_24 = _2;
- _22 = unchecked_shr::<i32, i32>(move _23, move _24) -> [return: bb7, unwind unreachable];
+ _22 = ShrUnchecked(move _23, move _24);
+ goto -> bb7;
}

bb7: {
StorageDead(_24);
StorageDead(_23);
StorageDead(_22);
StorageLive(_25);
StorageLive(_26);
_26 = _1;
StorageLive(_27);
_27 = _3;
- _25 = unchecked_shl::<i32, u32>(move _26, move _27) -> [return: bb8, unwind unreachable];
+ _25 = ShlUnchecked(move _26, move _27);
+ goto -> bb8;
}

bb8: {
StorageDead(_27);
StorageDead(_26);
StorageLive(_28);
StorageLive(_29);
_29 = _1;
StorageLive(_30);
_30 = _3;
- _28 = unchecked_shr::<i32, u32>(move _29, move _30) -> [return: bb9, unwind unreachable];
+ _28 = ShrUnchecked(move _29, move _30);
+ goto -> bb9;
}

bb9: {
StorageDead(_30);
StorageDead(_29);
_0 = const ();
StorageDead(_21);
StorageDead(_18);
StorageDead(_15);
StorageDead(_12);
StorageDead(_9);
StorageDead(_6);
StorageDead(_3);
StorageDead(_28);
StorageDead(_25);
StorageDead(_22);
StorageDead(_19);
StorageDead(_16);
StorageDead(_13);
StorageDead(_10);
StorageDead(_7);
StorageDead(_4);
return;
}
}
@ -0,0 +1,23 @@
// We used to ICE here while trying to synthesize auto trait impls.
// issue: 107715
//@ check-pass

pub const N: usize = 1;

pub struct MapType<K: Supertrait<V>, V> {
_array: K::Array,
}

pub trait Subtrait: Supertrait<[u8; N]> {}

pub trait Supertrait<V> {
type Array: AnotherTrait<V>;
}

pub trait AnotherTrait<V> {
const LENGTH: usize;
}

pub struct Container<S: Subtrait> {
_x: MapType<S, [u8; N]>,
}
@ -0,0 +1,17 @@
// We used to ICE here while trying to synthesize auto trait impls.
// issue: 112242
//@ check-pass
//@ compile-flags: -Znormalize-docs

pub trait MyTrait<'a> {
type MyItem;
}
pub struct Inner<Q>(Q);
pub struct Outer<Q>(Inner<Q>);

impl<'a, Q> std::marker::Unpin for Inner<Q>
where
Q: MyTrait<'a>,
<Q as MyTrait<'a>>::MyItem: Copy,
{
}
@ -0,0 +1,11 @@
// We used to ICE here while trying to synthesize auto trait impls.
// issue: 123370
//@ check-pass

pub struct Inner<'a, Q>(&'a (), Q);

pub struct Outer<'a, Q>(Inner<'a, Q>);

impl<'a, Q: Trait<'a>> std::marker::Unpin for Inner<'static, Q> {}

pub trait Trait<'a> {}
@ -0,0 +1,18 @@
// We used to ICE here while trying to synthesize auto trait impls.
// issue: 114657

pub trait Foo {
type FooType;
}

pub trait Bar<const A: usize>: Foo<FooType = <Self as Bar<A>>::BarType> {
type BarType;
}

pub(crate) const B: usize = 5;

pub trait Tec: Bar<B> {}

pub struct Structure<C: Tec> { //~ ERROR the trait bound `C: Bar<5>` is not satisfied
_field: C::BarType, //~ ERROR the trait bound `C: Bar<5>` is not satisfied
}
@ -0,0 +1,25 @@
error[E0277]: the trait bound `C: Bar<5>` is not satisfied
--> $DIR/projections-in-super-trait-bound-unsatisfied.rs:16:1
|
LL | pub struct Structure<C: Tec> {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `Bar<5>` is not implemented for `C`
|
help: consider further restricting this bound
|
LL | pub struct Structure<C: Tec + Bar<5>> {
| ++++++++

error[E0277]: the trait bound `C: Bar<5>` is not satisfied
--> $DIR/projections-in-super-trait-bound-unsatisfied.rs:17:13
|
LL | _field: C::BarType,
| ^^^^^^^^^^ the trait `Bar<5>` is not implemented for `C`
|
help: consider further restricting this bound
|
LL | pub struct Structure<C: Tec + Bar<5>> {
| ++++++++

error: aborting due to 2 previous errors

For more information about this error, try `rustc --explain E0277`.
@ -0,0 +1,10 @@
// We used to ICE here while trying to synthesize auto trait impls.
// issue: 112828

struct Outer(Inner);
struct Inner;

unsafe impl<Q: Trait> Send for Inner {}
//~^ ERROR the type parameter `Q` is not constrained by the impl trait, self type, or predicates

trait Trait {}
@ -0,0 +1,9 @@
error[E0207]: the type parameter `Q` is not constrained by the impl trait, self type, or predicates
  --> $DIR/unconstrained-param-in-impl-ambiguity.rs:7:13
   |
LL | unsafe impl<Q: Trait> Send for Inner {}
   |             ^ unconstrained type parameter

error: aborting due to 1 previous error

For more information about this error, try `rustc --explain E0207`.
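Side note, not part of this change: E0207 above is the usual "unconstrained type parameter" error, and it disappears once the parameter actually occurs in the impl's trait, self type, or predicates. A minimal sketch of that fix (hypothetical names, illustration only):

use std::marker::PhantomData;

trait Trait {}

// The parameter now occurs in the self type, so it is constrained.
struct InnerWith<Q>(PhantomData<Q>);

unsafe impl<Q: Trait> Send for InnerWith<Q> {}

fn main() {}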
@ -27,7 +27,7 @@ const SHL_U128: u128 = unsafe { intrinsics::unchecked_shl(5_u128, 128) };

const SHL_I8: i8 = unsafe { intrinsics::unchecked_shl(5_i8, 8) };
//~^ ERROR evaluation of constant value failed
const SHL_I16: i16 = unsafe { intrinsics::unchecked_shl(5_16, 16) };
const SHL_I16: i16 = unsafe { intrinsics::unchecked_shl(5_i16, 16) };
//~^ ERROR evaluation of constant value failed
const SHL_I32: i32 = unsafe { intrinsics::unchecked_shl(5_i32, 32) };
//~^ ERROR evaluation of constant value failed
@ -40,7 +40,7 @@ const SHL_I128: i128 = unsafe { intrinsics::unchecked_shl(5_i128, 128) };

const SHL_I8_NEG: i8 = unsafe { intrinsics::unchecked_shl(5_i8, -1) };
//~^ ERROR evaluation of constant value failed
const SHL_I16_NEG: i16 = unsafe { intrinsics::unchecked_shl(5_16, -1) };
const SHL_I16_NEG: i16 = unsafe { intrinsics::unchecked_shl(5_i16, -1) };
//~^ ERROR evaluation of constant value failed
const SHL_I32_NEG: i32 = unsafe { intrinsics::unchecked_shl(5_i32, -1) };
//~^ ERROR evaluation of constant value failed
@ -54,7 +54,7 @@ const SHL_I128_NEG: i128 = unsafe { intrinsics::unchecked_shl(5_i128, -1) };

const SHL_I8_NEG_RANDOM: i8 = unsafe { intrinsics::unchecked_shl(5_i8, -6) };
//~^ ERROR evaluation of constant value failed
const SHL_I16_NEG_RANDOM: i16 = unsafe { intrinsics::unchecked_shl(5_16, -13) };
const SHL_I16_NEG_RANDOM: i16 = unsafe { intrinsics::unchecked_shl(5_i16, -13) };
//~^ ERROR evaluation of constant value failed
const SHL_I32_NEG_RANDOM: i32 = unsafe { intrinsics::unchecked_shl(5_i32, -25) };
//~^ ERROR evaluation of constant value failed
@ -82,7 +82,7 @@ const SHR_U128: u128 = unsafe { intrinsics::unchecked_shr(5_u128, 128) };

const SHR_I8: i8 = unsafe { intrinsics::unchecked_shr(5_i8, 8) };
//~^ ERROR evaluation of constant value failed
const SHR_I16: i16 = unsafe { intrinsics::unchecked_shr(5_16, 16) };
const SHR_I16: i16 = unsafe { intrinsics::unchecked_shr(5_i16, 16) };
//~^ ERROR evaluation of constant value failed
const SHR_I32: i32 = unsafe { intrinsics::unchecked_shr(5_i32, 32) };
//~^ ERROR evaluation of constant value failed
@ -95,7 +95,7 @@ const SHR_I128: i128 = unsafe { intrinsics::unchecked_shr(5_i128, 128) };

const SHR_I8_NEG: i8 = unsafe { intrinsics::unchecked_shr(5_i8, -1) };
//~^ ERROR evaluation of constant value failed
const SHR_I16_NEG: i16 = unsafe { intrinsics::unchecked_shr(5_16, -1) };
const SHR_I16_NEG: i16 = unsafe { intrinsics::unchecked_shr(5_i16, -1) };
//~^ ERROR evaluation of constant value failed
const SHR_I32_NEG: i32 = unsafe { intrinsics::unchecked_shr(5_i32, -1) };
//~^ ERROR evaluation of constant value failed
@ -109,7 +109,7 @@ const SHR_I128_NEG: i128 = unsafe { intrinsics::unchecked_shr(5_i128, -1) };

const SHR_I8_NEG_RANDOM: i8 = unsafe { intrinsics::unchecked_shr(5_i8, -6) };
//~^ ERROR evaluation of constant value failed
const SHR_I16_NEG_RANDOM: i16 = unsafe { intrinsics::unchecked_shr(5_16, -13) };
const SHR_I16_NEG_RANDOM: i16 = unsafe { intrinsics::unchecked_shr(5_i16, -13) };
//~^ ERROR evaluation of constant value failed
const SHR_I32_NEG_RANDOM: i32 = unsafe { intrinsics::unchecked_shr(5_i32, -25) };
//~^ ERROR evaluation of constant value failed
@ -37,8 +37,8 @@ LL | const SHL_I8: i8 = unsafe { intrinsics::unchecked_shl(5_i8, 8) };
error[E0080]: evaluation of constant value failed
  --> $DIR/const-int-unchecked.rs:30:31
   |
LL | const SHL_I16: i16 = unsafe { intrinsics::unchecked_shl(5_16, 16) };
   |                               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 16 in `unchecked_shl`
LL | const SHL_I16: i16 = unsafe { intrinsics::unchecked_shl(5_i16, 16) };
   |                               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 16 in `unchecked_shl`

error[E0080]: evaluation of constant value failed
  --> $DIR/const-int-unchecked.rs:32:31
@ -67,8 +67,8 @@ LL | const SHL_I8_NEG: i8 = unsafe { intrinsics::unchecked_shl(5_i8, -1) };
error[E0080]: evaluation of constant value failed
  --> $DIR/const-int-unchecked.rs:43:35
   |
LL | const SHL_I16_NEG: i16 = unsafe { intrinsics::unchecked_shl(5_16, -1) };
   |                                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -1 in `unchecked_shl`
LL | const SHL_I16_NEG: i16 = unsafe { intrinsics::unchecked_shl(5_i16, -1) };
   |                                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -1 in `unchecked_shl`

error[E0080]: evaluation of constant value failed
  --> $DIR/const-int-unchecked.rs:45:35
@ -97,8 +97,8 @@ LL | const SHL_I8_NEG_RANDOM: i8 = unsafe { intrinsics::unchecked_shl(5_i8, -6)
error[E0080]: evaluation of constant value failed
  --> $DIR/const-int-unchecked.rs:57:42
   |
LL | const SHL_I16_NEG_RANDOM: i16 = unsafe { intrinsics::unchecked_shl(5_16, -13) };
   |                                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -13 in `unchecked_shl`
LL | const SHL_I16_NEG_RANDOM: i16 = unsafe { intrinsics::unchecked_shl(5_i16, -13) };
   |                                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -13 in `unchecked_shl`

error[E0080]: evaluation of constant value failed
  --> $DIR/const-int-unchecked.rs:59:42
@ -157,8 +157,8 @@ LL | const SHR_I8: i8 = unsafe { intrinsics::unchecked_shr(5_i8, 8) };
error[E0080]: evaluation of constant value failed
  --> $DIR/const-int-unchecked.rs:85:31
   |
LL | const SHR_I16: i16 = unsafe { intrinsics::unchecked_shr(5_16, 16) };
   |                               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 16 in `unchecked_shr`
LL | const SHR_I16: i16 = unsafe { intrinsics::unchecked_shr(5_i16, 16) };
   |                               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 16 in `unchecked_shr`

error[E0080]: evaluation of constant value failed
  --> $DIR/const-int-unchecked.rs:87:31
@ -187,8 +187,8 @@ LL | const SHR_I8_NEG: i8 = unsafe { intrinsics::unchecked_shr(5_i8, -1) };
error[E0080]: evaluation of constant value failed
  --> $DIR/const-int-unchecked.rs:98:35
   |
LL | const SHR_I16_NEG: i16 = unsafe { intrinsics::unchecked_shr(5_16, -1) };
   |                                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -1 in `unchecked_shr`
LL | const SHR_I16_NEG: i16 = unsafe { intrinsics::unchecked_shr(5_i16, -1) };
   |                                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -1 in `unchecked_shr`

error[E0080]: evaluation of constant value failed
  --> $DIR/const-int-unchecked.rs:100:35
@ -217,8 +217,8 @@ LL | const SHR_I8_NEG_RANDOM: i8 = unsafe { intrinsics::unchecked_shr(5_i8, -6)
error[E0080]: evaluation of constant value failed
  --> $DIR/const-int-unchecked.rs:112:42
   |
LL | const SHR_I16_NEG_RANDOM: i16 = unsafe { intrinsics::unchecked_shr(5_16, -13) };
   |                                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -13 in `unchecked_shr`
LL | const SHR_I16_NEG_RANDOM: i16 = unsafe { intrinsics::unchecked_shr(5_i16, -13) };
   |                                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -13 in `unchecked_shr`

error[E0080]: evaluation of constant value failed
  --> $DIR/const-int-unchecked.rs:114:42
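Side note, not part of this change: every failure above comes from a shift amount outside `0..BITS` for the shifted type. Outside the intrinsics, the stable integer APIs already keep the amount in bounds; a minimal sketch of the two usual options, checking and masking:

fn main() {
    let x: i16 = 5;

    // checked_shl returns None for out-of-range amounts instead of being UB.
    assert_eq!(x.checked_shl(16), None);
    assert_eq!(x.checked_shl(3), Some(40));

    // wrapping_shl masks the amount into 0..16 first (Java-style semantics),
    // so shifting by 16 degenerates to shifting by 0.
    assert_eq!(x.wrapping_shl(16), 5);
}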
@ -0,0 +1,30 @@
//@ check-pass
// Regression test due to #123279

pub trait Job: AsJob {
    fn run_once(&self);
}

impl<F: Fn()> Job for F {
    fn run_once(&self) {
        todo!()
    }
}

pub trait AsJob {}

// Ensure that `T: Sized + Job` by reordering the explicit `Sized` to where
// the implicit sized pred would go.
impl<T: Job + Sized> AsJob for T {}

pub struct LoopingJobService {
    job: Box<dyn Job>,
}

impl Job for LoopingJobService {
    fn run_once(&self) {
        self.job.run_once()
    }
}

fn main() {}
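Side note, not part of the test: `impl<T: Job + Sized> AsJob for T {}` spells out a bound that type parameters already carry implicitly, so an impl without it covers exactly the same set of types; the test only cares about where the explicit `Sized` lands in the clause list. A minimal sketch with hypothetical trait names:

trait Work {}
trait AsWork {}

// Equivalent to `impl<T: Work + Sized> AsWork for T {}`:
// `T: Sized` is implied unless `?Sized` is written.
impl<T: Work> AsWork for T {}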
@ -3,6 +3,7 @@
//@ revisions: cfi kcfi
// FIXME(#122848) Remove only-linux once OSX CFI binaries work
//@ only-linux
//@ edition: 2024
//@ [cfi] needs-sanitizer-cfi
//@ [kcfi] needs-sanitizer-kcfi
//@ compile-flags: -C target-feature=-crt-static
@ -10,16 +11,22 @@
//@ [cfi] compile-flags: -Z sanitizer=cfi
//@ [kcfi] compile-flags: -Z sanitizer=kcfi
//@ [kcfi] compile-flags: -C panic=abort -Z panic-abort-tests -C prefer-dynamic=off
//@ compile-flags: --test
//@ compile-flags: --test -Z unstable-options
//@ run-pass

#![feature(coroutines)]
#![feature(coroutine_trait)]
#![feature(noop_waker)]
#![feature(gen_blocks)]
#![feature(async_iterator)]

use std::ops::{Coroutine, CoroutineState};
use std::pin::{pin, Pin};
use std::task::{Context, Poll, Waker};
use std::async_iter::AsyncIterator;

fn main() {
#[test]
fn general_coroutine() {
    let mut coro = |x: i32| {
        yield x;
        "done"
@ -28,3 +35,33 @@ fn main() {
    assert_eq!(abstract_coro.as_mut().resume(2), CoroutineState::Yielded(2));
    assert_eq!(abstract_coro.as_mut().resume(0), CoroutineState::Complete("done"));
}

async fn async_fn() {}

#[test]
fn async_coroutine() {
    let f: fn() -> Pin<Box<dyn Future<Output = ()>>> = || Box::pin(async_fn());
    let _ = async { f().await; };
    assert_eq!(f().as_mut().poll(&mut Context::from_waker(Waker::noop())), Poll::Ready(()));
}

async gen fn async_gen_fn() -> u8 {
    yield 5;
}

#[test]
fn async_gen_coroutine() {
    let f: fn() -> Pin<Box<dyn AsyncIterator<Item = u8>>> = || Box::pin(async_gen_fn());
    assert_eq!(f().as_mut().poll_next(&mut Context::from_waker(Waker::noop())),
        Poll::Ready(Some(5)));
}

gen fn gen_fn() -> u8 {
    yield 6;
}

#[test]
fn gen_coroutine() {
    let f: fn() -> Box<dyn Iterator<Item = u8>> = || Box::new(gen_fn());
    assert_eq!(f().next(), Some(6));
}
14 tests/ui/statics/nested_thread_local.rs Normal file
@ -0,0 +1,14 @@
// Check that we forbid nested statics in `thread_local` statics.

#![feature(const_refs_to_cell)]
#![feature(thread_local)]

#[thread_local]
static mut FOO: &u32 = {
    //~^ ERROR: does not support implicit nested statics
    // Prevent promotion (that would trigger on `&42` as an expression)
    let x = 42;
    &{ x }
};

fn main() {}
8 tests/ui/statics/nested_thread_local.stderr Normal file
@ -0,0 +1,8 @@
error: #[thread_local] does not support implicit nested statics, please create explicit static items and refer to them instead
  --> $DIR/nested_thread_local.rs:7:1
   |
LL | static mut FOO: &u32 = {
   | ^^^^^^^^^^^^^^^^^^^^
   |

error: aborting due to 1 previous error
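Side note, not part of this change: a minimal sketch (my own illustration, assuming current nightly behaviour) of the workaround the diagnostic asks for, naming the inner allocation as its own static and referring to it from the thread-local item:

#![feature(thread_local)]

// Explicit static item instead of an implicit nested one.
static VALUE: u32 = 42;

#[thread_local]
static FOO: &u32 = &VALUE;

fn main() {}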