Auto merge of #118082 - compiler-errors:rollup-ejsc8yd, r=matthiaskrgr

Rollup of 8 pull requests

Successful merges:

 - #117828 (Avoid iterating over hashmaps in astconv)
 - #117832 (interpret: simplify handling of shifts by no longer trying to handle signed and unsigned shift amounts in the same branch)
 - #117891 (Recover `dyn` and `impl` after `for<...>`)
 - #117957 (if available use a Child's pidfd for kill/wait)
 - #117988 (Handle attempts to have multiple `cfg`d tail expressions)
 - #117994 (Ignore but do not assume region obligations from unifying headers in negative coherence)
 - #118000 (Make regionck care about placeholders in outlives components)
 - #118068 (subtree update cg_gcc 2023/11/17)

r? `@ghost`
`@rustbot` modify labels: rollup
Merged by bors on 2023-11-20 11:24:28 +00:00 in commit 46ecc10c69.
54 changed files with 795 additions and 147 deletions.

View File

@@ -99,8 +99,10 @@ jobs:
     - name: Build
       run: |
         ./y.sh prepare --only-libcore
-        ./y.sh build
-        cargo test
+        # TODO: remove --features master when it is back to the default.
+        ./y.sh build --features master
+        # TODO: remove --features master when it is back to the default.
+        cargo test --features master
        ./clean_all.sh

     - name: Prepare dependencies
@@ -121,7 +123,8 @@ jobs:
     - name: Run tests
       run: |
-        ./test.sh --release --clean --build-sysroot ${{ matrix.commands }}
+        # TODO: remove --features master when it is back to the default.
+        ./test.sh --features master --release --clean --build-sysroot ${{ matrix.commands }}

   duplicates:
     runs-on: ubuntu-latest

View File

@@ -21,11 +21,14 @@ jobs:
         libgccjit_version:
           - gcc: "libgccjit.so"
             artifacts_branch: "master"
+            # TODO: switch back to --no-default-features in the case of libgccjit 12 when the default is to enable
+            # master again.
+            extra: "--features master"
           - gcc: "libgccjit_without_int128.so"
             artifacts_branch: "master-without-128bit-integers"
+            extra: "--features master"
           - gcc: "libgccjit12.so"
             artifacts_branch: "gcc12"
+            extra: "--no-default-features"
             # FIXME(antoyo): we need to set GCC_EXEC_PREFIX so that the linker can find the linker plugin.
             # Not sure why it's not found otherwise.
             env_extra: "TEST_FLAGS='-Cpanic=abort -Zpanic-abort-tests' GCC_EXEC_PREFIX=/usr/lib/gcc/"

View File

@@ -114,8 +114,10 @@ jobs:
     - name: Build
       run: |
         ./y.sh prepare --only-libcore --cross
-        ./y.sh build --target-triple m68k-unknown-linux-gnu
-        CG_GCC_TEST_TARGET=m68k-unknown-linux-gnu cargo test
+        # TODO: remove --features master when it is back to the default.
+        ./y.sh build --target-triple m68k-unknown-linux-gnu --features master
+        # TODO: remove --features master when it is back to the default.
+        CG_GCC_TEST_TARGET=m68k-unknown-linux-gnu cargo test --features master
        ./clean_all.sh

     - name: Prepare dependencies
@@ -136,4 +138,5 @@ jobs:
     - name: Run tests
       run: |
-        ./test.sh --release --clean --build-sysroot ${{ matrix.commands }}
+        # TODO: remove --features master when it is back to the default.
+        ./test.sh --release --features master --clean --build-sysroot ${{ matrix.commands }}

View File

@@ -78,8 +78,10 @@ jobs:
     - name: Build
       run: |
         ./y.sh prepare --only-libcore
-        EMBED_LTO_BITCODE=1 ./y.sh build --release --release-sysroot
-        cargo test
+        # TODO: remove --features master when it is back to the default.
+        EMBED_LTO_BITCODE=1 ./y.sh build --release --release-sysroot --features master
+        # TODO: remove --features master when it is back to the default.
+        cargo test --features master
        ./clean_all.sh

     - name: Prepare dependencies
@@ -102,4 +104,5 @@ jobs:
     - name: Run tests
       run: |
-        EMBED_LTO_BITCODE=1 ./test.sh --release --clean --release-sysroot --build-sysroot ${{ matrix.commands }}
+        # TODO: remove --features master when it is back to the default.
+        EMBED_LTO_BITCODE=1 ./test.sh --release --clean --release-sysroot --build-sysroot ${{ matrix.commands }} --features master

View File

@@ -92,8 +92,10 @@ jobs:
     - name: Build
       run: |
         ./y.sh prepare --only-libcore
-        ./y.sh build --release --release-sysroot
-        cargo test
+        # TODO: remove `--features master` when it is back to the default.
+        ./y.sh build --release --release-sysroot --features master
+        # TODO: remove --features master when it is back to the default.
+        cargo test --features master

     - name: Clean
       if: ${{ !matrix.cargo_runner }}
@@ -111,12 +113,14 @@ jobs:
       uses: actions-rs/cargo@v1.0.3
       with:
         command: build
-        args: --release
+        # TODO: remove `--features master` when it is back to the default.
+        args: --release --features master

     - name: Run tests
       if: ${{ !matrix.cargo_runner }}
       run: |
-        ./test.sh --release --clean --release-sysroot --build-sysroot --mini-tests --std-tests --test-libcore
+        # TODO: remove `--features master` when it is back to the default.
+        ./test.sh --release --clean --release-sysroot --build-sysroot --mini-tests --std-tests --test-libcore --features master

     - name: Run stdarch tests
       if: ${{ !matrix.cargo_runner }}

View File

@@ -74,7 +74,7 @@ dependencies = [
 [[package]]
 name = "gccjit"
 version = "1.0.0"
-source = "git+https://github.com/antoyo/gccjit.rs#c52a218f5529321285b4489e5562a00e5428e033"
+source = "git+https://github.com/antoyo/gccjit.rs#6e290f25b1d1edab5ae9ace486fd2dc8c08d6421"
 dependencies = [
  "gccjit_sys",
 ]
@@ -82,7 +82,7 @@ dependencies = [
 [[package]]
 name = "gccjit_sys"
 version = "0.0.1"
-source = "git+https://github.com/antoyo/gccjit.rs#c52a218f5529321285b4489e5562a00e5428e033"
+source = "git+https://github.com/antoyo/gccjit.rs#6e290f25b1d1edab5ae9ace486fd2dc8c08d6421"
 dependencies = [
  "libc",
 ]

View File

@@ -28,3 +28,7 @@ fi
 # Copy files to sysroot
 mkdir -p sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
 cp -r target/$TARGET_TRIPLE/$sysroot_channel/deps/* sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
+# Copy the source files to the sysroot (Rust for Linux needs this).
+source_dir=sysroot/lib/rustlib/src/rust
+mkdir -p $source_dir
+cp -r sysroot_src/library/ $source_dir

View File

@@ -194,6 +194,12 @@ fn build_sysroot(
         copier,
     )?;

+    // Copy the source files to the sysroot (Rust for Linux needs this).
+    let sysroot_src_path = "sysroot/lib/rustlib/src/rust";
+    fs::create_dir_all(&sysroot_src_path)
+        .map_err(|error| format!("Failed to create directory `{}`: {:?}", sysroot_src_path, error))?;
+    run_command(&[&"cp", &"-r", &"sysroot_src/library/", &sysroot_src_path], None)?;
+
     Ok(())
 }

View File

@@ -38,3 +38,5 @@ tests/ui/target-feature/missing-plusminus.rs
 tests/ui/sse2.rs
 tests/ui/codegen/issue-79865-llvm-miscompile.rs
 tests/ui/intrinsics/intrinsics-integer.rs
+tests/ui/std-backtrace.rs
+tests/ui/mir/alignment/packed.rs

View File

@@ -1,6 +1,6 @@
-From 7bcd24ec6d4a96121874cb1ae5a23ea274aeff34 Mon Sep 17 00:00:00 2001
+From a5663265f797a43c502915c356fe7899c16cee92 Mon Sep 17 00:00:00 2001
 From: None <none@example.com>
-Date: Thu, 19 Oct 2023 13:12:51 -0400
+Date: Sat, 18 Nov 2023 10:50:36 -0500
 Subject: [PATCH] [core] Disable portable-simd test

 ---
@@ -8,18 +8,18 @@ Subject: [PATCH] [core] Disable portable-simd test
  1 file changed, 2 deletions(-)

 diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs
-index 5814ed4..194ad4c 100644
+index d0a119c..76fdece 100644
 --- a/library/core/tests/lib.rs
 +++ b/library/core/tests/lib.rs
-@@ -90,7 +90,6 @@
+@@ -89,7 +89,6 @@
+ #![feature(never_type)]
  #![feature(unwrap_infallible)]
- #![feature(pointer_byte_offsets)]
  #![feature(pointer_is_aligned)]
 -#![feature(portable_simd)]
  #![feature(ptr_metadata)]
  #![feature(lazy_cell)]
  #![feature(unsized_tuple_coercion)]
-@@ -157,7 +156,6 @@ mod pin;
+@@ -155,7 +154,6 @@ mod pin;
  mod pin_macro;
  mod ptr;
  mod result;
@@ -28,5 +28,5 @@ index 5814ed4..194ad4c 100644
  mod str;
  mod str_lossy;
 --
-2.42.0
+2.42.1

View File

@@ -1,3 +1,3 @@
 [toolchain]
-channel = "nightly-2023-10-21"
+channel = "nightly-2023-11-17"
 components = ["rust-src", "rustc-dev", "llvm-tools-preview"]

View File

@@ -3,7 +3,6 @@
 use std::time::Instant;

 use gccjit::{
-    Context,
     FunctionType,
     GlobalKind,
 };
@@ -18,8 +17,9 @@
 use rustc_codegen_ssa::traits::DebugInfoMethods;
 use rustc_session::config::DebugInfo;
 use rustc_span::Symbol;
+use rustc_target::spec::PanicStrategy;

-use crate::{LockedTargetInfo, gcc_util};
+use crate::{LockedTargetInfo, gcc_util, new_context};
 use crate::GccContext;
 use crate::builder::Builder;
 use crate::context::CodegenCx;
@@ -88,20 +88,18 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, target_info: Lock
 fn module_codegen(tcx: TyCtxt<'_>, (cgu_name, target_info): (Symbol, LockedTargetInfo)) -> ModuleCodegen<GccContext> {
     let cgu = tcx.codegen_unit(cgu_name);
     // Instantiate monomorphizations without filling out definitions yet...
-    let context = Context::default();
+    let context = new_context(tcx);

-    context.add_command_line_option("-fexceptions");
-    context.add_driver_option("-fexceptions");
+    if tcx.sess.panic_strategy() == PanicStrategy::Unwind {
+        context.add_command_line_option("-fexceptions");
+        context.add_driver_option("-fexceptions");
+    }

     let disabled_features: HashSet<_> = tcx.sess.opts.cg.target_feature.split(',')
         .filter(|feature| feature.starts_with('-'))
         .map(|string| &string[1..])
         .collect();

-    if tcx.sess.target.arch == "x86" || tcx.sess.target.arch == "x86_64" {
-        context.add_command_line_option("-masm=intel");
-    }
-
     if !disabled_features.contains("avx") && tcx.sess.target.arch == "x86_64" {
         // NOTE: we always enable AVX because the equivalent of llvm.x86.sse2.cmp.pd in GCC for
         // SSE2 is multiple builtins, so we use the AVX __builtin_ia32_cmppd instead.

View File

@@ -76,6 +76,9 @@ pub fn gcc_lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
                 a >> b
             }
         }
+        else if a_type.is_vector() && a_type.is_vector() {
+            a >> b
+        }
         else if a_native && !b_native {
             self.gcc_lshr(a, self.gcc_int_cast(b, a_type))
         }
@@ -144,7 +147,7 @@ pub fn gcc_lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
     fn additive_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
         let a_type = a.get_type();
         let b_type = b.get_type();
-        if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
+        if (self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type)) || (a_type.is_vector() && b_type.is_vector()) {
             if a_type != b_type {
                 if a_type.is_vector() {
                     // Vector types need to be bitcast.
@@ -158,6 +161,8 @@ fn additive_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue
             self.context.new_binary_op(None, operation, a_type, a, b)
         }
         else {
+            debug_assert!(a_type.dyncast_array().is_some());
+            debug_assert!(b_type.dyncast_array().is_some());
             let signed = a_type.is_compatible_with(self.i128_type);
             let func_name =
                 match (operation, signed) {
@@ -189,10 +194,12 @@ pub fn gcc_sub(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
     fn multiplicative_operation(&self, operation: BinaryOp, operation_name: &str, signed: bool, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
         let a_type = a.get_type();
         let b_type = b.get_type();
-        if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
+        if (self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type)) || (a_type.is_vector() && b_type.is_vector()) {
             self.context.new_binary_op(None, operation, a_type, a, b)
         }
         else {
+            debug_assert!(a_type.dyncast_array().is_some());
+            debug_assert!(b_type.dyncast_array().is_some());
             let sign =
                 if signed {
                     ""
@@ -337,6 +344,8 @@ pub fn gcc_checked_binop(&self, oop: OverflowOp, typ: Ty<'_>, lhs: <Self as Back
     pub fn operation_with_overflow(&self, func_name: &str, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) {
         let a_type = lhs.get_type();
         let b_type = rhs.get_type();
+        debug_assert!(a_type.dyncast_array().is_some());
+        debug_assert!(b_type.dyncast_array().is_some());
         let param_a = self.context.new_parameter(None, a_type, "a");
         let param_b = self.context.new_parameter(None, b_type, "b");
         let result_field = self.context.new_field(None, a_type, "result");
@@ -496,7 +505,11 @@ pub fn gcc_icmp(&mut self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RVa
     pub fn gcc_xor(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
         let a_type = a.get_type();
         let b_type = b.get_type();
-        if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
+        if a_type.is_vector() && b_type.is_vector() {
+            let b = self.bitcast_if_needed(b, a_type);
+            a ^ b
+        }
+        else if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
             a ^ b
         }
         else {
@@ -527,6 +540,9 @@ pub fn gcc_shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
                 a << b
             }
         }
+        else if a_type.is_vector() && a_type.is_vector() {
+            a << b
+        }
        else if a_native && !b_native {
             self.gcc_shl(a, self.gcc_int_cast(b, a_type))
         }
@@ -690,6 +706,7 @@ fn bitwise_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<
         let a_native = self.is_native_int_type_or_bool(a_type);
         let b_native = self.is_native_int_type_or_bool(b_type);
         if a_type.is_vector() && b_type.is_vector() {
+            let b = self.bitcast_if_needed(b, a_type);
             self.context.new_binary_op(None, operation, a_type, a, b)
         }
         else if a_native && b_native {
@@ -748,6 +765,7 @@ fn int_to_float_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'g
             return self.context.new_cast(None, value, dest_typ);
         }

+        debug_assert!(value_type.dyncast_array().is_some());
         let name_suffix =
             match self.type_kind(dest_typ) {
                 TypeKind::Float => "tisf",
@@ -781,6 +799,7 @@ fn float_to_int_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'g
             return self.context.new_cast(None, value, dest_typ);
         }

+        debug_assert!(value_type.dyncast_array().is_some());
         let name_suffix =
             match self.type_kind(value_type) {
                 TypeKind::Float => "sfti",

View File

@@ -39,6 +39,8 @@
 extern crate rustc_fluent_macro;
 extern crate rustc_fs_util;
 extern crate rustc_hir;
+#[cfg(feature="master")]
+extern crate rustc_interface;
 extern crate rustc_macros;
 extern crate rustc_metadata;
 extern crate rustc_middle;
@@ -86,7 +88,7 @@
 use gccjit::{Context, OptimizationLevel};
 #[cfg(feature="master")]
-use gccjit::TargetInfo;
+use gccjit::{TargetInfo, Version};
 #[cfg(not(feature="master"))]
 use gccjit::CType;
 use errors::LTONotSupported;
@@ -244,17 +246,33 @@ fn target_features(&self, sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
     }
 }

+fn new_context<'gcc, 'tcx>(tcx: TyCtxt<'tcx>) -> Context<'gcc> {
+    let context = Context::default();
+    if tcx.sess.target.arch == "x86" || tcx.sess.target.arch == "x86_64" {
+        context.add_command_line_option("-masm=intel");
+    }
+    #[cfg(feature="master")]
+    {
+        let version = Version::get();
+        let version = format!("{}.{}.{}", version.major, version.minor, version.patch);
+        context.set_output_ident(&format!("rustc version {} with libgccjit {}",
+            rustc_interface::util::rustc_version_str().unwrap_or("unknown version"),
+            version,
+        ));
+    }
+    // TODO(antoyo): check if this should only be added when using -Cforce-unwind-tables=n.
+    context.add_command_line_option("-fno-asynchronous-unwind-tables");
+    context
+}
+
 impl ExtraBackendMethods for GccCodegenBackend {
     fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, module_name: &str, kind: AllocatorKind, alloc_error_handler_kind: AllocatorKind) -> Self::Module {
         let mut mods = GccContext {
-            context: Context::default(),
+            context: new_context(tcx),
             should_combine_object_files: false,
             temp_dir: None,
         };
-
-        if tcx.sess.target.arch == "x86" || tcx.sess.target.arch == "x86_64" {
-            mods.context.add_command_line_option("-masm=intel");
-        }

         unsafe { allocator::codegen(tcx, &mut mods, module_name, kind, alloc_error_handler_kind); }
         mods
     }

View File

@@ -322,8 +322,13 @@ pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     if lhs_sz < rhs_sz {
         bx.trunc(rhs, lhs_llty)
     } else if lhs_sz > rhs_sz {
-        // FIXME (#1877: If in the future shifting by negative
-        // values is no longer undefined then this is wrong.
+        // We zero-extend even if the RHS is signed. So e.g. `(x: i32) << -1i8` will zero-extend the
+        // RHS to `255i32`. But then we mask the shift amount to be within the size of the LHS
+        // anyway so the result is `31` as it should be. All the extra bits introduced by zext
+        // are masked off so their value does not matter.
+        // FIXME: if we ever support 512bit integers, this will be wrong! For such large integers,
+        // the extra bits introduced by zext are *not* all masked away any more.
+        assert!(lhs_sz <= 256);
         bx.zext(rhs, lhs_llty)
     } else {
         rhs
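A standalone sketch (not part of this diff) of the property the new comment relies on: because the backend masks the shift amount to the LHS width anyway, zero-extending a signed RHS such as `-1i8` cannot change the observed shift. The variable names below are illustrative only.

```rust
fn main() {
    let x: i32 = 0b1010;
    let rhs: i8 = -1;

    // Backend-style lowering: zero-extend the RHS, then mask it to the LHS width.
    let zext = rhs as u8 as u32; // 255
    let masked = zext & (32 - 1); // 31
    assert_eq!(masked, 31);

    // Only the masked amount reaches the actual shift.
    assert_eq!(x.wrapping_shl(masked), x << 31);
}
```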

View File

@@ -156,41 +156,35 @@ fn binary_int_op(
         // Shift ops can have an RHS with a different numeric type.
         if matches!(bin_op, Shl | ShlUnchecked | Shr | ShrUnchecked) {
-            let size = u128::from(left_layout.size.bits());
-            // Even if `r` is signed, we treat it as if it was unsigned (i.e., we use its
-            // zero-extended form). This matches the codegen backend:
-            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/base.rs#L315-L317>.
-            // The overflow check is also ignorant to the sign:
-            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/mir/rvalue.rs#L728>.
-            // This would behave rather strangely if we had integer types of size 256: a shift by
-            // -1i8 would actually shift by 255, but that would *not* be considered overflowing. A
-            // shift by -1i16 though would be considered overflowing. If we had integers of size
-            // 512, then a shift by -1i8 would even produce a different result than one by -1i16:
-            // the first shifts by 255, the latter by u16::MAX % 512 = 511. Lucky enough, our
-            // integers are maximally 128bits wide, so negative shifts *always* overflow and we have
-            // consistent results for the same value represented at different bit widths.
-            assert!(size <= 128);
-            let original_r = r;
-            let overflow = r >= size;
-            // The shift offset is implicitly masked to the type size, to make sure this operation
-            // is always defined. This is the one MIR operator that does *not* directly map to a
-            // single LLVM operation. See
-            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/common.rs#L131-L158>
-            // for the corresponding truncation in our codegen backends.
-            let r = r % size;
-            let r = u32::try_from(r).unwrap(); // we masked so this will always fit
+            let size = left_layout.size.bits();
+            // The shift offset is implicitly masked to the type size. (This is the one MIR operator
+            // that does *not* directly map to a single LLVM operation.) Compute how much we
+            // actually shift and whether there was an overflow due to shifting too much.
+            let (shift_amount, overflow) = if right_layout.abi.is_signed() {
+                let shift_amount = self.sign_extend(r, right_layout) as i128;
+                let overflow = shift_amount < 0 || shift_amount >= i128::from(size);
+                let masked_amount = (shift_amount as u128) % u128::from(size);
+                debug_assert_eq!(overflow, shift_amount != (masked_amount as i128));
+                (masked_amount, overflow)
+            } else {
+                let shift_amount = r;
+                let masked_amount = shift_amount % u128::from(size);
+                (masked_amount, shift_amount != masked_amount)
+            };
+            let shift_amount = u32::try_from(shift_amount).unwrap(); // we masked so this will always fit
+            // Compute the shifted result.
             let result = if left_layout.abi.is_signed() {
                 let l = self.sign_extend(l, left_layout) as i128;
                 let result = match bin_op {
-                    Shl | ShlUnchecked => l.checked_shl(r).unwrap(),
-                    Shr | ShrUnchecked => l.checked_shr(r).unwrap(),
+                    Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
+                    Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(),
                     _ => bug!(),
                 };
                 result as u128
             } else {
                 match bin_op {
-                    Shl | ShlUnchecked => l.checked_shl(r).unwrap(),
-                    Shr | ShrUnchecked => l.checked_shr(r).unwrap(),
+                    Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
+                    Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(),
                     _ => bug!(),
                 }
             };
@@ -199,7 +193,11 @@ fn binary_int_op(
             if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
                 throw_ub_custom!(
                     fluent::const_eval_overflow_shift,
-                    val = original_r,
+                    val = if right_layout.abi.is_signed() {
+                        (self.sign_extend(r, right_layout) as i128).to_string()
+                    } else {
+                        r.to_string()
+                    },
                     name = intrinsic_name
                 );
             }
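A standalone sketch (not interpreter code) of the masking rule implemented above: the shift amount is reduced modulo the LHS bit width, and the operation is flagged as overflowing whenever the original amount was negative or too large. The helper name `masked_shift_amount` is made up for this illustration.

```rust
// `size` is the bit width of the left-hand side; `rhs` is the raw shift amount.
fn masked_shift_amount(size: u32, rhs: i128, rhs_is_signed: bool) -> (u32, bool) {
    if rhs_is_signed {
        // Negative or too-large amounts overflow; the shift itself uses the masked value.
        let overflow = rhs < 0 || rhs >= i128::from(size);
        let masked = (rhs as u128) % u128::from(size);
        (masked as u32, overflow)
    } else {
        let rhs = rhs as u128;
        let masked = rhs % u128::from(size);
        (masked as u32, rhs != masked)
    }
}

fn main() {
    // `x << -1i8` on a 32-bit LHS: shifts by 31, but is reported as overflowing.
    assert_eq!(masked_shift_amount(32, -1, true), (31, true));
    // `x << 3u8`: in range, no overflow.
    assert_eq!(masked_shift_amount(32, 3, false), (3, false));
}
```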

View File

@@ -4,7 +4,7 @@
     ParenthesizedFnTraitExpansion,
 };
 use crate::traits::error_reporting::report_object_safety_error;
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::{FxHashMap, FxIndexMap, FxIndexSet};
 use rustc_errors::{pluralize, struct_span_err, Applicability, Diagnostic, ErrorGuaranteed};
 use rustc_hir as hir;
 use rustc_hir::def_id::{DefId, LocalDefId};
@@ -16,8 +16,6 @@
 use rustc_span::{Span, Symbol, DUMMY_SP};
 use rustc_trait_selection::traits::object_safety_violations_for_assoc_item;

-use std::collections::BTreeSet;
-
 impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
     /// On missing type parameters, emit an E0393 error and provide a structured suggestion using
     /// the type parameter's name as a placeholder.
@@ -504,7 +502,7 @@ pub(crate) fn complain_about_inherent_assoc_type_not_found(
     /// emit a generic note suggesting using a `where` clause to constraint instead.
     pub(crate) fn complain_about_missing_associated_types(
         &self,
-        associated_types: FxHashMap<Span, BTreeSet<DefId>>,
+        associated_types: FxIndexMap<Span, FxIndexSet<DefId>>,
         potential_assoc_types: Vec<Span>,
         trait_bounds: &[hir::PolyTraitRef<'_>],
     ) {
@@ -514,13 +512,13 @@ pub(crate) fn complain_about_missing_associated_types(
         let tcx = self.tcx();
         // FIXME: Marked `mut` so that we can replace the spans further below with a more
         // appropriate one, but this should be handled earlier in the span assignment.
-        let mut associated_types: FxHashMap<Span, Vec<_>> = associated_types
+        let mut associated_types: FxIndexMap<Span, Vec<_>> = associated_types
             .into_iter()
             .map(|(span, def_ids)| {
                 (span, def_ids.into_iter().map(|did| tcx.associated_item(did)).collect())
             })
             .collect();
-        let mut names: FxHashMap<String, Vec<Symbol>> = Default::default();
+        let mut names: FxIndexMap<String, Vec<Symbol>> = Default::default();
         let mut names_len = 0;

         // Account for things like `dyn Foo + 'a`, like in tests `issue-22434.rs` and

View File

@@ -1,7 +1,7 @@
 use crate::astconv::{GenericArgCountMismatch, GenericArgCountResult, OnlySelfBounds};
 use crate::bounds::Bounds;
 use crate::errors::TraitObjectDeclaredWithNoTraits;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::fx::{FxHashSet, FxIndexMap, FxIndexSet};
 use rustc_errors::struct_span_err;
 use rustc_hir as hir;
 use rustc_hir::def::{DefKind, Res};
@@ -14,7 +14,6 @@
 use rustc_trait_selection::traits::{self, astconv_object_safety_violations};
 use smallvec::{smallvec, SmallVec};
-use std::collections::BTreeSet;

 use super::AstConv;
@@ -148,8 +147,7 @@ trait here instead: `trait NewTrait: {} {{}}`",
             }
         }

-        // Use a `BTreeSet` to keep output in a more consistent order.
-        let mut associated_types: FxHashMap<Span, BTreeSet<DefId>> = FxHashMap::default();
+        let mut associated_types: FxIndexMap<Span, FxIndexSet<DefId>> = FxIndexMap::default();

         let regular_traits_refs_spans = trait_bounds
             .into_iter()

View File

@@ -80,6 +80,10 @@ pub(crate) fn insert_outlives_predicate<'tcx>(
                     .or_insert(span);
             }

+            Component::Placeholder(_) => {
+                span_bug!(span, "Should not deduce placeholder outlives component");
+            }
+
             Component::Alias(alias_ty) => {
                 // This would either arise from something like:
                 //

View File

@@ -2351,6 +2351,7 @@ pub fn construct_generic_bound_failure(
         let labeled_user_string = match bound_kind {
             GenericKind::Param(ref p) => format!("the parameter type `{p}`"),
+            GenericKind::Placeholder(ref p) => format!("the placeholder type `{p:?}`"),
             GenericKind::Alias(ref p) => match p.kind(self.tcx) {
                 ty::AliasKind::Projection | ty::AliasKind::Inherent => {
                     format!("the associated type `{p}`")

View File

@@ -11,6 +11,7 @@
 pub enum Component<'tcx> {
     Region(ty::Region<'tcx>),
     Param(ty::ParamTy),
+    Placeholder(ty::PlaceholderType),
     UnresolvedInferenceVariable(ty::InferTy),

     // Projections like `T::Foo` are tricky because a constraint like
@@ -120,6 +121,10 @@ fn compute_components<'tcx>(
                 out.push(Component::Param(p));
             }

+            ty::Placeholder(p) => {
+                out.push(Component::Placeholder(p));
+            }
+
             // For projections, we prefer to generate an obligation like
             // `<P0 as Trait<P1...Pn>>::Foo: 'a`, because this gives the
             // regionck more ways to prove that it holds. However,
@@ -176,7 +181,6 @@ fn compute_components<'tcx>(
             ty::Tuple(..) |        // ...
             ty::FnPtr(_) |         // OutlivesFunction (*)
             ty::Dynamic(..) |      // OutlivesObject, OutlivesFragment (*)
-            ty::Placeholder(..) |
             ty::Bound(..) |
             ty::Error(_) => {
                 // (*) Function pointers and trait objects are both binders.

View File

@@ -243,6 +243,9 @@ fn components_must_outlive(
                 Component::Param(param_ty) => {
                     self.param_ty_must_outlive(origin, region, *param_ty);
                 }
+                Component::Placeholder(placeholder_ty) => {
+                    self.placeholder_ty_must_outlive(origin, region, *placeholder_ty);
+                }
                 Component::Alias(alias_ty) => self.alias_ty_must_outlive(origin, region, *alias_ty),
                 Component::EscapingAlias(subcomponents) => {
                     self.components_must_outlive(origin, &subcomponents, region, category);
@@ -267,10 +270,28 @@ fn param_ty_must_outlive(
         region: ty::Region<'tcx>,
         param_ty: ty::ParamTy,
     ) {
-        let verify_bound = self.verify_bound.param_bound(param_ty);
+        let verify_bound = self.verify_bound.param_or_placeholder_bound(param_ty.to_ty(self.tcx));
         self.delegate.push_verify(origin, GenericKind::Param(param_ty), region, verify_bound);
     }

+    #[instrument(level = "debug", skip(self))]
+    fn placeholder_ty_must_outlive(
+        &mut self,
+        origin: infer::SubregionOrigin<'tcx>,
+        region: ty::Region<'tcx>,
+        placeholder_ty: ty::PlaceholderType,
+    ) {
+        let verify_bound = self
+            .verify_bound
+            .param_or_placeholder_bound(Ty::new_placeholder(self.tcx, placeholder_ty));
+        self.delegate.push_verify(
+            origin,
+            GenericKind::Placeholder(placeholder_ty),
+            region,
+            verify_bound,
+        );
+    }
+
     #[instrument(level = "debug", skip(self))]
     fn alias_ty_must_outlive(
         &mut self,

View File

@@ -37,11 +37,11 @@ pub fn new(
     }

     #[instrument(level = "debug", skip(self))]
-    pub fn param_bound(&self, param_ty: ty::ParamTy) -> VerifyBound<'tcx> {
+    pub fn param_or_placeholder_bound(&self, ty: Ty<'tcx>) -> VerifyBound<'tcx> {
         // Start with anything like `T: 'a` we can scrape from the
         // environment. If the environment contains something like
         // `for<'a> T: 'a`, then we know that `T` outlives everything.
-        let declared_bounds_from_env = self.declared_generic_bounds_from_env(param_ty);
+        let declared_bounds_from_env = self.declared_generic_bounds_from_env(ty);
         debug!(?declared_bounds_from_env);
         let mut param_bounds = vec![];
         for declared_bound in declared_bounds_from_env {
@@ -51,7 +51,7 @@ pub fn param_bound(&self, param_ty: ty::ParamTy) -> VerifyBound<'tcx> {
                 param_bounds.push(VerifyBound::OutlivedBy(region));
             } else {
                 // This is `for<'a> T: 'a`. This means that `T` outlives everything! All done here.
-                debug!("found that {param_ty:?} outlives any lifetime, returning empty vector");
+                debug!("found that {ty:?} outlives any lifetime, returning empty vector");
                 return VerifyBound::AllBounds(vec![]);
             }
         }
@@ -168,7 +168,10 @@ fn bound_from_single_component(
     ) -> VerifyBound<'tcx> {
         match *component {
             Component::Region(lt) => VerifyBound::OutlivedBy(lt),
-            Component::Param(param_ty) => self.param_bound(param_ty),
+            Component::Param(param_ty) => self.param_or_placeholder_bound(param_ty.to_ty(self.tcx)),
+            Component::Placeholder(placeholder_ty) => {
+                self.param_or_placeholder_bound(Ty::new_placeholder(self.tcx, placeholder_ty))
+            }
             Component::Alias(alias_ty) => self.alias_bound(alias_ty, visited),
             Component::EscapingAlias(ref components) => {
                 self.bound_from_components(components, visited)
@@ -195,9 +198,9 @@ fn bound_from_single_component(
     /// bounds, but all the bounds it returns can be relied upon.
     fn declared_generic_bounds_from_env(
         &self,
-        param_ty: ty::ParamTy,
+        generic_ty: Ty<'tcx>,
     ) -> Vec<ty::Binder<'tcx, ty::OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>>> {
-        let generic_ty = param_ty.to_ty(self.tcx);
+        assert!(matches!(generic_ty.kind(), ty::Param(_) | ty::Placeholder(_)));
         self.declared_generic_bounds_from_env_for_erased_ty(generic_ty)
     }

View File

@@ -147,6 +147,7 @@ pub struct Verify<'tcx> {
 #[derive(Copy, Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable)]
 pub enum GenericKind<'tcx> {
     Param(ty::ParamTy),
+    Placeholder(ty::PlaceholderType),
     Alias(ty::AliasTy<'tcx>),
 }

@@ -707,6 +708,7 @@ impl<'tcx> fmt::Debug for GenericKind<'tcx> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match *self {
             GenericKind::Param(ref p) => write!(f, "{p:?}"),
+            GenericKind::Placeholder(ref p) => write!(f, "{p:?}"),
             GenericKind::Alias(ref p) => write!(f, "{p:?}"),
         }
     }
@@ -716,6 +718,7 @@ impl<'tcx> fmt::Display for GenericKind<'tcx> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match *self {
             GenericKind::Param(ref p) => write!(f, "{p}"),
+            GenericKind::Placeholder(ref p) => write!(f, "{p:?}"),
             GenericKind::Alias(ref p) => write!(f, "{p}"),
         }
     }
@@ -725,6 +728,7 @@ impl<'tcx> GenericKind<'tcx> {
     pub fn to_ty(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
         match *self {
             GenericKind::Param(ref p) => p.to_ty(tcx),
+            GenericKind::Placeholder(ref p) => Ty::new_placeholder(tcx, *p),
             GenericKind::Alias(ref p) => p.to_ty(tcx),
         }
     }

View File

@@ -365,6 +365,11 @@ fn elaborate(&mut self, elaboratable: &O) {
                             Some(ty::ClauseKind::TypeOutlives(ty::OutlivesPredicate(ty, r_min)))
                         }

+                        Component::Placeholder(p) => {
+                            let ty = Ty::new_placeholder(tcx, p);
+                            Some(ty::ClauseKind::TypeOutlives(ty::OutlivesPredicate(ty, r_min)))
+                        }
+
                         Component::UnresolvedInferenceVariable(_) => None,

                         Component::Alias(alias_ty) => {

View File

@@ -1404,18 +1404,18 @@ pub enum BinOp {
     BitOr,
     /// The `<<` operator (shift left)
     ///
-    /// The offset is truncated to the size of the first operand before shifting.
+    /// The offset is truncated to the size of the first operand and made unsigned before shifting.
     Shl,
-    /// Like `Shl`, but is UB if the RHS >= LHS::BITS
+    /// Like `Shl`, but is UB if the RHS >= LHS::BITS or RHS < 0
     ShlUnchecked,
     /// The `>>` operator (shift right)
     ///
-    /// The offset is truncated to the size of the first operand before shifting.
+    /// The offset is truncated to the size of the first operand and made unsigned before shifting.
     ///
     /// This is an arithmetic shift if the LHS is signed
     /// and a logical shift if the LHS is unsigned.
     Shr,
-    /// Like `Shl`, but is UB if the RHS >= LHS::BITS
+    /// Like `Shl`, but is UB if the RHS >= LHS::BITS or RHS < 0
     ShrUnchecked,
     /// The `==` operator (equality)
     Eq,

View File

@@ -600,10 +600,12 @@ pub(crate) fn build_binary_op(
             BinOp::Shl | BinOp::Shr if self.check_overflow && ty.is_integral() => {
                 // For an unsigned RHS, the shift is in-range for `rhs < bits`.
                 // For a signed RHS, `IntToInt` cast to the equivalent unsigned
-                // type and do that same comparison. Because the type is the
-                // same size, there's no negative shift amount that ends up
-                // overlapping with valid ones, thus it catches negatives too.
+                // type and do that same comparison.
+                // A negative value will be *at least* 128 after the cast (that's i8::MIN),
+                // and 128 is an overflowing shift amount for all our currently existing types,
+                // so this cast can never make us miss an overflow.
                 let (lhs_size, _) = ty.int_size_and_signed(self.tcx);
+                assert!(lhs_size.bits() <= 128);
                 let rhs_ty = rhs.ty(&self.local_decls, self.tcx);
                 let (rhs_size, _) = rhs_ty.int_size_and_signed(self.tcx);
@@ -625,7 +627,6 @@ pub(crate) fn build_binary_op(
                 // This can't overflow because the largest shiftable types are 128-bit,
                 // which fits in `u8`, the smallest possible `unsigned_ty`.
-                // (And `from_uint` will `bug!` if that's ever no longer true.)
                 let lhs_bits = Operand::const_from_scalar(
                     self.tcx,
                     unsigned_ty,
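A standalone sketch (not rustc code) of the claim in the new comment: after a cast to the unsigned type of the same width, every negative shift amount compares as at least 128, which is already an overflowing amount for the widest (128-bit) shiftable type, so the check never misses a negative RHS.

```rust
fn main() {
    let lhs_bits: u8 = 128; // bit width of the widest shiftable integer type

    // Any negative i8 becomes >= 128 after the cast, so the check always flags it.
    assert!((-1i8 as u8) >= lhs_bits);
    assert!((i8::MIN as u8) >= lhs_bits);

    // Non-negative amounts are unchanged, so an in-range shift still passes.
    assert!((5i8 as u8) < lhs_bits);
}
```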

View File

@@ -739,6 +739,9 @@ parse_trailing_vert_not_allowed = a trailing `|` is not allowed in an or-pattern
 parse_trait_alias_cannot_be_auto = trait aliases cannot be `auto`
 parse_trait_alias_cannot_be_unsafe = trait aliases cannot be `unsafe`

+parse_transpose_dyn_or_impl = `for<...>` expected after `{$kw}`, not before
+    .suggestion = move `{$kw}` before the `for<...>`
+
 parse_type_ascription_removed =
     if you meant to annotate an expression with a type, the type ascription syntax has been removed, see issue #101728 <https://github.com/rust-lang/rust/issues/101728>

View File

@@ -2827,3 +2827,23 @@ pub(crate) struct GenericArgsInPatRequireTurbofishSyntax {
     )]
     pub suggest_turbofish: Span,
 }
+
+#[derive(Diagnostic)]
+#[diag(parse_transpose_dyn_or_impl)]
+pub(crate) struct TransposeDynOrImpl<'a> {
+    #[primary_span]
+    pub span: Span,
+    pub kw: &'a str,
+    #[subdiagnostic]
+    pub sugg: TransposeDynOrImplSugg<'a>,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(parse_suggestion, applicability = "machine-applicable")]
+pub(crate) struct TransposeDynOrImplSugg<'a> {
+    #[suggestion_part(code = "")]
+    pub removal_span: Span,
+    #[suggestion_part(code = "{kw} ")]
+    pub insertion_span: Span,
+    pub kw: &'a str,
+}

View File

@@ -21,6 +21,7 @@
 use crate::fluent_generated as fluent;
 use crate::parser;
+use crate::parser::attr::InnerAttrPolicy;
 use rustc_ast as ast;
 use rustc_ast::ptr::P;
 use rustc_ast::token::{self, Delimiter, Lit, LitKind, TokenKind};
@@ -723,6 +724,101 @@ fn is_ident_eq_keyword(found: &TokenKind, expected: &TokenType) -> bool {
         Err(err)
     }

+    pub(super) fn attr_on_non_tail_expr(&self, expr: &Expr) {
+        // Missing semicolon typo error.
+        let span = self.prev_token.span.shrink_to_hi();
+        let mut err = self.sess.create_err(ExpectedSemi {
+            span,
+            token: self.token.clone(),
+            unexpected_token_label: Some(self.token.span),
+            sugg: ExpectedSemiSugg::AddSemi(span),
+        });
+        let attr_span = match &expr.attrs[..] {
+            [] => unreachable!(),
+            [only] => only.span,
+            [first, rest @ ..] => {
+                for attr in rest {
+                    err.span_label(attr.span, "");
+                }
+                first.span
+            }
+        };
+        err.span_label(
+            attr_span,
+            format!(
+                "only `;` terminated statements or tail expressions are allowed after {}",
+                if expr.attrs.len() == 1 { "this attribute" } else { "these attributes" },
+            ),
+        );
+        if self.token == token::Pound
+            && self.look_ahead(1, |t| t.kind == token::OpenDelim(Delimiter::Bracket))
+        {
+            // We have
+            // #[attr]
+            // expr
+            // #[not_attr]
+            // other_expr
+            err.span_label(span, "expected `;` here");
+            err.multipart_suggestion(
+                "alternatively, consider surrounding the expression with a block",
+                vec![
+                    (expr.span.shrink_to_lo(), "{ ".to_string()),
+                    (expr.span.shrink_to_hi(), " }".to_string()),
+                ],
+                Applicability::MachineApplicable,
+            );
+            let mut snapshot = self.create_snapshot_for_diagnostic();
+            if let [attr] = &expr.attrs[..]
+                && let ast::AttrKind::Normal(attr_kind) = &attr.kind
+                && let [segment] = &attr_kind.item.path.segments[..]
+                && segment.ident.name == sym::cfg
+                && let Ok(next_attr) = snapshot.parse_attribute(InnerAttrPolicy::Forbidden(None))
+                && let ast::AttrKind::Normal(next_attr_kind) = next_attr.kind
+                && let [next_segment] = &next_attr_kind.item.path.segments[..]
+                && segment.ident.name == sym::cfg
+                && let Ok(next_expr) = snapshot.parse_expr()
+            {
+                // We have for sure
+                // #[cfg(..)]
+                // expr
+                // #[cfg(..)]
+                // other_expr
+                // So we suggest using `if cfg!(..) { expr } else if cfg!(..) { other_expr }`.
+                let margin = self.sess.source_map().span_to_margin(next_expr.span).unwrap_or(0);
+                let sugg = vec![
+                    (attr.span.with_hi(segment.span().hi()), "if cfg!".to_string()),
+                    (
+                        attr_kind.item.args.span().unwrap().shrink_to_hi().with_hi(attr.span.hi()),
+                        " {".to_string(),
+                    ),
+                    (expr.span.shrink_to_lo(), " ".to_string()),
+                    (
+                        next_attr.span.with_hi(next_segment.span().hi()),
+                        "} else if cfg!".to_string(),
+                    ),
+                    (
+                        next_attr_kind
+                            .item
+                            .args
+                            .span()
+                            .unwrap()
+                            .shrink_to_hi()
+                            .with_hi(next_attr.span.hi()),
+                        " {".to_string(),
+                    ),
+                    (next_expr.span.shrink_to_lo(), " ".to_string()),
+                    (next_expr.span.shrink_to_hi(), format!("\n{}}}", " ".repeat(margin))),
+                ];
+                err.multipart_suggestion(
+                    "it seems like you are trying to provide different expressions depending on \
+                     `cfg`, consider using `if cfg!(..)`",
+                    sugg,
+                    Applicability::MachineApplicable,
+                );
+            }
+        }
+        err.emit();
+    }
+
     fn check_too_many_raw_str_terminators(&mut self, err: &mut Diagnostic) -> bool {
         let sm = self.sess.source_map();
         match (&self.prev_token.kind, &self.token.kind) {
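A hypothetical input (not taken from this diff) that hits the new `attr_on_non_tail_expr` recovery is two `cfg`-gated tail expressions in one block; the diagnostic suggests rewriting them with `cfg!`. A minimal sketch of the suggested shape follows; the function name `transport` and the final `else` arm are additions made here only so the sketch compiles.

```rust
// Unsupported input the parser now recognises (a parse error, shown as a comment):
//
//     fn transport() -> &'static str {
//         #[cfg(unix)]
//         "unix socket"
//         #[cfg(windows)]
//         "named pipe"
//     }
//
// Roughly the shape the diagnostic suggests instead:
fn transport() -> &'static str {
    if cfg!(unix) {
        "unix socket"
    } else if cfg!(windows) {
        "named pipe"
    } else {
        "unsupported platform"
    }
}

fn main() {
    println!("{}", transport());
}
```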

View File

@@ -617,6 +617,20 @@ pub fn parse_full_stmt(
         let mut add_semi_to_stmt = false;

         match &mut stmt.kind {
+            // Expression without semicolon.
+            StmtKind::Expr(expr)
+                if classify::expr_requires_semi_to_be_stmt(expr)
+                    && !expr.attrs.is_empty()
+                    && ![token::Eof, token::Semi, token::CloseDelim(Delimiter::Brace)]
+                        .contains(&self.token.kind) =>
+            {
+                // The user has written `#[attr] expr` which is unsupported. (#106020)
+                self.attr_on_non_tail_expr(&expr);
+                // We already emitted an error, so don't emit another type error
+                let sp = expr.span.to(self.prev_token.span);
+                *expr = self.mk_expr_err(sp);
+            }
+
             // Expression without semicolon.
             StmtKind::Expr(expr)
                 if self.token != token::Eof && classify::expr_requires_semi_to_be_stmt(expr) =>

View File

@@ -287,6 +287,7 @@ fn parse_ty_common(
             // Function pointer type
             self.parse_ty_bare_fn(lo, ThinVec::new(), None, recover_return_sign)?
         } else if self.check_keyword(kw::For) {
+            let for_span = self.token.span;
             // Function pointer type or bound list (trait object type) starting with a poly-trait.
             // `for<'lt> [unsafe] [extern "ABI"] fn (&'lt S) -> T`
             // `for<'lt> Trait1<'lt> + Trait2 + 'a`
@@ -298,11 +299,44 @@ fn parse_ty_common(
                     Some(self.prev_token.span.shrink_to_lo()),
                     recover_return_sign,
                 )?
             } else {
+                // Try to recover `for<'a> dyn Trait` or `for<'a> impl Trait`.
+                if self.may_recover()
+                    && (self.eat_keyword_noexpect(kw::Impl) || self.eat_keyword_noexpect(kw::Dyn))
+                {
+                    let kw = self.prev_token.ident().unwrap().0.name;
+                    let mut err = self.sess.create_err(errors::TransposeDynOrImpl {
+                        span: self.prev_token.span,
+                        kw: kw.as_str(),
+                        sugg: errors::TransposeDynOrImplSugg {
+                            removal_span: self.prev_token.span.with_hi(self.token.span.lo()),
+                            insertion_span: for_span.shrink_to_lo(),
+                            kw: kw.as_str(),
+                        },
+                    });
+                    let path = self.parse_path(PathStyle::Type)?;
+                    let parse_plus = allow_plus == AllowPlus::Yes && self.check_plus();
+                    let kind =
+                        self.parse_remaining_bounds_path(lifetime_defs, path, lo, parse_plus)?;
+                    // Take the parsed bare trait object and turn it either
+                    // into a `dyn` object or an `impl Trait`.
+                    let kind = match (kind, kw) {
+                        (TyKind::TraitObject(bounds, _), kw::Dyn) => {
+                            TyKind::TraitObject(bounds, TraitObjectSyntax::Dyn)
+                        }
+                        (TyKind::TraitObject(bounds, _), kw::Impl) => {
+                            TyKind::ImplTrait(ast::DUMMY_NODE_ID, bounds)
+                        }
+                        _ => return Err(err),
+                    };
+                    err.emit();
+                    kind
+                } else {
                     let path = self.parse_path(PathStyle::Type)?;
                     let parse_plus = allow_plus == AllowPlus::Yes && self.check_plus();
                     self.parse_remaining_bounds_path(lifetime_defs, path, lo, parse_plus)?
                 }
+            }
         } else if self.eat_keyword(kw::Impl) {
             self.parse_impl_ty(&mut impl_dyn_multi)?
         } else if self.is_explicit_dyn_type() {
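A hypothetical example (not from this diff) of the syntax this recovery targets: writing the binder before the keyword, as in `for<'a> dyn Trait`, now produces an error plus a machine-applicable suggestion to move the keyword in front. The function name `apply` and the closure are illustrative only.

```rust
// Misplaced binder that the parser now recovers from (still an error):
//
//     fn apply(f: &for<'a> dyn Fn(&'a u8) -> u8) -> u8 { f(&7) }
//
// The suggested, valid spelling puts `dyn` before `for<...>`:
fn apply(f: &dyn for<'a> Fn(&'a u8) -> u8) -> u8 {
    f(&7)
}

fn main() {
    assert_eq!(apply(&|x: &u8| *x + 1), 8);
}
```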

View File

@@ -398,7 +398,8 @@ fn impl_intersection_has_negative_obligation(
     debug!("negative_impl(impl1_def_id={:?}, impl2_def_id={:?})", impl1_def_id, impl2_def_id);

     let ref infcx = tcx.infer_ctxt().intercrate(true).with_next_trait_solver(true).build();
-    let universe = infcx.universe();
+    let root_universe = infcx.universe();
+    assert_eq!(root_universe, ty::UniverseIndex::ROOT);

     let impl1_header = fresh_impl_header(infcx, impl1_def_id);
     let param_env =
@@ -408,13 +409,25 @@ fn impl_intersection_has_negative_obligation(
     // Equate the headers to find their intersection (the general type, with infer vars,
     // that may apply both impls).
-    let Some(_equate_obligations) =
+    let Some(equate_obligations) =
         equate_impl_headers(infcx, param_env, &impl1_header, &impl2_header)
     else {
         return false;
     };

-    plug_infer_with_placeholders(infcx, universe, (impl1_header.impl_args, impl2_header.impl_args));
+    plug_infer_with_placeholders(
+        infcx,
+        root_universe,
+        (impl1_header.impl_args, impl2_header.impl_args),
+    );
+    let param_env = infcx.resolve_vars_if_possible(param_env);
+
+    // FIXME(with_negative_coherence): the infcx has constraints from equating
+    // the impl headers. We should use these constraints as assumptions, not as
+    // requirements, when proving the negated where clauses below.
+    drop(equate_obligations);
+    drop(infcx.take_registered_region_obligations());
+    drop(infcx.take_and_reset_region_constraints());

     util::elaborate(tcx, tcx.predicates_of(impl2_def_id).instantiate(tcx, impl2_header.impl_args))
         .any(|(clause, _)| try_prove_negated_where_clause(infcx, clause, param_env))
@@ -541,14 +554,6 @@ fn try_prove_negated_where_clause<'tcx>(
         return false;
     };

-    // FIXME(with_negative_coherence): the infcx has region contraints from equating
-    // the impl headers as requirements. Given that the only region constraints we
-    // get are involving inference regions in the root, it shouldn't matter, but
-    // still sus.
-    //
-    // We probably should just throw away the region obligations registered up until
-    // now, or ideally use them as assumptions when proving the region obligations
-    // that we get from proving the negative predicate below.
     let ref infcx = root_infcx.fork();
     let ocx = ObligationCtxt::new(infcx);

View File

@@ -208,6 +208,9 @@ fn implied_bounds_from_components<'tcx>(
             Component::Region(r) => Some(OutlivesBound::RegionSubRegion(sub_region, r)),
             Component::Param(p) => Some(OutlivesBound::RegionSubParam(sub_region, p)),
             Component::Alias(p) => Some(OutlivesBound::RegionSubAlias(sub_region, p)),
+            Component::Placeholder(_) => {
+                unimplemented!("Shouldn't expect a placeholder type in implied bounds (yet)")
+            }
             Component::EscapingAlias(_) =>
             // If the projection has escaping regions, don't
             // try to infer any implied bounds even for its

View File

@@ -152,6 +152,12 @@ pub trait CommandExt: Sealed {
     /// in a guaranteed race-free manner (e.g. if the `clone3` system call
     /// is supported). Otherwise, [`pidfd`] will return an error.
     ///
+    /// If a pidfd has been successfully created and not been taken from the `Child`
+    /// then calls to `kill()`, `wait()` and `try_wait()` will use the pidfd
+    /// instead of the pid. This can prevent pid recycling races, e.g.
+    /// those caused by rogue libraries in the same process prematurely reaping
+    /// zombie children via `waitpid(-1, ...)` calls.
+    ///
     /// [`Command`]: process::Command
     /// [`Child`]: process::Child
     /// [`pidfd`]: fn@ChildExt::pidfd
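A sketch of the behaviour documented above. `create_pidfd` is an unstable, Linux-only API (the `linux_pidfd` feature), so this only builds on a nightly toolchain; the `sleep 60` command is just an illustration.

```rust
#![feature(linux_pidfd)]

use std::os::linux::process::CommandExt as _; // brings `create_pidfd` into scope
use std::process::Command;

fn main() -> std::io::Result<()> {
    // Ask the runtime to obtain a pidfd while spawning.
    let mut child = Command::new("sleep").arg("60").create_pidfd(true).spawn()?;

    // As long as the pidfd has not been taken out of `child`, kill()/wait()
    // act on the file descriptor rather than the raw (recyclable) pid.
    child.kill()?;
    child.wait()?;
    Ok(())
}
```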

View File

@ -9,6 +9,8 @@
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
use crate::os::linux::process::PidFd; use crate::os::linux::process::PidFd;
#[cfg(target_os = "linux")]
use crate::os::unix::io::AsRawFd;
#[cfg(any( #[cfg(any(
target_os = "macos", target_os = "macos",
@ -696,11 +698,12 @@ union Cmsg {
msg.msg_iov = &mut iov as *mut _ as *mut _; msg.msg_iov = &mut iov as *mut _ as *mut _;
msg.msg_iovlen = 1; msg.msg_iovlen = 1;
msg.msg_controllen = mem::size_of_val(&cmsg.buf) as _;
msg.msg_control = &mut cmsg.buf as *mut _ as *mut _;
// only attach cmsg if we successfully acquired the pidfd // only attach cmsg if we successfully acquired the pidfd
if pidfd >= 0 { if pidfd >= 0 {
msg.msg_controllen = mem::size_of_val(&cmsg.buf) as _;
msg.msg_control = &mut cmsg.buf as *mut _ as *mut _;
let hdr = CMSG_FIRSTHDR(&mut msg as *mut _ as *mut _); let hdr = CMSG_FIRSTHDR(&mut msg as *mut _ as *mut _);
(*hdr).cmsg_level = SOL_SOCKET; (*hdr).cmsg_level = SOL_SOCKET;
(*hdr).cmsg_type = SCM_RIGHTS; (*hdr).cmsg_type = SCM_RIGHTS;
@ -717,7 +720,7 @@ union Cmsg {
// so we get a consistent SEQPACKET order // so we get a consistent SEQPACKET order
match cvt_r(|| libc::sendmsg(sock.as_raw(), &msg, 0)) { match cvt_r(|| libc::sendmsg(sock.as_raw(), &msg, 0)) {
Ok(0) => {} Ok(0) => {}
_ => rtabort!("failed to communicate with parent process"), other => rtabort!("failed to communicate with parent process. {:?}", other),
} }
} }
} }
@ -748,7 +751,7 @@ union Cmsg {
msg.msg_controllen = mem::size_of::<Cmsg>() as _; msg.msg_controllen = mem::size_of::<Cmsg>() as _;
msg.msg_control = &mut cmsg as *mut _ as *mut _; msg.msg_control = &mut cmsg as *mut _ as *mut _;
match cvt_r(|| libc::recvmsg(sock.as_raw(), &mut msg, 0)) { match cvt_r(|| libc::recvmsg(sock.as_raw(), &mut msg, libc::MSG_CMSG_CLOEXEC)) {
Err(_) => return -1, Err(_) => return -1,
Ok(_) => {} Ok(_) => {}
} }
@ -787,7 +790,7 @@ pub struct Process {
// On Linux, stores the pidfd created for this child. // On Linux, stores the pidfd created for this child.
// This is None if the user did not request pidfd creation, // This is None if the user did not request pidfd creation,
// or if the pidfd could not be created for some reason // or if the pidfd could not be created for some reason
// (e.g. the `clone3` syscall was not available). // (e.g. the `pidfd_open` syscall was not available).
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
pidfd: Option<PidFd>, pidfd: Option<PidFd>,
} }
@ -816,10 +819,23 @@ pub fn kill(&mut self) -> io::Result<()> {
// and used for another process, and we probably shouldn't be killing // and used for another process, and we probably shouldn't be killing
// random processes, so return Ok because the process has exited already. // random processes, so return Ok because the process has exited already.
if self.status.is_some() { if self.status.is_some() {
Ok(()) return Ok(());
} else {
cvt(unsafe { libc::kill(self.pid, libc::SIGKILL) }).map(drop)
} }
#[cfg(target_os = "linux")]
if let Some(pid_fd) = self.pidfd.as_ref() {
// pidfd_send_signal predates pidfd_open, so if we were able to get an fd then sending signals will work too.
return cvt(unsafe {
libc::syscall(
libc::SYS_pidfd_send_signal,
pid_fd.as_raw_fd(),
libc::SIGKILL,
crate::ptr::null::<()>(),
0,
)
})
.map(drop);
}
cvt(unsafe { libc::kill(self.pid, libc::SIGKILL) }).map(drop)
} }
pub fn wait(&mut self) -> io::Result<ExitStatus> { pub fn wait(&mut self) -> io::Result<ExitStatus> {
@ -827,6 +843,17 @@ pub fn wait(&mut self) -> io::Result<ExitStatus> {
if let Some(status) = self.status { if let Some(status) = self.status {
return Ok(status); return Ok(status);
} }
#[cfg(target_os = "linux")]
if let Some(pid_fd) = self.pidfd.as_ref() {
let mut siginfo: libc::siginfo_t = unsafe { crate::mem::zeroed() };
cvt_r(|| unsafe {
libc::waitid(libc::P_PIDFD, pid_fd.as_raw_fd() as u32, &mut siginfo, libc::WEXITED)
})?;
let status = ExitStatus::from_waitid_siginfo(siginfo);
self.status = Some(status);
return Ok(status);
}
let mut status = 0 as c_int; let mut status = 0 as c_int;
cvt_r(|| unsafe { libc::waitpid(self.pid, &mut status, 0) })?; cvt_r(|| unsafe { libc::waitpid(self.pid, &mut status, 0) })?;
self.status = Some(ExitStatus::new(status)); self.status = Some(ExitStatus::new(status));
@ -837,6 +864,25 @@ pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
if let Some(status) = self.status { if let Some(status) = self.status {
return Ok(Some(status)); return Ok(Some(status));
} }
#[cfg(target_os = "linux")]
if let Some(pid_fd) = self.pidfd.as_ref() {
let mut siginfo: libc::siginfo_t = unsafe { crate::mem::zeroed() };
cvt(unsafe {
libc::waitid(
libc::P_PIDFD,
pid_fd.as_raw_fd() as u32,
&mut siginfo,
libc::WEXITED | libc::WNOHANG,
)
})?;
if unsafe { siginfo.si_pid() } == 0 {
return Ok(None);
}
let status = ExitStatus::from_waitid_siginfo(siginfo);
self.status = Some(status);
return Ok(Some(status));
}
let mut status = 0 as c_int; let mut status = 0 as c_int;
let pid = cvt(unsafe { libc::waitpid(self.pid, &mut status, libc::WNOHANG) })?; let pid = cvt(unsafe { libc::waitpid(self.pid, &mut status, libc::WNOHANG) })?;
if pid == 0 { if pid == 0 {
@ -866,6 +912,20 @@ pub fn new(status: c_int) -> ExitStatus {
ExitStatus(status) ExitStatus(status)
} }
#[cfg(target_os = "linux")]
pub fn from_waitid_siginfo(siginfo: libc::siginfo_t) -> ExitStatus {
let status = unsafe { siginfo.si_status() };
match siginfo.si_code {
libc::CLD_EXITED => ExitStatus((status & 0xff) << 8),
libc::CLD_KILLED => ExitStatus(status),
libc::CLD_DUMPED => ExitStatus(status | 0x80),
libc::CLD_CONTINUED => ExitStatus(0xffff),
libc::CLD_STOPPED | libc::CLD_TRAPPED => ExitStatus(((status & 0xff) << 8) | 0x7f),
_ => unreachable!("waitid() should only return the above codes"),
}
}
fn exited(&self) -> bool { fn exited(&self) -> bool {
libc::WIFEXITED(self.0) libc::WIFEXITED(self.0)
} }
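A hedged sketch of what the encoding in `from_waitid_siginfo` produces, checked against the `libc` crate's decoding helpers. This is illustrative only, not part of the patch, and assumes a Linux target with the `libc` dependency available:

```rust
fn main() {
    // CLD_EXITED with exit code 1 is packed as (1 & 0xff) << 8 == 0x0100 ...
    let exited = (1 & 0xff) << 8;
    assert!(libc::WIFEXITED(exited));
    assert_eq!(libc::WEXITSTATUS(exited), 1);

    // ... while CLD_KILLED keeps the raw signal number in the low bits.
    let killed = libc::SIGKILL;
    assert!(libc::WIFSIGNALED(killed));
    assert_eq!(libc::WTERMSIG(killed), libc::SIGKILL);
}
```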
View File
@ -64,7 +64,8 @@ fn test_command_fork_no_unwind() {
#[test] #[test]
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
fn test_command_pidfd() { fn test_command_pidfd() {
use crate::os::fd::RawFd; use crate::assert_matches::assert_matches;
use crate::os::fd::{AsRawFd, RawFd};
use crate::os::linux::process::{ChildExt, CommandExt}; use crate::os::linux::process::{ChildExt, CommandExt};
use crate::process::Command; use crate::process::Command;
@ -78,10 +79,22 @@ fn test_command_pidfd() {
}; };
// always exercise creation attempts // always exercise creation attempts
let child = Command::new("echo").create_pidfd(true).spawn().unwrap(); let mut child = Command::new("false").create_pidfd(true).spawn().unwrap();
// but only check if we know that the kernel supports pidfds // but only check if we know that the kernel supports pidfds
if pidfd_open_available { if pidfd_open_available {
assert!(child.pidfd().is_ok()) assert!(child.pidfd().is_ok());
} }
if let Ok(pidfd) = child.pidfd() {
let flags = super::cvt(unsafe { libc::fcntl(pidfd.as_raw_fd(), libc::F_GETFD) }).unwrap();
assert!(flags & libc::FD_CLOEXEC != 0);
}
let status = child.wait().expect("error waiting on pidfd");
assert_eq!(status.code(), Some(1));
let mut child = Command::new("sleep").arg("1000").create_pidfd(true).spawn().unwrap();
assert_matches!(child.try_wait(), Ok(None));
child.kill().expect("failed to kill child");
let status = child.wait().expect("error waiting on pidfd");
assert_eq!(status.signal(), Some(libc::SIGKILL));
} }
View File
@ -0,0 +1,9 @@
#![feature(core_intrinsics)]
use std::intrinsics;
fn main() {
unsafe {
let _n = intrinsics::unchecked_shl(1i8, -1);
//~^ ERROR: overflowing shift by -1 in `unchecked_shl`
}
}
View File
@ -0,0 +1,15 @@
error: Undefined Behavior: overflowing shift by -1 in `unchecked_shl`
--> $DIR/unchecked_shl2.rs:LL:CC
|
LL | let _n = intrinsics::unchecked_shl(1i8, -1);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -1 in `unchecked_shl`
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: BACKTRACE:
= note: inside `main` at $DIR/unchecked_shl2.rs:LL:CC
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to previous error
View File
@ -49,7 +49,7 @@ LL | type B;
LL | type Bar<Rhs> = dyn Add<Rhs> + Sub<Rhs> + X<Rhs> + Z<Rhs>; LL | type Bar<Rhs> = dyn Add<Rhs> + Sub<Rhs> + X<Rhs> + Z<Rhs>;
| ^^^^^^^^ ^^^^^^^^ ^^^^^^ ^^^^^^ associated types `A`, `B`, `Output` must be specified | ^^^^^^^^ ^^^^^^^^ ^^^^^^ ^^^^^^ associated types `A`, `B`, `Output` must be specified
| | | | | | | |
| | | associated types `Output` (from trait `Mul`), `Output` (from trait `Div`) must be specified | | | associated types `Output` (from trait `Div`), `Output` (from trait `Mul`) must be specified
| | associated type `Output` must be specified | | associated type `Output` must be specified
| associated type `Output` must be specified | associated type `Output` must be specified
| |
@ -119,7 +119,7 @@ error[E0191]: the value of the associated types `Output` in `Div`, `Output` in `
--> $DIR/missing-associated-types.rs:24:21 --> $DIR/missing-associated-types.rs:24:21
| |
LL | type Bal<Rhs> = dyn X<Rhs>; LL | type Bal<Rhs> = dyn X<Rhs>;
| ^^^^^^ associated types `Output` (from trait `Mul`), `Output` (from trait `Div`) must be specified | ^^^^^^ associated types `Output` (from trait `Div`), `Output` (from trait `Mul`) must be specified
| |
= help: consider introducing a new type parameter, adding `where` constraints using the fully-qualified path to the associated types = help: consider introducing a new type parameter, adding `where` constraints using the fully-qualified path to the associated types
View File
@ -0,0 +1,14 @@
#![feature(negative_impls)]
#![feature(with_negative_coherence)]
struct Wrap<T>(T);
trait Foo {}
impl<T: 'static> !Foo for Box<T> {}
trait Bar {}
impl<T> Bar for T where T: Foo {}
impl<T> Bar for Box<T> {}
//~^ ERROR conflicting implementations of trait `Bar` for type `Box<_>`
fn main() {}
View File
@ -0,0 +1,11 @@
error[E0119]: conflicting implementations of trait `Bar` for type `Box<_>`
--> $DIR/negative-coherence-check-placeholder-outlives.rs:11:1
|
LL | impl<T> Bar for T where T: Foo {}
| ------------------------------ first implementation here
LL | impl<T> Bar for Box<T> {}
| ^^^^^^^^^^^^^^^^^^^^^^ conflicting implementation for `Box<_>`
error: aborting due to previous error
For more information about this error, try `rustc --explain E0119`.
View File
@ -0,0 +1,19 @@
error: conflicting implementations of trait `FnMarker` for type `fn(&_)`
--> $DIR/negative-coherence-placeholder-region-constraints-on-unification.rs:21:1
|
LL | impl<T: ?Sized + Marker> FnMarker for fn(T) {}
| ------------------------------------------- first implementation here
LL | impl<T: ?Sized> FnMarker for fn(&T) {}
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ conflicting implementation for `fn(&_)`
|
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #56105 <https://github.com/rust-lang/rust/issues/56105>
= note: this behavior recently changed as a result of a bug fix; see rust-lang/rust#56105 for details
note: the lint level is defined here
--> $DIR/negative-coherence-placeholder-region-constraints-on-unification.rs:4:11
|
LL | #![forbid(coherence_leak_check)]
| ^^^^^^^^^^^^^^^^^^^^
error: aborting due to previous error
View File
@ -0,0 +1,25 @@
// revisions: explicit implicit
//[implicit] check-pass
#![forbid(coherence_leak_check)]
#![feature(negative_impls, with_negative_coherence)]
pub trait Marker {}
#[cfg(implicit)]
impl<T: ?Sized> !Marker for &T {}
#[cfg(explicit)]
impl<'a, T: ?Sized + 'a> !Marker for &'a T {}
trait FnMarker {}
// Unifying these two impls below results in a `T: '!0` obligation
// that we shouldn't need to care about. Ideally, we'd treat that
// as an assumption when proving `&'!0 T: Marker`...
impl<T: ?Sized + Marker> FnMarker for fn(T) {}
impl<T: ?Sized> FnMarker for fn(&T) {}
//[explicit]~^ ERROR conflicting implementations of trait `FnMarker` for type `fn(&_)`
//[explicit]~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
fn main() {}
View File
@ -62,61 +62,61 @@ error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:41:33 --> $DIR/const-int-unchecked.rs:41:33
| |
LL | const SHL_I8_NEG: i8 = unsafe { intrinsics::unchecked_shl(5_i8, -1) }; LL | const SHL_I8_NEG: i8 = unsafe { intrinsics::unchecked_shl(5_i8, -1) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 255 in `unchecked_shl` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -1 in `unchecked_shl`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:43:35 --> $DIR/const-int-unchecked.rs:43:35
| |
LL | const SHL_I16_NEG: i16 = unsafe { intrinsics::unchecked_shl(5_16, -1) }; LL | const SHL_I16_NEG: i16 = unsafe { intrinsics::unchecked_shl(5_16, -1) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 65535 in `unchecked_shl` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -1 in `unchecked_shl`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:45:35 --> $DIR/const-int-unchecked.rs:45:35
| |
LL | const SHL_I32_NEG: i32 = unsafe { intrinsics::unchecked_shl(5_i32, -1) }; LL | const SHL_I32_NEG: i32 = unsafe { intrinsics::unchecked_shl(5_i32, -1) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 4294967295 in `unchecked_shl` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -1 in `unchecked_shl`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:47:35 --> $DIR/const-int-unchecked.rs:47:35
| |
LL | const SHL_I64_NEG: i64 = unsafe { intrinsics::unchecked_shl(5_i64, -1) }; LL | const SHL_I64_NEG: i64 = unsafe { intrinsics::unchecked_shl(5_i64, -1) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 18446744073709551615 in `unchecked_shl` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -1 in `unchecked_shl`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:49:37 --> $DIR/const-int-unchecked.rs:49:37
| |
LL | const SHL_I128_NEG: i128 = unsafe { intrinsics::unchecked_shl(5_i128, -1) }; LL | const SHL_I128_NEG: i128 = unsafe { intrinsics::unchecked_shl(5_i128, -1) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 340282366920938463463374607431768211455 in `unchecked_shl` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -1 in `unchecked_shl`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:55:40 --> $DIR/const-int-unchecked.rs:55:40
| |
LL | const SHL_I8_NEG_RANDOM: i8 = unsafe { intrinsics::unchecked_shl(5_i8, -6) }; LL | const SHL_I8_NEG_RANDOM: i8 = unsafe { intrinsics::unchecked_shl(5_i8, -6) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 250 in `unchecked_shl` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -6 in `unchecked_shl`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:57:42 --> $DIR/const-int-unchecked.rs:57:42
| |
LL | const SHL_I16_NEG_RANDOM: i16 = unsafe { intrinsics::unchecked_shl(5_16, -13) }; LL | const SHL_I16_NEG_RANDOM: i16 = unsafe { intrinsics::unchecked_shl(5_16, -13) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 65523 in `unchecked_shl` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -13 in `unchecked_shl`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:59:42 --> $DIR/const-int-unchecked.rs:59:42
| |
LL | const SHL_I32_NEG_RANDOM: i32 = unsafe { intrinsics::unchecked_shl(5_i32, -25) }; LL | const SHL_I32_NEG_RANDOM: i32 = unsafe { intrinsics::unchecked_shl(5_i32, -25) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 4294967271 in `unchecked_shl` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -25 in `unchecked_shl`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:61:42 --> $DIR/const-int-unchecked.rs:61:42
| |
LL | const SHL_I64_NEG_RANDOM: i64 = unsafe { intrinsics::unchecked_shl(5_i64, -30) }; LL | const SHL_I64_NEG_RANDOM: i64 = unsafe { intrinsics::unchecked_shl(5_i64, -30) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 18446744073709551586 in `unchecked_shl` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -30 in `unchecked_shl`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:63:44 --> $DIR/const-int-unchecked.rs:63:44
| |
LL | const SHL_I128_NEG_RANDOM: i128 = unsafe { intrinsics::unchecked_shl(5_i128, -93) }; LL | const SHL_I128_NEG_RANDOM: i128 = unsafe { intrinsics::unchecked_shl(5_i128, -93) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 340282366920938463463374607431768211363 in `unchecked_shl` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -93 in `unchecked_shl`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:70:29 --> $DIR/const-int-unchecked.rs:70:29
@ -182,61 +182,61 @@ error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:96:33 --> $DIR/const-int-unchecked.rs:96:33
| |
LL | const SHR_I8_NEG: i8 = unsafe { intrinsics::unchecked_shr(5_i8, -1) }; LL | const SHR_I8_NEG: i8 = unsafe { intrinsics::unchecked_shr(5_i8, -1) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 255 in `unchecked_shr` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -1 in `unchecked_shr`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:98:35 --> $DIR/const-int-unchecked.rs:98:35
| |
LL | const SHR_I16_NEG: i16 = unsafe { intrinsics::unchecked_shr(5_16, -1) }; LL | const SHR_I16_NEG: i16 = unsafe { intrinsics::unchecked_shr(5_16, -1) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 65535 in `unchecked_shr` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -1 in `unchecked_shr`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:100:35 --> $DIR/const-int-unchecked.rs:100:35
| |
LL | const SHR_I32_NEG: i32 = unsafe { intrinsics::unchecked_shr(5_i32, -1) }; LL | const SHR_I32_NEG: i32 = unsafe { intrinsics::unchecked_shr(5_i32, -1) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 4294967295 in `unchecked_shr` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -1 in `unchecked_shr`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:102:35 --> $DIR/const-int-unchecked.rs:102:35
| |
LL | const SHR_I64_NEG: i64 = unsafe { intrinsics::unchecked_shr(5_i64, -1) }; LL | const SHR_I64_NEG: i64 = unsafe { intrinsics::unchecked_shr(5_i64, -1) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 18446744073709551615 in `unchecked_shr` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -1 in `unchecked_shr`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:104:37 --> $DIR/const-int-unchecked.rs:104:37
| |
LL | const SHR_I128_NEG: i128 = unsafe { intrinsics::unchecked_shr(5_i128, -1) }; LL | const SHR_I128_NEG: i128 = unsafe { intrinsics::unchecked_shr(5_i128, -1) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 340282366920938463463374607431768211455 in `unchecked_shr` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -1 in `unchecked_shr`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:110:40 --> $DIR/const-int-unchecked.rs:110:40
| |
LL | const SHR_I8_NEG_RANDOM: i8 = unsafe { intrinsics::unchecked_shr(5_i8, -6) }; LL | const SHR_I8_NEG_RANDOM: i8 = unsafe { intrinsics::unchecked_shr(5_i8, -6) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 250 in `unchecked_shr` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -6 in `unchecked_shr`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:112:42 --> $DIR/const-int-unchecked.rs:112:42
| |
LL | const SHR_I16_NEG_RANDOM: i16 = unsafe { intrinsics::unchecked_shr(5_16, -13) }; LL | const SHR_I16_NEG_RANDOM: i16 = unsafe { intrinsics::unchecked_shr(5_16, -13) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 65523 in `unchecked_shr` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -13 in `unchecked_shr`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:114:42 --> $DIR/const-int-unchecked.rs:114:42
| |
LL | const SHR_I32_NEG_RANDOM: i32 = unsafe { intrinsics::unchecked_shr(5_i32, -25) }; LL | const SHR_I32_NEG_RANDOM: i32 = unsafe { intrinsics::unchecked_shr(5_i32, -25) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 4294967271 in `unchecked_shr` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -25 in `unchecked_shr`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:116:42 --> $DIR/const-int-unchecked.rs:116:42
| |
LL | const SHR_I64_NEG_RANDOM: i64 = unsafe { intrinsics::unchecked_shr(5_i64, -30) }; LL | const SHR_I64_NEG_RANDOM: i64 = unsafe { intrinsics::unchecked_shr(5_i64, -30) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 18446744073709551586 in `unchecked_shr` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -30 in `unchecked_shr`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:118:44 --> $DIR/const-int-unchecked.rs:118:44
| |
LL | const SHR_I128_NEG_RANDOM: i128 = unsafe { intrinsics::unchecked_shr(5_i128, -93) }; LL | const SHR_I128_NEG_RANDOM: i128 = unsafe { intrinsics::unchecked_shr(5_i128, -93) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by 340282366920938463463374607431768211363 in `unchecked_shr` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ overflowing shift by -93 in `unchecked_shr`
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/const-int-unchecked.rs:123:25 --> $DIR/const-int-unchecked.rs:123:25
View File
@ -0,0 +1,19 @@
#![feature(stmt_expr_attributes)]
fn foo() -> String {
#[cfg(feature = "validation")]
[1, 2, 3].iter().map(|c| c.to_string()).collect::<String>() //~ ERROR expected `;`, found `#`
#[cfg(not(feature = "validation"))]
String::new()
}
fn bar() -> String {
#[attr]
[1, 2, 3].iter().map(|c| c.to_string()).collect::<String>() //~ ERROR expected `;`, found `#`
#[attr] //~ ERROR cannot find attribute `attr` in this scope
String::new()
}
fn main() {
println!("{}", foo());
}
View File
@ -0,0 +1,54 @@
error: expected `;`, found `#`
--> $DIR/multiple-tail-expr-behind-cfg.rs:5:64
|
LL | #[cfg(feature = "validation")]
| ------------------------------ only `;` terminated statements or tail expressions are allowed after this attribute
LL | [1, 2, 3].iter().map(|c| c.to_string()).collect::<String>()
| ^ expected `;` here
LL | #[cfg(not(feature = "validation"))]
| - unexpected token
|
help: add `;` here
|
LL | [1, 2, 3].iter().map(|c| c.to_string()).collect::<String>();
| +
help: alternatively, consider surrounding the expression with a block
|
LL | { [1, 2, 3].iter().map(|c| c.to_string()).collect::<String>() }
| + +
help: it seems like you are trying to provide different expressions depending on `cfg`, consider using `if cfg!(..)`
|
LL ~ if cfg!(feature = "validation") {
LL ~ [1, 2, 3].iter().map(|c| c.to_string()).collect::<String>()
LL ~ } else if cfg!(not(feature = "validation")) {
LL ~ String::new()
LL + }
|
error: expected `;`, found `#`
--> $DIR/multiple-tail-expr-behind-cfg.rs:12:64
|
LL | #[attr]
| ------- only `;` terminated statements or tail expressions are allowed after this attribute
LL | [1, 2, 3].iter().map(|c| c.to_string()).collect::<String>()
| ^ expected `;` here
LL | #[attr]
| - unexpected token
|
help: add `;` here
|
LL | [1, 2, 3].iter().map(|c| c.to_string()).collect::<String>();
| +
help: alternatively, consider surrounding the expression with a block
|
LL | { [1, 2, 3].iter().map(|c| c.to_string()).collect::<String>() }
| + +
error: cannot find attribute `attr` in this scope
--> $DIR/multiple-tail-expr-behind-cfg.rs:13:7
|
LL | #[attr]
| ^^^^
error: aborting due to 3 previous errors
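For reference, the `if cfg!(..)` rewrite that the help message above suggests would look roughly like this. It is a sketch rather than part of the test suite; it assumes a `validation` cargo feature is declared, and it requires both branches to type-check:

```rust
fn foo() -> String {
    if cfg!(feature = "validation") {
        [1, 2, 3].iter().map(|c| c.to_string()).collect::<String>()
    } else {
        String::new()
    }
}

fn main() {
    println!("{}", foo());
}
```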
View File
@ -0,0 +1,9 @@
trait Trait {}
fn test(_: &for<'a> dyn Trait) {}
//~^ ERROR `for<...>` expected after `dyn`, not before
fn test2(_: for<'a> impl Trait) {}
//~^ ERROR `for<...>` expected after `impl`, not before
fn main() {}
View File
@ -0,0 +1,26 @@
error: `for<...>` expected after `dyn`, not before
--> $DIR/recover-hrtb-before-dyn-impl-kw.rs:3:21
|
LL | fn test(_: &for<'a> dyn Trait) {}
| ^^^
|
help: move `dyn` before the `for<...>`
|
LL - fn test(_: &for<'a> dyn Trait) {}
LL + fn test(_: &dyn for<'a> Trait) {}
|
error: `for<...>` expected after `impl`, not before
--> $DIR/recover-hrtb-before-dyn-impl-kw.rs:6:21
|
LL | fn test2(_: for<'a> impl Trait) {}
| ^^^^
|
help: move `impl` before the `for<...>`
|
LL - fn test2(_: for<'a> impl Trait) {}
LL + fn test2(_: impl for<'a> Trait) {}
|
error: aborting due to 2 previous errors
View File
@ -0,0 +1,26 @@
warning: the feature `non_lifetime_binders` is incomplete and may not be safe to use and/or cause compiler crashes
--> $DIR/placeholders-dont-outlive-static.rs:6:12
|
LL | #![feature(non_lifetime_binders)]
| ^^^^^^^^^^^^^^^^^^^^
|
= note: see issue #108185 <https://github.com/rust-lang/rust/issues/108185> for more information
= note: `#[warn(incomplete_features)]` on by default
error[E0310]: the placeholder type `!1_"T"` may not live long enough
--> $DIR/placeholders-dont-outlive-static.rs:13:5
|
LL | foo();
| ^^^^^
| |
| the placeholder type `!1_"T"` must be valid for the static lifetime...
| ...so that the type `T` will meet its required lifetime bounds
|
help: consider adding an explicit lifetime bound
|
LL | fn bad() where !1_"T": 'static {
| +++++++++++++++++++++
error: aborting due to previous error; 1 warning emitted
For more information about this error, try `rustc --explain E0310`.
View File
@ -0,0 +1,26 @@
warning: the feature `non_lifetime_binders` is incomplete and may not be safe to use and/or cause compiler crashes
--> $DIR/placeholders-dont-outlive-static.rs:6:12
|
LL | #![feature(non_lifetime_binders)]
| ^^^^^^^^^^^^^^^^^^^^
|
= note: see issue #108185 <https://github.com/rust-lang/rust/issues/108185> for more information
= note: `#[warn(incomplete_features)]` on by default
error[E0310]: the placeholder type `!1_"T"` may not live long enough
--> $DIR/placeholders-dont-outlive-static.rs:19:5
|
LL | foo();
| ^^^^^
| |
| the placeholder type `!1_"T"` must be valid for the static lifetime...
| ...so that the type `T` will meet its required lifetime bounds
|
help: consider adding an explicit lifetime bound
|
LL | fn good() where for<T> T: 'static, !1_"T": 'static {
| +++++++++++++++++
error: aborting due to previous error; 1 warning emitted
For more information about this error, try `rustc --explain E0310`.
View File
@ -0,0 +1,22 @@
// revisions: good bad
//[good] known-bug: unknown
// `for<T> T: 'static` doesn't imply itself when processing outlives obligations
#![feature(non_lifetime_binders)]
//[bad]~^ WARN the feature `non_lifetime_binders` is incomplete
fn foo() where for<T> T: 'static {}
#[cfg(bad)]
fn bad() {
foo();
//[bad]~^ ERROR the placeholder type `!1_"T"` may not live long enough
}
#[cfg(good)]
fn good() where for<T> T: 'static {
foo();
}
fn main() {}
View File
@ -1,10 +1,9 @@
// edition:2021 // edition:2021
// check-pass // known-bug: unknown
// Checks that test_type_match code doesn't ICE when predicates have late-bound types // Checks that test_type_match code doesn't ICE when predicates have late-bound types
#![feature(non_lifetime_binders)] #![feature(non_lifetime_binders)]
//~^ WARN is incomplete and may not be safe to use
async fn walk2<'a, T: 'a>(_: T) async fn walk2<'a, T: 'a>(_: T)
where where
View File
@ -7,5 +7,20 @@ LL | #![feature(non_lifetime_binders)]
= note: see issue #108185 <https://github.com/rust-lang/rust/issues/108185> for more information = note: see issue #108185 <https://github.com/rust-lang/rust/issues/108185> for more information
= note: `#[warn(incomplete_features)]` on by default = note: `#[warn(incomplete_features)]` on by default
warning: 1 warning emitted error[E0309]: the placeholder type `!1_"F"` may not live long enough
--> $DIR/type-match-with-late-bound.rs:11:1
|
LL | async fn walk2<'a, T: 'a>(_: T)
| -- the placeholder type `!1_"F"` must be valid for the lifetime `'a` as defined here...
...
LL | {}
| ^^ ...so that the type `F` will meet its required lifetime bounds
|
help: consider adding an explicit lifetime bound
|
LL | for<F> F: 'a, !1_"F": 'a
| ~~~~~~~~~~~~
error: aborting due to previous error; 1 warning emitted
For more information about this error, try `rustc --explain E0309`.