Auto merge of #130615 - GuillaumeGomez:rollup-tq0ff7y, r=GuillaumeGomez
Rollup of 6 pull requests

Successful merges:

- #129542 (Add regression test for #129541)
- #129755 (test: cross-edition metavar fragment specifiers)
- #130566 (Break up compiletest `runtest.rs` into smaller helper modules)
- #130585 (Add tidy check for rustdoc templates to ensure the whitespace characters are all stripped)
- #130605 (Fix feature name in test)
- #130607 ([Clippy] Remove final std paths for diagnostic item)

r? `@ghost`
`@rustbot` modify labels: rollup
Commit: fb46739014
@ -309,6 +309,7 @@
         RwLockReadGuard,
         RwLockWriteGuard,
         Saturating,
+        SeekFrom,
         Send,
         SeqCst,
         Sized,
@ -2058,6 +2058,7 @@ fn seek_relative(&mut self, offset: i64) -> Result<()> {
 /// It is used by the [`Seek`] trait.
 #[derive(Copy, PartialEq, Eq, Clone, Debug)]
 #[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "SeekFrom")]
 pub enum SeekFrom {
     /// Sets the offset to the provided number of bytes.
     #[stable(feature = "rust1", since = "1.0.0")]
@ -8,7 +8,7 @@
 use clippy_utils::diagnostics::span_lint_and_sugg;
 use clippy_utils::source::snippet_with_applicability;
 use clippy_utils::ty::implements_trait;
-use clippy_utils::{match_def_path, paths};
+use clippy_utils::is_enum_variant_ctor;

 use super::SEEK_FROM_CURRENT;

@ -36,8 +36,8 @@ pub(super) fn check<'tcx>(cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>, recv: &'
 fn arg_is_seek_from_current<'tcx>(cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) -> bool {
     if let ExprKind::Call(f, args) = expr.kind
         && let ExprKind::Path(ref path) = f.kind
-        && let Some(def_id) = cx.qpath_res(path, f.hir_id).opt_def_id()
-        && match_def_path(cx, def_id, &paths::STD_IO_SEEK_FROM_CURRENT)
+        && let Some(ctor_call_id) = cx.qpath_res(path, f.hir_id).opt_def_id()
+        && is_enum_variant_ctor(cx, sym::SeekFrom, sym!(Current), ctor_call_id)
     {
         // check if argument of `SeekFrom::Current` is `0`
         if args.len() == 1
@ -1,6 +1,6 @@
 use clippy_utils::diagnostics::span_lint_and_then;
 use clippy_utils::ty::implements_trait;
-use clippy_utils::{is_expr_used_or_unified, match_def_path, paths};
+use clippy_utils::{is_expr_used_or_unified, is_enum_variant_ctor};
 use rustc_ast::ast::{LitIntType, LitKind};
 use rustc_data_structures::packed::Pu128;
 use rustc_errors::Applicability;
@ -28,8 +28,8 @@ pub(super) fn check<'tcx>(
         && implements_trait(cx, ty, seek_trait_id, &[])
         && let ExprKind::Call(func, args1) = arg.kind
         && let ExprKind::Path(ref path) = func.kind
-        && let Some(def_id) = cx.qpath_res(path, func.hir_id).opt_def_id()
-        && match_def_path(cx, def_id, &paths::STD_IO_SEEKFROM_START)
+        && let Some(ctor_call_id) = cx.qpath_res(path, func.hir_id).opt_def_id()
+        && is_enum_variant_ctor(cx, sym::SeekFrom, sym!(Start), ctor_call_id)
         && args1.len() == 1
         && let ExprKind::Lit(lit) = args1[0].kind
         && let LitKind::Int(Pu128(0), LitIntType::Unsuffixed) = lit.node
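For context (illustrative, not part of the diff): this is the kind of user code the two lints above flag. After this change the `SeekFrom::Current`/`SeekFrom::Start` constructors are recognised through the `SeekFrom` diagnostic item rather than a hard-coded `std::io` path, but the user-facing behaviour is unchanged:

use std::io::{Seek, SeekFrom};

fn current_position(f: &mut std::fs::File) -> std::io::Result<u64> {
    // `seek_from_current` fires here and suggests `f.stream_position()` instead.
    f.seek(SeekFrom::Current(0))
}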
@ -263,24 +263,18 @@ pub fn is_res_lang_ctor(cx: &LateContext<'_>, res: Res, lang_item: LangItem) ->
     }
 }

-pub fn is_res_diagnostic_ctor(cx: &LateContext<'_>, res: Res, diag_item: Symbol) -> bool {
-    if let Res::Def(DefKind::Ctor(..), id) = res
-        && let Some(id) = cx.tcx.opt_parent(id)
-    {
-        cx.tcx.is_diagnostic_item(diag_item, id)
-    } else {
-        false
-    }
-}
-
-/// Checks if a `QPath` resolves to a constructor of a diagnostic item.
-pub fn is_diagnostic_ctor(cx: &LateContext<'_>, qpath: &QPath<'_>, diagnostic_item: Symbol) -> bool {
-    if let QPath::Resolved(_, path) = qpath {
-        if let Res::Def(DefKind::Ctor(..), ctor_id) = path.res {
-            return cx.tcx.is_diagnostic_item(diagnostic_item, cx.tcx.parent(ctor_id));
-        }
-    }
-    false
+/// Checks if `{ctor_call_id}(...)` is `{enum_item}::{variant_name}(...)`.
+pub fn is_enum_variant_ctor(cx: &LateContext<'_>, enum_item: Symbol, variant_name: Symbol, ctor_call_id: DefId) -> bool {
+    let Some(enum_def_id) = cx.tcx.get_diagnostic_item(enum_item) else {
+        return false;
+    };
+
+    let variants = cx.tcx.adt_def(enum_def_id).variants().iter();
+    variants
+        .filter(|variant| variant.name == variant_name)
+        .filter_map(|variant| variant.ctor.as_ref())
+        .any(|(_, ctor_def_id)| *ctor_def_id == ctor_call_id)
 }

 /// Checks if the `DefId` matches the given diagnostic item or it's constructor.
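Why the path constants removed in the next hunk can go away (a minimal sketch, not part of the diff): once `SeekFrom` carries `rustc_diagnostic_item = "SeekFrom"`, clippy can resolve its `DefId` by symbol instead of matching a literal `["std", "io", "SeekFrom", ...]` path. The helper name below is hypothetical:

use rustc_hir::def_id::DefId;
use rustc_lint::LateContext;
use rustc_span::sym;

// Hypothetical helper: resolve `std::io::SeekFrom` via its diagnostic item.
fn seek_from_def_id(cx: &LateContext<'_>) -> Option<DefId> {
    cx.tcx.get_diagnostic_item(sym::SeekFrom)
}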
@ -29,8 +29,7 @@
 pub const SYNTAX_CONTEXT: [&str; 3] = ["rustc_span", "hygiene", "SyntaxContext"];

 // Paths in `core`/`alloc`/`std`. This should be avoided and cleaned up by adding diagnostic items.
-pub const STD_IO_SEEK_FROM_CURRENT: [&str; 4] = ["std", "io", "SeekFrom", "Current"];
-pub const STD_IO_SEEKFROM_START: [&str; 4] = ["std", "io", "SeekFrom", "Start"];
+// ... none currently!

 // Paths in clippy itself
 pub const MSRV: [&str; 3] = ["clippy_config", "msrvs", "Msrv"];
(File diff suppressed because it is too large.)
src/tools/compiletest/src/runtest/assembly.rs (new file, 19 lines)
@ -0,0 +1,19 @@
use super::TestCx;

impl TestCx<'_> {
    pub(super) fn run_assembly_test(&self) {
        if self.config.llvm_filecheck.is_none() {
            self.fatal("missing --llvm-filecheck");
        }

        let (proc_res, output_path) = self.compile_test_and_save_assembly();
        if !proc_res.status.success() {
            self.fatal_proc_rec("compilation failed!", &proc_res);
        }

        let proc_res = self.verify_with_filecheck(&output_path);
        if !proc_res.status.success() {
            self.fatal_proc_rec("verification with 'FileCheck' failed", &proc_res);
        }
    }
}
src/tools/compiletest/src/runtest/codegen.rs (new file, 22 lines)
@ -0,0 +1,22 @@
use super::{PassMode, TestCx};

impl TestCx<'_> {
    pub(super) fn run_codegen_test(&self) {
        if self.config.llvm_filecheck.is_none() {
            self.fatal("missing --llvm-filecheck");
        }

        let (proc_res, output_path) = self.compile_test_and_save_ir();
        if !proc_res.status.success() {
            self.fatal_proc_rec("compilation failed!", &proc_res);
        }

        if let Some(PassMode::Build) = self.pass_mode() {
            return;
        }
        let proc_res = self.verify_with_filecheck(&output_path);
        if !proc_res.status.success() {
            self.fatal_proc_rec("verification with 'FileCheck' failed", &proc_res);
        }
    }
}
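For reference, a sketch of the kind of test `run_codegen_test` drives (illustrative, not taken from this diff): a file under `tests/codegen` is compiled to LLVM IR and the embedded FileCheck directives are then verified against that IR.

//@ compile-flags: -Copt-level=3

// CHECK-LABEL: @add_one
#[no_mangle]
pub fn add_one(x: i32) -> i32 {
    // CHECK: add i32
    // CHECK: ret i32
    x + 1
}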
src/tools/compiletest/src/runtest/codegen_units.rs (new file, 191 lines)
@ -0,0 +1,191 @@
|
||||
use std::collections::HashSet;
|
||||
|
||||
use super::{Emit, TestCx, WillExecute};
|
||||
use crate::errors;
|
||||
use crate::util::static_regex;
|
||||
|
||||
impl TestCx<'_> {
|
||||
pub(super) fn run_codegen_units_test(&self) {
|
||||
assert!(self.revision.is_none(), "revisions not relevant here");
|
||||
|
||||
let proc_res = self.compile_test(WillExecute::No, Emit::None);
|
||||
|
||||
if !proc_res.status.success() {
|
||||
self.fatal_proc_rec("compilation failed!", &proc_res);
|
||||
}
|
||||
|
||||
self.check_no_compiler_crash(&proc_res, self.props.should_ice);
|
||||
|
||||
const PREFIX: &str = "MONO_ITEM ";
|
||||
const CGU_MARKER: &str = "@@";
|
||||
|
||||
// Some MonoItems can contain {closure@/path/to/checkout/tests/codgen-units/test.rs}
|
||||
// To prevent the current dir from leaking, we just replace the entire path to the test
|
||||
// file with TEST_PATH.
|
||||
let actual: Vec<MonoItem> = proc_res
|
||||
.stdout
|
||||
.lines()
|
||||
.filter(|line| line.starts_with(PREFIX))
|
||||
.map(|line| {
|
||||
line.replace(&self.testpaths.file.display().to_string(), "TEST_PATH").to_string()
|
||||
})
|
||||
.map(|line| str_to_mono_item(&line, true))
|
||||
.collect();
|
||||
|
||||
let expected: Vec<MonoItem> = errors::load_errors(&self.testpaths.file, None)
|
||||
.iter()
|
||||
.map(|e| str_to_mono_item(&e.msg[..], false))
|
||||
.collect();
|
||||
|
||||
let mut missing = Vec::new();
|
||||
let mut wrong_cgus = Vec::new();
|
||||
|
||||
for expected_item in &expected {
|
||||
let actual_item_with_same_name = actual.iter().find(|ti| ti.name == expected_item.name);
|
||||
|
||||
if let Some(actual_item) = actual_item_with_same_name {
|
||||
if !expected_item.codegen_units.is_empty() &&
|
||||
// Also check for codegen units
|
||||
expected_item.codegen_units != actual_item.codegen_units
|
||||
{
|
||||
wrong_cgus.push((expected_item.clone(), actual_item.clone()));
|
||||
}
|
||||
} else {
|
||||
missing.push(expected_item.string.clone());
|
||||
}
|
||||
}
|
||||
|
||||
let unexpected: Vec<_> = actual
|
||||
.iter()
|
||||
.filter(|acgu| !expected.iter().any(|ecgu| acgu.name == ecgu.name))
|
||||
.map(|acgu| acgu.string.clone())
|
||||
.collect();
|
||||
|
||||
if !missing.is_empty() {
|
||||
missing.sort();
|
||||
|
||||
println!("\nThese items should have been contained but were not:\n");
|
||||
|
||||
for item in &missing {
|
||||
println!("{}", item);
|
||||
}
|
||||
|
||||
println!("\n");
|
||||
}
|
||||
|
||||
if !unexpected.is_empty() {
|
||||
let sorted = {
|
||||
let mut sorted = unexpected.clone();
|
||||
sorted.sort();
|
||||
sorted
|
||||
};
|
||||
|
||||
println!("\nThese items were contained but should not have been:\n");
|
||||
|
||||
for item in sorted {
|
||||
println!("{}", item);
|
||||
}
|
||||
|
||||
println!("\n");
|
||||
}
|
||||
|
||||
if !wrong_cgus.is_empty() {
|
||||
wrong_cgus.sort_by_key(|pair| pair.0.name.clone());
|
||||
println!("\nThe following items were assigned to wrong codegen units:\n");
|
||||
|
||||
for &(ref expected_item, ref actual_item) in &wrong_cgus {
|
||||
println!("{}", expected_item.name);
|
||||
println!(" expected: {}", codegen_units_to_str(&expected_item.codegen_units));
|
||||
println!(" actual: {}", codegen_units_to_str(&actual_item.codegen_units));
|
||||
println!();
|
||||
}
|
||||
}
|
||||
|
||||
if !(missing.is_empty() && unexpected.is_empty() && wrong_cgus.is_empty()) {
|
||||
panic!();
|
||||
}
|
||||
|
||||
#[derive(Clone, Eq, PartialEq)]
|
||||
struct MonoItem {
|
||||
name: String,
|
||||
codegen_units: HashSet<String>,
|
||||
string: String,
|
||||
}
|
||||
|
||||
// [MONO_ITEM] name [@@ (cgu)+]
|
||||
fn str_to_mono_item(s: &str, cgu_has_crate_disambiguator: bool) -> MonoItem {
|
||||
let s = if s.starts_with(PREFIX) { (&s[PREFIX.len()..]).trim() } else { s.trim() };
|
||||
|
||||
let full_string = format!("{}{}", PREFIX, s);
|
||||
|
||||
let parts: Vec<&str> =
|
||||
s.split(CGU_MARKER).map(str::trim).filter(|s| !s.is_empty()).collect();
|
||||
|
||||
let name = parts[0].trim();
|
||||
|
||||
let cgus = if parts.len() > 1 {
|
||||
let cgus_str = parts[1];
|
||||
|
||||
cgus_str
|
||||
.split(' ')
|
||||
.map(str::trim)
|
||||
.filter(|s| !s.is_empty())
|
||||
.map(|s| {
|
||||
if cgu_has_crate_disambiguator {
|
||||
remove_crate_disambiguators_from_set_of_cgu_names(s)
|
||||
} else {
|
||||
s.to_string()
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
} else {
|
||||
HashSet::new()
|
||||
};
|
||||
|
||||
MonoItem { name: name.to_owned(), codegen_units: cgus, string: full_string }
|
||||
}
|
||||
|
||||
fn codegen_units_to_str(cgus: &HashSet<String>) -> String {
|
||||
let mut cgus: Vec<_> = cgus.iter().collect();
|
||||
cgus.sort();
|
||||
|
||||
let mut string = String::new();
|
||||
for cgu in cgus {
|
||||
string.push_str(&cgu[..]);
|
||||
string.push(' ');
|
||||
}
|
||||
|
||||
string
|
||||
}
|
||||
|
||||
// Given a cgu-name-prefix of the form <crate-name>.<crate-disambiguator> or
|
||||
// the form <crate-name1>.<crate-disambiguator1>-in-<crate-name2>.<crate-disambiguator2>,
|
||||
// remove all crate-disambiguators.
|
||||
fn remove_crate_disambiguator_from_cgu(cgu: &str) -> String {
|
||||
let Some(captures) =
|
||||
static_regex!(r"^[^\.]+(?P<d1>\.[[:alnum:]]+)(-in-[^\.]+(?P<d2>\.[[:alnum:]]+))?")
|
||||
.captures(cgu)
|
||||
else {
|
||||
panic!("invalid cgu name encountered: {cgu}");
|
||||
};
|
||||
|
||||
let mut new_name = cgu.to_owned();
|
||||
|
||||
if let Some(d2) = captures.name("d2") {
|
||||
new_name.replace_range(d2.start()..d2.end(), "");
|
||||
}
|
||||
|
||||
let d1 = captures.name("d1").unwrap();
|
||||
new_name.replace_range(d1.start()..d1.end(), "");
|
||||
|
||||
new_name
|
||||
}
|
||||
|
||||
// The name of merged CGUs is constructed as the names of the original
|
||||
// CGUs joined with "--". This function splits such composite CGU names
|
||||
// and handles each component individually.
|
||||
fn remove_crate_disambiguators_from_set_of_cgu_names(cgus: &str) -> String {
|
||||
cgus.split("--").map(remove_crate_disambiguator_from_cgu).collect::<Vec<_>>().join("--")
|
||||
}
|
||||
}
|
||||
}
|
@ -18,7 +18,7 @@ fn coverage_dump_path(&self) -> &Path {
             .unwrap_or_else(|| self.fatal("missing --coverage-dump"))
     }

-    pub(crate) fn run_coverage_map_test(&self) {
+    pub(super) fn run_coverage_map_test(&self) {
         let coverage_dump_path = self.coverage_dump_path();

         let (proc_res, llvm_ir_path) = self.compile_test_and_save_ir();
@ -50,7 +50,7 @@ pub(crate) fn run_coverage_map_test(&self) {
         }
     }

-    pub(crate) fn run_coverage_run_test(&self) {
+    pub(super) fn run_coverage_run_test(&self) {
         let should_run = self.run_if_enabled();
         let proc_res = self.compile_test(should_run, Emit::None);

src/tools/compiletest/src/runtest/crash.rs (new file, 25 lines)
@ -0,0 +1,25 @@
use super::{TestCx, WillExecute};

impl TestCx<'_> {
    pub(super) fn run_crash_test(&self) {
        let pm = self.pass_mode();
        let proc_res = self.compile_test(WillExecute::No, self.should_emit_metadata(pm));

        if std::env::var("COMPILETEST_VERBOSE_CRASHES").is_ok() {
            eprintln!("{}", proc_res.status);
            eprintln!("{}", proc_res.stdout);
            eprintln!("{}", proc_res.stderr);
            eprintln!("{}", proc_res.cmdline);
        }

        // if a test does not crash, consider it an error
        if proc_res.status.success() || matches!(proc_res.status.code(), Some(1 | 0)) {
            self.fatal(&format!(
                "crashtest no longer crashes/triggers ICE, horray! Please give it a meaningful name, \
                add a doc-comment to the start of the test explaining why it exists and \
                move it to tests/ui or wherever you see fit. Adding 'Fixes #<issueNr>' to your PR description \
                ensures that the corresponding ticket is auto-closed upon merge."
            ));
        }
    }
}
src/tools/compiletest/src/runtest/debuginfo.rs (new file, 509 lines)
@ -0,0 +1,509 @@
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::fs::File;
|
||||
use std::io::{BufRead, BufReader, Read};
|
||||
use std::path::Path;
|
||||
use std::process::{Command, Output, Stdio};
|
||||
|
||||
use tracing::debug;
|
||||
|
||||
use super::debugger::DebuggerCommands;
|
||||
use super::{Debugger, Emit, ProcRes, TestCx, Truncated, WillExecute};
|
||||
use crate::common::Config;
|
||||
use crate::util::logv;
|
||||
use crate::{extract_gdb_version, is_android_gdb_target};
|
||||
|
||||
impl TestCx<'_> {
|
||||
pub(super) fn run_debuginfo_test(&self) {
|
||||
match self.config.debugger.unwrap() {
|
||||
Debugger::Cdb => self.run_debuginfo_cdb_test(),
|
||||
Debugger::Gdb => self.run_debuginfo_gdb_test(),
|
||||
Debugger::Lldb => self.run_debuginfo_lldb_test(),
|
||||
}
|
||||
}
|
||||
|
||||
fn run_debuginfo_cdb_test(&self) {
|
||||
let config = Config {
|
||||
target_rustcflags: self.cleanup_debug_info_options(&self.config.target_rustcflags),
|
||||
host_rustcflags: self.cleanup_debug_info_options(&self.config.host_rustcflags),
|
||||
..self.config.clone()
|
||||
};
|
||||
|
||||
let test_cx = TestCx { config: &config, ..*self };
|
||||
|
||||
test_cx.run_debuginfo_cdb_test_no_opt();
|
||||
}
|
||||
|
||||
fn run_debuginfo_cdb_test_no_opt(&self) {
|
||||
let exe_file = self.make_exe_name();
|
||||
|
||||
// Existing PDB files are update in-place. When changing the debuginfo
|
||||
// the compiler generates for something, this can lead to the situation
|
||||
// where both the old and the new version of the debuginfo for the same
|
||||
// type is present in the PDB, which is very confusing.
|
||||
// Therefore we delete any existing PDB file before compiling the test
|
||||
// case.
|
||||
// FIXME: If can reliably detect that MSVC's link.exe is used, then
|
||||
// passing `/INCREMENTAL:NO` might be a cleaner way to do this.
|
||||
let pdb_file = exe_file.with_extension(".pdb");
|
||||
if pdb_file.exists() {
|
||||
std::fs::remove_file(pdb_file).unwrap();
|
||||
}
|
||||
|
||||
// compile test file (it should have 'compile-flags:-g' in the header)
|
||||
let should_run = self.run_if_enabled();
|
||||
let compile_result = self.compile_test(should_run, Emit::None);
|
||||
if !compile_result.status.success() {
|
||||
self.fatal_proc_rec("compilation failed!", &compile_result);
|
||||
}
|
||||
if let WillExecute::Disabled = should_run {
|
||||
return;
|
||||
}
|
||||
|
||||
let prefixes = {
|
||||
static PREFIXES: &[&str] = &["cdb", "cdbg"];
|
||||
// No "native rust support" variation for CDB yet.
|
||||
PREFIXES
|
||||
};
|
||||
|
||||
// Parse debugger commands etc from test files
|
||||
let dbg_cmds = DebuggerCommands::parse_from(
|
||||
&self.testpaths.file,
|
||||
self.config,
|
||||
prefixes,
|
||||
self.revision,
|
||||
)
|
||||
.unwrap_or_else(|e| self.fatal(&e));
|
||||
|
||||
// https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/debugger-commands
|
||||
let mut script_str = String::with_capacity(2048);
|
||||
script_str.push_str("version\n"); // List CDB (and more) version info in test output
|
||||
script_str.push_str(".nvlist\n"); // List loaded `*.natvis` files, bulk of custom MSVC debug
|
||||
|
||||
// If a .js file exists next to the source file being tested, then this is a JavaScript
|
||||
// debugging extension that needs to be loaded.
|
||||
let mut js_extension = self.testpaths.file.clone();
|
||||
js_extension.set_extension("cdb.js");
|
||||
if js_extension.exists() {
|
||||
script_str.push_str(&format!(".scriptload \"{}\"\n", js_extension.to_string_lossy()));
|
||||
}
|
||||
|
||||
// Set breakpoints on every line that contains the string "#break"
|
||||
let source_file_name = self.testpaths.file.file_name().unwrap().to_string_lossy();
|
||||
for line in &dbg_cmds.breakpoint_lines {
|
||||
script_str.push_str(&format!("bp `{}:{}`\n", source_file_name, line));
|
||||
}
|
||||
|
||||
// Append the other `cdb-command:`s
|
||||
for line in &dbg_cmds.commands {
|
||||
script_str.push_str(line);
|
||||
script_str.push('\n');
|
||||
}
|
||||
|
||||
script_str.push_str("qq\n"); // Quit the debugger (including remote debugger, if any)
|
||||
|
||||
// Write the script into a file
|
||||
debug!("script_str = {}", script_str);
|
||||
self.dump_output_file(&script_str, "debugger.script");
|
||||
let debugger_script = self.make_out_name("debugger.script");
|
||||
|
||||
let cdb_path = &self.config.cdb.as_ref().unwrap();
|
||||
let mut cdb = Command::new(cdb_path);
|
||||
cdb.arg("-lines") // Enable source line debugging.
|
||||
.arg("-cf")
|
||||
.arg(&debugger_script)
|
||||
.arg(&exe_file);
|
||||
|
||||
let debugger_run_result = self.compose_and_run(
|
||||
cdb,
|
||||
self.config.run_lib_path.to_str().unwrap(),
|
||||
None, // aux_path
|
||||
None, // input
|
||||
);
|
||||
|
||||
if !debugger_run_result.status.success() {
|
||||
self.fatal_proc_rec("Error while running CDB", &debugger_run_result);
|
||||
}
|
||||
|
||||
if let Err(e) = dbg_cmds.check_output(&debugger_run_result) {
|
||||
self.fatal_proc_rec(&e, &debugger_run_result);
|
||||
}
|
||||
}
|
||||
|
||||
fn run_debuginfo_gdb_test(&self) {
|
||||
let config = Config {
|
||||
target_rustcflags: self.cleanup_debug_info_options(&self.config.target_rustcflags),
|
||||
host_rustcflags: self.cleanup_debug_info_options(&self.config.host_rustcflags),
|
||||
..self.config.clone()
|
||||
};
|
||||
|
||||
let test_cx = TestCx { config: &config, ..*self };
|
||||
|
||||
test_cx.run_debuginfo_gdb_test_no_opt();
|
||||
}
|
||||
|
||||
fn run_debuginfo_gdb_test_no_opt(&self) {
|
||||
let dbg_cmds = DebuggerCommands::parse_from(
|
||||
&self.testpaths.file,
|
||||
self.config,
|
||||
&["gdb"],
|
||||
self.revision,
|
||||
)
|
||||
.unwrap_or_else(|e| self.fatal(&e));
|
||||
let mut cmds = dbg_cmds.commands.join("\n");
|
||||
|
||||
// compile test file (it should have 'compile-flags:-g' in the header)
|
||||
let should_run = self.run_if_enabled();
|
||||
let compiler_run_result = self.compile_test(should_run, Emit::None);
|
||||
if !compiler_run_result.status.success() {
|
||||
self.fatal_proc_rec("compilation failed!", &compiler_run_result);
|
||||
}
|
||||
if let WillExecute::Disabled = should_run {
|
||||
return;
|
||||
}
|
||||
|
||||
let exe_file = self.make_exe_name();
|
||||
|
||||
let debugger_run_result;
|
||||
if is_android_gdb_target(&self.config.target) {
|
||||
cmds = cmds.replace("run", "continue");
|
||||
|
||||
let tool_path = match self.config.android_cross_path.to_str() {
|
||||
Some(x) => x.to_owned(),
|
||||
None => self.fatal("cannot find android cross path"),
|
||||
};
|
||||
|
||||
// write debugger script
|
||||
let mut script_str = String::with_capacity(2048);
|
||||
script_str.push_str(&format!("set charset {}\n", Self::charset()));
|
||||
script_str.push_str(&format!("set sysroot {}\n", tool_path));
|
||||
script_str.push_str(&format!("file {}\n", exe_file.to_str().unwrap()));
|
||||
script_str.push_str("target remote :5039\n");
|
||||
script_str.push_str(&format!(
|
||||
"set solib-search-path \
|
||||
./{}/stage2/lib/rustlib/{}/lib/\n",
|
||||
self.config.host, self.config.target
|
||||
));
|
||||
for line in &dbg_cmds.breakpoint_lines {
|
||||
script_str.push_str(
|
||||
format!(
|
||||
"break {:?}:{}\n",
|
||||
self.testpaths.file.file_name().unwrap().to_string_lossy(),
|
||||
*line
|
||||
)
|
||||
.as_str(),
|
||||
);
|
||||
}
|
||||
script_str.push_str(&cmds);
|
||||
script_str.push_str("\nquit\n");
|
||||
|
||||
debug!("script_str = {}", script_str);
|
||||
self.dump_output_file(&script_str, "debugger.script");
|
||||
|
||||
let adb_path = &self.config.adb_path;
|
||||
|
||||
Command::new(adb_path)
|
||||
.arg("push")
|
||||
.arg(&exe_file)
|
||||
.arg(&self.config.adb_test_dir)
|
||||
.status()
|
||||
.unwrap_or_else(|e| panic!("failed to exec `{adb_path:?}`: {e:?}"));
|
||||
|
||||
Command::new(adb_path)
|
||||
.args(&["forward", "tcp:5039", "tcp:5039"])
|
||||
.status()
|
||||
.unwrap_or_else(|e| panic!("failed to exec `{adb_path:?}`: {e:?}"));
|
||||
|
||||
let adb_arg = format!(
|
||||
"export LD_LIBRARY_PATH={}; \
|
||||
gdbserver{} :5039 {}/{}",
|
||||
self.config.adb_test_dir.clone(),
|
||||
if self.config.target.contains("aarch64") { "64" } else { "" },
|
||||
self.config.adb_test_dir.clone(),
|
||||
exe_file.file_name().unwrap().to_str().unwrap()
|
||||
);
|
||||
|
||||
debug!("adb arg: {}", adb_arg);
|
||||
let mut adb = Command::new(adb_path)
|
||||
.args(&["shell", &adb_arg])
|
||||
.stdout(Stdio::piped())
|
||||
.stderr(Stdio::inherit())
|
||||
.spawn()
|
||||
.unwrap_or_else(|e| panic!("failed to exec `{adb_path:?}`: {e:?}"));
|
||||
|
||||
// Wait for the gdbserver to print out "Listening on port ..."
|
||||
// at which point we know that it's started and then we can
|
||||
// execute the debugger below.
|
||||
let mut stdout = BufReader::new(adb.stdout.take().unwrap());
|
||||
let mut line = String::new();
|
||||
loop {
|
||||
line.truncate(0);
|
||||
stdout.read_line(&mut line).unwrap();
|
||||
if line.starts_with("Listening on port 5039") {
|
||||
break;
|
||||
}
|
||||
}
|
||||
drop(stdout);
|
||||
|
||||
let mut debugger_script = OsString::from("-command=");
|
||||
debugger_script.push(self.make_out_name("debugger.script"));
|
||||
let debugger_opts: &[&OsStr] =
|
||||
&["-quiet".as_ref(), "-batch".as_ref(), "-nx".as_ref(), &debugger_script];
|
||||
|
||||
let gdb_path = self.config.gdb.as_ref().unwrap();
|
||||
let Output { status, stdout, stderr } = Command::new(&gdb_path)
|
||||
.args(debugger_opts)
|
||||
.output()
|
||||
.unwrap_or_else(|e| panic!("failed to exec `{gdb_path:?}`: {e:?}"));
|
||||
let cmdline = {
|
||||
let mut gdb = Command::new(&format!("{}-gdb", self.config.target));
|
||||
gdb.args(debugger_opts);
|
||||
let cmdline = self.make_cmdline(&gdb, "");
|
||||
logv(self.config, format!("executing {}", cmdline));
|
||||
cmdline
|
||||
};
|
||||
|
||||
debugger_run_result = ProcRes {
|
||||
status,
|
||||
stdout: String::from_utf8(stdout).unwrap(),
|
||||
stderr: String::from_utf8(stderr).unwrap(),
|
||||
truncated: Truncated::No,
|
||||
cmdline,
|
||||
};
|
||||
if adb.kill().is_err() {
|
||||
println!("Adb process is already finished.");
|
||||
}
|
||||
} else {
|
||||
let rust_src_root =
|
||||
self.config.find_rust_src_root().expect("Could not find Rust source root");
|
||||
let rust_pp_module_rel_path = Path::new("./src/etc");
|
||||
let rust_pp_module_abs_path =
|
||||
rust_src_root.join(rust_pp_module_rel_path).to_str().unwrap().to_owned();
|
||||
// write debugger script
|
||||
let mut script_str = String::with_capacity(2048);
|
||||
script_str.push_str(&format!("set charset {}\n", Self::charset()));
|
||||
script_str.push_str("show version\n");
|
||||
|
||||
match self.config.gdb_version {
|
||||
Some(version) => {
|
||||
println!("NOTE: compiletest thinks it is using GDB version {}", version);
|
||||
|
||||
if version > extract_gdb_version("7.4").unwrap() {
|
||||
// Add the directory containing the pretty printers to
|
||||
// GDB's script auto loading safe path
|
||||
script_str.push_str(&format!(
|
||||
"add-auto-load-safe-path {}\n",
|
||||
rust_pp_module_abs_path.replace(r"\", r"\\")
|
||||
));
|
||||
|
||||
let output_base_dir = self.output_base_dir().to_str().unwrap().to_owned();
|
||||
|
||||
// Add the directory containing the output binary to
|
||||
// include embedded pretty printers to GDB's script
|
||||
// auto loading safe path
|
||||
script_str.push_str(&format!(
|
||||
"add-auto-load-safe-path {}\n",
|
||||
output_base_dir.replace(r"\", r"\\")
|
||||
));
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
println!(
|
||||
"NOTE: compiletest does not know which version of \
|
||||
GDB it is using"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// The following line actually doesn't have to do anything with
|
||||
// pretty printing, it just tells GDB to print values on one line:
|
||||
script_str.push_str("set print pretty off\n");
|
||||
|
||||
// Add the pretty printer directory to GDB's source-file search path
|
||||
script_str
|
||||
.push_str(&format!("directory {}\n", rust_pp_module_abs_path.replace(r"\", r"\\")));
|
||||
|
||||
// Load the target executable
|
||||
script_str
|
||||
.push_str(&format!("file {}\n", exe_file.to_str().unwrap().replace(r"\", r"\\")));
|
||||
|
||||
// Force GDB to print values in the Rust format.
|
||||
script_str.push_str("set language rust\n");
|
||||
|
||||
// Add line breakpoints
|
||||
for line in &dbg_cmds.breakpoint_lines {
|
||||
script_str.push_str(&format!(
|
||||
"break '{}':{}\n",
|
||||
self.testpaths.file.file_name().unwrap().to_string_lossy(),
|
||||
*line
|
||||
));
|
||||
}
|
||||
|
||||
script_str.push_str(&cmds);
|
||||
script_str.push_str("\nquit\n");
|
||||
|
||||
debug!("script_str = {}", script_str);
|
||||
self.dump_output_file(&script_str, "debugger.script");
|
||||
|
||||
let mut debugger_script = OsString::from("-command=");
|
||||
debugger_script.push(self.make_out_name("debugger.script"));
|
||||
|
||||
let debugger_opts: &[&OsStr] =
|
||||
&["-quiet".as_ref(), "-batch".as_ref(), "-nx".as_ref(), &debugger_script];
|
||||
|
||||
let mut gdb = Command::new(self.config.gdb.as_ref().unwrap());
|
||||
let pythonpath = if let Ok(pp) = std::env::var("PYTHONPATH") {
|
||||
format!("{pp}:{rust_pp_module_abs_path}")
|
||||
} else {
|
||||
rust_pp_module_abs_path
|
||||
};
|
||||
gdb.args(debugger_opts).env("PYTHONPATH", pythonpath);
|
||||
|
||||
debugger_run_result =
|
||||
self.compose_and_run(gdb, self.config.run_lib_path.to_str().unwrap(), None, None);
|
||||
}
|
||||
|
||||
if !debugger_run_result.status.success() {
|
||||
self.fatal_proc_rec("gdb failed to execute", &debugger_run_result);
|
||||
}
|
||||
|
||||
if let Err(e) = dbg_cmds.check_output(&debugger_run_result) {
|
||||
self.fatal_proc_rec(&e, &debugger_run_result);
|
||||
}
|
||||
}
|
||||
|
||||
fn run_debuginfo_lldb_test(&self) {
|
||||
if self.config.lldb_python_dir.is_none() {
|
||||
self.fatal("Can't run LLDB test because LLDB's python path is not set.");
|
||||
}
|
||||
|
||||
let config = Config {
|
||||
target_rustcflags: self.cleanup_debug_info_options(&self.config.target_rustcflags),
|
||||
host_rustcflags: self.cleanup_debug_info_options(&self.config.host_rustcflags),
|
||||
..self.config.clone()
|
||||
};
|
||||
|
||||
let test_cx = TestCx { config: &config, ..*self };
|
||||
|
||||
test_cx.run_debuginfo_lldb_test_no_opt();
|
||||
}
|
||||
|
||||
fn run_debuginfo_lldb_test_no_opt(&self) {
|
||||
// compile test file (it should have 'compile-flags:-g' in the header)
|
||||
let should_run = self.run_if_enabled();
|
||||
let compile_result = self.compile_test(should_run, Emit::None);
|
||||
if !compile_result.status.success() {
|
||||
self.fatal_proc_rec("compilation failed!", &compile_result);
|
||||
}
|
||||
if let WillExecute::Disabled = should_run {
|
||||
return;
|
||||
}
|
||||
|
||||
let exe_file = self.make_exe_name();
|
||||
|
||||
match self.config.lldb_version {
|
||||
Some(ref version) => {
|
||||
println!("NOTE: compiletest thinks it is using LLDB version {}", version);
|
||||
}
|
||||
_ => {
|
||||
println!(
|
||||
"NOTE: compiletest does not know which version of \
|
||||
LLDB it is using"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Parse debugger commands etc from test files
|
||||
let dbg_cmds = DebuggerCommands::parse_from(
|
||||
&self.testpaths.file,
|
||||
self.config,
|
||||
&["lldb"],
|
||||
self.revision,
|
||||
)
|
||||
.unwrap_or_else(|e| self.fatal(&e));
|
||||
|
||||
// Write debugger script:
|
||||
// We don't want to hang when calling `quit` while the process is still running
|
||||
let mut script_str = String::from("settings set auto-confirm true\n");
|
||||
|
||||
// Make LLDB emit its version, so we have it documented in the test output
|
||||
script_str.push_str("version\n");
|
||||
|
||||
// Switch LLDB into "Rust mode"
|
||||
let rust_src_root =
|
||||
self.config.find_rust_src_root().expect("Could not find Rust source root");
|
||||
let rust_pp_module_rel_path = Path::new("./src/etc");
|
||||
let rust_pp_module_abs_path = rust_src_root.join(rust_pp_module_rel_path);
|
||||
|
||||
script_str.push_str(&format!(
|
||||
"command script import {}/lldb_lookup.py\n",
|
||||
rust_pp_module_abs_path.to_str().unwrap()
|
||||
));
|
||||
File::open(rust_pp_module_abs_path.join("lldb_commands"))
|
||||
.and_then(|mut file| file.read_to_string(&mut script_str))
|
||||
.expect("Failed to read lldb_commands");
|
||||
|
||||
// Set breakpoints on every line that contains the string "#break"
|
||||
let source_file_name = self.testpaths.file.file_name().unwrap().to_string_lossy();
|
||||
for line in &dbg_cmds.breakpoint_lines {
|
||||
script_str.push_str(&format!(
|
||||
"breakpoint set --file '{}' --line {}\n",
|
||||
source_file_name, line
|
||||
));
|
||||
}
|
||||
|
||||
// Append the other commands
|
||||
for line in &dbg_cmds.commands {
|
||||
script_str.push_str(line);
|
||||
script_str.push('\n');
|
||||
}
|
||||
|
||||
// Finally, quit the debugger
|
||||
script_str.push_str("\nquit\n");
|
||||
|
||||
// Write the script into a file
|
||||
debug!("script_str = {}", script_str);
|
||||
self.dump_output_file(&script_str, "debugger.script");
|
||||
let debugger_script = self.make_out_name("debugger.script");
|
||||
|
||||
// Let LLDB execute the script via lldb_batchmode.py
|
||||
let debugger_run_result = self.run_lldb(&exe_file, &debugger_script, &rust_src_root);
|
||||
|
||||
if !debugger_run_result.status.success() {
|
||||
self.fatal_proc_rec("Error while running LLDB", &debugger_run_result);
|
||||
}
|
||||
|
||||
if let Err(e) = dbg_cmds.check_output(&debugger_run_result) {
|
||||
self.fatal_proc_rec(&e, &debugger_run_result);
|
||||
}
|
||||
}
|
||||
|
||||
fn run_lldb(
|
||||
&self,
|
||||
test_executable: &Path,
|
||||
debugger_script: &Path,
|
||||
rust_src_root: &Path,
|
||||
) -> ProcRes {
|
||||
// Prepare the lldb_batchmode which executes the debugger script
|
||||
let lldb_script_path = rust_src_root.join("src/etc/lldb_batchmode.py");
|
||||
let pythonpath = if let Ok(pp) = std::env::var("PYTHONPATH") {
|
||||
format!("{pp}:{}", self.config.lldb_python_dir.as_ref().unwrap())
|
||||
} else {
|
||||
self.config.lldb_python_dir.as_ref().unwrap().to_string()
|
||||
};
|
||||
self.run_command_to_procres(
|
||||
Command::new(&self.config.python)
|
||||
.arg(&lldb_script_path)
|
||||
.arg(test_executable)
|
||||
.arg(debugger_script)
|
||||
.env("PYTHONUNBUFFERED", "1") // Help debugging #78665
|
||||
.env("PYTHONPATH", pythonpath),
|
||||
)
|
||||
}
|
||||
|
||||
fn cleanup_debug_info_options(&self, options: &Vec<String>) -> Vec<String> {
|
||||
// Remove options that are either unwanted (-O) or may lead to duplicates due to RUSTFLAGS.
|
||||
let options_to_remove = ["-O".to_owned(), "-g".to_owned(), "--debuginfo".to_owned()];
|
||||
|
||||
options.iter().filter(|x| !options_to_remove.contains(x)).cloned().collect()
|
||||
}
|
||||
}
|
src/tools/compiletest/src/runtest/incremental.rs (new file, 128 lines)
@ -0,0 +1,128 @@
|
||||
use super::{TestCx, WillExecute};
|
||||
use crate::errors;
|
||||
|
||||
// FIXME(jieyouxu): `run_rpass_test` got hoisted out of this because apparently valgrind falls back
|
||||
// to `run_rpass_test` if valgrind isn't available, which is questionable, but keeping it for
|
||||
// refactoring changes to preserve current behavior.
|
||||
|
||||
impl TestCx<'_> {
|
||||
pub(super) fn run_incremental_test(&self) {
|
||||
// Basic plan for a test incremental/foo/bar.rs:
|
||||
// - load list of revisions rpass1, cfail2, rpass3
|
||||
// - each should begin with `cpass`, `rpass`, `cfail`, or `rfail`
|
||||
// - if `cpass`, expect compilation to succeed, don't execute
|
||||
// - if `rpass`, expect compilation and execution to succeed
|
||||
// - if `cfail`, expect compilation to fail
|
||||
// - if `rfail`, expect compilation to succeed and execution to fail
|
||||
// - create a directory build/foo/bar.incremental
|
||||
// - compile foo/bar.rs with -C incremental=.../foo/bar.incremental and -C rpass1
|
||||
// - because name of revision starts with "rpass", expect success
|
||||
// - compile foo/bar.rs with -C incremental=.../foo/bar.incremental and -C cfail2
|
||||
// - because name of revision starts with "cfail", expect an error
|
||||
// - load expected errors as usual, but filter for those that end in `[rfail2]`
|
||||
// - compile foo/bar.rs with -C incremental=.../foo/bar.incremental and -C rpass3
|
||||
// - because name of revision starts with "rpass", expect success
|
||||
// - execute build/foo/bar.exe and save output
|
||||
//
|
||||
// FIXME -- use non-incremental mode as an oracle? That doesn't apply
|
||||
// to #[rustc_dirty] and clean tests I guess
|
||||
|
||||
let revision = self.revision.expect("incremental tests require a list of revisions");
|
||||
|
||||
// Incremental workproduct directory should have already been created.
|
||||
let incremental_dir = self.props.incremental_dir.as_ref().unwrap();
|
||||
assert!(incremental_dir.exists(), "init_incremental_test failed to create incremental dir");
|
||||
|
||||
if self.config.verbose {
|
||||
print!("revision={:?} props={:#?}", revision, self.props);
|
||||
}
|
||||
|
||||
if revision.starts_with("cpass") {
|
||||
if self.props.should_ice {
|
||||
self.fatal("can only use should-ice in cfail tests");
|
||||
}
|
||||
self.run_cpass_test();
|
||||
} else if revision.starts_with("rpass") {
|
||||
if self.props.should_ice {
|
||||
self.fatal("can only use should-ice in cfail tests");
|
||||
}
|
||||
self.run_rpass_test();
|
||||
} else if revision.starts_with("rfail") {
|
||||
if self.props.should_ice {
|
||||
self.fatal("can only use should-ice in cfail tests");
|
||||
}
|
||||
self.run_rfail_test();
|
||||
} else if revision.starts_with("cfail") {
|
||||
self.run_cfail_test();
|
||||
} else {
|
||||
self.fatal("revision name must begin with cpass, rpass, rfail, or cfail");
|
||||
}
|
||||
}
|
||||
|
||||
fn run_cpass_test(&self) {
|
||||
let emit_metadata = self.should_emit_metadata(self.pass_mode());
|
||||
let proc_res = self.compile_test(WillExecute::No, emit_metadata);
|
||||
|
||||
if !proc_res.status.success() {
|
||||
self.fatal_proc_rec("compilation failed!", &proc_res);
|
||||
}
|
||||
|
||||
// FIXME(#41968): Move this check to tidy?
|
||||
if !errors::load_errors(&self.testpaths.file, self.revision).is_empty() {
|
||||
self.fatal("compile-pass tests with expected warnings should be moved to ui/");
|
||||
}
|
||||
}
|
||||
|
||||
fn run_cfail_test(&self) {
|
||||
let pm = self.pass_mode();
|
||||
let proc_res = self.compile_test(WillExecute::No, self.should_emit_metadata(pm));
|
||||
self.check_if_test_should_compile(&proc_res, pm);
|
||||
self.check_no_compiler_crash(&proc_res, self.props.should_ice);
|
||||
|
||||
let output_to_check = self.get_output(&proc_res);
|
||||
let expected_errors = errors::load_errors(&self.testpaths.file, self.revision);
|
||||
if !expected_errors.is_empty() {
|
||||
if !self.props.error_patterns.is_empty() || !self.props.regex_error_patterns.is_empty()
|
||||
{
|
||||
self.fatal("both error pattern and expected errors specified");
|
||||
}
|
||||
self.check_expected_errors(expected_errors, &proc_res);
|
||||
} else {
|
||||
self.check_all_error_patterns(&output_to_check, &proc_res, pm);
|
||||
}
|
||||
if self.props.should_ice {
|
||||
match proc_res.status.code() {
|
||||
Some(101) => (),
|
||||
_ => self.fatal("expected ICE"),
|
||||
}
|
||||
}
|
||||
|
||||
self.check_forbid_output(&output_to_check, &proc_res);
|
||||
}
|
||||
|
||||
fn run_rfail_test(&self) {
|
||||
let pm = self.pass_mode();
|
||||
let should_run = self.run_if_enabled();
|
||||
let proc_res = self.compile_test(should_run, self.should_emit_metadata(pm));
|
||||
|
||||
if !proc_res.status.success() {
|
||||
self.fatal_proc_rec("compilation failed!", &proc_res);
|
||||
}
|
||||
|
||||
if let WillExecute::Disabled = should_run {
|
||||
return;
|
||||
}
|
||||
|
||||
let proc_res = self.exec_compiled_test();
|
||||
|
||||
// The value our Makefile configures valgrind to return on failure
|
||||
const VALGRIND_ERR: i32 = 100;
|
||||
if proc_res.status.code() == Some(VALGRIND_ERR) {
|
||||
self.fatal_proc_rec("run-fail test isn't valgrind-clean!", &proc_res);
|
||||
}
|
||||
|
||||
let output_to_check = self.get_output(&proc_res);
|
||||
self.check_correct_failure_status(&proc_res);
|
||||
self.check_all_error_patterns(&output_to_check, &proc_res, pm);
|
||||
}
|
||||
}
|
src/tools/compiletest/src/runtest/js_doc.rs (new file, 32 lines)
@ -0,0 +1,32 @@
use std::process::Command;

use super::TestCx;

impl TestCx<'_> {
    pub(super) fn run_js_doc_test(&self) {
        if let Some(nodejs) = &self.config.nodejs {
            let out_dir = self.output_base_dir();

            self.document(&out_dir, &self.testpaths);

            let root = self.config.find_rust_src_root().unwrap();
            let file_stem =
                self.testpaths.file.file_stem().and_then(|f| f.to_str()).expect("no file stem");
            let res = self.run_command_to_procres(
                Command::new(&nodejs)
                    .arg(root.join("src/tools/rustdoc-js/tester.js"))
                    .arg("--doc-folder")
                    .arg(out_dir)
                    .arg("--crate-name")
                    .arg(file_stem.replace("-", "_"))
                    .arg("--test-file")
                    .arg(self.testpaths.file.with_extension("js")),
            );
            if !res.status.success() {
                self.fatal_proc_rec("rustdoc-js test failed!", &res);
            }
        } else {
            self.fatal("no nodeJS");
        }
    }
}
src/tools/compiletest/src/runtest/mir_opt.rs (new file, 155 lines)
@ -0,0 +1,155 @@
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use glob::glob;
|
||||
use miropt_test_tools::{files_for_miropt_test, MiroptTest, MiroptTestFile};
|
||||
use tracing::debug;
|
||||
|
||||
use super::{Emit, TestCx, WillExecute};
|
||||
use crate::compute_diff::write_diff;
|
||||
|
||||
impl TestCx<'_> {
|
||||
pub(super) fn run_mir_opt_test(&self) {
|
||||
let pm = self.pass_mode();
|
||||
let should_run = self.should_run(pm);
|
||||
|
||||
let mut test_info = files_for_miropt_test(
|
||||
&self.testpaths.file,
|
||||
self.config.get_pointer_width(),
|
||||
self.config.target_cfg().panic.for_miropt_test_tools(),
|
||||
);
|
||||
|
||||
let passes = std::mem::take(&mut test_info.passes);
|
||||
|
||||
let proc_res = self.compile_test_with_passes(should_run, Emit::Mir, passes);
|
||||
if !proc_res.status.success() {
|
||||
self.fatal_proc_rec("compilation failed!", &proc_res);
|
||||
}
|
||||
self.check_mir_dump(test_info);
|
||||
|
||||
if let WillExecute::Yes = should_run {
|
||||
let proc_res = self.exec_compiled_test();
|
||||
|
||||
if !proc_res.status.success() {
|
||||
self.fatal_proc_rec("test run failed!", &proc_res);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn check_mir_dump(&self, test_info: MiroptTest) {
|
||||
let test_dir = self.testpaths.file.parent().unwrap();
|
||||
let test_crate =
|
||||
self.testpaths.file.file_stem().unwrap().to_str().unwrap().replace('-', "_");
|
||||
|
||||
let MiroptTest { run_filecheck, suffix, files, passes: _ } = test_info;
|
||||
|
||||
if self.config.bless {
|
||||
for e in
|
||||
glob(&format!("{}/{}.*{}.mir", test_dir.display(), test_crate, suffix)).unwrap()
|
||||
{
|
||||
fs::remove_file(e.unwrap()).unwrap();
|
||||
}
|
||||
for e in
|
||||
glob(&format!("{}/{}.*{}.diff", test_dir.display(), test_crate, suffix)).unwrap()
|
||||
{
|
||||
fs::remove_file(e.unwrap()).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
for MiroptTestFile { from_file, to_file, expected_file } in files {
|
||||
let dumped_string = if let Some(after) = to_file {
|
||||
self.diff_mir_files(from_file.into(), after.into())
|
||||
} else {
|
||||
let mut output_file = PathBuf::new();
|
||||
output_file.push(self.get_mir_dump_dir());
|
||||
output_file.push(&from_file);
|
||||
debug!(
|
||||
"comparing the contents of: {} with {}",
|
||||
output_file.display(),
|
||||
expected_file.display()
|
||||
);
|
||||
if !output_file.exists() {
|
||||
panic!(
|
||||
"Output file `{}` from test does not exist, available files are in `{}`",
|
||||
output_file.display(),
|
||||
output_file.parent().unwrap().display()
|
||||
);
|
||||
}
|
||||
self.check_mir_test_timestamp(&from_file, &output_file);
|
||||
let dumped_string = fs::read_to_string(&output_file).unwrap();
|
||||
self.normalize_output(&dumped_string, &[])
|
||||
};
|
||||
|
||||
if self.config.bless {
|
||||
let _ = fs::remove_file(&expected_file);
|
||||
fs::write(expected_file, dumped_string.as_bytes()).unwrap();
|
||||
} else {
|
||||
if !expected_file.exists() {
|
||||
panic!("Output file `{}` from test does not exist", expected_file.display());
|
||||
}
|
||||
let expected_string = fs::read_to_string(&expected_file).unwrap();
|
||||
if dumped_string != expected_string {
|
||||
print!("{}", write_diff(&expected_string, &dumped_string, 3));
|
||||
panic!(
|
||||
"Actual MIR output differs from expected MIR output {}",
|
||||
expected_file.display()
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if run_filecheck {
|
||||
let output_path = self.output_base_name().with_extension("mir");
|
||||
let proc_res = self.verify_with_filecheck(&output_path);
|
||||
if !proc_res.status.success() {
|
||||
self.fatal_proc_rec("verification with 'FileCheck' failed", &proc_res);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn diff_mir_files(&self, before: PathBuf, after: PathBuf) -> String {
|
||||
let to_full_path = |path: PathBuf| {
|
||||
let full = self.get_mir_dump_dir().join(&path);
|
||||
if !full.exists() {
|
||||
panic!(
|
||||
"the mir dump file for {} does not exist (requested in {})",
|
||||
path.display(),
|
||||
self.testpaths.file.display(),
|
||||
);
|
||||
}
|
||||
full
|
||||
};
|
||||
let before = to_full_path(before);
|
||||
let after = to_full_path(after);
|
||||
debug!("comparing the contents of: {} with {}", before.display(), after.display());
|
||||
let before = fs::read_to_string(before).unwrap();
|
||||
let after = fs::read_to_string(after).unwrap();
|
||||
let before = self.normalize_output(&before, &[]);
|
||||
let after = self.normalize_output(&after, &[]);
|
||||
let mut dumped_string = String::new();
|
||||
for result in diff::lines(&before, &after) {
|
||||
use std::fmt::Write;
|
||||
match result {
|
||||
diff::Result::Left(s) => writeln!(dumped_string, "- {}", s).unwrap(),
|
||||
diff::Result::Right(s) => writeln!(dumped_string, "+ {}", s).unwrap(),
|
||||
diff::Result::Both(s, _) => writeln!(dumped_string, " {}", s).unwrap(),
|
||||
}
|
||||
}
|
||||
dumped_string
|
||||
}
|
||||
|
||||
fn check_mir_test_timestamp(&self, test_name: &str, output_file: &Path) {
|
||||
let t = |file| fs::metadata(file).unwrap().modified().unwrap();
|
||||
let source_file = &self.testpaths.file;
|
||||
let output_time = t(output_file);
|
||||
let source_time = t(source_file);
|
||||
if source_time > output_time {
|
||||
debug!("source file time: {:?} output file time: {:?}", source_time, output_time);
|
||||
panic!(
|
||||
"test source file `{}` is newer than potentially stale output file `{}`.",
|
||||
source_file.display(),
|
||||
test_name
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
src/tools/compiletest/src/runtest/pretty.rs (new file, 104 lines)
@ -0,0 +1,104 @@
|
||||
use std::fs;
|
||||
|
||||
use super::{ProcRes, ReadFrom, TestCx};
|
||||
use crate::util::logv;
|
||||
|
||||
impl TestCx<'_> {
|
||||
pub(super) fn run_pretty_test(&self) {
|
||||
if self.props.pp_exact.is_some() {
|
||||
logv(self.config, "testing for exact pretty-printing".to_owned());
|
||||
} else {
|
||||
logv(self.config, "testing for converging pretty-printing".to_owned());
|
||||
}
|
||||
|
||||
let rounds = match self.props.pp_exact {
|
||||
Some(_) => 1,
|
||||
None => 2,
|
||||
};
|
||||
|
||||
let src = fs::read_to_string(&self.testpaths.file).unwrap();
|
||||
let mut srcs = vec![src];
|
||||
|
||||
let mut round = 0;
|
||||
while round < rounds {
|
||||
logv(
|
||||
self.config,
|
||||
format!("pretty-printing round {} revision {:?}", round, self.revision),
|
||||
);
|
||||
let read_from =
|
||||
if round == 0 { ReadFrom::Path } else { ReadFrom::Stdin(srcs[round].to_owned()) };
|
||||
|
||||
let proc_res = self.print_source(read_from, &self.props.pretty_mode);
|
||||
if !proc_res.status.success() {
|
||||
self.fatal_proc_rec(
|
||||
&format!(
|
||||
"pretty-printing failed in round {} revision {:?}",
|
||||
round, self.revision
|
||||
),
|
||||
&proc_res,
|
||||
);
|
||||
}
|
||||
|
||||
let ProcRes { stdout, .. } = proc_res;
|
||||
srcs.push(stdout);
|
||||
round += 1;
|
||||
}
|
||||
|
||||
let mut expected = match self.props.pp_exact {
|
||||
Some(ref file) => {
|
||||
let filepath = self.testpaths.file.parent().unwrap().join(file);
|
||||
fs::read_to_string(&filepath).unwrap()
|
||||
}
|
||||
None => srcs[srcs.len() - 2].clone(),
|
||||
};
|
||||
let mut actual = srcs[srcs.len() - 1].clone();
|
||||
|
||||
if self.props.pp_exact.is_some() {
|
||||
// Now we have to care about line endings
|
||||
let cr = "\r".to_owned();
|
||||
actual = actual.replace(&cr, "");
|
||||
expected = expected.replace(&cr, "");
|
||||
}
|
||||
|
||||
if !self.config.bless {
|
||||
self.compare_source(&expected, &actual);
|
||||
} else if expected != actual {
|
||||
let filepath_buf;
|
||||
let filepath = match &self.props.pp_exact {
|
||||
Some(file) => {
|
||||
filepath_buf = self.testpaths.file.parent().unwrap().join(file);
|
||||
&filepath_buf
|
||||
}
|
||||
None => &self.testpaths.file,
|
||||
};
|
||||
fs::write(filepath, &actual).unwrap();
|
||||
}
|
||||
|
||||
// If we're only making sure that the output matches then just stop here
|
||||
if self.props.pretty_compare_only {
|
||||
return;
|
||||
}
|
||||
|
||||
// Finally, let's make sure it actually appears to remain valid code
|
||||
let proc_res = self.typecheck_source(actual);
|
||||
if !proc_res.status.success() {
|
||||
self.fatal_proc_rec("pretty-printed source does not typecheck", &proc_res);
|
||||
}
|
||||
|
||||
if !self.props.pretty_expanded {
|
||||
return;
|
||||
}
|
||||
|
||||
// additionally, run `-Zunpretty=expanded` and try to build it.
|
||||
let proc_res = self.print_source(ReadFrom::Path, "expanded");
|
||||
if !proc_res.status.success() {
|
||||
self.fatal_proc_rec("pretty-printing (expanded) failed", &proc_res);
|
||||
}
|
||||
|
||||
let ProcRes { stdout: expanded_src, .. } = proc_res;
|
||||
let proc_res = self.typecheck_source(expanded_src);
|
||||
if !proc_res.status.success() {
|
||||
self.fatal_proc_rec("pretty-printed source (expanded) does not typecheck", &proc_res);
|
||||
}
|
||||
}
|
||||
}
|
src/tools/compiletest/src/runtest/run_make.rs (new file, 518 lines)
@ -0,0 +1,518 @@
|
||||
use std::path::Path;
|
||||
use std::process::{Command, Output, Stdio};
|
||||
use std::{env, fs};
|
||||
|
||||
use super::{ProcRes, TestCx};
|
||||
use crate::util::{copy_dir_all, dylib_env_var};
|
||||
|
||||
impl TestCx<'_> {
|
||||
pub(super) fn run_rmake_test(&self) {
|
||||
let test_dir = &self.testpaths.file;
|
||||
if test_dir.join("rmake.rs").exists() {
|
||||
self.run_rmake_v2_test();
|
||||
} else if test_dir.join("Makefile").exists() {
|
||||
self.run_rmake_legacy_test();
|
||||
} else {
|
||||
self.fatal("failed to find either `rmake.rs` or `Makefile`")
|
||||
}
|
||||
}
|
||||
|
||||
fn run_rmake_legacy_test(&self) {
|
||||
let cwd = env::current_dir().unwrap();
|
||||
let src_root = self.config.src_base.parent().unwrap().parent().unwrap();
|
||||
let src_root = cwd.join(&src_root);
|
||||
|
||||
let tmpdir = cwd.join(self.output_base_name());
|
||||
if tmpdir.exists() {
|
||||
self.aggressive_rm_rf(&tmpdir).unwrap();
|
||||
}
|
||||
fs::create_dir_all(&tmpdir).unwrap();
|
||||
|
||||
let host = &self.config.host;
|
||||
let make = if host.contains("dragonfly")
|
||||
|| host.contains("freebsd")
|
||||
|| host.contains("netbsd")
|
||||
|| host.contains("openbsd")
|
||||
|| host.contains("aix")
|
||||
{
|
||||
"gmake"
|
||||
} else {
|
||||
"make"
|
||||
};
|
||||
|
||||
let mut cmd = Command::new(make);
|
||||
cmd.current_dir(&self.testpaths.file)
|
||||
.stdout(Stdio::piped())
|
||||
.stderr(Stdio::piped())
|
||||
.env("TARGET", &self.config.target)
|
||||
.env("PYTHON", &self.config.python)
|
||||
.env("S", src_root)
|
||||
.env("RUST_BUILD_STAGE", &self.config.stage_id)
|
||||
.env("RUSTC", cwd.join(&self.config.rustc_path))
|
||||
.env("TMPDIR", &tmpdir)
|
||||
.env("LD_LIB_PATH_ENVVAR", dylib_env_var())
|
||||
.env("HOST_RPATH_DIR", cwd.join(&self.config.compile_lib_path))
|
||||
.env("TARGET_RPATH_DIR", cwd.join(&self.config.run_lib_path))
|
||||
.env("LLVM_COMPONENTS", &self.config.llvm_components)
|
||||
// We for sure don't want these tests to run in parallel, so make
|
||||
// sure they don't have access to these vars if we run via `make`
|
||||
// at the top level
|
||||
.env_remove("MAKEFLAGS")
|
||||
.env_remove("MFLAGS")
|
||||
.env_remove("CARGO_MAKEFLAGS");
|
||||
|
||||
if let Some(ref rustdoc) = self.config.rustdoc_path {
|
||||
cmd.env("RUSTDOC", cwd.join(rustdoc));
|
||||
}
|
||||
|
||||
if let Some(ref node) = self.config.nodejs {
|
||||
cmd.env("NODE", node);
|
||||
}
|
||||
|
||||
if let Some(ref linker) = self.config.target_linker {
|
||||
cmd.env("RUSTC_LINKER", linker);
|
||||
}
|
||||
|
||||
if let Some(ref clang) = self.config.run_clang_based_tests_with {
|
||||
cmd.env("CLANG", clang);
|
||||
}
|
||||
|
||||
if let Some(ref filecheck) = self.config.llvm_filecheck {
|
||||
cmd.env("LLVM_FILECHECK", filecheck);
|
||||
}
|
||||
|
||||
if let Some(ref llvm_bin_dir) = self.config.llvm_bin_dir {
|
||||
cmd.env("LLVM_BIN_DIR", llvm_bin_dir);
|
||||
}
|
||||
|
||||
if let Some(ref remote_test_client) = self.config.remote_test_client {
|
||||
cmd.env("REMOTE_TEST_CLIENT", remote_test_client);
|
||||
}
|
||||
|
||||
// We don't want RUSTFLAGS set from the outside to interfere with
|
||||
// compiler flags set in the test cases:
|
||||
cmd.env_remove("RUSTFLAGS");
|
||||
|
||||
// Use dynamic musl for tests because static doesn't allow creating dylibs
|
||||
if self.config.host.contains("musl") {
|
||||
cmd.env("RUSTFLAGS", "-Ctarget-feature=-crt-static").env("IS_MUSL_HOST", "1");
|
||||
}
|
||||
|
||||
if self.config.bless {
|
||||
cmd.env("RUSTC_BLESS_TEST", "--bless");
|
||||
// Assume this option is active if the environment variable is "defined", with _any_ value.
|
||||
// As an example, a `Makefile` can use this option by:
|
||||
//
|
||||
// ifdef RUSTC_BLESS_TEST
|
||||
// cp "$(TMPDIR)"/actual_something.ext expected_something.ext
|
||||
// else
|
||||
// $(DIFF) expected_something.ext "$(TMPDIR)"/actual_something.ext
|
||||
// endif
|
||||
}
|
||||
|
||||
if self.config.target.contains("msvc") && !self.config.cc.is_empty() {
|
||||
// We need to pass a path to `lib.exe`, so assume that `cc` is `cl.exe`
|
||||
// and that `lib.exe` lives next to it.
|
||||
let lib = Path::new(&self.config.cc).parent().unwrap().join("lib.exe");
|
||||
|
||||
// MSYS doesn't like passing flags of the form `/foo` as it thinks it's
|
||||
// a path and instead passes `C:\msys64\foo`, so convert all
|
||||
// `/`-arguments to MSVC here to `-` arguments.
|
||||
let cflags = self
|
||||
.config
|
||||
.cflags
|
||||
.split(' ')
|
||||
.map(|s| s.replace("/", "-"))
|
||||
.collect::<Vec<_>>()
|
||||
.join(" ");
|
||||
let cxxflags = self
|
||||
.config
|
||||
.cxxflags
|
||||
.split(' ')
|
||||
.map(|s| s.replace("/", "-"))
|
||||
.collect::<Vec<_>>()
|
||||
.join(" ");
|
||||
|
||||
cmd.env("IS_MSVC", "1")
|
||||
.env("IS_WINDOWS", "1")
|
||||
.env("MSVC_LIB", format!("'{}' -nologo", lib.display()))
|
||||
.env("MSVC_LIB_PATH", format!("{}", lib.display()))
|
||||
.env("CC", format!("'{}' {}", self.config.cc, cflags))
|
||||
.env("CXX", format!("'{}' {}", &self.config.cxx, cxxflags));
|
||||
} else {
|
||||
cmd.env("CC", format!("{} {}", self.config.cc, self.config.cflags))
|
||||
.env("CXX", format!("{} {}", self.config.cxx, self.config.cxxflags))
|
||||
.env("AR", &self.config.ar);
|
||||
|
||||
if self.config.target.contains("windows") {
|
||||
cmd.env("IS_WINDOWS", "1");
|
||||
}
|
||||
}
|
||||
|
||||
let (output, truncated) =
|
||||
self.read2_abbreviated(cmd.spawn().expect("failed to spawn `make`"));
|
||||
if !output.status.success() {
|
||||
let res = ProcRes {
|
||||
status: output.status,
|
||||
stdout: String::from_utf8_lossy(&output.stdout).into_owned(),
|
||||
stderr: String::from_utf8_lossy(&output.stderr).into_owned(),
|
||||
truncated,
|
||||
cmdline: format!("{:?}", cmd),
|
||||
};
|
||||
self.fatal_proc_rec("make failed", &res);
|
||||
}
|
||||
}
|
||||
|
||||
fn run_rmake_v2_test(&self) {
// For `run-make` V2, we need to perform 2 steps to build and run a `run-make` V2 recipe
// (`rmake.rs`) to run the actual tests. The support library is already built as a tool rust
// library and is available under `build/$TARGET/stageN-tools-bin/librun_make_support.rlib`.
//
// 1. We need to build the recipe `rmake.rs` as a binary and link in the `run_make_support`
// library.
// 2. We need to run the recipe binary.

// So we assume the rust-lang/rust project setup looks like the following (our `.` is the
// top-level directory; entries irrelevant to our purposes are omitted):
//
// ```
// .                   // <- `source_root`
// ├── build/          // <- `build_root`
// ├── compiler/
// ├── library/
// ├── src/
// │   └── tools/
// │       └── run_make_support/
// └── tests
//     └── run-make/
// ```

// `source_root` is the top-level directory containing the rust-lang/rust checkout.
let source_root =
self.config.find_rust_src_root().expect("could not determine rust source root");
// `self.config.build_base` is actually the build base folder + "test" + test suite name; it
// looks like `build/<host_triple>/test/run-make`. But we want `build/<host_triple>/`. Note
// that the `build` directory does not need to be called `build`, nor does it need to be
// under `source_root`, so we must compute it based off of `self.config.build_base`.
let build_root =
self.config.build_base.parent().and_then(Path::parent).unwrap().to_path_buf();

// We construct the following directory tree for each rmake.rs test:
// ```
// <base_dir>/
//     rmake.exe
//     rmake_out/
// ```
// Having the recipe executable separate from the output artifacts directory allows the
// recipes to `remove_dir_all($TMPDIR)` without running into issues related to trying to
// remove a currently running executable, because the recipe executable is not under the
// `rmake_out/` directory.
//
// This setup intentionally diverges from legacy Makefile run-make tests.
let base_dir = self.output_base_name();
if base_dir.exists() {
self.aggressive_rm_rf(&base_dir).unwrap();
}
let rmake_out_dir = base_dir.join("rmake_out");
fs::create_dir_all(&rmake_out_dir).unwrap();

// Copy all input files (apart from rmake.rs) to the temporary directory,
// so that the input directory structure from `tests/run-make/<test>` is mirrored
// to the `rmake_out` directory.
for path in walkdir::WalkDir::new(&self.testpaths.file).min_depth(1) {
let path = path.unwrap().path().to_path_buf();
if path.file_name().is_some_and(|s| s != "rmake.rs") {
let target = rmake_out_dir.join(path.strip_prefix(&self.testpaths.file).unwrap());
if path.is_dir() {
copy_dir_all(&path, target).unwrap();
} else {
fs::copy(&path, target).unwrap();
}
}
}

// `self.config.stage_id` looks like `stage1-<target_triple>`, but we only want
// the `stage1` part as that is what the output directories of bootstrap are prefixed with.
// Note that this *assumes* build layout from bootstrap is produced as:
//
// ```
// build/<target_triple>/ // <- this is `build_root`
// ├── stage0
// ├── stage0-bootstrap-tools
// ├── stage0-codegen
// ├── stage0-rustc
// ├── stage0-std
// ├── stage0-sysroot
// ├── stage0-tools
// ├── stage0-tools-bin
// ├── stage1
// ├── stage1-std
// ├── stage1-tools
// ├── stage1-tools-bin
// └── test
// ```
// FIXME(jieyouxu): improve the communication between bootstrap and compiletest here so
// we don't have to hack out a `stageN`.
let stage = self.config.stage_id.split('-').next().unwrap();

// In order to link in the support library as an rlib when compiling recipes, we need three
// paths:
// 1. Path of the built support library rlib itself.
// 2. Path of the built support library's dependencies directory.
// 3. Path of the built support library's dependencies' dependencies directory.
//
// The paths look like
//
// ```
// build/<target_triple>/
// ├── stageN-tools-bin/
// │   └── librun_make_support.rlib // <- support rlib itself
// ├── stageN-tools/
// │   ├── release/deps/ // <- deps of deps
// │   └── <host_triple>/release/deps/ // <- deps
// ```
//
// FIXME(jieyouxu): there almost certainly is a better way to do this (specifically how the
// support lib and its deps are organized, can't we copy them to the tools-bin dir as
// well?), but this seems to work for now.

let stage_tools_bin = build_root.join(format!("{stage}-tools-bin"));
let support_lib_path = stage_tools_bin.join("librun_make_support.rlib");

let stage_tools = build_root.join(format!("{stage}-tools"));
let support_lib_deps = stage_tools.join(&self.config.host).join("release").join("deps");
let support_lib_deps_deps = stage_tools.join("release").join("deps");

// To compile the recipe with rustc, we need to provide suitable dynamic library search
// paths to rustc. This includes both:
// 1. The "base" dylib search paths that were provided to compiletest, e.g. `LD_LIBRARY_PATH`
// on some linux distros.
// 2. Specific library paths in `self.config.compile_lib_path` needed for running rustc.

let base_dylib_search_paths =
Vec::from_iter(env::split_paths(&env::var(dylib_env_var()).unwrap()));

let host_dylib_search_paths = {
let mut paths = vec![self.config.compile_lib_path.clone()];
paths.extend(base_dylib_search_paths.iter().cloned());
paths
};

// Calculate the path of the recipe binary. As previously discussed, this is placed at
// `<base_dir>/<bin_name>` with `bin_name` being `rmake` or `rmake.exe` depending on
// platform.
let recipe_bin = {
let mut p = base_dir.join("rmake");
p.set_extension(env::consts::EXE_EXTENSION);
p
};

let mut rustc = Command::new(&self.config.rustc_path);
rustc
.arg("-o")
.arg(&recipe_bin)
// Specify library search paths for `run_make_support`.
.arg(format!("-Ldependency={}", &support_lib_path.parent().unwrap().to_string_lossy()))
.arg(format!("-Ldependency={}", &support_lib_deps.to_string_lossy()))
.arg(format!("-Ldependency={}", &support_lib_deps_deps.to_string_lossy()))
// Provide `run_make_support` as extern prelude, so test writers don't need to write
// `extern run_make_support;`.
.arg("--extern")
.arg(format!("run_make_support={}", &support_lib_path.to_string_lossy()))
.arg("--edition=2021")
.arg(&self.testpaths.file.join("rmake.rs"))
// Provide necessary library search paths for rustc.
.env(dylib_env_var(), &env::join_paths(host_dylib_search_paths).unwrap());

// In test code we want to be very pedantic about values being silently discarded that are
// annotated with `#[must_use]`.
rustc.arg("-Dunused_must_use");

// > `cg_clif` uses `COMPILETEST_FORCE_STAGE0=1 ./x.py test --stage 0` for running the rustc
// > test suite. With the introduction of rmake.rs this broke. `librun_make_support.rlib` is
// > compiled using the bootstrap rustc wrapper which sets `--sysroot
// > build/aarch64-unknown-linux-gnu/stage0-sysroot`, but then compiletest will compile
// > `rmake.rs` using the sysroot of the bootstrap compiler causing it to not find the
// > `libstd.rlib` against which `librun_make_support.rlib` is compiled.
//
// The gist here is that we have to pass the proper stage0 sysroot if we want
//
// ```
// $ COMPILETEST_FORCE_STAGE0=1 ./x test run-make --stage 0
// ```
//
// to work correctly.
//
// See <https://github.com/rust-lang/rust/pull/122248> for more background.
if std::env::var_os("COMPILETEST_FORCE_STAGE0").is_some() {
let stage0_sysroot = build_root.join("stage0-sysroot");
rustc.arg("--sysroot").arg(&stage0_sysroot);
}

// Now run rustc to build the recipe.
let res = self.run_command_to_procres(&mut rustc);
if !res.status.success() {
self.fatal_proc_rec("run-make test failed: could not build `rmake.rs` recipe", &res);
}

// To actually run the recipe, we have to provide the recipe with a bunch of information
// passed through env vars.

// Compute stage-specific standard library paths.
let stage_std_path = build_root.join(&stage).join("lib");

// Compute dynamic library search paths for recipes.
let recipe_dylib_search_paths = {
let mut paths = base_dylib_search_paths.clone();
paths.push(support_lib_path.parent().unwrap().to_path_buf());
paths.push(stage_std_path.join("rustlib").join(&self.config.host).join("lib"));
paths
};

// Compute runtime library search paths for recipes. This is target-specific.
let target_runtime_dylib_search_paths = {
let mut paths = vec![rmake_out_dir.clone()];
paths.extend(base_dylib_search_paths.iter().cloned());
paths
};

// FIXME(jieyouxu): please rename `TARGET_RPATH_ENV`, `HOST_RPATH_DIR` and
// `TARGET_RPATH_DIR`, it is **extremely** confusing!
let mut cmd = Command::new(&recipe_bin);
cmd.current_dir(&rmake_out_dir)
.stdout(Stdio::piped())
.stderr(Stdio::piped())
// Provide the target-specific env var that is used to record dylib search paths. For
// example, this could be `LD_LIBRARY_PATH` on some linux distros but `PATH` on Windows.
.env("LD_LIB_PATH_ENVVAR", dylib_env_var())
// Provide the dylib search paths.
.env(dylib_env_var(), &env::join_paths(recipe_dylib_search_paths).unwrap())
// Provide runtime dylib search paths.
.env("TARGET_RPATH_ENV", &env::join_paths(target_runtime_dylib_search_paths).unwrap())
// Provide the target.
.env("TARGET", &self.config.target)
// Some tests unfortunately still need Python, so provide path to a Python interpreter.
.env("PYTHON", &self.config.python)
// Provide path to checkout root. This is the top-level directory containing
// rust-lang/rust checkout.
.env("SOURCE_ROOT", &source_root)
// Provide path to stage-corresponding rustc.
.env("RUSTC", &self.config.rustc_path)
// Provide the directory to libraries that are needed to run the *compiler*. This is not
// to be confused with `TARGET_RPATH_ENV` or `TARGET_RPATH_DIR`. This is needed if the
// recipe wants to invoke rustc.
.env("HOST_RPATH_DIR", &self.config.compile_lib_path)
// Provide the directory to libraries that might be needed to run compiled binaries
// (further compiled by the recipe!).
.env("TARGET_RPATH_DIR", &self.config.run_lib_path)
// Provide which LLVM components are available (e.g. which LLVM components are provided
// through a specific CI runner).
.env("LLVM_COMPONENTS", &self.config.llvm_components);

if let Some(ref rustdoc) = self.config.rustdoc_path {
cmd.env("RUSTDOC", source_root.join(rustdoc));
}

if let Some(ref node) = self.config.nodejs {
cmd.env("NODE", node);
}

if let Some(ref linker) = self.config.target_linker {
cmd.env("RUSTC_LINKER", linker);
}

if let Some(ref clang) = self.config.run_clang_based_tests_with {
cmd.env("CLANG", clang);
}

if let Some(ref filecheck) = self.config.llvm_filecheck {
cmd.env("LLVM_FILECHECK", filecheck);
}

if let Some(ref llvm_bin_dir) = self.config.llvm_bin_dir {
cmd.env("LLVM_BIN_DIR", llvm_bin_dir);
}

if let Some(ref remote_test_client) = self.config.remote_test_client {
cmd.env("REMOTE_TEST_CLIENT", remote_test_client);
}

// We don't want RUSTFLAGS set from the outside to interfere with
// compiler flags set in the test cases:
cmd.env_remove("RUSTFLAGS");

// Use dynamic musl for tests because static doesn't allow creating dylibs
if self.config.host.contains("musl") {
cmd.env("RUSTFLAGS", "-Ctarget-feature=-crt-static").env("IS_MUSL_HOST", "1");
}

if self.config.bless {
// If we're running in `--bless` mode, set an environment variable to tell
// `run_make_support` to bless snapshot files instead of checking them.
//
// The value is this test's source directory, because the support code
// will need that path in order to bless the _original_ snapshot files,
// not the copies in `rmake_out`.
// (See <https://github.com/rust-lang/rust/issues/129038>.)
cmd.env("RUSTC_BLESS_TEST", &self.testpaths.file);
}

if self.config.target.contains("msvc") && !self.config.cc.is_empty() {
// We need to pass a path to `lib.exe`, so assume that `cc` is `cl.exe`
// and that `lib.exe` lives next to it.
let lib = Path::new(&self.config.cc).parent().unwrap().join("lib.exe");

// MSYS doesn't like passing flags of the form `/foo` as it thinks it's
// a path and instead passes `C:\msys64\foo`, so convert all
// `/`-arguments to MSVC here to `-` arguments.
let cflags = self
.config
.cflags
.split(' ')
.map(|s| s.replace("/", "-"))
.collect::<Vec<_>>()
.join(" ");
let cxxflags = self
.config
.cxxflags
.split(' ')
.map(|s| s.replace("/", "-"))
.collect::<Vec<_>>()
.join(" ");

cmd.env("IS_MSVC", "1")
.env("IS_WINDOWS", "1")
.env("MSVC_LIB", format!("'{}' -nologo", lib.display()))
.env("MSVC_LIB_PATH", format!("{}", lib.display()))
// Note: we diverge from legacy run_make and don't lump the `CC` compiler and its
// default flags together.
.env("CC_DEFAULT_FLAGS", &cflags)
.env("CC", &self.config.cc)
.env("CXX_DEFAULT_FLAGS", &cxxflags)
.env("CXX", &self.config.cxx);
} else {
cmd.env("CC_DEFAULT_FLAGS", &self.config.cflags)
.env("CC", &self.config.cc)
.env("CXX_DEFAULT_FLAGS", &self.config.cxxflags)
.env("CXX", &self.config.cxx)
.env("AR", &self.config.ar);

if self.config.target.contains("windows") {
cmd.env("IS_WINDOWS", "1");
}
}

let (Output { stdout, stderr, status }, truncated) =
self.read2_abbreviated(cmd.spawn().expect("failed to spawn `rmake`"));
if !status.success() {
let res = ProcRes {
status,
stdout: String::from_utf8_lossy(&stdout).into_owned(),
stderr: String::from_utf8_lossy(&stderr).into_owned(),
truncated,
cmdline: format!("{:?}", cmd),
};
self.fatal_proc_rec("rmake recipe failed to complete", &res);
}
}
}
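Note (illustration only, not part of this commit): a recipe built by the code above is an ordinary binary; a minimal hypothetical `rmake.rs` could read the environment set up by `run_rmake_v2_test` directly, e.g.:

```rust
// Hypothetical minimal recipe, assuming only the env vars documented above
// (TARGET, RUSTC, SOURCE_ROOT, ...) and that compiletest runs it inside `rmake_out/`.
fn main() {
    let target = std::env::var("TARGET").expect("TARGET is set by compiletest");
    let rustc = std::env::var("RUSTC").expect("RUSTC is set by compiletest");
    println!("recipe running for target {target}, using rustc at {rustc}");
}
```
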

src/tools/compiletest/src/runtest/rustdoc.rs (new file, 34 lines)
@ -0,0 +1,34 @@
use std::process::Command;

use super::{remove_and_create_dir_all, TestCx};

impl TestCx<'_> {
pub(super) fn run_rustdoc_test(&self) {
assert!(self.revision.is_none(), "revisions not relevant here");

let out_dir = self.output_base_dir();
remove_and_create_dir_all(&out_dir);

let proc_res = self.document(&out_dir, &self.testpaths);
if !proc_res.status.success() {
self.fatal_proc_rec("rustdoc failed!", &proc_res);
}

if self.props.check_test_line_numbers_match {
self.check_rustdoc_test_option(proc_res);
} else {
let root = self.config.find_rust_src_root().unwrap();
let mut cmd = Command::new(&self.config.python);
cmd.arg(root.join("src/etc/htmldocck.py")).arg(&out_dir).arg(&self.testpaths.file);
if self.config.bless {
cmd.arg("--bless");
}
let res = self.run_command_to_procres(&mut cmd);
if !res.status.success() {
self.fatal_proc_rec_with_ctx("htmldocck failed!", &res, |mut this| {
this.compare_to_default_rustdoc(&out_dir)
});
}
}
}
}

src/tools/compiletest/src/runtest/rustdoc_json.rs (new file, 48 lines)
@ -0,0 +1,48 @@
use std::process::Command;

use super::{remove_and_create_dir_all, TestCx};

impl TestCx<'_> {
pub(super) fn run_rustdoc_json_test(&self) {
//FIXME: Add bless option.

assert!(self.revision.is_none(), "revisions not relevant here");

let out_dir = self.output_base_dir();
remove_and_create_dir_all(&out_dir);

let proc_res = self.document(&out_dir, &self.testpaths);
if !proc_res.status.success() {
self.fatal_proc_rec("rustdoc failed!", &proc_res);
}

let root = self.config.find_rust_src_root().unwrap();
let mut json_out = out_dir.join(self.testpaths.file.file_stem().unwrap());
json_out.set_extension("json");
let res = self.run_command_to_procres(
Command::new(self.config.jsondocck_path.as_ref().unwrap())
.arg("--doc-dir")
.arg(root.join(&out_dir))
.arg("--template")
.arg(&self.testpaths.file),
);

if !res.status.success() {
self.fatal_proc_rec_with_ctx("jsondocck failed!", &res, |_| {
println!("Rustdoc Output:");
proc_res.print_info();
})
}

let mut json_out = out_dir.join(self.testpaths.file.file_stem().unwrap());
json_out.set_extension("json");

let res = self.run_command_to_procres(
Command::new(self.config.jsondoclint_path.as_ref().unwrap()).arg(&json_out),
);

if !res.status.success() {
self.fatal_proc_rec("jsondoclint failed!", &res);
}
}
}

src/tools/compiletest/src/runtest/ui.rs (new file, 233 lines)
@ -0,0 +1,233 @@
use std::collections::HashSet;
use std::fs::OpenOptions;
use std::io::Write;

use rustfix::{apply_suggestions, get_suggestions_from_json, Filter};
use tracing::debug;

use super::{
AllowUnused, Emit, ErrorKind, FailMode, LinkToAux, PassMode, TargetLocation, TestCx,
TestOutput, Truncated, WillExecute, UI_FIXED,
};
use crate::{errors, json};

impl TestCx<'_> {
pub(super) fn run_ui_test(&self) {
if let Some(FailMode::Build) = self.props.fail_mode {
// Make sure a build-fail test cannot fail due to failing analysis (e.g. typeck).
let pm = Some(PassMode::Check);
let proc_res =
self.compile_test_general(WillExecute::No, Emit::Metadata, pm, Vec::new());
self.check_if_test_should_compile(&proc_res, pm);
}

let pm = self.pass_mode();
let should_run = self.should_run(pm);
let emit_metadata = self.should_emit_metadata(pm);
let proc_res = self.compile_test(should_run, emit_metadata);
self.check_if_test_should_compile(&proc_res, pm);
if matches!(proc_res.truncated, Truncated::Yes)
&& !self.props.dont_check_compiler_stdout
&& !self.props.dont_check_compiler_stderr
{
self.fatal_proc_rec(
"compiler output got truncated, cannot compare with reference file",
&proc_res,
);
}

// if the user specified a format in the ui test
// print the output to the stderr file, otherwise extract
// the rendered error messages from json and print them
let explicit = self.props.compile_flags.iter().any(|s| s.contains("--error-format"));

let expected_fixed = self.load_expected_output(UI_FIXED);

self.check_and_prune_duplicate_outputs(&proc_res, &[], &[]);

let mut errors = self.load_compare_outputs(&proc_res, TestOutput::Compile, explicit);
let rustfix_input = json::rustfix_diagnostics_only(&proc_res.stderr);

if self.config.compare_mode.is_some() {
// don't test rustfix with nll right now
} else if self.config.rustfix_coverage {
// Find out which tests have `MachineApplicable` suggestions but are missing
// `run-rustfix` or `run-rustfix-only-machine-applicable` headers.
//
// This will return an empty `Vec` in case the executed test file has a
// `compile-flags: --error-format=xxxx` header with a value other than `json`.
let suggestions = get_suggestions_from_json(
&rustfix_input,
&HashSet::new(),
Filter::MachineApplicableOnly,
)
.unwrap_or_default();
if !suggestions.is_empty()
&& !self.props.run_rustfix
&& !self.props.rustfix_only_machine_applicable
{
let mut coverage_file_path = self.config.build_base.clone();
coverage_file_path.push("rustfix_missing_coverage.txt");
debug!("coverage_file_path: {}", coverage_file_path.display());

let mut file = OpenOptions::new()
.create(true)
.append(true)
.open(coverage_file_path.as_path())
.expect("could not create or open file");

if let Err(e) = writeln!(file, "{}", self.testpaths.file.display()) {
panic!("couldn't write to {}: {e:?}", coverage_file_path.display());
}
}
} else if self.props.run_rustfix {
// Apply suggestions from rustc to the code itself
let unfixed_code = self.load_expected_output_from_path(&self.testpaths.file).unwrap();
let suggestions = get_suggestions_from_json(
&rustfix_input,
&HashSet::new(),
if self.props.rustfix_only_machine_applicable {
Filter::MachineApplicableOnly
} else {
Filter::Everything
},
)
.unwrap();
let fixed_code = apply_suggestions(&unfixed_code, &suggestions).unwrap_or_else(|e| {
panic!(
"failed to apply suggestions for {:?} with rustfix: {}",
self.testpaths.file, e
)
});

errors += self.compare_output("fixed", &fixed_code, &expected_fixed);
} else if !expected_fixed.is_empty() {
panic!(
"the `//@ run-rustfix` directive wasn't found but a `*.fixed` \
file was found"
);
}

if errors > 0 {
println!("To update references, rerun the tests and pass the `--bless` flag");
let relative_path_to_file =
self.testpaths.relative_dir.join(self.testpaths.file.file_name().unwrap());
println!(
"To only update this specific test, also pass `--test-args {}`",
relative_path_to_file.display(),
);
self.fatal_proc_rec(
&format!("{} errors occurred comparing output.", errors),
&proc_res,
);
}

let expected_errors = errors::load_errors(&self.testpaths.file, self.revision);

if let WillExecute::Yes = should_run {
let proc_res = self.exec_compiled_test();
let run_output_errors = if self.props.check_run_results {
self.load_compare_outputs(&proc_res, TestOutput::Run, explicit)
} else {
0
};
if run_output_errors > 0 {
self.fatal_proc_rec(
&format!("{} errors occurred comparing run output.", run_output_errors),
&proc_res,
);
}
if self.should_run_successfully(pm) {
if !proc_res.status.success() {
self.fatal_proc_rec("test run failed!", &proc_res);
}
} else if proc_res.status.success() {
self.fatal_proc_rec("test run succeeded!", &proc_res);
}

if !self.props.error_patterns.is_empty() || !self.props.regex_error_patterns.is_empty()
{
// "// error-pattern" comments
let output_to_check = self.get_output(&proc_res);
self.check_all_error_patterns(&output_to_check, &proc_res, pm);
}
}

debug!(
"run_ui_test: explicit={:?} config.compare_mode={:?} expected_errors={:?} \
proc_res.status={:?} props.error_patterns={:?}",
explicit,
self.config.compare_mode,
expected_errors,
proc_res.status,
self.props.error_patterns
);

let check_patterns = should_run == WillExecute::No
&& (!self.props.error_patterns.is_empty()
|| !self.props.regex_error_patterns.is_empty());
if !explicit && self.config.compare_mode.is_none() {
let check_annotations = !check_patterns || !expected_errors.is_empty();

if check_annotations {
// "//~ERROR comments"
self.check_expected_errors(expected_errors, &proc_res);
}
} else if explicit && !expected_errors.is_empty() {
let msg = format!(
"line {}: cannot combine `--error-format` with {} annotations; use `error-pattern` instead",
expected_errors[0].line_num,
expected_errors[0].kind.unwrap_or(ErrorKind::Error),
);
self.fatal(&msg);
}
if check_patterns {
// "// error-pattern" comments
let output_to_check = self.get_output(&proc_res);
self.check_all_error_patterns(&output_to_check, &proc_res, pm);
}

if self.props.run_rustfix && self.config.compare_mode.is_none() {
// And finally, compile the fixed code and make sure it both
// succeeds and has no diagnostics.
let mut rustc = self.make_compile_args(
&self.expected_output_path(UI_FIXED),
TargetLocation::ThisFile(self.make_exe_name()),
emit_metadata,
AllowUnused::No,
LinkToAux::Yes,
Vec::new(),
);

// If a test is revisioned, its fixed source file can be named "a.foo.fixed", and,
// well, "a.foo" isn't a valid crate name. So we explicitly mangle the test name
// (including the revision) here to avoid the test writer having to manually specify a
// `#![crate_name = "..."]` as a workaround. This is okay since we're only checking if
// the fixed code is compilable.
if self.revision.is_some() {
let crate_name =
self.testpaths.file.file_stem().expect("test must have a file stem");
// crate name must be alphanumeric or `_`.
let crate_name =
crate_name.to_str().expect("crate name implies file name must be valid UTF-8");
// replace `a.foo` -> `a__foo` for crate name purposes.
// replace `revision-name-with-dashes` -> `revision_name_with_underscores`
let crate_name = crate_name.replace('.', "__");
let crate_name = crate_name.replace('-', "_");
rustc.arg("--crate-name");
rustc.arg(crate_name);
}

let res = self.compose_and_run_compiler(rustc, None, self.testpaths);
if !res.status.success() {
self.fatal_proc_rec("failed to compile fixed code", &res);
}
if !res.stderr.is_empty()
&& !self.props.rustfix_only_machine_applicable
&& !json::rustfix_diagnostics_only(&res.stderr).is_empty()
{
self.fatal_proc_rec("fixed code is still producing diagnostics", &res);
}
}
}
}
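Note (illustration only, not part of this commit): the two `replace` calls above turn a dotted/dashed file stem into a valid crate name, e.g.:

```rust
// Worked example of the dot/dash mangling used when deriving a `--crate-name` for the
// fixed code; the stem value here is illustrative.
fn main() {
    let file_stem = "my-test.rev-a";
    let crate_name = file_stem.replace('.', "__").replace('-', "_");
    assert_eq!(crate_name, "my_test__rev_a");
}
```
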

src/tools/compiletest/src/runtest/valgrind.rs (new file, 34 lines)
@ -0,0 +1,34 @@
use super::{Emit, TestCx, WillExecute};

impl TestCx<'_> {
pub(super) fn run_valgrind_test(&self) {
assert!(self.revision.is_none(), "revisions not relevant here");

// FIXME(jieyouxu): does this really make any sense? If a valgrind test isn't testing
// valgrind, what is it even testing?
if self.config.valgrind_path.is_none() {
assert!(!self.config.force_valgrind);
return self.run_rpass_test();
}

let should_run = self.run_if_enabled();
let mut proc_res = self.compile_test(should_run, Emit::None);

if !proc_res.status.success() {
self.fatal_proc_rec("compilation failed!", &proc_res);
}

if let WillExecute::Disabled = should_run {
return;
}

let mut new_config = self.config.clone();
new_config.runner = new_config.valgrind_path.clone();
let new_cx = TestCx { config: &new_config, ..*self };
proc_res = new_cx.exec_compiled_test();

if !proc_res.status.success() {
self.fatal_proc_rec("test run failed!", &proc_res);
}
}
}

@ -82,6 +82,7 @@ fn tidy_error(args: &str) -> std::io::Result<()> {
pub mod run_make_tests;
pub mod rustdoc_css_themes;
pub mod rustdoc_gui_tests;
pub mod rustdoc_templates;
pub mod style;
pub mod target_policy;
pub mod target_specific_tests;

@ -108,6 +108,7 @@ macro_rules! check {
check!(mir_opt_tests, &tests_path, bless);
check!(rustdoc_gui_tests, &tests_path);
check!(rustdoc_css_themes, &librustdoc_path);
check!(rustdoc_templates, &librustdoc_path);
check!(known_bug, &crashes_path);
check!(unknown_revision, &tests_path);


src/tools/tidy/src/rustdoc_templates.rs (new file, 58 lines)
@ -0,0 +1,58 @@
//! Tidy check to ensure that rustdoc templates didn't forget a `{# #}` to strip extra whitespace
//! characters.

use std::ffi::OsStr;
use std::path::Path;

use ignore::DirEntry;

use crate::walk::walk;

// Array containing `("beginning of tag", "end of tag")`.
const TAGS: &[(&str, &str)] = &[("{#", "#}"), ("{%", "%}"), ("{{", "}}")];

pub fn check(librustdoc_path: &Path, bad: &mut bool) {
walk(
&librustdoc_path.join("html/templates"),
|path, is_dir| is_dir || !path.extension().is_some_and(|ext| ext == OsStr::new("html")),
&mut |path: &DirEntry, file_content: &str| {
let mut lines = file_content.lines().enumerate().peekable();

while let Some((pos, line)) = lines.next() {
let line = line.trim();
if TAGS.iter().any(|(_, tag)| line.ends_with(tag)) {
continue;
}
let Some(next_line) = lines.peek().map(|(_, next_line)| next_line.trim()) else {
continue;
};
if TAGS.iter().any(|(tag, _)| next_line.starts_with(tag)) {
continue;
}
// Maybe this is a multi-line tag, let's filter it out then.
match TAGS.iter().find_map(|(tag, end_tag)| {
if line.rfind(tag).is_some() { Some(end_tag) } else { None }
}) {
None => {
// No it's not, let's error.
tidy_error!(
bad,
"`{}` at line {}: missing `{{# #}}` at the end of the line",
path.path().display(),
pos + 1,
);
}
Some(end_tag) => {
// We skip the tag.
while let Some((_, next_line)) = lines.peek() {
if next_line.contains(end_tag) {
break;
}
lines.next();
}
}
}
}
},
);
}
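Note (illustration only, not part of this commit): ignoring the multi-line-tag handling, the core predicate of the new tidy check can be sketched as follows; the names and sample lines are illustrative:

```rust
// Simplified sketch of the check above: a template line is accepted if it ends with a
// closing tag, or if the next line starts with an opening tag; otherwise tidy would ask
// for a trailing `{# #}` comment to strip the newline.
const TAGS: &[(&str, &str)] = &[("{#", "#}"), ("{%", "%}"), ("{{", "}}")];

fn line_is_ok(line: &str, next_line: &str) -> bool {
    TAGS.iter().any(|(_, end)| line.trim_end().ends_with(end))
        || TAGS.iter().any(|(start, _)| next_line.trim_start().starts_with(start))
}

fn main() {
    assert!(line_is_ok("<div class=\"docblock\"> {# #}", "text"));
    assert!(line_is_ok("<div>", "{% if item %}"));
    assert!(!line_is_ok("<div>", "text")); // this is the case the check reports
}
```
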

@ -5,7 +5,7 @@

#![feature(decl_macro)]
#![cfg(FALSE)]
#![feature(box_syntax)]
#![feature(box_patterns)]

macro mac() {} // OK


tests/ui/macros/auxiliary/metavar_2018.rs (new file, 14 lines)
@ -0,0 +1,14 @@
//@ edition: 2018
#[macro_export]
macro_rules! make_matcher {
($name:ident, $fragment_type:ident, $d:tt) => {
#[macro_export]
macro_rules! $name {
($d _:$fragment_type) => { true };
(const { 0 }) => { false };
(A | B) => { false };
}
};
}
make_matcher!(is_expr_from_2018, expr, $);
make_matcher!(is_pat_from_2018, pat, $);

tests/ui/macros/metavar_cross_edition_recursive_macros.rs (new file, 38 lines)
@ -0,0 +1,38 @@
//@ compile-flags: --edition=2024 -Z unstable-options
//@ aux-build: metavar_2018.rs
//@ known-bug: #130484
//@ run-pass

// This test captures the behavior of macro-generating-macros with fragment
// specifiers across edition boundaries.

#![feature(expr_fragment_specifier_2024)]
#![feature(macro_metavar_expr)]
#![allow(incomplete_features)]

extern crate metavar_2018;

use metavar_2018::{is_expr_from_2018, is_pat_from_2018, make_matcher};

make_matcher!(is_expr_from_2024, expr, $);
make_matcher!(is_pat_from_2024, pat, $);

fn main() {
// Check expr
let from_2018 = is_expr_from_2018!(const { 0 });
dbg!(from_2018);
let from_2024 = is_expr_from_2024!(const { 0 });
dbg!(from_2024);

assert!(!from_2018);
assert!(!from_2024); // from_2024 will be true once #130484 is fixed

// Check pat
let from_2018 = is_pat_from_2018!(A | B);
dbg!(from_2018);
let from_2024 = is_pat_from_2024!(A | B);
dbg!(from_2024);

assert!(!from_2018);
assert!(!from_2024); // from_2024 will be true once #130484 is fixed
}

tests/ui/traits/sized-coniductive.rs (new file, 14 lines)
@ -0,0 +1,14 @@
//@ check-pass
// https://github.com/rust-lang/rust/issues/129541

#[derive(Clone)]
struct Test {
field: std::borrow::Cow<'static, [Self]>,
}

#[derive(Clone)]
struct Hello {
a: <[Hello] as std::borrow::ToOwned>::Owned,
}

fn main(){}