Rollup merge of #125719 - Zalathar:run-coverage, r=jieyouxu
Extract coverage-specific code out of `compiletest::runtest`

I had been vaguely intending to do this for a while, but seeing #89475 on the compiletest dashboard inspired me to actually go and do it.

This moves a few hundred lines of coverage-specific code out of the main module, making navigation a bit easier. There is still a small amount of coverage-specific logic in broader functions in that module, since it can't easily be moved. This is just cut-and-paste plus fixing visibility and imports, so no functional changes.

I also removed the unit test for anonymizing line numbers in MC/DC reports, as foreshadowed by the comment I wrote when adding it. That functionality is now adequately exercised by the actual snapshot tests for MC/DC coverage. (Removing the test now avoids the need to move it, or to make the function it calls visible.)
This commit is contained in: 6ef3dd0d38
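The mechanics of the split, in miniature: `runtest.rs` declares a private `mod coverage;`, and the moved functions stay inherent methods of `TestCx`, defined in a second `impl` block inside that submodule; only the entry points called from the dispatch `match` need `pub(crate)` visibility, while the helpers remain private to the new file. A minimal single-file sketch of that pattern (only `TestCx`, `run_coverage_map_test`, and the module names mirror the diff; everything else is hypothetical):

mod runtest {
    pub struct TestCx {
        pub name: String,
    }

    impl TestCx {
        pub fn run(&self) {
            // Dispatch into a method defined in the child module's impl block.
            self.run_coverage_map_test(); // see self::coverage
        }
    }

    mod coverage {
        use super::TestCx;

        // A second inherent-impl block for the same type, living in another
        // module of the same crate.
        impl TestCx {
            // `pub(crate)` so the parent module's dispatch code can call it.
            pub(crate) fn run_coverage_map_test(&self) {
                self.normalize_report();
            }

            // Helpers used only inside this module can stay private.
            fn normalize_report(&self) {
                println!("normalizing coverage report for {}", self.name);
            }
        }
    }
}

fn main() {
    let cx = runtest::TestCx { name: "demo".to_owned() };
    cx.run();
}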
@@ -9,7 +9,7 @@ use crate::common::{Codegen, CodegenUnits, DebugInfo, Debugger, Rustdoc};
 use crate::common::{CompareMode, FailMode, PassMode};
 use crate::common::{Config, TestPaths};
 use crate::common::{CoverageMap, CoverageRun, Pretty, RunPassValgrind};
-use crate::common::{UI_COVERAGE, UI_COVERAGE_MAP, UI_RUN_STDERR, UI_RUN_STDOUT};
+use crate::common::{UI_RUN_STDERR, UI_RUN_STDOUT};
 use crate::compute_diff::{write_diff, write_filtered_diff};
 use crate::errors::{self, Error, ErrorKind};
 use crate::header::TestProps;
@@ -41,6 +41,7 @@ use tracing::*;
 use crate::extract_gdb_version;
 use crate::is_android_gdb_target;

+mod coverage;
 mod debugger;
 use debugger::DebuggerCommands;

@@ -53,6 +54,7 @@ macro_rules! static_regex {
         RE.get_or_init(|| ::regex::Regex::new($re).unwrap())
     }};
 }
+use static_regex;

 const FAKE_SRC_BASE: &str = "fake-test-src-base";

@@ -267,8 +269,8 @@ impl<'test> TestCx<'test> {
             MirOpt => self.run_mir_opt_test(),
             Assembly => self.run_assembly_test(),
             JsDocTest => self.run_js_doc_test(),
-            CoverageMap => self.run_coverage_map_test(),
-            CoverageRun => self.run_coverage_run_test(),
+            CoverageMap => self.run_coverage_map_test(), // see self::coverage
+            CoverageRun => self.run_coverage_run_test(), // see self::coverage
             Crashes => self.run_crash_test(),
         }
     }
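One line in the hunks above deserves a note: `use static_regex;` is what turns the `macro_rules!` macro into a path-addressable item, so the new submodule can import it as `crate::runtest::static_regex` (as its import list further down does) instead of relying on textual ordering. A self-contained sketch of that re-export mechanism, with hypothetical names:

mod parent {
    // A textual macro_rules! macro...
    macro_rules! shout {
        ($msg:expr) => {
            format!("{}!", $msg)
        };
    }
    // ...re-exported as a module-scoped item so that child modules can name
    // it by path (`pub(crate) use` would widen it to the whole crate).
    use shout;

    pub mod child {
        // Import the macro from the parent module by path.
        use crate::parent::shout;

        pub fn demo() -> String {
            shout!("coverage")
        }
    }
}

fn main() {
    println!("{}", parent::child::demo());
}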
@@ -504,224 +506,6 @@ impl<'test> TestCx<'test> {
         }
     }

-    fn run_coverage_map_test(&self) {
-        let Some(coverage_dump_path) = &self.config.coverage_dump_path else {
-            self.fatal("missing --coverage-dump");
-        };
-
-        let (proc_res, llvm_ir_path) = self.compile_test_and_save_ir();
-        if !proc_res.status.success() {
-            self.fatal_proc_rec("compilation failed!", &proc_res);
-        }
-        drop(proc_res);
-
-        let mut dump_command = Command::new(coverage_dump_path);
-        dump_command.arg(llvm_ir_path);
-        let proc_res = self.run_command_to_procres(&mut dump_command);
-        if !proc_res.status.success() {
-            self.fatal_proc_rec("coverage-dump failed!", &proc_res);
-        }
-
-        let kind = UI_COVERAGE_MAP;
-
-        let expected_coverage_dump = self.load_expected_output(kind);
-        let actual_coverage_dump = self.normalize_output(&proc_res.stdout, &[]);
-
-        let coverage_dump_errors =
-            self.compare_output(kind, &actual_coverage_dump, &expected_coverage_dump);
-
-        if coverage_dump_errors > 0 {
-            self.fatal_proc_rec(
-                &format!("{coverage_dump_errors} errors occurred comparing coverage output."),
-                &proc_res,
-            );
-        }
-    }
-
-    fn run_coverage_run_test(&self) {
-        let should_run = self.run_if_enabled();
-        let proc_res = self.compile_test(should_run, Emit::None);
-
-        if !proc_res.status.success() {
-            self.fatal_proc_rec("compilation failed!", &proc_res);
-        }
-        drop(proc_res);
-
-        if let WillExecute::Disabled = should_run {
-            return;
-        }
-
-        let profraw_path = self.output_base_dir().join("default.profraw");
-        let profdata_path = self.output_base_dir().join("default.profdata");
-
-        // Delete any existing profraw/profdata files to rule out unintended
-        // interference between repeated test runs.
-        if profraw_path.exists() {
-            std::fs::remove_file(&profraw_path).unwrap();
-        }
-        if profdata_path.exists() {
-            std::fs::remove_file(&profdata_path).unwrap();
-        }
-
-        let proc_res = self.exec_compiled_test_general(
-            &[("LLVM_PROFILE_FILE", &profraw_path.to_str().unwrap())],
-            false,
-        );
-        if self.props.failure_status.is_some() {
-            self.check_correct_failure_status(&proc_res);
-        } else if !proc_res.status.success() {
-            self.fatal_proc_rec("test run failed!", &proc_res);
-        }
-        drop(proc_res);
-
-        let mut profraw_paths = vec![profraw_path];
-        let mut bin_paths = vec![self.make_exe_name()];
-
-        if self.config.suite == "coverage-run-rustdoc" {
-            self.run_doctests_for_coverage(&mut profraw_paths, &mut bin_paths);
-        }
-
-        // Run `llvm-profdata merge` to index the raw coverage output.
-        let proc_res = self.run_llvm_tool("llvm-profdata", |cmd| {
-            cmd.args(["merge", "--sparse", "--output"]);
-            cmd.arg(&profdata_path);
-            cmd.args(&profraw_paths);
-        });
-        if !proc_res.status.success() {
-            self.fatal_proc_rec("llvm-profdata merge failed!", &proc_res);
-        }
-        drop(proc_res);
-
-        // Run `llvm-cov show` to produce a coverage report in text format.
-        let proc_res = self.run_llvm_tool("llvm-cov", |cmd| {
-            cmd.args(["show", "--format=text", "--show-line-counts-or-regions"]);
-
-            cmd.arg("--Xdemangler");
-            cmd.arg(self.config.rust_demangler_path.as_ref().unwrap());
-
-            cmd.arg("--instr-profile");
-            cmd.arg(&profdata_path);
-
-            for bin in &bin_paths {
-                cmd.arg("--object");
-                cmd.arg(bin);
-            }
-
-            cmd.args(&self.props.llvm_cov_flags);
-        });
-        if !proc_res.status.success() {
-            self.fatal_proc_rec("llvm-cov show failed!", &proc_res);
-        }
-
-        let kind = UI_COVERAGE;
-
-        let expected_coverage = self.load_expected_output(kind);
-        let normalized_actual_coverage =
-            self.normalize_coverage_output(&proc_res.stdout).unwrap_or_else(|err| {
-                self.fatal_proc_rec(&err, &proc_res);
-            });
-
-        let coverage_errors =
-            self.compare_output(kind, &normalized_actual_coverage, &expected_coverage);
-
-        if coverage_errors > 0 {
-            self.fatal_proc_rec(
-                &format!("{} errors occurred comparing coverage output.", coverage_errors),
-                &proc_res,
-            );
-        }
-    }
-
-    /// Run any doctests embedded in this test file, and add any resulting
-    /// `.profraw` files and doctest executables to the given vectors.
-    fn run_doctests_for_coverage(
-        &self,
-        profraw_paths: &mut Vec<PathBuf>,
-        bin_paths: &mut Vec<PathBuf>,
-    ) {
-        // Put .profraw files and doctest executables in dedicated directories,
-        // to make it easier to glob them all later.
-        let profraws_dir = self.output_base_dir().join("doc_profraws");
-        let bins_dir = self.output_base_dir().join("doc_bins");
-
-        // Remove existing directories to prevent cross-run interference.
-        if profraws_dir.try_exists().unwrap() {
-            std::fs::remove_dir_all(&profraws_dir).unwrap();
-        }
-        if bins_dir.try_exists().unwrap() {
-            std::fs::remove_dir_all(&bins_dir).unwrap();
-        }
-
-        let mut rustdoc_cmd =
-            Command::new(self.config.rustdoc_path.as_ref().expect("--rustdoc-path not passed"));
-
-        // In general there will be multiple doctest binaries running, so we
-        // tell the profiler runtime to write their coverage data into separate
-        // profraw files.
-        rustdoc_cmd.env("LLVM_PROFILE_FILE", profraws_dir.join("%p-%m.profraw"));
-
-        rustdoc_cmd.args(["--test", "-Cinstrument-coverage"]);
-
-        // Without this, the doctests complain about not being able to find
-        // their enclosing file's crate for some reason.
-        rustdoc_cmd.args(["--crate-name", "workaround_for_79771"]);
-
-        // Persist the doctest binaries so that `llvm-cov show` can read their
-        // embedded coverage mappings later.
-        rustdoc_cmd.arg("-Zunstable-options");
-        rustdoc_cmd.arg("--persist-doctests");
-        rustdoc_cmd.arg(&bins_dir);
-
-        rustdoc_cmd.arg("-L");
-        rustdoc_cmd.arg(self.aux_output_dir_name());
-
-        rustdoc_cmd.arg(&self.testpaths.file);
-
-        let proc_res = self.compose_and_run_compiler(rustdoc_cmd, None);
-        if !proc_res.status.success() {
-            self.fatal_proc_rec("rustdoc --test failed!", &proc_res)
-        }
-
-        fn glob_iter(path: impl AsRef<Path>) -> impl Iterator<Item = PathBuf> {
-            let path_str = path.as_ref().to_str().unwrap();
-            let iter = glob(path_str).unwrap();
-            iter.map(Result::unwrap)
-        }
-
-        // Find all profraw files in the profraw directory.
-        for p in glob_iter(profraws_dir.join("*.profraw")) {
-            profraw_paths.push(p);
-        }
-        // Find all executables in the `--persist-doctests` directory, while
-        // avoiding other file types (e.g. `.pdb` on Windows). This doesn't
-        // need to be perfect, as long as it can handle the files actually
-        // produced by `rustdoc --test`.
-        for p in glob_iter(bins_dir.join("**/*")) {
-            let is_bin = p.is_file()
-                && match p.extension() {
-                    None => true,
-                    Some(ext) => ext == OsStr::new("exe"),
-                };
-            if is_bin {
-                bin_paths.push(p);
-            }
-        }
-    }
-
-    fn run_llvm_tool(&self, name: &str, configure_cmd_fn: impl FnOnce(&mut Command)) -> ProcRes {
-        let tool_path = self
-            .config
-            .llvm_bin_dir
-            .as_ref()
-            .expect("this test expects the LLVM bin dir to be available")
-            .join(name);
-
-        let mut cmd = Command::new(tool_path);
-        configure_cmd_fn(&mut cmd);
-
-        self.run_command_to_procres(&mut cmd)
-    }
-
     fn run_command_to_procres(&self, cmd: &mut Command) -> ProcRes {
         let output = cmd.output().unwrap_or_else(|e| panic!("failed to exec `{cmd:?}`: {e:?}"));

@@ -737,143 +521,6 @@ impl<'test> TestCx<'test> {
         proc_res
     }

-    fn normalize_coverage_output(&self, coverage: &str) -> Result<String, String> {
-        let normalized = self.normalize_output(coverage, &[]);
-        let normalized = Self::anonymize_coverage_line_numbers(&normalized);
-
-        let mut lines = normalized.lines().collect::<Vec<_>>();
-
-        Self::sort_coverage_file_sections(&mut lines)?;
-        Self::sort_coverage_subviews(&mut lines)?;
-
-        let joined_lines = lines.iter().flat_map(|line| [line, "\n"]).collect::<String>();
-        Ok(joined_lines)
-    }
-
-    /// Replace line numbers in coverage reports with the placeholder `LL`,
-    /// so that the tests are less sensitive to lines being added/removed.
-    fn anonymize_coverage_line_numbers(coverage: &str) -> String {
-        // The coverage reporter prints line numbers at the start of a line.
-        // They are truncated or left-padded to occupy exactly 5 columns.
-        // (`LineNumberColumnWidth` in `SourceCoverageViewText.cpp`.)
-        // A pipe character `|` appears immediately after the final digit.
-        //
-        // Line numbers that appear inside expansion/instantiation subviews
-        // have an additional prefix of ` |` for each nesting level.
-        //
-        // Branch views also include the relevant line number, so we want to
-        // redact those too. (These line numbers don't have padding.)
-        //
-        // Note: The pattern `(?m:^)` matches the start of a line.
-
-        // ` 1|` => ` LL|`
-        // ` 10|` => ` LL|`
-        // ` 100|` => ` LL|`
-        // ` | 1000|` => ` | LL|`
-        // ` | | 1000|` => ` | | LL|`
-        let coverage = static_regex!(r"(?m:^)(?<prefix>(?: \|)*) *[0-9]+\|")
-            .replace_all(&coverage, "${prefix} LL|");
-
-        // ` | Branch (1:` => ` | Branch (LL:`
-        // ` | | Branch (10:` => ` | | Branch (LL:`
-        let coverage = static_regex!(r"(?m:^)(?<prefix>(?: \|)+ Branch \()[0-9]+:")
-            .replace_all(&coverage, "${prefix}LL:");
-
-        // ` |---> MC/DC Decision Region (1:30) to (2:` => ` |---> MC/DC Decision Region (LL:30) to (LL:`
-        let coverage =
-            static_regex!(r"(?m:^)(?<prefix>(?: \|)+---> MC/DC Decision Region \()[0-9]+:(?<middle>[0-9]+\) to \()[0-9]+:")
-                .replace_all(&coverage, "${prefix}LL:${middle}LL:");
-
-        // ` | Condition C1 --> (1:` => ` | Condition C1 --> (LL:`
-        let coverage =
-            static_regex!(r"(?m:^)(?<prefix>(?: \|)+ Condition C[0-9]+ --> \()[0-9]+:")
-                .replace_all(&coverage, "${prefix}LL:");
-
-        coverage.into_owned()
-    }
-
-    /// Coverage reports can describe multiple source files, separated by
-    /// blank lines. The order of these files is unpredictable (since it
-    /// depends on implementation details), so we need to sort the file
-    /// sections into a consistent order before comparing against a snapshot.
-    fn sort_coverage_file_sections(coverage_lines: &mut Vec<&str>) -> Result<(), String> {
-        // Group the lines into file sections, separated by blank lines.
-        let mut sections = coverage_lines.split(|line| line.is_empty()).collect::<Vec<_>>();
-
-        // The last section should be empty, representing an extra trailing blank line.
-        if !sections.last().is_some_and(|last| last.is_empty()) {
-            return Err("coverage report should end with an extra blank line".to_owned());
-        }
-
-        // Sort the file sections (not including the final empty "section").
-        let except_last = sections.len() - 1;
-        (&mut sections[..except_last]).sort();
-
-        // Join the file sections back into a flat list of lines, with
-        // sections separated by blank lines.
-        let joined = sections.join(&[""] as &[_]);
-        assert_eq!(joined.len(), coverage_lines.len());
-        *coverage_lines = joined;
-
-        Ok(())
-    }
-
-    fn sort_coverage_subviews(coverage_lines: &mut Vec<&str>) -> Result<(), String> {
-        let mut output_lines = Vec::new();
-
-        // We accumulate a list of zero or more "subviews", where each
-        // subview is a list of one or more lines.
-        let mut subviews: Vec<Vec<&str>> = Vec::new();
-
-        fn flush<'a>(subviews: &mut Vec<Vec<&'a str>>, output_lines: &mut Vec<&'a str>) {
-            if subviews.is_empty() {
-                return;
-            }
-
-            // Take and clear the list of accumulated subviews.
-            let mut subviews = std::mem::take(subviews);
-
-            // The last "subview" should be just a boundary line on its own,
-            // so exclude it when sorting the other subviews.
-            let except_last = subviews.len() - 1;
-            (&mut subviews[..except_last]).sort();
-
-            for view in subviews {
-                for line in view {
-                    output_lines.push(line);
-                }
-            }
-        }
-
-        for (line, line_num) in coverage_lines.iter().zip(1..) {
-            if line.starts_with(" ------------------") {
-                // This is a subview boundary line, so start a new subview.
-                subviews.push(vec![line]);
-            } else if line.starts_with(" |") {
-                // Add this line to the current subview.
-                subviews
-                    .last_mut()
-                    .ok_or(format!(
-                        "unexpected subview line outside of a subview on line {line_num}"
-                    ))?
-                    .push(line);
-            } else {
-                // This line is not part of a subview, so sort and print any
-                // accumulated subviews, and then print the line as-is.
-                flush(&mut subviews, &mut output_lines);
-                output_lines.push(line);
-            }
-        }
-
-        flush(&mut subviews, &mut output_lines);
-        assert!(subviews.is_empty());
-
-        assert_eq!(output_lines.len(), coverage_lines.len());
-        *coverage_lines = output_lines;
-
-        Ok(())
-    }
-
     fn run_pretty_test(&self) {
         if self.props.pp_exact.is_some() {
             logv(self.config, "testing for exact pretty-printing".to_owned());
src/tools/compiletest/src/runtest/coverage.rs (new file, 367 lines)
@@ -0,0 +1,367 @@
//! Code specific to the coverage test suites.

use std::ffi::OsStr;
use std::path::{Path, PathBuf};
use std::process::Command;

use glob::glob;

use crate::common::{UI_COVERAGE, UI_COVERAGE_MAP};
use crate::runtest::{static_regex, Emit, ProcRes, TestCx, WillExecute};

impl<'test> TestCx<'test> {
    pub(crate) fn run_coverage_map_test(&self) {
        let Some(coverage_dump_path) = &self.config.coverage_dump_path else {
            self.fatal("missing --coverage-dump");
        };

        let (proc_res, llvm_ir_path) = self.compile_test_and_save_ir();
        if !proc_res.status.success() {
            self.fatal_proc_rec("compilation failed!", &proc_res);
        }
        drop(proc_res);

        let mut dump_command = Command::new(coverage_dump_path);
        dump_command.arg(llvm_ir_path);
        let proc_res = self.run_command_to_procres(&mut dump_command);
        if !proc_res.status.success() {
            self.fatal_proc_rec("coverage-dump failed!", &proc_res);
        }

        let kind = UI_COVERAGE_MAP;

        let expected_coverage_dump = self.load_expected_output(kind);
        let actual_coverage_dump = self.normalize_output(&proc_res.stdout, &[]);

        let coverage_dump_errors =
            self.compare_output(kind, &actual_coverage_dump, &expected_coverage_dump);

        if coverage_dump_errors > 0 {
            self.fatal_proc_rec(
                &format!("{coverage_dump_errors} errors occurred comparing coverage output."),
                &proc_res,
            );
        }
    }

    pub(crate) fn run_coverage_run_test(&self) {
        let should_run = self.run_if_enabled();
        let proc_res = self.compile_test(should_run, Emit::None);

        if !proc_res.status.success() {
            self.fatal_proc_rec("compilation failed!", &proc_res);
        }
        drop(proc_res);

        if let WillExecute::Disabled = should_run {
            return;
        }

        let profraw_path = self.output_base_dir().join("default.profraw");
        let profdata_path = self.output_base_dir().join("default.profdata");

        // Delete any existing profraw/profdata files to rule out unintended
        // interference between repeated test runs.
        if profraw_path.exists() {
            std::fs::remove_file(&profraw_path).unwrap();
        }
        if profdata_path.exists() {
            std::fs::remove_file(&profdata_path).unwrap();
        }

        let proc_res = self.exec_compiled_test_general(
            &[("LLVM_PROFILE_FILE", &profraw_path.to_str().unwrap())],
            false,
        );
        if self.props.failure_status.is_some() {
            self.check_correct_failure_status(&proc_res);
        } else if !proc_res.status.success() {
            self.fatal_proc_rec("test run failed!", &proc_res);
        }
        drop(proc_res);

        let mut profraw_paths = vec![profraw_path];
        let mut bin_paths = vec![self.make_exe_name()];

        if self.config.suite == "coverage-run-rustdoc" {
            self.run_doctests_for_coverage(&mut profraw_paths, &mut bin_paths);
        }

        // Run `llvm-profdata merge` to index the raw coverage output.
        let proc_res = self.run_llvm_tool("llvm-profdata", |cmd| {
            cmd.args(["merge", "--sparse", "--output"]);
            cmd.arg(&profdata_path);
            cmd.args(&profraw_paths);
        });
        if !proc_res.status.success() {
            self.fatal_proc_rec("llvm-profdata merge failed!", &proc_res);
        }
        drop(proc_res);

        // Run `llvm-cov show` to produce a coverage report in text format.
        let proc_res = self.run_llvm_tool("llvm-cov", |cmd| {
            cmd.args(["show", "--format=text", "--show-line-counts-or-regions"]);

            cmd.arg("--Xdemangler");
            cmd.arg(self.config.rust_demangler_path.as_ref().unwrap());

            cmd.arg("--instr-profile");
            cmd.arg(&profdata_path);

            for bin in &bin_paths {
                cmd.arg("--object");
                cmd.arg(bin);
            }

            cmd.args(&self.props.llvm_cov_flags);
        });
        if !proc_res.status.success() {
            self.fatal_proc_rec("llvm-cov show failed!", &proc_res);
        }

        let kind = UI_COVERAGE;

        let expected_coverage = self.load_expected_output(kind);
        let normalized_actual_coverage =
            self.normalize_coverage_output(&proc_res.stdout).unwrap_or_else(|err| {
                self.fatal_proc_rec(&err, &proc_res);
            });

        let coverage_errors =
            self.compare_output(kind, &normalized_actual_coverage, &expected_coverage);

        if coverage_errors > 0 {
            self.fatal_proc_rec(
                &format!("{} errors occurred comparing coverage output.", coverage_errors),
                &proc_res,
            );
        }
    }

    /// Run any doctests embedded in this test file, and add any resulting
    /// `.profraw` files and doctest executables to the given vectors.
    fn run_doctests_for_coverage(
        &self,
        profraw_paths: &mut Vec<PathBuf>,
        bin_paths: &mut Vec<PathBuf>,
    ) {
        // Put .profraw files and doctest executables in dedicated directories,
        // to make it easier to glob them all later.
        let profraws_dir = self.output_base_dir().join("doc_profraws");
        let bins_dir = self.output_base_dir().join("doc_bins");

        // Remove existing directories to prevent cross-run interference.
        if profraws_dir.try_exists().unwrap() {
            std::fs::remove_dir_all(&profraws_dir).unwrap();
        }
        if bins_dir.try_exists().unwrap() {
            std::fs::remove_dir_all(&bins_dir).unwrap();
        }

        let mut rustdoc_cmd =
            Command::new(self.config.rustdoc_path.as_ref().expect("--rustdoc-path not passed"));

        // In general there will be multiple doctest binaries running, so we
        // tell the profiler runtime to write their coverage data into separate
        // profraw files.
        rustdoc_cmd.env("LLVM_PROFILE_FILE", profraws_dir.join("%p-%m.profraw"));

        rustdoc_cmd.args(["--test", "-Cinstrument-coverage"]);

        // Without this, the doctests complain about not being able to find
        // their enclosing file's crate for some reason.
        rustdoc_cmd.args(["--crate-name", "workaround_for_79771"]);

        // Persist the doctest binaries so that `llvm-cov show` can read their
        // embedded coverage mappings later.
        rustdoc_cmd.arg("-Zunstable-options");
        rustdoc_cmd.arg("--persist-doctests");
        rustdoc_cmd.arg(&bins_dir);

        rustdoc_cmd.arg("-L");
        rustdoc_cmd.arg(self.aux_output_dir_name());

        rustdoc_cmd.arg(&self.testpaths.file);

        let proc_res = self.compose_and_run_compiler(rustdoc_cmd, None);
        if !proc_res.status.success() {
            self.fatal_proc_rec("rustdoc --test failed!", &proc_res)
        }

        fn glob_iter(path: impl AsRef<Path>) -> impl Iterator<Item = PathBuf> {
            let path_str = path.as_ref().to_str().unwrap();
            let iter = glob(path_str).unwrap();
            iter.map(Result::unwrap)
        }

        // Find all profraw files in the profraw directory.
        for p in glob_iter(profraws_dir.join("*.profraw")) {
            profraw_paths.push(p);
        }
        // Find all executables in the `--persist-doctests` directory, while
        // avoiding other file types (e.g. `.pdb` on Windows). This doesn't
        // need to be perfect, as long as it can handle the files actually
        // produced by `rustdoc --test`.
        for p in glob_iter(bins_dir.join("**/*")) {
            let is_bin = p.is_file()
                && match p.extension() {
                    None => true,
                    Some(ext) => ext == OsStr::new("exe"),
                };
            if is_bin {
                bin_paths.push(p);
            }
        }
    }

    fn run_llvm_tool(&self, name: &str, configure_cmd_fn: impl FnOnce(&mut Command)) -> ProcRes {
        let tool_path = self
            .config
            .llvm_bin_dir
            .as_ref()
            .expect("this test expects the LLVM bin dir to be available")
            .join(name);

        let mut cmd = Command::new(tool_path);
        configure_cmd_fn(&mut cmd);

        self.run_command_to_procres(&mut cmd)
    }

    fn normalize_coverage_output(&self, coverage: &str) -> Result<String, String> {
        let normalized = self.normalize_output(coverage, &[]);
        let normalized = Self::anonymize_coverage_line_numbers(&normalized);

        let mut lines = normalized.lines().collect::<Vec<_>>();

        Self::sort_coverage_file_sections(&mut lines)?;
        Self::sort_coverage_subviews(&mut lines)?;

        let joined_lines = lines.iter().flat_map(|line| [line, "\n"]).collect::<String>();
        Ok(joined_lines)
    }

    /// Replace line numbers in coverage reports with the placeholder `LL`,
    /// so that the tests are less sensitive to lines being added/removed.
    fn anonymize_coverage_line_numbers(coverage: &str) -> String {
        // The coverage reporter prints line numbers at the start of a line.
        // They are truncated or left-padded to occupy exactly 5 columns.
        // (`LineNumberColumnWidth` in `SourceCoverageViewText.cpp`.)
        // A pipe character `|` appears immediately after the final digit.
        //
        // Line numbers that appear inside expansion/instantiation subviews
        // have an additional prefix of ` |` for each nesting level.
        //
        // Branch views also include the relevant line number, so we want to
        // redact those too. (These line numbers don't have padding.)
        //
        // Note: The pattern `(?m:^)` matches the start of a line.

        // ` 1|` => ` LL|`
        // ` 10|` => ` LL|`
        // ` 100|` => ` LL|`
        // ` | 1000|` => ` | LL|`
        // ` | | 1000|` => ` | | LL|`
        let coverage = static_regex!(r"(?m:^)(?<prefix>(?: \|)*) *[0-9]+\|")
            .replace_all(&coverage, "${prefix} LL|");

        // ` | Branch (1:` => ` | Branch (LL:`
        // ` | | Branch (10:` => ` | | Branch (LL:`
        let coverage = static_regex!(r"(?m:^)(?<prefix>(?: \|)+ Branch \()[0-9]+:")
            .replace_all(&coverage, "${prefix}LL:");

        // ` |---> MC/DC Decision Region (1:30) to (2:` => ` |---> MC/DC Decision Region (LL:30) to (LL:`
        let coverage =
            static_regex!(r"(?m:^)(?<prefix>(?: \|)+---> MC/DC Decision Region \()[0-9]+:(?<middle>[0-9]+\) to \()[0-9]+:")
                .replace_all(&coverage, "${prefix}LL:${middle}LL:");

        // ` | Condition C1 --> (1:` => ` | Condition C1 --> (LL:`
        let coverage =
            static_regex!(r"(?m:^)(?<prefix>(?: \|)+ Condition C[0-9]+ --> \()[0-9]+:")
                .replace_all(&coverage, "${prefix}LL:");

        coverage.into_owned()
    }

    /// Coverage reports can describe multiple source files, separated by
    /// blank lines. The order of these files is unpredictable (since it
    /// depends on implementation details), so we need to sort the file
    /// sections into a consistent order before comparing against a snapshot.
    fn sort_coverage_file_sections(coverage_lines: &mut Vec<&str>) -> Result<(), String> {
        // Group the lines into file sections, separated by blank lines.
        let mut sections = coverage_lines.split(|line| line.is_empty()).collect::<Vec<_>>();

        // The last section should be empty, representing an extra trailing blank line.
        if !sections.last().is_some_and(|last| last.is_empty()) {
            return Err("coverage report should end with an extra blank line".to_owned());
        }

        // Sort the file sections (not including the final empty "section").
        let except_last = sections.len() - 1;
        (&mut sections[..except_last]).sort();

        // Join the file sections back into a flat list of lines, with
        // sections separated by blank lines.
        let joined = sections.join(&[""] as &[_]);
        assert_eq!(joined.len(), coverage_lines.len());
        *coverage_lines = joined;

        Ok(())
    }

    fn sort_coverage_subviews(coverage_lines: &mut Vec<&str>) -> Result<(), String> {
        let mut output_lines = Vec::new();

        // We accumulate a list of zero or more "subviews", where each
        // subview is a list of one or more lines.
        let mut subviews: Vec<Vec<&str>> = Vec::new();

        fn flush<'a>(subviews: &mut Vec<Vec<&'a str>>, output_lines: &mut Vec<&'a str>) {
            if subviews.is_empty() {
                return;
            }

            // Take and clear the list of accumulated subviews.
            let mut subviews = std::mem::take(subviews);

            // The last "subview" should be just a boundary line on its own,
            // so exclude it when sorting the other subviews.
            let except_last = subviews.len() - 1;
            (&mut subviews[..except_last]).sort();

            for view in subviews {
                for line in view {
                    output_lines.push(line);
                }
            }
        }

        for (line, line_num) in coverage_lines.iter().zip(1..) {
            if line.starts_with(" ------------------") {
                // This is a subview boundary line, so start a new subview.
                subviews.push(vec![line]);
            } else if line.starts_with(" |") {
                // Add this line to the current subview.
                subviews
                    .last_mut()
                    .ok_or(format!(
                        "unexpected subview line outside of a subview on line {line_num}"
                    ))?
                    .push(line);
            } else {
                // This line is not part of a subview, so sort and print any
                // accumulated subviews, and then print the line as-is.
                flush(&mut subviews, &mut output_lines);
                output_lines.push(line);
            }
        }

        flush(&mut subviews, &mut output_lines);
        assert!(subviews.is_empty());

        assert_eq!(output_lines.len(), coverage_lines.len());
        *coverage_lines = output_lines;

        Ok(())
    }
}
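As a standalone illustration of the normalization that `sort_coverage_file_sections` performs above (the function and data here are hypothetical, not part of the PR): the report is split on blank lines into per-file sections, the sections are sorted, and the trailing empty section is kept last so the output still ends with a blank line.

/// Toy version of the section-sorting step: one section per source file,
/// separated by blank lines, in an unspecified order on input.
fn sort_report_sections(report: &str) -> String {
    let lines: Vec<&str> = report.lines().collect();

    // Group into blank-line-separated sections; the final "section" is the
    // empty one produced by the trailing blank line, and must stay last.
    let mut sections: Vec<&[&str]> = lines.split(|line| line.is_empty()).collect();
    let except_last = sections.len() - 1;
    sections[..except_last].sort();

    // Re-join with blank lines between sections, then back into one string.
    let joined: Vec<&str> = sections.join(&[""] as &[_]);
    joined.iter().flat_map(|line| [*line, "\n"]).collect()
}

fn main() {
    // Two single-line "file sections" in reverse order, plus the trailing
    // blank line that the real reports end with.
    let input = "zzz.rs:\n    LL|      1|fn z() {}\n\naaa.rs:\n    LL|      1|fn a() {}\n\n";
    let sorted = sort_report_sections(input);
    assert!(sorted.starts_with("aaa.rs:"));
    print!("{sorted}");
}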
@@ -48,71 +48,3 @@ fn normalize_platform_differences() {
         r#"println!("test\ntest")"#,
     );
 }
-
-/// Test for anonymizing line numbers in coverage reports, especially for
-/// MC/DC regions.
-///
-/// FIXME(#123409): This test can be removed when we have examples of MC/DC
-/// coverage in the actual coverage test suite.
-#[test]
-fn anonymize_coverage_line_numbers() {
-    let anon = |coverage| TestCx::anonymize_coverage_line_numbers(coverage);
-
-    let input = r#"
-    7| 2|fn mcdc_check_neither(a: bool, b: bool) {
-    8| 2| if a && b {
-    ^0
-  ------------------
-  |---> MC/DC Decision Region (8:8) to (8:14)
-  |
-  | Number of Conditions: 2
-  | Condition C1 --> (8:8)
-  | Condition C2 --> (8:13)
-  |
-  | Executed MC/DC Test Vectors:
-  |
-  | C1, C2 Result
-  | 1 { F, - = F }
-  |
-  | C1-Pair: not covered
-  | C2-Pair: not covered
-  | MC/DC Coverage for Decision: 0.00%
-  |
-  ------------------
-    9| 0| say("a and b");
-   10| 2| } else {
-   11| 2| say("not both");
-   12| 2| }
-   13| 2|}
-"#;
-
-    let expected = r#"
-   LL| 2|fn mcdc_check_neither(a: bool, b: bool) {
-   LL| 2| if a && b {
-    ^0
-  ------------------
-  |---> MC/DC Decision Region (LL:8) to (LL:14)
-  |
-  | Number of Conditions: 2
-  | Condition C1 --> (LL:8)
-  | Condition C2 --> (LL:13)
-  |
-  | Executed MC/DC Test Vectors:
-  |
-  | C1, C2 Result
-  | 1 { F, - = F }
-  |
-  | C1-Pair: not covered
-  | C2-Pair: not covered
-  | MC/DC Coverage for Decision: 0.00%
-  |
-  ------------------
-   LL| 0| say("a and b");
-   LL| 2| } else {
-   LL| 2| say("not both");
-   LL| 2| }
-   LL| 2|}
-"#;
-
-    assert_eq!(anon(input), expected);
-}