Split libtest into several smaller modules
parent
a16dca337d
commit
4d5052203d
251  src/libtest/bench.rs  Normal file
@@ -0,0 +1,251 @@
//! Benchmarking module.
use super::{
    BenchMode, MonitorMsg, Sender, Sink, TestDesc, TestResult
};

use crate::stats;
use std::time::{Duration, Instant};
use std::cmp;
use std::io;
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::sync::{Arc, Mutex};
use std::hint::black_box;

/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
#[derive(Clone)]
pub struct Bencher {
    mode: BenchMode,
    summary: Option<stats::Summary>,
    pub bytes: u64,
}

impl Bencher {
    /// Callback for benchmark functions to run in their body.
    pub fn iter<T, F>(&mut self, mut inner: F)
    where
        F: FnMut() -> T,
    {
        if self.mode == BenchMode::Single {
            ns_iter_inner(&mut inner, 1);
            return;
        }

        self.summary = Some(iter(&mut inner));
    }

    pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
    where
        F: FnMut(&mut Bencher),
    {
        f(self);
        return self.summary;
    }
}

#[derive(Debug, Clone, PartialEq)]
pub struct BenchSamples {
    pub ns_iter_summ: stats::Summary,
    pub mb_s: usize,
}

pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
    use std::fmt::Write;
    let mut output = String::new();

    let median = bs.ns_iter_summ.median as usize;
    let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;

    output
        .write_fmt(format_args!(
            "{:>11} ns/iter (+/- {})",
            fmt_thousands_sep(median, ','),
            fmt_thousands_sep(deviation, ',')
        ))
        .unwrap();
    if bs.mb_s != 0 {
        output
            .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
            .unwrap();
    }
    output
}

// Format a number with thousands separators
fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
    use std::fmt::Write;
    let mut output = String::new();
    let mut trailing = false;
    for &pow in &[9, 6, 3, 0] {
        let base = 10_usize.pow(pow);
        if pow == 0 || trailing || n / base != 0 {
            if !trailing {
                output.write_fmt(format_args!("{}", n / base)).unwrap();
            } else {
                output.write_fmt(format_args!("{:03}", n / base)).unwrap();
            }
            if pow != 0 {
                output.push(sep);
            }
            trailing = true;
        }
        n %= base;
    }

    output
}

fn ns_from_dur(dur: Duration) -> u64 {
    dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
}

fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
where
    F: FnMut() -> T,
{
    let start = Instant::now();
    for _ in 0..k {
        black_box(inner());
    }
    return ns_from_dur(start.elapsed());
}

pub fn iter<T, F>(inner: &mut F) -> stats::Summary
where
    F: FnMut() -> T,
{
    // Initial bench run to get ballpark figure.
    let ns_single = ns_iter_inner(inner, 1);

    // Try to estimate iter count for 1ms falling back to 1m
    // iterations if first run took < 1ns.
    let ns_target_total = 1_000_000; // 1ms
    let mut n = ns_target_total / cmp::max(1, ns_single);

    // if the first run took more than 1ms we don't want to just
    // be left doing 0 iterations on every loop. The unfortunate
    // side effect of not being able to do as many runs is
    // automatically handled by the statistical analysis below
    // (i.e., larger error bars).
    n = cmp::max(1, n);

    let mut total_run = Duration::new(0, 0);
    let samples: &mut [f64] = &mut [0.0_f64; 50];
    loop {
        let loop_start = Instant::now();

        for p in &mut *samples {
            *p = ns_iter_inner(inner, n) as f64 / n as f64;
        }

        stats::winsorize(samples, 5.0);
        let summ = stats::Summary::new(samples);

        for p in &mut *samples {
            let ns = ns_iter_inner(inner, 5 * n);
            *p = ns as f64 / (5 * n) as f64;
        }

        stats::winsorize(samples, 5.0);
        let summ5 = stats::Summary::new(samples);

        let loop_run = loop_start.elapsed();

        // If we've run for 100ms and seem to have converged to a
        // stable median.
        if loop_run > Duration::from_millis(100)
            && summ.median_abs_dev_pct < 1.0
            && summ.median - summ5.median < summ5.median_abs_dev
        {
            return summ5;
        }

        total_run = total_run + loop_run;
        // Longest we ever run for is 3s.
        if total_run > Duration::from_secs(3) {
            return summ5;
        }

        // If we overflow here just return the results so far. We check a
        // multiplier of 10 because we're about to multiply by 2 and the
        // next iteration of the loop will also multiply by 5 (to calculate
        // the summ5 result)
        n = match n.checked_mul(10) {
            Some(_) => n * 2,
            None => {
                return summ5;
            }
        };
    }
}

pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
where
    F: FnMut(&mut Bencher),
{
    let mut bs = Bencher {
        mode: BenchMode::Auto,
        summary: None,
        bytes: 0,
    };

    let data = Arc::new(Mutex::new(Vec::new()));
    let oldio = if !nocapture {
        Some((
            io::set_print(Some(Box::new(Sink(data.clone())))),
            io::set_panic(Some(Box::new(Sink(data.clone())))),
        ))
    } else {
        None
    };

    let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));

    if let Some((printio, panicio)) = oldio {
        io::set_print(printio);
        io::set_panic(panicio);
    }

    let test_result = match result {
        //bs.bench(f) {
        Ok(Some(ns_iter_summ)) => {
            let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
            let mb_s = bs.bytes * 1000 / ns_iter;

            let bs = BenchSamples {
                ns_iter_summ,
                mb_s: mb_s as usize,
            };
            TestResult::TrBench(bs)
        }
        Ok(None) => {
            // iter not called, so no data.
            // FIXME: error in this case?
            let samples: &mut [f64] = &mut [0.0_f64; 1];
            let bs = BenchSamples {
                ns_iter_summ: stats::Summary::new(samples),
                mb_s: 0,
            };
            TestResult::TrBench(bs)
        }
        Err(_) => TestResult::TrFailed,
    };

    let stdout = data.lock().unwrap().to_vec();
    monitor_ch.send((desc, test_result, None, stdout)).unwrap();
}

pub fn run_once<F>(f: F)
where
    F: FnMut(&mut Bencher),
{
    let mut bs = Bencher {
        mode: BenchMode::Single,
        summary: None,
        bytes: 0,
    };
    bs.bench(f);
}
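For orientation, a minimal sketch of how this module is driven from a `#[bench]` function; the feature gate, the `test` crate wiring, and the workload are illustrative assumptions, not part of this commit:

#![feature(test)]
extern crate test;

#[bench]
fn bench_collect(b: &mut test::Bencher) {
    // Setting `bytes` makes fmt_bench_samples append a MB/s figure.
    b.bytes = 1024 * std::mem::size_of::<usize>() as u64;
    b.iter(|| {
        // The closure's return value goes through black_box in ns_iter_inner,
        // so the optimizer cannot delete the measured work.
        (0..1024usize).collect::<Vec<_>>()
    });
}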
384  src/libtest/cli.rs  Normal file
@@ -0,0 +1,384 @@
//! Module converting command-line arguments into test configuration.

use std::env;
use std::path::PathBuf;
use getopts;

use super::options::{RunIgnored, ColorConfig, OutputFormat, Options};
use super::time::TestTimeOptions;
use super::helpers::isatty;

#[derive(Debug)]
pub struct TestOpts {
    pub list: bool,
    pub filter: Option<String>,
    pub filter_exact: bool,
    pub exclude_should_panic: bool,
    pub run_ignored: RunIgnored,
    pub run_tests: bool,
    pub bench_benchmarks: bool,
    pub logfile: Option<PathBuf>,
    pub nocapture: bool,
    pub color: ColorConfig,
    pub format: OutputFormat,
    pub test_threads: Option<usize>,
    pub skip: Vec<String>,
    pub time_options: Option<TestTimeOptions>,
    pub options: Options,
}

impl TestOpts {
    pub fn use_color(&self) -> bool {
        match self.color {
            ColorConfig::AutoColor => !self.nocapture && isatty::stdout_isatty(),
            ColorConfig::AlwaysColor => true,
            ColorConfig::NeverColor => false,
        }
    }
}

/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;
/// Result of parsing the option part.
type OptPartRes<T> = Result<Option<T>, String>;

fn optgroups() -> getopts::Options {
    let mut opts = getopts::Options::new();
    opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
        .optflag("", "ignored", "Run only ignored tests")
        .optflag("", "exclude-should-panic", "Excludes tests marked as should_panic")
        .optflag("", "test", "Run tests and not benchmarks")
        .optflag("", "bench", "Run benchmarks instead of tests")
        .optflag("", "list", "List all tests and benchmarks")
        .optflag("h", "help", "Display this message (longer with --help)")
        .optopt(
            "",
            "logfile",
            "Write logs to the specified file instead \
             of stdout",
            "PATH",
        )
        .optflag(
            "",
            "nocapture",
            "don't capture stdout/stderr of each \
             task, allow printing directly",
        )
        .optopt(
            "",
            "test-threads",
            "Number of threads used for running tests \
             in parallel",
            "n_threads",
        )
        .optmulti(
            "",
            "skip",
            "Skip tests whose names contain FILTER (this flag can \
             be used multiple times)",
            "FILTER",
        )
        .optflag(
            "q",
            "quiet",
            "Display one character per test instead of one line. \
             Alias to --format=terse",
        )
        .optflag(
            "",
            "exact",
            "Exactly match filters rather than by substring",
        )
        .optopt(
            "",
            "color",
            "Configure coloring of output:
            auto = colorize if stdout is a tty and tests are run serially (default);
            always = always colorize output;
            never = never colorize output;",
            "auto|always|never",
        )
        .optopt(
            "",
            "format",
            "Configure formatting of output:
            pretty = Print verbose output;
            terse = Display one character per test;
            json = Output a json document",
            "pretty|terse|json",
        )
        .optflag(
            "",
            "show-output",
            "Show captured stdout of successful tests"
        )
        .optopt(
            "Z",
            "",
            "Enable nightly-only flags:
            unstable-options = Allow use of experimental features",
            "unstable-options",
        )
        .optflagopt(
            "",
            "report-time",
            "Show execution time of each test. Available values:
            plain = do not colorize the execution time (default);
            colored = colorize output according to the `color` parameter value;

            Threshold values for colorized output can be configured via
            `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and
            `RUST_TEST_TIME_DOCTEST` environment variables.

            Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`.

            Not available for --format=terse",
            "plain|colored"
        )
        .optflag(
            "",
            "ensure-time",
            "Treat excess of the test execution time limit as error.

            Threshold values for this option can be configured via
            `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and
            `RUST_TEST_TIME_DOCTEST` environment variables.

            Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`.

            `CRITICAL_TIME` here means the limit that should not be exceeded by test.
            "
        );
    return opts;
}

fn usage(binary: &str, options: &getopts::Options) {
    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
    println!(
        r#"{usage}

The FILTER string is tested against the name of all tests, and only those
tests whose names contain the filter are run.

By default, all tests are run in parallel. This can be altered with the
--test-threads flag or the RUST_TEST_THREADS environment variable when running
tests (set it to 1).

All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
environment variable to a value other than "0". Logging is not captured by default.

Test Attributes:

    `#[test]`         - Indicates a function is a test to be run. This function
                        takes no arguments.
    `#[bench]`        - Indicates a function is a benchmark to be run. This
                        function takes one argument (test::Bencher).
    `#[should_panic]` - This function (also labeled with `#[test]`) will only pass if
                        the code causes a panic (an assertion failure or panic!)
                        A message may be provided, which the failure string must
                        contain: #[should_panic(expected = "foo")].
    `#[ignore]`       - When applied to a function which is already attributed as a
                        test, then the test runner will ignore these tests during
                        normal test runs. Running with --ignored or --include-ignored will run
                        these tests."#,
        usage = options.usage(&message)
    );
}

// FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
fn is_nightly() -> bool {
    // Whether this is a feature-staged build, i.e., on the beta or stable channel
    let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
    // Whether we should enable unstable features for bootstrapping
    let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();

    bootstrap || !disable_unstable_features
}

// Gets the option value and checks if unstable features are enabled.
macro_rules! unstable_optflag {
    ($matches:ident, $allow_unstable:ident, $option_name:literal) => {{
        let opt = $matches.opt_present($option_name);
        if !$allow_unstable && opt {
            return Some(Err(format!(
                "The \"{}\" flag is only accepted on the nightly compiler",
                $option_name
            )));
        }

        opt
    }};
}

// Gets the CLI options associated with the `report-time` feature.
fn get_time_options(
    matches: &getopts::Matches,
    allow_unstable: bool,
) -> Option<OptPartRes<TestTimeOptions>> {
    let report_time = unstable_optflag!(matches, allow_unstable, "report-time");
    let colored_opt_str = matches.opt_str("report-time");
    let mut report_time_colored = report_time && colored_opt_str == Some("colored".into());
    let ensure_test_time = unstable_optflag!(matches, allow_unstable, "ensure-time");

    // If the `ensure-time` option is provided, time output is enforced,
    // so the user won't be confused if any of the tests silently fail.
    let options = if report_time || ensure_test_time {
        if ensure_test_time && !report_time {
            report_time_colored = true;
        }
        Some(TestTimeOptions::new_from_env(ensure_test_time, report_time_colored))
    } else {
        None
    };

    Some(Ok(options))
}

// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
    let mut allow_unstable = false;
    let opts = optgroups();
    let args = args.get(1..).unwrap_or(args);
    let matches = match opts.parse(args) {
        Ok(m) => m,
        Err(f) => return Some(Err(f.to_string())),
    };

    if let Some(opt) = matches.opt_str("Z") {
        if !is_nightly() {
            return Some(Err(
                "the option `Z` is only accepted on the nightly compiler".into(),
            ));
        }

        match &*opt {
            "unstable-options" => {
                allow_unstable = true;
            }
            _ => {
                return Some(Err("Unrecognized option to `Z`".into()));
            }
        }
    };

    if matches.opt_present("h") {
        usage(&args[0], &opts);
        return None;
    }

    let filter = if !matches.free.is_empty() {
        Some(matches.free[0].clone())
    } else {
        None
    };

    let exclude_should_panic = unstable_optflag!(matches, allow_unstable, "exclude-should-panic");

    let include_ignored = unstable_optflag!(matches, allow_unstable, "include-ignored");

    let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
        (true, true) => {
            return Some(Err(
                "the options --include-ignored and --ignored are mutually exclusive".into(),
            ));
        }
        (true, false) => RunIgnored::Yes,
        (false, true) => RunIgnored::Only,
        (false, false) => RunIgnored::No,
    };
    let quiet = matches.opt_present("quiet");
    let exact = matches.opt_present("exact");
    let list = matches.opt_present("list");

    let logfile = matches.opt_str("logfile");
    let logfile = logfile.map(|s| PathBuf::from(&s));

    let bench_benchmarks = matches.opt_present("bench");
    let run_tests = !bench_benchmarks || matches.opt_present("test");

    let mut nocapture = matches.opt_present("nocapture");
    if !nocapture {
        nocapture = match env::var("RUST_TEST_NOCAPTURE") {
            Ok(val) => &val != "0",
            Err(_) => false,
        };
    }

    let time_options = match get_time_options(&matches, allow_unstable) {
        Some(Ok(val)) => val,
        Some(Err(e)) => return Some(Err(e)),
        None => panic!("Unexpected output from `get_time_options`"),
    };

    let test_threads = match matches.opt_str("test-threads") {
        Some(n_str) => match n_str.parse::<usize>() {
            Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
            Ok(n) => Some(n),
            Err(e) => {
                return Some(Err(format!(
                    "argument for --test-threads must be a number > 0 \
                     (error: {})",
                    e
                )));
            }
        },
        None => None,
    };

    let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
        Some("auto") | None => ColorConfig::AutoColor,
        Some("always") => ColorConfig::AlwaysColor,
        Some("never") => ColorConfig::NeverColor,

        Some(v) => {
            return Some(Err(format!(
                "argument for --color must be auto, always, or never (was \
                 {})",
                v
            )));
        }
    };

    let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
        None if quiet => OutputFormat::Terse,
        Some("pretty") | None => OutputFormat::Pretty,
        Some("terse") => OutputFormat::Terse,
        Some("json") => {
            if !allow_unstable {
                return Some(Err(
                    "The \"json\" format is only accepted on the nightly compiler".into(),
                ));
            }
            OutputFormat::Json
        }

        Some(v) => {
            return Some(Err(format!(
                "argument for --format must be pretty, terse, or json (was \
                 {})",
                v
            )));
        }
    };

    let test_opts = TestOpts {
        list,
        filter,
        filter_exact: exact,
        exclude_should_panic,
        run_ignored,
        run_tests,
        bench_benchmarks,
        logfile,
        nocapture,
        color,
        format,
        test_threads,
        skip: matches.opt_strs("skip"),
        time_options,
        options: Options::new().display_output(matches.opt_present("show-output")),
    };

    Some(Ok(test_opts))
}
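A usage sketch of the three-way result of `parse_opts` (hypothetical caller, not in the diff; it mirrors how a test harness entry point might consume it):

let args: Vec<String> = std::env::args().collect();
let opts = match crate::cli::parse_opts(&args) {
    Some(Ok(o)) => o,                    // parsed successfully
    Some(Err(msg)) => panic!("{}", msg), // bad flag or value
    None => return,                      // --help was printed; nothing to run
};
assert!(opts.run_tests || opts.bench_benchmarks);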
@@ -27,7 +27,7 @@ fn write_event(
     ty: &str,
     name: &str,
     evt: &str,
-    exec_time: Option<&TestExecTime>,
+    exec_time: Option<&time::TestExecTime>,
     stdout: Option<Cow<'_, str>>,
     extra: Option<&str>,
 ) -> io::Result<()> {

@@ -76,7 +76,7 @@ fn write_result(
     &mut self,
     desc: &TestDesc,
     result: &TestResult,
-    exec_time: Option<&TestExecTime>,
+    exec_time: Option<&time::TestExecTime>,
     stdout: &[u8],
     state: &ConsoleTestState,
 ) -> io::Result<()> {

@@ -16,7 +16,7 @@ fn write_result(
     &mut self,
     desc: &TestDesc,
     result: &TestResult,
-    exec_time: Option<&TestExecTime>,
+    exec_time: Option<&time::TestExecTime>,
     stdout: &[u8],
     state: &ConsoleTestState,
 ) -> io::Result<()>;

@@ -3,7 +3,7 @@
 pub(crate) struct PrettyFormatter<T> {
     out: OutputLocation<T>,
     use_color: bool,
-    time_options: Option<TestTimeOptions>,
+    time_options: Option<time::TestTimeOptions>,

     /// Number of columns to fill when aligning names
     max_name_len: usize,

@@ -17,7 +17,7 @@ pub fn new(
     use_color: bool,
     max_name_len: usize,
     is_multithreaded: bool,
-    time_options: Option<TestTimeOptions>,
+    time_options: Option<time::TestTimeOptions>,
 ) -> Self {
     PrettyFormatter {
         out,

@@ -93,7 +93,7 @@ pub fn write_plain<S: AsRef<str>>(&mut self, s: S) -> io::Result<()> {
 fn write_time(
     &mut self,
     desc: &TestDesc,
-    exec_time: Option<&TestExecTime>
+    exec_time: Option<&time::TestExecTime>
 ) -> io::Result<()> {
     if let (Some(opts), Some(time)) = (self.time_options, exec_time) {
         let time_str = format!(" <{}>", time);

@@ -194,7 +194,7 @@ fn write_result(
     &mut self,
     desc: &TestDesc,
     result: &TestResult,
-    exec_time: Option<&TestExecTime>,
+    exec_time: Option<&time::TestExecTime>,
     _: &[u8],
     _: &ConsoleTestState,
 ) -> io::Result<()> {

@@ -225,7 +225,7 @@ fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {

     self.write_plain(&format!(
         "test {} has been running for over {} seconds\n",
-        desc.name, TEST_WARN_TIMEOUT_S
+        desc.name, time::TEST_WARN_TIMEOUT_S
     ))
 }

@@ -174,7 +174,7 @@ fn write_result(
     &mut self,
     desc: &TestDesc,
     result: &TestResult,
-    _: Option<&TestExecTime>,
+    _: Option<&time::TestExecTime>,
     _: &[u8],
     _: &ConsoleTestState,
 ) -> io::Result<()> {

@@ -196,7 +196,7 @@ fn write_result(
 fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
     self.write_plain(&format!(
         "test {} has been running for over {} seconds\n",
-        desc.name, TEST_WARN_TIMEOUT_S
+        desc.name, time::TEST_WARN_TIMEOUT_S
     ))
 }
153  src/libtest/helpers/concurrency.rs  Normal file
@@ -0,0 +1,153 @@
//! Helper module which helps to determine the number of threads to be used
//! during test execution.
use std::env;

#[cfg(any(unix, target_os = "cloudabi"))]
use libc;

#[allow(deprecated)]
pub fn get_concurrency() -> usize {
    return match env::var("RUST_TEST_THREADS") {
        Ok(s) => {
            let opt_n: Option<usize> = s.parse().ok();
            match opt_n {
                Some(n) if n > 0 => n,
                _ => panic!(
                    "RUST_TEST_THREADS is `{}`, should be a positive integer.",
                    s
                ),
            }
        }
        Err(..) => num_cpus(),
    };

    #[cfg(windows)]
    #[allow(nonstandard_style)]
    fn num_cpus() -> usize {
        #[repr(C)]
        struct SYSTEM_INFO {
            wProcessorArchitecture: u16,
            wReserved: u16,
            dwPageSize: u32,
            lpMinimumApplicationAddress: *mut u8,
            lpMaximumApplicationAddress: *mut u8,
            dwActiveProcessorMask: *mut u8,
            dwNumberOfProcessors: u32,
            dwProcessorType: u32,
            dwAllocationGranularity: u32,
            wProcessorLevel: u16,
            wProcessorRevision: u16,
        }
        extern "system" {
            fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
        }
        unsafe {
            let mut sysinfo = std::mem::zeroed();
            GetSystemInfo(&mut sysinfo);
            sysinfo.dwNumberOfProcessors as usize
        }
    }

    #[cfg(target_os = "vxworks")]
    fn num_cpus() -> usize {
        // FIXME: Implement num_cpus on vxWorks
        1
    }

    #[cfg(target_os = "redox")]
    fn num_cpus() -> usize {
        // FIXME: Implement num_cpus on Redox
        1
    }

    #[cfg(any(
        all(target_arch = "wasm32", not(target_os = "emscripten")),
        all(target_vendor = "fortanix", target_env = "sgx")
    ))]
    fn num_cpus() -> usize {
        1
    }

    #[cfg(any(
        target_os = "android",
        target_os = "cloudabi",
        target_os = "emscripten",
        target_os = "fuchsia",
        target_os = "ios",
        target_os = "linux",
        target_os = "macos",
        target_os = "solaris",
    ))]
    fn num_cpus() -> usize {
        unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
    }

    #[cfg(any(
        target_os = "freebsd",
        target_os = "dragonfly",
        target_os = "netbsd"
    ))]
    fn num_cpus() -> usize {
        use std::ptr;

        let mut cpus: libc::c_uint = 0;
        let mut cpus_size = std::mem::size_of_val(&cpus);

        unsafe {
            cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
        }
        if cpus < 1 {
            let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
            unsafe {
                libc::sysctl(
                    mib.as_mut_ptr(),
                    2,
                    &mut cpus as *mut _ as *mut _,
                    &mut cpus_size as *mut _ as *mut _,
                    ptr::null_mut(),
                    0,
                );
            }
            if cpus < 1 {
                cpus = 1;
            }
        }
        cpus as usize
    }

    #[cfg(target_os = "openbsd")]
    fn num_cpus() -> usize {
        use std::ptr;

        let mut cpus: libc::c_uint = 0;
        let mut cpus_size = std::mem::size_of_val(&cpus);
        let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];

        unsafe {
            libc::sysctl(
                mib.as_mut_ptr(),
                2,
                &mut cpus as *mut _ as *mut _,
                &mut cpus_size as *mut _ as *mut _,
                ptr::null_mut(),
                0,
            );
        }
        if cpus < 1 {
            cpus = 1;
        }
        cpus as usize
    }

    #[cfg(target_os = "haiku")]
    fn num_cpus() -> usize {
        // FIXME: implement
        1
    }

    #[cfg(target_os = "l4re")]
    fn num_cpus() -> usize {
        // FIXME: implement
        1
    }
}
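For illustration, a sketch (not in the diff) of the precedence: the environment variable overrides the detected CPU count, and a zero or non-numeric value panics:

std::env::set_var("RUST_TEST_THREADS", "4");
assert_eq!(crate::helpers::concurrency::get_concurrency(), 4);

std::env::remove_var("RUST_TEST_THREADS");
// Falls back to the platform-specific num_cpus() defined inside get_concurrency.
assert!(crate::helpers::concurrency::get_concurrency() >= 1);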
33  src/libtest/helpers/isatty.rs  Normal file
@@ -0,0 +1,33 @@
//! Helper module which provides a function to test
//! if stdout is a tty.

#[cfg(any(
    target_os = "cloudabi",
    all(target_arch = "wasm32", not(target_os = "emscripten")),
    all(target_vendor = "fortanix", target_env = "sgx")
))]
pub fn stdout_isatty() -> bool {
    // FIXME: Implement isatty on SGX
    false
}
#[cfg(unix)]
pub fn stdout_isatty() -> bool {
    unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
}
#[cfg(windows)]
pub fn stdout_isatty() -> bool {
    type DWORD = u32;
    type BOOL = i32;
    type HANDLE = *mut u8;
    type LPDWORD = *mut u32;
    const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
    extern "system" {
        fn GetStdHandle(which: DWORD) -> HANDLE;
        fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
    }
    unsafe {
        let handle = GetStdHandle(STD_OUTPUT_HANDLE);
        let mut out = 0;
        GetConsoleMode(handle, &mut out) != 0
    }
}
50  src/libtest/helpers/metrics.rs  Normal file
@@ -0,0 +1,50 @@
//! Benchmark metrics.
use std::collections::BTreeMap;

#[derive(Clone, PartialEq, Debug, Copy)]
pub struct Metric {
    value: f64,
    noise: f64,
}

impl Metric {
    pub fn new(value: f64, noise: f64) -> Metric {
        Metric { value, noise }
    }
}

#[derive(Clone, PartialEq)]
pub struct MetricMap(BTreeMap<String, Metric>);

impl MetricMap {
    pub fn new() -> MetricMap {
        MetricMap(BTreeMap::new())
    }

    /// Insert a named `value` (+/- `noise`) metric into the map. The value
    /// must be non-negative. The `noise` indicates the uncertainty of the
    /// metric, which doubles as the "noise range" of acceptable
    /// pairwise-regressions on this named value, when comparing from one
    /// metric to the next using `compare_to_old`.
    ///
    /// If `noise` is positive, then it means this metric is of a value
    /// you want to see grow smaller, so a change larger than `noise` in the
    /// positive direction represents a regression.
    ///
    /// If `noise` is negative, then it means this metric is of a value
    /// you want to see grow larger, so a change larger than `noise` in the
    /// negative direction represents a regression.
    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
        let m = Metric { value, noise };
        self.0.insert(name.to_owned(), m);
    }

    pub fn fmt_metrics(&self) -> String {
        let v = self
            .0
            .iter()
            .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
            .collect::<Vec<_>>();
        v.join(", ")
    }
}
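A small usage sketch (not in the diff); the metric names are invented for illustration:

let mut mm = MetricMap::new();
// Positive noise: smaller values are better, so growth beyond +50 is a regression.
mm.insert_metric("ns-per-iter", 1250.0, 50.0);
// Negative noise: larger values are better.
mm.insert_metric("throughput", 340.0, -10.0);
// The BTreeMap keeps the formatted output sorted by metric name.
assert_eq!(mm.fmt_metrics(), "ns-per-iter: 1250 (+/- 50), throughput: 340 (+/- -10)");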
6  src/libtest/helpers/mod.rs  Normal file
@@ -0,0 +1,6 @@
//! Module with common helpers not directly related to tests
//! but used in `libtest`.

pub mod concurrency;
pub mod isatty;
pub mod metrics;
1417  src/libtest/lib.rs
File diff suppressed because it is too large
80  src/libtest/options.rs  Normal file
@@ -0,0 +1,80 @@
//! Enums denoting options for test execution.

/// Whether to execute tests concurrently or not
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Concurrent {
    Yes,
    No,
}

#[derive(Clone, PartialEq, Eq)]
pub enum BenchMode {
    Auto,
    Single,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ShouldPanic {
    No,
    Yes,
    YesWithMessage(&'static str),
}

#[derive(Copy, Clone, Debug)]
pub enum ColorConfig {
    AutoColor,
    AlwaysColor,
    NeverColor,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum OutputFormat {
    Pretty,
    Terse,
    Json,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum RunIgnored {
    Yes,
    No,
    Only,
}

#[derive(Clone, Copy)]
pub enum RunStrategy {
    /// Runs the test in the current process, and sends the result back over the
    /// supplied channel.
    InProcess,

    /// Spawns a subprocess to run the test, and sends the result back over the
    /// supplied channel. Requires `argv[0]` to exist and point to the binary
    /// that's currently running.
    SpawnPrimary,
}

/// In case we want to add other options as well, just add them in this struct.
#[derive(Copy, Clone, Debug)]
pub struct Options {
    pub display_output: bool,
    pub panic_abort: bool,
}

impl Options {
    pub fn new() -> Options {
        Options {
            display_output: false,
            panic_abort: false,
        }
    }

    pub fn display_output(mut self, display_output: bool) -> Options {
        self.display_output = display_output;
        self
    }

    pub fn panic_abort(mut self, panic_abort: bool) -> Options {
        self.panic_abort = panic_abort;
        self
    }
}
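The struct is consumed builder-style; a minimal sketch (not in the diff):

let opts = Options::new().display_output(true).panic_abort(false);
assert!(opts.display_output);
assert!(!opts.panic_abort);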
102  src/libtest/test_result.rs  Normal file
@@ -0,0 +1,102 @@
use std::any::Any;

use super::bench::BenchSamples;
use super::time;
use super::types::TestDesc;
use super::options::ShouldPanic;

pub use self::TestResult::*;

// Return codes for secondary process.
// Start somewhere other than 0 so we know the return code means what we think
// it means.
pub const TR_OK: i32 = 50;
pub const TR_FAILED: i32 = 51;

#[derive(Debug, Clone, PartialEq)]
pub enum TestResult {
    TrOk,
    TrFailed,
    TrFailedMsg(String),
    TrIgnored,
    TrAllowedFail,
    TrBench(BenchSamples),
    TrTimedFail,
}

unsafe impl Send for TestResult {}

pub fn calc_result<'a>(
    desc: &TestDesc,
    task_result: Result<(), &'a (dyn Any + 'static + Send)>,
    time_opts: &Option<time::TestTimeOptions>,
    exec_time: &Option<time::TestExecTime>,
) -> TestResult {
    let result = match (&desc.should_panic, task_result) {
        (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TestResult::TrOk,
        (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
            if err
                .downcast_ref::<String>()
                .map(|e| &**e)
                .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
                .map(|e| e.contains(msg))
                .unwrap_or(false)
            {
                TestResult::TrOk
            } else if desc.allow_fail {
                TestResult::TrAllowedFail
            } else {
                TestResult::TrFailedMsg(format!("panic did not include expected string '{}'", msg))
            }
        }
        (&ShouldPanic::Yes, Ok(())) => {
            TestResult::TrFailedMsg("test did not panic as expected".to_string())
        }
        _ if desc.allow_fail => TestResult::TrAllowedFail,
        _ => TestResult::TrFailed,
    };

    // If the test already failed (or is allowed to fail), do not change the result.
    if result != TestResult::TrOk {
        return result;
    }

    // Check if the test failed due to a timeout.
    if let (Some(opts), Some(time)) = (time_opts, exec_time) {
        if opts.error_on_excess && opts.is_critical(desc, time) {
            return TestResult::TrTimedFail;
        }
    }

    result
}

pub fn get_result_from_exit_code(
    desc: &TestDesc,
    code: i32,
    time_opts: &Option<time::TestTimeOptions>,
    exec_time: &Option<time::TestExecTime>,
) -> TestResult {
    let result = match (desc.allow_fail, code) {
        (_, TR_OK) => TestResult::TrOk,
        (true, TR_FAILED) => TestResult::TrAllowedFail,
        (false, TR_FAILED) => TestResult::TrFailed,
        (_, _) => TestResult::TrFailedMsg(format!("got unexpected return code {}", code)),
    };

    // If the test already failed (or is allowed to fail), do not change the result.
    if result != TestResult::TrOk {
        return result;
    }

    // Check if the test failed due to a timeout.
    if let (Some(opts), Some(time)) = (time_opts, exec_time) {
        if opts.error_on_excess && opts.is_critical(desc, time) {
            return TestResult::TrTimedFail;
        }
    }

    result
}
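A sketch (not in the diff) of the exit-code mapping for the secondary-process strategy, assuming a `desc: TestDesc` with `allow_fail == false` and no time options:

assert_eq!(get_result_from_exit_code(&desc, TR_OK, &None, &None), TestResult::TrOk);
assert_eq!(get_result_from_exit_code(&desc, TR_FAILED, &None, &None), TestResult::TrFailed);
// Any other code becomes TrFailedMsg with an explanatory message.
match get_result_from_exit_code(&desc, 3, &None, &None) {
    TestResult::TrFailedMsg(msg) => assert!(msg.contains("unexpected return code 3")),
    other => panic!("unexpected result: {:?}", other),
}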
206  src/libtest/time.rs  Normal file
@@ -0,0 +1,206 @@
//! Module `time` contains everything related to the time measurement of unit tests
//! execution.
//! The two main purposes of this module:
//! - Check whether a test has timed out.
//! - Provide helpers for the `report-time` and `measure-time` options.

use std::time::{Duration, Instant};
use std::str::FromStr;
use std::fmt;
use std::env;

use super::types::{TestDesc, TestType};

pub const TEST_WARN_TIMEOUT_S: u64 = 60;

/// This small module contains constants used by the `report-time` option.
/// These constant values will be used if the corresponding environment variables are not set.
///
/// To override values for unit tests, use the environment variable `RUST_TEST_TIME_UNIT`;
/// to override values for integration tests, use `RUST_TEST_TIME_INTEGRATION`;
/// to override values for doctests, use `RUST_TEST_TIME_DOCTEST`.
///
/// Example of the expected format is `RUST_TEST_TIME_xxx=100,200`, where 100 means
/// warn time, and 200 means critical time.
pub mod time_constants {
    use std::time::Duration;
    use super::TEST_WARN_TIMEOUT_S;

    /// Environment variable for overriding the default threshold for unit tests.
    pub const UNIT_ENV_NAME: &str = "RUST_TEST_TIME_UNIT";

    // Unit tests are supposed to be really quick.
    pub const UNIT_WARN: Duration = Duration::from_millis(50);
    pub const UNIT_CRITICAL: Duration = Duration::from_millis(100);

    /// Environment variable for overriding the default threshold for integration tests.
    pub const INTEGRATION_ENV_NAME: &str = "RUST_TEST_TIME_INTEGRATION";

    // Integration tests may have a lot of work, so they can take longer to execute.
    pub const INTEGRATION_WARN: Duration = Duration::from_millis(500);
    pub const INTEGRATION_CRITICAL: Duration = Duration::from_millis(1000);

    /// Environment variable for overriding the default threshold for doctests.
    pub const DOCTEST_ENV_NAME: &str = "RUST_TEST_TIME_DOCTEST";

    // Doctests are similar to integration tests, because they can include a lot of
    // initialization code.
    pub const DOCTEST_WARN: Duration = INTEGRATION_WARN;
    pub const DOCTEST_CRITICAL: Duration = INTEGRATION_CRITICAL;

    // Do not suppose anything about unknown tests, base limits on the
    // `TEST_WARN_TIMEOUT_S` constant.
    pub const UNKNOWN_WARN: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S);
    pub const UNKNOWN_CRITICAL: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S * 2);
}

/// Returns an `Instant` denoting when the test should be considered
/// timed out.
pub fn get_default_test_timeout() -> Instant {
    Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S)
}

/// The measured execution time of a unit test.
#[derive(Clone, PartialEq)]
pub struct TestExecTime(pub Duration);

impl fmt::Display for TestExecTime {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:.3}s", self.0.as_secs_f64())
    }
}

/// Structure denoting time limits for test execution.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
pub struct TimeThreshold {
    pub warn: Duration,
    pub critical: Duration,
}

impl TimeThreshold {
    /// Creates a new `TimeThreshold` instance with the provided durations.
    pub fn new(warn: Duration, critical: Duration) -> Self {
        Self {
            warn,
            critical,
        }
    }

    /// Attempts to create a `TimeThreshold` instance with values obtained
    /// from the environment variable, and returns `None` if the variable
    /// is not set.
    /// Environment variable format is expected to match `\d+,\d+`.
    ///
    /// # Panics
    ///
    /// Panics if the variable with the provided name is set but contains an
    /// inappropriate value.
    pub fn from_env_var(env_var_name: &str) -> Option<Self> {
        let durations_str = env::var(env_var_name).ok()?;

        // Split the string into 2 substrings by comma and try to parse numbers.
        let mut durations = durations_str
            .splitn(2, ',')
            .map(|v| {
                u64::from_str(v).unwrap_or_else(|_| {
                    panic!(
                        "Duration value in variable {} is expected to be a number, but got {}",
                        env_var_name, v
                    )
                })
            });

        // Callback to be called if the environment variable has an unexpected structure.
        let panic_on_incorrect_value = || {
            panic!(
                "Duration variable {} expected to have 2 numbers separated by comma, but got {}",
                env_var_name, durations_str
            );
        };

        let (warn, critical) = (
            durations.next().unwrap_or_else(panic_on_incorrect_value),
            durations.next().unwrap_or_else(panic_on_incorrect_value)
        );

        if warn > critical {
            panic!("Test execution warn time should be less than or equal to the critical time");
        }

        Some(Self::new(Duration::from_millis(warn), Duration::from_millis(critical)))
    }
}

/// Structure with parameters for calculating test execution time.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
pub struct TestTimeOptions {
    /// Denotes whether exceeding the critical execution time limit should be considered
    /// a test failure.
    pub error_on_excess: bool,
    pub colored: bool,
    pub unit_threshold: TimeThreshold,
    pub integration_threshold: TimeThreshold,
    pub doctest_threshold: TimeThreshold,
}

impl TestTimeOptions {
    pub fn new_from_env(error_on_excess: bool, colored: bool) -> Self {
        let unit_threshold =
            TimeThreshold::from_env_var(time_constants::UNIT_ENV_NAME)
                .unwrap_or_else(Self::default_unit);

        let integration_threshold =
            TimeThreshold::from_env_var(time_constants::INTEGRATION_ENV_NAME)
                .unwrap_or_else(Self::default_integration);

        let doctest_threshold =
            TimeThreshold::from_env_var(time_constants::DOCTEST_ENV_NAME)
                .unwrap_or_else(Self::default_doctest);

        Self {
            error_on_excess,
            colored,
            unit_threshold,
            integration_threshold,
            doctest_threshold,
        }
    }

    pub fn is_warn(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool {
        exec_time.0 >= self.warn_time(test)
    }

    pub fn is_critical(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool {
        exec_time.0 >= self.critical_time(test)
    }

    fn warn_time(&self, test: &TestDesc) -> Duration {
        match test.test_type {
            TestType::UnitTest => self.unit_threshold.warn,
            TestType::IntegrationTest => self.integration_threshold.warn,
            TestType::DocTest => self.doctest_threshold.warn,
            TestType::Unknown => time_constants::UNKNOWN_WARN,
        }
    }

    fn critical_time(&self, test: &TestDesc) -> Duration {
        match test.test_type {
            TestType::UnitTest => self.unit_threshold.critical,
            TestType::IntegrationTest => self.integration_threshold.critical,
            TestType::DocTest => self.doctest_threshold.critical,
            TestType::Unknown => time_constants::UNKNOWN_CRITICAL,
        }
    }

    fn default_unit() -> TimeThreshold {
        TimeThreshold::new(time_constants::UNIT_WARN, time_constants::UNIT_CRITICAL)
    }

    fn default_integration() -> TimeThreshold {
        TimeThreshold::new(time_constants::INTEGRATION_WARN, time_constants::INTEGRATION_CRITICAL)
    }

    fn default_doctest() -> TimeThreshold {
        TimeThreshold::new(time_constants::DOCTEST_WARN, time_constants::DOCTEST_CRITICAL)
    }
}
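A sketch (not in the diff) of the `WARN_TIME,CRITICAL_TIME` format the thresholds parse:

use std::time::Duration;

std::env::set_var("RUST_TEST_TIME_UNIT", "50,100"); // warn at 50 ms, critical at 100 ms
let threshold = TimeThreshold::from_env_var(time_constants::UNIT_ENV_NAME).unwrap();
assert_eq!(threshold.warn, Duration::from_millis(50));
assert_eq!(threshold.critical, Duration::from_millis(100));
// "abc,1" or "100,50" (warn above critical) would panic instead of parsing.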
145  src/libtest/types.rs  Normal file
@@ -0,0 +1,145 @@
//! Common types used by `libtest`.

use std::fmt;
use std::borrow::Cow;

use super::options;
use super::bench::Bencher;

pub use NamePadding::*;
pub use TestName::*;
pub use TestFn::*;

/// Type of the test according to the [rust book](https://doc.rust-lang.org/cargo/guide/tests.html)
/// conventions.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum TestType {
    /// Unit-tests are expected to be in the `src` folder of the crate.
    UnitTest,
    /// Integration-style tests are expected to be in the `tests` folder of the crate.
    IntegrationTest,
    /// Doctests are created by `librustdoc` manually, so they are a different type of test.
    DocTest,
    /// Tests for the sources that don't follow the project layout convention
    /// (e.g. tests in raw `main.rs` compiled by calling `rustc --test` directly).
    Unknown,
}

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum NamePadding {
    PadNone,
    PadOnRight,
}

// The name of a test. By convention this follows the rules for rust
// paths; i.e., it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub enum TestName {
    StaticTestName(&'static str),
    DynTestName(String),
    AlignedTestName(Cow<'static, str>, NamePadding),
}

impl TestName {
    pub fn as_slice(&self) -> &str {
        match *self {
            StaticTestName(s) => s,
            DynTestName(ref s) => s,
            AlignedTestName(ref s, _) => &*s,
        }
    }

    pub fn padding(&self) -> NamePadding {
        match self {
            &AlignedTestName(_, p) => p,
            _ => PadNone,
        }
    }

    pub fn with_padding(&self, padding: NamePadding) -> TestName {
        let name = match self {
            &TestName::StaticTestName(name) => Cow::Borrowed(name),
            &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
            &TestName::AlignedTestName(ref name, _) => name.clone(),
        };

        TestName::AlignedTestName(name, padding)
    }
}
impl fmt::Display for TestName {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self.as_slice(), f)
    }
}

/// Represents a benchmark function.
pub trait TDynBenchFn: Send {
    fn run(&self, harness: &mut Bencher);
}

// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function panics then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into threads.
pub enum TestFn {
    StaticTestFn(fn()),
    StaticBenchFn(fn(&mut Bencher)),
    DynTestFn(Box<dyn FnOnce() + Send>),
    DynBenchFn(Box<dyn TDynBenchFn + 'static>),
}

impl TestFn {
    pub fn padding(&self) -> NamePadding {
        match *self {
            StaticTestFn(..) => PadNone,
            StaticBenchFn(..) => PadOnRight,
            DynTestFn(..) => PadNone,
            DynBenchFn(..) => PadOnRight,
        }
    }
}

impl fmt::Debug for TestFn {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match *self {
            StaticTestFn(..) => "StaticTestFn(..)",
            StaticBenchFn(..) => "StaticBenchFn(..)",
            DynTestFn(..) => "DynTestFn(..)",
            DynBenchFn(..) => "DynBenchFn(..)",
        })
    }
}

// The definition of a single test. A test runner will run a list of
// these.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct TestDesc {
    pub name: TestName,
    pub ignore: bool,
    pub should_panic: options::ShouldPanic,
    pub allow_fail: bool,
    pub test_type: TestType,
}

impl TestDesc {
    pub fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
        let mut name = String::from(self.name.as_slice());
        let fill = column_count.saturating_sub(name.len());
        let pad = " ".repeat(fill);
        match align {
            PadNone => name,
            PadOnRight => {
                name.push_str(&pad);
                name
            }
        }
    }
}

#[derive(Debug)]
pub struct TestDescAndFn {
    pub desc: TestDesc,
    pub testfn: TestFn,
}
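A sketch (not in the diff) of the padding behavior used when aligning test names in output:

let desc = TestDesc {
    name: TestName::StaticTestName("a::b"),
    ignore: false,
    should_panic: options::ShouldPanic::No,
    allow_fail: false,
    test_type: TestType::UnitTest,
};
assert_eq!(desc.padded_name(8, NamePadding::PadOnRight), "a::b    ");
assert_eq!(desc.padded_name(8, NamePadding::PadNone), "a::b");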