From 4d5052203d200474b1a9aacbb0d59666a576ee16 Mon Sep 17 00:00:00 2001
From: Igor Aleksanov
Date: Thu, 17 Oct 2019 09:47:43 +0300
Subject: [PATCH] Split libtest into several smaller modules

---
 src/libtest/bench.rs               |  251 +++
 src/libtest/cli.rs                 |  384 ++++
 src/libtest/formatters/json.rs     |    4 +-
 src/libtest/formatters/mod.rs      |    2 +-
 src/libtest/formatters/pretty.rs   |   10 +-
 src/libtest/formatters/terse.rs    |    4 +-
 src/libtest/helpers/concurrency.rs |  153 +++
 src/libtest/helpers/isatty.rs      |   33 +
 src/libtest/helpers/metrics.rs     |   50 +
 src/libtest/helpers/mod.rs         |    6 +
 src/libtest/lib.rs                 | 1417 +---------------------------
 src/libtest/options.rs             |   80 ++
 src/libtest/test_result.rs         |  102 ++
 src/libtest/time.rs                |  206 ++++
 src/libtest/types.rs               |  145 +++
 15 files changed, 1462 insertions(+), 1385 deletions(-)
 create mode 100644 src/libtest/bench.rs
 create mode 100644 src/libtest/cli.rs
 create mode 100644 src/libtest/helpers/concurrency.rs
 create mode 100644 src/libtest/helpers/isatty.rs
 create mode 100644 src/libtest/helpers/metrics.rs
 create mode 100644 src/libtest/helpers/mod.rs
 create mode 100644 src/libtest/options.rs
 create mode 100644 src/libtest/test_result.rs
 create mode 100644 src/libtest/time.rs
 create mode 100644 src/libtest/types.rs

diff --git a/src/libtest/bench.rs b/src/libtest/bench.rs
new file mode 100644
index 00000000000..055a74f691c
--- /dev/null
+++ b/src/libtest/bench.rs
@@ -0,0 +1,251 @@
+//! Benchmarking module.
+use super::{
+    BenchMode, MonitorMsg, Sender, Sink, TestDesc, TestResult
+};
+
+use crate::stats;
+use std::time::{Duration, Instant};
+use std::cmp;
+use std::io;
+use std::panic::{catch_unwind, AssertUnwindSafe};
+use std::sync::{Arc, Mutex};
+use std::hint::black_box;
+
+/// Manager of the benchmarking runs.
+///
+/// This is fed into functions marked with `#[bench]` to allow for
+/// set-up & tear-down before running a piece of code repeatedly via a
+/// call to `iter`.
+#[derive(Clone)]
+pub struct Bencher {
+    mode: BenchMode,
+    summary: Option<stats::Summary>,
+    pub bytes: u64,
+}
+
+impl Bencher {
+    /// Callback for benchmark functions to run in their body.
+    pub fn iter<T, F>(&mut self, mut inner: F)
+    where
+        F: FnMut() -> T,
+    {
+        if self.mode == BenchMode::Single {
+            ns_iter_inner(&mut inner, 1);
+            return;
+        }
+
+        self.summary = Some(iter(&mut inner));
+    }
+
+    pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
+    where
+        F: FnMut(&mut Bencher),
+    {
+        f(self);
+        return self.summary;
+    }
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub struct BenchSamples {
+    pub ns_iter_summ: stats::Summary,
+    pub mb_s: usize,
+}
+
+pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
+    use std::fmt::Write;
+    let mut output = String::new();
+
+    let median = bs.ns_iter_summ.median as usize;
+    let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
+
+    output
+        .write_fmt(format_args!(
+            "{:>11} ns/iter (+/- {})",
+            fmt_thousands_sep(median, ','),
+            fmt_thousands_sep(deviation, ',')
+        ))
+        .unwrap();
+    if bs.mb_s != 0 {
+        output
+            .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
+            .unwrap();
+    }
+    output
+}
+
+// Format a number with thousands separators
+fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
+    use std::fmt::Write;
+    let mut output = String::new();
+    let mut trailing = false;
+    for &pow in &[9, 6, 3, 0] {
+        let base = 10_usize.pow(pow);
+        if pow == 0 || trailing || n / base != 0 {
+            if !trailing {
+                output.write_fmt(format_args!("{}", n / base)).unwrap();
+            } else {
+                output.write_fmt(format_args!("{:03}", n / base)).unwrap();
+            }
+            if pow != 0 {
+                output.push(sep);
+            }
+            trailing = true;
+        }
+        n %= base;
+    }
+
+    output
+}
+
+fn ns_from_dur(dur: Duration) -> u64 {
+    dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
+}
+
+fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
+where
+    F: FnMut() -> T,
+{
+    let start = Instant::now();
+    for _ in 0..k {
+        black_box(inner());
+    }
+    return ns_from_dur(start.elapsed());
+}
+
+pub fn iter<T, F>(inner: &mut F) -> stats::Summary
+where
+    F: FnMut() -> T,
+{
+    // Initial bench run to get ballpark figure.
+    let ns_single = ns_iter_inner(inner, 1);
+
+    // Try to estimate iter count for 1ms falling back to 1m
+    // iterations if first run took < 1ns.
+    let ns_target_total = 1_000_000; // 1ms
+    let mut n = ns_target_total / cmp::max(1, ns_single);
+
+    // if the first run took more than 1ms we don't want to just
+    // be left doing 0 iterations on every loop. The unfortunate
+    // side effect of not being able to do as many runs is
+    // automatically handled by the statistical analysis below
+    // (i.e., larger error bars).
+    n = cmp::max(1, n);
+
+    let mut total_run = Duration::new(0, 0);
+    let samples: &mut [f64] = &mut [0.0_f64; 50];
+    loop {
+        let loop_start = Instant::now();
+
+        for p in &mut *samples {
+            *p = ns_iter_inner(inner, n) as f64 / n as f64;
+        }
+
+        stats::winsorize(samples, 5.0);
+        let summ = stats::Summary::new(samples);
+
+        for p in &mut *samples {
+            let ns = ns_iter_inner(inner, 5 * n);
+            *p = ns as f64 / (5 * n) as f64;
+        }
+
+        stats::winsorize(samples, 5.0);
+        let summ5 = stats::Summary::new(samples);
+
+        let loop_run = loop_start.elapsed();
+
+        // If we've run for 100ms and seem to have converged to a
+        // stable median.
+        if loop_run > Duration::from_millis(100)
+            && summ.median_abs_dev_pct < 1.0
+            && summ.median - summ5.median < summ5.median_abs_dev
+        {
+            return summ5;
+        }
+
+        total_run = total_run + loop_run;
+        // Longest we ever run for is 3s.
+        if total_run > Duration::from_secs(3) {
+            return summ5;
+        }
+
+        // If we overflow here just return the results so far. We check a
+        // multiplier of 10 because we're about to multiply by 2 and the
+        // next iteration of the loop will also multiply by 5 (to calculate
+        // the summ5 result)
+        n = match n.checked_mul(10) {
+            Some(_) => n * 2,
+            None => {
+                return summ5;
+            }
+        };
+    }
+}
+
+pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
+where
+    F: FnMut(&mut Bencher),
+{
+    let mut bs = Bencher {
+        mode: BenchMode::Auto,
+        summary: None,
+        bytes: 0,
+    };
+
+    let data = Arc::new(Mutex::new(Vec::new()));
+    let oldio = if !nocapture {
+        Some((
+            io::set_print(Some(Box::new(Sink(data.clone())))),
+            io::set_panic(Some(Box::new(Sink(data.clone())))),
+        ))
+    } else {
+        None
+    };
+
+    let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
+
+    if let Some((printio, panicio)) = oldio {
+        io::set_print(printio);
+        io::set_panic(panicio);
+    }
+
+    let test_result = match result {
+        //bs.bench(f) {
+        Ok(Some(ns_iter_summ)) => {
+            let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
+            let mb_s = bs.bytes * 1000 / ns_iter;
+
+            let bs = BenchSamples {
+                ns_iter_summ,
+                mb_s: mb_s as usize,
+            };
+            TestResult::TrBench(bs)
+        }
+        Ok(None) => {
+            // iter not called, so no data.
+            // FIXME: error in this case?
+            let samples: &mut [f64] = &mut [0.0_f64; 1];
+            let bs = BenchSamples {
+                ns_iter_summ: stats::Summary::new(samples),
+                mb_s: 0,
+            };
+            TestResult::TrBench(bs)
+        }
+        Err(_) => TestResult::TrFailed,
+    };
+
+    let stdout = data.lock().unwrap().to_vec();
+    monitor_ch.send((desc, test_result, None, stdout)).unwrap();
+}
+
+pub fn run_once<F>(f: F)
+where
+    F: FnMut(&mut Bencher),
+{
+    let mut bs = Bencher {
+        mode: BenchMode::Single,
+        summary: None,
+        bytes: 0,
+    };
+    bs.bench(f);
+}
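For orientation, here is a minimal usage sketch (not part of the patch) of the `Bencher` API that bench.rs now owns; it assumes a nightly toolchain where the `test` crate exposes this module:

    #![feature(test)]
    extern crate test;
    use test::Bencher;

    // `iter` is what feeds ns_iter_inner/iter above; setting `bytes`
    // is what makes fmt_bench_samples append a MB/s figure.
    #[bench]
    fn bench_sum(b: &mut Bencher) {
        let v: Vec<u64> = (0..1024).collect();
        b.bytes = (v.len() * std::mem::size_of::<u64>()) as u64;
        b.iter(|| v.iter().sum::<u64>());
    }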
diff --git a/src/libtest/cli.rs b/src/libtest/cli.rs
new file mode 100644
index 00000000000..b35193701d6
--- /dev/null
+++ b/src/libtest/cli.rs
@@ -0,0 +1,384 @@
+//! Module converting command-line arguments into test configuration.
+
+use std::env;
+use std::path::PathBuf;
+use getopts;
+
+use super::options::{RunIgnored, ColorConfig, OutputFormat, Options};
+use super::time::TestTimeOptions;
+use super::helpers::isatty;
+
+#[derive(Debug)]
+pub struct TestOpts {
+    pub list: bool,
+    pub filter: Option<String>,
+    pub filter_exact: bool,
+    pub exclude_should_panic: bool,
+    pub run_ignored: RunIgnored,
+    pub run_tests: bool,
+    pub bench_benchmarks: bool,
+    pub logfile: Option<PathBuf>,
+    pub nocapture: bool,
+    pub color: ColorConfig,
+    pub format: OutputFormat,
+    pub test_threads: Option<usize>,
+    pub skip: Vec<String>,
+    pub time_options: Option<TestTimeOptions>,
+    pub options: Options,
+}
+
+impl TestOpts {
+    pub fn use_color(&self) -> bool {
+        match self.color {
+            ColorConfig::AutoColor => !self.nocapture && isatty::stdout_isatty(),
+            ColorConfig::AlwaysColor => true,
+            ColorConfig::NeverColor => false,
+        }
+    }
+}
+
+/// Result of parsing the options.
+pub type OptRes = Result<TestOpts, String>;
+/// Result of parsing the option part.
+type OptPartRes<T> = Result<Option<T>, String>;
+
+fn optgroups() -> getopts::Options {
+    let mut opts = getopts::Options::new();
+    opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
+        .optflag("", "ignored", "Run only ignored tests")
+        .optflag("", "exclude-should-panic", "Excludes tests marked as should_panic")
+        .optflag("", "test", "Run tests and not benchmarks")
+        .optflag("", "bench", "Run benchmarks instead of tests")
+        .optflag("", "list", "List all tests and benchmarks")
+        .optflag("h", "help", "Display this message (longer with --help)")
+        .optopt(
+            "",
+            "logfile",
+            "Write logs to the specified file instead \
+             of stdout",
+            "PATH",
+        )
+        .optflag(
+            "",
+            "nocapture",
+            "don't capture stdout/stderr of each \
+             task, allow printing directly",
+        )
+        .optopt(
+            "",
+            "test-threads",
+            "Number of threads used for running tests \
+             in parallel",
+            "n_threads",
+        )
+        .optmulti(
+            "",
+            "skip",
+            "Skip tests whose names contain FILTER (this flag can \
+             be used multiple times)",
+            "FILTER",
+        )
+        .optflag(
+            "q",
+            "quiet",
+            "Display one character per test instead of one line. \
+             Alias to --format=terse",
+        )
+        .optflag(
+            "",
+            "exact",
+            "Exactly match filters rather than by substring",
+        )
+        .optopt(
+            "",
+            "color",
+            "Configure coloring of output:
+            auto   = colorize if stdout is a tty and tests are run serially (default);
+            always = always colorize output;
+            never  = never colorize output;",
+            "auto|always|never",
+        )
+        .optopt(
+            "",
+            "format",
+            "Configure formatting of output:
+            pretty = Print verbose output;
+            terse  = Display one character per test;
+            json   = Output a json document",
+            "pretty|terse|json",
+        )
+        .optflag(
+            "",
+            "show-output",
+            "Show captured stdout of successful tests"
+        )
+        .optopt(
+            "Z",
+            "",
+            "Enable nightly-only flags:
+            unstable-options = Allow use of experimental features",
+            "unstable-options",
+        )
+        .optflagopt(
+            "",
+            "report-time",
+            "Show execution time of each test. Available values:
+            plain   = do not colorize the execution time (default);
+            colored = colorize output according to the `color` parameter value;
+
+            Threshold values for colorized output can be configured via
+            `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and
+            `RUST_TEST_TIME_DOCTEST` environment variables.
+
+            Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`.
+
+            Not available for --format=terse",
+            "plain|colored"
+        )
+        .optflag(
+            "",
+            "ensure-time",
+            "Treat excess of the test execution time limit as error.
+
+            Threshold values for this option can be configured via
+            `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and
+            `RUST_TEST_TIME_DOCTEST` environment variables.
+
+            Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`.
+
+            `CRITICAL_TIME` here means the limit that should not be exceeded by test.
+            "
+        );
+    return opts;
+}
+
+fn usage(binary: &str, options: &getopts::Options) {
+    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
+    println!(
+        r#"{usage}
+
+The FILTER string is tested against the name of all tests, and only those
+tests whose names contain the filter are run.
+
+By default, all tests are run in parallel. This can be altered with the
+--test-threads flag or the RUST_TEST_THREADS environment variable when running
+tests (set it to 1).
+
+All tests have their standard output and standard error captured by default.
+This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
+environment variable to a value other than "0". Logging is not captured by default.
+
+Test Attributes:
+
+    `#[test]`        - Indicates a function is a test to be run. This function
+                       takes no arguments.
+    `#[bench]`       - Indicates a function is a benchmark to be run. This
+                       function takes one argument (test::Bencher).
+    `#[should_panic]` - This function (also labeled with `#[test]`) will only pass if
+                       the code causes a panic (an assertion failure or panic!)
+                       A message may be provided, which the failure string must
+                       contain: #[should_panic(expected = "foo")].
+    `#[ignore]`      - When applied to a function which is already attributed as a
+                       test, then the test runner will ignore these tests during
+                       normal test runs. Running with --ignored or --include-ignored will run
+                       these tests."#,
+        usage = options.usage(&message)
+    );
+}
+
+// FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
+fn is_nightly() -> bool {
+    // Whether this is a feature-staged build, i.e., on the beta or stable channel
+    let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
+    // Whether we should enable unstable features for bootstrapping
+    let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();
+
+    bootstrap || !disable_unstable_features
+}
+
+// Gets the option value and checks if unstable features are enabled.
+macro_rules! unstable_optflag {
+    ($matches:ident, $allow_unstable:ident, $option_name:literal) => {{
+        let opt = $matches.opt_present($option_name);
+        if !$allow_unstable && opt {
+            return Some(Err(format!(
+                "The \"{}\" flag is only accepted on the nightly compiler",
+                $option_name
+            )));
+        }
+
+        opt
+    }};
+}
+
+// Gets the CLI options associated with the `report-time` feature.
+fn get_time_options(
+    matches: &getopts::Matches,
+    allow_unstable: bool)
+-> Option<OptPartRes<TestTimeOptions>> {
+    let report_time = unstable_optflag!(matches, allow_unstable, "report-time");
+    let colored_opt_str = matches.opt_str("report-time");
+    let mut report_time_colored = report_time && colored_opt_str == Some("colored".into());
+    let ensure_test_time = unstable_optflag!(matches, allow_unstable, "ensure-time");
+
+    // If the `ensure-time` option is provided, time output is enforced,
+    // so the user won't be confused if any of the tests fail silently.
+    let options = if report_time || ensure_test_time {
+        if ensure_test_time && !report_time {
+            report_time_colored = true;
+        }
+        Some(TestTimeOptions::new_from_env(ensure_test_time, report_time_colored))
+    } else {
+        None
+    };
+
+    Some(Ok(options))
+}
+
+// Parses command line arguments into test options
+pub fn parse_opts(args: &[String]) -> Option<OptRes> {
+    let mut allow_unstable = false;
+    let opts = optgroups();
+    let args = args.get(1..).unwrap_or(args);
+    let matches = match opts.parse(args) {
+        Ok(m) => m,
+        Err(f) => return Some(Err(f.to_string())),
+    };
+
+    if let Some(opt) = matches.opt_str("Z") {
+        if !is_nightly() {
+            return Some(Err(
+                "the option `Z` is only accepted on the nightly compiler".into(),
+            ));
+        }
+
+        match &*opt {
+            "unstable-options" => {
+                allow_unstable = true;
+            }
+            _ => {
+                return Some(Err("Unrecognized option to `Z`".into()));
+            }
+        }
+    };
+
+    if matches.opt_present("h") {
+        usage(&args[0], &opts);
+        return None;
+    }
+
+    let filter = if !matches.free.is_empty() {
+        Some(matches.free[0].clone())
+    } else {
+        None
+    };
+
+    let exclude_should_panic = unstable_optflag!(matches, allow_unstable, "exclude-should-panic");
+
+    let include_ignored = unstable_optflag!(matches, allow_unstable, "include-ignored");
+
+    let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
+        (true, true) => {
+            return Some(Err(
+                "the options --include-ignored and --ignored are mutually exclusive".into(),
+            ));
+        }
+        (true, false) => RunIgnored::Yes,
+        (false, true) => RunIgnored::Only,
+        (false, false) => RunIgnored::No,
+    };
+    let quiet = matches.opt_present("quiet");
+    let exact = matches.opt_present("exact");
+    let list = matches.opt_present("list");
+
+    let logfile = matches.opt_str("logfile");
+    let logfile = logfile.map(|s| PathBuf::from(&s));
+
+    let bench_benchmarks = matches.opt_present("bench");
+    let run_tests = !bench_benchmarks || matches.opt_present("test");
+
+    let mut nocapture = matches.opt_present("nocapture");
+    if !nocapture {
+        nocapture = match env::var("RUST_TEST_NOCAPTURE") {
+            Ok(val) => &val != "0",
+            Err(_) => false,
+        };
+    }
+
+    let time_options = match get_time_options(&matches, allow_unstable) {
+        Some(Ok(val)) => val,
+        Some(Err(e)) => return Some(Err(e)),
+        None => panic!("Unexpected output from `get_time_options`"),
+    };
+
+    let test_threads = match matches.opt_str("test-threads") {
+        Some(n_str) => match n_str.parse::<usize>() {
+            Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
+            Ok(n) => Some(n),
+            Err(e) => {
+                return Some(Err(format!(
+                    "argument for --test-threads must be a number > 0 \
+                     (error: {})",
+                    e
+                )));
+            }
+        },
+        None => None,
+    };
+
+    let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
+        Some("auto") | None => ColorConfig::AutoColor,
+        Some("always") => ColorConfig::AlwaysColor,
+        Some("never") => ColorConfig::NeverColor,
+
+        Some(v) => {
+            return Some(Err(format!(
+                "argument for --color must be auto, always, or never (was \
+                 {})",
+                v
+            )));
+        }
+    };
+
+    let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
+        None if quiet => OutputFormat::Terse,
+        Some("pretty") | None => OutputFormat::Pretty,
+        Some("terse") => OutputFormat::Terse,
+        Some("json") => {
+            if !allow_unstable {
+                return Some(Err(
+                    "The \"json\" format is only accepted on the nightly compiler".into(),
+                ));
+            }
+            OutputFormat::Json
+        }
+
+        Some(v) => {
+            return Some(Err(format!(
+                "argument for --format must be pretty, terse, or json (was \
+                 {})",
+                v
+            )));
+        }
+    };
+
+    let test_opts = TestOpts {
+        list,
+        filter,
+        filter_exact: exact,
+        exclude_should_panic,
+        run_ignored,
+        run_tests,
+        bench_benchmarks,
+        logfile,
+        nocapture,
+        color,
+        format,
+        test_threads,
+        skip: matches.opt_strs("skip"),
+        time_options,
+        options: Options::new().display_output(matches.opt_present("show-output")),
+    };
+
+    Some(Ok(test_opts))
+}
diff --git a/src/libtest/formatters/json.rs b/src/libtest/formatters/json.rs
index dcd733620bf..ff756c456da 100644
--- a/src/libtest/formatters/json.rs
+++ b/src/libtest/formatters/json.rs
@@ -27,7 +27,7 @@ fn write_event(
         ty: &str,
         name: &str,
         evt: &str,
-        exec_time: Option<&TestExecTime>,
+        exec_time: Option<&time::TestExecTime>,
         stdout: Option<Cow<'_, str>>,
         extra: Option<&str>,
     ) -> io::Result<()> {
@@ -76,7 +76,7 @@ fn write_result(
         &mut self,
         desc: &TestDesc,
         result: &TestResult,
-        exec_time: Option<&TestExecTime>,
+        exec_time: Option<&time::TestExecTime>,
         stdout: &[u8],
         state: &ConsoleTestState,
     ) -> io::Result<()> {
diff --git a/src/libtest/formatters/mod.rs b/src/libtest/formatters/mod.rs
index dd202fb3ab6..72432cd8e3c 100644
--- a/src/libtest/formatters/mod.rs
+++ b/src/libtest/formatters/mod.rs
@@ -16,7 +16,7 @@ fn write_result(
         &mut self,
         desc: &TestDesc,
         result: &TestResult,
-        exec_time: Option<&TestExecTime>,
+        exec_time: Option<&time::TestExecTime>,
         stdout: &[u8],
         state: &ConsoleTestState,
     ) -> io::Result<()>;
diff --git a/src/libtest/formatters/pretty.rs b/src/libtest/formatters/pretty.rs
index 2935b4c99ce..84e1a44dab8 100644
--- a/src/libtest/formatters/pretty.rs
+++ b/src/libtest/formatters/pretty.rs
@@ -3,7 +3,7 @@
 pub(crate) struct PrettyFormatter<T> {
     out: OutputLocation<T>,
     use_color: bool,
-    time_options: Option<TestTimeOptions>,
+    time_options: Option<time::TestTimeOptions>,
 
     /// Number of columns to fill when aligning names
     max_name_len: usize,
@@ -17,7 +17,7 @@ pub fn new(
         use_color: bool,
         max_name_len: usize,
         is_multithreaded: bool,
-        time_options: Option<TestTimeOptions>,
+        time_options: Option<time::TestTimeOptions>,
     ) -> Self {
         PrettyFormatter {
             out,
@@ -93,7 +93,7 @@ pub fn write_plain<S: AsRef<str>>(&mut self, s: S) -> io::Result<()> {
     fn write_time(
         &mut self,
         desc: &TestDesc,
-        exec_time: Option<&TestExecTime>
+        exec_time: Option<&time::TestExecTime>
     ) -> io::Result<()> {
         if let (Some(opts), Some(time)) = (self.time_options, exec_time) {
             let time_str = format!(" <{}>", time);
@@ -194,7 +194,7 @@ fn write_result(
         &mut self,
         desc: &TestDesc,
         result: &TestResult,
-        exec_time: Option<&TestExecTime>,
+        exec_time: Option<&time::TestExecTime>,
         _: &[u8],
         _: &ConsoleTestState,
     ) -> io::Result<()> {
@@ -225,7 +225,7 @@ fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
         self.write_plain(&format!(
             "test {} has been running for over {} seconds\n",
-            desc.name, TEST_WARN_TIMEOUT_S
+            desc.name, time::TEST_WARN_TIMEOUT_S
         ))
     }
diff --git a/src/libtest/formatters/terse.rs b/src/libtest/formatters/terse.rs
index 8914e7b6b56..50407d1130f 100644
--- a/src/libtest/formatters/terse.rs
+++ b/src/libtest/formatters/terse.rs
@@ -174,7 +174,7 @@ fn write_result(
         &mut self,
         desc: &TestDesc,
         result: &TestResult,
-        _: Option<&TestExecTime>,
+        _: Option<&time::TestExecTime>,
         _: &[u8],
         _: &ConsoleTestState,
     ) -> io::Result<()> {
@@ -196,7 +196,7 @@ fn write_result(
     fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
         self.write_plain(&format!(
             "test {} has been running for over {} seconds\n",
-            desc.name, TEST_WARN_TIMEOUT_S
+            desc.name, time::TEST_WARN_TIMEOUT_S
         ))
     }
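A sketch of how the relocated `cli::parse_opts` entry point is consumed, mirroring what `test_main` does in lib.rs; the `opts_from_args` wrapper is hypothetical:

    // Hypothetical helper: parse_opts returns None when --help was printed,
    // Some(Err(msg)) for malformed flags, and Some(Ok(opts)) otherwise.
    fn opts_from_args(args: &[String]) -> Option<crate::cli::TestOpts> {
        match crate::cli::parse_opts(args) {
            Some(Ok(opts)) => Some(opts),
            Some(Err(msg)) => panic!("error: {}", msg),
            None => None,
        }
    }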
diff --git a/src/libtest/helpers/concurrency.rs b/src/libtest/helpers/concurrency.rs
new file mode 100644
index 00000000000..f0292c2d2c7
--- /dev/null
+++ b/src/libtest/helpers/concurrency.rs
@@ -0,0 +1,153 @@
+//! Helper module which helps to determine the number of threads
+//! to be used during test execution.
+use std::env;
+
+#[cfg(any(unix, target_os = "cloudabi"))]
+use libc;
+
+#[allow(deprecated)]
+pub fn get_concurrency() -> usize {
+    return match env::var("RUST_TEST_THREADS") {
+        Ok(s) => {
+            let opt_n: Option<usize> = s.parse().ok();
+            match opt_n {
+                Some(n) if n > 0 => n,
+                _ => panic!(
+                    "RUST_TEST_THREADS is `{}`, should be a positive integer.",
+                    s
+                ),
+            }
+        }
+        Err(..) => num_cpus(),
+    };
+
+    #[cfg(windows)]
+    #[allow(nonstandard_style)]
+    fn num_cpus() -> usize {
+        #[repr(C)]
+        struct SYSTEM_INFO {
+            wProcessorArchitecture: u16,
+            wReserved: u16,
+            dwPageSize: u32,
+            lpMinimumApplicationAddress: *mut u8,
+            lpMaximumApplicationAddress: *mut u8,
+            dwActiveProcessorMask: *mut u8,
+            dwNumberOfProcessors: u32,
+            dwProcessorType: u32,
+            dwAllocationGranularity: u32,
+            wProcessorLevel: u16,
+            wProcessorRevision: u16,
+        }
+        extern "system" {
+            fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
+        }
+        unsafe {
+            let mut sysinfo = std::mem::zeroed();
+            GetSystemInfo(&mut sysinfo);
+            sysinfo.dwNumberOfProcessors as usize
+        }
+    }
+
+    #[cfg(target_os = "vxworks")]
+    fn num_cpus() -> usize {
+        // FIXME: Implement num_cpus on vxWorks
+        1
+    }
+
+    #[cfg(target_os = "redox")]
+    fn num_cpus() -> usize {
+        // FIXME: Implement num_cpus on Redox
+        1
+    }
+
+    #[cfg(any(
+        all(target_arch = "wasm32", not(target_os = "emscripten")),
+        all(target_vendor = "fortanix", target_env = "sgx")
+    ))]
+    fn num_cpus() -> usize {
+        1
+    }
+
+    #[cfg(any(
+        target_os = "android",
+        target_os = "cloudabi",
+        target_os = "emscripten",
+        target_os = "fuchsia",
+        target_os = "ios",
+        target_os = "linux",
+        target_os = "macos",
+        target_os = "solaris",
+    ))]
+    fn num_cpus() -> usize {
+        unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
+    }
+
+    #[cfg(any(
+        target_os = "freebsd",
+        target_os = "dragonfly",
+        target_os = "netbsd"
+    ))]
+    fn num_cpus() -> usize {
+        use std::ptr;
+
+        let mut cpus: libc::c_uint = 0;
+        let mut cpus_size = std::mem::size_of_val(&cpus);
+
+        unsafe {
+            cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
+        }
+        if cpus < 1 {
+            let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
+            unsafe {
+                libc::sysctl(
+                    mib.as_mut_ptr(),
+                    2,
+                    &mut cpus as *mut _ as *mut _,
+                    &mut cpus_size as *mut _ as *mut _,
+                    ptr::null_mut(),
+                    0,
+                );
+            }
+            if cpus < 1 {
+                cpus = 1;
+            }
+        }
+        cpus as usize
+    }
+
+    #[cfg(target_os = "openbsd")]
+    fn num_cpus() -> usize {
+        use std::ptr;
+
+        let mut cpus: libc::c_uint = 0;
+        let mut cpus_size = std::mem::size_of_val(&cpus);
+        let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
+
+        unsafe {
+            libc::sysctl(
+                mib.as_mut_ptr(),
+                2,
+                &mut cpus as *mut _ as *mut _,
+                &mut cpus_size as *mut _ as *mut _,
+                ptr::null_mut(),
+                0,
+            );
+        }
+        if cpus < 1 {
+            cpus = 1;
+        }
+        cpus as usize
+    }
+
+    #[cfg(target_os = "haiku")]
+    fn num_cpus() -> usize {
+        // FIXME: implement
+        1
+    }
+
+    #[cfg(target_os = "l4re")]
+    fn num_cpus() -> usize {
+        // FIXME: implement
+        1
+    }
+}
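Illustrative of the precedence `get_concurrency` implements — the environment variable wins over the per-platform `num_cpus` fallback; a sketch, assuming it runs in isolation:

    #[test]
    fn concurrency_env_override() {
        // A positive RUST_TEST_THREADS overrides num_cpus().
        std::env::set_var("RUST_TEST_THREADS", "4");
        assert_eq!(crate::helpers::concurrency::get_concurrency(), 4);
    }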
diff --git a/src/libtest/helpers/isatty.rs b/src/libtest/helpers/isatty.rs
new file mode 100644
index 00000000000..638328aea18
--- /dev/null
+++ b/src/libtest/helpers/isatty.rs
@@ -0,0 +1,33 @@
+//! Helper module which provides a function to test
+//! if stdout is a tty.
+
+#[cfg(any(
+    target_os = "cloudabi",
+    all(target_arch = "wasm32", not(target_os = "emscripten")),
+    all(target_vendor = "fortanix", target_env = "sgx")
+))]
+pub fn stdout_isatty() -> bool {
+    // FIXME: Implement isatty on SGX
+    false
+}
+#[cfg(unix)]
+pub fn stdout_isatty() -> bool {
+    unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
+}
+#[cfg(windows)]
+pub fn stdout_isatty() -> bool {
+    type DWORD = u32;
+    type BOOL = i32;
+    type HANDLE = *mut u8;
+    type LPDWORD = *mut u32;
+    const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
+    extern "system" {
+        fn GetStdHandle(which: DWORD) -> HANDLE;
+        fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
+    }
+    unsafe {
+        let handle = GetStdHandle(STD_OUTPUT_HANDLE);
+        let mut out = 0;
+        GetConsoleMode(handle, &mut out) != 0
+    }
+}
\ No newline at end of file
diff --git a/src/libtest/helpers/metrics.rs b/src/libtest/helpers/metrics.rs
new file mode 100644
index 00000000000..f77a23e6875
--- /dev/null
+++ b/src/libtest/helpers/metrics.rs
@@ -0,0 +1,50 @@
+//! Benchmark metrics.
+use std::collections::BTreeMap;
+
+#[derive(Clone, PartialEq, Debug, Copy)]
+pub struct Metric {
+    value: f64,
+    noise: f64,
+}
+
+impl Metric {
+    pub fn new(value: f64, noise: f64) -> Metric {
+        Metric { value, noise }
+    }
+}
+
+#[derive(Clone, PartialEq)]
+pub struct MetricMap(BTreeMap<String, Metric>);
+
+impl MetricMap {
+    pub fn new() -> MetricMap {
+        MetricMap(BTreeMap::new())
+    }
+
+    /// Insert a named `value` (+/- `noise`) metric into the map. The value
+    /// must be non-negative. The `noise` indicates the uncertainty of the
+    /// metric, which doubles as the "noise range" of acceptable
+    /// pairwise-regressions on this named value, when comparing from one
+    /// metric to the next using `compare_to_old`.
+    ///
+    /// If `noise` is positive, then it means this metric is of a value
+    /// you want to see grow smaller, so a change larger than `noise` in the
+    /// positive direction represents a regression.
+    ///
+    /// If `noise` is negative, then it means this metric is of a value
+    /// you want to see grow larger, so a change larger than `noise` in the
+    /// negative direction represents a regression.
+    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
+        let m = Metric { value, noise };
+        self.0.insert(name.to_owned(), m);
+    }
+
+    pub fn fmt_metrics(&self) -> String {
+        let v = self
+            .0
+            .iter()
+            .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
+            .collect::<Vec<String>>();
+        v.join(", ")
+    }
+}
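A round-trip through the relocated `MetricMap`, illustrating the `insert_metric` contract documented above (sketch only):

    #[test]
    fn metric_map_round_trip() {
        let mut mm = MetricMap::new();
        // Negative noise: this is a grow-larger metric, per the doc comment.
        mm.insert_metric("throughput", 123.0, -5.0);
        assert_eq!(mm.fmt_metrics(), "throughput: 123 (+/- -5)");
    }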
diff --git a/src/libtest/helpers/mod.rs b/src/libtest/helpers/mod.rs
new file mode 100644
index 00000000000..0bbe77b1c50
--- /dev/null
+++ b/src/libtest/helpers/mod.rs
@@ -0,0 +1,6 @@
+//! Module with common helpers not directly related to tests
+//! but used in `libtest`.
+
+pub mod concurrency;
+pub mod isatty;
+pub mod metrics;
diff --git a/src/libtest/lib.rs b/src/libtest/lib.rs
index 4c3cbeb4acc..f7999467111 100644
--- a/src/libtest/lib.rs
+++ b/src/libtest/lib.rs
@@ -30,33 +30,21 @@
 #![feature(termination_trait_lib)]
 #![feature(test)]
 
-use getopts;
-#[cfg(any(unix, target_os = "cloudabi"))]
-extern crate libc;
 use term;
 
 pub use self::ColorConfig::*;
-use self::NamePadding::*;
 use self::OutputLocation::*;
 use self::TestEvent::*;
-pub use self::TestFn::*;
-pub use self::TestName::*;
-pub use self::TestResult::*;
+pub use self::types::TestName::*;
 
-use std::any::Any;
 use std::borrow::Cow;
-use std::cmp;
-use std::collections::BTreeMap;
 use std::env;
-use std::fmt;
 use std::fs::File;
 use std::io;
 use std::io::prelude::*;
 use std::panic::{self, catch_unwind, AssertUnwindSafe, PanicInfo};
-use std::path::PathBuf;
 use std::process;
 use std::process::{ExitStatus, Command, Termination};
-use std::str::FromStr;
 use std::sync::mpsc::{channel, Sender};
 use std::sync::{Arc, Mutex};
 use std::thread;
@@ -65,277 +53,49 @@
 #[cfg(test)]
 mod tests;
 
-const TEST_WARN_TIMEOUT_S: u64 = 60;
 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
 
 const SECONDARY_TEST_INVOKER_VAR: &'static str = "__RUST_TEST_INVOKE";
 
-// Return codes for secondary process.
-// Start somewhere other than 0 so we know the return code means what we think
-// it means.
-const TR_OK: i32 = 50;
-const TR_FAILED: i32 = 51;
-
-/// This small module contains constants used by `report-time` option.
-/// Those constants values will be used if corresponding environment variables are not set.
-///
-/// To override values for unit-tests, use a constant `RUST_TEST_TIME_UNIT`,
-/// To override values for integration tests, use a constant `RUST_TEST_TIME_INTEGRATION`,
-/// To override values for doctests, use a constant `RUST_TEST_TIME_DOCTEST`.
-///
-/// Example of the expected format is `RUST_TEST_TIME_xxx=100,200`, where 100 means
-/// warn time, and 200 means critical time.
-pub mod time_constants {
-    use std::time::Duration;
-    use super::TEST_WARN_TIMEOUT_S;
-
-    /// Environment variable for overriding default threshold for unit-tests.
-    pub const UNIT_ENV_NAME: &str = "RUST_TEST_TIME_UNIT";
-
-    // Unit tests are supposed to be really quick.
-    pub const UNIT_WARN: Duration = Duration::from_millis(50);
-    pub const UNIT_CRITICAL: Duration = Duration::from_millis(100);
-
-    /// Environment variable for overriding default threshold for unit-tests.
-    pub const INTEGRATION_ENV_NAME: &str = "RUST_TEST_TIME_INTEGRATION";
-
-    // Integration tests may have a lot of work, so they can take longer to execute.
-    pub const INTEGRATION_WARN: Duration = Duration::from_millis(500);
-    pub const INTEGRATION_CRITICAL: Duration = Duration::from_millis(1000);
-
-    /// Environment variable for overriding default threshold for unit-tests.
-    pub const DOCTEST_ENV_NAME: &str = "RUST_TEST_TIME_DOCTEST";
-
-    // Doctests are similar to integration tests, because they can include a lot of
-    // initialization code.
-    pub const DOCTEST_WARN: Duration = INTEGRATION_WARN;
-    pub const DOCTEST_CRITICAL: Duration = INTEGRATION_CRITICAL;
-
-    // Do not suppose anything about unknown tests, base limits on the
-    // `TEST_WARN_TIMEOUT_S` constant.
-    pub const UNKNOWN_WARN: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S);
-    pub const UNKNOWN_CRITICAL: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S * 2);
-}
-
 // to be used by rustc to compile tests in libtest
 pub mod test {
     pub use crate::{
-        assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
-        Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, RunIgnored, RunStrategy,
-        ShouldPanic, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName,
-        TestOpts, TestTimeOptions, TestType, TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk,
+        bench::Bencher,
+        cli::{parse_opts, TestOpts},
+        helpers::metrics::{Metric, MetricMap},
+        options::{ShouldPanic, Options, RunIgnored, RunStrategy},
+        test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk},
+        time::TestTimeOptions,
+        types::{
+            DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn,
+            TestName, TestType,
+        },
+        assert_test_result, filter_tests, run_test, test_main, test_main_static,
     };
 }
 
+use bench::*;
+use test_result::*;
+use types::*;
+use options::*;
+use cli::*;
+
+use helpers::concurrency::get_concurrency;
+use helpers::metrics::MetricMap;
+
 mod formatters;
 pub mod stats;
+mod cli;
+mod helpers;
+mod time;
+mod types;
+mod options;
+mod bench;
+mod test_result;
 
 use crate::formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
 
-/// Whether to execute tests concurrently or not
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum Concurrent {
-    Yes,
-    No,
-}
-
-/// Type of the test according to the [rust book](https://doc.rust-lang.org/cargo/guide/tests.html)
-/// conventions.
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
-pub enum TestType {
-    /// Unit-tests are expected to be in the `src` folder of the crate.
-    UnitTest,
-    /// Integration-style tests are expected to be in the `tests` folder of the crate.
-    IntegrationTest,
-    /// Doctests are created by the `librustdoc` manually, so it's a different type of test.
-    DocTest,
-    /// Tests for the sources that don't follow the project layout convention
-    /// (e.g. tests in raw `main.rs` compiled by calling `rustc --test` directly).
-    Unknown,
-}
-
-// The name of a test. By convention this follows the rules for rust
-// paths; i.e., it should be a series of identifiers separated by double
-// colons. This way if some test runner wants to arrange the tests
-// hierarchically it may.
-
-#[derive(Clone, PartialEq, Eq, Hash, Debug)]
-pub enum TestName {
-    StaticTestName(&'static str),
-    DynTestName(String),
-    AlignedTestName(Cow<'static, str>, NamePadding),
-}
-impl TestName {
-    fn as_slice(&self) -> &str {
-        match *self {
-            StaticTestName(s) => s,
-            DynTestName(ref s) => s,
-            AlignedTestName(ref s, _) => &*s,
-        }
-    }
-
-    fn padding(&self) -> NamePadding {
-        match self {
-            &AlignedTestName(_, p) => p,
-            _ => PadNone,
-        }
-    }
-
-    fn with_padding(&self, padding: NamePadding) -> TestName {
-        let name = match self {
-            &TestName::StaticTestName(name) => Cow::Borrowed(name),
-            &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
-            &TestName::AlignedTestName(ref name, _) => name.clone(),
-        };
-
-        TestName::AlignedTestName(name, padding)
-    }
-}
-impl fmt::Display for TestName {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        fmt::Display::fmt(self.as_slice(), f)
-    }
-}
-
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-pub enum NamePadding {
-    PadNone,
-    PadOnRight,
-}
-
-impl TestDesc {
-    fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
-        let mut name = String::from(self.name.as_slice());
-        let fill = column_count.saturating_sub(name.len());
-        let pad = " ".repeat(fill);
-        match align {
-            PadNone => name,
-            PadOnRight => {
-                name.push_str(&pad);
-                name
-            }
-        }
-    }
-}
-
-/// Represents a benchmark function.
-pub trait TDynBenchFn: Send {
-    fn run(&self, harness: &mut Bencher);
-}
-
-// A function that runs a test. If the function returns successfully,
-// the test succeeds; if the function panics then the test fails. We
-// may need to come up with a more clever definition of test in order
-// to support isolation of tests into threads.
-pub enum TestFn {
-    StaticTestFn(fn()),
-    StaticBenchFn(fn(&mut Bencher)),
-    DynTestFn(Box<dyn FnOnce() + Send>),
-    DynBenchFn(Box<dyn TDynBenchFn + 'static>),
-}
-
-impl TestFn {
-    fn padding(&self) -> NamePadding {
-        match *self {
-            StaticTestFn(..) => PadNone,
-            StaticBenchFn(..) => PadOnRight,
-            DynTestFn(..) => PadNone,
-            DynBenchFn(..) => PadOnRight,
-        }
-    }
-}
-
-impl fmt::Debug for TestFn {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.write_str(match *self {
-            StaticTestFn(..) => "StaticTestFn(..)",
-            StaticBenchFn(..) => "StaticBenchFn(..)",
-            DynTestFn(..) => "DynTestFn(..)",
-            DynBenchFn(..) => "DynBenchFn(..)",
-        })
-    }
-}
-
-/// Manager of the benchmarking runs.
-///
-/// This is fed into functions marked with `#[bench]` to allow for
-/// set-up & tear-down before running a piece of code repeatedly via a
-/// call to `iter`.
-#[derive(Clone)]
-pub struct Bencher {
-    mode: BenchMode,
-    summary: Option<stats::Summary>,
-    pub bytes: u64,
-}
-
-#[derive(Clone, PartialEq, Eq)]
-pub enum BenchMode {
-    Auto,
-    Single,
-}
-
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
-pub enum ShouldPanic {
-    No,
-    Yes,
-    YesWithMessage(&'static str),
-}
-
-// The definition of a single test. A test runner will run a list of
-// these.
-#[derive(Clone, Debug, PartialEq, Eq, Hash)]
-pub struct TestDesc {
-    pub name: TestName,
-    pub ignore: bool,
-    pub should_panic: ShouldPanic,
-    pub allow_fail: bool,
-    pub test_type: TestType,
-}
-
-#[derive(Debug)]
-pub struct TestDescAndFn {
-    pub desc: TestDesc,
-    pub testfn: TestFn,
-}
-
-#[derive(Clone, PartialEq, Debug, Copy)]
-pub struct Metric {
-    value: f64,
-    noise: f64,
-}
-
-impl Metric {
-    pub fn new(value: f64, noise: f64) -> Metric {
-        Metric { value, noise }
-    }
-}
-
-/// In case we want to add other options as well, just add them in this struct.
-#[derive(Copy, Clone, Debug)]
-pub struct Options {
-    display_output: bool,
-    panic_abort: bool,
-}
-
-impl Options {
-    pub fn new() -> Options {
-        Options {
-            display_output: false,
-            panic_abort: false,
-        }
-    }
-
-    pub fn display_output(mut self, display_output: bool) -> Options {
-        self.display_output = display_output;
-        self
-    }
-
-    pub fn panic_abort(mut self, panic_abort: bool) -> Options {
-        self.panic_abort = panic_abort;
-        self
-    }
-}
-
 // The default console test runner. It accepts the command line
 // arguments and a vector of test_descs.
 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Options>) {
@@ -440,556 +200,6 @@ pub fn assert_test_result<T: Termination>(result: T) {
     );
 }
 
-#[derive(Copy, Clone, Debug)]
-pub enum ColorConfig {
-    AutoColor,
-    AlwaysColor,
-    NeverColor,
-}
-
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum OutputFormat {
-    Pretty,
-    Terse,
-    Json,
-}
-
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum RunIgnored {
-    Yes,
-    No,
-    Only,
-}
-
-/// Structure denoting time limits for test execution.
-#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
-pub struct TimeThreshold {
-    pub warn: Duration,
-    pub critical: Duration,
-}
-
-impl TimeThreshold {
-    /// Creates a new `TimeThreshold` instance with provided durations.
-    pub fn new(warn: Duration, critical: Duration) -> Self {
-        Self {
-            warn,
-            critical,
-        }
-    }
-
-    /// Attempts to create a `TimeThreshold` instance with values obtained
-    /// from the environment variable, and returns `None` if the variable
-    /// is not set.
-    /// Environment variable format is expected to match `\d+,\d+`.
-    ///
-    /// # Panics
-    ///
-    /// Panics if variable with provided name is set but contains inappropriate
-    /// value.
-    pub fn from_env_var(env_var_name: &str) -> Option<Self> {
-        let durations_str = env::var(env_var_name).ok()?;
-
-        // Split string into 2 substrings by comma and try to parse numbers.
-        let mut durations = durations_str
-            .splitn(2, ',')
-            .map(|v| {
-                u64::from_str(v).unwrap_or_else(|_| {
-                    panic!(
-                        "Duration value in variable {} is expected to be a number, but got {}",
-                        env_var_name, v
-                    )
-                })
-            });
-
-        // Callback to be called if the environment variable has unexpected structure.
-        let panic_on_incorrect_value = || {
-            panic!(
-                "Duration variable {} expected to have 2 numbers separated by comma, but got {}",
-                env_var_name, durations_str
-            );
-        };
-
-        let (warn, critical) = (
-            durations.next().unwrap_or_else(panic_on_incorrect_value),
-            durations.next().unwrap_or_else(panic_on_incorrect_value)
-        );
-
-        if warn > critical {
-            panic!("Test execution warn time should be less or equal to the critical time");
-        }
-
-        Some(Self::new(Duration::from_millis(warn), Duration::from_millis(critical)))
-    }
-}
-
-/// Structure with parameters for calculating test execution time.
-#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
-pub struct TestTimeOptions {
-    /// Denotes if the test critical execution time limit excess should be considered
-    /// a test failure.
-    pub error_on_excess: bool,
-    pub colored: bool,
-    pub unit_threshold: TimeThreshold,
-    pub integration_threshold: TimeThreshold,
-    pub doctest_threshold: TimeThreshold,
-}
-
-impl TestTimeOptions {
-    pub fn new_from_env(error_on_excess: bool, colored: bool) -> Self {
-        let unit_threshold =
-            TimeThreshold::from_env_var(time_constants::UNIT_ENV_NAME)
-                .unwrap_or_else(Self::default_unit);
-
-        let integration_threshold =
-            TimeThreshold::from_env_var(time_constants::INTEGRATION_ENV_NAME)
-                .unwrap_or_else(Self::default_integration);
-
-        let doctest_threshold =
-            TimeThreshold::from_env_var(time_constants::DOCTEST_ENV_NAME)
-                .unwrap_or_else(Self::default_doctest);
-
-        Self {
-            error_on_excess,
-            colored,
-            unit_threshold,
-            integration_threshold,
-            doctest_threshold,
-        }
-    }
-
-    pub fn is_warn(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool {
-        exec_time.0 >= self.warn_time(test)
-    }
-
-    pub fn is_critical(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool {
-        exec_time.0 >= self.critical_time(test)
-    }
-
-    fn warn_time(&self, test: &TestDesc) -> Duration {
-        match test.test_type {
-            TestType::UnitTest => self.unit_threshold.warn,
-            TestType::IntegrationTest => self.integration_threshold.warn,
-            TestType::DocTest => self.doctest_threshold.warn,
-            TestType::Unknown => time_constants::UNKNOWN_WARN,
-        }
-    }
-
-    fn critical_time(&self, test: &TestDesc) -> Duration {
-        match test.test_type {
-            TestType::UnitTest => self.unit_threshold.critical,
-            TestType::IntegrationTest => self.integration_threshold.critical,
-            TestType::DocTest => self.doctest_threshold.critical,
-            TestType::Unknown => time_constants::UNKNOWN_CRITICAL,
-        }
-    }
-
-    fn default_unit() -> TimeThreshold {
-        TimeThreshold::new(time_constants::UNIT_WARN, time_constants::UNIT_CRITICAL)
-    }
-
-    fn default_integration() -> TimeThreshold {
-        TimeThreshold::new(time_constants::INTEGRATION_WARN, time_constants::INTEGRATION_CRITICAL)
-    }
-
-    fn default_doctest() -> TimeThreshold {
-        TimeThreshold::new(time_constants::DOCTEST_WARN, time_constants::DOCTEST_CRITICAL)
-    }
-}
-
-#[derive(Debug)]
-pub struct TestOpts {
-    pub list: bool,
-    pub filter: Option<String>,
-    pub filter_exact: bool,
-    pub exclude_should_panic: bool,
-    pub run_ignored: RunIgnored,
-    pub run_tests: bool,
-    pub bench_benchmarks: bool,
-    pub logfile: Option<PathBuf>,
-    pub nocapture: bool,
-    pub color: ColorConfig,
-    pub format: OutputFormat,
-    pub test_threads: Option<usize>,
-    pub skip: Vec<String>,
-    pub time_options: Option<TestTimeOptions>,
-    pub options: Options,
-}
-
-/// Result of parsing the options.
-pub type OptRes = Result<TestOpts, String>;
-/// Result of parsing the option part.
-type OptPartRes<T> = Result<Option<T>, String>;
-
-fn optgroups() -> getopts::Options {
-    let mut opts = getopts::Options::new();
-    opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
-        .optflag("", "ignored", "Run only ignored tests")
-        .optflag("", "exclude-should-panic", "Excludes tests marked as should_panic")
-        .optflag("", "test", "Run tests and not benchmarks")
-        .optflag("", "bench", "Run benchmarks instead of tests")
-        .optflag("", "list", "List all tests and benchmarks")
-        .optflag("h", "help", "Display this message (longer with --help)")
-        .optopt(
-            "",
-            "logfile",
-            "Write logs to the specified file instead \
-             of stdout",
-            "PATH",
-        )
-        .optflag(
-            "",
-            "nocapture",
-            "don't capture stdout/stderr of each \
-             task, allow printing directly",
-        )
-        .optopt(
-            "",
-            "test-threads",
-            "Number of threads used for running tests \
-             in parallel",
-            "n_threads",
-        )
-        .optmulti(
-            "",
-            "skip",
-            "Skip tests whose names contain FILTER (this flag can \
-             be used multiple times)",
-            "FILTER",
-        )
-        .optflag(
-            "q",
-            "quiet",
-            "Display one character per test instead of one line. \
-             Alias to --format=terse",
-        )
-        .optflag(
-            "",
-            "exact",
-            "Exactly match filters rather than by substring",
-        )
-        .optopt(
-            "",
-            "color",
-            "Configure coloring of output:
-            auto   = colorize if stdout is a tty and tests are run on serially (default);
-            always = always colorize output;
-            never  = never colorize output;",
-            "auto|always|never",
-        )
-        .optopt(
-            "",
-            "format",
-            "Configure formatting of output:
-            pretty = Print verbose output;
-            terse  = Display one character per test;
-            json   = Output a json document",
-            "pretty|terse|json",
-        )
-        .optflag(
-            "",
-            "show-output",
-            "Show captured stdout of successful tests"
-        )
-        .optopt(
-            "Z",
-            "",
-            "Enable nightly-only flags:
-            unstable-options = Allow use of experimental features",
-            "unstable-options",
-        )
-        .optflagopt(
-            "",
-            "report-time",
-            "Show execution time of each test. Awailable values:
-            plain   = do not colorize the execution time (default);
-            colored = colorize output according to the `color` parameter value;
-
-            Threshold values for colorized output can be configured via
-            `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and
-            `RUST_TEST_TIME_DOCTEST` environment variables.
-
-            Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`.
-
-            Not available for --format=terse",
-            "plain|colored"
-        )
-        .optflag(
-            "",
-            "ensure-time",
-            "Treat excess of the test execution time limit as error.
-
-            Threshold values for this option can be configured via
-            `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and
-            `RUST_TEST_TIME_DOCTEST` environment variables.
-
-            Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`.
-
-            `CRITICAL_TIME` here means the limit that should not be exceeded by test.
-            "
-        );
-    return opts;
-}
-
-fn usage(binary: &str, options: &getopts::Options) {
-    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
-    println!(
-        r#"{usage}
-
-The FILTER string is tested against the name of all tests, and only those
-tests whose names contain the filter are run.
-
-By default, all tests are run in parallel. This can be altered with the
---test-threads flag or the RUST_TEST_THREADS environment variable when running
-tests (set it to 1).
-
-All tests have their standard output and standard error captured by default.
-This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
-environment variable to a value other than "0". Logging is not captured by default.
-
-Test Attributes:
-
-    `#[test]`        - Indicates a function is a test to be run. This function
-                       takes no arguments.
-    `#[bench]`       - Indicates a function is a benchmark to be run. This
-                       function takes one argument (test::Bencher).
-    `#[should_panic]` - This function (also labeled with `#[test]`) will only pass if
-                       the code causes a panic (an assertion failure or panic!)
-                       A message may be provided, which the failure string must
-                       contain: #[should_panic(expected = "foo")].
-    `#[ignore]`      - When applied to a function which is already attributed as a
-                       test, then the test runner will ignore these tests during
-                       normal test runs. Running with --ignored or --include-ignored will run
-                       these tests."#,
-        usage = options.usage(&message)
-    );
-}
-
-// FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
-fn is_nightly() -> bool {
-    // Whether this is a feature-staged build, i.e., on the beta or stable channel
-    let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
-    // Whether we should enable unstable features for bootstrapping
-    let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();
-
-    bootstrap || !disable_unstable_features
-}
-
-// Gets the option value and checks if unstable features are enabled.
-macro_rules! unstable_optflag {
-    ($matches:ident, $allow_unstable:ident, $option_name:literal) => {{
-        let opt = $matches.opt_present($option_name);
-        if !$allow_unstable && opt {
-            return Some(Err(format!(
-                "The \"{}\" flag is only accepted on the nightly compiler",
-                $option_name
-            )));
-        }
-
-        opt
-    }};
-}
-
-// Gets the CLI options assotiated with `report-time` feature.
-fn get_time_options(
-    matches: &getopts::Matches,
-    allow_unstable: bool)
--> Option<OptPartRes<TestTimeOptions>> {
-    let report_time = unstable_optflag!(matches, allow_unstable, "report-time");
-    let colored_opt_str = matches.opt_str("report-time");
-    let mut report_time_colored = report_time && colored_opt_str == Some("colored".into());
-    let ensure_test_time = unstable_optflag!(matches, allow_unstable, "ensure-time");
-
-    // If `ensure-test-time` option is provided, time output is enforced,
-    // so user won't be confused if any of tests will silently fail.
-    let options = if report_time || ensure_test_time {
-        if ensure_test_time && !report_time {
-            report_time_colored = true;
-        }
-        Some(TestTimeOptions::new_from_env(ensure_test_time, report_time_colored))
-    } else {
-        None
-    };
-
-    Some(Ok(options))
-}
-
-// Parses command line arguments into test options
-pub fn parse_opts(args: &[String]) -> Option<OptRes> {
-    let mut allow_unstable = false;
-    let opts = optgroups();
-    let args = args.get(1..).unwrap_or(args);
-    let matches = match opts.parse(args) {
-        Ok(m) => m,
-        Err(f) => return Some(Err(f.to_string())),
-    };
-
-    if let Some(opt) = matches.opt_str("Z") {
-        if !is_nightly() {
-            return Some(Err(
-                "the option `Z` is only accepted on the nightly compiler".into(),
-            ));
-        }
-
-        match &*opt {
-            "unstable-options" => {
-                allow_unstable = true;
-            }
-            _ => {
-                return Some(Err("Unrecognized option to `Z`".into()));
-            }
-        }
-    };
-
-    if matches.opt_present("h") {
-        usage(&args[0], &opts);
-        return None;
-    }
-
-    let filter = if !matches.free.is_empty() {
-        Some(matches.free[0].clone())
-    } else {
-        None
-    };
-
-    let exclude_should_panic = unstable_optflag!(matches, allow_unstable, "exclude-should-panic");
-
-    let include_ignored = unstable_optflag!(matches, allow_unstable, "include-ignored");
-
-    let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
-        (true, true) => {
-            return Some(Err(
-                "the options --include-ignored and --ignored are mutually exclusive".into(),
-            ));
-        }
-        (true, false) => RunIgnored::Yes,
-        (false, true) => RunIgnored::Only,
-        (false, false) => RunIgnored::No,
-    };
-    let quiet = matches.opt_present("quiet");
-    let exact = matches.opt_present("exact");
-    let list = matches.opt_present("list");
-
-    let logfile = matches.opt_str("logfile");
-    let logfile = logfile.map(|s| PathBuf::from(&s));
-
-    let bench_benchmarks = matches.opt_present("bench");
-    let run_tests = !bench_benchmarks || matches.opt_present("test");
-
-    let mut nocapture = matches.opt_present("nocapture");
-    if !nocapture {
-        nocapture = match env::var("RUST_TEST_NOCAPTURE") {
-            Ok(val) => &val != "0",
-            Err(_) => false,
-        };
-    }
-
-    let time_options = match get_time_options(&matches, allow_unstable) {
-        Some(Ok(val)) => val,
-        Some(Err(e)) => return Some(Err(e)),
-        None => panic!("Unexpected output from `get_time_options`"),
-    };
-
-    let test_threads = match matches.opt_str("test-threads") {
-        Some(n_str) => match n_str.parse::<usize>() {
-            Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
-            Ok(n) => Some(n),
-            Err(e) => {
-                return Some(Err(format!(
-                    "argument for --test-threads must be a number > 0 \
-                     (error: {})",
-                    e
-                )));
-            }
-        },
-        None => None,
-    };
-
-    let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
-        Some("auto") | None => AutoColor,
-        Some("always") => AlwaysColor,
-        Some("never") => NeverColor,
-
-        Some(v) => {
-            return Some(Err(format!(
-                "argument for --color must be auto, always, or never (was \
-                 {})",
-                v
-            )));
-        }
-    };
-
-    let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
-        None if quiet => OutputFormat::Terse,
-        Some("pretty") | None => OutputFormat::Pretty,
-        Some("terse") => OutputFormat::Terse,
-        Some("json") => {
-            if !allow_unstable {
-                return Some(Err(
-                    "The \"json\" format is only accepted on the nightly compiler".into(),
-                ));
-            }
-            OutputFormat::Json
-        }
-
-        Some(v) => {
-            return Some(Err(format!(
-                "argument for --format must be pretty, terse, or json (was \
-                 {})",
-                v
-            )));
-        }
-    };
-
-    let test_opts = TestOpts {
-        list,
-        filter,
-        filter_exact: exact,
-        exclude_should_panic,
-        run_ignored,
-        run_tests,
-        bench_benchmarks,
-        logfile,
-        nocapture,
-        color,
-        format,
-        test_threads,
-        skip: matches.opt_strs("skip"),
-        time_options,
-        options: Options::new().display_output(matches.opt_present("show-output")),
-    };
-
-    Some(Ok(test_opts))
-}
-
-#[derive(Debug, Clone, PartialEq)]
-pub struct BenchSamples {
-    ns_iter_summ: stats::Summary,
-    mb_s: usize,
-}
-
-#[derive(Debug, Clone, PartialEq)]
-pub enum TestResult {
-    TrOk,
-    TrFailed,
-    TrFailedMsg(String),
-    TrIgnored,
-    TrAllowedFail,
-    TrBench(BenchSamples),
-    TrTimedFail,
-}
-
-unsafe impl Send for TestResult {}
-
-/// The meassured execution time of a unit test.
-#[derive(Clone, PartialEq)]
-pub struct TestExecTime(Duration);
-
-impl fmt::Display for TestExecTime {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "{:.3}s", self.0.as_secs_f64())
-    }
-}
-
 enum OutputLocation<T> {
     Pretty(Box<term::StdoutTerminal>),
     Raw(T),
@@ -1071,7 +281,7 @@ pub fn write_log(
     pub fn write_log_result(&mut self,test: &TestDesc,
         result: &TestResult,
-        exec_time: Option<&TestExecTime>,
+        exec_time: Option<&time::TestExecTime>,
     ) -> io::Result<()> {
         self.write_log(|| format!(
             "{} {}",
@@ -1097,52 +307,6 @@ fn current_test_count(&self) -> usize {
     }
 }
 
-// Format a number with thousands separators
-fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
-    use std::fmt::Write;
-    let mut output = String::new();
-    let mut trailing = false;
-    for &pow in &[9, 6, 3, 0] {
-        let base = 10_usize.pow(pow);
-        if pow == 0 || trailing || n / base != 0 {
-            if !trailing {
-                output.write_fmt(format_args!("{}", n / base)).unwrap();
-            } else {
-                output.write_fmt(format_args!("{:03}", n / base)).unwrap();
-            }
-            if pow != 0 {
-                output.push(sep);
-            }
-            trailing = true;
-        }
-        n %= base;
-    }
-
-    output
-}
-
-pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
-    use std::fmt::Write;
-    let mut output = String::new();
-
-    let median = bs.ns_iter_summ.median as usize;
-    let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
-
-    output
-        .write_fmt(format_args!(
-            "{:>11} ns/iter (+/- {})",
-            fmt_thousands_sep(median, ','),
-            fmt_thousands_sep(deviation, ',')
-        ))
-        .unwrap();
-    if bs.mb_s != 0 {
-        output
-            .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
-            .unwrap();
-    }
-    output
-}
-
 // List the tests to console, and optionally to logfile. Filters are honored.
 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
     let mut output = match term::stdout() {
@@ -1271,14 +435,14 @@ fn callback(
     let mut out: Box<dyn OutputFormatter> = match opts.format {
         OutputFormat::Pretty => Box::new(PrettyFormatter::new(
             output,
-            use_color(opts),
+            opts.use_color(),
             max_name_len,
             is_multithreaded,
             opts.time_options,
         )),
         OutputFormat::Terse => Box::new(TerseFormatter::new(
             output,
-            use_color(opts),
+            opts.use_color(),
             max_name_len,
             is_multithreaded,
         )),
@@ -1299,55 +463,16 @@ fn len_if_padded(t: &TestDescAndFn) -> usize {
     return out.write_run_finish(&st);
 }
 
-fn use_color(opts: &TestOpts) -> bool {
-    match opts.color {
-        AutoColor => !opts.nocapture && stdout_isatty(),
-        AlwaysColor => true,
-        NeverColor => false,
-    }
-}
-
-#[cfg(any(
-    target_os = "cloudabi",
-    all(target_arch = "wasm32", not(target_os = "emscripten")),
-    all(target_vendor = "fortanix", target_env = "sgx")
-))]
-fn stdout_isatty() -> bool {
-    // FIXME: Implement isatty on SGX
-    false
-}
-#[cfg(unix)]
-fn stdout_isatty() -> bool {
-    unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
-}
-#[cfg(windows)]
-fn stdout_isatty() -> bool {
-    type DWORD = u32;
-    type BOOL = i32;
-    type HANDLE = *mut u8;
-    type LPDWORD = *mut u32;
-    const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
-    extern "system" {
-        fn GetStdHandle(which: DWORD) -> HANDLE;
-        fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
-    }
-    unsafe {
-        let handle = GetStdHandle(STD_OUTPUT_HANDLE);
-        let mut out = 0;
-        GetConsoleMode(handle, &mut out) != 0
-    }
-}
-
 #[derive(Clone)]
 pub enum TestEvent {
     TeFiltered(Vec<TestDesc>),
     TeWait(TestDesc),
-    TeResult(TestDesc, TestResult, Option<TestExecTime>, Vec<u8>),
+    TeResult(TestDesc, TestResult, Option<time::TestExecTime>, Vec<u8>),
     TeTimeout(TestDesc),
     TeFilteredOut(usize),
 }
 
-pub type MonitorMsg = (TestDesc, TestResult, Option<TestExecTime>, Vec<u8>);
+pub type MonitorMsg = (TestDesc, TestResult, Option<time::TestExecTime>, Vec<u8>);
 
 struct Sink(Arc<Mutex<Vec<u8>>>);
 impl Write for Sink {
@@ -1359,18 +484,6 @@ fn flush(&mut self) -> io::Result<()> {
     }
 }
 
-#[derive(Clone, Copy)]
-pub enum RunStrategy {
-    /// Runs the test in the current process, and sends the result back over the
-    /// supplied channel.
-    InProcess,
-
-    /// Spawns a subprocess to run the test, and sends the result back over the
-    /// supplied channel. Requires `argv[0]` to exist and point to the binary
-    /// that's currently running.
-    SpawnPrimary,
-}
-
 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
 where
     F: FnMut(TestEvent) -> io::Result<()>,
@@ -1467,7 +580,7 @@ fn calc_timeout(running_tests: &TestMap) -> Option<Duration> {
     while pending > 0 || !remaining.is_empty() {
         while pending < concurrency && !remaining.is_empty() {
             let test = remaining.pop().unwrap();
-            let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
+            let timeout = time::get_default_test_timeout();
             running_tests.insert(test.desc.clone(), timeout);
             callback(TeWait(test.desc.clone()))?; //here no pad
             run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::Yes);
@@ -1510,153 +623,6 @@ fn calc_timeout(running_tests: &TestMap) -> Option<Duration> {
     Ok(())
 }
 
-#[allow(deprecated)]
-fn get_concurrency() -> usize {
-    return match env::var("RUST_TEST_THREADS") {
-        Ok(s) => {
-            let opt_n: Option<usize> = s.parse().ok();
-            match opt_n {
-                Some(n) if n > 0 => n,
-                _ => panic!(
-                    "RUST_TEST_THREADS is `{}`, should be a positive integer.",
-                    s
-                ),
-            }
-        }
-    };
-
-    #[cfg(windows)]
-    #[allow(nonstandard_style)]
-    fn num_cpus() -> usize {
-        #[repr(C)]
-        struct SYSTEM_INFO {
-            wProcessorArchitecture: u16,
-            wReserved: u16,
-            dwPageSize: u32,
-            lpMinimumApplicationAddress: *mut u8,
-            lpMaximumApplicationAddress: *mut u8,
-            dwActiveProcessorMask: *mut u8,
-            dwNumberOfProcessors: u32,
-            dwProcessorType: u32,
-            dwAllocationGranularity: u32,
-            wProcessorLevel: u16,
-            wProcessorRevision: u16,
-        }
-        extern "system" {
-            fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
-        }
-        unsafe {
-            let mut sysinfo = std::mem::zeroed();
-            GetSystemInfo(&mut sysinfo);
-            sysinfo.dwNumberOfProcessors as usize
-        }
-    }
-
-    #[cfg(target_os = "vxworks")]
-    fn num_cpus() -> usize {
-        // FIXME: Implement num_cpus on vxWorks
-        1
-    }
-
-    #[cfg(target_os = "redox")]
-    fn num_cpus() -> usize {
-        // FIXME: Implement num_cpus on Redox
-        1
-    }
-
-    #[cfg(any(
-        all(target_arch = "wasm32", not(target_os = "emscripten")),
-        all(target_vendor = "fortanix", target_env = "sgx")
-    ))]
-    fn num_cpus() -> usize {
-        1
-    }
-
-    #[cfg(any(
-        target_os = "android",
-        target_os = "cloudabi",
-        target_os = "emscripten",
-        target_os = "fuchsia",
-        target_os = "ios",
-        target_os = "linux",
-        target_os = "macos",
-        target_os = "solaris",
-    ))]
-    fn num_cpus() -> usize {
-        unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
-    }
-
-    #[cfg(any(
-        target_os = "freebsd",
-        target_os = "dragonfly",
-        target_os = "netbsd"
-    ))]
-    fn num_cpus() -> usize {
-        use std::ptr;
-
-        let mut cpus: libc::c_uint = 0;
-        let mut cpus_size = std::mem::size_of_val(&cpus);
-
-        unsafe {
-            cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
-        }
-        if cpus < 1 {
-            let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
-            unsafe {
-                libc::sysctl(
-                    mib.as_mut_ptr(),
-                    2,
-                    &mut cpus as *mut _ as *mut _,
-                    &mut cpus_size as *mut _ as *mut _,
-                    ptr::null_mut(),
-                    0,
-                );
-            }
-            if cpus < 1 {
-                cpus = 1;
-            }
-        }
-        cpus as usize
-    }
-
-    #[cfg(target_os = "openbsd")]
-    fn num_cpus() -> usize {
-        use std::ptr;
-
-        let mut cpus: libc::c_uint = 0;
-        let mut cpus_size = std::mem::size_of_val(&cpus);
-        let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
-
-        unsafe {
-            libc::sysctl(
-                mib.as_mut_ptr(),
-                2,
-                &mut cpus as *mut _ as *mut _,
-                &mut cpus_size as *mut _ as *mut _,
-                ptr::null_mut(),
-                0,
-            );
-        }
-        if cpus < 1 {
-            cpus = 1;
-        }
-        cpus as usize
-    }
-
-    #[cfg(target_os = "haiku")]
-    fn num_cpus() -> usize {
-        // FIXME: implement
-        1
-    }
-
-    #[cfg(target_os = "l4re")]
-    fn num_cpus() -> usize {
-        // FIXME: implement
-        1
-    }
-}
-
 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
     let mut filtered = tests;
     let matches_filter = |test: &TestDescAndFn, filter: &str| {
@@ -1748,7 +714,7 @@ struct TestRunOpts {
     pub strategy: RunStrategy,
     pub nocapture: bool,
     pub concurrency: Concurrent,
-    pub time: Option<TestTimeOptions>,
+    pub time: Option<time::TestTimeOptions>,
 }
 
 fn run_test_inner(
@@ -1835,86 +801,13 @@ fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
     f()
 }
 
-fn calc_result<'a>(
-    desc: &TestDesc,
-    task_result: Result<(), &'a (dyn Any + 'static + Send)>,
-    time_opts: &Option<TestTimeOptions>,
-    exec_time: &Option<TestExecTime>
-) -> TestResult {
-    let result = match (&desc.should_panic, task_result) {
-        (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
-        (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
-            if err
-                .downcast_ref::<String>()
-                .map(|e| &**e)
-                .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
-                .map(|e| e.contains(msg))
-                .unwrap_or(false)
-            {
-                TrOk
-            } else {
-                if desc.allow_fail {
-                    TrAllowedFail
-                } else {
-                    TrFailedMsg(format!("panic did not include expected string '{}'", msg))
-                }
-            }
-        }
-        (&ShouldPanic::Yes, Ok(())) => TrFailedMsg("test did not panic as expected".to_string()),
-        _ if desc.allow_fail => TrAllowedFail,
-        _ => TrFailed,
-    };
-
-    // If test is already failed (or allowed to fail), do not change the result.
-    if result != TrOk {
-        return result;
-    }
-
-    // Check if test is failed due to timeout.
-    if let (Some(opts), Some(time)) = (time_opts, exec_time) {
-        if opts.error_on_excess && opts.is_critical(desc, time) {
-            return TrTimedFail;
-        }
-    }
-
-    result
-}
-
-fn get_result_from_exit_code(
-    desc: &TestDesc,
-    code: i32,
-    time_opts: &Option<TestTimeOptions>,
-    exec_time: &Option<TestExecTime>,
-) -> TestResult {
-    let result = match (desc.allow_fail, code) {
-        (_, TR_OK) => TrOk,
-        (true, TR_FAILED) => TrAllowedFail,
-        (false, TR_FAILED) => TrFailed,
-        (_, _) => TrFailedMsg(format!("got unexpected return code {}", code)),
-    };
-
-    // If test is already failed (or allowed to fail), do not change the result.
-    if result != TrOk {
-        return result;
-    }
-
-    // Check if test is failed due to timeout.
-    if let (Some(opts), Some(time)) = (time_opts, exec_time) {
-        if opts.error_on_excess && opts.is_critical(desc, time) {
-            return TrTimedFail;
-        }
-    }
-
-    result
-}
-
 fn run_test_in_process(
     desc: TestDesc,
     nocapture: bool,
     report_time: bool,
     testfn: Box<dyn FnOnce() + Send>,
     monitor_ch: Sender<MonitorMsg>,
-    time_opts: Option<TestTimeOptions>,
+    time_opts: Option<time::TestTimeOptions>,
 ) {
     // Buffer for capturing standard I/O
     let data = Arc::new(Mutex::new(Vec::new()));
@@ -1936,7 +829,7 @@ fn run_test_in_process(
     let result = catch_unwind(AssertUnwindSafe(testfn));
     let exec_time = start.map(|start| {
         let duration = start.elapsed();
-        TestExecTime(duration)
+        time::TestExecTime(duration)
     });
 
     if let Some((printio, panicio)) = oldio {
@@ -1956,7 +849,7 @@ fn spawn_test_subprocess(
     desc: TestDesc,
     report_time: bool,
     monitor_ch: Sender<MonitorMsg>,
-    time_opts: Option<TestTimeOptions>,
+    time_opts: Option<time::TestTimeOptions>,
 ) {
     let (result, test_output, exec_time) = (|| {
         let args = env::args().collect::<Vec<_>>();
@@ -1978,7 +871,7 @@ fn spawn_test_subprocess(
         };
         let exec_time = start.map(|start| {
             let duration = start.elapsed();
-            TestExecTime(duration)
+            time::TestExecTime(duration)
        });
 
         let std::process::Output { stdout, stderr, status } = output;
@@ -2025,9 +918,9 @@ fn run_test_in_spawned_subprocess(
     }
 
     if let TrOk = test_result {
-        process::exit(TR_OK);
+        process::exit(test_result::TR_OK);
     } else {
-        process::exit(TR_FAILED);
+        process::exit(test_result::TR_FAILED);
     }
     });
     let record_result2 = record_result.clone();
@@ -2053,229 +946,3 @@ fn get_exit_code(status: ExitStatus) -> Result<i32, String> {
         }
     }
 }
-
-#[derive(Clone, PartialEq)]
-pub struct MetricMap(BTreeMap<String, Metric>);
-
-impl MetricMap {
-    pub fn new() -> MetricMap {
-        MetricMap(BTreeMap::new())
-    }
-
-    /// Insert a named `value` (+/- `noise`) metric into the map. The value
-    /// must be non-negative. The `noise` indicates the uncertainty of the
-    /// metric, which doubles as the "noise range" of acceptable
-    /// pairwise-regressions on this named value, when comparing from one
-    /// metric to the next using `compare_to_old`.
-    ///
-    /// If `noise` is positive, then it means this metric is of a value
-    /// you want to see grow smaller, so a change larger than `noise` in the
-    /// positive direction represents a regression.
-    ///
-    /// If `noise` is negative, then it means this metric is of a value
-    /// you want to see grow larger, so a change larger than `noise` in the
-    /// negative direction represents a regression.
-    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
-        let m = Metric { value, noise };
-        self.0.insert(name.to_owned(), m);
-    }
-
-    pub fn fmt_metrics(&self) -> String {
-        let v = self
-            .0
-            .iter()
-            .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
-            .collect::<Vec<String>>();
-        v.join(", ")
-    }
-}
-
-// Benchmarking
-
-pub use std::hint::black_box;
-
-impl Bencher {
-    /// Callback for benchmark functions to run in their body.
-    pub fn iter<T, F>(&mut self, mut inner: F)
-    where
-        F: FnMut() -> T,
-    {
-        if self.mode == BenchMode::Single {
-            ns_iter_inner(&mut inner, 1);
-            return;
-        }
-
-        self.summary = Some(iter(&mut inner));
-    }
-
-    pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
-    where
-        F: FnMut(&mut Bencher),
-    {
-        f(self);
-        return self.summary;
-    }
-}
-
-fn ns_from_dur(dur: Duration) -> u64 {
-    dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
-}
-
-fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
-where
-    F: FnMut() -> T,
-{
-    let start = Instant::now();
-    for _ in 0..k {
-        black_box(inner());
-    }
-    return ns_from_dur(start.elapsed());
-}
-
-pub fn iter<T, F>(inner: &mut F) -> stats::Summary
-where
-    F: FnMut() -> T,
-{
-    // Initial bench run to get ballpark figure.
-    let ns_single = ns_iter_inner(inner, 1);
-
-    // Try to estimate iter count for 1ms falling back to 1m
-    // iterations if first run took < 1ns.
-    let ns_target_total = 1_000_000; // 1ms
-    let mut n = ns_target_total / cmp::max(1, ns_single);
-
-    // if the first run took more than 1ms we don't want to just
-    // be left doing 0 iterations on every loop. The unfortunate
-    // side effect of not being able to do as many runs is
-    // automatically handled by the statistical analysis below
-    // (i.e., larger error bars).
-    n = cmp::max(1, n);
-
-    let mut total_run = Duration::new(0, 0);
-    let samples: &mut [f64] = &mut [0.0_f64; 50];
-    loop {
-        let loop_start = Instant::now();
-
-        for p in &mut *samples {
-            *p = ns_iter_inner(inner, n) as f64 / n as f64;
-        }
-
-        stats::winsorize(samples, 5.0);
-        let summ = stats::Summary::new(samples);
-
-        for p in &mut *samples {
-            let ns = ns_iter_inner(inner, 5 * n);
-            *p = ns as f64 / (5 * n) as f64;
-        }
-
-        stats::winsorize(samples, 5.0);
-        let summ5 = stats::Summary::new(samples);
-
-        let loop_run = loop_start.elapsed();
-
-        // If we've run for 100ms and seem to have converged to a
-        // stable median.
-        if loop_run > Duration::from_millis(100)
-            && summ.median_abs_dev_pct < 1.0
-            && summ.median - summ5.median < summ5.median_abs_dev
-        {
-            return summ5;
-        }
-
-        total_run = total_run + loop_run;
-        // Longest we ever run for is 3s.
-        if total_run > Duration::from_secs(3) {
-            return summ5;
-        }
-
-        // If we overflow here just return the results so far. We check a
-        // multiplier of 10 because we're about to multiply by 2 and the
-        // next iteration of the loop will also multiply by 5 (to calculate
-        // the summ5 result)
-        n = match n.checked_mul(10) {
-            Some(_) => n * 2,
-            None => {
-                return summ5;
-            }
-        };
-    }
-}
-
-pub mod bench {
-    use super::{
-        BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult
-    };
-    use crate::stats;
-    use std::cmp;
-    use std::io;
-    use std::panic::{catch_unwind, AssertUnwindSafe};
-    use std::sync::{Arc, Mutex};
-
-    pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
-    where
-        F: FnMut(&mut Bencher),
-    {
-        let mut bs = Bencher {
-            mode: BenchMode::Auto,
-            summary: None,
-            bytes: 0,
-        };
-
-        let data = Arc::new(Mutex::new(Vec::new()));
-        let oldio = if !nocapture {
-            Some((
-                io::set_print(Some(Box::new(Sink(data.clone())))),
-                io::set_panic(Some(Box::new(Sink(data.clone())))),
-            ))
-        } else {
-            None
-        };
-
-        let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
-
-        if let Some((printio, panicio)) = oldio {
-            io::set_print(printio);
-            io::set_panic(panicio);
-        }
-
-        let test_result = match result {
-            //bs.bench(f) {
-            Ok(Some(ns_iter_summ)) => {
-                let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
-                let mb_s = bs.bytes * 1000 / ns_iter;
-
-                let bs = BenchSamples {
-                    ns_iter_summ,
-                    mb_s: mb_s as usize,
-                };
-                TestResult::TrBench(bs)
-            }
-            Ok(None) => {
-                // iter not called, so no data.
-                // FIXME: error in this case?
-                let samples: &mut [f64] = &mut [0.0_f64; 1];
-                let bs = BenchSamples {
-                    ns_iter_summ: stats::Summary::new(samples),
-                    mb_s: 0,
-                };
-                TestResult::TrBench(bs)
-            }
-            Err(_) => TestResult::TrFailed,
-        };
-
-        let stdout = data.lock().unwrap().to_vec();
-        monitor_ch.send((desc, test_result, None, stdout)).unwrap();
-    }
-
-    pub fn run_once<F>(f: F)
-    where
-        F: FnMut(&mut Bencher),
-    {
-        let mut bs = Bencher {
-            mode: BenchMode::Single,
-            summary: None,
-            bytes: 0,
-        };
-        bs.bench(f);
-    }
-}
diff --git a/src/libtest/options.rs b/src/libtest/options.rs
new file mode 100644
index 00000000000..0a604cae0ca
--- /dev/null
+++ b/src/libtest/options.rs
@@ -0,0 +1,80 @@
+//! Enums denoting options for test execution.
+
+/// Whether to execute tests concurrently or not.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum Concurrent {
+    Yes,
+    No,
+}
+
+#[derive(Clone, PartialEq, Eq)]
+pub enum BenchMode {
+    Auto,
+    Single,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub enum ShouldPanic {
+    No,
+    Yes,
+    YesWithMessage(&'static str),
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum ColorConfig {
+    AutoColor,
+    AlwaysColor,
+    NeverColor,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum OutputFormat {
+    Pretty,
+    Terse,
+    Json,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum RunIgnored {
+    Yes,
+    No,
+    Only,
+}
+
+#[derive(Clone, Copy)]
+pub enum RunStrategy {
+    /// Runs the test in the current process, and sends the result back over the
+    /// supplied channel.
+    InProcess,
+
+    /// Spawns a subprocess to run the test, and sends the result back over the
+    /// supplied channel. Requires `argv[0]` to exist and point to the binary
+    /// that's currently running.
+    SpawnPrimary,
+}
+
+/// If we want to support more options in the future, they should be added to this struct.
+#[derive(Copy, Clone, Debug)]
+pub struct Options {
+    pub display_output: bool,
+    pub panic_abort: bool,
+}
+
+impl Options {
+    pub fn new() -> Options {
+        Options {
+            display_output: false,
+            panic_abort: false,
+        }
+    }
+
+    pub fn display_output(mut self, display_output: bool) -> Options {
+        self.display_output = display_output;
+        self
+    }
+
+    pub fn panic_abort(mut self, panic_abort: bool) -> Options {
+        self.panic_abort = panic_abort;
+        self
+    }
+}
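The setters on `Options` consume and return `self`, so callers can chain them; a minimal usage sketch (the surrounding function is hypothetical):

    // Hypothetical caller: enable displaying the output of successful tests,
    // keep panic_abort at its default.
    fn example_options() -> Options {
        Options::new()
            .display_output(true)
            .panic_abort(false)
    }
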
diff --git a/src/libtest/test_result.rs b/src/libtest/test_result.rs
new file mode 100644
index 00000000000..4eb3f93e2a4
--- /dev/null
+++ b/src/libtest/test_result.rs
@@ -0,0 +1,102 @@
+
+use std::any::Any;
+
+use super::bench::BenchSamples;
+use super::time;
+use super::types::TestDesc;
+use super::options::ShouldPanic;
+
+pub use self::TestResult::*;
+
+// Return codes for secondary process.
+// Start somewhere other than 0 so we know the return code means what we think
+// it means.
+pub const TR_OK: i32 = 50;
+pub const TR_FAILED: i32 = 51;
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum TestResult {
+    TrOk,
+    TrFailed,
+    TrFailedMsg(String),
+    TrIgnored,
+    TrAllowedFail,
+    TrBench(BenchSamples),
+    TrTimedFail,
+}
+
+unsafe impl Send for TestResult {}
+
+
+pub fn calc_result<'a>(
+    desc: &TestDesc,
+    task_result: Result<(), &'a (dyn Any + 'static + Send)>,
+    time_opts: &Option<time::TestTimeOptions>,
+    exec_time: &Option<time::TestExecTime>
+) -> TestResult {
+    let result = match (&desc.should_panic, task_result) {
+        (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TestResult::TrOk,
+        (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
+            if err
+                .downcast_ref::<String>()
+                .map(|e| &**e)
+                .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
+                .map(|e| e.contains(msg))
+                .unwrap_or(false)
+            {
+                TestResult::TrOk
+            } else {
+                if desc.allow_fail {
+                    TestResult::TrAllowedFail
+                } else {
+                    TestResult::TrFailedMsg(format!("panic did not include expected string '{}'", msg))
+                }
+            }
+        }
+        (&ShouldPanic::Yes, Ok(())) => TestResult::TrFailedMsg("test did not panic as expected".to_string()),
+        _ if desc.allow_fail => TestResult::TrAllowedFail,
+        _ => TestResult::TrFailed,
+    };
+
+    // If the test already failed (or is allowed to fail), do not change the result.
+    if result != TestResult::TrOk {
+        return result;
+    }
+
+    // Check if the test failed due to a timeout.
+    if let (Some(opts), Some(time)) = (time_opts, exec_time) {
+        if opts.error_on_excess && opts.is_critical(desc, time) {
+            return TestResult::TrTimedFail;
+        }
+    }
+
+    result
+}
+
+pub fn get_result_from_exit_code(
+    desc: &TestDesc,
+    code: i32,
+    time_opts: &Option<time::TestTimeOptions>,
+    exec_time: &Option<time::TestExecTime>,
+) -> TestResult {
+    let result = match (desc.allow_fail, code) {
+        (_, TR_OK) => TestResult::TrOk,
+        (true, TR_FAILED) => TestResult::TrAllowedFail,
+        (false, TR_FAILED) => TestResult::TrFailed,
+        (_, _) => TestResult::TrFailedMsg(format!("got unexpected return code {}", code)),
+    };
+
+    // If the test already failed (or is allowed to fail), do not change the result.
+    if result != TestResult::TrOk {
+        return result;
+    }
+
+    // Check if the test failed due to a timeout.
+    if let (Some(opts), Some(time)) = (time_opts, exec_time) {
+        if opts.error_on_excess && opts.is_critical(desc, time) {
+            return TestResult::TrTimedFail;
+        }
+    }
+
+    result
+}
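To see how the pieces of `test_result` fit together: a subprocess exit status is decoded against the `TR_OK`/`TR_FAILED` sentinels, and only then checked against the time limits. A minimal sketch, assuming the module's items are in scope (the helper name is hypothetical):

    // Hypothetical helper: decode a secondary-process exit code with no time limits.
    fn decode(desc: &TestDesc, code: i32) -> TestResult {
        get_result_from_exit_code(desc, code, &None, &None)
    }
    // decode(&desc, 50) yields TrOk, decode(&desc, 51) yields TrFailed
    // (TrAllowedFail if desc.allow_fail is set), and any other code yields
    // TrFailedMsg("got unexpected return code ...").
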
diff --git a/src/libtest/time.rs b/src/libtest/time.rs
new file mode 100644
index 00000000000..b7ce764505b
--- /dev/null
+++ b/src/libtest/time.rs
@@ -0,0 +1,206 @@
+//! Module `time` contains everything related to the time measurement of unit test
+//! execution.
+//!
+//! The two main purposes of this module are:
+//! - Checking whether a test has timed out.
+//! - Providing helpers for the `report-time` and `measure-time` options.
+
+use std::time::{Duration, Instant};
+use std::str::FromStr;
+use std::fmt;
+use std::env;
+
+use super::types::{TestDesc, TestType};
+
+pub const TEST_WARN_TIMEOUT_S: u64 = 60;
+
+/// This small module contains constants used by the `report-time` option.
+/// These constants' values are used if the corresponding environment variables are not set.
+///
+/// To override the values for unit tests, use the `RUST_TEST_TIME_UNIT` environment variable;
+/// to override the values for integration tests, use `RUST_TEST_TIME_INTEGRATION`;
+/// to override the values for doctests, use `RUST_TEST_TIME_DOCTEST`.
+///
+/// The expected format is `RUST_TEST_TIME_xxx=100,200`, where 100 is the warn time
+/// and 200 is the critical time, both in milliseconds.
+pub mod time_constants {
+    use std::time::Duration;
+    use super::TEST_WARN_TIMEOUT_S;
+
+    /// Environment variable for overriding the default threshold for unit tests.
+    pub const UNIT_ENV_NAME: &str = "RUST_TEST_TIME_UNIT";
+
+    // Unit tests are supposed to be really quick.
+    pub const UNIT_WARN: Duration = Duration::from_millis(50);
+    pub const UNIT_CRITICAL: Duration = Duration::from_millis(100);
+
+    /// Environment variable for overriding the default threshold for integration tests.
+    pub const INTEGRATION_ENV_NAME: &str = "RUST_TEST_TIME_INTEGRATION";
+
+    // Integration tests may do a lot of work, so they can take longer to execute.
+    pub const INTEGRATION_WARN: Duration = Duration::from_millis(500);
+    pub const INTEGRATION_CRITICAL: Duration = Duration::from_millis(1000);
+
+    /// Environment variable for overriding the default threshold for doctests.
+    pub const DOCTEST_ENV_NAME: &str = "RUST_TEST_TIME_DOCTEST";
+
+    // Doctests are similar to integration tests, because they can include a lot of
+    // initialization code.
+    pub const DOCTEST_WARN: Duration = INTEGRATION_WARN;
+    pub const DOCTEST_CRITICAL: Duration = INTEGRATION_CRITICAL;
+
+    // Do not assume anything about unknown tests; base the limits on the
+    // `TEST_WARN_TIMEOUT_S` constant.
+    pub const UNKNOWN_WARN: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S);
+    pub const UNKNOWN_CRITICAL: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S * 2);
+}
+
+/// Returns an `Instant` denoting when the test should be considered
+/// timed out.
+pub fn get_default_test_timeout() -> Instant {
+    Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S)
+}
+
+/// The measured execution time of a unit test.
+#[derive(Clone, PartialEq)]
+pub struct TestExecTime(pub Duration);
+
+impl fmt::Display for TestExecTime {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:.3}s", self.0.as_secs_f64())
+    }
+}
+
+/// Structure denoting time limits for test execution.
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
+pub struct TimeThreshold {
+    pub warn: Duration,
+    pub critical: Duration,
+}
+
+impl TimeThreshold {
+    /// Creates a new `TimeThreshold` instance with the provided durations.
+    pub fn new(warn: Duration, critical: Duration) -> Self {
+        Self {
+            warn,
+            critical,
+        }
+    }
+
+    /// Attempts to create a `TimeThreshold` instance with values obtained
+    /// from the environment variable, and returns `None` if the variable
+    /// is not set.
+    /// The environment variable format is expected to match `\d+,\d+`.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the variable with the provided name is set but contains an
+    /// inappropriate value.
+    pub fn from_env_var(env_var_name: &str) -> Option<Self> {
+        let durations_str = env::var(env_var_name).ok()?;
+
+        // Split the string into two substrings at the comma and try to parse the numbers.
+        let mut durations = durations_str
+            .splitn(2, ',')
+            .map(|v| {
+                u64::from_str(v).unwrap_or_else(|_| {
+                    panic!(
+                        "Duration value in variable {} is expected to be a number, but got {}",
+                        env_var_name, v
+                    )
+                })
+            });
+
+        // Callback to be called if the environment variable has an unexpected structure.
+        let panic_on_incorrect_value = || {
+            panic!(
+                "Duration variable {} expected to have two numbers separated by a comma, but got {}",
+                env_var_name, durations_str
+            );
+        };
+
+        let (warn, critical) = (
+            durations.next().unwrap_or_else(panic_on_incorrect_value),
+            durations.next().unwrap_or_else(panic_on_incorrect_value)
+        );
+
+        if warn > critical {
+            panic!("Test execution warn time should be less than or equal to the critical time");
+        }
+
+        Some(Self::new(Duration::from_millis(warn), Duration::from_millis(critical)))
+    }
+}
+
+/// Structure with parameters for calculating test execution time.
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
+pub struct TestTimeOptions {
+    /// Denotes whether exceeding the test's critical execution time limit should be
+    /// considered a test failure.
+    pub error_on_excess: bool,
+    pub colored: bool,
+    pub unit_threshold: TimeThreshold,
+    pub integration_threshold: TimeThreshold,
+    pub doctest_threshold: TimeThreshold,
+}
+
+impl TestTimeOptions {
+    pub fn new_from_env(error_on_excess: bool, colored: bool) -> Self {
+        let unit_threshold =
+            TimeThreshold::from_env_var(time_constants::UNIT_ENV_NAME)
+                .unwrap_or_else(Self::default_unit);
+
+        let integration_threshold =
+            TimeThreshold::from_env_var(time_constants::INTEGRATION_ENV_NAME)
+                .unwrap_or_else(Self::default_integration);
+
+        let doctest_threshold =
+            TimeThreshold::from_env_var(time_constants::DOCTEST_ENV_NAME)
+                .unwrap_or_else(Self::default_doctest);
+
+        Self {
+            error_on_excess,
+            colored,
+            unit_threshold,
+            integration_threshold,
+            doctest_threshold,
+        }
+    }
+
+    pub fn is_warn(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool {
+        exec_time.0 >= self.warn_time(test)
+    }
+
+    pub fn is_critical(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool {
+        exec_time.0 >= self.critical_time(test)
+    }
+
+    fn warn_time(&self, test: &TestDesc) -> Duration {
+        match test.test_type {
+            TestType::UnitTest => self.unit_threshold.warn,
+            TestType::IntegrationTest => self.integration_threshold.warn,
+            TestType::DocTest => self.doctest_threshold.warn,
+            TestType::Unknown => time_constants::UNKNOWN_WARN,
+        }
+    }
+
+    fn critical_time(&self, test: &TestDesc) -> Duration {
+        match test.test_type {
+            TestType::UnitTest => self.unit_threshold.critical,
+            TestType::IntegrationTest => self.integration_threshold.critical,
+            TestType::DocTest => self.doctest_threshold.critical,
+            TestType::Unknown => time_constants::UNKNOWN_CRITICAL,
+        }
+    }
+
+    fn default_unit() -> TimeThreshold {
+        TimeThreshold::new(time_constants::UNIT_WARN, time_constants::UNIT_CRITICAL)
+    }
+
+    fn default_integration() -> TimeThreshold {
+        TimeThreshold::new(time_constants::INTEGRATION_WARN, time_constants::INTEGRATION_CRITICAL)
+    }
+
+    fn default_doctest() -> TimeThreshold {
+        TimeThreshold::new(time_constants::DOCTEST_WARN, time_constants::DOCTEST_CRITICAL)
+    }
+}
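Tying the time-limit machinery together: with `RUST_TEST_TIME_UNIT=50,100` set, `from_env_var` parses a 50 ms warn / 100 ms critical threshold, and `new_from_env` falls back to the built-in defaults for any variable that is unset. A minimal sketch, assuming the module's items are in scope (the function name is hypothetical):

    fn example_thresholds() {
        // Override the unit-test threshold for this process, then read it back.
        std::env::set_var("RUST_TEST_TIME_UNIT", "50,100");
        let threshold = TimeThreshold::from_env_var(time_constants::UNIT_ENV_NAME)
            .expect("the variable was just set");
        assert_eq!(threshold.warn, Duration::from_millis(50));
        assert_eq!(threshold.critical, Duration::from_millis(100));

        // error_on_excess = true turns a critical excess into a test failure.
        let _options = TestTimeOptions::new_from_env(true, false);
    }
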
diff --git a/src/libtest/types.rs b/src/libtest/types.rs
new file mode 100644
index 00000000000..89bcf2cf285
--- /dev/null
+++ b/src/libtest/types.rs
@@ -0,0 +1,145 @@
+//! Common types used by `libtest`.
+
+use std::fmt;
+use std::borrow::Cow;
+
+use super::options;
+use super::bench::Bencher;
+
+pub use NamePadding::*;
+pub use TestName::*;
+pub use TestFn::*;
+
+/// Type of the test according to the [Rust book](https://doc.rust-lang.org/cargo/guide/tests.html)
+/// conventions.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub enum TestType {
+    /// Unit-tests are expected to be in the `src` folder of the crate.
+    UnitTest,
+    /// Integration-style tests are expected to be in the `tests` folder of the crate.
+    IntegrationTest,
+    /// Doctests are created manually by `librustdoc`, so they are a different type of test.
+    DocTest,
+    /// Tests for sources that don't follow the project layout convention
+    /// (e.g., tests in a raw `main.rs` compiled by calling `rustc --test` directly).
+    Unknown,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+pub enum NamePadding {
+    PadNone,
+    PadOnRight,
+}
+
+// The name of a test. By convention this follows the rules for Rust
+// paths; i.e., it should be a series of identifiers separated by double
+// colons. This way, if some test runner wants to arrange the tests
+// hierarchically, it may.
+#[derive(Clone, PartialEq, Eq, Hash, Debug)]
+pub enum TestName {
+    StaticTestName(&'static str),
+    DynTestName(String),
+    AlignedTestName(Cow<'static, str>, NamePadding),
+}
+
+impl TestName {
+    pub fn as_slice(&self) -> &str {
+        match *self {
+            StaticTestName(s) => s,
+            DynTestName(ref s) => s,
+            AlignedTestName(ref s, _) => &*s,
+        }
+    }
+
+    pub fn padding(&self) -> NamePadding {
+        match self {
+            &AlignedTestName(_, p) => p,
+            _ => PadNone,
+        }
+    }
+
+    pub fn with_padding(&self, padding: NamePadding) -> TestName {
+        let name = match self {
+            &TestName::StaticTestName(name) => Cow::Borrowed(name),
+            &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
+            &TestName::AlignedTestName(ref name, _) => name.clone(),
+        };
+
+        TestName::AlignedTestName(name, padding)
+    }
+}
+impl fmt::Display for TestName {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(self.as_slice(), f)
+    }
+}
+
+/// Represents a benchmark function.
+pub trait TDynBenchFn: Send {
+    fn run(&self, harness: &mut Bencher);
+}
+
+// A function that runs a test. If the function returns successfully,
+// the test succeeds; if the function panics, then the test fails. We
+// may need to come up with a more clever definition of test in order
+// to support isolation of tests into threads.
+pub enum TestFn {
+    StaticTestFn(fn()),
+    StaticBenchFn(fn(&mut Bencher)),
+    DynTestFn(Box<dyn FnOnce() + Send>),
+    DynBenchFn(Box<dyn TDynBenchFn + 'static>),
+}
+
+impl TestFn {
+    pub fn padding(&self) -> NamePadding {
+        match *self {
+            StaticTestFn(..) => PadNone,
+            StaticBenchFn(..) => PadOnRight,
+            DynTestFn(..) => PadNone,
+            DynBenchFn(..) => PadOnRight,
+        }
+    }
+}
+
+impl fmt::Debug for TestFn {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(match *self {
+            StaticTestFn(..) => "StaticTestFn(..)",
+            StaticBenchFn(..) => "StaticBenchFn(..)",
+            DynTestFn(..) => "DynTestFn(..)",
+            DynBenchFn(..) => "DynBenchFn(..)",
+        })
+    }
+}
+
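The padding plumbing above is what keeps benchmark names column-aligned in the pretty output; a small sketch of how a name acquires padding, assuming the types are in scope (values are illustrative):

    fn example_padding() {
        let name = StaticTestName("bench_push");
        // A plain name carries no padding until it is explicitly aligned.
        assert_eq!(name.padding(), PadNone);

        let aligned = name.with_padding(PadOnRight);
        assert_eq!(aligned.padding(), PadOnRight);
        assert_eq!(aligned.as_slice(), "bench_push");
    }
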
+// The definition of a single test. A test runner will run a list of
+// these.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub struct TestDesc {
+    pub name: TestName,
+    pub ignore: bool,
+    pub should_panic: options::ShouldPanic,
+    pub allow_fail: bool,
+    pub test_type: TestType,
+}
+
+impl TestDesc {
+    pub fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
+        let mut name = String::from(self.name.as_slice());
+        let fill = column_count.saturating_sub(name.len());
+        let pad = " ".repeat(fill);
+        match align {
+            PadNone => name,
+            PadOnRight => {
+                name.push_str(&pad);
+                name
+            }
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct TestDescAndFn {
+    pub desc: TestDesc,
+    pub testfn: TestFn,
+}
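Finally, a sketch of the types in combination: a `TestDescAndFn` built the way a harness might build it for a trivial test, plus `padded_name` right-padding a name into a column (all values are illustrative):

    fn example_desc() {
        let desc = TestDesc {
            name: StaticTestName("foo::bar"),
            ignore: false,
            should_panic: options::ShouldPanic::No,
            allow_fail: false,
            test_type: TestType::UnitTest,
        };

        // Right-pad the 8-character name "foo::bar" to a 12-character column.
        assert_eq!(desc.padded_name(12, PadOnRight), "foo::bar    ");

        let _test = TestDescAndFn { desc, testfn: StaticTestFn(|| ()) };
    }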