rustfmt libtest
parent 08886499cf
commit 2acfa838a0
@@ -77,12 +77,10 @@ use std::time::{Instant, Duration};

// to be used by rustc to compile tests in libtest
pub mod test {
pub use {Bencher, TestName, TestResult, TestDesc,
TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
Metric, MetricMap,
StaticTestFn, StaticTestName, DynTestName, DynTestFn,
run_test, test_main, test_main_static, filter_tests,
parse_opts, StaticBenchFn, ShouldPanic};
pub use {Bencher, TestName, TestResult, TestDesc, TestDescAndFn, TestOpts, TrFailed,
TrIgnored, TrOk, Metric, MetricMap, StaticTestFn, StaticTestName, DynTestName,
DynTestFn, run_test, test_main, test_main_static, filter_tests, parse_opts,
StaticBenchFn, ShouldPanic};
}

pub mod stats;
@@ -95,13 +93,13 @@ pub mod stats;
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub enum TestName {
StaticTestName(&'static str),
DynTestName(String)
DynTestName(String),
}
impl TestName {
fn as_slice(&self) -> &str {
match *self {
StaticTestName(s) => s,
DynTestName(ref s) => s
DynTestName(ref s) => s,
}
}
}
@@ -146,19 +144,19 @@ pub enum TestFn {
StaticBenchFn(fn(&mut Bencher)),
StaticMetricFn(fn(&mut MetricMap)),
DynTestFn(Box<FnBox() + Send>),
DynMetricFn(Box<FnBox(&mut MetricMap)+Send>),
DynBenchFn(Box<TDynBenchFn+'static>)
DynMetricFn(Box<FnBox(&mut MetricMap) + Send>),
DynBenchFn(Box<TDynBenchFn + 'static>),
}

impl TestFn {
fn padding(&self) -> NamePadding {
match *self {
StaticTestFn(..) => PadNone,
StaticBenchFn(..) => PadOnRight,
StaticTestFn(..) => PadNone,
StaticBenchFn(..) => PadOnRight,
StaticMetricFn(..) => PadOnRight,
DynTestFn(..) => PadNone,
DynMetricFn(..) => PadOnRight,
DynBenchFn(..) => PadOnRight,
DynTestFn(..) => PadNone,
DynMetricFn(..) => PadOnRight,
DynBenchFn(..) => PadOnRight,
}
}
}
@@ -171,7 +169,7 @@ impl fmt::Debug for TestFn {
StaticMetricFn(..) => "StaticMetricFn(..)",
DynTestFn(..) => "DynTestFn(..)",
DynMetricFn(..) => "DynMetricFn(..)",
DynBenchFn(..) => "DynBenchFn(..)"
DynBenchFn(..) => "DynBenchFn(..)",
})
}
}
@@ -192,7 +190,7 @@ pub struct Bencher {
pub enum ShouldPanic {
No,
Yes,
YesWithMessage(&'static str)
YesWithMessage(&'static str),
}

// The definition of a single test. A test runner will run a list of
@@ -215,17 +213,20 @@ pub struct TestDescAndFn {
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug, Copy)]
pub struct Metric {
value: f64,
noise: f64
noise: f64,
}

impl Metric {
pub fn new(value: f64, noise: f64) -> Metric {
Metric {value: value, noise: noise}
Metric {
value: value,
noise: noise,
}
}
}

#[derive(PartialEq)]
pub struct MetricMap(BTreeMap<String,Metric>);
pub struct MetricMap(BTreeMap<String, Metric>);

impl Clone for MetricMap {
fn clone(&self) -> MetricMap {
@@ -236,13 +237,12 @@ impl Clone for MetricMap {

// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
let opts =
match parse_opts(args) {
Some(Ok(o)) => o,
Some(Err(msg)) => panic!("{:?}", msg),
None => return
};
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>) {
let opts = match parse_opts(args) {
Some(Ok(o)) => o,
Some(Err(msg)) => panic!("{:?}", msg),
None => return,
};
match run_tests_console(&opts, tests) {
Ok(true) => {}
Ok(false) => std::process::exit(101),
@@ -259,13 +259,25 @@ pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
// rather than a &[].
pub fn test_main_static(tests: &[TestDescAndFn]) {
let args = env::args().collect::<Vec<_>>();
let owned_tests = tests.iter().map(|t| {
match t.testfn {
StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
_ => panic!("non-static tests passed to test::test_main_static")
}
}).collect();
let owned_tests = tests.iter()
.map(|t| {
match t.testfn {
StaticTestFn(f) => {
TestDescAndFn {
testfn: StaticTestFn(f),
desc: t.desc.clone(),
}
}
StaticBenchFn(f) => {
TestDescAndFn {
testfn: StaticBenchFn(f),
desc: t.desc.clone(),
}
}
_ => panic!("non-static tests passed to test::test_main_static"),
}
})
.collect();
test_main(&args, owned_tests)
}

@@ -305,18 +317,26 @@ impl TestOpts {
pub type OptRes = Result<TestOpts, String>;

fn optgroups() -> Vec<getopts::OptGroup> {
vec!(getopts::optflag("", "ignored", "Run ignored tests"),
getopts::optflag("", "test", "Run tests and not benchmarks"),
getopts::optflag("", "bench", "Run benchmarks instead of tests"),
getopts::optflag("h", "help", "Display this message (longer with --help)"),
getopts::optopt("", "logfile", "Write logs to the specified file instead \
of stdout", "PATH"),
getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
task, allow printing directly"),
getopts::optopt("", "color", "Configure coloring of output:
auto = colorize if stdout is a tty and tests are run on serially (default);
always = always colorize output;
never = never colorize output;", "auto|always|never"))
vec![getopts::optflag("", "ignored", "Run ignored tests"),
getopts::optflag("", "test", "Run tests and not benchmarks"),
getopts::optflag("", "bench", "Run benchmarks instead of tests"),
getopts::optflag("h", "help", "Display this message (longer with --help)"),
getopts::optopt("",
"logfile",
"Write logs to the specified file instead of stdout",
"PATH"),
getopts::optflag("",
"nocapture",
"don't capture stdout/stderr of each task, allow printing directly"),
getopts::optopt("",
"color",
"Configure coloring of output:
auto = colorize if stdout \
is a tty and tests are run on serially (default);
always = \
always colorize output;
never = never colorize output;",
"auto|always|never")]
}

fn usage(binary: &str) {
@@ -353,13 +373,15 @@ Test Attributes:
// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
let args_ = &args[1..];
let matches =
match getopts::getopts(args_, &optgroups()) {
Ok(m) => m,
Err(f) => return Some(Err(f.to_string()))
};
let matches = match getopts::getopts(args_, &optgroups()) {
Ok(m) => m,
Err(f) => return Some(Err(f.to_string())),
};

if matches.opt_present("h") { usage(&args[0]); return None; }
if matches.opt_present("h") {
usage(&args[0]);
return None;
}

let filter = if !matches.free.is_empty() {
Some(matches.free[0].clone())
@@ -373,8 +395,7 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
let logfile = logfile.map(|s| PathBuf::from(&s));

let bench_benchmarks = matches.opt_present("bench");
let run_tests = ! bench_benchmarks ||
matches.opt_present("test");
let run_tests = !bench_benchmarks || matches.opt_present("test");

let mut nocapture = matches.opt_present("nocapture");
if !nocapture {
@@ -386,9 +407,11 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
Some("always") => AlwaysColor,
Some("never") => NeverColor,

Some(v) => return Some(Err(format!("argument for --color must be \
auto, always, or never (was {})",
v))),
Some(v) => {
return Some(Err(format!("argument for --color must be auto, always, or never (was \
{})",
v)))
}
};

let test_opts = TestOpts {
@@ -436,20 +459,19 @@ struct ConsoleTestState<T> {
ignored: usize,
measured: usize,
metrics: MetricMap,
failures: Vec<(TestDesc, Vec<u8> )> ,
failures: Vec<(TestDesc, Vec<u8>)>,
max_name_len: usize, // number of columns to fill when aligning names
}

impl<T: Write> ConsoleTestState<T> {
pub fn new(opts: &TestOpts,
_: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
pub fn new(opts: &TestOpts, _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
let log_out = match opts.logfile {
Some(ref path) => Some(try!(File::create(path))),
None => None
None => None,
};
let out = match term::stdout() {
None => Raw(io::stdout()),
Some(t) => Pretty(t)
Some(t) => Pretty(t),
};

Ok(ConsoleTestState {
@ -487,9 +509,7 @@ impl<T: Write> ConsoleTestState<T> {
|
||||
self.write_pretty("bench", term::color::CYAN)
|
||||
}
|
||||
|
||||
pub fn write_pretty(&mut self,
|
||||
word: &str,
|
||||
color: term::color::Color) -> io::Result<()> {
|
||||
pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
|
||||
match self.out {
|
||||
Pretty(ref mut term) => {
|
||||
if self.use_color {
|
||||
@ -513,22 +533,25 @@ impl<T: Write> ConsoleTestState<T> {
|
||||
Pretty(ref mut term) => {
|
||||
try!(term.write_all(s.as_bytes()));
|
||||
term.flush()
|
||||
},
|
||||
}
|
||||
Raw(ref mut stdout) => {
|
||||
try!(stdout.write_all(s.as_bytes()));
|
||||
stdout.flush()
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
|
||||
self.total = len;
|
||||
let noun = if len != 1 { "tests" } else { "test" };
|
||||
let noun = if len != 1 {
|
||||
"tests"
|
||||
} else {
|
||||
"test"
|
||||
};
|
||||
self.write_plain(&format!("\nrunning {} {}\n", len, noun))
|
||||
}
|
||||
|
||||
pub fn write_test_start(&mut self, test: &TestDesc,
|
||||
align: NamePadding) -> io::Result<()> {
|
||||
pub fn write_test_start(&mut self, test: &TestDesc, align: NamePadding) -> io::Result<()> {
|
||||
let name = test.padded_name(self.max_name_len, align);
|
||||
self.write_plain(&format!("test {} ... ", name))
|
||||
}
|
||||
@ -553,18 +576,19 @@ impl<T: Write> ConsoleTestState<T> {
|
||||
self.write_plain("\n")
|
||||
}
|
||||
|
||||
pub fn write_log(&mut self, test: &TestDesc,
|
||||
result: &TestResult) -> io::Result<()> {
|
||||
pub fn write_log(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
|
||||
match self.log_out {
|
||||
None => Ok(()),
|
||||
Some(ref mut o) => {
|
||||
let s = format!("{} {}\n", match *result {
|
||||
TrOk => "ok".to_owned(),
|
||||
TrFailed => "failed".to_owned(),
|
||||
TrIgnored => "ignored".to_owned(),
|
||||
TrMetrics(ref mm) => mm.fmt_metrics(),
|
||||
TrBench(ref bs) => fmt_bench_samples(bs)
|
||||
}, test.name);
|
||||
let s = format!("{} {}\n",
|
||||
match *result {
|
||||
TrOk => "ok".to_owned(),
|
||||
TrFailed => "failed".to_owned(),
|
||||
TrIgnored => "ignored".to_owned(),
|
||||
TrMetrics(ref mm) => mm.fmt_metrics(),
|
||||
TrBench(ref bs) => fmt_bench_samples(bs),
|
||||
},
|
||||
test.name);
|
||||
o.write_all(s.as_bytes())
|
||||
}
|
||||
}
|
||||
@ -612,7 +636,10 @@ impl<T: Write> ConsoleTestState<T> {
|
||||
try!(self.write_failed());
|
||||
}
|
||||
let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
|
||||
self.passed, self.failed, self.ignored, self.measured);
|
||||
self.passed,
|
||||
self.failed,
|
||||
self.ignored,
|
||||
self.measured);
|
||||
try!(self.write_plain(&s));
|
||||
return Ok(success);
|
||||
}
|
||||
@ -650,8 +677,9 @@ pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
|
||||
let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
|
||||
|
||||
output.write_fmt(format_args!("{:>11} ns/iter (+/- {})",
|
||||
fmt_thousands_sep(median, ','),
|
||||
fmt_thousands_sep(deviation, ','))).unwrap();
|
||||
fmt_thousands_sep(median, ','),
|
||||
fmt_thousands_sep(deviation, ',')))
|
||||
.unwrap();
|
||||
if bs.mb_s != 0 {
|
||||
output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap();
|
||||
}
|
||||
@ -659,10 +687,9 @@ pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
|
||||
}
|
||||
|
||||
// A simple console test runner
|
||||
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::Result<bool> {
|
||||
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
|
||||
|
||||
fn callback<T: Write>(event: &TestEvent,
|
||||
st: &mut ConsoleTestState<T>) -> io::Result<()> {
|
||||
fn callback<T: Write>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::Result<()> {
|
||||
match (*event).clone() {
|
||||
TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
|
||||
TeWait(ref test, padding) => st.write_test_start(test, padding),
|
||||
@ -675,13 +702,9 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::Res
|
||||
TrMetrics(mm) => {
|
||||
let tname = test.name;
|
||||
let MetricMap(mm) = mm;
|
||||
for (k,v) in &mm {
|
||||
for (k, v) in &mm {
|
||||
st.metrics
|
||||
.insert_metric(&format!("{}.{}",
|
||||
tname,
|
||||
k),
|
||||
v.value,
|
||||
v.noise);
|
||||
.insert_metric(&format!("{}.{}", tname, k), v.value, v.noise);
|
||||
}
|
||||
st.measured += 1
|
||||
}
|
||||
@ -708,11 +731,11 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::Res
|
||||
PadOnRight => t.desc.name.as_slice().len(),
|
||||
}
|
||||
}
|
||||
match tests.iter().max_by_key(|t|len_if_padded(*t)) {
|
||||
match tests.iter().max_by_key(|t| len_if_padded(*t)) {
|
||||
Some(t) => {
|
||||
let n = t.desc.name.as_slice();
|
||||
st.max_name_len = n.len();
|
||||
},
|
||||
}
|
||||
None => {}
|
||||
}
|
||||
try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
|
||||
@ -724,13 +747,13 @@ fn should_sort_failures_before_printing_them() {
|
||||
let test_a = TestDesc {
|
||||
name: StaticTestName("a"),
|
||||
ignore: false,
|
||||
should_panic: ShouldPanic::No
|
||||
should_panic: ShouldPanic::No,
|
||||
};
|
||||
|
||||
let test_b = TestDesc {
|
||||
name: StaticTestName("b"),
|
||||
ignore: false,
|
||||
should_panic: ShouldPanic::No
|
||||
should_panic: ShouldPanic::No,
|
||||
};
|
||||
|
||||
let mut st = ConsoleTestState {
|
||||
@ -744,13 +767,13 @@ fn should_sort_failures_before_printing_them() {
|
||||
measured: 0,
|
||||
max_name_len: 10,
|
||||
metrics: MetricMap::new(),
|
||||
failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
|
||||
failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
|
||||
};
|
||||
|
||||
st.write_failures().unwrap();
|
||||
let s = match st.out {
|
||||
Raw(ref m) => String::from_utf8_lossy(&m[..]),
|
||||
Pretty(_) => unreachable!()
|
||||
Pretty(_) => unreachable!(),
|
||||
};
|
||||
|
||||
let apos = s.find("a").unwrap();
|
||||
@ -790,18 +813,16 @@ fn stdout_isatty() -> bool {
|
||||
|
||||
#[derive(Clone)]
|
||||
enum TestEvent {
|
||||
TeFiltered(Vec<TestDesc> ),
|
||||
TeFiltered(Vec<TestDesc>),
|
||||
TeWait(TestDesc, NamePadding),
|
||||
TeResult(TestDesc, TestResult, Vec<u8> ),
|
||||
TeResult(TestDesc, TestResult, Vec<u8>),
|
||||
}
|
||||
|
||||
pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
|
||||
pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
|
||||
|
||||
|
||||
fn run_tests<F>(opts: &TestOpts,
|
||||
tests: Vec<TestDescAndFn> ,
|
||||
mut callback: F) -> io::Result<()> where
|
||||
F: FnMut(TestEvent) -> io::Result<()>,
|
||||
fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
|
||||
where F: FnMut(TestEvent) -> io::Result<()>
|
||||
{
|
||||
let mut filtered_tests = filter_tests(opts, tests);
|
||||
if !opts.bench_benchmarks {
|
||||
@ -818,7 +839,7 @@ fn run_tests<F>(opts: &TestOpts,
|
||||
filtered_tests.into_iter().partition(|e| {
|
||||
match e.testfn {
|
||||
StaticTestFn(_) | DynTestFn(_) => true,
|
||||
_ => false
|
||||
_ => false,
|
||||
}
|
||||
});
|
||||
|
||||
@ -873,7 +894,10 @@ fn get_concurrency() -> usize {
|
||||
let opt_n: Option<usize> = s.parse().ok();
|
||||
match opt_n {
|
||||
Some(n) if n > 0 => n,
|
||||
_ => panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.", s)
|
||||
_ => {
|
||||
panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.",
|
||||
s)
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(..) => num_cpus(),
|
||||
@ -911,9 +935,7 @@ fn get_concurrency() -> usize {
|
||||
target_os = "ios",
|
||||
target_os = "android"))]
|
||||
fn num_cpus() -> usize {
|
||||
unsafe {
|
||||
libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize
|
||||
}
|
||||
unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
|
||||
}
|
||||
|
||||
#[cfg(any(target_os = "freebsd",
|
||||
@ -926,18 +948,22 @@ fn get_concurrency() -> usize {
|
||||
let mut mib = [libc::CTL_HW, libc::HW_AVAILCPU, 0, 0];
|
||||
|
||||
unsafe {
|
||||
libc::sysctl(mib.as_mut_ptr(), 2,
|
||||
libc::sysctl(mib.as_mut_ptr(),
|
||||
2,
|
||||
&mut cpus as *mut _ as *mut _,
|
||||
&mut cpus_size as *mut _ as *mut _,
|
||||
0 as *mut _, 0);
|
||||
0 as *mut _,
|
||||
0);
|
||||
}
|
||||
if cpus < 1 {
|
||||
mib[1] = libc::HW_NCPU;
|
||||
unsafe {
|
||||
libc::sysctl(mib.as_mut_ptr(), 2,
|
||||
libc::sysctl(mib.as_mut_ptr(),
|
||||
2,
|
||||
&mut cpus as *mut _ as *mut _,
|
||||
&mut cpus_size as *mut _ as *mut _,
|
||||
0 as *mut _, 0);
|
||||
0 as *mut _,
|
||||
0);
|
||||
}
|
||||
if cpus < 1 {
|
||||
cpus = 1;
|
||||
@ -953,10 +979,12 @@ fn get_concurrency() -> usize {
|
||||
let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
|
||||
|
||||
unsafe {
|
||||
libc::sysctl(mib.as_mut_ptr(), 2,
|
||||
libc::sysctl(mib.as_mut_ptr(),
|
||||
2,
|
||||
&mut cpus as *mut _ as *mut _,
|
||||
&mut cpus_size as *mut _ as *mut _,
|
||||
0 as *mut _, 0);
|
||||
0 as *mut _,
|
||||
0);
|
||||
}
|
||||
if cpus < 1 {
|
||||
cpus = 1;
|
||||
@ -972,9 +1000,9 @@ pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescA
|
||||
filtered = match opts.filter {
|
||||
None => filtered,
|
||||
Some(ref filter) => {
|
||||
filtered.into_iter().filter(|test| {
|
||||
test.desc.name.as_slice().contains(&filter[..])
|
||||
}).collect()
|
||||
filtered.into_iter()
|
||||
.filter(|test| test.desc.name.as_slice().contains(&filter[..]))
|
||||
.collect()
|
||||
}
|
||||
};
|
||||
|
||||
@ -986,8 +1014,8 @@ pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescA
|
||||
if test.desc.ignore {
|
||||
let TestDescAndFn {desc, testfn} = test;
|
||||
Some(TestDescAndFn {
|
||||
desc: TestDesc {ignore: false, ..desc},
|
||||
testfn: testfn
|
||||
desc: TestDesc { ignore: false, ..desc },
|
||||
testfn: testfn,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
@ -1004,18 +1032,23 @@ pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescA
|
||||
|
||||
pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
|
||||
// convert benchmarks to tests, if we're not benchmarking them
|
||||
tests.into_iter().map(|x| {
|
||||
let testfn = match x.testfn {
|
||||
DynBenchFn(bench) => {
|
||||
DynTestFn(Box::new(move || bench::run_once(|b| bench.run(b))))
|
||||
}
|
||||
StaticBenchFn(benchfn) => {
|
||||
DynTestFn(Box::new(move || bench::run_once(|b| benchfn(b))))
|
||||
}
|
||||
f => f
|
||||
};
|
||||
TestDescAndFn { desc: x.desc, testfn: testfn }
|
||||
}).collect()
|
||||
tests.into_iter()
|
||||
.map(|x| {
|
||||
let testfn = match x.testfn {
|
||||
DynBenchFn(bench) => {
|
||||
DynTestFn(Box::new(move || bench::run_once(|b| bench.run(b))))
|
||||
}
|
||||
StaticBenchFn(benchfn) => {
|
||||
DynTestFn(Box::new(move || bench::run_once(|b| benchfn(b))))
|
||||
}
|
||||
f => f,
|
||||
};
|
||||
TestDescAndFn {
|
||||
desc: x.desc,
|
||||
testfn: testfn,
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn run_test(opts: &TestOpts,
|
||||
@ -1039,7 +1072,9 @@ pub fn run_test(opts: &TestOpts,
|
||||
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
|
||||
Write::write(&mut *self.0.lock().unwrap(), data)
|
||||
}
|
||||
fn flush(&mut self) -> io::Result<()> { Ok(()) }
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
thread::spawn(move || {
|
||||
@ -1051,12 +1086,13 @@ pub fn run_test(opts: &TestOpts,
|
||||
});
|
||||
|
||||
let result_guard = cfg.spawn(move || {
|
||||
if !nocapture {
|
||||
io::set_print(box Sink(data2.clone()));
|
||||
io::set_panic(box Sink(data2));
|
||||
}
|
||||
testfn()
|
||||
}).unwrap();
|
||||
if !nocapture {
|
||||
io::set_print(box Sink(data2.clone()));
|
||||
io::set_panic(box Sink(data2));
|
||||
}
|
||||
testfn()
|
||||
})
|
||||
.unwrap();
|
||||
let test_result = calc_result(&desc, result_guard.join());
|
||||
let stdout = data.lock().unwrap().to_vec();
|
||||
monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
|
||||
@ -1087,27 +1123,25 @@ pub fn run_test(opts: &TestOpts,
|
||||
return;
|
||||
}
|
||||
DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
|
||||
StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
|
||||
Box::new(f))
|
||||
StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(f)),
|
||||
}
|
||||
}
|
||||
|
||||
fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
|
||||
fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any + Send>>) -> TestResult {
|
||||
match (&desc.should_panic, task_result) {
|
||||
(&ShouldPanic::No, Ok(())) |
|
||||
(&ShouldPanic::Yes, Err(_)) => TrOk,
|
||||
(&ShouldPanic::YesWithMessage(msg), Err(ref err))
|
||||
if err.downcast_ref::<String>()
|
||||
.map(|e| &**e)
|
||||
.or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
|
||||
.map(|e| e.contains(msg))
|
||||
.unwrap_or(false) => TrOk,
|
||||
.map(|e| &**e)
|
||||
.or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
|
||||
.map(|e| e.contains(msg))
|
||||
.unwrap_or(false) => TrOk,
|
||||
_ => TrFailed,
|
||||
}
|
||||
}
|
||||
|
||||
impl MetricMap {
|
||||
|
||||
pub fn new() -> MetricMap {
|
||||
MetricMap(BTreeMap::new())
|
||||
}
|
||||
@ -1128,7 +1162,7 @@ impl MetricMap {
|
||||
pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
|
||||
let m = Metric {
|
||||
value: value,
|
||||
noise: noise
|
||||
noise: noise,
|
||||
};
|
||||
let MetricMap(ref mut map) = *self;
|
||||
map.insert(name.to_owned(), m);
|
||||
@ -1136,10 +1170,9 @@ impl MetricMap {
|
||||
|
||||
pub fn fmt_metrics(&self) -> String {
|
||||
let MetricMap(ref mm) = *self;
|
||||
let v : Vec<String> = mm.iter()
|
||||
.map(|(k,v)| format!("{}: {} (+/- {})", *k,
|
||||
v.value, v.noise))
|
||||
.collect();
|
||||
let v: Vec<String> = mm.iter()
|
||||
.map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
|
||||
.collect();
|
||||
v.join(", ")
|
||||
}
|
||||
}
|
||||
@ -1156,17 +1189,21 @@ impl MetricMap {
|
||||
pub fn black_box<T>(dummy: T) -> T {
|
||||
// we need to "use" the argument in some way LLVM can't
|
||||
// introspect.
|
||||
unsafe {asm!("" : : "r"(&dummy))}
|
||||
unsafe { asm!("" : : "r"(&dummy)) }
|
||||
dummy
|
||||
}
|
||||
#[cfg(all(target_os = "nacl", target_arch = "le32"))]
|
||||
#[inline(never)]
|
||||
pub fn black_box<T>(dummy: T) -> T { dummy }
|
||||
pub fn black_box<T>(dummy: T) -> T {
|
||||
dummy
|
||||
}
|
||||
|
||||
|
||||
impl Bencher {
|
||||
/// Callback for benchmark functions to run in their body.
|
||||
pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
|
||||
pub fn iter<T, F>(&mut self, mut inner: F)
|
||||
where F: FnMut() -> T
|
||||
{
|
||||
let start = Instant::now();
|
||||
let k = self.iterations;
|
||||
for _ in 0..k {
|
||||
@ -1187,13 +1224,17 @@ impl Bencher {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
|
||||
pub fn bench_n<F>(&mut self, n: u64, f: F)
|
||||
where F: FnOnce(&mut Bencher)
|
||||
{
|
||||
self.iterations = n;
|
||||
f(self);
|
||||
}
|
||||
|
||||
// This is a more statistics-driven benchmark algorithm
|
||||
pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary where F: FnMut(&mut Bencher) {
|
||||
pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary
|
||||
where F: FnMut(&mut Bencher)
|
||||
{
|
||||
// Initial bench run to get ballpark figure.
|
||||
let mut n = 1;
|
||||
self.bench_n(n, |x| f(x));
|
||||
@ -1210,17 +1251,19 @@ impl Bencher {
|
||||
// side effect of not being able to do as many runs is
|
||||
// automatically handled by the statistical analysis below
|
||||
// (i.e. larger error bars).
|
||||
if n == 0 { n = 1; }
|
||||
if n == 0 {
|
||||
n = 1;
|
||||
}
|
||||
|
||||
let mut total_run = Duration::new(0, 0);
|
||||
let samples : &mut [f64] = &mut [0.0_f64; 50];
|
||||
let samples: &mut [f64] = &mut [0.0_f64; 50];
|
||||
loop {
|
||||
let loop_start = Instant::now();
|
||||
|
||||
for p in &mut *samples {
|
||||
self.bench_n(n, |x| f(x));
|
||||
*p = self.ns_per_iter() as f64;
|
||||
};
|
||||
}
|
||||
|
||||
stats::winsorize(samples, 5.0);
|
||||
let summ = stats::Summary::new(samples);
|
||||
@ -1228,7 +1271,7 @@ impl Bencher {
|
||||
for p in &mut *samples {
|
||||
self.bench_n(5 * n, |x| f(x));
|
||||
*p = self.ns_per_iter() as f64;
|
||||
};
|
||||
}
|
||||
|
||||
stats::winsorize(samples, 5.0);
|
||||
let summ5 = stats::Summary::new(samples);
|
||||
@ -1236,9 +1279,8 @@ impl Bencher {
|
||||
|
||||
// If we've run for 100ms and seem to have converged to a
|
||||
// stable median.
|
||||
if loop_run > Duration::from_millis(100) &&
|
||||
summ.median_abs_dev_pct < 1.0 &&
|
||||
summ.median - summ5.median < summ5.median_abs_dev {
|
||||
if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0 &&
|
||||
summ.median - summ5.median < summ5.median_abs_dev {
|
||||
return summ5;
|
||||
}
|
||||
|
||||
@ -1265,11 +1307,13 @@ pub mod bench {
|
||||
use std::time::Duration;
|
||||
use super::{Bencher, BenchSamples};
|
||||
|
||||
pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
|
||||
pub fn benchmark<F>(f: F) -> BenchSamples
|
||||
where F: FnMut(&mut Bencher)
|
||||
{
|
||||
let mut bs = Bencher {
|
||||
iterations: 0,
|
||||
dur: Duration::new(0, 0),
|
||||
bytes: 0
|
||||
bytes: 0,
|
||||
};
|
||||
|
||||
let ns_iter_summ = bs.auto_bench(f);
|
||||
@ -1279,15 +1323,17 @@ pub mod bench {
|
||||
|
||||
BenchSamples {
|
||||
ns_iter_summ: ns_iter_summ,
|
||||
mb_s: mb_s as usize
|
||||
mb_s: mb_s as usize,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn run_once<F>(f: F) where F: FnOnce(&mut Bencher) {
|
||||
pub fn run_once<F>(f: F)
|
||||
where F: FnOnce(&mut Bencher)
|
||||
{
|
||||
let mut bs = Bencher {
|
||||
iterations: 0,
|
||||
dur: Duration::new(0, 0),
|
||||
bytes: 0
|
||||
bytes: 0,
|
||||
};
|
||||
bs.bench_n(1, f);
|
||||
}
|
||||
@ -1295,22 +1341,22 @@ pub mod bench {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
|
||||
TestDesc, TestDescAndFn, TestOpts, run_test,
|
||||
MetricMap,
|
||||
StaticTestName, DynTestName, DynTestFn, ShouldPanic};
|
||||
use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts, TestDesc, TestDescAndFn,
|
||||
TestOpts, run_test, MetricMap, StaticTestName, DynTestName, DynTestFn, ShouldPanic};
|
||||
use std::sync::mpsc::channel;
|
||||
|
||||
#[test]
|
||||
pub fn do_not_run_ignored_tests() {
|
||||
fn f() { panic!(); }
|
||||
fn f() {
|
||||
panic!();
|
||||
}
|
||||
let desc = TestDescAndFn {
|
||||
desc: TestDesc {
|
||||
name: StaticTestName("whatever"),
|
||||
ignore: true,
|
||||
should_panic: ShouldPanic::No,
|
||||
},
|
||||
testfn: DynTestFn(Box::new(move|| f())),
|
||||
testfn: DynTestFn(Box::new(move || f())),
|
||||
};
|
||||
let (tx, rx) = channel();
|
||||
run_test(&TestOpts::new(), false, desc, tx);
|
||||
@ -1320,14 +1366,14 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
pub fn ignored_tests_result_in_ignored() {
|
||||
fn f() { }
|
||||
fn f() {}
|
||||
let desc = TestDescAndFn {
|
||||
desc: TestDesc {
|
||||
name: StaticTestName("whatever"),
|
||||
ignore: true,
|
||||
should_panic: ShouldPanic::No,
|
||||
},
|
||||
testfn: DynTestFn(Box::new(move|| f())),
|
||||
testfn: DynTestFn(Box::new(move || f())),
|
||||
};
|
||||
let (tx, rx) = channel();
|
||||
run_test(&TestOpts::new(), false, desc, tx);
|
||||
@ -1337,14 +1383,16 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_should_panic() {
|
||||
fn f() { panic!(); }
|
||||
fn f() {
|
||||
panic!();
|
||||
}
|
||||
let desc = TestDescAndFn {
|
||||
desc: TestDesc {
|
||||
name: StaticTestName("whatever"),
|
||||
ignore: false,
|
||||
should_panic: ShouldPanic::Yes,
|
||||
},
|
||||
testfn: DynTestFn(Box::new(move|| f())),
|
||||
testfn: DynTestFn(Box::new(move || f())),
|
||||
};
|
||||
let (tx, rx) = channel();
|
||||
run_test(&TestOpts::new(), false, desc, tx);
|
||||
@ -1354,14 +1402,16 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_should_panic_good_message() {
|
||||
fn f() { panic!("an error message"); }
|
||||
fn f() {
|
||||
panic!("an error message");
|
||||
}
|
||||
let desc = TestDescAndFn {
|
||||
desc: TestDesc {
|
||||
name: StaticTestName("whatever"),
|
||||
ignore: false,
|
||||
should_panic: ShouldPanic::YesWithMessage("error message"),
|
||||
},
|
||||
testfn: DynTestFn(Box::new(move|| f())),
|
||||
testfn: DynTestFn(Box::new(move || f())),
|
||||
};
|
||||
let (tx, rx) = channel();
|
||||
run_test(&TestOpts::new(), false, desc, tx);
|
||||
@ -1371,14 +1421,16 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_should_panic_bad_message() {
|
||||
fn f() { panic!("an error message"); }
|
||||
fn f() {
|
||||
panic!("an error message");
|
||||
}
|
||||
let desc = TestDescAndFn {
|
||||
desc: TestDesc {
|
||||
name: StaticTestName("whatever"),
|
||||
ignore: false,
|
||||
should_panic: ShouldPanic::YesWithMessage("foobar"),
|
||||
},
|
||||
testfn: DynTestFn(Box::new(move|| f())),
|
||||
testfn: DynTestFn(Box::new(move || f())),
|
||||
};
|
||||
let (tx, rx) = channel();
|
||||
run_test(&TestOpts::new(), false, desc, tx);
|
||||
@ -1388,14 +1440,14 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_should_panic_but_succeeds() {
|
||||
fn f() { }
|
||||
fn f() {}
|
||||
let desc = TestDescAndFn {
|
||||
desc: TestDesc {
|
||||
name: StaticTestName("whatever"),
|
||||
ignore: false,
|
||||
should_panic: ShouldPanic::Yes,
|
||||
},
|
||||
testfn: DynTestFn(Box::new(move|| f())),
|
||||
testfn: DynTestFn(Box::new(move || f())),
|
||||
};
|
||||
let (tx, rx) = channel();
|
||||
run_test(&TestOpts::new(), false, desc, tx);
|
||||
@ -1405,12 +1457,10 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn parse_ignored_flag() {
|
||||
let args = vec!("progname".to_string(),
|
||||
"filter".to_string(),
|
||||
"--ignored".to_string());
|
||||
let args = vec!["progname".to_string(), "filter".to_string(), "--ignored".to_string()];
|
||||
let opts = match parse_opts(&args) {
|
||||
Some(Ok(o)) => o,
|
||||
_ => panic!("Malformed arg in parse_ignored_flag")
|
||||
_ => panic!("Malformed arg in parse_ignored_flag"),
|
||||
};
|
||||
assert!((opts.run_ignored));
|
||||
}
|
||||
@ -1424,28 +1474,26 @@ mod tests {
|
||||
opts.run_tests = true;
|
||||
opts.run_ignored = true;
|
||||
|
||||
let tests = vec!(
|
||||
TestDescAndFn {
|
||||
desc: TestDesc {
|
||||
name: StaticTestName("1"),
|
||||
ignore: true,
|
||||
should_panic: ShouldPanic::No,
|
||||
},
|
||||
testfn: DynTestFn(Box::new(move|| {})),
|
||||
},
|
||||
TestDescAndFn {
|
||||
desc: TestDesc {
|
||||
name: StaticTestName("2"),
|
||||
ignore: false,
|
||||
should_panic: ShouldPanic::No,
|
||||
},
|
||||
testfn: DynTestFn(Box::new(move|| {})),
|
||||
});
|
||||
let tests = vec![TestDescAndFn {
|
||||
desc: TestDesc {
|
||||
name: StaticTestName("1"),
|
||||
ignore: true,
|
||||
should_panic: ShouldPanic::No,
|
||||
},
|
||||
testfn: DynTestFn(Box::new(move || {})),
|
||||
},
|
||||
TestDescAndFn {
|
||||
desc: TestDesc {
|
||||
name: StaticTestName("2"),
|
||||
ignore: false,
|
||||
should_panic: ShouldPanic::No,
|
||||
},
|
||||
testfn: DynTestFn(Box::new(move || {})),
|
||||
}];
|
||||
let filtered = filter_tests(&opts, tests);
|
||||
|
||||
assert_eq!(filtered.len(), 1);
|
||||
assert_eq!(filtered[0].desc.name.to_string(),
|
||||
"1");
|
||||
assert_eq!(filtered[0].desc.name.to_string(), "1");
|
||||
assert!(filtered[0].desc.ignore == false);
|
||||
}
|
||||
|
||||
@ -1454,19 +1502,17 @@ mod tests {
|
||||
let mut opts = TestOpts::new();
|
||||
opts.run_tests = true;
|
||||
|
||||
let names =
|
||||
vec!("sha1::test".to_string(),
|
||||
"isize::test_to_str".to_string(),
|
||||
"isize::test_pow".to_string(),
|
||||
"test::do_not_run_ignored_tests".to_string(),
|
||||
"test::ignored_tests_result_in_ignored".to_string(),
|
||||
"test::first_free_arg_should_be_a_filter".to_string(),
|
||||
"test::parse_ignored_flag".to_string(),
|
||||
"test::filter_for_ignored_option".to_string(),
|
||||
"test::sort_tests".to_string());
|
||||
let tests =
|
||||
{
|
||||
fn testfn() { }
|
||||
let names = vec!["sha1::test".to_string(),
|
||||
"isize::test_to_str".to_string(),
|
||||
"isize::test_pow".to_string(),
|
||||
"test::do_not_run_ignored_tests".to_string(),
|
||||
"test::ignored_tests_result_in_ignored".to_string(),
|
||||
"test::first_free_arg_should_be_a_filter".to_string(),
|
||||
"test::parse_ignored_flag".to_string(),
|
||||
"test::filter_for_ignored_option".to_string(),
|
||||
"test::sort_tests".to_string()];
|
||||
let tests = {
|
||||
fn testfn() {}
|
||||
let mut tests = Vec::new();
|
||||
for name in &names {
|
||||
let test = TestDescAndFn {
|
||||
@ -1483,16 +1529,15 @@ mod tests {
|
||||
};
|
||||
let filtered = filter_tests(&opts, tests);
|
||||
|
||||
let expected =
|
||||
vec!("isize::test_pow".to_string(),
|
||||
"isize::test_to_str".to_string(),
|
||||
"sha1::test".to_string(),
|
||||
"test::do_not_run_ignored_tests".to_string(),
|
||||
"test::filter_for_ignored_option".to_string(),
|
||||
"test::first_free_arg_should_be_a_filter".to_string(),
|
||||
"test::ignored_tests_result_in_ignored".to_string(),
|
||||
"test::parse_ignored_flag".to_string(),
|
||||
"test::sort_tests".to_string());
|
||||
let expected = vec!["isize::test_pow".to_string(),
|
||||
"isize::test_to_str".to_string(),
|
||||
"sha1::test".to_string(),
|
||||
"test::do_not_run_ignored_tests".to_string(),
|
||||
"test::filter_for_ignored_option".to_string(),
|
||||
"test::first_free_arg_should_be_a_filter".to_string(),
|
||||
"test::ignored_tests_result_in_ignored".to_string(),
|
||||
"test::parse_ignored_flag".to_string(),
|
||||
"test::sort_tests".to_string()];
|
||||
|
||||
for (a, b) in expected.iter().zip(filtered) {
|
||||
assert!(*a == b.desc.name.to_string());
|
||||
|
@@ -111,7 +111,7 @@ pub trait Stats {
/// is otherwise equivalent.
///
/// See also: https://en.wikipedia.org/wiki/Quartile
fn quartiles(&self) -> (f64,f64,f64);
fn quartiles(&self) -> (f64, f64, f64);

/// Inter-quartile range: the difference between the 25th percentile (1st quartile) and the 75th
/// percentile (3rd quartile). See `quartiles`.
@@ -134,7 +134,7 @@ pub struct Summary {
pub std_dev_pct: f64,
pub median_abs_dev: f64,
pub median_abs_dev_pct: f64,
pub quartiles: (f64,f64,f64),
pub quartiles: (f64, f64, f64),
pub iqr: f64,
}

@@ -153,7 +153,7 @@ impl Summary {
median_abs_dev: samples.median_abs_dev(),
median_abs_dev_pct: samples.median_abs_dev_pct(),
quartiles: samples.quartiles(),
iqr: samples.iqr()
iqr: samples.iqr(),
}
}
}
@ -187,7 +187,7 @@ impl Stats for [f64] {
|
||||
partials.push(x);
|
||||
} else {
|
||||
partials[j] = x;
|
||||
partials.truncate(j+1);
|
||||
partials.truncate(j + 1);
|
||||
}
|
||||
}
|
||||
let zero: f64 = 0.0;
|
||||
@ -221,13 +221,13 @@ impl Stats for [f64] {
|
||||
let mut v: f64 = 0.0;
|
||||
for s in self {
|
||||
let x = *s - mean;
|
||||
v = v + x*x;
|
||||
v = v + x * x;
|
||||
}
|
||||
// NB: this is _supposed to be_ len-1, not len. If you
|
||||
// change it back to len, you will be calculating a
|
||||
// population variance, not a sample variance.
|
||||
let denom = (self.len() - 1) as f64;
|
||||
v/denom
|
||||
v / denom
|
||||
}
|
||||
}
|
||||
|
||||
@ -260,7 +260,7 @@ impl Stats for [f64] {
|
||||
percentile_of_sorted(&tmp, pct)
|
||||
}
|
||||
|
||||
fn quartiles(&self) -> (f64,f64,f64) {
|
||||
fn quartiles(&self) -> (f64, f64, f64) {
|
||||
let mut tmp = self.to_vec();
|
||||
local_sort(&mut tmp);
|
||||
let first = 25f64;
|
||||
@ -269,11 +269,11 @@ impl Stats for [f64] {
|
||||
let b = percentile_of_sorted(&tmp, secound);
|
||||
let third = 75f64;
|
||||
let c = percentile_of_sorted(&tmp, third);
|
||||
(a,b,c)
|
||||
(a, b, c)
|
||||
}
|
||||
|
||||
fn iqr(&self) -> f64 {
|
||||
let (a,_,c) = self.quartiles();
|
||||
let (a, _, c) = self.quartiles();
|
||||
c - a
|
||||
}
|
||||
}
|
||||
@ -299,7 +299,7 @@ fn percentile_of_sorted(sorted_samples: &[f64], pct: f64) -> f64 {
|
||||
let d = rank - lrank;
|
||||
let n = lrank as usize;
|
||||
let lo = sorted_samples[n];
|
||||
let hi = sorted_samples[n+1];
|
||||
let hi = sorted_samples[n + 1];
|
||||
lo + (hi - lo) * d
|
||||
}
|
||||
|
||||
@ -316,7 +316,7 @@ pub fn winsorize(samples: &mut [f64], pct: f64) {
|
||||
local_sort(&mut tmp);
|
||||
let lo = percentile_of_sorted(&tmp, pct);
|
||||
let hundred = 100 as f64;
|
||||
let hi = percentile_of_sorted(&tmp, hundred-pct);
|
||||
let hi = percentile_of_sorted(&tmp, hundred - pct);
|
||||
for samp in samples {
|
||||
if *samp > hi {
|
||||
*samp = hi
|
||||
@ -380,10 +380,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_norm2() {
|
||||
let val = &[
|
||||
958.0000000000,
|
||||
924.0000000000,
|
||||
];
|
||||
let val = &[958.0000000000, 924.0000000000];
|
||||
let summ = &Summary {
|
||||
sum: 1882.0000000000,
|
||||
min: 924.0000000000,
|
||||
@ -395,25 +392,23 @@ mod tests {
|
||||
std_dev_pct: 2.5549022912,
|
||||
median_abs_dev: 25.2042000000,
|
||||
median_abs_dev_pct: 2.6784484591,
|
||||
quartiles: (932.5000000000,941.0000000000,949.5000000000),
|
||||
quartiles: (932.5000000000, 941.0000000000, 949.5000000000),
|
||||
iqr: 17.0000000000,
|
||||
};
|
||||
check(val, summ);
|
||||
}
|
||||
#[test]
|
||||
fn test_norm10narrow() {
|
||||
let val = &[
|
||||
966.0000000000,
|
||||
985.0000000000,
|
||||
1110.0000000000,
|
||||
848.0000000000,
|
||||
821.0000000000,
|
||||
975.0000000000,
|
||||
962.0000000000,
|
||||
1157.0000000000,
|
||||
1217.0000000000,
|
||||
955.0000000000,
|
||||
];
|
||||
let val = &[966.0000000000,
|
||||
985.0000000000,
|
||||
1110.0000000000,
|
||||
848.0000000000,
|
||||
821.0000000000,
|
||||
975.0000000000,
|
||||
962.0000000000,
|
||||
1157.0000000000,
|
||||
1217.0000000000,
|
||||
955.0000000000];
|
||||
let summ = &Summary {
|
||||
sum: 9996.0000000000,
|
||||
min: 821.0000000000,
|
||||
@ -425,25 +420,23 @@ mod tests {
|
||||
std_dev_pct: 12.6742097933,
|
||||
median_abs_dev: 102.2994000000,
|
||||
median_abs_dev_pct: 10.5408964451,
|
||||
quartiles: (956.7500000000,970.5000000000,1078.7500000000),
|
||||
quartiles: (956.7500000000, 970.5000000000, 1078.7500000000),
|
||||
iqr: 122.0000000000,
|
||||
};
|
||||
check(val, summ);
|
||||
}
|
||||
#[test]
|
||||
fn test_norm10medium() {
|
||||
let val = &[
|
||||
954.0000000000,
|
||||
1064.0000000000,
|
||||
855.0000000000,
|
||||
1000.0000000000,
|
||||
743.0000000000,
|
||||
1084.0000000000,
|
||||
704.0000000000,
|
||||
1023.0000000000,
|
||||
357.0000000000,
|
||||
869.0000000000,
|
||||
];
|
||||
let val = &[954.0000000000,
|
||||
1064.0000000000,
|
||||
855.0000000000,
|
||||
1000.0000000000,
|
||||
743.0000000000,
|
||||
1084.0000000000,
|
||||
704.0000000000,
|
||||
1023.0000000000,
|
||||
357.0000000000,
|
||||
869.0000000000];
|
||||
let summ = &Summary {
|
||||
sum: 8653.0000000000,
|
||||
min: 357.0000000000,
|
||||
@ -455,25 +448,23 @@ mod tests {
|
||||
std_dev_pct: 25.4846418487,
|
||||
median_abs_dev: 195.7032000000,
|
||||
median_abs_dev_pct: 21.4704552935,
|
||||
quartiles: (771.0000000000,911.5000000000,1017.2500000000),
|
||||
quartiles: (771.0000000000, 911.5000000000, 1017.2500000000),
|
||||
iqr: 246.2500000000,
|
||||
};
|
||||
check(val, summ);
|
||||
}
|
||||
#[test]
|
||||
fn test_norm10wide() {
|
||||
let val = &[
|
||||
505.0000000000,
|
||||
497.0000000000,
|
||||
1591.0000000000,
|
||||
887.0000000000,
|
||||
1026.0000000000,
|
||||
136.0000000000,
|
||||
1580.0000000000,
|
||||
940.0000000000,
|
||||
754.0000000000,
|
||||
1433.0000000000,
|
||||
];
|
||||
let val = &[505.0000000000,
|
||||
497.0000000000,
|
||||
1591.0000000000,
|
||||
887.0000000000,
|
||||
1026.0000000000,
|
||||
136.0000000000,
|
||||
1580.0000000000,
|
||||
940.0000000000,
|
||||
754.0000000000,
|
||||
1433.0000000000];
|
||||
let summ = &Summary {
|
||||
sum: 9349.0000000000,
|
||||
min: 136.0000000000,
|
||||
@ -485,40 +476,38 @@ mod tests {
|
||||
std_dev_pct: 52.3146817750,
|
||||
median_abs_dev: 611.5725000000,
|
||||
median_abs_dev_pct: 66.9482758621,
|
||||
quartiles: (567.2500000000,913.5000000000,1331.2500000000),
|
||||
quartiles: (567.2500000000, 913.5000000000, 1331.2500000000),
|
||||
iqr: 764.0000000000,
|
||||
};
|
||||
check(val, summ);
|
||||
}
|
||||
#[test]
|
||||
fn test_norm25verynarrow() {
|
||||
let val = &[
|
||||
991.0000000000,
|
||||
1018.0000000000,
|
||||
998.0000000000,
|
||||
1013.0000000000,
|
||||
974.0000000000,
|
||||
1007.0000000000,
|
||||
1014.0000000000,
|
||||
999.0000000000,
|
||||
1011.0000000000,
|
||||
978.0000000000,
|
||||
985.0000000000,
|
||||
999.0000000000,
|
||||
983.0000000000,
|
||||
982.0000000000,
|
||||
1015.0000000000,
|
||||
1002.0000000000,
|
||||
977.0000000000,
|
||||
948.0000000000,
|
||||
1040.0000000000,
|
||||
974.0000000000,
|
||||
996.0000000000,
|
||||
989.0000000000,
|
||||
1015.0000000000,
|
||||
994.0000000000,
|
||||
1024.0000000000,
|
||||
];
|
||||
let val = &[991.0000000000,
|
||||
1018.0000000000,
|
||||
998.0000000000,
|
||||
1013.0000000000,
|
||||
974.0000000000,
|
||||
1007.0000000000,
|
||||
1014.0000000000,
|
||||
999.0000000000,
|
||||
1011.0000000000,
|
||||
978.0000000000,
|
||||
985.0000000000,
|
||||
999.0000000000,
|
||||
983.0000000000,
|
||||
982.0000000000,
|
||||
1015.0000000000,
|
||||
1002.0000000000,
|
||||
977.0000000000,
|
||||
948.0000000000,
|
||||
1040.0000000000,
|
||||
974.0000000000,
|
||||
996.0000000000,
|
||||
989.0000000000,
|
||||
1015.0000000000,
|
||||
994.0000000000,
|
||||
1024.0000000000];
|
||||
let summ = &Summary {
|
||||
sum: 24926.0000000000,
|
||||
min: 948.0000000000,
|
||||
@ -530,25 +519,23 @@ mod tests {
|
||||
std_dev_pct: 1.9888308788,
|
||||
median_abs_dev: 22.2390000000,
|
||||
median_abs_dev_pct: 2.2283567134,
|
||||
quartiles: (983.0000000000,998.0000000000,1013.0000000000),
|
||||
quartiles: (983.0000000000, 998.0000000000, 1013.0000000000),
|
||||
iqr: 30.0000000000,
|
||||
};
|
||||
check(val, summ);
|
||||
}
|
||||
#[test]
|
||||
fn test_exp10a() {
|
||||
let val = &[
|
||||
23.0000000000,
|
||||
11.0000000000,
|
||||
2.0000000000,
|
||||
57.0000000000,
|
||||
4.0000000000,
|
||||
12.0000000000,
|
||||
5.0000000000,
|
||||
29.0000000000,
|
||||
3.0000000000,
|
||||
21.0000000000,
|
||||
];
|
||||
let val = &[23.0000000000,
|
||||
11.0000000000,
|
||||
2.0000000000,
|
||||
57.0000000000,
|
||||
4.0000000000,
|
||||
12.0000000000,
|
||||
5.0000000000,
|
||||
29.0000000000,
|
||||
3.0000000000,
|
||||
21.0000000000];
|
||||
let summ = &Summary {
|
||||
sum: 167.0000000000,
|
||||
min: 2.0000000000,
|
||||
@ -560,25 +547,23 @@ mod tests {
|
||||
std_dev_pct: 101.5828843560,
|
||||
median_abs_dev: 13.3434000000,
|
||||
median_abs_dev_pct: 116.0295652174,
|
||||
quartiles: (4.2500000000,11.5000000000,22.5000000000),
|
||||
quartiles: (4.2500000000, 11.5000000000, 22.5000000000),
|
||||
iqr: 18.2500000000,
|
||||
};
|
||||
check(val, summ);
|
||||
}
|
||||
#[test]
|
||||
fn test_exp10b() {
|
||||
let val = &[
|
||||
24.0000000000,
|
||||
17.0000000000,
|
||||
6.0000000000,
|
||||
38.0000000000,
|
||||
25.0000000000,
|
||||
7.0000000000,
|
||||
51.0000000000,
|
||||
2.0000000000,
|
||||
61.0000000000,
|
||||
32.0000000000,
|
||||
];
|
||||
let val = &[24.0000000000,
|
||||
17.0000000000,
|
||||
6.0000000000,
|
||||
38.0000000000,
|
||||
25.0000000000,
|
||||
7.0000000000,
|
||||
51.0000000000,
|
||||
2.0000000000,
|
||||
61.0000000000,
|
||||
32.0000000000];
|
||||
let summ = &Summary {
|
||||
sum: 263.0000000000,
|
||||
min: 2.0000000000,
|
||||
@ -590,25 +575,23 @@ mod tests {
|
||||
std_dev_pct: 74.4671410520,
|
||||
median_abs_dev: 22.9803000000,
|
||||
median_abs_dev_pct: 93.7971428571,
|
||||
quartiles: (9.5000000000,24.5000000000,36.5000000000),
|
||||
quartiles: (9.5000000000, 24.5000000000, 36.5000000000),
|
||||
iqr: 27.0000000000,
|
||||
};
|
||||
check(val, summ);
|
||||
}
|
||||
#[test]
|
||||
fn test_exp10c() {
|
||||
let val = &[
|
||||
71.0000000000,
|
||||
2.0000000000,
|
||||
32.0000000000,
|
||||
1.0000000000,
|
||||
6.0000000000,
|
||||
28.0000000000,
|
||||
13.0000000000,
|
||||
37.0000000000,
|
||||
16.0000000000,
|
||||
36.0000000000,
|
||||
];
|
||||
let val = &[71.0000000000,
|
||||
2.0000000000,
|
||||
32.0000000000,
|
||||
1.0000000000,
|
||||
6.0000000000,
|
||||
28.0000000000,
|
||||
13.0000000000,
|
||||
37.0000000000,
|
||||
16.0000000000,
|
||||
36.0000000000];
|
||||
let summ = &Summary {
|
||||
sum: 242.0000000000,
|
||||
min: 1.0000000000,
|
||||
@ -620,40 +603,38 @@ mod tests {
|
||||
std_dev_pct: 88.4507754589,
|
||||
median_abs_dev: 21.4977000000,
|
||||
median_abs_dev_pct: 97.7168181818,
|
||||
quartiles: (7.7500000000,22.0000000000,35.0000000000),
|
||||
quartiles: (7.7500000000, 22.0000000000, 35.0000000000),
|
||||
iqr: 27.2500000000,
|
||||
};
|
||||
check(val, summ);
|
||||
}
|
||||
#[test]
|
||||
fn test_exp25() {
|
||||
let val = &[
|
||||
3.0000000000,
|
||||
24.0000000000,
|
||||
1.0000000000,
|
||||
19.0000000000,
|
||||
7.0000000000,
|
||||
5.0000000000,
|
||||
30.0000000000,
|
||||
39.0000000000,
|
||||
31.0000000000,
|
||||
13.0000000000,
|
||||
25.0000000000,
|
||||
48.0000000000,
|
||||
1.0000000000,
|
||||
6.0000000000,
|
||||
42.0000000000,
|
||||
63.0000000000,
|
||||
2.0000000000,
|
||||
12.0000000000,
|
||||
108.0000000000,
|
||||
26.0000000000,
|
||||
1.0000000000,
|
||||
7.0000000000,
|
||||
44.0000000000,
|
||||
25.0000000000,
|
||||
11.0000000000,
|
||||
];
|
||||
let val = &[3.0000000000,
|
||||
24.0000000000,
|
||||
1.0000000000,
|
||||
19.0000000000,
|
||||
7.0000000000,
|
||||
5.0000000000,
|
||||
30.0000000000,
|
||||
39.0000000000,
|
||||
31.0000000000,
|
||||
13.0000000000,
|
||||
25.0000000000,
|
||||
48.0000000000,
|
||||
1.0000000000,
|
||||
6.0000000000,
|
||||
42.0000000000,
|
||||
63.0000000000,
|
||||
2.0000000000,
|
||||
12.0000000000,
|
||||
108.0000000000,
|
||||
26.0000000000,
|
||||
1.0000000000,
|
||||
7.0000000000,
|
||||
44.0000000000,
|
||||
25.0000000000,
|
||||
11.0000000000];
|
||||
let summ = &Summary {
|
||||
sum: 593.0000000000,
|
||||
min: 1.0000000000,
|
||||
@ -665,40 +646,38 @@ mod tests {
|
||||
std_dev_pct: 103.3565983562,
|
||||
median_abs_dev: 19.2738000000,
|
||||
median_abs_dev_pct: 101.4410526316,
|
||||
quartiles: (6.0000000000,19.0000000000,31.0000000000),
|
||||
quartiles: (6.0000000000, 19.0000000000, 31.0000000000),
|
||||
iqr: 25.0000000000,
|
||||
};
|
||||
check(val, summ);
|
||||
}
|
||||
#[test]
|
||||
fn test_binom25() {
|
||||
let val = &[
|
||||
18.0000000000,
|
||||
17.0000000000,
|
||||
27.0000000000,
|
||||
15.0000000000,
|
||||
21.0000000000,
|
||||
25.0000000000,
|
||||
17.0000000000,
|
||||
24.0000000000,
|
||||
25.0000000000,
|
||||
24.0000000000,
|
||||
26.0000000000,
|
||||
26.0000000000,
|
||||
23.0000000000,
|
||||
15.0000000000,
|
||||
23.0000000000,
|
||||
17.0000000000,
|
||||
18.0000000000,
|
||||
18.0000000000,
|
||||
21.0000000000,
|
||||
16.0000000000,
|
||||
15.0000000000,
|
||||
31.0000000000,
|
||||
20.0000000000,
|
||||
17.0000000000,
|
||||
15.0000000000,
|
||||
];
|
||||
let val = &[18.0000000000,
|
||||
17.0000000000,
|
||||
27.0000000000,
|
||||
15.0000000000,
|
||||
21.0000000000,
|
||||
25.0000000000,
|
||||
17.0000000000,
|
||||
24.0000000000,
|
||||
25.0000000000,
|
||||
24.0000000000,
|
||||
26.0000000000,
|
||||
26.0000000000,
|
||||
23.0000000000,
|
||||
15.0000000000,
|
||||
23.0000000000,
|
||||
17.0000000000,
|
||||
18.0000000000,
|
||||
18.0000000000,
|
||||
21.0000000000,
|
||||
16.0000000000,
|
||||
15.0000000000,
|
||||
31.0000000000,
|
||||
20.0000000000,
|
||||
17.0000000000,
|
||||
15.0000000000];
|
||||
let summ = &Summary {
|
||||
sum: 514.0000000000,
|
||||
min: 15.0000000000,
|
||||
@ -710,40 +689,38 @@ mod tests {
|
||||
std_dev_pct: 22.2037202539,
|
||||
median_abs_dev: 5.9304000000,
|
||||
median_abs_dev_pct: 29.6520000000,
|
||||
quartiles: (17.0000000000,20.0000000000,24.0000000000),
|
||||
quartiles: (17.0000000000, 20.0000000000, 24.0000000000),
|
||||
iqr: 7.0000000000,
|
||||
};
|
||||
check(val, summ);
|
||||
}
|
||||
#[test]
|
||||
fn test_pois25lambda30() {
|
||||
let val = &[
|
||||
27.0000000000,
|
||||
33.0000000000,
|
||||
34.0000000000,
|
||||
34.0000000000,
|
||||
24.0000000000,
|
||||
39.0000000000,
|
||||
28.0000000000,
|
||||
27.0000000000,
|
||||
31.0000000000,
|
||||
28.0000000000,
|
||||
38.0000000000,
|
||||
21.0000000000,
|
||||
33.0000000000,
|
||||
36.0000000000,
|
||||
29.0000000000,
|
||||
37.0000000000,
|
||||
32.0000000000,
|
||||
34.0000000000,
|
||||
31.0000000000,
|
||||
39.0000000000,
|
||||
25.0000000000,
|
||||
31.0000000000,
|
||||
32.0000000000,
|
||||
40.0000000000,
|
||||
24.0000000000,
|
||||
];
|
||||
let val = &[27.0000000000,
|
||||
33.0000000000,
|
||||
34.0000000000,
|
||||
34.0000000000,
|
||||
24.0000000000,
|
||||
39.0000000000,
|
||||
28.0000000000,
|
||||
27.0000000000,
|
||||
31.0000000000,
|
||||
28.0000000000,
|
||||
38.0000000000,
|
||||
21.0000000000,
|
||||
33.0000000000,
|
||||
36.0000000000,
|
||||
29.0000000000,
|
||||
37.0000000000,
|
||||
32.0000000000,
|
||||
34.0000000000,
|
||||
31.0000000000,
|
||||
39.0000000000,
|
||||
25.0000000000,
|
||||
31.0000000000,
|
||||
32.0000000000,
|
||||
40.0000000000,
|
||||
24.0000000000];
|
||||
let summ = &Summary {
|
||||
sum: 787.0000000000,
|
||||
min: 21.0000000000,
|
||||
@ -755,40 +732,38 @@ mod tests {
|
||||
std_dev_pct: 16.3814245145,
|
||||
median_abs_dev: 5.9304000000,
|
||||
median_abs_dev_pct: 18.5325000000,
|
||||
quartiles: (28.0000000000,32.0000000000,34.0000000000),
|
||||
quartiles: (28.0000000000, 32.0000000000, 34.0000000000),
|
||||
iqr: 6.0000000000,
|
||||
};
|
||||
check(val, summ);
|
||||
}
|
||||
#[test]
|
||||
fn test_pois25lambda40() {
|
||||
let val = &[
|
||||
42.0000000000,
|
||||
50.0000000000,
|
||||
42.0000000000,
|
||||
46.0000000000,
|
||||
34.0000000000,
|
||||
45.0000000000,
|
||||
34.0000000000,
|
||||
49.0000000000,
|
||||
39.0000000000,
|
||||
28.0000000000,
|
||||
40.0000000000,
|
||||
35.0000000000,
|
||||
37.0000000000,
|
||||
39.0000000000,
|
||||
46.0000000000,
|
||||
44.0000000000,
|
||||
32.0000000000,
|
||||
45.0000000000,
|
||||
42.0000000000,
|
||||
37.0000000000,
|
||||
48.0000000000,
|
||||
42.0000000000,
|
||||
33.0000000000,
|
||||
42.0000000000,
|
||||
48.0000000000,
|
||||
];
|
||||
let val = &[42.0000000000,
|
||||
50.0000000000,
|
||||
42.0000000000,
|
||||
46.0000000000,
|
||||
34.0000000000,
|
||||
45.0000000000,
|
||||
34.0000000000,
|
||||
49.0000000000,
|
||||
39.0000000000,
|
||||
28.0000000000,
|
||||
40.0000000000,
|
||||
35.0000000000,
|
||||
37.0000000000,
|
||||
39.0000000000,
|
||||
46.0000000000,
|
||||
44.0000000000,
|
||||
32.0000000000,
|
||||
45.0000000000,
|
||||
42.0000000000,
|
||||
37.0000000000,
|
||||
48.0000000000,
|
||||
42.0000000000,
|
||||
33.0000000000,
|
||||
42.0000000000,
|
||||
48.0000000000];
|
||||
let summ = &Summary {
|
||||
sum: 1019.0000000000,
|
||||
min: 28.0000000000,
|
||||
@ -800,40 +775,38 @@ mod tests {
|
||||
std_dev_pct: 14.3978417577,
|
||||
median_abs_dev: 5.9304000000,
|
||||
median_abs_dev_pct: 14.1200000000,
|
||||
quartiles: (37.0000000000,42.0000000000,45.0000000000),
|
||||
quartiles: (37.0000000000, 42.0000000000, 45.0000000000),
|
||||
iqr: 8.0000000000,
|
||||
};
|
||||
check(val, summ);
|
||||
}
|
||||
#[test]
|
||||
fn test_pois25lambda50() {
|
||||
let val = &[
|
||||
45.0000000000,
|
||||
43.0000000000,
|
||||
44.0000000000,
|
||||
61.0000000000,
|
||||
51.0000000000,
|
||||
53.0000000000,
|
||||
59.0000000000,
|
||||
52.0000000000,
|
||||
49.0000000000,
|
||||
51.0000000000,
|
||||
51.0000000000,
|
||||
50.0000000000,
|
||||
49.0000000000,
|
||||
56.0000000000,
|
||||
42.0000000000,
|
||||
52.0000000000,
|
||||
51.0000000000,
|
||||
43.0000000000,
|
||||
48.0000000000,
|
||||
48.0000000000,
|
||||
50.0000000000,
|
||||
42.0000000000,
|
||||
43.0000000000,
|
||||
42.0000000000,
|
||||
60.0000000000,
|
||||
];
|
||||
let val = &[45.0000000000,
|
||||
43.0000000000,
|
||||
44.0000000000,
|
||||
61.0000000000,
|
||||
51.0000000000,
|
||||
53.0000000000,
|
||||
59.0000000000,
|
||||
52.0000000000,
|
||||
49.0000000000,
|
||||
51.0000000000,
|
||||
51.0000000000,
|
||||
50.0000000000,
|
||||
49.0000000000,
|
||||
56.0000000000,
|
||||
42.0000000000,
|
||||
52.0000000000,
|
||||
51.0000000000,
|
||||
43.0000000000,
|
||||
48.0000000000,
|
||||
48.0000000000,
|
||||
50.0000000000,
|
||||
42.0000000000,
|
||||
43.0000000000,
|
||||
42.0000000000,
|
||||
60.0000000000];
|
||||
let summ = &Summary {
|
||||
sum: 1235.0000000000,
|
||||
min: 42.0000000000,
|
||||
@ -845,40 +818,38 @@ mod tests {
|
||||
std_dev_pct: 11.3913245723,
|
||||
median_abs_dev: 4.4478000000,
|
||||
median_abs_dev_pct: 8.8956000000,
|
||||
quartiles: (44.0000000000,50.0000000000,52.0000000000),
|
||||
quartiles: (44.0000000000, 50.0000000000, 52.0000000000),
|
||||
iqr: 8.0000000000,
|
||||
};
|
||||
check(val, summ);
|
||||
}
|
||||
#[test]
|
||||
fn test_unif25() {
|
||||
let val = &[
|
||||
99.0000000000,
|
||||
55.0000000000,
|
||||
92.0000000000,
|
||||
79.0000000000,
|
||||
14.0000000000,
|
||||
2.0000000000,
|
||||
33.0000000000,
|
||||
49.0000000000,
|
||||
3.0000000000,
|
||||
32.0000000000,
|
||||
84.0000000000,
|
||||
59.0000000000,
|
||||
22.0000000000,
|
||||
86.0000000000,
|
||||
76.0000000000,
|
||||
31.0000000000,
|
||||
29.0000000000,
|
||||
11.0000000000,
|
||||
41.0000000000,
|
||||
53.0000000000,
|
||||
45.0000000000,
|
||||
44.0000000000,
|
||||
98.0000000000,
|
||||
98.0000000000,
|
||||
7.0000000000,
|
||||
];
|
||||
let val = &[99.0000000000,
|
||||
55.0000000000,
|
||||
92.0000000000,
|
||||
79.0000000000,
|
||||
14.0000000000,
|
||||
2.0000000000,
|
||||
33.0000000000,
|
||||
49.0000000000,
|
||||
3.0000000000,
|
||||
32.0000000000,
|
||||
84.0000000000,
|
||||
59.0000000000,
|
||||
22.0000000000,
|
||||
86.0000000000,
|
||||
76.0000000000,
|
||||
31.0000000000,
|
||||
29.0000000000,
|
||||
11.0000000000,
|
||||
41.0000000000,
|
||||
53.0000000000,
|
||||
45.0000000000,
|
||||
44.0000000000,
|
||||
98.0000000000,
|
||||
98.0000000000,
|
||||
7.0000000000];
|
||||
let summ = &Summary {
|
||||
sum: 1242.0000000000,
|
||||
min: 2.0000000000,
|
||||
@ -890,7 +861,7 @@ mod tests {
|
||||
std_dev_pct: 64.1488719719,
|
||||
median_abs_dev: 45.9606000000,
|
||||
median_abs_dev_pct: 102.1346666667,
|
||||
quartiles: (29.0000000000,45.0000000000,79.0000000000),
|
||||
quartiles: (29.0000000000, 45.0000000000, 79.0000000000),
|
||||
iqr: 50.0000000000,
|
||||
};
|
||||
check(val, summ);
|
||||
@@ -920,7 +891,7 @@ mod bench {
#[bench]
pub fn sum_many_f64(b: &mut Bencher) {
let nums = [-1e30f64, 1e60, 1e30, 1.0, -1e60];
let v = (0..500).map(|i| nums[i%5]).collect::<Vec<_>>();
let v = (0..500).map(|i| nums[i % 5]).collect::<Vec<_>>();

b.iter(|| {
v.sum();