2011-07-09 18:08:03 -05:00
|
|
|
// Support code for rustc's built in test runner generator. Currently,
|
|
|
|
// none of this is meant for users. It is intended to support the
|
|
|
|
// simplest interface possible for representing and running tests
|
|
|
|
// while providing a base that other test frameworks may build off of.
|
|
|
|
|
2012-01-11 08:15:54 -06:00
|
|
|
import result::{ok, err};
|
2012-01-13 14:38:29 -06:00
|
|
|
import io::writer_util;
|
2012-01-16 04:21:01 -06:00
|
|
|
import core::ctypes;
|
2011-07-16 19:04:20 -05:00
|
|
|
|
2011-07-09 18:08:03 -05:00
|
|
|
export test_name;
|
|
|
|
export test_fn;
|
2011-10-13 18:42:43 -05:00
|
|
|
export default_test_fn;
|
2011-07-09 18:08:03 -05:00
|
|
|
export test_desc;
|
|
|
|
export test_main;
|
2011-07-14 13:29:54 -05:00
|
|
|
export test_result;
|
2011-07-14 18:05:33 -05:00
|
|
|
export test_opts;
|
2011-07-14 13:29:54 -05:00
|
|
|
export tr_ok;
|
|
|
|
export tr_failed;
|
|
|
|
export tr_ignored;
|
The Big Test Suite Overhaul
This replaces the make-based test runner with a set of Rust-based test
runners. I believe that all existing functionality has been
preserved. The primary objective is to dogfood the Rust test
framework.
A few main things happen here:
1) The run-pass/lib-* tests are all moved into src/test/stdtest. This
is a standalone test crate intended for all standard library tests. It
compiles to build/test/stdtest.stageN.
2) rustc now compiles into yet another build artifact, this one a test
runner that runs any tests contained directly in the rustc crate. This
allows much more fine-grained unit testing of the compiler. It
compiles to build/test/rustctest.stageN.
3) There is a new custom test runner crate at src/test/compiletest
that reproduces all the functionality for running the compile-fail,
run-fail, run-pass and bench tests while integrating with Rust's test
framework. It compiles to build/test/compiletest.stageN.
4) The build rules have been completely changed to use the new test
runners, while also being less redundant, following the example of the
recent stageN.mk rewrite.
It adds two new features to the cfail/rfail/rpass/bench tests:
1) Tests can specify multiple 'error-pattern' directives which must be
satisfied in order.
2) Tests can specify a 'compile-flags' directive which will make the
test runner provide additional command line arguments to rustc.
There are some downsides, the primary being that Rust has to be
functioning pretty well just to run _any_ tests, which I imagine will
be the source of some frustration when the entire test suite
breaks. Will also cause some headaches during porting.
Not having individual make rules, each rpass, etc test no longer
remembers between runs whether it completed successfully. As a result,
it's not possible to incrementally fix multiple tests by just running
'make check', fixing a test, and repeating without re-running all the
tests contained in the test runner. Instead you can filter just the
tests you want to run by using the TESTNAME environment variable.
This also dispenses with the ability to run stage0 tests, but they
tended to be broken more often than not anyway.
2011-07-12 21:01:09 -05:00
|
|
|
export run_tests_console;
|
2011-07-26 20:34:29 -05:00
|
|
|
export run_tests_console_;
|
2011-07-14 13:29:54 -05:00
|
|
|
export run_test;
|
2011-07-14 18:05:33 -05:00
|
|
|
export filter_tests;
|
|
|
|
export parse_opts;
|
2011-07-26 20:34:29 -05:00
|
|
|
export test_to_task;
|
|
|
|
export default_test_to_task;
|
2011-07-27 20:30:57 -05:00
|
|
|
export configure_test_task;
|
2011-08-17 17:33:29 -05:00
|
|
|
export joinable;
|
2011-07-09 18:08:03 -05:00
|
|
|
|
2011-11-16 22:49:38 -06:00
|
|
|
// Native hooks into the Rust runtime scheduler.
#[abi = "cdecl"]
native mod rustrt {
    // Number of OS threads the scheduler runs on; used to decide how many
    // test tasks may be in flight at once.
    fn sched_threads() -> ctypes::size_t;
}
|
|
|
|
|
2011-07-09 18:08:03 -05:00
|
|
|
// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
type test_name = str;
|
2011-07-09 18:08:03 -05:00
|
|
|
|
|
|
|
// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function fails then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into tasks.
type test_fn<T> = T;

// The default test function type: a sendable closure taking no
// arguments, suitable for spawning into its own task.
type default_test_fn = test_fn<fn~()>;
|
2011-07-09 18:08:03 -05:00
|
|
|
|
|
|
|
// The definition of a single test. A test runner will run a list of
// these.
type test_desc<T> = {
    name: test_name,    // path-like test name, used for filtering/sorting
    fn: test_fn<T>,     // the function to execute
    ignore: bool,       // skipped unless --ignored was passed
    should_fail: bool   // test passes only if its task fails
};
|
2011-07-09 18:08:03 -05:00
|
|
|
|
|
|
|
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs (generated at compile time).
// Fails (terminating the task) on bad options or any failing test.
fn test_main(args: [str], tests: [test_desc<default_test_fn>]) {
    // Typestate precondition of parse_opts: args holds at least the
    // program name.
    check (vec::is_not_empty(args));
    let opts =
        alt parse_opts(args) {
          either::left(o) { o }
          either::right(m) { fail m }
        };
    if !run_tests_console(opts, tests) { fail "Some tests failed"; }
}
|
|
|
|
|
2011-09-02 17:34:58 -05:00
|
|
|
// Parsed command line options: an optional name filter and whether to
// run tests marked #[ignore].
type test_opts = {filter: option::t<str>, run_ignored: bool};

// Result of option parsing: either the options or an error message.
type opt_res = either::t<test_opts, str>;
|
2011-07-14 18:05:33 -05:00
|
|
|
|
|
|
|
// Parses command line arguments into test options
fn parse_opts(args: [str]) : vec::is_not_empty(args) -> opt_res {

    // Drop the program name; only the remaining args are options.
    let args_ = vec::tail(args);
    let opts = [getopts::optflag("ignored")];
    let match =
        alt getopts::getopts(args_, opts) {
          ok(m) { m }
          err(f) { ret either::right(getopts::fail_str(f)) }
        };

    // The first free (non-flag) argument, if any, is the test name filter.
    let filter =
        if vec::len(match.free) > 0u {
            option::some(match.free[0])
        } else { option::none };

    let run_ignored = getopts::opt_present(match, "ignored");

    let test_opts = {filter: filter, run_ignored: run_ignored};

    ret either::left(test_opts);
}
|
|
|
|
|
2011-07-27 07:19:39 -05:00
|
|
|
// Outcome of running a single test.
tag test_result { tr_ok; tr_failed; tr_ignored; }

// A joinable test task: the task handle plus the port on which its
// termination notification arrives.
type joinable = (task::task, comm::port<task::task_notification>);
|
2011-08-17 17:33:29 -05:00
|
|
|
|
2011-07-26 20:34:29 -05:00
|
|
|
// To get isolation and concurrency tests have to be run in their own tasks.
// In cases where test functions are closures it is not ok to just dump them
// into a task and run them, so this transformation gives the caller a chance
// to create the test task.
type test_to_task<T> = fn@(test_fn<T>) -> joinable;
|
2011-07-26 20:34:29 -05:00
|
|
|
|
2011-07-11 18:33:21 -05:00
|
|
|
// A simple console test runner. Uses the default task-spawning strategy;
// returns true if every test passed.
fn run_tests_console(opts: test_opts,
                     tests: [test_desc<default_test_fn>]) -> bool {
    run_tests_console_(opts, tests, default_test_to_task)
}
|
|
|
|
|
2012-01-05 08:35:37 -06:00
|
|
|
// Console test runner parameterized over the test function type and the
// strategy used to turn a test function into a task. Prints progress and
// a summary to stdout; returns true if all tests passed.
fn run_tests_console_<T: copy>(opts: test_opts, tests: [test_desc<T>],
                               to_task: test_to_task<T>) -> bool {

    // Mutable state threaded through the event callback: result counters
    // plus the failed tests, retained for the final summary.
    type test_state =
        @{out: io::writer,
          use_color: bool,
          mutable total: uint,
          mutable passed: uint,
          mutable failed: uint,
          mutable ignored: uint,
          mutable failures: [test_desc<T>]};

    // Handles each event emitted by run_tests, updating counters and
    // printing per-test progress as results come in.
    fn callback<T: copy>(event: testevent<T>, st: test_state) {
        alt event {
          te_filtered(filtered_tests) {
            st.total = vec::len(filtered_tests);
            st.out.write_line(#fmt["\nrunning %u tests", st.total]);
          }
          te_wait(test) { st.out.write_str(#fmt["test %s ... ", test.name]); }
          te_result(test, result) {
            alt result {
              tr_ok {
                st.passed += 1u;
                write_ok(st.out, st.use_color);
                st.out.write_line("");
              }
              tr_failed {
                st.failed += 1u;
                write_failed(st.out, st.use_color);
                st.out.write_line("");
                st.failures += [test];
              }
              tr_ignored {
                st.ignored += 1u;
                write_ignored(st.out, st.use_color);
                st.out.write_line("");
              }
            }
          }
        }
    }

    let st =
        @{out: io::stdout(),
          use_color: use_color(),
          mutable total: 0u,
          mutable passed: 0u,
          mutable failed: 0u,
          mutable ignored: 0u,
          mutable failures: []};

    run_tests(opts, tests, to_task, bind callback(_, st));

    // Every filtered test must be accounted for exactly once.
    assert (st.passed + st.failed + st.ignored == st.total);
    let success = st.failed == 0u;

    if !success {
        st.out.write_line("\nfailures:");
        for test: test_desc<T> in st.failures {
            let testname = test.name; // Satisfy alias analysis
            st.out.write_line(#fmt["  %s", testname]);
        }
    }

    st.out.write_str(#fmt["\nresult: "]);
    if success {
        // There's no parallelism at this point so it's safe to use color
        write_ok(st.out, true);
    } else { write_failed(st.out, true); }
    st.out.write_str(#fmt[". %u passed; %u failed; %u ignored\n\n", st.passed,
                          st.failed, st.ignored]);

    ret success;

    // The three result words, colored green/red/yellow respectively when
    // color output is enabled.
    fn write_ok(out: io::writer, use_color: bool) {
        write_pretty(out, "ok", term::color_green, use_color);
    }

    fn write_failed(out: io::writer, use_color: bool) {
        write_pretty(out, "FAILED", term::color_red, use_color);
    }

    fn write_ignored(out: io::writer, use_color: bool) {
        write_pretty(out, "ignored", term::color_yellow, use_color);
    }

    // Write a word, wrapped in terminal color escapes when both requested
    // and supported by the terminal.
    fn write_pretty(out: io::writer, word: str, color: u8, use_color: bool) {
        if use_color && term::color_supported() {
            term::fg(out, color);
        }
        out.write_str(word);
        if use_color && term::color_supported() {
            term::reset(out);
        }
    }
}
|
|
|
|
|
2011-08-19 17:16:48 -05:00
|
|
|
fn use_color() -> bool { ret get_concurrency() == 1u; }
|
2011-07-29 21:54:05 -05:00
|
|
|
|
2011-10-25 08:56:55 -05:00
|
|
|
// Events emitted by run_tests as a run progresses.
tag testevent<T> {
    te_filtered([test_desc<T>]);          // the final filtered test list
    te_wait(test_desc<T>);                // about to block on this test
    te_result(test_desc<T>, test_result); // this test finished
}
|
|
|
|
|
2012-01-05 08:35:37 -06:00
|
|
|
// Drives a test run: filters the tests, keeps up to `concurrency` test
// tasks in flight, and reports progress through `callback`.
fn run_tests<T: copy>(opts: test_opts, tests: [test_desc<T>],
                      to_task: test_to_task<T>,
                      callback: fn@(testevent<T>)) {

    let filtered_tests = filter_tests(opts, tests);
    callback(te_filtered(filtered_tests));

    // It's tempting to just spawn all the tests at once but that doesn't
    // provide a great user experience because you might sit waiting for the
    // result of a particular test for an unusually long amount of time.
    let concurrency = get_concurrency();
    #debug("using %u test tasks", concurrency);
    let total = vec::len(filtered_tests);
    let run_idx = 0u;   // next test to spawn
    let wait_idx = 0u;  // next test whose result we collect
    let futures = [];   // tests currently in flight, in spawn order

    while wait_idx < total {
        // Keep the pipeline full: spawn until `concurrency` are running.
        while vec::len(futures) < concurrency && run_idx < total {
            futures += [run_test(filtered_tests[run_idx], to_task)];
            run_idx += 1u;
        }

        // Collect results in spawn order so the output is deterministic.
        let future = futures[0];
        callback(te_wait(future.test));
        let result = future.wait();
        callback(te_result(future.test, result));
        futures = vec::slice(futures, 1u, vec::len(futures));
        wait_idx += 1u;
    }
}
|
|
|
|
|
2011-07-25 20:07:25 -05:00
|
|
|
// Number of test tasks to run concurrently: one per scheduler thread.
fn get_concurrency() -> uint { rustrt::sched_threads() }
|
2011-07-25 17:21:36 -05:00
|
|
|
|
2012-01-05 08:35:37 -06:00
|
|
|
// Applies the configured filters to the test list: the name filter, the
// --ignored transformation, and finally an alphabetical sort by name.
fn filter_tests<T: copy>(opts: test_opts,
                         tests: [test_desc<T>]) -> [test_desc<T>] {
    let filtered = tests;

    // Remove tests that don't match the test filter
    filtered = if option::is_none(opts.filter) {
        filtered
    } else {
        let filter_str =
            alt opts.filter {
              option::some(f) { f }
              option::none { "" }
            };

        // Keep a test iff its name contains the filter string.
        fn filter_fn<T: copy>(test: test_desc<T>, filter_str: str) ->
            option::t<test_desc<T>> {
            if str::find(test.name, filter_str) >= 0 {
                ret option::some(test);
            } else { ret option::none; }
        }

        let filter = bind filter_fn(_, filter_str);

        vec::filter_map(filtered, filter)
    };

    // Maybe pull out the ignored test and unignore them
    filtered = if !opts.run_ignored {
        filtered
    } else {
        // Keep only the ignored tests, with their ignore flag flipped off
        // so they will actually be run.
        fn filter<T: copy>(test: test_desc<T>) -> option::t<test_desc<T>> {
            if test.ignore {
                ret option::some({name: test.name,
                                  fn: test.fn,
                                  ignore: false,
                                  should_fail: test.should_fail});
            } else { ret option::none; }
        };

        vec::filter_map(filtered, bind filter(_))
    };

    // Sort the tests alphabetically
    filtered =
        {
            fn lteq<T>(t1: test_desc<T>, t2: test_desc<T>) -> bool {
                str::lteq(t1.name, t2.name)
            }
            sort::merge_sort(bind lteq(_, _), filtered)
        };

    ret filtered;
}
|
2011-07-09 18:08:03 -05:00
|
|
|
|
2011-10-25 08:56:55 -05:00
|
|
|
// A handle on a launched test: its descriptor plus a blocking wait
// function that yields the result.
type test_future<T> = {test: test_desc<T>, wait: fn@() -> test_result};
|
2011-07-25 17:21:36 -05:00
|
|
|
|
2012-01-05 08:35:37 -06:00
|
|
|
// Launches a single test (unless it is ignored) and returns a future for
// its result, translating the task outcome through the should_fail flag.
fn run_test<T: copy>(test: test_desc<T>,
                     to_task: test_to_task<T>) -> test_future<T> {
    if test.ignore {
        // Never spawn a task for an ignored test.
        ret {test: test, wait: fn@() -> test_result { tr_ignored }};
    }

    let test_task = to_task(test.fn);
    ret {test: test,
         wait: fn@() -> test_result {
             alt task::join(test_task) {
               task::tr_success {
                 if test.should_fail { tr_failed }
                 else { tr_ok }
               }
               task::tr_failure {
                 // A failing task is a pass for a should_fail test.
                 if test.should_fail { tr_ok }
                 else { tr_failed }
               }
             }
         }
        };
}
|
|
|
|
|
2011-07-15 00:24:19 -05:00
|
|
|
// We need to run our tests in another task in order to trap test failures.
// This function only works with functions that don't contain closures.
fn default_test_to_task(&&f: default_test_fn) -> joinable {
    // Copy the test fn into the spawned closure; the child unsupervises
    // itself before running so its failure does not kill the runner.
    ret task::spawn_joinable(fn~[copy f]() {
        configure_test_task();
        f();
    });
}
|
|
|
|
|
2011-07-27 20:30:57 -05:00
|
|
|
// Call from within a test task to make sure it's set up correctly
fn configure_test_task() {
    // If this task fails we don't want that failure to propagate to the
    // test runner or else we couldn't keep running tests
    task::unsupervise();
}
|
2011-07-11 13:19:32 -05:00
|
|
|
|
2012-01-17 21:05:07 -06:00
|
|
|
#[cfg(test)]
mod tests {

    // An ignored test must never report tr_ok, even though its body
    // would fail if run.
    #[test]
    fn do_not_run_ignored_tests() {
        fn f() { fail; }
        let desc = {
            name: "whatever",
            fn: f,
            ignore: true,
            should_fail: false
        };
        let future = test::run_test(desc, test::default_test_to_task);
        let result = future.wait();
        assert result != test::tr_ok;
    }

    // An ignored test reports tr_ignored specifically.
    #[test]
    fn ignored_tests_result_in_ignored() {
        fn f() { }
        let desc = {
            name: "whatever",
            fn: f,
            ignore: true,
            should_fail: false
        };
        let res = test::run_test(desc, test::default_test_to_task).wait();
        assert (res == test::tr_ignored);
    }

    // A should_fail test passes when its body fails.
    #[test]
    #[ignore(cfg(target_os = "win32"))]
    fn test_should_fail() {
        fn f() { fail; }
        let desc = {
            name: "whatever",
            fn: f,
            ignore: false,
            should_fail: true
        };
        let res = test::run_test(desc, test::default_test_to_task).wait();
        assert res == test::tr_ok;
    }

    // ... and fails when its body succeeds.
    #[test]
    fn test_should_fail_but_succeeds() {
        fn f() { }
        let desc = {
            name: "whatever",
            fn: f,
            ignore: false,
            should_fail: true
        };
        let res = test::run_test(desc, test::default_test_to_task).wait();
        assert res == test::tr_failed;
    }

    #[test]
    fn first_free_arg_should_be_a_filter() {
        let args = ["progname", "filter"];
        check (vec::is_not_empty(args));
        let opts = alt test::parse_opts(args) { either::left(o) { o } };
        assert (str::eq("filter", option::get(opts.filter)));
    }

    #[test]
    fn parse_ignored_flag() {
        let args = ["progname", "filter", "--ignored"];
        check (vec::is_not_empty(args));
        let opts = alt test::parse_opts(args) { either::left(o) { o } };
        assert (opts.run_ignored);
    }

    #[test]
    fn filter_for_ignored_option() {
        // When we run ignored tests the test filter should filter out all the
        // unignored tests and flip the ignore flag on the rest to false

        let opts = {filter: option::none, run_ignored: true};
        let tests =
            [{name: "1", fn: fn@() { }, ignore: true, should_fail: false},
             {name: "2", fn: fn@() { }, ignore: false, should_fail: false}];
        let filtered = test::filter_tests(opts, tests);

        assert (vec::len(filtered) == 1u);
        assert (filtered[0].name == "1");
        assert (filtered[0].ignore == false);
    }

    #[test]
    fn sort_tests() {
        let opts = {filter: option::none, run_ignored: false};

        // Deliberately unsorted input names.
        let names =
            ["sha1::test", "int::test_to_str", "int::test_pow",
             "test::do_not_run_ignored_tests",
             "test::ignored_tests_result_in_ignored",
             "test::first_free_arg_should_be_a_filter",
             "test::parse_ignored_flag", "test::filter_for_ignored_option",
             "test::sort_tests"];
        let tests =
            {
                let testfn = fn@() { };
                let tests = [];
                for name: str in names {
                    let test = {name: name, fn: testfn, ignore: false,
                                should_fail: false};
                    tests += [test];
                }
                tests
            };
        let filtered = test::filter_tests(opts, tests);

        let expected =
            ["int::test_pow", "int::test_to_str", "sha1::test",
             "test::do_not_run_ignored_tests", "test::filter_for_ignored_option",
             "test::first_free_arg_should_be_a_filter",
             "test::ignored_tests_result_in_ignored", "test::parse_ignored_flag",
             "test::sort_tests"];

        // With no filtering configured, only the order may change.
        check (vec::same_length(expected, filtered));
        let pairs = vec::zip(expected, filtered);

        for (a, b) in pairs { assert (a == b.name); }
    }
}
|
|
|
|
|
|
|
|
|
2011-07-09 18:08:03 -05:00
|
|
|
// Local Variables:
|
|
|
|
// mode: rust;
|
|
|
|
// fill-column: 78;
|
|
|
|
// indent-tabs-mode: nil
|
|
|
|
// c-basic-offset: 4
|
|
|
|
// buffer-file-coding-system: utf-8-unix
|
|
|
|
// End:
|