// Support code for rustc's built-in test runner generator. Currently,
// none of this is meant for users. It is intended to support the
// simplest interface possible for representing and running tests
// while providing a base that other test frameworks may build off of.

import sort = sort::ivector;
import getenv = generic_os::getenv;

export test_name;
export test_fn;
export test_desc;
export test_main;
export test_result;
export test_opts;
export tr_ok;
export tr_failed;
export tr_ignored;
export run_tests_console;
export run_tests_console_;
export run_test;
export filter_tests;
export parse_opts;
export test_to_task;
export default_test_to_task;

// The name of a test. By convention this follows the rules for rust
// paths, i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
type test_name = str;
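// An illustrative example (hypothetical name, not defined here): a test
// for str::split might be named "str::split::test_empty".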

// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function fails then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into tasks.
type test_fn = fn();
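// An illustrative example (hypothetical): the following would be a valid
// test_fn, and a failing test, since the assert fails:
//
//     fn test_bogus_math() { assert 2 + 2 == 5; }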

// The definition of a single test. A test runner will run a list of
// these.
type test_desc = rec(test_name name,
                     test_fn fn,
                     bool ignore);
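// A minimal sketch (hypothetical names) of building one by hand:
//
//     auto desc = rec(name = "mymod::test_foo", fn = test_foo,
//                     ignore = false);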

// The default console test runner. It accepts the command line
// arguments and a vector of test_descs (generated at compile time).
fn test_main(&vec[str] args, &test_desc[] tests) {
    auto ivec_args = {
        auto iargs = ~[];
        for (str arg in args) {
            iargs += ~[arg];
        }
        iargs
    };
    check ivec::is_not_empty(ivec_args);
    auto opts = alt (parse_opts(ivec_args)) {
        either::left(?o) { o }
        either::right(?m) { fail m }
    };
    if (!run_tests_console(opts, tests)) {
        fail "Some tests failed";
    }
}
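
// Illustrative sketch (hypothetical glue; the real version is emitted by
// rustc's test runner generator): a test crate's main might look like
//
//     fn main(vec[str] args) {
//         test_main(args, ~[rec(name = "mymod::test_foo", fn = test_foo,
//                               ignore = false)])
//     }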

type test_opts = rec(option::t[str] filter,
                     bool run_ignored);

type opt_res = either::t[test_opts, str];

// Parses command line arguments into test options
fn parse_opts(&str[] args) : ivec::is_not_empty(args) -> opt_res {

    // FIXME (#649): Shouldn't have to check here
    check ivec::is_not_empty(args);
    auto args_ = ivec::tail(args);
    auto opts = ~[getopts::optflag("ignored")];
    auto match = alt (getopts::getopts_ivec(args_, opts)) {
        getopts::success(?m) { m }
        getopts::failure(?f) { ret either::right(getopts::fail_str(f)) }
    };

    auto filter = if (vec::len(match.free) > 0u) {
        option::some(match.free.(0))
    } else {
        option::none
    };

    auto run_ignored = getopts::opt_present(match, "ignored");

    auto test_opts = rec(filter = filter,
                         run_ignored = run_ignored);

    ret either::left(test_opts);
}
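
// For example (illustrative): parse_opts(~["stdtest", "str", "--ignored"])
// returns test_opts with filter = option::some("str") and
// run_ignored = true; the leading element is the binary name and is
// dropped by ivec::tail.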

tag test_result {
    tr_ok;
    tr_failed;
    tr_ignored;
}

// To get isolation and concurrency tests have to be run in their own tasks.
// In cases where test functions are closures it is not ok to just dump them
// into a task and run them, so this transformation gives the caller a chance
// to create the test task.
type test_to_task = fn(&fn()) -> task;
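// (default_test_to_task, below, is the standard implementation.)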

// A simple console test runner
fn run_tests_console(&test_opts opts,
                     &test_desc[] tests) -> bool {
    run_tests_console_(opts, tests, default_test_to_task)
}

fn run_tests_console_(&test_opts opts,
                      &test_desc[] tests,
                      &test_to_task to_task) -> bool {

    auto filtered_tests = filter_tests(opts, tests);

    auto out = io::stdout();

    auto total = ivec::len(filtered_tests);
    out.write_line(#fmt("running %u tests", total));

    auto futures = ~[];

    auto passed = 0u;
    auto failed = 0u;
    auto ignored = 0u;

    auto failures = ~[];

    // It's tempting to just spawn all the tests at once but that doesn't
    // provide a great user experience because you might sit waiting for the
    // result of a particular test for an unusually long amount of time.
    auto concurrency = get_concurrency();
    log #fmt("using %u test tasks", concurrency);
    auto run_idx = 0u;
    auto wait_idx = 0u;

    while (wait_idx < total) {
        while (ivec::len(futures) < concurrency && run_idx < total) {
            futures += ~[run_test(filtered_tests.(run_idx), to_task)];
            run_idx += 1u;
        }

        auto future = futures.(0);
        out.write_str(#fmt("running %s ... ", future.test.name));
        auto result = future.wait();
        alt (result) {
            tr_ok {
                passed += 1u;
                write_ok(out, concurrency);
                out.write_line("");
            }
            tr_failed {
                failed += 1u;
                write_failed(out, concurrency);
                out.write_line("");
                failures += ~[future.test];
            }
            tr_ignored {
                ignored += 1u;
                write_ignored(out, concurrency);
                out.write_line("");
            }
        }
        futures = ivec::slice(futures, 1u, ivec::len(futures));
        wait_idx += 1u;
    }

    assert passed + failed + ignored == total;
    auto success = failed == 0u;

    if (!success) {
        out.write_line("\nfailures:");
        for (test_desc test in failures) {
            out.write_line(#fmt("    %s", test.name));
        }
    }

    out.write_str(#fmt("\nresult: "));
    if (success) {
        write_ok(out, concurrency);
    } else {
        write_failed(out, concurrency);
    }
    out.write_str(#fmt(". %u passed; %u failed; %u ignored\n\n",
                       passed, failed, ignored));

    ret success;

    fn write_ok(&io::writer out, uint concurrency) {
        write_pretty(out, "ok", term::color_green, concurrency);
    }

    fn write_failed(&io::writer out, uint concurrency) {
        write_pretty(out, "FAILED", term::color_red, concurrency);
    }

    fn write_ignored(&io::writer out, uint concurrency) {
        write_pretty(out, "ignored", term::color_yellow, concurrency);
    }

    fn write_pretty(&io::writer out, &str word, u8 color,
                    uint concurrency) {
        // In the presence of concurrency, outputting control characters
        // can cause some crazy artifacting
        if (concurrency == 1u && term::color_supported()) {
            term::fg(out.get_buf_writer(), color);
        }
        out.write_str(word);
        if (concurrency == 1u && term::color_supported()) {
            term::reset(out.get_buf_writer());
        }
    }
}
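
// Determine how many tests to run in parallel, from the RUST_THREADS
// environment variable, defaulting to a single test task when it is
// unset or zero.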
fn get_concurrency() -> uint {
    alt getenv("RUST_THREADS") {
        option::some(?t) {
            auto threads = uint::parse_buf(str::bytes(t), 10u);
            threads > 0u ? threads : 1u
        }
        option::none { 1u }
    }
}
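
// For example (illustrative): RUST_THREADS=8 ./stdtest.stage2 runs up to
// eight test tasks at a time.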

fn filter_tests(&test_opts opts, &test_desc[] tests) -> test_desc[] {
    auto filtered = tests;

    // Remove tests that don't match the test filter
    filtered = if (option::is_none(opts.filter)) {
        filtered
    } else {
        auto filter_str = alt opts.filter {
            option::some(?f) { f }
            option::none { "" }
        };

        auto filter = bind fn(&test_desc test,
                              str filter_str) -> option::t[test_desc] {
            if (str::find(test.name, filter_str) >= 0) {
                ret option::some(test);
            } else {
                ret option::none;
            }
        } (_, filter_str);

        ivec::filter_map(filter, filtered)
    };

    // Maybe pull out the ignored tests and unignore them
    filtered = if (!opts.run_ignored) {
        filtered
    } else {
        auto filter = fn(&test_desc test) -> option::t[test_desc] {
            if (test.ignore) {
                ret option::some(rec(name = test.name,
                                     fn = test.fn,
                                     ignore = false));
            } else {
                ret option::none;
            }
        };

        ivec::filter_map(filter, filtered)
    };

    // Sort the tests alphabetically
    filtered = {
        fn lteq(&test_desc t1, &test_desc t2) -> bool {
            str::lteq(t1.name, t2.name)
        }
        sort::merge_sort(lteq, filtered)
    };

    ret filtered;
}
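
// For example (illustrative): with filter = option::some("str"), a test
// named "str::test_split" survives because str::find returns a nonnegative
// index, while "vec::test_len" is filtered out.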
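
// The result of launching a test: its description, a boxed copy of its
// function (kept alive for the task; see run_test), and a closure that
// blocks until the test's result is available.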
type test_future = rec(test_desc test,
                       @fn() fnref,
                       fn() -> test_result wait);

fn run_test(&test_desc test, &test_to_task to_task) -> test_future {
    // FIXME: Because of the unsafe way we're passing the test function
    // to the test task, we need to make sure we keep a reference to that
    // function around for longer than the lifetime of the task. To that end
    // we keep the function boxed in the test future.
    auto fnref = @test.fn;
    if (!test.ignore) {
        auto test_task = to_task(*fnref);
        ret rec(test = test,
                fnref = fnref,
                wait = bind fn(&task test_task) -> test_result {
                    alt (task::join(test_task)) {
                        task::tr_success { tr_ok }
                        task::tr_failure { tr_failed }
                    }
                } (test_task));
    } else {
        ret rec(test = test,
                fnref = fnref,
                wait = fn() -> test_result { tr_ignored });
    }
}

native "rust" mod rustrt {
    fn hack_allow_leaks();
}

// We need to run our tests in another task in order to trap test failures.
// But, at least currently, functions can't be used as spawn arguments so
// we've got to treat our test functions as unsafe pointers. This function
// only works with functions that don't contain closures.
fn default_test_to_task(&fn() f) -> task {
    fn run_task(*mutable fn() fptr) {
        // If this task fails we don't want that failure to propagate to the
        // test runner or else we couldn't keep running tests
        task::unsupervise();

        // FIXME (236): Hack supreme - unwinding doesn't work yet so if this
        // task fails memory will not be freed correctly. This turns off the
        // sanity checks in the runtime's memory region for the task, so that
        // the test runner can continue.
        rustrt::hack_allow_leaks();

        // Run the test
        (*fptr)()
    }
    auto fptr = ptr::addr_of(f);
    ret spawn run_task(fptr);
}
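
// For example (illustrative): a caller with special setup needs could pass
// its own converter to run_tests_console_ instead of default_test_to_task:
//
//     fn my_to_task(&fn() f) -> task { default_test_to_task(f) }
//     run_tests_console_(opts, tests, my_to_task);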

// Local Variables:
// mode: rust;
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
// End: