2011-07-09 18:08:03 -05:00
|
|
|
// Support code for rustc's built-in test runner generator. Currently,
|
|
|
|
// none of this is meant for users. It is intended to support the
|
|
|
|
// simplest interface possible for representing and running tests
|
|
|
|
// while providing a base that other test frameworks may build off of.
|
|
|
|
|
|
|
|
export test_name;
|
|
|
|
export test_fn;
|
|
|
|
export test_desc;
|
|
|
|
export test_main;
|
2011-07-14 13:29:54 -05:00
|
|
|
export test_result;
|
2011-07-14 18:05:33 -05:00
|
|
|
export test_opts;
|
2011-07-14 13:29:54 -05:00
|
|
|
export tr_ok;
|
|
|
|
export tr_failed;
|
|
|
|
export tr_ignored;
|
|
|
|
export run_test;
|
2011-07-14 18:05:33 -05:00
|
|
|
export filter_tests;
|
|
|
|
export parse_opts;
|
2011-07-09 18:08:03 -05:00
|
|
|
|
|
|
|
// The name of a test. By convention this follows the rules for rust
|
|
|
|
// paths, i.e it should be a series of identifiers seperated by double
|
|
|
|
// colons. This way if some test runner wants to arrange the tests
|
|
|
|
// heirarchically it may.
|
|
|
|
type test_name = str;
|
|
|
|
|
|
|
|
// A function that runs a test. If the function returns successfully,
|
|
|
|
// the test succeeds; if the function fails then the test fails. We
|
|
|
|
// may need to come up with a more clever definition of test in order
|
|
|
|
// to support isolation of tests into tasks.
|
|
|
|
type test_fn = fn();
|
|
|
|
|
|
|
|
// The definition of a single test. A test runner will run a list of
|
|
|
|
// these.
|
|
|
|
type test_desc = rec(test_name name,
|
2011-07-14 13:29:54 -05:00
|
|
|
test_fn fn,
|
|
|
|
bool ignore);
|
2011-07-09 18:08:03 -05:00
|
|
|
|
|
|
|
// The default console test runner. It accepts the command line
|
|
|
|
// arguments and a vector of test_descs (generated at compile time).
|
2011-07-14 12:51:38 -05:00
|
|
|
fn test_main(&vec[str] args, &test_desc[] tests) {
|
2011-07-14 18:05:33 -05:00
|
|
|
auto ivec_args = {
|
|
|
|
auto iargs = ~[];
|
|
|
|
for (str arg in args) {
|
|
|
|
iargs += ~[arg]
|
|
|
|
}
|
|
|
|
iargs
|
|
|
|
};
|
|
|
|
check ivec::is_not_empty(ivec_args);
|
|
|
|
auto opts = alt (parse_opts(ivec_args)) {
|
|
|
|
either::left(?o) { o }
|
|
|
|
either::right(?m) { fail m }
|
|
|
|
};
|
|
|
|
if (!run_tests(opts, tests)) {
|
2011-07-14 12:51:38 -05:00
|
|
|
fail "Some tests failed";
|
2011-07-09 18:08:03 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-07-14 18:05:33 -05:00
|
|
|
type test_opts = rec(option::t[str] filter,
|
|
|
|
bool run_ignored);
|
|
|
|
|
|
|
|
type opt_res = either::t[test_opts, str];
|
|
|
|
|
|
|
|
// Parses command line arguments into test options
|
|
|
|
fn parse_opts(&str[] args) : ivec::is_not_empty(args) -> opt_res {
|
|
|
|
|
|
|
|
// FIXME (#649): Shouldn't have to check here
|
|
|
|
check ivec::is_not_empty(args);
|
|
|
|
auto args_ = ivec::tail(args);
|
|
|
|
auto opts = ~[getopts::optflag("ignored")];
|
|
|
|
auto match = alt (getopts::getopts_ivec(args_, opts)) {
|
|
|
|
getopts::success(?m) { m }
|
|
|
|
getopts::failure(?f) { ret either::right(getopts::fail_str(f)) }
|
|
|
|
};
|
|
|
|
|
|
|
|
auto filter = if (vec::len(match.free) > 0u) {
|
|
|
|
option::some(match.free.(0))
|
|
|
|
} else {
|
|
|
|
option::none
|
|
|
|
};
|
|
|
|
|
|
|
|
auto run_ignored = getopts::opt_present(match, "ignored");
|
|
|
|
|
|
|
|
auto test_opts = rec(filter = filter,
|
|
|
|
run_ignored = run_ignored);
|
2011-07-11 18:33:21 -05:00
|
|
|
|
2011-07-14 18:05:33 -05:00
|
|
|
ret either::left(test_opts);
|
2011-07-11 18:33:21 -05:00
|
|
|
}
|
|
|
|
|
2011-07-14 13:29:54 -05:00
|
|
|
tag test_result {
|
|
|
|
tr_ok;
|
|
|
|
tr_failed;
|
|
|
|
tr_ignored;
|
|
|
|
}
|
|
|
|
|
2011-07-11 18:33:21 -05:00
|
|
|
// A simple console test runner
|
|
|
|
fn run_tests(&test_opts opts, &test_desc[] tests) -> bool {
|
|
|
|
|
|
|
|
auto filtered_tests = filter_tests(opts, tests);
|
2011-07-11 13:19:32 -05:00
|
|
|
|
2011-07-09 18:08:03 -05:00
|
|
|
auto out = io::stdout();
|
|
|
|
|
2011-07-11 18:33:21 -05:00
|
|
|
auto total = ivec::len(filtered_tests);
|
2011-07-11 13:19:32 -05:00
|
|
|
out.write_line(#fmt("running %u tests", total));
|
|
|
|
|
|
|
|
auto passed = 0u;
|
|
|
|
auto failed = 0u;
|
2011-07-14 13:29:54 -05:00
|
|
|
auto ignored = 0u;
|
2011-07-11 13:19:32 -05:00
|
|
|
|
2011-07-11 18:33:21 -05:00
|
|
|
for (test_desc test in filtered_tests) {
|
2011-07-11 13:19:32 -05:00
|
|
|
out.write_str(#fmt("running %s ... ", test.name));
|
2011-07-14 13:29:54 -05:00
|
|
|
alt (run_test(test)) {
|
|
|
|
tr_ok {
|
|
|
|
passed += 1u;
|
|
|
|
write_ok(out);
|
|
|
|
out.write_line("");
|
|
|
|
}
|
|
|
|
tr_failed {
|
|
|
|
failed += 1u;
|
|
|
|
write_failed(out);
|
|
|
|
out.write_line("");
|
|
|
|
}
|
|
|
|
tr_ignored {
|
|
|
|
ignored += 1u;
|
|
|
|
write_ignored(out);
|
|
|
|
out.write_line("");
|
|
|
|
}
|
2011-07-11 13:19:32 -05:00
|
|
|
}
|
2011-07-09 18:08:03 -05:00
|
|
|
}
|
|
|
|
|
2011-07-14 13:29:54 -05:00
|
|
|
assert passed + failed + ignored == total;
|
2011-07-11 13:19:32 -05:00
|
|
|
|
2011-07-12 15:16:00 -05:00
|
|
|
out.write_str(#fmt("\nresult: "));
|
|
|
|
if (failed == 0u) {
|
|
|
|
write_ok(out);
|
|
|
|
} else {
|
|
|
|
write_failed(out);
|
|
|
|
}
|
2011-07-14 13:29:54 -05:00
|
|
|
out.write_str(#fmt(". %u passed; %u failed; %u ignored\n\n",
|
|
|
|
passed, failed, ignored));
|
2011-07-11 13:19:32 -05:00
|
|
|
|
2011-07-09 18:08:03 -05:00
|
|
|
ret true;
|
2011-07-11 13:19:32 -05:00
|
|
|
|
|
|
|
fn write_ok(&io::writer out) {
|
2011-07-15 02:31:00 -05:00
|
|
|
write_pretty(out, "ok", term::color_green);
|
2011-07-11 13:19:32 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
fn write_failed(&io::writer out) {
|
2011-07-15 02:31:00 -05:00
|
|
|
write_pretty(out, "FAILED", term::color_red);
|
2011-07-11 13:19:32 -05:00
|
|
|
}
|
2011-07-14 13:29:54 -05:00
|
|
|
|
|
|
|
fn write_ignored(&io::writer out) {
|
2011-07-15 02:31:00 -05:00
|
|
|
write_pretty(out, "ignored", term::color_yellow);
|
|
|
|
}
|
|
|
|
|
|
|
|
fn write_pretty(&io::writer out, &str word, u8 color) {
|
2011-07-14 13:29:54 -05:00
|
|
|
if (term::color_supported()) {
|
2011-07-15 02:31:00 -05:00
|
|
|
term::fg(out.get_buf_writer(), color);
|
2011-07-14 13:29:54 -05:00
|
|
|
}
|
2011-07-15 02:31:00 -05:00
|
|
|
out.write_str(word);
|
2011-07-14 13:29:54 -05:00
|
|
|
if (term::color_supported()) {
|
|
|
|
term::reset(out.get_buf_writer());
|
|
|
|
}
|
|
|
|
}
|
2011-07-09 18:08:03 -05:00
|
|
|
}
|
|
|
|
|
2011-07-11 18:33:21 -05:00
|
|
|
fn filter_tests(&test_opts opts, &test_desc[] tests) -> test_desc[] {
|
2011-07-14 18:05:33 -05:00
|
|
|
auto filtered = tests;
|
2011-07-11 18:33:21 -05:00
|
|
|
|
2011-07-14 18:05:33 -05:00
|
|
|
filtered = if (option::is_none(opts.filter)) {
|
|
|
|
filtered
|
|
|
|
} else {
|
|
|
|
auto filter_str = alt opts.filter { option::some(?f) { f }
|
|
|
|
option::none { "" } };
|
|
|
|
|
|
|
|
auto filter = bind fn(&test_desc test,
|
|
|
|
str filter_str) -> option::t[test_desc] {
|
|
|
|
if (str::find(test.name, filter_str) >= 0) {
|
|
|
|
ret option::some(test);
|
|
|
|
} else {
|
|
|
|
ret option::none;
|
|
|
|
}
|
|
|
|
} (_, filter_str);
|
2011-07-11 18:33:21 -05:00
|
|
|
|
2011-07-14 18:05:33 -05:00
|
|
|
ivec::filter_map(filter, filtered)
|
|
|
|
};
|
|
|
|
|
|
|
|
filtered = if (!opts.run_ignored) {
|
|
|
|
filtered
|
|
|
|
} else {
|
|
|
|
auto filter = fn(&test_desc test) -> option::t[test_desc] {
|
|
|
|
if (test.ignore) {
|
|
|
|
ret option::some(rec(name = test.name,
|
|
|
|
fn = test.fn,
|
|
|
|
ignore = false));
|
|
|
|
} else {
|
|
|
|
ret option::none;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
ivec::filter_map(filter, filtered)
|
|
|
|
};
|
2011-07-11 18:33:21 -05:00
|
|
|
|
2011-07-14 18:05:33 -05:00
|
|
|
ret filtered;
|
2011-07-11 18:33:21 -05:00
|
|
|
}
|
2011-07-09 18:08:03 -05:00
|
|
|
|
2011-07-14 13:29:54 -05:00
|
|
|
fn run_test(&test_desc test) -> test_result {
|
|
|
|
if (!test.ignore) {
|
2011-07-15 00:24:19 -05:00
|
|
|
if (run_test_fn_in_task(test.fn)) {
|
|
|
|
ret tr_ok;
|
|
|
|
} else {
|
|
|
|
ret tr_failed;
|
|
|
|
}
|
2011-07-14 13:29:54 -05:00
|
|
|
} else {
|
|
|
|
ret tr_ignored;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-07-15 00:24:19 -05:00
|
|
|
// We need to run our tests in another task in order to trap test failures.
|
|
|
|
// But, at least currently, functions can't be used as spawn arguments so
|
|
|
|
// we've got to treat our test functions as unsafe pointers.
|
|
|
|
fn run_test_fn_in_task(&fn() f) -> bool {
|
|
|
|
fn run_task(*mutable fn() fptr) {
|
|
|
|
task::unsupervise();
|
|
|
|
(*fptr)()
|
|
|
|
}
|
|
|
|
auto fptr = ptr::addr_of(f);
|
|
|
|
auto test_task = spawn run_task(fptr);
|
|
|
|
ret alt (task::join(test_task)) {
|
|
|
|
task::tr_success { true }
|
|
|
|
task::tr_failure { false }
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-07-11 13:19:32 -05:00
|
|
|
|
2011-07-09 18:08:03 -05:00
|
|
|
// Local Variables:
|
|
|
|
// mode: rust;
|
|
|
|
// fill-column: 78;
|
|
|
|
// indent-tabs-mode: nil
|
|
|
|
// c-basic-offset: 4
|
|
|
|
// buffer-file-coding-system: utf-8-unix
|
|
|
|
// compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
|
|
|
|
// End:
|