Logfile output from tests; summarise in make check

Add an optional --logfile argument to std::test::test_main and to
compiletest.

Use this feature and the new 'check-summary.py' script to
summarise all the tests performed by the 'check' target. This is
a short-term fix for #2075.
Grahame Bowland, 2012-04-03 23:27:51 +08:00 (committed by Brian Anderson)
parent 3aed498842
commit 5cc050b265
6 changed files with 122 additions and 26 deletions
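
The log format is the contract between the test runners and the summary
script: each test produces one line consisting of a status token ('ok',
'failed', or 'ignored'), a single space, and the test name. A minimal sketch
of that round trip (illustrative only; these helper names are hypothetical,
the real writers live in std::test and compiletest):

    # Hypothetical helpers illustrating the one-line-per-test log format.
    def write_log_line(out, status, test_name):
        # status is one of 'ok', 'failed', 'ignored'
        out.write('%s %s\n' % (status, test_name))

    def parse_log_line(line):
        # mirrors the line.strip().split(' ', 1) in check-summary.py below
        status, test_name = line.strip().split(' ', 1)
        return status, test_name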


@@ -31,6 +31,7 @@ clean-misc:
 	$(Q)rm -f $(CRATE_DEPFILES:%.d=%.d.tmp)
 	$(Q)rm -Rf $(DOCS)
 	$(Q)rm -Rf $(GENERATED)
+	$(Q)rm -f tmp/*.log
 	$(Q)rm -f rustllvm/$(CFG_RUSTLLVM) rustllvm/rustllvmbits.a
 	$(Q)rm -f rt/$(CFG_RUNTIME)
 	$(Q)find rustllvm rt -name '*.[odasS]' -delete


@@ -66,9 +66,11 @@ endif
 # Main test targets
 ######################################################################
 
-check: tidy all check-stage2 \
+check: tidy all check-stage2
+	$(S)src/etc/check-summary.py tmp/*.log
 
-check-full: tidy all check-stage1 check-stage2 check-stage3 \
+check-full: tidy all check-stage1 check-stage2 check-stage3
+	$(S)src/etc/check-summary.py tmp/*.log
 
 # Run the tidy script in multiple parts to avoid huge 'echo' commands
 ifdef CFG_NOTIDY
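
The recipe added to each check target hands every per-suite log to the
summary script. Roughly, in Python terms (a sketch; the script itself prints
one "summary of %d test logs: %d passed; %d failed; %d ignored" line):

    # Hypothetical sketch of what the added recipe line amounts to:
    #   $(S)src/etc/check-summary.py tmp/*.log
    import glob, subprocess, sys
    logs = glob.glob('tmp/*.log')
    subprocess.call([sys.executable, 'src/etc/check-summary.py'] + logs)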
@@ -120,7 +122,6 @@ tidy:
 		| xargs -n 10 python $(S)src/etc/tidy.py
 endif
 
-
 ######################################################################
 # Extracting tests for docs
 ######################################################################
@@ -232,7 +233,8 @@ $(3)/test/coretest.stage$(1)-$(2)$$(X): \
 check-stage$(1)-T-$(2)-H-$(3)-core-dummy: \
 	$(3)/test/coretest.stage$(1)-$(2)$$(X)
 	@$$(call E, run: $$<)
-	$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3)) $$(TESTARGS)
+	$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3)) $$(TESTARGS) \
+		--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-core.log
 
 # Rules for the standard library test runner
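
Every suite writes to its own log, named after the stage, target triple, and
host triple baked into each make rule. A hypothetical helper showing the
naming scheme these rules expand to:

    # Hypothetical illustration of the log naming scheme used by these rules:
    #   tmp/check-stage<stage>-T-<target>-H-<host>-<suite>.log
    def check_log_name(stage, target, host, suite):
        return 'tmp/check-stage%s-T-%s-H-%s-%s.log' % (stage, target, host, suite)

    # e.g. check_log_name('2', 'i686-unknown-linux-gnu',
    #                     'i686-unknown-linux-gnu', 'std')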
@@ -245,7 +247,8 @@ $(3)/test/stdtest.stage$(1)-$(2)$$(X): \
 check-stage$(1)-T-$(2)-H-$(3)-std-dummy: \
 	$(3)/test/stdtest.stage$(1)-$(2)$$(X)
 	@$$(call E, run: $$<)
-	$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3)) $$(TESTARGS)
+	$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3)) $$(TESTARGS) \
+		--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-std.log
 
 # Rules for the rustc test runner
@@ -260,7 +263,8 @@ $(3)/test/rustctest.stage$(1)-$(2)$$(X): \
 check-stage$(1)-T-$(2)-H-$(3)-rustc-dummy: \
 	$(3)/test/rustctest.stage$(1)-$(2)$$(X)
 	@$$(call E, run: $$<)
-	$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3)) $$(TESTARGS)
+	$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3)) $$(TESTARGS) \
+		--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-rustc.log
 
 # Rules for the rustdoc test runner
@@ -276,7 +280,8 @@ $(3)/test/rustdoctest.stage$(1)-$(2)$$(X): \
 check-stage$(1)-T-$(2)-H-$(3)-rustdoc-dummy: \
 	$(3)/test/rustdoctest.stage$(1)-$(2)$$(X)
 	@$$(call E, run: $$<)
-	$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3)) $$(TESTARGS)
+	$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3)) $$(TESTARGS) \
+		--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-rustdoc.log
 
 # Rules for the cfail/rfail/rpass/bench/perf test runner
@@ -365,7 +370,8 @@ check-stage$(1)-T-$(2)-H-$(3)-cfail-dummy: \
 	$$(CFAIL_TESTS)
 	@$$(call E, run cfail: $$<)
 	$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
-		$$(CFAIL_ARGS$(1)-T-$(2)-H-$(3))
+		$$(CFAIL_ARGS$(1)-T-$(2)-H-$(3)) \
+		--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-cfail.log
 
 check-stage$(1)-T-$(2)-H-$(3)-rfail-dummy: \
 	$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
@@ -373,7 +379,8 @@ check-stage$(1)-T-$(2)-H-$(3)-rfail-dummy: \
 	$$(RFAIL_TESTS)
 	@$$(call E, run rfail: $$<)
 	$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
-		$$(RFAIL_ARGS$(1)-T-$(2)-H-$(3))
+		$$(RFAIL_ARGS$(1)-T-$(2)-H-$(3)) \
+		--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-rfail.log
 
 check-stage$(1)-T-$(2)-H-$(3)-rpass-dummy: \
 	$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
@@ -381,7 +388,8 @@ check-stage$(1)-T-$(2)-H-$(3)-rpass-dummy: \
 	$$(RPASS_TESTS)
 	@$$(call E, run rpass: $$<)
 	$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
-		$$(RPASS_ARGS$(1)-T-$(2)-H-$(3))
+		$$(RPASS_ARGS$(1)-T-$(2)-H-$(3)) \
+		--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-rpass.log
 
 check-stage$(1)-T-$(2)-H-$(3)-bench-dummy: \
 	$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
@@ -389,7 +397,8 @@ check-stage$(1)-T-$(2)-H-$(3)-bench-dummy: \
 	$$(BENCH_TESTS)
 	@$$(call E, run bench: $$<)
 	$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
-		$$(BENCH_ARGS$(1)-T-$(2)-H-$(3))
+		$$(BENCH_ARGS$(1)-T-$(2)-H-$(3)) \
+		--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-bench.log
 
 check-stage$(1)-T-$(2)-H-$(3)-perf-dummy: \
 	$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
@@ -397,7 +406,8 @@ check-stage$(1)-T-$(2)-H-$(3)-perf-dummy: \
 	$$(BENCH_TESTS)
 	@$$(call E, perf: $$<)
 	$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
-		$$(PERF_ARGS$(1)-T-$(2)-H-$(3))
+		$$(PERF_ARGS$(1)-T-$(2)-H-$(3)) \
+		--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-perf.log
 
 check-stage$(1)-T-$(2)-H-$(3)-pretty-rpass-dummy: \
 	$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
@@ -405,7 +415,8 @@ check-stage$(1)-T-$(2)-H-$(3)-pretty-rpass-dummy: \
 	$$(RPASS_TESTS)
 	@$$(call E, run pretty-rpass: $$<)
 	$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
-		$$(PRETTY_RPASS_ARGS$(1)-T-$(2)-H-$(3))
+		$$(PRETTY_RPASS_ARGS$(1)-T-$(2)-H-$(3)) \
+		--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-pretty-rpass.log
 
 check-stage$(1)-T-$(2)-H-$(3)-pretty-rfail-dummy: \
 	$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
@@ -413,7 +424,8 @@ check-stage$(1)-T-$(2)-H-$(3)-pretty-rfail-dummy: \
 	$$(RFAIL_TESTS)
 	@$$(call E, run pretty-rfail: $$<)
 	$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
-		$$(PRETTY_RFAIL_ARGS$(1)-T-$(2)-H-$(3))
+		$$(PRETTY_RFAIL_ARGS$(1)-T-$(2)-H-$(3)) \
+		--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-pretty-rfail.log
 
 check-stage$(1)-T-$(2)-H-$(3)-pretty-bench-dummy: \
 	$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
@@ -421,7 +433,8 @@ check-stage$(1)-T-$(2)-H-$(3)-pretty-bench-dummy: \
 	$$(BENCH_TESTS)
 	@$$(call E, run pretty-bench: $$<)
 	$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
-		$$(PRETTY_BENCH_ARGS$(1)-T-$(2)-H-$(3))
+		$$(PRETTY_BENCH_ARGS$(1)-T-$(2)-H-$(3)) \
+		--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-pretty-bench.log
 
 check-stage$(1)-T-$(2)-H-$(3)-pretty-pretty-dummy: \
 	$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
@@ -429,7 +442,8 @@ check-stage$(1)-T-$(2)-H-$(3)-pretty-pretty-dummy: \
 	$$(PRETTY_TESTS)
 	@$$(call E, run pretty-pretty: $$<)
 	$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
-		$$(PRETTY_PRETTY_ARGS$(1)-T-$(2)-H-$(3))
+		$$(PRETTY_PRETTY_ARGS$(1)-T-$(2)-H-$(3)) \
+		--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-pretty-pretty.log
 
 check-stage$(1)-T-$(2)-H-$(3)-doc-tutorial-dummy: \
 	$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
@@ -437,7 +451,8 @@ check-stage$(1)-T-$(2)-H-$(3)-doc-tutorial-dummy: \
 	doc-tutorial-extract$(3)
 	@$$(call E, run doc-tutorial: $$<)
 	$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
-		$$(DOC_TUTORIAL_ARGS$(1)-T-$(2)-H-$(3))
+		$$(DOC_TUTORIAL_ARGS$(1)-T-$(2)-H-$(3)) \
+		--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-doc-tutorial.log
 
 check-stage$(1)-T-$(2)-H-$(3)-doc-ref-dummy: \
 	$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
@@ -445,7 +460,8 @@ check-stage$(1)-T-$(2)-H-$(3)-doc-ref-dummy: \
 	doc-ref-extract$(3)
 	@$$(call E, run doc-ref: $$<)
 	$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
-		$$(DOC_REF_ARGS$(1)-T-$(2)-H-$(3))
+		$$(DOC_REF_ARGS$(1)-T-$(2)-H-$(3)) \
+		--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-doc-ref.log
 
 endef
@@ -489,7 +505,8 @@ $(3)/test/$$(FT_DRIVER)-$(2)$$(X): \
 $(3)/test/$$(FT_DRIVER)-$(2).out: \
 	$(3)/test/$$(FT_DRIVER)-$(2)$$(X) \
 	$$(SREQ2_T_$(2)_H_$(3))
-	$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3))
+	$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3)) \
+		--logfile tmp/$$(FT_DRIVER)-$(2).log
 
 check-fast-T-$(2)-H-$(3): tidy \
 	check-stage2-T-$(2)-H-$(3)-rustc \


@@ -33,6 +33,9 @@ enum mode { mode_compile_fail, mode_run_fail, mode_run_pass, mode_pretty, }
     // Only run tests that match this filter
     filter: option<str>,
 
+    // Write out a parseable log of tests that were run
+    logfile: option<str>,
+
     // A command line to prefix program execution with,
     // for running under valgrind
     runtool: option<str>,


@@ -35,7 +35,8 @@ fn parse_config(args: [str]) -> config {
          getopts::reqopt("stage-id"),
          getopts::reqopt("mode"), getopts::optflag("ignored"),
          getopts::optopt("runtool"), getopts::optopt("rustcflags"),
-         getopts::optflag("verbose")];
+         getopts::optflag("verbose"),
+         getopts::optopt("logfile")];
 
     check (vec::is_not_empty(args));
     let args_ = vec::tail(args);
@@ -58,6 +59,7 @@ fn parse_config(args: [str]) -> config {
           if vec::len(match.free) > 0u {
               option::some(match.free[0])
           } else { option::none },
+     logfile: getopts::opt_maybe_str(match, "logfile"),
      runtool: getopts::opt_maybe_str(match, "runtool"),
      rustcflags: getopts::opt_maybe_str(match, "rustcflags"),
      verbose: getopts::opt_present(match, "verbose")};
@@ -121,7 +123,13 @@ fn test_opts(config: config) -> test::test_opts {
            option::some(s) { option::some(s) }
            option::none { option::none }
          },
-     run_ignored: config.run_ignored}
+     run_ignored: config.run_ignored,
+     logfile:
+         alt config.logfile {
+           option::some(s) { option::some(s) }
+           option::none { option::none }
+         }
+    }
 }
 
 fn make_tests(config: config) -> [test::test_desc] {

src/etc/check-summary.py (new executable file, 32 lines)

@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+import sys
+
+if __name__ == '__main__':
+    summaries = []
+    def summarise(fname):
+        summary = {}
+        fd = open(fname)
+        for line in fd:
+            status, test = line.strip().split(' ', 1)
+            if not summary.has_key(status):
+                summary[status] = []
+            summary[status].append(test)
+        summaries.append((fname, summary))
+    def count(t):
+        return sum(map(lambda (f, s): len(s.get(t, [])), summaries))
+    logfiles = sys.argv[1:]
+    map(summarise, logfiles)
+    ok = count('ok')
+    failed = count('failed')
+    ignored = count('ignored')
+    print "summary of %d test logs: %d passed; %d failed; %d ignored" % \
+        (len(logfiles), ok, failed, ignored)
+    if failed > 0:
+        print "failed tests:"
+        for f, s in summaries:
+            failures = s.get('failed', [])
+            if len(failures) > 0:
+                print "  %s:" % (f)
+                for test in failures:
+                    print "    %s" % (test)


@@ -57,14 +57,15 @@ fn test_main(args: [str], tests: [test_desc]) {
     if !run_tests_console(opts, tests) { fail "Some tests failed"; }
 }
 
-type test_opts = {filter: option<str>, run_ignored: bool};
+type test_opts = {filter: option<str>, run_ignored: bool,
+                  logfile: option<str>};
 
 type opt_res = either<test_opts, str>;
 
 // Parses command line arguments into test options
 fn parse_opts(args: [str]) -> opt_res {
     let args_ = vec::tail(args);
-    let opts = [getopts::optflag("ignored")];
+    let opts = [getopts::optflag("ignored"), getopts::optopt("logfile")];
     let match =
         alt getopts::getopts(args_, opts) {
           ok(m) { m }
@@ -77,8 +78,10 @@ fn parse_opts(args: [str]) -> opt_res {
       } else { option::none };
 
     let run_ignored = getopts::opt_present(match, "ignored");
+    let logfile = getopts::opt_maybe_str(match, "logfile");
 
-    let test_opts = {filter: filter, run_ignored: run_ignored};
+    let test_opts = {filter: filter, run_ignored: run_ignored,
+                     logfile: logfile};
 
     ret either::left(test_opts);
 }
@@ -87,6 +90,7 @@ enum test_result { tr_ok, tr_failed, tr_ignored, }
 type console_test_state =
     @{out: io::writer,
+      log_out: option<io::writer>,
       use_color: bool,
       mut total: uint,
       mut passed: uint,
@@ -106,6 +110,12 @@ fn callback(event: testevent, st: console_test_state) {
       }
       te_wait(test) { st.out.write_str(#fmt["test %s ... ", test.name]); }
       te_result(test, result) {
+        alt st.log_out {
+          some(f) {
+            write_log(f, result, test);
+          }
+          none {}
+        }
         alt result {
           tr_ok {
             st.passed += 1u;
@@ -128,8 +138,21 @@ fn callback(event: testevent, st: console_test_state) {
         }
     }
 
+    let log_out = alt opts.logfile {
+      some(path) {
+        alt io::file_writer(path, [io::create, io::truncate]) {
+          result::ok(w) { some(w) }
+          result::err(s) {
+            fail(#fmt("can't open output file: %s", s))
+          }
+        }
+      }
+      none { none }
+    };
+
     let st =
         @{out: io::stdout(),
+          log_out: log_out,
           use_color: use_color(),
           mut total: 0u,
           mut passed: 0u,
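
The log_out setup above opens the --logfile path for writing, truncating any
log left over from a previous run, and aborts with an error if the file
cannot be created. A rough Python analogue of that behaviour (helper name
hypothetical):

    # Hypothetical analogue of the log_out setup: create + truncate,
    # or fail loudly, mirroring io::file_writer(path, [io::create, io::truncate]).
    def open_log(path):
        if path is None:
            return None          # no --logfile given
        try:
            return open(path, 'w')
        except IOError as e:
            raise SystemExit("can't open output file: %s" % e)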
@@ -156,6 +179,15 @@ fn callback(event: testevent, st: console_test_state) {
     ret success;
 }
 
+fn write_log(out: io::writer, result: test_result, test: test_desc) {
+    out.write_line(#fmt("%s %s",
+                        alt result {
+                          tr_ok { "ok" }
+                          tr_failed { "failed" }
+                          tr_ignored { "ignored" }
+                        }, test.name));
+}
+
 fn write_ok(out: io::writer, use_color: bool) {
     write_pretty(out, "ok", term::color_green, use_color);
 }
@@ -209,6 +241,7 @@ fn should_sort_failures_before_printing_them() {
     let st =
         @{out: writer,
+          log_out: option::none,
           use_color: false,
           mut total: 0u,
           mut passed: 0u,
@@ -466,7 +499,8 @@ fn filter_for_ignored_option() {
     // When we run ignored tests the test filter should filter out all the
     // unignored tests and flip the ignore flag on the rest to false
-    let opts = {filter: option::none, run_ignored: true};
+    let opts = {filter: option::none, run_ignored: true,
+                logfile: option::none};
 
     let tests =
         [{name: "1", fn: fn~() { }, ignore: true, should_fail: false},
          {name: "2", fn: fn~() { }, ignore: false, should_fail: false}];
@@ -479,7 +513,8 @@ fn filter_for_ignored_option() {
 #[test]
 fn sort_tests() {
-    let opts = {filter: option::none, run_ignored: false};
+    let opts = {filter: option::none, run_ignored: false,
+                logfile: option::none};
 
     let names =
         ["sha1::test", "int::test_to_str", "int::test_pow",