From e14cd392a4278223d858451a7b5cae327f76707c Mon Sep 17 00:00:00 2001 From: Graydon Hoare Date: Sat, 6 Jul 2013 00:44:40 -0700 Subject: [PATCH 1/7] initial sketch of codegen mode for compiletest; doesn't measure / compare / ratchet the disassembly yet --- src/compiletest/common.rs | 7 ++ src/compiletest/compiletest.rs | 61 ++++++++++++----- src/compiletest/runtest.rs | 118 ++++++++++++++++++++++++++++++++- 3 files changed, 169 insertions(+), 17 deletions(-) diff --git a/src/compiletest/common.rs b/src/compiletest/common.rs index 38289f6274180..df00286c87f25 100644 --- a/src/compiletest/common.rs +++ b/src/compiletest/common.rs @@ -15,6 +15,7 @@ pub enum mode { mode_run_pass, mode_pretty, mode_debug_info, + mode_codegen } pub struct config { @@ -27,6 +28,12 @@ pub struct config { // The rustc executable rustc_path: Path, + // The clang executable + clang_path: Option, + + // The llvm binaries path + llvm_bin_path: Option, + // The directory containing the tests to run src_base: Path, diff --git a/src/compiletest/compiletest.rs b/src/compiletest/compiletest.rs index 7d9a7c3ea75dc..5d3f81fd88456 100644 --- a/src/compiletest/compiletest.rs +++ b/src/compiletest/compiletest.rs @@ -19,6 +19,7 @@ extern mod extra; use std::os; use extra::getopts; +use extra::getopts::groups::{optopt, optflag, reqopt}; use extra::test; use common::config; @@ -27,6 +28,7 @@ use common::mode_run_fail; use common::mode_compile_fail; use common::mode_pretty; use common::mode_debug_info; +use common::mode_codegen; use common::mode; use util::logv; @@ -45,31 +47,54 @@ pub fn main() { } pub fn parse_config(args: ~[~str]) -> config { - let opts = - ~[getopts::reqopt("compile-lib-path"), - getopts::reqopt("run-lib-path"), - getopts::reqopt("rustc-path"), getopts::reqopt("src-base"), - getopts::reqopt("build-base"), getopts::reqopt("aux-base"), - getopts::reqopt("stage-id"), - getopts::reqopt("mode"), getopts::optflag("ignored"), - getopts::optopt("runtool"), getopts::optopt("rustcflags"), - 
getopts::optflag("verbose"), - getopts::optopt("logfile"), - getopts::optflag("jit"), - getopts::optflag("newrt"), - getopts::optopt("target"), - getopts::optopt("adb-path"), - getopts::optopt("adb-test-dir") + + let groups : ~[getopts::groups::OptGroup] = + ~[reqopt("", "compile-lib-path", "path to host shared libraries", "PATH"), + reqopt("", "run-lib-path", "path to target shared libraries", "PATH"), + reqopt("", "rustc-path", "path to rustc to use for compiling", "PATH"), + optopt("", "clang-path", "path to executable for codegen tests", "PATH"), + optopt("", "llvm-bin-path", "path to directory holding llvm binaries", "DIR"), + reqopt("", "src-base", "directory to scan for test files", "PATH"), + reqopt("", "build-base", "directory to deposit test outputs", "PATH"), + reqopt("", "aux-base", "directory to find auxiliary test files", "PATH"), + reqopt("", "stage-id", "the target-stage identifier", "stageN-TARGET"), + reqopt("", "mode", "which sort of compile tests to run", + "(compile-fail|run-fail|run-pass|pretty|debug-info)"), + optflag("", "ignored", "run tests marked as ignored / xfailed"), + optopt("", "runtool", "supervisor program to run tests under \ + (eg. 
emulator, valgrind)", "PROGRAM"), + optopt("", "rustcflags", "flags to pass to rustc", "FLAGS"), + optflag("", "verbose", "run tests verbosely, showing all output"), + optopt("", "logfile", "file to log test execution to", "FILE"), + optflag("", "jit", "run tests under the JIT"), + optflag("", "newrt", "run tests on the new runtime / scheduler"), + optopt("", "target", "the target to build for", "TARGET"), + optopt("", "adb-path", "path to the android debugger", "PATH"), + optopt("", "adb-test-dir", "path to tests for the android debugger", "PATH"), + optflag("h", "help", "show this message"), ]; assert!(!args.is_empty()); + let argv0 = copy args[0]; let args_ = args.tail(); + if args[1] == ~"-h" || args[1] == ~"--help" { + let message = fmt!("Usage: %s [OPTIONS] [TESTNAME...]", argv0); + io::println(getopts::groups::usage(message, groups)); + fail!() + } + let matches = - &match getopts::getopts(args_, opts) { + &match getopts::groups::getopts(args_, groups) { Ok(m) => m, Err(f) => fail!(getopts::fail_str(f)) }; + if getopts::opt_present(matches, "h") || getopts::opt_present(matches, "help") { + let message = fmt!("Usage: %s [OPTIONS] [TESTNAME...]", argv0); + io::println(getopts::groups::usage(message, groups)); + fail!() + } + fn opt_path(m: &getopts::Matches, nm: &str) -> Path { Path(getopts::opt_str(m, nm)) } @@ -78,6 +103,8 @@ pub fn parse_config(args: ~[~str]) -> config { compile_lib_path: getopts::opt_str(matches, "compile-lib-path"), run_lib_path: getopts::opt_str(matches, "run-lib-path"), rustc_path: opt_path(matches, "rustc-path"), + clang_path: getopts::opt_maybe_str(matches, "clang-path").map(|s| Path(*s)), + llvm_bin_path: getopts::opt_maybe_str(matches, "llvm-bin-path").map(|s| Path(*s)), src_base: opt_path(matches, "src-base"), build_base: opt_path(matches, "build-base"), aux_base: opt_path(matches, "aux-base"), @@ -159,6 +186,7 @@ pub fn str_mode(s: ~str) -> mode { ~"run-pass" => mode_run_pass, ~"pretty" => mode_pretty, ~"debug-info" => 
mode_debug_info, + ~"codegen" => mode_codegen, _ => fail!("invalid mode") } } @@ -170,6 +198,7 @@ pub fn mode_str(mode: mode) -> ~str { mode_run_pass => ~"run-pass", mode_pretty => ~"pretty", mode_debug_info => ~"debug-info", + mode_codegen => ~"codegen", } } diff --git a/src/compiletest/runtest.rs b/src/compiletest/runtest.rs index 91016ba91fa55..dee07c6de495d 100644 --- a/src/compiletest/runtest.rs +++ b/src/compiletest/runtest.rs @@ -39,7 +39,8 @@ pub fn run(config: config, testfile: ~str) { mode_run_fail => run_rfail_test(&config, &props, &testfile), mode_run_pass => run_rpass_test(&config, &props, &testfile), mode_pretty => run_pretty_test(&config, &props, &testfile), - mode_debug_info => run_debuginfo_test(&config, &props, &testfile) + mode_debug_info => run_debuginfo_test(&config, &props, &testfile), + mode_codegen => run_codegen_test(&config, &props, &testfile) } } @@ -835,3 +836,118 @@ fn _arm_push_aux_shared_library(config: &config, testfile: &Path) { } } } + +// codegen tests (vs. 
clang) + +fn make_o_name(config: &config, testfile: &Path) -> Path { + output_base_name(config, testfile).with_filetype("o") +} + +fn append_suffix_to_stem(p: &Path, suffix: &str) -> Path { + if suffix.len() == 0 { + copy *p + } else { + let stem = p.filestem().get(); + p.with_filestem(stem + "-" + suffix) + } +} + +fn compile_test_and_save_bitcode(config: &config, props: &TestProps, + testfile: &Path) -> ProcRes { + let link_args = ~[~"-L", aux_output_dir_name(config, testfile).to_str()]; + let llvm_args = ~[~"-c", ~"--lib", ~"--save-temps"]; + let args = make_compile_args(config, props, + link_args + llvm_args, + make_o_name, testfile); + compose_and_run_compiler(config, props, testfile, args, None) +} + +fn compile_cc_with_clang_and_save_bitcode(config: &config, _props: &TestProps, + testfile: &Path) -> ProcRes { + let bitcodefile = output_base_name(config, testfile).with_filetype("bc"); + let bitcodefile = append_suffix_to_stem(&bitcodefile, "clang"); + let ProcArgs = ProcArgs { + prog: config.clang_path.get_ref().to_str(), + args: ~[~"-c", + ~"-emit-llvm", + ~"-o", bitcodefile.to_str(), + testfile.with_filetype("cc").to_str() ] + }; + compose_and_run(config, testfile, ProcArgs, ~[], "", None) +} + +fn extract_function_from_bitcode(config: &config, _props: &TestProps, + fname: &str, testfile: &Path, + suffix: &str) -> ProcRes { + let bitcodefile = output_base_name(config, testfile).with_filetype("bc"); + let bitcodefile = append_suffix_to_stem(&bitcodefile, suffix); + let extracted_bc = append_suffix_to_stem(&bitcodefile, "extract"); + let ProcArgs = ProcArgs { + prog: config.llvm_bin_path.get_ref().push("llvm-extract").to_str(), + args: ~[~"-func=" + fname, + ~"-o=" + extracted_bc.to_str(), + bitcodefile.to_str() ] + }; + compose_and_run(config, testfile, ProcArgs, ~[], "", None) +} + +fn disassemble_extract(config: &config, _props: &TestProps, + testfile: &Path, suffix: &str) -> ProcRes { + let bitcodefile = output_base_name(config, 
testfile).with_filetype("bc"); + let bitcodefile = append_suffix_to_stem(&bitcodefile, suffix); + let extracted_bc = append_suffix_to_stem(&bitcodefile, "extract"); + let extracted_ll = extracted_bc.with_filetype("ll"); + let ProcArgs = ProcArgs { + prog: config.llvm_bin_path.get_ref().push("llvm-dis").to_str(), + args: ~[~"-o=" + extracted_ll.to_str(), + extracted_bc.to_str() ] + }; + compose_and_run(config, testfile, ProcArgs, ~[], "", None) +} + + +fn run_codegen_test(config: &config, props: &TestProps, testfile: &Path) { + + if config.llvm_bin_path.is_none() { + fatal(~"missing --llvm-bin-path"); + } + + if config.clang_path.is_none() { + fatal(~"missing --clang-path"); + } + + let mut ProcRes = compile_test_and_save_bitcode(config, props, testfile); + if ProcRes.status != 0 { + fatal_ProcRes(~"compilation failed!", &ProcRes); + } + + ProcRes = extract_function_from_bitcode(config, props, "test", testfile, ""); + if ProcRes.status != 0 { + fatal_ProcRes(~"extracting 'test' function failed", &ProcRes); + } + + ProcRes = disassemble_extract(config, props, testfile, ""); + if ProcRes.status != 0 { + fatal_ProcRes(~"disassembling extract failed", &ProcRes); + } + + + let mut ProcRes = compile_cc_with_clang_and_save_bitcode(config, props, testfile); + if ProcRes.status != 0 { + fatal_ProcRes(~"compilation failed!", &ProcRes); + } + + ProcRes = extract_function_from_bitcode(config, props, "test", testfile, "clang"); + if ProcRes.status != 0 { + fatal_ProcRes(~"extracting 'test' function failed", &ProcRes); + } + + ProcRes = disassemble_extract(config, props, testfile, "clang"); + if ProcRes.status != 0 { + fatal_ProcRes(~"disassembling extract failed", &ProcRes); + } + + + +} + From fbc5bb4c0ae10ef159d1c06a40fdf2b7883083d3 Mon Sep 17 00:00:00 2001 From: Graydon Hoare Date: Sat, 6 Jul 2013 01:03:03 -0700 Subject: [PATCH 2/7] wire up makefile to run codegen tests and add one to start --- configure | 1 + mk/tests.mk | 19 ++++++++++++++++++- src/test/codegen/hello.cc | 
12 ++++++++++++ src/test/codegen/hello.rs | 4 ++++ 4 files changed, 35 insertions(+), 1 deletion(-) create mode 100644 src/test/codegen/hello.cc create mode 100644 src/test/codegen/hello.rs diff --git a/configure b/configure index 12782fa9fdb62..8f757a0715f8c 100755 --- a/configure +++ b/configure @@ -731,6 +731,7 @@ do make_dir $h/test/perf make_dir $h/test/pretty make_dir $h/test/debug-info + make_dir $h/test/codegen make_dir $h/test/doc-tutorial make_dir $h/test/doc-tutorial-ffi make_dir $h/test/doc-tutorial-macros diff --git a/mk/tests.mk b/mk/tests.mk index 6b6f515ce2b5a..7a5a5dc15c30e 100644 --- a/mk/tests.mk +++ b/mk/tests.mk @@ -246,6 +246,7 @@ check-stage$(1)-T-$(2)-H-$(3)-exec: \ check-stage$(1)-T-$(2)-H-$(3)-crates-exec \ check-stage$(1)-T-$(2)-H-$(3)-bench-exec \ check-stage$(1)-T-$(2)-H-$(3)-debuginfo-exec \ + check-stage$(1)-T-$(2)-H-$(3)-codegen-exec \ check-stage$(1)-T-$(2)-H-$(3)-doc-exec \ check-stage$(1)-T-$(2)-H-$(3)-pretty-exec @@ -430,6 +431,8 @@ CFAIL_RS := $(wildcard $(S)src/test/compile-fail/*.rs) BENCH_RS := $(wildcard $(S)src/test/bench/*.rs) PRETTY_RS := $(wildcard $(S)src/test/pretty/*.rs) DEBUGINFO_RS := $(wildcard $(S)src/test/debug-info/*.rs) +CODEGEN_RS := $(wildcard $(S)src/test/codegen/*.rs) +CODEGEN_CC := $(wildcard $(S)src/test/codegen/*.cc) # perf tests are the same as bench tests only they run under # a performance monitor. 
@@ -443,6 +446,7 @@ BENCH_TESTS := $(BENCH_RS) PERF_TESTS := $(PERF_RS) PRETTY_TESTS := $(PRETTY_RS) DEBUGINFO_TESTS := $(DEBUGINFO_RS) +CODEGEN_TESTS := $(CODEGEN_RS) $(CODEGEN_CC) CTEST_SRC_BASE_rpass = run-pass CTEST_BUILD_BASE_rpass = run-pass @@ -479,10 +483,19 @@ CTEST_BUILD_BASE_debuginfo = debug-info CTEST_MODE_debuginfo = debug-info CTEST_RUNTOOL_debuginfo = $(CTEST_RUNTOOL) +CTEST_SRC_BASE_codegen = codegen +CTEST_BUILD_BASE_codegen = codegen +CTEST_MODE_codegen = codegen +CTEST_RUNTOOL_codegen = $(CTEST_RUNTOOL) + ifeq ($(CFG_GDB),) CTEST_DISABLE_debuginfo = "no gdb found" endif +ifeq ($(CFG_CLANG),) +CTEST_DISABLE_codegen = "no clang found" +endif + ifeq ($(CFG_OSTYPE),apple-darwin) CTEST_DISABLE_debuginfo = "gdb on darwing needs root" endif @@ -507,6 +520,8 @@ CTEST_COMMON_ARGS$(1)-T-$(2)-H-$(3) := \ --compile-lib-path $$(HLIB$(1)_H_$(3)) \ --run-lib-path $$(TLIB$(1)_T_$(2)_H_$(3)) \ --rustc-path $$(HBIN$(1)_H_$(3))/rustc$$(X_$(3)) \ + --clang-path $(if $(CFG_CLANG),$(CFG_CLANG),clang) \ + --llvm-bin-path $(CFG_LLVM_INST_DIR_$(CFG_BUILD_TRIPLE))/bin \ --aux-base $$(S)src/test/auxiliary/ \ --stage-id stage$(1)-$(2) \ --target $(2) \ @@ -522,6 +537,7 @@ CTEST_DEPS_cfail_$(1)-T-$(2)-H-$(3) = $$(CFAIL_TESTS) CTEST_DEPS_bench_$(1)-T-$(2)-H-$(3) = $$(BENCH_TESTS) CTEST_DEPS_perf_$(1)-T-$(2)-H-$(3) = $$(PERF_TESTS) CTEST_DEPS_debuginfo_$(1)-T-$(2)-H-$(3) = $$(DEBUGINFO_TESTS) +CTEST_DEPS_codegen_$(1)-T-$(2)-H-$(3) = $$(CODEGEN_TESTS) endef @@ -565,7 +581,7 @@ endif endef -CTEST_NAMES = rpass rpass-full rfail cfail bench perf debuginfo +CTEST_NAMES = rpass rpass-full rfail cfail bench perf debuginfo codegen $(foreach host,$(CFG_HOST_TRIPLES), \ $(eval $(foreach target,$(CFG_TARGET_TRIPLES), \ @@ -674,6 +690,7 @@ TEST_GROUPS = \ bench \ perf \ debuginfo \ + codegen \ doc \ $(foreach docname,$(DOC_TEST_NAMES),doc-$(docname)) \ pretty \ diff --git a/src/test/codegen/hello.cc b/src/test/codegen/hello.cc new file mode 100644 index 0000000000000..01eae9b16bb6f --- 
/dev/null +++ b/src/test/codegen/hello.cc @@ -0,0 +1,12 @@ +#include + +struct slice { + char const *p; + size_t len; +}; + +extern "C" +void test() { + struct slice s = { .p = "hello", + .len = 5 }; +} diff --git a/src/test/codegen/hello.rs b/src/test/codegen/hello.rs new file mode 100644 index 0000000000000..e7cd84f63f2b0 --- /dev/null +++ b/src/test/codegen/hello.rs @@ -0,0 +1,4 @@ +#[no_mangle] +fn test() { + let _x = "hello"; +} From 9e67bc37ff0854fbd2b27d3e5ae73639b490cbad Mon Sep 17 00:00:00 2001 From: Graydon Hoare Date: Sun, 7 Jul 2013 15:43:31 -0700 Subject: [PATCH 3/7] extra: simplify the bench stat loop, improve stability somewhat (?) --- src/libextra/stats.rs | 5 ++- src/libextra/test.rs | 94 +++++++++++++++++++------------------------ 2 files changed, 46 insertions(+), 53 deletions(-) diff --git a/src/libextra/stats.rs b/src/libextra/stats.rs index 5446515c1efad..b6a2deb166331 100644 --- a/src/libextra/stats.rs +++ b/src/libextra/stats.rs @@ -100,6 +100,7 @@ pub trait Stats { } /// Extracted collection of all the summary statistics of a sample set. +#[deriving(Eq)] struct Summary { sum: f64, min: f64, @@ -116,7 +117,9 @@ struct Summary { } impl Summary { - fn new(samples: &[f64]) -> Summary { + + /// Construct a new summary of a sample set. + pub fn new(samples: &[f64]) -> Summary { Summary { sum: samples.sum(), min: samples.min(), diff --git a/src/libextra/test.rs b/src/libextra/test.rs index 1c6e2a25c01b7..14de7d0c21d0c 100644 --- a/src/libextra/test.rs +++ b/src/libextra/test.rs @@ -18,6 +18,7 @@ use getopts; use sort; +use stats; use stats::Stats; use term; use time::precise_time_ns; @@ -25,16 +26,12 @@ use time::precise_time_ns; use std::comm::{stream, SharedChan}; use std::either; use std::io; -use std::num; use std::option; -use std::rand::RngUtil; -use std::rand; use std::result; use std::task; use std::to_str::ToStr; use std::u64; use std::uint; -use std::vec; // The name of a test. 
By convention this follows the rules for rust @@ -184,7 +181,7 @@ pub fn parse_opts(args: &[~str]) -> OptRes { #[deriving(Eq)] pub struct BenchSamples { - ns_iter_samples: ~[f64], + ns_iter_summ: stats::Summary, mb_s: uint } @@ -299,16 +296,15 @@ pub fn run_tests_console(opts: &TestOpts, return success; fn fmt_bench_samples(bs: &BenchSamples) -> ~str { - use stats::Stats; if bs.mb_s != 0 { fmt!("%u ns/iter (+/- %u) = %u MB/s", - bs.ns_iter_samples.median() as uint, - 3 * (bs.ns_iter_samples.median_abs_dev() as uint), + bs.ns_iter_summ.median as uint, + (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint, bs.mb_s) } else { fmt!("%u ns/iter (+/- %u)", - bs.ns_iter_samples.median() as uint, - 3 * (bs.ns_iter_samples.median_abs_dev() as uint)) + bs.ns_iter_summ.median as uint, + (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint) } } @@ -688,54 +684,48 @@ impl BenchHarness { } } - // This is a more statistics-driven benchmark algorithm. - // It stops as quickly as 50ms, so long as the statistical - // properties are satisfactory. If those properties are - // not met, it may run as long as the Go algorithm. - pub fn auto_bench(&mut self, f: &fn(&mut BenchHarness)) -> ~[f64] { + // This is a more statistics-driven benchmark algorithm. It stops as + // quickly as 100ms, so long as the statistical properties are + // satisfactory. If those properties are not met, it may run as long as + // the Go algorithm. 
+ pub fn auto_bench(&mut self, f: &fn(&mut BenchHarness)) -> stats::Summary { - let mut rng = rand::rng(); - let mut magnitude = 10; - let mut prev_madp = 0.0; + let mut magnitude = 1000; + let samples : &mut [f64] = [0.0_f64, ..100]; loop { - let n_samples = rng.gen_uint_range(50, 60); - let n_iter = rng.gen_uint_range(magnitude, - magnitude * 2); + let loop_start = precise_time_ns(); - let samples = do vec::from_fn(n_samples) |_| { - self.bench_n(n_iter as u64, |x| f(x)); - self.ns_per_iter() as f64 + for samples.mut_iter().advance() |p| { + self.bench_n(magnitude as u64, |x| f(x)); + *p = self.ns_per_iter() as f64; }; - // Eliminate outliers - let med = samples.median(); - let mad = samples.median_abs_dev(); - let samples = do samples.consume_iter().filter |f| { - num::abs(*f - med) <= 3.0 * mad - }.collect::<~[f64]>(); - - debug!("%u samples, median %f, MAD=%f, %u survived filter", - n_samples, med as float, mad as float, - samples.len()); - - if samples.len() != 0 { - // If we have _any_ cluster of signal... - let curr_madp = samples.median_abs_dev_pct(); - if self.ns_elapsed() > 1_000_000 && - (curr_madp < 1.0 || - num::abs(curr_madp - prev_madp) < 0.1) { - return samples; - } - prev_madp = curr_madp; + // Clip top 10% and bottom 10% of outliers + stats::winsorize(samples, 10.0); + let summ = stats::Summary::new(samples); - if n_iter > 20_000_000 || - self.ns_elapsed() > 20_000_000 { - return samples; - } + debug!("%u samples, median %f, MAD=%f, MADP=%f", + samples.len(), + summ.median as float, + summ.median_abs_dev as float, + summ.median_abs_dev_pct as float); + + let now = precise_time_ns(); + let loop_run = now - loop_start; + + // Stop early if we have a good signal after a 100ms loop. + if loop_run > 100_000_000 && summ.median_abs_dev_pct < 5.0 { + return summ; + } + + // Longest we ever run for is 1s. 
+ if loop_run > 1_000_000_000 { + return summ; } - magnitude *= 2; + magnitude *= 3; + magnitude /= 2; } } } @@ -752,13 +742,13 @@ pub mod bench { bytes: 0 }; - let ns_iter_samples = bs.auto_bench(f); + let ns_iter_summ = bs.auto_bench(f); - let iter_s = 1_000_000_000 / (ns_iter_samples.median() as u64); + let iter_s = 1_000_000_000 / (ns_iter_summ.median as u64); let mb_s = (bs.bytes * iter_s) / 1_000_000; BenchSamples { - ns_iter_samples: ns_iter_samples, + ns_iter_summ: ns_iter_summ, mb_s: mb_s as uint } } From bf1f69c1565acfb33ea02389e2c6d6c87ee63e3a Mon Sep 17 00:00:00 2001 From: Graydon Hoare Date: Mon, 8 Jul 2013 16:21:09 -0700 Subject: [PATCH 4/7] extra: add ToJson for TreeMap. --- src/libextra/json.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/libextra/json.rs b/src/libextra/json.rs index 06b6d0cb29e8a..3567d5001bdf1 100644 --- a/src/libextra/json.rs +++ b/src/libextra/json.rs @@ -27,6 +27,7 @@ use std::to_str; use serialize::Encodable; use serialize; use sort::Sort; +use treemap::TreeMap; /// Represents a json value pub enum Json { @@ -1340,6 +1341,16 @@ impl ToJson for HashMap<~str, A> { } } +impl ToJson for TreeMap<~str, A> { + fn to_json(&self) -> Json { + let mut d = HashMap::new(); + for self.iter().advance |(key, value)| { + d.insert(copy *key, value.to_json()); + } + Object(~d) + } +} + impl ToJson for Option { fn to_json(&self) -> Json { match *self { From 8614d1694cda26118c2f44a2acd986f137935816 Mon Sep 17 00:00:00 2001 From: Graydon Hoare Date: Tue, 9 Jul 2013 17:18:02 -0700 Subject: [PATCH 5/7] extra: factor ConsoleTestState methods into an impl, fix perf bug. It was re-reading terminfo on each line of output. 
--- src/libextra/test.rs | 287 ++++++++++++++++++++++--------------------- 1 file changed, 148 insertions(+), 139 deletions(-) diff --git a/src/libextra/test.rs b/src/libextra/test.rs index 14de7d0c21d0c..6ef03744366ad 100644 --- a/src/libextra/test.rs +++ b/src/libextra/test.rs @@ -31,7 +31,6 @@ use std::result; use std::task; use std::to_str::ToStr; use std::u64; -use std::uint; // The name of a test. By convention this follows the rules for rust @@ -191,6 +190,7 @@ pub enum TestResult { TrOk, TrFailed, TrIgnored, TrBench(BenchSamples) } struct ConsoleTestState { out: @io::Writer, log_out: Option<@io::Writer>, + term: Option, use_color: bool, total: uint, passed: uint, @@ -200,171 +200,180 @@ struct ConsoleTestState { failures: ~[TestDesc] } -// A simple console test runner -pub fn run_tests_console(opts: &TestOpts, - tests: ~[TestDescAndFn]) -> bool { - fn callback(event: &TestEvent, st: &mut ConsoleTestState) { - debug!("callback(event=%?)", event); - match copy *event { - TeFiltered(ref filtered_tests) => { - st.total = filtered_tests.len(); - let noun = if st.total != 1 { ~"tests" } else { ~"test" }; - st.out.write_line(fmt!("\nrunning %u %s", st.total, noun)); - } - TeWait(ref test) => st.out.write_str( - fmt!("test %s ... 
", test.name.to_str())), - TeResult(test, result) => { - match st.log_out { - Some(f) => write_log(f, copy result, &test), - None => () - } - match result { - TrOk => { - st.passed += 1; - write_ok(st.out, st.use_color); - st.out.write_line(""); - } - TrFailed => { - st.failed += 1; - write_failed(st.out, st.use_color); - st.out.write_line(""); - st.failures.push(test); - } - TrIgnored => { - st.ignored += 1; - write_ignored(st.out, st.use_color); - st.out.write_line(""); - } - TrBench(bs) => { - st.benchmarked += 1u; - write_bench(st.out, st.use_color); - st.out.write_line(fmt!(": %s", - fmt_bench_samples(&bs))); - } - } - } +impl ConsoleTestState { + pub fn new(opts: &TestOpts) -> ConsoleTestState { + let log_out = match opts.logfile { + Some(ref path) => match io::file_writer(path, + [io::Create, + io::Truncate]) { + result::Ok(w) => Some(w), + result::Err(ref s) => { + fail!("can't open output file: %s", *s) + } + }, + None => None + }; + let out = io::stdout(); + let term = match term::Terminal::new(out) { + Err(_) => None, + Ok(t) => Some(t) + }; + ConsoleTestState { + out: out, + log_out: log_out, + use_color: use_color(), + term: term, + total: 0u, + passed: 0u, + failed: 0u, + ignored: 0u, + benchmarked: 0u, + failures: ~[] } } - let log_out = match opts.logfile { - Some(ref path) => match io::file_writer(path, - [io::Create, - io::Truncate]) { - result::Ok(w) => Some(w), - result::Err(ref s) => { - fail!("can't open output file: %s", *s) - } - }, - None => None - }; - - let st = @mut ConsoleTestState { - out: io::stdout(), - log_out: log_out, - use_color: use_color(), - total: 0u, - passed: 0u, - failed: 0u, - ignored: 0u, - benchmarked: 0u, - failures: ~[] - }; - - run_tests(opts, tests, |x| callback(&x, st)); - - assert!(st.passed + st.failed + - st.ignored + st.benchmarked == st.total); - let success = st.failed == 0u; + pub fn write_ok(&self) { + self.write_pretty("ok", term::color::GREEN); + } - if !success { - print_failures(st); + pub fn 
write_failed(&self) { + self.write_pretty("FAILED", term::color::RED); } - { - let st: &mut ConsoleTestState = st; - st.out.write_str(fmt!("\nresult: ")); - if success { - // There's no parallelism at this point so it's safe to use color - write_ok(st.out, true); - } else { - write_failed(st.out, true); - } - st.out.write_str(fmt!(". %u passed; %u failed; %u ignored\n\n", - st.passed, st.failed, st.ignored)); + pub fn write_ignored(&self) { + self.write_pretty("ignored", term::color::YELLOW); } - return success; + pub fn write_bench(&self) { + self.write_pretty("bench", term::color::CYAN); + } - fn fmt_bench_samples(bs: &BenchSamples) -> ~str { - if bs.mb_s != 0 { - fmt!("%u ns/iter (+/- %u) = %u MB/s", - bs.ns_iter_summ.median as uint, - (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint, - bs.mb_s) - } else { - fmt!("%u ns/iter (+/- %u)", - bs.ns_iter_summ.median as uint, - (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint) + pub fn write_pretty(&self, + word: &str, + color: term::color::Color) { + match self.term { + None => self.out.write_str(word), + Some(ref t) => { + if self.use_color { + t.fg(color); + } + self.out.write_str(word); + if self.use_color { + t.reset(); + } + } } } - fn write_log(out: @io::Writer, result: TestResult, test: &TestDesc) { - out.write_line(fmt!("%s %s", - match result { - TrOk => ~"ok", - TrFailed => ~"failed", - TrIgnored => ~"ignored", - TrBench(ref bs) => fmt_bench_samples(bs) - }, test.name.to_str())); + pub fn write_run_start(&mut self, len: uint) { + self.total = len; + let noun = if len != 1 { &"tests" } else { &"test" }; + self.out.write_line(fmt!("\nrunning %u %s", len, noun)); } - fn write_ok(out: @io::Writer, use_color: bool) { - write_pretty(out, "ok", term::color::GREEN, use_color); + pub fn write_test_start(&self, test: &TestDesc) { + self.out.write_str(fmt!("test %s ... 
", test.name.to_str())); } - fn write_failed(out: @io::Writer, use_color: bool) { - write_pretty(out, "FAILED", term::color::RED, use_color); + pub fn write_result(&self, result: &TestResult) { + match *result { + TrOk => self.write_ok(), + TrFailed => self.write_failed(), + TrIgnored => self.write_ignored(), + TrBench(ref bs) => { + self.write_bench(); + self.out.write_str(": " + fmt_bench_samples(bs)) + } + } + self.out.write_str(&"\n"); + } + + pub fn write_log(&self, test: &TestDesc, result: &TestResult) { + match self.log_out { + None => (), + Some(out) => { + out.write_line(fmt!("%s %s", + match *result { + TrOk => ~"ok", + TrFailed => ~"failed", + TrIgnored => ~"ignored", + TrBench(ref bs) => fmt_bench_samples(bs) + }, test.name.to_str())); + } + } } - fn write_ignored(out: @io::Writer, use_color: bool) { - write_pretty(out, "ignored", term::color::YELLOW, use_color); + pub fn write_failures(&self) { + self.out.write_line("\nfailures:"); + let mut failures = ~[]; + for self.failures.iter().advance() |f| { + failures.push(f.name.to_str()); + } + sort::tim_sort(failures); + for failures.iter().advance |name| { + self.out.write_line(fmt!(" %s", name.to_str())); + } } - fn write_bench(out: @io::Writer, use_color: bool) { - write_pretty(out, "bench", term::color::CYAN, use_color); - } + pub fn write_run_finish(&self) -> bool { + assert!(self.passed + self.failed + self.ignored + self.benchmarked == self.total); + let success = self.failed == 0u; + if !success { + self.write_failures(); + } - fn write_pretty(out: @io::Writer, - word: &str, - color: term::color::Color, - use_color: bool) { - let t = term::Terminal::new(out); - match t { - Ok(term) => { - if use_color { - term.fg(color); - } - out.write_str(word); - if use_color { - term.reset(); - } - }, - Err(_) => out.write_str(word) + self.out.write_str("\nresult: "); + if success { + // There's no parallelism at this point so it's safe to use color + self.write_ok(); + } else { + self.write_failed(); } + 
self.out.write_str(fmt!(". %u passed; %u failed; %u ignored, %u benchmarked\n\n", + self.passed, self.failed, self.ignored, self.benchmarked)); + return success; } } -fn print_failures(st: &ConsoleTestState) { - st.out.write_line("\nfailures:"); - let mut failures = ~[]; - for uint::range(0, st.failures.len()) |i| { - let name = copy st.failures[i].name; - failures.push(name.to_str()); +pub fn fmt_bench_samples(bs: &BenchSamples) -> ~str { + if bs.mb_s != 0 { + fmt!("%u ns/iter (+/- %u) = %u MB/s", + bs.ns_iter_summ.median as uint, + (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint, + bs.mb_s) + } else { + fmt!("%u ns/iter (+/- %u)", + bs.ns_iter_summ.median as uint, + (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint) } - sort::tim_sort(failures); - for failures.iter().advance |name| { - st.out.write_line(fmt!(" %s", name.to_str())); +} + +// A simple console test runner +pub fn run_tests_console(opts: &TestOpts, + tests: ~[TestDescAndFn]) -> bool { + fn callback(event: &TestEvent, st: &mut ConsoleTestState) { + debug!("callback(event=%?)", event); + match copy *event { + TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()), + TeWait(ref test) => st.write_test_start(test), + TeResult(test, result) => { + st.write_log(&test, &result); + st.write_result(&result); + match result { + TrOk => st.passed += 1, + TrIgnored => st.ignored += 1, + TrBench(_) => st.benchmarked += 1, + TrFailed => { + st.failed += 1; + st.failures.push(test); + } + } + } + } } + let st = @mut ConsoleTestState::new(opts); + run_tests(opts, tests, |x| callback(&x, st)); + return st.write_run_finish(); } #[test] From 83fb3d224a40022d6083b42f4296731a9edfdc70 Mon Sep 17 00:00:00 2001 From: Graydon Hoare Date: Wed, 10 Jul 2013 16:17:41 -0700 Subject: [PATCH 6/7] extra: add metrics ratchet to test driver. 
--- src/libextra/json.rs | 2 +- src/libextra/test.rs | 381 +++++++++++++++++++++++++++++++++---------- 2 files changed, 299 insertions(+), 84 deletions(-) diff --git a/src/libextra/json.rs b/src/libextra/json.rs index 3567d5001bdf1..7a3d3bf0cf355 100644 --- a/src/libextra/json.rs +++ b/src/libextra/json.rs @@ -1226,7 +1226,7 @@ impl Ord for Json { } /// A trait for converting values to JSON -trait ToJson { +pub trait ToJson { /// Converts the value of `self` to an instance of JSON fn to_json(&self) -> Json; } diff --git a/src/libextra/test.rs b/src/libextra/test.rs index 6ef03744366ad..a284c8071696f 100644 --- a/src/libextra/test.rs +++ b/src/libextra/test.rs @@ -17,11 +17,15 @@ use getopts; +use json::ToJson; +use json; +use serialize::Decodable; use sort; -use stats; use stats::Stats; +use stats; use term; use time::precise_time_ns; +use treemap::TreeMap; use std::comm::{stream, SharedChan}; use std::either; @@ -31,6 +35,8 @@ use std::result; use std::task; use std::to_str::ToStr; use std::u64; +use std::hashmap::HashMap; +use std::os; // The name of a test. By convention this follows the rules for rust @@ -83,6 +89,25 @@ pub struct TestDescAndFn { testfn: TestFn, } +#[deriving(Encodable,Decodable,Eq)] +pub struct Metric { + value: f64, + noise: f64 +} + +pub struct MetricMap(TreeMap<~str,Metric>); + +/// Analysis of a single change in metric +pub enum MetricChange { + LikelyNoise, + MetricAdded, + MetricRemoved, + Improvement(f64), + Regression(f64) +} + +pub type MetricDiff = TreeMap<~str,MetricChange>; + // The default console test runner. It accepts the command line // arguments and a vector of test_descs. 
pub fn test_main(args: &[~str], tests: ~[TestDescAndFn]) { @@ -123,8 +148,8 @@ pub struct TestOpts { run_ignored: bool, run_tests: bool, run_benchmarks: bool, - save_results: Option, - compare_results: Option, + ratchet_metrics: Option, + save_metrics: Option, logfile: Option } @@ -136,8 +161,8 @@ pub fn parse_opts(args: &[~str]) -> OptRes { let opts = ~[getopts::optflag("ignored"), getopts::optflag("test"), getopts::optflag("bench"), - getopts::optopt("save"), - getopts::optopt("diff"), + getopts::optopt("save-metrics"), + getopts::optopt("ratchet-metrics"), getopts::optopt("logfile")]; let matches = match getopts::getopts(args_, opts) { @@ -159,19 +184,19 @@ pub fn parse_opts(args: &[~str]) -> OptRes { let run_tests = ! run_benchmarks || getopts::opt_present(&matches, "test"); - let save_results = getopts::opt_maybe_str(&matches, "save"); - let save_results = save_results.map(|s| Path(*s)); + let ratchet_metrics = getopts::opt_maybe_str(&matches, "ratchet-metrics"); + let ratchet_metrics = ratchet_metrics.map(|s| Path(*s)); - let compare_results = getopts::opt_maybe_str(&matches, "diff"); - let compare_results = compare_results.map(|s| Path(*s)); + let save_metrics = getopts::opt_maybe_str(&matches, "save-metrics"); + let save_metrics = save_metrics.map(|s| Path(*s)); let test_opts = TestOpts { filter: filter, run_ignored: run_ignored, run_tests: run_tests, run_benchmarks: run_benchmarks, - save_results: save_results, - compare_results: compare_results, + ratchet_metrics: ratchet_metrics, + save_metrics: save_metrics, logfile: logfile }; @@ -197,6 +222,7 @@ struct ConsoleTestState { failed: uint, ignored: uint, benchmarked: uint, + metrics: MetricMap, failures: ~[TestDesc] } @@ -228,6 +254,7 @@ impl ConsoleTestState { failed: 0u, ignored: 0u, benchmarked: 0u, + metrics: MetricMap::new(), failures: ~[] } } @@ -248,6 +275,23 @@ impl ConsoleTestState { self.write_pretty("bench", term::color::CYAN); } + + pub fn write_added(&self) { + self.write_pretty("added", 
term::color::GREEN); + } + + pub fn write_improved(&self) { + self.write_pretty("improved", term::color::GREEN); + } + + pub fn write_removed(&self) { + self.write_pretty("removed", term::color::YELLOW); + } + + pub fn write_regressed(&self) { + self.write_pretty("regressed", term::color::RED); + } + pub fn write_pretty(&self, word: &str, color: term::color::Color) { @@ -315,14 +359,73 @@ impl ConsoleTestState { } } - pub fn write_run_finish(&self) -> bool { + pub fn write_metric_diff(&self, diff: &MetricDiff) { + let mut noise = 0; + let mut improved = 0; + let mut regressed = 0; + let mut added = 0; + let mut removed = 0; + + for diff.iter().advance() |(k, v)| { + match *v { + LikelyNoise => noise += 1, + MetricAdded => { + added += 1; + self.write_added(); + self.out.write_line(fmt!(": %s", *k)); + } + MetricRemoved => { + removed += 1; + self.write_removed(); + self.out.write_line(fmt!(": %s", *k)); + } + Improvement(pct) => { + improved += 1; + self.out.write_str(*k); + self.out.write_str(": "); + self.write_improved(); + self.out.write_line(fmt!(" by %.2f%%", pct as float)) + } + Regression(pct) => { + regressed += 1; + self.out.write_str(*k); + self.out.write_str(": "); + self.write_regressed(); + self.out.write_line(fmt!(" by %.2f%%", pct as float)) + } + } + } + self.out.write_line(fmt!("result of ratchet: %u metrics added, %u removed, \ + %u improved, %u regressed, %u noise", + added, removed, improved, regressed, noise)); + if regressed == 0 { + self.out.write_line("updated ratchet file") + } else { + self.out.write_line("left ratchet file untouched") + } + } + + pub fn write_run_finish(&self, ratchet_metrics: &Option) -> bool { assert!(self.passed + self.failed + self.ignored + self.benchmarked == self.total); - let success = self.failed == 0u; - if !success { + + let ratchet_success = match *ratchet_metrics { + None => true, + Some(ref pth) => { + self.out.write_str(fmt!("\nusing metrics ratchet: %s\n", pth.to_str())); + let (diff, ok) = 
self.metrics.ratchet(pth); + self.write_metric_diff(&diff); + ok + } + }; + + let test_success = self.failed == 0u; + if !test_success { self.write_failures(); } - self.out.write_str("\nresult: "); + let success = ratchet_success && test_success; + + self.out.write_str("\ntest result: "); if success { // There's no parallelism at this point so it's safe to use color self.write_ok(); @@ -362,7 +465,12 @@ pub fn run_tests_console(opts: &TestOpts, match result { TrOk => st.passed += 1, TrIgnored => st.ignored += 1, - TrBench(_) => st.benchmarked += 1, + TrBench(bs) => { + st.metrics.insert_metric(test.name.to_str(), + bs.ns_iter_summ.median, + bs.ns_iter_summ.max - bs.ns_iter_summ.min); + st.benchmarked += 1 + } TrFailed => { st.failed += 1; st.failures.push(test); @@ -373,7 +481,14 @@ pub fn run_tests_console(opts: &TestOpts, } let st = @mut ConsoleTestState::new(opts); run_tests(opts, tests, |x| callback(&x, st)); - return st.write_run_finish(); + match opts.save_metrics { + None => (), + Some(ref pth) => { + st.metrics.save(pth); + st.out.write_str(fmt!("\nmetrics saved to: %s", pth.to_str())); + } + } + return st.write_run_finish(&opts.ratchet_metrics); } #[test] @@ -402,6 +517,7 @@ fn should_sort_failures_before_printing_them() { failed: 0u, ignored: 0u, benchmarked: 0u, + metrics: MetricsMap::new(), failures: ~[test_b, test_a] }; @@ -610,6 +726,133 @@ fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult { } } + +impl ToJson for Metric { + fn to_json(&self) -> json::Json { + let mut map = ~HashMap::new(); + map.insert(~"value", json::Number(self.value as float)); + map.insert(~"noise", json::Number(self.noise as float)); + json::Object(map) + } +} + +impl MetricMap { + + fn new() -> MetricMap { + MetricMap(TreeMap::new()) + } + + /// Load MetricDiff from a file. 
+ fn load(p: &Path) -> MetricMap { + assert!(os::path_exists(p)); + let f = io::file_reader(p).get(); + let mut decoder = json::Decoder(json::from_reader(f).get()); + MetricMap(Decodable::decode(&mut decoder)) + } + + /// Write MetricDiff to a file. + pub fn save(&self, p: &Path) { + let f = io::file_writer(p, [io::Create, io::Truncate]).get(); + json::to_pretty_writer(f, &self.to_json()); + } + + /// Compare against another MetricMap + pub fn compare_to_old(&self, old: MetricMap) -> MetricDiff { + let mut diff : MetricDiff = TreeMap::new(); + for old.iter().advance |(k, vold)| { + let r = match self.find(k) { + None => MetricRemoved, + Some(v) => { + let delta = (v.value - vold.value); + if delta.abs() < vold.noise.abs() { + LikelyNoise + } else { + let pct = delta.abs() / v.value * 100.0; + if vold.noise < 0.0 { + // When 'noise' is negative, it means we want + // to see deltas that go up over time, and can + // only tolerate slight negative movement. + if delta < 0.0 { + Regression(pct) + } else { + Improvement(pct) + } + } else { + // When 'noise' is positive, it means we want + // to see deltas that go down over time, and + // can only tolerate slight positive movements. + if delta < 0.0 { + Improvement(pct) + } else { + Regression(pct) + } + } + } + } + }; + diff.insert(copy *k, r); + } + for self.iter().advance |(k, _)| { + if !diff.contains_key(k) { + diff.insert(copy *k, MetricAdded); + } + } + diff + } + + /// Insert a named `value` (+/- `noise`) metric into the map. The value + /// must be non-negative. The `noise` indicates the uncertainty of the + /// metric, which doubles as the "noise range" of acceptable + /// pairwise-regressions on this named value, when comparing from one + /// metric to the next using `compare_to_old`. + /// + /// If `noise` is positive, then it means this metric is of a value + /// you want to see grow smaller, so a change larger than `noise` in the + /// positive direction represents a regression. 
+ /// + /// If `noise` is negative, then it means this metric is of a value + /// you want to see grow larger, so a change larger than `noise` in the + /// negative direction represents a regression. + pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) { + let m = Metric { + value: value, + noise: noise + }; + self.insert(name.to_owned(), m); + } + + /// Attempt to "ratchet" an external metric file. This involves loading + /// metrics from a metric file (if it exists), comparing against + /// the metrics in `self` using `compare_to_old`, and rewriting the + /// file to contain the metrics in `self` if none of the + /// `MetricChange`s are `Regression`. Returns the diff as well + /// as a boolean indicating whether the ratchet succeeded. + pub fn ratchet(&self, p: &Path) -> (MetricDiff, bool) { + let old = if os::path_exists(p) { + MetricMap::load(p) + } else { + MetricMap::new() + }; + + let diff : MetricDiff = self.compare_to_old(old); + let ok = do diff.iter().all() |(_, v)| { + match *v { + Regression(_) => false, + _ => true + } + }; + + if ok { + debug!("rewriting file '%s' with updated metrics"); + self.save(p); + } + return (diff, ok) + } +} + + +// Benchmarking + impl BenchHarness { /// Callback for benchmark functions to run in their body. pub fn iter(&mut self, inner:&fn()) { @@ -644,76 +887,42 @@ impl BenchHarness { f(self); } - // This is the Go benchmark algorithm. It produces a single - // datapoint and always tries to run for 1s. - pub fn go_bench(&mut self, f: &fn(&mut BenchHarness)) { - - // Rounds a number down to the nearest power of 10. - fn round_down_10(n: u64) -> u64 { - let mut n = n; - let mut res = 1; - while n > 10 { - n = n / 10; - res *= 10; - } - res - } - - // Rounds x up to a number of the form [1eX, 2eX, 5eX]. 
- fn round_up(n: u64) -> u64 { - let base = round_down_10(n); - if n < (2 * base) { - 2 * base - } else if n < (5 * base) { - 5 * base - } else { - 10 * base - } - } + // This is a more statistics-driven benchmark algorithm + pub fn auto_bench(&mut self, f: &fn(&mut BenchHarness)) -> stats::Summary { // Initial bench run to get ballpark figure. let mut n = 1_u64; self.bench_n(n, |x| f(x)); - while n < 1_000_000_000 && - self.ns_elapsed() < 1_000_000_000 { - let last = n; - - // Try to estimate iter count for 1s falling back to 1bn - // iterations if first run took < 1ns. - if self.ns_per_iter() == 0 { - n = 1_000_000_000; - } else { - n = 1_000_000_000 / self.ns_per_iter(); - } - - n = u64::max(u64::min(n+n/2, 100*last), last+1); - n = round_up(n); - self.bench_n(n, |x| f(x)); + // Try to estimate iter count for 1ms falling back to 1m + // iterations if first run took < 1ns. + if self.ns_per_iter() == 0 { + n = 1_000_000; + } else { + n = 1_000_000 / self.ns_per_iter(); } - } - // This is a more statistics-driven benchmark algorithm. It stops as - // quickly as 100ms, so long as the statistical properties are - // satisfactory. If those properties are not met, it may run as long as - // the Go algorithm. 
- pub fn auto_bench(&mut self, f: &fn(&mut BenchHarness)) -> stats::Summary { - - let mut magnitude = 1000; - - let samples : &mut [f64] = [0.0_f64, ..100]; + let mut total_run = 0; + let samples : &mut [f64] = [0.0_f64, ..50]; loop { let loop_start = precise_time_ns(); for samples.mut_iter().advance() |p| { - self.bench_n(magnitude as u64, |x| f(x)); + self.bench_n(n as u64, |x| f(x)); *p = self.ns_per_iter() as f64; }; - // Clip top 10% and bottom 10% of outliers - stats::winsorize(samples, 10.0); + stats::winsorize(samples, 5.0); let summ = stats::Summary::new(samples); + for samples.mut_iter().advance() |p| { + self.bench_n(5 * n as u64, |x| f(x)); + *p = self.ns_per_iter() as f64; + }; + + stats::winsorize(samples, 5.0); + let summ5 = stats::Summary::new(samples); + debug!("%u samples, median %f, MAD=%f, MADP=%f", samples.len(), summ.median as float, @@ -723,20 +932,27 @@ impl BenchHarness { let now = precise_time_ns(); let loop_run = now - loop_start; - // Stop early if we have a good signal after a 100ms loop. - if loop_run > 100_000_000 && summ.median_abs_dev_pct < 5.0 { - return summ; + // If we've run for 100ms and seem to have converged to a + // stable median. + if loop_run > 100_000_000 && + summ.median_abs_dev_pct < 1.0 && + summ.median - summ5.median < summ5.median_abs_dev { + return summ5; } - // Longest we ever run for is 1s. - if loop_run > 1_000_000_000 { - return summ; + total_run += loop_run; + // Longest we ever run for is 10s. 
+ if total_run > 10_000_000_000 { + return summ5; } - magnitude *= 3; - magnitude /= 2; + n *= 2; } } + + + + } pub mod bench { @@ -881,8 +1097,7 @@ mod tests { logfile: option::None, run_tests: true, run_benchmarks: false, - save_results: option::None, - compare_results: option::None + ratchet: option::None, }; let tests = ~[ @@ -918,8 +1133,8 @@ mod tests { logfile: option::None, run_tests: true, run_benchmarks: false, - save_results: option::None, - compare_results: option::None + ratchet_metrics: option::None, + save_metrics: option::None, }; let names = From bbdbd3c69d35e3409d6c652c188ebd2d951b2e0e Mon Sep 17 00:00:00 2001 From: Graydon Hoare Date: Thu, 11 Jul 2013 15:16:11 -0700 Subject: [PATCH 7/7] extra: add explicit ratchet-noise-percent option to benchmark ratchet, plus a few test breaking fixes. --- src/compiletest/compiletest.rs | 9 +++-- src/libextra/json.rs | 4 +- src/libextra/test.rs | 74 +++++++++++++++++++++++----------- 3 files changed, 57 insertions(+), 30 deletions(-) diff --git a/src/compiletest/compiletest.rs b/src/compiletest/compiletest.rs index 5d3f81fd88456..a411e714247ed 100644 --- a/src/compiletest/compiletest.rs +++ b/src/compiletest/compiletest.rs @@ -79,7 +79,7 @@ pub fn parse_config(args: ~[~str]) -> config { let args_ = args.tail(); if args[1] == ~"-h" || args[1] == ~"--help" { let message = fmt!("Usage: %s [OPTIONS] [TESTNAME...]", argv0); - io::println(getopts::groups::usage(message, groups)); + println(getopts::groups::usage(message, groups)); fail!() } @@ -91,7 +91,7 @@ pub fn parse_config(args: ~[~str]) -> config { if getopts::opt_present(matches, "h") || getopts::opt_present(matches, "help") { let message = fmt!("Usage: %s [OPTIONS] [TESTNAME...]", argv0); - io::println(getopts::groups::usage(message, groups)); + println(getopts::groups::usage(message, groups)); fail!() } @@ -216,8 +216,9 @@ pub fn test_opts(config: &config) -> test::TestOpts { logfile: copy config.logfile, run_tests: true, run_benchmarks: false, - 
save_results: None, - compare_results: None + ratchet_metrics: None, + ratchet_noise_percent: None, + save_metrics: None, } } diff --git a/src/libextra/json.rs b/src/libextra/json.rs index 7a3d3bf0cf355..2f17e4a741761 100644 --- a/src/libextra/json.rs +++ b/src/libextra/json.rs @@ -1331,7 +1331,7 @@ impl ToJson for ~[A] { fn to_json(&self) -> Json { List(self.map(|elt| elt.to_json())) } } -impl ToJson for HashMap<~str, A> { +impl ToJson for HashMap<~str, A> { fn to_json(&self) -> Json { let mut d = HashMap::new(); for self.iter().advance |(key, value)| { @@ -1341,7 +1341,7 @@ impl ToJson for HashMap<~str, A> { } } -impl ToJson for TreeMap<~str, A> { +impl ToJson for TreeMap<~str, A> { fn to_json(&self) -> Json { let mut d = HashMap::new(); for self.iter().advance |(key, value)| { diff --git a/src/libextra/test.rs b/src/libextra/test.rs index a284c8071696f..96ca429676850 100644 --- a/src/libextra/test.rs +++ b/src/libextra/test.rs @@ -30,11 +30,11 @@ use treemap::TreeMap; use std::comm::{stream, SharedChan}; use std::either; use std::io; -use std::option; use std::result; use std::task; use std::to_str::ToStr; use std::u64; +use std::f64; use std::hashmap::HashMap; use std::os; @@ -149,6 +149,7 @@ pub struct TestOpts { run_tests: bool, run_benchmarks: bool, ratchet_metrics: Option, + ratchet_noise_percent: Option, save_metrics: Option, logfile: Option } @@ -163,6 +164,7 @@ pub fn parse_opts(args: &[~str]) -> OptRes { getopts::optflag("bench"), getopts::optopt("save-metrics"), getopts::optopt("ratchet-metrics"), + getopts::optopt("ratchet-noise-percent"), getopts::optopt("logfile")]; let matches = match getopts::getopts(args_, opts) { @@ -172,8 +174,8 @@ pub fn parse_opts(args: &[~str]) -> OptRes { let filter = if matches.free.len() > 0 { - option::Some(copy (matches).free[0]) - } else { option::None }; + Some(copy (matches).free[0]) + } else { None }; let run_ignored = getopts::opt_present(&matches, "ignored"); @@ -187,6 +189,10 @@ pub fn parse_opts(args: &[~str]) 
-> OptRes { let ratchet_metrics = getopts::opt_maybe_str(&matches, "ratchet-metrics"); let ratchet_metrics = ratchet_metrics.map(|s| Path(*s)); + let ratchet_noise_percent = + getopts::opt_maybe_str(&matches, "ratchet-noise-percent"); + let ratchet_noise_percent = ratchet_noise_percent.map(|s| f64::from_str(*s).get()); + let save_metrics = getopts::opt_maybe_str(&matches, "save-metrics"); let save_metrics = save_metrics.map(|s| Path(*s)); @@ -196,6 +202,7 @@ pub fn parse_opts(args: &[~str]) -> OptRes { run_tests: run_tests, run_benchmarks: run_benchmarks, ratchet_metrics: ratchet_metrics, + ratchet_noise_percent: ratchet_noise_percent, save_metrics: save_metrics, logfile: logfile }; @@ -405,14 +412,22 @@ impl ConsoleTestState { } } - pub fn write_run_finish(&self, ratchet_metrics: &Option) -> bool { + pub fn write_run_finish(&self, + ratchet_metrics: &Option, + ratchet_pct: Option) -> bool { assert!(self.passed + self.failed + self.ignored + self.benchmarked == self.total); let ratchet_success = match *ratchet_metrics { None => true, Some(ref pth) => { self.out.write_str(fmt!("\nusing metrics ratchet: %s\n", pth.to_str())); - let (diff, ok) = self.metrics.ratchet(pth); + match ratchet_pct { + None => (), + Some(pct) => + self.out.write_str(fmt!("with noise-tolerance forced to: %f%%\n", + pct as float)) + } + let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct); self.write_metric_diff(&diff); ok } @@ -488,7 +503,7 @@ pub fn run_tests_console(opts: &TestOpts, st.out.write_str(fmt!("\nmetrics saved to: %s", pth.to_str())); } } - return st.write_run_finish(&opts.ratchet_metrics); + return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent); } #[test] @@ -510,18 +525,19 @@ fn should_sort_failures_before_printing_them() { let st = @ConsoleTestState { out: wr, - log_out: option::None, + log_out: None, + term: None, use_color: false, total: 0u, passed: 0u, failed: 0u, ignored: 0u, benchmarked: 0u, - metrics: MetricsMap::new(), + metrics: 
MetricMap::new(), failures: ~[test_b, test_a] }; - print_failures(st); + st.write_failures(); }; let apos = s.find_str("a").get(); @@ -624,15 +640,17 @@ pub fn filter_tests( filtered } else { let filter_str = match opts.filter { - option::Some(ref f) => copy *f, - option::None => ~"" + Some(ref f) => copy *f, + None => ~"" }; fn filter_fn(test: TestDescAndFn, filter_str: &str) -> Option { if test.desc.name.to_str().contains(filter_str) { - return option::Some(test); - } else { return option::None; } + return Some(test); + } else { + return None; + } } filtered.consume_iter().filter_map(|x| filter_fn(x, filter_str)).collect() @@ -757,14 +775,19 @@ impl MetricMap { } /// Compare against another MetricMap - pub fn compare_to_old(&self, old: MetricMap) -> MetricDiff { + pub fn compare_to_old(&self, old: MetricMap, + noise_pct: Option) -> MetricDiff { let mut diff : MetricDiff = TreeMap::new(); for old.iter().advance |(k, vold)| { let r = match self.find(k) { None => MetricRemoved, Some(v) => { let delta = (v.value - vold.value); - if delta.abs() < vold.noise.abs() { + let noise = match noise_pct { + None => f64::max(vold.noise.abs(), v.noise.abs()), + Some(pct) => vold.value * pct / 100.0 + }; + if delta.abs() < noise { LikelyNoise } else { let pct = delta.abs() / v.value * 100.0; @@ -827,14 +850,14 @@ impl MetricMap { /// file to contain the metrics in `self` if none of the /// `MetricChange`s are `Regression`. Returns the diff as well /// as a boolean indicating whether the ratchet succeeded. 
- pub fn ratchet(&self, p: &Path) -> (MetricDiff, bool) { + pub fn ratchet(&self, p: &Path, pct: Option) -> (MetricDiff, bool) { let old = if os::path_exists(p) { MetricMap::load(p) } else { MetricMap::new() }; - let diff : MetricDiff = self.compare_to_old(old); + let diff : MetricDiff = self.compare_to_old(old, pct); let ok = do diff.iter().all() |(_, v)| { match *v { Regression(_) => false, @@ -1092,12 +1115,14 @@ mod tests { // unignored tests and flip the ignore flag on the rest to false let opts = TestOpts { - filter: option::None, + filter: None, run_ignored: true, - logfile: option::None, + logfile: None, run_tests: true, run_benchmarks: false, - ratchet: option::None, + ratchet_noise_percent: None, + ratchet_metrics: None, + save_metrics: None, }; let tests = ~[ @@ -1128,13 +1153,14 @@ mod tests { #[test] pub fn sort_tests() { let opts = TestOpts { - filter: option::None, + filter: None, run_ignored: false, - logfile: option::None, + logfile: None, run_tests: true, run_benchmarks: false, - ratchet_metrics: option::None, - save_metrics: option::None, + ratchet_noise_percent: None, + ratchet_metrics: None, + save_metrics: None, }; let names =