Merge remote-tracking branch 'rust-lang/master'

Conflicts:
	mk/tests.mk
	src/liballoc/arc.rs
	src/liballoc/boxed.rs
	src/liballoc/rc.rs
	src/libcollections/bit.rs
	src/libcollections/btree/map.rs
	src/libcollections/btree/set.rs
	src/libcollections/dlist.rs
	src/libcollections/ring_buf.rs
	src/libcollections/slice.rs
	src/libcollections/str.rs
	src/libcollections/string.rs
	src/libcollections/vec.rs
	src/libcollections/vec_map.rs
	src/libcore/any.rs
	src/libcore/array.rs
	src/libcore/borrow.rs
	src/libcore/error.rs
	src/libcore/fmt/mod.rs
	src/libcore/iter.rs
	src/libcore/marker.rs
	src/libcore/ops.rs
	src/libcore/result.rs
	src/libcore/slice.rs
	src/libcore/str/mod.rs
	src/libregex/lib.rs
	src/libregex/re.rs
	src/librustc/lint/builtin.rs
	src/libstd/collections/hash/map.rs
	src/libstd/collections/hash/set.rs
	src/libstd/sync/mpsc/mod.rs
	src/libstd/sync/mutex.rs
	src/libstd/sync/poison.rs
	src/libstd/sync/rwlock.rs
	src/libsyntax/feature_gate.rs
	src/libsyntax/test.rs
Commit: 63fcbcf3ce
Author: Brian Anderson
Date:   2015-01-24 09:15:42 -08:00
433 changed files with 7348 additions and 12011 deletions

src/libtest/lib.rs

@@ -43,15 +43,14 @@
#![feature(path)]
#![feature(rustc_private)]
#![feature(std_misc)]
#![feature(hash)]
extern crate getopts;
extern crate regex;
extern crate serialize;
extern crate "serialize" as rustc_serialize;
extern crate term;
pub use self::TestFn::*;
pub use self::MetricChange::*;
pub use self::ColorConfig::*;
pub use self::TestResult::*;
pub use self::TestName::*;
@@ -61,18 +60,14 @@ use self::OutputLocation::*;
use stats::Stats;
use getopts::{OptGroup, optflag, optopt};
use regex::Regex;
use serialize::{json, Decodable, Encodable};
use serialize::Encodable;
use term::Terminal;
use term::color::{Color, RED, YELLOW, GREEN, CYAN};
use std::any::Any;
use std::cmp;
use std::collections::BTreeMap;
use std::f64;
use std::fmt::Show;
use std::fmt;
use std::io::fs::PathExtensions;
use std::io::stdio::StdWriter;
use std::io::{File, ChanReader, ChanWriter};
use std::io;
@@ -89,8 +84,7 @@ use std::time::Duration;
pub mod test {
pub use {Bencher, TestName, TestResult, TestDesc,
TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
Metric, MetricMap, MetricAdded, MetricRemoved,
MetricChange, Improvement, Regression, LikelyNoise,
Metric, MetricMap,
StaticTestFn, StaticTestName, DynTestName, DynTestFn,
run_test, test_main, test_main_static, filter_tests,
parse_opts, StaticBenchFn, ShouldFail};
@@ -116,9 +110,9 @@ impl TestName {
}
}
}
impl fmt::String for TestName {
impl fmt::Display for TestName {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::String::fmt(self.as_slice(), f)
fmt::Display::fmt(self.as_slice(), f)
}
}
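For context, this hunk tracks the then-recent formatting-trait split: fmt::Show became fmt::Debug ({:?}) and fmt::String became fmt::Display ({}). A minimal sketch of an impl under the new names, with a hypothetical wrapper type standing in for TestName:

use std::fmt;

struct Label(String); // hypothetical stand-in for TestName

impl fmt::Display for Label {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Delegate to the inner String's own Display impl,
        // mirroring the pattern in the hunk above.
        fmt::Display::fmt(&self.0, f)
    }
}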
@@ -179,7 +173,7 @@ impl TestFn {
}
}
impl fmt::Show for TestFn {
impl fmt::Debug for TestFn {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(match *self {
StaticTestFn(..) => "StaticTestFn(..)",
@@ -249,18 +243,6 @@ impl Clone for MetricMap {
}
}
/// Analysis of a single change in metric
#[derive(Copy, PartialEq, Show)]
pub enum MetricChange {
LikelyNoise,
MetricAdded,
MetricRemoved,
Improvement(f64),
Regression(f64)
}
pub type MetricDiff = BTreeMap<String,MetricChange>;
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
@@ -303,20 +285,13 @@ pub enum ColorConfig {
}
pub struct TestOpts {
pub filter: Option<Regex>,
pub filter: Option<String>,
pub run_ignored: bool,
pub run_tests: bool,
pub run_benchmarks: bool,
pub ratchet_metrics: Option<Path>,
pub ratchet_noise_percent: Option<f64>,
pub save_metrics: Option<Path>,
pub test_shard: Option<(uint,uint)>,
pub logfile: Option<Path>,
pub nocapture: bool,
pub color: ColorConfig,
pub show_boxplot: bool,
pub boxplot_width: uint,
pub show_all_stats: bool,
}
impl TestOpts {
@@ -327,16 +302,9 @@ impl TestOpts {
run_ignored: false,
run_tests: false,
run_benchmarks: false,
ratchet_metrics: None,
ratchet_noise_percent: None,
save_metrics: None,
test_shard: None,
logfile: None,
nocapture: false,
color: AutoColor,
show_boxplot: false,
boxplot_width: 50,
show_all_stats: false,
}
}
}
@@ -349,28 +317,14 @@ fn optgroups() -> Vec<getopts::OptGroup> {
getopts::optflag("", "test", "Run tests and not benchmarks"),
getopts::optflag("", "bench", "Run benchmarks instead of tests"),
getopts::optflag("h", "help", "Display this message (longer with --help)"),
getopts::optopt("", "save-metrics", "Location to save bench metrics",
"PATH"),
getopts::optopt("", "ratchet-metrics",
"Location to load and save metrics from. The metrics \
loaded are cause benchmarks to fail if they run too \
slowly", "PATH"),
getopts::optopt("", "ratchet-noise-percent",
"Tests within N% of the recorded metrics will be \
considered as passing", "PERCENTAGE"),
getopts::optopt("", "logfile", "Write logs to the specified file instead \
of stdout", "PATH"),
getopts::optopt("", "test-shard", "run shard A, of B shards, worth of the testsuite",
"A.B"),
getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
task, allow printing directly"),
getopts::optopt("", "color", "Configure coloring of output:
auto = colorize if stdout is a tty and tests are run serially (default);
always = always colorize output;
never = never colorize output;", "auto|always|never"),
getopts::optflag("", "boxplot", "Display a boxplot of the benchmark statistics"),
getopts::optopt("", "boxplot-width", "Set the boxplot width (default 50)", "WIDTH"),
getopts::optflag("", "stats", "Display the benchmark min, max, and quartiles"))
never = never colorize output;", "auto|always|never"))
}
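For readers unfamiliar with this getopts vintage: optflag declares a boolean switch, optopt a flag that takes a value, and getopts::getopts matches argv against the declared groups. A minimal sketch against that pre-1.0 API (flag names taken from the list above; this will not build on modern Rust):

extern crate getopts;

fn main() {
    // std::os::args() was the pre-1.0 argv accessor.
    let args: Vec<String> = std::os::args();
    let groups = vec![
        getopts::optflag("", "ignored", "Run ignored tests"),
        getopts::optopt("", "logfile", "Write logs to the specified file", "PATH"),
    ];
    // Free (non-flag) arguments land in `matches.free`; parse_opts
    // below takes the first one as the test filter.
    let matches = getopts::getopts(args.tail(), groups.as_slice()).unwrap();
    if matches.opt_present("ignored") { /* run ignored tests */ }
}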
fn usage(binary: &str) {
@@ -417,11 +371,7 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
if matches.opt_present("h") { usage(args[0].as_slice()); return None; }
let filter = if matches.free.len() > 0 {
let s = matches.free[0].as_slice();
match Regex::new(s) {
Ok(re) => Some(re),
Err(e) => return Some(Err(format!("could not parse /{}/: {:?}", s, e)))
}
Some(matches.free[0].clone())
} else {
None
};
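The behavioral change in this hunk: the first free argument used to be compiled as a regex, and is now kept as a plain string matched by substring (see the filter_tests hunk further down). A small sketch of the new semantics in current Rust, with hypothetical names:

// New semantics: plain substring containment.
fn matches_substring(test_name: &str, filter: &str) -> bool {
    test_name.contains(filter)
}

// Old semantics (removed here): the filter was a compiled regex and
// tests were selected with Regex::is_match. A filter of "a.*b"
// previously matched "yes::aXXXb"; it now only matches a test whose
// name literally contains the four characters "a.*b".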
@@ -435,19 +385,6 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
let run_tests = ! run_benchmarks ||
matches.opt_present("test");
let ratchet_metrics = matches.opt_str("ratchet-metrics");
let ratchet_metrics = ratchet_metrics.map(|s| Path::new(s));
let ratchet_noise_percent = matches.opt_str("ratchet-noise-percent");
let ratchet_noise_percent =
ratchet_noise_percent.map(|s| s.as_slice().parse::<f64>().unwrap());
let save_metrics = matches.opt_str("save-metrics");
let save_metrics = save_metrics.map(|s| Path::new(s));
let test_shard = matches.opt_str("test-shard");
let test_shard = opt_shard(test_shard);
let mut nocapture = matches.opt_present("nocapture");
if !nocapture {
nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();
@@ -463,63 +400,19 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
v))),
};
let show_boxplot = matches.opt_present("boxplot");
let boxplot_width = match matches.opt_str("boxplot-width") {
Some(width) => {
match FromStr::from_str(width.as_slice()) {
Some(width) => width,
None => {
return Some(Err(format!("argument for --boxplot-width must be a uint")));
}
}
}
None => 50,
};
let show_all_stats = matches.opt_present("stats");
let test_opts = TestOpts {
filter: filter,
run_ignored: run_ignored,
run_tests: run_tests,
run_benchmarks: run_benchmarks,
ratchet_metrics: ratchet_metrics,
ratchet_noise_percent: ratchet_noise_percent,
save_metrics: save_metrics,
test_shard: test_shard,
logfile: logfile,
nocapture: nocapture,
color: color,
show_boxplot: show_boxplot,
boxplot_width: boxplot_width,
show_all_stats: show_all_stats,
};
Some(Ok(test_opts))
}
pub fn opt_shard(maybestr: Option<String>) -> Option<(uint,uint)> {
match maybestr {
None => None,
Some(s) => {
let mut it = s.split('.');
match (it.next().and_then(|s| s.parse::<uint>()),
it.next().and_then(|s| s.parse::<uint>()),
it.next()) {
(Some(a), Some(b), None) => {
if a <= 0 || a > b {
panic!("tried to run shard {a}.{b}, but {a} is out of bounds \
(should be between 1 and {b}", a=a, b=b)
}
Some((a, b))
}
_ => None,
}
}
}
}
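For the record, the removed --test-shard parser accepted an A.B spec meaning "run shard A of B". A re-rendering in current Rust, for illustration only (the original panicked on out-of-range values instead of returning None):

fn opt_shard(s: &str) -> Option<(usize, usize)> {
    let mut it = s.split('.');
    match (it.next().and_then(|p| p.parse::<usize>().ok()),
           it.next().and_then(|p| p.parse::<usize>().ok()),
           it.next()) {
        // Shards are 1-based: "2.4" means the second quarter of the suite.
        (Some(a), Some(b), None) if a >= 1 && a <= b => Some((a, b)),
        _ => None,
    }
}

// opt_shard("2.4") == Some((2, 4)); opt_shard("2.4.6") == None.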
#[derive(Clone, PartialEq)]
pub struct BenchSamples {
ns_iter_summ: stats::Summary<f64>,
@@ -546,9 +439,6 @@ struct ConsoleTestState<T> {
log_out: Option<File>,
out: OutputLocation<T>,
use_color: bool,
show_boxplot: bool,
boxplot_width: uint,
show_all_stats: bool,
total: uint,
passed: uint,
failed: uint,
@@ -575,9 +465,6 @@ impl<T: Writer> ConsoleTestState<T> {
out: out,
log_out: log_out,
use_color: use_color(opts),
show_boxplot: opts.show_boxplot,
boxplot_width: opts.boxplot_width,
show_all_stats: opts.show_all_stats,
total: 0u,
passed: 0u,
failed: 0u,
@@ -609,22 +496,6 @@ impl<T: Writer> ConsoleTestState<T> {
self.write_pretty("bench", term::color::CYAN)
}
pub fn write_added(&mut self) -> io::IoResult<()> {
self.write_pretty("added", term::color::GREEN)
}
pub fn write_improved(&mut self) -> io::IoResult<()> {
self.write_pretty("improved", term::color::GREEN)
}
pub fn write_removed(&mut self) -> io::IoResult<()> {
self.write_pretty("removed", term::color::YELLOW)
}
pub fn write_regressed(&mut self) -> io::IoResult<()> {
self.write_pretty("regressed", term::color::RED)
}
pub fn write_pretty(&mut self,
word: &str,
color: term::color::Color) -> io::IoResult<()> {
@@ -669,33 +540,13 @@ impl<T: Writer> ConsoleTestState<T> {
TrIgnored => self.write_ignored(),
TrMetrics(ref mm) => {
try!(self.write_metric());
self.write_plain(format!(": {}", fmt_metrics(mm)).as_slice())
self.write_plain(format!(": {}", mm.fmt_metrics()).as_slice())
}
TrBench(ref bs) => {
try!(self.write_bench());
if self.show_boxplot {
let mut wr = Vec::new();
try!(stats::write_boxplot(&mut wr, &bs.ns_iter_summ, self.boxplot_width));
let s = String::from_utf8(wr).unwrap();
try!(self.write_plain(format!(": {}", s).as_slice()));
}
if self.show_all_stats {
let mut wr = Vec::new();
try!(stats::write_5_number_summary(&mut wr, &bs.ns_iter_summ));
let s = String::from_utf8(wr).unwrap();
try!(self.write_plain(format!(": {}", s).as_slice()));
} else {
try!(self.write_plain(format!(": {}",
fmt_bench_samples(bs)).as_slice()));
}
try!(self.write_plain(format!(": {}",
fmt_bench_samples(bs)).as_slice()));
Ok(())
}
@@ -712,7 +563,7 @@ impl<T: Writer> ConsoleTestState<T> {
TrOk => "ok".to_string(),
TrFailed => "failed".to_string(),
TrIgnored => "ignored".to_string(),
TrMetrics(ref mm) => fmt_metrics(mm),
TrMetrics(ref mm) => mm.fmt_metrics(),
TrBench(ref bs) => fmt_bench_samples(bs)
}, test.name.as_slice());
o.write(s.as_bytes())
@@ -748,85 +599,14 @@ impl<T: Writer> ConsoleTestState<T> {
Ok(())
}
pub fn write_metric_diff(&mut self, diff: &MetricDiff) -> io::IoResult<()> {
let mut noise = 0u;
let mut improved = 0u;
let mut regressed = 0u;
let mut added = 0u;
let mut removed = 0u;
for (k, v) in diff.iter() {
match *v {
LikelyNoise => noise += 1,
MetricAdded => {
added += 1;
try!(self.write_added());
try!(self.write_plain(format!(": {}\n", *k).as_slice()));
}
MetricRemoved => {
removed += 1;
try!(self.write_removed());
try!(self.write_plain(format!(": {}\n", *k).as_slice()));
}
Improvement(pct) => {
improved += 1;
try!(self.write_plain(format!(": {} ", *k).as_slice()));
try!(self.write_improved());
try!(self.write_plain(format!(" by {:.2}%\n",
pct as f64).as_slice()));
}
Regression(pct) => {
regressed += 1;
try!(self.write_plain(format!(": {} ", *k).as_slice()));
try!(self.write_regressed());
try!(self.write_plain(format!(" by {:.2}%\n",
pct as f64).as_slice()));
}
}
}
try!(self.write_plain(format!("result of ratchet: {} metrics added, \
{} removed, {} improved, {} regressed, \
{} noise\n",
added, removed, improved, regressed,
noise).as_slice()));
if regressed == 0 {
try!(self.write_plain("updated ratchet file\n"));
} else {
try!(self.write_plain("left ratchet file untouched\n"));
}
Ok(())
}
pub fn write_run_finish(&mut self,
ratchet_metrics: &Option<Path>,
ratchet_pct: Option<f64>) -> io::IoResult<bool> {
pub fn write_run_finish(&mut self) -> io::IoResult<bool> {
assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
let ratchet_success = match *ratchet_metrics {
None => true,
Some(ref pth) => {
try!(self.write_plain(format!("\nusing metrics ratchet: {:?}\n",
pth.display()).as_slice()));
match ratchet_pct {
None => (),
Some(pct) =>
try!(self.write_plain(format!("with noise-tolerance \
forced to: {}%\n",
pct).as_slice()))
}
let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
try!(self.write_metric_diff(&diff));
ok
}
};
let test_success = self.failed == 0u;
if !test_success {
let success = self.failed == 0u;
if !success {
try!(self.write_failures());
}
let success = ratchet_success && test_success;
try!(self.write_plain("\ntest result: "));
if success {
// There's no parallelism at this point so it's safe to use color
@@ -841,15 +621,6 @@ impl<T: Writer> ConsoleTestState<T> {
}
}
pub fn fmt_metrics(mm: &MetricMap) -> String {
let MetricMap(ref mm) = *mm;
let v : Vec<String> = mm.iter()
.map(|(k,v)| format!("{}: {} (+/- {})", *k,
v.value as f64, v.noise as f64))
.collect();
v.connect(", ")
}
pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
if bs.mb_s != 0 {
format!("{:>9} ns/iter (+/- {}) = {} MB/s",
@@ -920,15 +691,7 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::IoR
None => {}
}
try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
match opts.save_metrics {
None => (),
Some(ref pth) => {
try!(st.metrics.save(pth));
try!(st.write_plain(format!("\nmetrics saved to: {:?}",
pth.display()).as_slice()));
}
}
return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
return st.write_run_finish();
}
#[test]
@@ -949,9 +712,6 @@ fn should_sort_failures_before_printing_them() {
log_out: None,
out: Raw(Vec::new()),
use_color: false,
show_boxplot: false,
boxplot_width: 0,
show_all_stats: false,
total: 0u,
passed: 0u,
failed: 0u,
@@ -1075,9 +835,10 @@ pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescA
// Remove tests that don't match the test filter
filtered = match opts.filter {
None => filtered,
Some(ref re) => {
filtered.into_iter()
.filter(|test| re.is_match(test.desc.name.as_slice())).collect()
Some(ref filter) => {
filtered.into_iter().filter(|test| {
test.desc.name.as_slice().contains(&filter[])
}).collect()
}
};
@@ -1102,18 +863,7 @@ pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescA
// Sort the tests alphabetically
filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
// Shard the remaining tests, if sharding requested.
match opts.test_shard {
None => filtered,
Some((a,b)) => {
filtered.into_iter().enumerate()
// note: using a - 1 so that the valid shards, for example, are
// 1.2 and 2.2 instead of 0.2 and 1.2
.filter(|&(i,_)| i % b == (a - 1))
.map(|(_,t)| t)
.collect()
}
}
filtered
}
pub fn run_test(opts: &TestOpts,
@@ -1204,87 +954,6 @@ impl MetricMap {
MetricMap(BTreeMap::new())
}
/// Load a MetricMap from a file.
///
/// # Panics
///
/// This function will panic if the path does not exist or the path does not
/// contain a valid metric map.
pub fn load(p: &Path) -> MetricMap {
assert!(p.exists());
let mut f = File::open(p).unwrap();
let value = json::from_reader(&mut f as &mut io::Reader).unwrap();
let mut decoder = json::Decoder::new(value);
MetricMap(match Decodable::decode(&mut decoder) {
Ok(t) => t,
Err(e) => panic!("failure decoding JSON: {:?}", e)
})
}
/// Write a MetricMap to a file.
pub fn save(&self, p: &Path) -> io::IoResult<()> {
let mut file = try!(File::create(p));
let MetricMap(ref map) = *self;
write!(&mut file, "{}", json::as_json(map))
}
/// Compare against another MetricMap. Optionally compare all
/// measurements in the maps using the provided `noise_pct` as a
/// percentage of each value to consider noise. If `None`, each
/// measurement's noise threshold is independently chosen as the
/// maximum of that measurement's recorded noise quantity in either
/// map.
pub fn compare_to_old(&self, old: &MetricMap,
noise_pct: Option<f64>) -> MetricDiff {
let mut diff : MetricDiff = BTreeMap::new();
let MetricMap(ref selfmap) = *self;
let MetricMap(ref old) = *old;
for (k, vold) in old.iter() {
let r = match selfmap.get(k) {
None => MetricRemoved,
Some(v) => {
let delta = v.value - vold.value;
let noise = match noise_pct {
None => vold.noise.abs().max(v.noise.abs()),
Some(pct) => vold.value * pct / 100.0
};
if delta.abs() <= noise {
LikelyNoise
} else {
let pct = delta.abs() / vold.value.max(f64::EPSILON) * 100.0;
if vold.noise < 0.0 {
// When 'noise' is negative, it means we want
// to see deltas that go up over time, and can
// only tolerate slight negative movement.
if delta < 0.0 {
Regression(pct)
} else {
Improvement(pct)
}
} else {
// When 'noise' is positive, it means we want
// to see deltas that go down over time, and
// can only tolerate slight positive movements.
if delta < 0.0 {
Improvement(pct)
} else {
Regression(pct)
}
}
}
}
};
diff.insert((*k).clone(), r);
}
let MetricMap(ref map) = *self;
for (k, _) in map.iter() {
if !diff.contains_key(k) {
diff.insert((*k).clone(), MetricAdded);
}
}
diff
}
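// The sign convention on `noise` above is easy to misread; a worked
// example, with numbers taken from test_metricmap_compare at the
// bottom of this file:
//   noise > 0 means "lower is better" (e.g. runtime):
//     old = 1000, new = 500   -> delta = -500  -> Improvement(50.0)
//     old = 1000, new = 2000  -> delta = +1000 -> Regression(100.0)
//   noise < 0 means "higher is better" (e.g. throughput):
//     old = 1000, new = 2000  -> delta = +1000 -> Improvement(100.0)
//     old = 1000, new = 500   -> delta = -500  -> Regression(50.0)
// In every case pct = delta.abs() / old.max(EPSILON) * 100.0.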
/// Insert a named `value` (+/- `noise`) metric into the map. The value
/// must be non-negative. The `noise` indicates the uncertainty of the
/// metric, which doubles as the "noise range" of acceptable
@@ -1307,31 +976,13 @@ impl MetricMap {
map.insert(name.to_string(), m);
}
/// Attempt to "ratchet" an external metric file. This involves loading
/// metrics from a metric file (if it exists), comparing against
/// the metrics in `self` using `compare_to_old`, and rewriting the
/// file to contain the metrics in `self` if none of the
/// `MetricChange`s are `Regression`. Returns the diff as well
/// as a boolean indicating whether the ratchet succeeded.
pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
let old = if p.exists() {
MetricMap::load(p)
} else {
MetricMap::new()
};
let diff : MetricDiff = self.compare_to_old(&old, pct);
let ok = diff.iter().all(|(_, v)| {
match *v {
Regression(_) => false,
_ => true
}
});
if ok {
self.save(p).unwrap();
}
return (diff, ok)
pub fn fmt_metrics(&self) -> String {
let MetricMap(ref mm) = *self;
let v : Vec<String> = mm.iter()
.map(|(k,v)| format!("{}: {} (+/- {})", *k,
v.value as f64, v.noise as f64))
.collect();
v.connect(", ")
}
}
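A sketch of the string fmt_metrics produces, using the values from the (removed) ratchet test below; the underlying BTreeMap keeps keys sorted, hence the alphabetical order:

// let mut m = MetricMap::new();
// m.insert_metric("runtime", 1000.0, 2.0);
// m.insert_metric("throughput", 50.0, 2.0);
// m.fmt_metrics() == "runtime: 1000 (+/- 2), throughput: 50 (+/- 2)"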
@@ -1474,8 +1125,7 @@ pub mod bench {
mod tests {
use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
TestDesc, TestDescAndFn, TestOpts, run_test,
Metric, MetricMap, MetricAdded, MetricRemoved,
Improvement, Regression, LikelyNoise,
Metric, MetricMap,
StaticTestName, DynTestName, DynTestFn, ShouldFail};
use std::io::TempDir;
use std::thunk::Thunk;
@@ -1583,16 +1233,6 @@ mod tests {
assert!(res == TrFailed);
}
#[test]
fn first_free_arg_should_be_a_filter() {
let args = vec!("progname".to_string(), "some_regex_filter".to_string());
let opts = match parse_opts(args.as_slice()) {
Some(Ok(o)) => o,
_ => panic!("Malformed arg in first_free_arg_should_be_a_filter")
};
assert!(opts.filter.expect("should've found filter").is_match("some_regex_filter"))
}
#[test]
fn parse_ignored_flag() {
let args = vec!("progname".to_string(),
@@ -1689,37 +1329,6 @@ mod tests {
}
}
#[test]
pub fn filter_tests_regex() {
let mut opts = TestOpts::new();
opts.filter = Some(::regex::Regex::new("a.*b.+c").unwrap());
let mut names = ["yes::abXc", "yes::aXXXbXXXXc",
"no::XYZ", "no::abc"];
names.sort();
fn test_fn() {}
let tests = names.iter().map(|name| {
TestDescAndFn {
desc: TestDesc {
name: DynTestName(name.to_string()),
ignore: false,
should_fail: ShouldFail::No,
},
testfn: DynTestFn(Thunk::new(test_fn))
}
}).collect();
let filtered = filter_tests(&opts, tests);
let expected: Vec<&str> =
names.iter().map(|&s| s).filter(|name| name.starts_with("yes")).collect();
assert_eq!(filtered.len(), expected.len());
for (test, expected_name) in filtered.iter().zip(expected.iter()) {
assert_eq!(test.desc.name.as_slice(), *expected_name);
}
}
#[test]
pub fn test_metricmap_compare() {
let mut m1 = MetricMap::new();
@@ -1741,81 +1350,5 @@ mod tests {
m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
let diff1 = m2.compare_to_old(&m1, None);
assert_eq!(*(diff1.get(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
assert_eq!(*(diff1.get(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
assert_eq!(*(diff1.get(&"in-second-noise".to_string()).unwrap()), MetricAdded);
assert_eq!(*(diff1.get(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
Regression(100.0));
assert_eq!(*(diff1.get(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
Improvement(50.0));
assert_eq!(*(diff1.get(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
Regression(50.0));
assert_eq!(*(diff1.get(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
Improvement(100.0));
assert_eq!(diff1.len(), 7);
let diff2 = m2.compare_to_old(&m1, Some(200.0));
assert_eq!(*(diff2.get(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
assert_eq!(*(diff2.get(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
assert_eq!(*(diff2.get(&"in-second-noise".to_string()).unwrap()), MetricAdded);
assert_eq!(*(diff2.get(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
LikelyNoise);
assert_eq!(*(diff2.get(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
LikelyNoise);
assert_eq!(*(diff2.get(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
LikelyNoise);
assert_eq!(*(diff2.get(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
LikelyNoise);
assert_eq!(diff2.len(), 7);
}
#[test]
pub fn ratchet_test() {
let dpth = TempDir::new("test-ratchet").ok().expect("couldn't create temp dir for ratchet test");
let pth = dpth.path().join("ratchet.json");
let mut m1 = MetricMap::new();
m1.insert_metric("runtime", 1000.0, 2.0);
m1.insert_metric("throughput", 50.0, 2.0);
let mut m2 = MetricMap::new();
m2.insert_metric("runtime", 1100.0, 2.0);
m2.insert_metric("throughput", 50.0, 2.0);
m1.save(&pth).unwrap();
// Ask for a ratchet that should fail to advance.
let (diff1, ok1) = m2.ratchet(&pth, None);
assert_eq!(ok1, false);
assert_eq!(diff1.len(), 2);
assert_eq!(*(diff1.get(&"runtime".to_string()).unwrap()), Regression(10.0));
assert_eq!(*(diff1.get(&"throughput".to_string()).unwrap()), LikelyNoise);
// Check that it was not rewritten.
let m3 = MetricMap::load(&pth);
let MetricMap(m3) = m3;
assert_eq!(m3.len(), 2);
assert_eq!(*(m3.get(&"runtime".to_string()).unwrap()), Metric::new(1000.0, 2.0));
assert_eq!(*(m3.get(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));
// Ask for a ratchet with an explicit noise-percentage override,
// that should advance.
let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
assert_eq!(ok2, true);
assert_eq!(diff2.len(), 2);
assert_eq!(*(diff2.get(&"runtime".to_string()).unwrap()), LikelyNoise);
assert_eq!(*(diff2.get(&"throughput".to_string()).unwrap()), LikelyNoise);
// Check that it was rewritten.
let m4 = MetricMap::load(&pth);
let MetricMap(m4) = m4;
assert_eq!(m4.len(), 2);
assert_eq!(*(m4.get(&"runtime".to_string()).unwrap()), Metric::new(1100.0, 2.0));
assert_eq!(*(m4.get(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));
}
}

src/libtest/stats.rs

@@ -13,9 +13,7 @@
use std::cmp::Ordering::{self, Less, Greater, Equal};
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::collections::hash_map::{self, Hasher};
use std::fmt;
use std::hash::Hash;
use std::io;
use std::mem;
use std::num::{Float, FromPrimitive};
@@ -332,111 +330,6 @@ pub fn winsorize<T: Float + FromPrimitive>(samples: &mut [T], pct: T) {
}
}
/// Writes the min, max, and quartiles of the provided `Summary` to the provided `Writer`.
pub fn write_5_number_summary<W: Writer, T: Float + fmt::String + fmt::Show>(w: &mut W,
s: &Summary<T>) -> io::IoResult<()> {
let (q1,q2,q3) = s.quartiles;
write!(w, "(min={}, q1={}, med={}, q3={}, max={})",
s.min,
q1,
q2,
q3,
s.max)
}
/// Render a boxplot to the provided writer. The boxplot shows the min, max and quartiles of the
/// provided `Summary` (thus includes the median) and is scaled to display within the range of the
/// nearest multiple-of-a-power-of-ten above and below the min and max of possible values, and
/// target `width_hint` characters of display (though it will be wider if necessary).
///
/// As an example, the summary with 5-number-summary `(min=15, q1=17, med=20, q3=24, max=31)` might
/// display as:
///
/// ```{.ignore}
/// 10 | [--****#******----------] | 40
/// ```
pub fn write_boxplot<W: Writer, T: Float + fmt::String + fmt::Show + FromPrimitive>(
w: &mut W,
s: &Summary<T>,
width_hint: uint)
-> io::IoResult<()> {
let (q1,q2,q3) = s.quartiles;
// the .abs() handles the case where numbers are negative
let ten: T = FromPrimitive::from_uint(10).unwrap();
let lomag = ten.powf(s.min.abs().log10().floor());
let himag = ten.powf(s.max.abs().log10().floor());
// need to consider when the limit is zero
let zero: T = Float::zero();
let lo = if lomag == Float::zero() {
zero
} else {
(s.min / lomag).floor() * lomag
};
let hi = if himag == Float::zero() {
zero
} else {
(s.max / himag).ceil() * himag
};
let range = hi - lo;
let lostr = lo.to_string();
let histr = hi.to_string();
let overhead_width = lostr.len() + histr.len() + 4;
let range_width = width_hint - overhead_width;
let range_float = FromPrimitive::from_uint(range_width).unwrap();
let char_step = range / range_float;
try!(write!(w, "{} |", lostr));
let mut c = 0;
let mut v = lo;
while c < range_width && v < s.min {
try!(write!(w, " "));
v = v + char_step;
c += 1;
}
try!(write!(w, "["));
c += 1;
while c < range_width && v < q1 {
try!(write!(w, "-"));
v = v + char_step;
c += 1;
}
while c < range_width && v < q2 {
try!(write!(w, "*"));
v = v + char_step;
c += 1;
}
try!(write!(w, "#"));
c += 1;
while c < range_width && v < q3 {
try!(write!(w, "*"));
v = v + char_step;
c += 1;
}
while c < range_width && v < s.max {
try!(write!(w, "-"));
v = v + char_step;
c += 1;
}
try!(write!(w, "]"));
while c < range_width {
try!(write!(w, " "));
v = v + char_step;
c += 1;
}
try!(write!(w, "| {}", histr));
Ok(())
}
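A sketch of driving the removed renderer, assuming the pre-1.0 Writer APIs this file still uses at this point:

// let samples = [15.0f64, 17.0, 20.0, 24.0, 31.0];
// let summ = Summary::new(&samples);
// let mut out = Vec::new();                    // Vec<u8> implements Writer
// write_boxplot(&mut out, &summ, 50).unwrap();
// // renders something like: 10 | [--****#******----------] | 40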
/// Returns a HashMap with the number of occurrences of every element in the
/// sequence that the iterator exposes.
pub fn freq_count<T, U>(mut iter: T) -> hash_map::HashMap<U, uint>
@@ -458,8 +351,6 @@ pub fn freq_count<T, U>(mut iter: T) -> hash_map::HashMap<U, uint>
mod tests {
use stats::Stats;
use stats::Summary;
use stats::write_5_number_summary;
use stats::write_boxplot;
use std::io;
use std::f64;
@@ -479,10 +370,6 @@ mod tests {
let mut w = io::stdout();
let w = &mut w;
(write!(w, "\n")).unwrap();
write_5_number_summary(w, &summ2).unwrap();
(write!(w, "\n")).unwrap();
write_boxplot(w, &summ2, 50).unwrap();
(write!(w, "\n")).unwrap();
assert_eq!(summ.sum, summ2.sum);
assert_eq!(summ.min, summ2.min);
@@ -1028,23 +915,6 @@ mod tests {
check(val, summ);
}
#[test]
fn test_boxplot_nonpositive() {
fn t(s: &Summary<f64>, expected: String) {
let mut m = Vec::new();
write_boxplot(&mut m, s, 30).unwrap();
let out = String::from_utf8(m).unwrap();
assert_eq!(out, expected);
}
t(&Summary::new(&[-2.0f64, -1.0f64]),
"-2 |[------******#*****---]| -1".to_string());
t(&Summary::new(&[0.0f64, 2.0f64]),
"0 |[-------*****#*******---]| 2".to_string());
t(&Summary::new(&[-2.0f64, 0.0f64]),
"-2 |[------******#******---]| 0".to_string());
}
#[test]
fn test_sum_f64s() {
assert_eq!([0.5f64, 3.2321f64, 1.5678f64].sum(), 5.2999);