Auto merge of #65503 - popzxc:refactor-libtest, r=wesleywiser

Refactor libtest

## Short overview

`libtest` has been refactored and split into smaller modules

## Description

The `libtest` module is already pretty big and hard to understand. Everything is mixed together: CLI handling, console output, test execution, etc.

This PR splits `libtest` into smaller logically-consistent modules, makes big functions smaller and more readable, and adds more comments, so `libtest` will be easier to understand and maintain.

Although there are a lot of changes, all the refactoring is "soft", meaning that no public interfaces were affected and nothing should be broken.

Thus this PR should be completely backward-compatible.
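For orientation, the new file layout corresponds roughly to the module tree below; this is a sketch reconstructed from the files in this diff, and the exact visibility modifiers in `lib.rs` may differ:

```rust
// Sketch of the new src/libtest module tree (reconstructed from this diff).
mod bench;       // Bencher, BenchSamples, fmt_bench_samples
mod cli;         // TestOpts, parse_opts
mod console;     // console runner: ConsoleTestState, OutputLocation
mod event;       // TestEvent, CompletedTest
mod formatters;  // pretty, terse, and json output formatters
mod helpers;     // concurrency, exit_code, isatty, metrics, sink
mod options;     // BenchMode, ColorConfig, OutputFormat, RunIgnored, ...
mod test_result; // TestResult, calc_result
mod time;        // timeouts and report-time thresholds
mod types;       // TestDesc, TestDescAndFn, TestFn, TestName
```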

r? @wesleywiser
cc @Centril
Merged by bors on 2019-10-22 12:01:41 +00:00 in commit 57bfb80962.
21 changed files with 2127 additions and 1797 deletions.

src/libtest/bench.rs (new file)

@@ -0,0 +1,258 @@
//! Benchmarking module.
pub use std::hint::black_box;
use super::{
event::CompletedTest,
helpers::sink::Sink,
options::BenchMode,
types::TestDesc,
test_result::TestResult,
Sender,
};
use crate::stats;
use std::time::{Duration, Instant};
use std::cmp;
use std::io;
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::sync::{Arc, Mutex};
/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
#[derive(Clone)]
pub struct Bencher {
mode: BenchMode,
summary: Option<stats::Summary>,
pub bytes: u64,
}
impl Bencher {
/// Callback for benchmark functions to run in their body.
pub fn iter<T, F>(&mut self, mut inner: F)
where
F: FnMut() -> T,
{
if self.mode == BenchMode::Single {
ns_iter_inner(&mut inner, 1);
return;
}
self.summary = Some(iter(&mut inner));
}
pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
where
F: FnMut(&mut Bencher),
{
f(self);
return self.summary;
}
}
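For context, `Bencher` is the value handed to `#[bench]` functions, and `iter` is the hot loop. A minimal usage sketch (requires a nightly toolchain, since `#[bench]` and the `test` crate are unstable):

```rust
#![feature(test)]
extern crate test;

use test::Bencher;

#[bench]
fn bench_sum(b: &mut Bencher) {
    // Setup outside `iter` runs once and is not timed.
    let data: Vec<u64> = (0..1_000).collect();
    // Only the closure passed to `iter` is measured.
    b.iter(|| data.iter().sum::<u64>());
}
```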
#[derive(Debug, Clone, PartialEq)]
pub struct BenchSamples {
pub ns_iter_summ: stats::Summary,
pub mb_s: usize,
}
pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
use std::fmt::Write;
let mut output = String::new();
let median = bs.ns_iter_summ.median as usize;
let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
output
.write_fmt(format_args!(
"{:>11} ns/iter (+/- {})",
fmt_thousands_sep(median, ','),
fmt_thousands_sep(deviation, ',')
))
.unwrap();
if bs.mb_s != 0 {
output
.write_fmt(format_args!(" = {} MB/s", bs.mb_s))
.unwrap();
}
output
}
// Format a number with thousands separators
fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
use std::fmt::Write;
let mut output = String::new();
let mut trailing = false;
for &pow in &[9, 6, 3, 0] {
let base = 10_usize.pow(pow);
if pow == 0 || trailing || n / base != 0 {
if !trailing {
output.write_fmt(format_args!("{}", n / base)).unwrap();
} else {
output.write_fmt(format_args!("{:03}", n / base)).unwrap();
}
if pow != 0 {
output.push(sep);
}
trailing = true;
}
n %= base;
}
output
}
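A hypothetical unit test for the separator logic (not part of this diff) showing that every group after the first is zero-padded to three digits:

```rust
#[cfg(test)]
mod fmt_thousands_sep_tests {
    use super::fmt_thousands_sep;

    #[test]
    fn groups_after_the_first_are_zero_padded() {
        assert_eq!(fmt_thousands_sep(0, ','), "0");
        assert_eq!(fmt_thousands_sep(999, ','), "999");
        assert_eq!(fmt_thousands_sep(1_234_567, ','), "1,234,567");
        assert_eq!(fmt_thousands_sep(1_000_000_000, ','), "1,000,000,000");
    }
}
```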
fn ns_from_dur(dur: Duration) -> u64 {
dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
}
fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
where
F: FnMut() -> T,
{
let start = Instant::now();
for _ in 0..k {
black_box(inner());
}
return ns_from_dur(start.elapsed());
}
pub fn iter<T, F>(inner: &mut F) -> stats::Summary
where
F: FnMut() -> T,
{
// Initial bench run to get ballpark figure.
let ns_single = ns_iter_inner(inner, 1);
// Try to estimate iter count for 1ms falling back to 1m
// iterations if first run took < 1ns.
let ns_target_total = 1_000_000; // 1ms
let mut n = ns_target_total / cmp::max(1, ns_single);
// if the first run took more than 1ms we don't want to just
// be left doing 0 iterations on every loop. The unfortunate
// side effect of not being able to do as many runs is
// automatically handled by the statistical analysis below
// (i.e., larger error bars).
n = cmp::max(1, n);
let mut total_run = Duration::new(0, 0);
let samples: &mut [f64] = &mut [0.0_f64; 50];
loop {
let loop_start = Instant::now();
for p in &mut *samples {
*p = ns_iter_inner(inner, n) as f64 / n as f64;
}
stats::winsorize(samples, 5.0);
let summ = stats::Summary::new(samples);
for p in &mut *samples {
let ns = ns_iter_inner(inner, 5 * n);
*p = ns as f64 / (5 * n) as f64;
}
stats::winsorize(samples, 5.0);
let summ5 = stats::Summary::new(samples);
let loop_run = loop_start.elapsed();
// If we've run for 100ms and seem to have converged to a
// stable median.
if loop_run > Duration::from_millis(100)
&& summ.median_abs_dev_pct < 1.0
&& summ.median - summ5.median < summ5.median_abs_dev
{
return summ5;
}
total_run = total_run + loop_run;
// Longest we ever run for is 3s.
if total_run > Duration::from_secs(3) {
return summ5;
}
// If we overflow here just return the results so far. We check a
// multiplier of 10 because we're about to multiply by 2 and the
// next iteration of the loop will also multiply by 5 (to calculate
// the summ5 result)
n = match n.checked_mul(10) {
Some(_) => n * 2,
None => {
return summ5;
}
};
}
}
pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<CompletedTest>, nocapture: bool, f: F)
where
F: FnMut(&mut Bencher),
{
let mut bs = Bencher {
mode: BenchMode::Auto,
summary: None,
bytes: 0,
};
let data = Arc::new(Mutex::new(Vec::new()));
let oldio = if !nocapture {
Some((
io::set_print(Some(Sink::new_boxed(&data))),
io::set_panic(Some(Sink::new_boxed(&data))),
))
} else {
None
};
let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
if let Some((printio, panicio)) = oldio {
io::set_print(printio);
io::set_panic(panicio);
}
let test_result = match result {
Ok(Some(ns_iter_summ)) => {
let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
let mb_s = bs.bytes * 1000 / ns_iter;
let bs = BenchSamples {
ns_iter_summ,
mb_s: mb_s as usize,
};
TestResult::TrBench(bs)
}
Ok(None) => {
// iter not called, so no data.
// FIXME: error in this case?
let samples: &mut [f64] = &mut [0.0_f64; 1];
let bs = BenchSamples {
ns_iter_summ: stats::Summary::new(samples),
mb_s: 0,
};
TestResult::TrBench(bs)
}
Err(_) => TestResult::TrFailed,
};
let stdout = data.lock().unwrap().to_vec();
let message = CompletedTest::new(desc, test_result, None, stdout);
monitor_ch.send(message).unwrap();
}
pub fn run_once<F>(f: F)
where
F: FnMut(&mut Bencher),
{
let mut bs = Bencher {
mode: BenchMode::Single,
summary: None,
bytes: 0,
};
bs.bench(f);
}

src/libtest/cli.rs (new file)

@@ -0,0 +1,444 @@
//! Module converting command-line arguments into test configuration.
use std::env;
use std::path::PathBuf;
use getopts;
use super::options::{RunIgnored, ColorConfig, OutputFormat, Options};
use super::time::TestTimeOptions;
use super::helpers::isatty;
#[derive(Debug)]
pub struct TestOpts {
pub list: bool,
pub filter: Option<String>,
pub filter_exact: bool,
pub exclude_should_panic: bool,
pub run_ignored: RunIgnored,
pub run_tests: bool,
pub bench_benchmarks: bool,
pub logfile: Option<PathBuf>,
pub nocapture: bool,
pub color: ColorConfig,
pub format: OutputFormat,
pub test_threads: Option<usize>,
pub skip: Vec<String>,
pub time_options: Option<TestTimeOptions>,
pub options: Options,
}
impl TestOpts {
pub fn use_color(&self) -> bool {
match self.color {
ColorConfig::AutoColor => !self.nocapture && isatty::stdout_isatty(),
ColorConfig::AlwaysColor => true,
ColorConfig::NeverColor => false,
}
}
}
/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;
/// Result of parsing the option part.
type OptPartRes<T> = Result<T, String>;
fn optgroups() -> getopts::Options {
let mut opts = getopts::Options::new();
opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
.optflag("", "ignored", "Run only ignored tests")
.optflag("", "exclude-should-panic", "Excludes tests marked as should_panic")
.optflag("", "test", "Run tests and not benchmarks")
.optflag("", "bench", "Run benchmarks instead of tests")
.optflag("", "list", "List all tests and benchmarks")
.optflag("h", "help", "Display this message (longer with --help)")
.optopt(
"",
"logfile",
"Write logs to the specified file instead \
of stdout",
"PATH",
)
.optflag(
"",
"nocapture",
"don't capture stdout/stderr of each \
task, allow printing directly",
)
.optopt(
"",
"test-threads",
"Number of threads used for running tests \
in parallel",
"n_threads",
)
.optmulti(
"",
"skip",
"Skip tests whose names contain FILTER (this flag can \
be used multiple times)",
"FILTER",
)
.optflag(
"q",
"quiet",
"Display one character per test instead of one line. \
Alias to --format=terse",
)
.optflag(
"",
"exact",
"Exactly match filters rather than by substring",
)
.optopt(
"",
"color",
"Configure coloring of output:
auto = colorize if stdout is a tty and tests are run serially (default);
always = always colorize output;
never = never colorize output;",
"auto|always|never",
)
.optopt(
"",
"format",
"Configure formatting of output:
pretty = Print verbose output;
terse = Display one character per test;
json = Output a json document",
"pretty|terse|json",
)
.optflag(
"",
"show-output",
"Show captured stdout of successful tests"
)
.optopt(
"Z",
"",
"Enable nightly-only flags:
unstable-options = Allow use of experimental features",
"unstable-options",
)
.optflagopt(
"",
"report-time",
"Show execution time of each test. Available values:
plain = do not colorize the execution time (default);
colored = colorize output according to the `color` parameter value;
Threshold values for colorized output can be configured via
`RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and
`RUST_TEST_TIME_DOCTEST` environment variables.
Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`.
Not available for --format=terse",
"plain|colored"
)
.optflag(
"",
"ensure-time",
"Treat excess of the test execution time limit as error.
Threshold values for this option can be configured via
`RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and
`RUST_TEST_TIME_DOCTEST` environment variables.
Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`.
`CRITICAL_TIME` here means the limit that should not be exceeded by the test.
"
);
return opts;
}
fn usage(binary: &str, options: &getopts::Options) {
let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
println!(
r#"{usage}
The FILTER string is tested against the name of all tests, and only those
tests whose names contain the filter are run.
By default, all tests are run in parallel. This can be altered with the
--test-threads flag or the RUST_TEST_THREADS environment variable when running
tests (set it to 1).
All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or by setting the RUST_TEST_NOCAPTURE
environment variable to a value other than "0". Logging is not captured by default.
Test Attributes:
`#[test]` - Indicates a function is a test to be run. This function
takes no arguments.
`#[bench]` - Indicates a function is a benchmark to be run. This
function takes one argument (test::Bencher).
`#[should_panic]` - This function (also labeled with `#[test]`) will only pass if
the code causes a panic (an assertion failure or panic!)
A message may be provided, which the failure string must
contain: #[should_panic(expected = "foo")].
`#[ignore]` - When applied to a function which is already attributed as a
test, then the test runner will ignore these tests during
normal test runs. Running with --ignored or --include-ignored will run
these tests."#,
usage = options.usage(&message)
);
}
/// Parses command line arguments into test options.
/// Returns `None` if help was requested (since we only show the help message and don't run tests),
/// returns `Some(Err(..))` if provided arguments are incorrect,
/// otherwise creates a `TestOpts` object and returns it.
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
// Parse matches.
let opts = optgroups();
let args = args.get(1..).unwrap_or(args);
let matches = match opts.parse(args) {
Ok(m) => m,
Err(f) => return Some(Err(f.to_string())),
};
// Check if help was requested.
if matches.opt_present("h") {
// Show help and do nothing more.
usage(&args[0], &opts);
return None;
}
// Actually parse the opts.
let opts_result = parse_opts_impl(matches);
Some(opts_result)
}
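A sketch of the calling convention from a hypothetical harness entry point (`harness_main_sketch` is made up; `parse_opts` is the function above):

```rust
use std::env;
use std::process;

fn harness_main_sketch() {
    let args: Vec<String> = env::args().collect();
    let opts = match parse_opts(&args) {
        Some(Ok(opts)) => opts,  // arguments parsed into a TestOpts
        Some(Err(msg)) => {
            // Invalid arguments: report the error and exit non-zero.
            eprintln!("error: {}", msg);
            process::exit(101);
        }
        None => return,          // --help was printed; nothing to run
    };
    let _ = opts; // ...hand the TestOpts to the runner here
}
```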
// Gets the option value and checks if unstable features are enabled.
macro_rules! unstable_optflag {
($matches:ident, $allow_unstable:ident, $option_name:literal) => {{
let opt = $matches.opt_present($option_name);
if !$allow_unstable && opt {
return Err(format!(
"The \"{}\" flag is only accepted on the nightly compiler",
$option_name
));
}
opt
}};
}
// Implementation of `parse_opts` that doesn't care about help message
// and returns a `Result`.
fn parse_opts_impl(matches: getopts::Matches) -> OptRes {
let allow_unstable = get_allow_unstable(&matches)?;
// Unstable flags
let exclude_should_panic = unstable_optflag!(matches, allow_unstable, "exclude-should-panic");
let include_ignored = unstable_optflag!(matches, allow_unstable, "include-ignored");
let time_options = get_time_options(&matches, allow_unstable)?;
let quiet = matches.opt_present("quiet");
let exact = matches.opt_present("exact");
let list = matches.opt_present("list");
let skip = matches.opt_strs("skip");
let bench_benchmarks = matches.opt_present("bench");
let run_tests = !bench_benchmarks || matches.opt_present("test");
let logfile = get_log_file(&matches)?;
let run_ignored = get_run_ignored(&matches, include_ignored)?;
let filter = get_filter(&matches)?;
let nocapture = get_nocapture(&matches)?;
let test_threads = get_test_threads(&matches)?;
let color = get_color_config(&matches)?;
let format = get_format(&matches, quiet, allow_unstable)?;
let options = Options::new().display_output(matches.opt_present("show-output"));
let test_opts = TestOpts {
list,
filter,
filter_exact: exact,
exclude_should_panic,
run_ignored,
run_tests,
bench_benchmarks,
logfile,
nocapture,
color,
format,
test_threads,
skip,
time_options,
options,
};
Ok(test_opts)
}
// FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
fn is_nightly() -> bool {
// Whether this is a feature-staged build, i.e., on the beta or stable channel
let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
// Whether we should enable unstable features for bootstrapping
let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();
bootstrap || !disable_unstable_features
}
// Gets the CLI options associated with the `report-time` feature.
fn get_time_options(
matches: &getopts::Matches,
allow_unstable: bool)
-> OptPartRes<Option<TestTimeOptions>> {
let report_time = unstable_optflag!(matches, allow_unstable, "report-time");
let colored_opt_str = matches.opt_str("report-time");
let mut report_time_colored = report_time && colored_opt_str == Some("colored".into());
let ensure_test_time = unstable_optflag!(matches, allow_unstable, "ensure-time");
// If the `ensure-time` option is provided, time output is enforced,
// so the user won't be confused if any of the tests silently fails.
let options = if report_time || ensure_test_time {
if ensure_test_time && !report_time {
report_time_colored = true;
}
Some(TestTimeOptions::new_from_env(ensure_test_time, report_time_colored))
} else {
None
};
Ok(options)
}
fn get_test_threads(matches: &getopts::Matches) -> OptPartRes<Option<usize>> {
let test_threads = match matches.opt_str("test-threads") {
Some(n_str) => match n_str.parse::<usize>() {
Ok(0) => return Err("argument for --test-threads must not be 0".to_string()),
Ok(n) => Some(n),
Err(e) => {
return Err(format!(
"argument for --test-threads must be a number > 0 \
(error: {})",
e
));
}
},
None => None,
};
Ok(test_threads)
}
fn get_format(
matches: &getopts::Matches,
quiet: bool,
allow_unstable: bool
) -> OptPartRes<OutputFormat> {
let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
None if quiet => OutputFormat::Terse,
Some("pretty") | None => OutputFormat::Pretty,
Some("terse") => OutputFormat::Terse,
Some("json") => {
if !allow_unstable {
return Err(
"The \"json\" format is only accepted on the nightly compiler".into(),
);
}
OutputFormat::Json
}
Some(v) => {
return Err(format!(
"argument for --format must be pretty, terse, or json (was \
{})",
v
));
}
};
Ok(format)
}
fn get_color_config(matches: &getopts::Matches) -> OptPartRes<ColorConfig> {
let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
Some("auto") | None => ColorConfig::AutoColor,
Some("always") => ColorConfig::AlwaysColor,
Some("never") => ColorConfig::NeverColor,
Some(v) => {
return Err(format!(
"argument for --color must be auto, always, or never (was \
{})",
v
));
}
};
Ok(color)
}
fn get_nocapture(matches: &getopts::Matches) -> OptPartRes<bool> {
let mut nocapture = matches.opt_present("nocapture");
if !nocapture {
nocapture = match env::var("RUST_TEST_NOCAPTURE") {
Ok(val) => &val != "0",
Err(_) => false,
};
}
Ok(nocapture)
}
fn get_run_ignored(matches: &getopts::Matches, include_ignored: bool) -> OptPartRes<RunIgnored> {
let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
(true, true) => {
return Err(
"the options --include-ignored and --ignored are mutually exclusive".into(),
);
}
(true, false) => RunIgnored::Yes,
(false, true) => RunIgnored::Only,
(false, false) => RunIgnored::No,
};
Ok(run_ignored)
}
fn get_filter(matches: &getopts::Matches) -> OptPartRes<Option<String>> {
let filter = if !matches.free.is_empty() {
Some(matches.free[0].clone())
} else {
None
};
Ok(filter)
}
fn get_allow_unstable(matches: &getopts::Matches) -> OptPartRes<bool> {
let mut allow_unstable = false;
if let Some(opt) = matches.opt_str("Z") {
if !is_nightly() {
return Err(
"the option `Z` is only accepted on the nightly compiler".into(),
);
}
match &*opt {
"unstable-options" => {
allow_unstable = true;
}
_ => {
return Err("Unrecognized option to `Z`".into());
}
}
};
Ok(allow_unstable)
}
fn get_log_file(matches: &getopts::Matches) -> OptPartRes<Option<PathBuf>> {
let logfile = matches.opt_str("logfile").map(|s| PathBuf::from(&s));
Ok(logfile)
}

src/libtest/console.rs (new file)

@@ -0,0 +1,308 @@
//! Module providing interface for running tests in the console.
use std::fs::File;
use std::io::prelude::Write;
use std::io;
use term;
use super::{
bench::fmt_bench_samples,
cli::TestOpts,
event::{TestEvent, CompletedTest},
formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter},
helpers::{
concurrency::get_concurrency,
metrics::MetricMap,
},
types::{TestDesc, TestDescAndFn, NamePadding},
options::{Options, OutputFormat},
test_result::TestResult,
time::TestExecTime,
run_tests,
filter_tests,
};
/// Generic wrapper over stdout.
pub enum OutputLocation<T> {
Pretty(Box<term::StdoutTerminal>),
Raw(T),
}
impl<T: Write> Write for OutputLocation<T> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
match *self {
OutputLocation::Pretty(ref mut term) => term.write(buf),
OutputLocation::Raw(ref mut stdout) => stdout.write(buf),
}
}
fn flush(&mut self) -> io::Result<()> {
match *self {
OutputLocation::Pretty(ref mut term) => term.flush(),
OutputLocation::Raw(ref mut stdout) => stdout.flush(),
}
}
}
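Because `Vec<u8>` implements `Write`, the `Raw` variant can also wrap an in-memory buffer, which is how the unit tests in this PR inspect formatter output. A minimal sketch (the helper name is hypothetical):

```rust
use std::io::Write;

fn capture_into_buffer_sketch() -> String {
    let mut out = OutputLocation::Raw(Vec::new());
    writeln!(out, "running 1 test").unwrap();
    // Recover the bytes written through the Write impl above.
    match out {
        OutputLocation::Raw(buf) => String::from_utf8(buf).unwrap(),
        OutputLocation::Pretty(_) => unreachable!(),
    }
}
```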
pub struct ConsoleTestState {
pub log_out: Option<File>,
pub total: usize,
pub passed: usize,
pub failed: usize,
pub ignored: usize,
pub allowed_fail: usize,
pub filtered_out: usize,
pub measured: usize,
pub metrics: MetricMap,
pub failures: Vec<(TestDesc, Vec<u8>)>,
pub not_failures: Vec<(TestDesc, Vec<u8>)>,
pub time_failures: Vec<(TestDesc, Vec<u8>)>,
pub options: Options,
}
impl ConsoleTestState {
pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
let log_out = match opts.logfile {
Some(ref path) => Some(File::create(path)?),
None => None,
};
Ok(ConsoleTestState {
log_out,
total: 0,
passed: 0,
failed: 0,
ignored: 0,
allowed_fail: 0,
filtered_out: 0,
measured: 0,
metrics: MetricMap::new(),
failures: Vec::new(),
not_failures: Vec::new(),
time_failures: Vec::new(),
options: opts.options,
})
}
pub fn write_log<F, S>(
&mut self,
msg: F,
) -> io::Result<()>
where
S: AsRef<str>,
F: FnOnce() -> S,
{
match self.log_out {
None => Ok(()),
Some(ref mut o) => {
let msg = msg();
let msg = msg.as_ref();
o.write_all(msg.as_bytes())
},
}
}
pub fn write_log_result(&mut self, test: &TestDesc,
result: &TestResult,
exec_time: Option<&TestExecTime>,
) -> io::Result<()> {
self.write_log(|| format!(
"{} {}",
match *result {
TestResult::TrOk => "ok".to_owned(),
TestResult::TrFailed => "failed".to_owned(),
TestResult::TrFailedMsg(ref msg) => format!("failed: {}", msg),
TestResult::TrIgnored => "ignored".to_owned(),
TestResult::TrAllowedFail => "failed (allowed)".to_owned(),
TestResult::TrBench(ref bs) => fmt_bench_samples(bs),
TestResult::TrTimedFail => "failed (time limit exceeded)".to_owned(),
},
test.name,
))?;
if let Some(exec_time) = exec_time {
self.write_log(|| format!(" <{}>", exec_time))?;
}
self.write_log(|| "\n")
}
fn current_test_count(&self) -> usize {
self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
}
}
// List the tests to console, and optionally to logfile. Filters are honored.
pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
let mut output = match term::stdout() {
None => OutputLocation::Raw(io::stdout()),
Some(t) => OutputLocation::Pretty(t),
};
let quiet = opts.format == OutputFormat::Terse;
let mut st = ConsoleTestState::new(opts)?;
let mut ntest = 0;
let mut nbench = 0;
for test in filter_tests(&opts, tests) {
use crate::TestFn::*;
let TestDescAndFn {
desc: TestDesc { name, .. },
testfn,
} = test;
let fntype = match testfn {
StaticTestFn(..) | DynTestFn(..) => {
ntest += 1;
"test"
}
StaticBenchFn(..) | DynBenchFn(..) => {
nbench += 1;
"benchmark"
}
};
writeln!(output, "{}: {}", name, fntype)?;
st.write_log(|| format!("{} {}\n", fntype, name))?;
}
fn plural(count: u32, s: &str) -> String {
match count {
1 => format!("{} {}", 1, s),
n => format!("{} {}s", n, s),
}
}
if !quiet {
if ntest != 0 || nbench != 0 {
writeln!(output, "")?;
}
writeln!(
output,
"{}, {}",
plural(ntest, "test"),
plural(nbench, "benchmark")
)?;
}
Ok(())
}
// Updates `ConsoleTestState` depending on the result of the test execution.
fn handle_test_result(st: &mut ConsoleTestState, completed_test: CompletedTest) {
let test = completed_test.desc;
let stdout = completed_test.stdout;
match completed_test.result {
TestResult::TrOk => {
st.passed += 1;
st.not_failures.push((test, stdout));
}
TestResult::TrIgnored => st.ignored += 1,
TestResult::TrAllowedFail => st.allowed_fail += 1,
TestResult::TrBench(bs) => {
st.metrics.insert_metric(
test.name.as_slice(),
bs.ns_iter_summ.median,
bs.ns_iter_summ.max - bs.ns_iter_summ.min,
);
st.measured += 1
}
TestResult::TrFailed => {
st.failed += 1;
st.failures.push((test, stdout));
}
TestResult::TrFailedMsg(msg) => {
st.failed += 1;
let mut stdout = stdout;
stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
st.failures.push((test, stdout));
}
TestResult::TrTimedFail => {
st.failed += 1;
st.time_failures.push((test, stdout));
}
}
}
// Handler for events that occur during test execution.
// It is provided as a callback to the `run_tests` function.
fn on_test_event(
event: &TestEvent,
st: &mut ConsoleTestState,
out: &mut dyn OutputFormatter,
) -> io::Result<()> {
match (*event).clone() {
TestEvent::TeFiltered(ref filtered_tests) => {
st.total = filtered_tests.len();
out.write_run_start(filtered_tests.len())?;
}
TestEvent::TeFilteredOut(filtered_out) => {
st.filtered_out = filtered_out;
}
TestEvent::TeWait(ref test) => out.write_test_start(test)?,
TestEvent::TeTimeout(ref test) => out.write_timeout(test)?,
TestEvent::TeResult(completed_test) => {
let test = &completed_test.desc;
let result = &completed_test.result;
let exec_time = &completed_test.exec_time;
let stdout = &completed_test.stdout;
st.write_log_result(test, result, exec_time.as_ref())?;
out.write_result(test, result, exec_time.as_ref(), &*stdout, st)?;
handle_test_result(st, completed_test);
}
}
Ok(())
}
/// A simple console test runner.
/// Runs the provided tests, reporting progress and results to stdout.
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
let output = match term::stdout() {
None => OutputLocation::Raw(io::stdout()),
Some(t) => OutputLocation::Pretty(t),
};
let max_name_len = tests
.iter()
.max_by_key(|t| len_if_padded(*t))
.map(|t| t.desc.name.as_slice().len())
.unwrap_or(0);
let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
let mut out: Box<dyn OutputFormatter> = match opts.format {
OutputFormat::Pretty => Box::new(PrettyFormatter::new(
output,
opts.use_color(),
max_name_len,
is_multithreaded,
opts.time_options,
)),
OutputFormat::Terse => Box::new(TerseFormatter::new(
output,
opts.use_color(),
max_name_len,
is_multithreaded,
)),
OutputFormat::Json => Box::new(JsonFormatter::new(output)),
};
let mut st = ConsoleTestState::new(opts)?;
run_tests(opts, tests, |x| on_test_event(&x, &mut st, &mut *out))?;
assert!(st.current_test_count() == st.total);
return out.write_run_finish(&st);
}
// Calculates padding for given test description.
fn len_if_padded(t: &TestDescAndFn) -> usize {
match t.testfn.padding() {
NamePadding::PadNone => 0,
NamePadding::PadOnRight => t.desc.name.as_slice().len(),
}
}

src/libtest/event.rs (new file)

@@ -0,0 +1,41 @@
//! Module containing the different events that can occur
//! during the test execution process.
use super::types::TestDesc;
use super::test_result::TestResult;
use super::time::TestExecTime;
#[derive(Debug, Clone)]
pub struct CompletedTest {
pub desc: TestDesc,
pub result: TestResult,
pub exec_time: Option<TestExecTime>,
pub stdout: Vec<u8>,
}
impl CompletedTest {
pub fn new(
desc: TestDesc,
result: TestResult,
exec_time: Option<TestExecTime>,
stdout: Vec<u8>
) -> Self {
Self {
desc,
result,
exec_time,
stdout,
}
}
}
unsafe impl Send for CompletedTest {}
#[derive(Debug, Clone)]
pub enum TestEvent {
TeFiltered(Vec<TestDesc>),
TeWait(TestDesc),
TeResult(CompletedTest),
TeTimeout(TestDesc),
TeFilteredOut(usize),
}

src/libtest/formatters/json.rs

@@ -1,4 +1,16 @@
use super::*;
use std::{
io,
io::prelude::Write,
borrow::Cow,
};
use crate::{
types::TestDesc,
time,
test_result::TestResult,
console::{ConsoleTestState, OutputLocation},
};
use super::OutputFormatter;
pub(crate) struct JsonFormatter<T> {
out: OutputLocation<T>,
@@ -27,7 +39,7 @@ impl<T: Write> JsonFormatter<T> {
ty: &str,
name: &str,
evt: &str,
exec_time: Option<&TestExecTime>,
exec_time: Option<&time::TestExecTime>,
stdout: Option<Cow<'_, str>>,
extra: Option<&str>,
) -> io::Result<()> {
@@ -76,25 +88,26 @@ impl<T: Write> OutputFormatter for JsonFormatter<T> {
&mut self,
desc: &TestDesc,
result: &TestResult,
exec_time: Option<&TestExecTime>,
exec_time: Option<&time::TestExecTime>,
stdout: &[u8],
state: &ConsoleTestState,
) -> io::Result<()> {
let stdout = if (state.options.display_output || *result != TrOk) && stdout.len() > 0 {
let display_stdout = state.options.display_output || *result != TestResult::TrOk;
let stdout = if display_stdout && stdout.len() > 0 {
Some(String::from_utf8_lossy(stdout))
} else {
None
};
match *result {
TrOk => {
TestResult::TrOk => {
self.write_event("test", desc.name.as_slice(), "ok", exec_time, stdout, None)
}
TrFailed => {
TestResult::TrFailed => {
self.write_event("test", desc.name.as_slice(), "failed", exec_time, stdout, None)
}
TrTimedFail => self.write_event(
TestResult::TrTimedFail => self.write_event(
"test",
desc.name.as_slice(),
"failed",
@@ -103,7 +116,7 @@ impl<T: Write> OutputFormatter for JsonFormatter<T> {
Some(r#""reason": "time limit exceeded""#),
),
TrFailedMsg(ref m) => self.write_event(
TestResult::TrFailedMsg(ref m) => self.write_event(
"test",
desc.name.as_slice(),
"failed",
@@ -112,11 +125,11 @@ impl<T: Write> OutputFormatter for JsonFormatter<T> {
Some(&*format!(r#""message": "{}""#, EscapedString(m))),
),
TrIgnored => {
TestResult::TrIgnored => {
self.write_event("test", desc.name.as_slice(), "ignored", exec_time, stdout, None)
}
TrAllowedFail => self.write_event(
TestResult::TrAllowedFail => self.write_event(
"test",
desc.name.as_slice(),
"allowed_failure",
@@ -125,7 +138,7 @@ impl<T: Write> OutputFormatter for JsonFormatter<T> {
None,
),
TrBench(ref bs) => {
TestResult::TrBench(ref bs) => {
let median = bs.ns_iter_summ.median as usize;
let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;

src/libtest/formatters/mod.rs

@@ -1,4 +1,14 @@
use super::*;
use std::{
io,
io::prelude::Write,
};
use crate::{
types::{TestDesc, TestName},
time,
test_result::TestResult,
console::{ConsoleTestState},
};
mod pretty;
mod json;
@@ -16,7 +26,7 @@ pub(crate) trait OutputFormatter {
&mut self,
desc: &TestDesc,
result: &TestResult,
exec_time: Option<&TestExecTime>,
exec_time: Option<&time::TestExecTime>,
stdout: &[u8],
state: &ConsoleTestState,
) -> io::Result<()>;

src/libtest/formatters/pretty.rs

@@ -1,9 +1,21 @@
use super::*;
use std::{
io,
io::prelude::Write,
};
use crate::{
types::TestDesc,
time,
test_result::TestResult,
console::{ConsoleTestState, OutputLocation},
bench::fmt_bench_samples,
};
use super::OutputFormatter;
pub(crate) struct PrettyFormatter<T> {
out: OutputLocation<T>,
use_color: bool,
time_options: Option<TestTimeOptions>,
time_options: Option<time::TestTimeOptions>,
/// Number of columns to fill when aligning names
max_name_len: usize,
@@ -17,7 +29,7 @@ impl<T: Write> PrettyFormatter<T> {
use_color: bool,
max_name_len: usize,
is_multithreaded: bool,
time_options: Option<TestTimeOptions>,
time_options: Option<time::TestTimeOptions>,
) -> Self {
PrettyFormatter {
out,
@@ -67,7 +79,7 @@ impl<T: Write> PrettyFormatter<T> {
pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
match self.out {
Pretty(ref mut term) => {
OutputLocation::Pretty(ref mut term) => {
if self.use_color {
term.fg(color)?;
}
@@ -77,7 +89,7 @@ impl<T: Write> PrettyFormatter<T> {
}
term.flush()
}
Raw(ref mut stdout) => {
OutputLocation::Raw(ref mut stdout) => {
stdout.write_all(word.as_bytes())?;
stdout.flush()
}
@@ -93,7 +105,7 @@ impl<T: Write> PrettyFormatter<T> {
fn write_time(
&mut self,
desc: &TestDesc,
exec_time: Option<&TestExecTime>
exec_time: Option<&time::TestExecTime>
) -> io::Result<()> {
if let (Some(opts), Some(time)) = (self.time_options, exec_time) {
let time_str = format!(" <{}>", time);
@@ -194,7 +206,7 @@ impl<T: Write> OutputFormatter for PrettyFormatter<T> {
&mut self,
desc: &TestDesc,
result: &TestResult,
exec_time: Option<&TestExecTime>,
exec_time: Option<&time::TestExecTime>,
_: &[u8],
_: &ConsoleTestState,
) -> io::Result<()> {
@@ -203,15 +215,15 @@ impl<T: Write> OutputFormatter for PrettyFormatter<T> {
}
match *result {
TrOk => self.write_ok()?,
TrFailed | TrFailedMsg(_) => self.write_failed()?,
TrIgnored => self.write_ignored()?,
TrAllowedFail => self.write_allowed_fail()?,
TrBench(ref bs) => {
TestResult::TrOk => self.write_ok()?,
TestResult::TrFailed | TestResult::TrFailedMsg(_) => self.write_failed()?,
TestResult::TrIgnored => self.write_ignored()?,
TestResult::TrAllowedFail => self.write_allowed_fail()?,
TestResult::TrBench(ref bs) => {
self.write_bench()?;
self.write_plain(&format!(": {}", fmt_bench_samples(bs)))?;
}
TrTimedFail => self.write_time_failed()?,
TestResult::TrTimedFail => self.write_time_failed()?,
}
self.write_time(desc, exec_time)?;
@@ -225,7 +237,7 @@ impl<T: Write> OutputFormatter for PrettyFormatter<T> {
self.write_plain(&format!(
"test {} has been running for over {} seconds\n",
desc.name, TEST_WARN_TIMEOUT_S
desc.name, time::TEST_WARN_TIMEOUT_S
))
}

src/libtest/formatters/terse.rs

@@ -1,4 +1,20 @@
use super::*;
use std::{
io,
io::prelude::Write,
};
use crate::{
types::TestDesc,
time,
test_result::TestResult,
types::NamePadding,
console::{ConsoleTestState, OutputLocation},
bench::fmt_bench_samples,
};
use super::OutputFormatter;
// insert a '\n' after 100 tests in quiet mode
const QUIET_MODE_MAX_COLUMN: usize = 100;
pub(crate) struct TerseFormatter<T> {
out: OutputLocation<T>,
@@ -68,7 +84,7 @@ impl<T: Write> TerseFormatter<T> {
pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
match self.out {
Pretty(ref mut term) => {
OutputLocation::Pretty(ref mut term) => {
if self.use_color {
term.fg(color)?;
}
@@ -78,7 +94,7 @@ impl<T: Write> TerseFormatter<T> {
}
term.flush()
}
Raw(ref mut stdout) => {
OutputLocation::Raw(ref mut stdout) => {
stdout.write_all(word.as_bytes())?;
stdout.flush()
}
@@ -163,7 +179,7 @@ impl<T: Write> OutputFormatter for TerseFormatter<T> {
// in order to indicate benchmarks.
// When running benchmarks, terse-mode should still print their name as if
// it is the Pretty formatter.
if !self.is_multithreaded && desc.name.padding() == PadOnRight {
if !self.is_multithreaded && desc.name.padding() == NamePadding::PadOnRight {
self.write_test_name(desc)?;
}
@@ -174,16 +190,18 @@
&mut self,
desc: &TestDesc,
result: &TestResult,
_: Option<&TestExecTime>,
_: Option<&time::TestExecTime>,
_: &[u8],
_: &ConsoleTestState,
) -> io::Result<()> {
match *result {
TrOk => self.write_ok(),
TrFailed | TrFailedMsg(_) | TrTimedFail => self.write_failed(),
TrIgnored => self.write_ignored(),
TrAllowedFail => self.write_allowed_fail(),
TrBench(ref bs) => {
TestResult::TrOk => self.write_ok(),
TestResult::TrFailed
| TestResult::TrFailedMsg(_)
| TestResult::TrTimedFail => self.write_failed(),
TestResult::TrIgnored => self.write_ignored(),
TestResult::TrAllowedFail => self.write_allowed_fail(),
TestResult::TrBench(ref bs) => {
if self.is_multithreaded {
self.write_test_name(desc)?;
}
@@ -196,7 +214,7 @@
fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
self.write_plain(&format!(
"test {} has been running for over {} seconds\n",
desc.name, TEST_WARN_TIMEOUT_S
desc.name, time::TEST_WARN_TIMEOUT_S
))
}

src/libtest/helpers/concurrency.rs (new file)

@@ -0,0 +1,143 @@
//! Helper module to determine the number of threads to be used
//! during test execution.
use std::env;
#[allow(deprecated)]
pub fn get_concurrency() -> usize {
return match env::var("RUST_TEST_THREADS") {
Ok(s) => {
let opt_n: Option<usize> = s.parse().ok();
match opt_n {
Some(n) if n > 0 => n,
_ => panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.", s),
}
}
Err(..) => num_cpus(),
};
#[cfg(windows)]
#[allow(nonstandard_style)]
fn num_cpus() -> usize {
#[repr(C)]
struct SYSTEM_INFO {
wProcessorArchitecture: u16,
wReserved: u16,
dwPageSize: u32,
lpMinimumApplicationAddress: *mut u8,
lpMaximumApplicationAddress: *mut u8,
dwActiveProcessorMask: *mut u8,
dwNumberOfProcessors: u32,
dwProcessorType: u32,
dwAllocationGranularity: u32,
wProcessorLevel: u16,
wProcessorRevision: u16,
}
extern "system" {
fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
}
unsafe {
let mut sysinfo = std::mem::zeroed();
GetSystemInfo(&mut sysinfo);
sysinfo.dwNumberOfProcessors as usize
}
}
#[cfg(target_os = "vxworks")]
fn num_cpus() -> usize {
// FIXME: Implement num_cpus on vxWorks
1
}
#[cfg(target_os = "redox")]
fn num_cpus() -> usize {
// FIXME: Implement num_cpus on Redox
1
}
#[cfg(any(
all(target_arch = "wasm32", not(target_os = "emscripten")),
all(target_vendor = "fortanix", target_env = "sgx")
))]
fn num_cpus() -> usize {
1
}
#[cfg(any(
target_os = "android",
target_os = "cloudabi",
target_os = "emscripten",
target_os = "fuchsia",
target_os = "ios",
target_os = "linux",
target_os = "macos",
target_os = "solaris",
))]
fn num_cpus() -> usize {
unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
}
#[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "netbsd"))]
fn num_cpus() -> usize {
use std::ptr;
let mut cpus: libc::c_uint = 0;
let mut cpus_size = std::mem::size_of_val(&cpus);
unsafe {
cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
}
if cpus < 1 {
let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
unsafe {
libc::sysctl(
mib.as_mut_ptr(),
2,
&mut cpus as *mut _ as *mut _,
&mut cpus_size as *mut _ as *mut _,
ptr::null_mut(),
0,
);
}
if cpus < 1 {
cpus = 1;
}
}
cpus as usize
}
#[cfg(target_os = "openbsd")]
fn num_cpus() -> usize {
use std::ptr;
let mut cpus: libc::c_uint = 0;
let mut cpus_size = std::mem::size_of_val(&cpus);
let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
unsafe {
libc::sysctl(
mib.as_mut_ptr(),
2,
&mut cpus as *mut _ as *mut _,
&mut cpus_size as *mut _ as *mut _,
ptr::null_mut(),
0,
);
}
if cpus < 1 {
cpus = 1;
}
cpus as usize
}
#[cfg(target_os = "haiku")]
fn num_cpus() -> usize {
// FIXME: implement
1
}
#[cfg(target_os = "l4re")]
fn num_cpus() -> usize {
// FIXME: implement
1
}
}
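A hypothetical check of the override behaviour (not part of this diff). Note that mutating the environment is process-global, so such a test must not run concurrently with anything else touching this variable:

```rust
#[cfg(test)]
mod concurrency_tests {
    use super::get_concurrency;
    use std::env;

    #[test]
    fn env_var_overrides_cpu_detection() {
        env::set_var("RUST_TEST_THREADS", "4");
        assert_eq!(get_concurrency(), 4);
        // Without the variable we fall back to the per-platform num_cpus().
        env::remove_var("RUST_TEST_THREADS");
        assert!(get_concurrency() >= 1);
    }
}
```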

src/libtest/helpers/exit_code.rs (new file)

@@ -0,0 +1,20 @@
//! Helper module to detect subprocess exit code.
use std::process::ExitStatus;
#[cfg(not(unix))]
pub fn get_exit_code(status: ExitStatus) -> Result<i32, String> {
status.code().ok_or("received no exit code from child process".into())
}
#[cfg(unix)]
pub fn get_exit_code(status: ExitStatus) -> Result<i32, String> {
use std::os::unix::process::ExitStatusExt;
match status.code() {
Some(code) => Ok(code),
None => match status.signal() {
Some(signal) => Err(format!("child process exited with signal {}", signal)),
None => Err("child process exited with unknown signal".into()),
}
}
}
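A usage sketch, assuming a POSIX-style `true` binary is available on `PATH` (that assumption is not part of this diff):

```rust
use std::process::Command;

fn child_exit_code_sketch() -> Result<i32, String> {
    let status = Command::new("true") // assumed to exist on PATH
        .status()
        .map_err(|e| e.to_string())?;
    // On Unix, a signal-terminated child has no exit code; get_exit_code
    // maps such statuses to a descriptive error instead.
    get_exit_code(status)
}
```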

src/libtest/helpers/isatty.rs (new file)

@@ -0,0 +1,33 @@
//! Helper module which provides a function to test
//! if stdout is a tty.
#[cfg(any(
target_os = "cloudabi",
all(target_arch = "wasm32", not(target_os = "emscripten")),
all(target_vendor = "fortanix", target_env = "sgx")
))]
pub fn stdout_isatty() -> bool {
// FIXME: Implement isatty on SGX
false
}
#[cfg(unix)]
pub fn stdout_isatty() -> bool {
unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
}
#[cfg(windows)]
pub fn stdout_isatty() -> bool {
type DWORD = u32;
type BOOL = i32;
type HANDLE = *mut u8;
type LPDWORD = *mut u32;
const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
extern "system" {
fn GetStdHandle(which: DWORD) -> HANDLE;
fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
}
unsafe {
let handle = GetStdHandle(STD_OUTPUT_HANDLE);
let mut out = 0;
GetConsoleMode(handle, &mut out) != 0
}
}

src/libtest/helpers/metrics.rs (new file)

@@ -0,0 +1,50 @@
//! Benchmark metrics.
use std::collections::BTreeMap;
#[derive(Clone, PartialEq, Debug, Copy)]
pub struct Metric {
value: f64,
noise: f64,
}
impl Metric {
pub fn new(value: f64, noise: f64) -> Metric {
Metric { value, noise }
}
}
#[derive(Clone, PartialEq)]
pub struct MetricMap(BTreeMap<String, Metric>);
impl MetricMap {
pub fn new() -> MetricMap {
MetricMap(BTreeMap::new())
}
/// Insert a named `value` (+/- `noise`) metric into the map. The value
/// must be non-negative. The `noise` indicates the uncertainty of the
/// metric, which doubles as the "noise range" of acceptable
/// pairwise-regressions on this named value, when comparing from one
/// metric to the next using `compare_to_old`.
///
/// If `noise` is positive, then it means this metric is of a value
/// you want to see grow smaller, so a change larger than `noise` in the
/// positive direction represents a regression.
///
/// If `noise` is negative, then it means this metric is of a value
/// you want to see grow larger, so a change larger than `noise` in the
/// negative direction represents a regression.
pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
let m = Metric { value, noise };
self.0.insert(name.to_owned(), m);
}
pub fn fmt_metrics(&self) -> String {
let v = self
.0
.iter()
.map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
.collect::<Vec<_>>();
v.join(", ")
}
}
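A hypothetical unit test (not in this diff) showing the formatting; the `BTreeMap` keeps keys sorted, so the output order is deterministic:

```rust
#[cfg(test)]
mod metric_map_tests {
    use super::MetricMap;

    #[test]
    fn formats_metrics_in_key_order() {
        let mut map = MetricMap::new();
        map.insert_metric("ns_iter", 1500.0, 20.0);
        map.insert_metric("allocs", 3.0, 0.0);
        // "allocs" sorts before "ns_iter".
        assert_eq!(
            map.fmt_metrics(),
            "allocs: 3 (+/- 0), ns_iter: 1500 (+/- 20)"
        );
    }
}
```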

src/libtest/helpers/mod.rs (new file)

@@ -0,0 +1,8 @@
//! Module with common helpers not directly related to tests
//! but used in `libtest`.
pub mod concurrency;
pub mod isatty;
pub mod metrics;
pub mod sink;
pub mod exit_code;

src/libtest/helpers/sink.rs (new file)

@@ -0,0 +1,24 @@
//! Module providing a helper structure to capture output in subprocesses.
use std::{
io,
io::prelude::Write,
sync::{Arc, Mutex},
};
pub struct Sink(Arc<Mutex<Vec<u8>>>);
impl Sink {
pub fn new_boxed(data: &Arc<Mutex<Vec<u8>>>) -> Box<Self> {
Box::new(Self(data.clone()))
}
}
impl Write for Sink {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
Write::write(&mut *self.0.lock().unwrap(), data)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
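A minimal round-trip sketch (the function name is hypothetical): the boxed `Sink` and the caller share the same buffer through the `Arc`, which is how `bench::benchmark` above reads back captured output:

```rust
use std::io::Write;
use std::sync::{Arc, Mutex};

fn sink_roundtrip_sketch() {
    let data = Arc::new(Mutex::new(Vec::new()));
    let mut sink = Sink::new_boxed(&data);
    write!(sink, "captured output").unwrap();
    // Everything written through the sink is visible via the shared handle.
    assert_eq!(data.lock().unwrap().as_slice(), b"captured output");
}
```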

(File diff suppressed because it is too large.)

src/libtest/options.rs (new file)

@@ -0,0 +1,90 @@
//! Enums denoting options for test execution.
/// Whether to execute tests concurrently or not
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Concurrent {
Yes,
No,
}
/// Number of times to run a benchmarked function
#[derive(Clone, PartialEq, Eq)]
pub enum BenchMode {
Auto,
Single,
}
/// Whether a test is expected to panic or not
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ShouldPanic {
No,
Yes,
YesWithMessage(&'static str),
}
/// Whether console output should be colored or not
#[derive(Copy, Clone, Debug)]
pub enum ColorConfig {
AutoColor,
AlwaysColor,
NeverColor,
}
/// Format of the test results output
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum OutputFormat {
/// Verbose output
Pretty,
/// Quiet output
Terse,
/// JSON output
Json,
}
/// Whether ignored tests should be run or not
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum RunIgnored {
Yes,
No,
/// Run only ignored tests
Only,
}
#[derive(Clone, Copy)]
pub enum RunStrategy {
/// Runs the test in the current process, and sends the result back over the
/// supplied channel.
InProcess,
/// Spawns a subprocess to run the test, and sends the result back over the
/// supplied channel. Requires `argv[0]` to exist and point to the binary
/// that's currently running.
SpawnPrimary,
}
/// Options for the test run defined by the caller (instead of CLI arguments).
/// If we want to add other options later, they can simply be added to this struct.
#[derive(Copy, Clone, Debug)]
pub struct Options {
pub display_output: bool,
pub panic_abort: bool,
}
impl Options {
pub fn new() -> Options {
Options {
display_output: false,
panic_abort: false,
}
}
pub fn display_output(mut self, display_output: bool) -> Options {
self.display_output = display_output;
self
}
pub fn panic_abort(mut self, panic_abort: bool) -> Options {
self.panic_abort = panic_abort;
self
}
}
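Builder-style usage, mirroring how `parse_opts_impl` sets `display_output` from the `--show-output` flag (the wrapper function is hypothetical):

```rust
fn options_sketch() -> Options {
    // Each setter consumes and returns the value, so construction chains.
    Options::new().display_output(true).panic_abort(false)
}
```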

src/libtest/stats/tests.rs

@@ -4,7 +4,7 @@ extern crate test;
use std::f64;
use std::io::prelude::*;
use std::io;
use self::test::Bencher;
use self::test::test::Bencher;
// Test vectors generated from R, using the script src/etc/stat-test-vectors.r.

src/libtest/test_result.rs (new file)

@@ -0,0 +1,107 @@
use std::any::Any;
use super::bench::BenchSamples;
use super::time;
use super::types::TestDesc;
use super::options::ShouldPanic;
pub use self::TestResult::*;
// Return codes for secondary process.
// Start somewhere other than 0 so we know the return code means what we think
// it means.
pub const TR_OK: i32 = 50;
pub const TR_FAILED: i32 = 51;
#[derive(Debug, Clone, PartialEq)]
pub enum TestResult {
TrOk,
TrFailed,
TrFailedMsg(String),
TrIgnored,
TrAllowedFail,
TrBench(BenchSamples),
TrTimedFail,
}
unsafe impl Send for TestResult {}
/// Creates a `TestResult` depending on the raw result of test execution
/// and associated data.
pub fn calc_result<'a>(
desc: &TestDesc,
task_result: Result<(), &'a (dyn Any + 'static + Send)>,
time_opts: &Option<time::TestTimeOptions>,
exec_time: &Option<time::TestExecTime>
) -> TestResult {
let result = match (&desc.should_panic, task_result) {
(&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TestResult::TrOk,
(&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
if err
.downcast_ref::<String>()
.map(|e| &**e)
.or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
.map(|e| e.contains(msg))
.unwrap_or(false)
{
TestResult::TrOk
} else {
if desc.allow_fail {
TestResult::TrAllowedFail
} else {
TestResult::TrFailedMsg(
format!("panic did not include expected string '{}'", msg)
)
}
}
}
(&ShouldPanic::Yes, Ok(())) => {
TestResult::TrFailedMsg("test did not panic as expected".to_string())
}
_ if desc.allow_fail => TestResult::TrAllowedFail,
_ => TestResult::TrFailed,
};
// If test is already failed (or allowed to fail), do not change the result.
if result != TestResult::TrOk {
return result;
}
// Check if test is failed due to timeout.
if let (Some(opts), Some(time)) = (time_opts, exec_time) {
if opts.error_on_excess && opts.is_critical(desc, time) {
return TestResult::TrTimedFail;
}
}
result
}
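A hypothetical test (not in this diff) exercising the `ShouldPanic::Yes` path: a body that panics is reported as `TrOk`. The default panic hook still prints the payload to stderr in this sketch:

```rust
#[cfg(test)]
mod calc_result_tests {
    use super::{calc_result, TestResult};
    use crate::options::ShouldPanic;
    use crate::types::{TestDesc, TestName, TestType};
    use std::panic::catch_unwind;

    #[test]
    fn expected_panic_is_ok() {
        let desc = TestDesc {
            name: TestName::StaticTestName("panics"),
            ignore: false,
            should_panic: ShouldPanic::Yes,
            allow_fail: false,
            test_type: TestType::Unknown,
        };
        // Capture a panic payload the way the in-process runner would.
        let body: Result<(), _> = catch_unwind(|| panic!("boom"));
        let payload = body.unwrap_err();
        let result = calc_result(&desc, Err(payload.as_ref()), &None, &None);
        assert_eq!(result, TestResult::TrOk);
    }
}
```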
/// Creates a `TestResult` depending on the exit code of test subprocess.
pub fn get_result_from_exit_code(
desc: &TestDesc,
code: i32,
time_opts: &Option<time::TestTimeOptions>,
exec_time: &Option<time::TestExecTime>,
) -> TestResult {
let result = match (desc.allow_fail, code) {
(_, TR_OK) => TestResult::TrOk,
(true, TR_FAILED) => TestResult::TrAllowedFail,
(false, TR_FAILED) => TestResult::TrFailed,
(_, _) => TestResult::TrFailedMsg(format!("got unexpected return code {}", code)),
};
// If test is already failed (or allowed to fail), do not change the result.
if result != TestResult::TrOk {
return result;
}
// Check if test is failed due to timeout.
if let (Some(opts), Some(time)) = (time_opts, exec_time) {
if opts.error_on_excess && opts.is_critical(desc, time) {
return TestResult::TrTimedFail;
}
}
result
}

src/libtest/tests.rs

@@ -1,11 +1,19 @@
use super::*;
use crate::test::{
filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, RunIgnored, RunStrategy,
// ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TestTimeOptions,
// TestType, TrFailedMsg, TrIgnored, TrOk,
ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts,
TrIgnored, TrOk,
use crate::{
bench::Bencher,
console::OutputLocation,
options::OutputFormat,
time::{TimeThreshold, TestTimeOptions},
formatters::PrettyFormatter,
test::{
filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap,
RunIgnored, RunStrategy, ShouldPanic, StaticTestName, TestDesc,
TestDescAndFn, TestOpts, TrIgnored, TrOk,
// FIXME (introduced by #65251)
// ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TestTimeOptions,
// TestType, TrFailedMsg, TrIgnored, TrOk,
},
};
use std::sync::mpsc::channel;
use std::time::Duration;
@@ -74,8 +82,8 @@ pub fn do_not_run_ignored_tests() {
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
let (_, res, _, _) = rx.recv().unwrap();
assert!(res != TrOk);
let result = rx.recv().unwrap().result;
assert!(result != TrOk);
}
#[test]
@@ -93,11 +101,11 @@ pub fn ignored_tests_result_in_ignored() {
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
let (_, res, _, _) = rx.recv().unwrap();
assert!(res == TrIgnored);
let result = rx.recv().unwrap().result;
assert!(result == TrIgnored);
}
// FIXME: Re-enable emscripten once it can catch panics again
// FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
#[test]
#[cfg(not(target_os = "emscripten"))]
fn test_should_panic() {
@@ -116,11 +124,11 @@ fn test_should_panic() {
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
let (_, res, _, _) = rx.recv().unwrap();
assert!(res == TrOk);
let result = rx.recv().unwrap().result;
assert!(result == TrOk);
}
// FIXME: Re-enable emscripten once it can catch panics again
// FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
#[test]
#[cfg(not(target_os = "emscripten"))]
fn test_should_panic_good_message() {
@@ -139,11 +147,11 @@ fn test_should_panic_good_message() {
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
let (_, res, _, _) = rx.recv().unwrap();
assert!(res == TrOk);
let result = rx.recv().unwrap().result;
assert!(result == TrOk);
}
// FIXME: Re-enable emscripten once it can catch panics again
// FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
#[test]
#[cfg(not(target_os = "emscripten"))]
fn test_should_panic_bad_message() {
@@ -165,11 +173,11 @@ fn test_should_panic_bad_message() {
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
let (_, res, _, _) = rx.recv().unwrap();
assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
let result = rx.recv().unwrap().result;
assert!(result == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
}
// FIXME: Re-enable emscripten once it can catch panics again
// FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
#[test]
#[cfg(not(target_os = "emscripten"))]
fn test_should_panic_but_succeeds() {
@@ -186,8 +194,8 @@ fn test_should_panic_but_succeeds() {
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
let (_, res, _, _) = rx.recv().unwrap();
assert!(res == TrFailedMsg("test did not panic as expected".to_string()));
let result = rx.recv().unwrap().result;
assert!(result == TrFailedMsg("test did not panic as expected".to_string()));
}
fn report_time_test_template(report_time: bool) -> Option<TestExecTime> {
@@ -214,7 +222,7 @@ fn report_time_test_template(report_time: bool) -> Option<TestExecTime> {
};
let (tx, rx) = channel();
run_test(&test_opts, false, desc, RunStrategy::InProcess, tx, Concurrent::No);
let (_, _, exec_time, _) = rx.recv().unwrap();
let exec_time = rx.recv().unwrap().exec_time;
exec_time
}
@@ -252,7 +260,7 @@ fn time_test_failure_template(test_type: TestType) -> TestResult {
};
let (tx, rx) = channel();
run_test(&test_opts, false, desc, RunStrategy::InProcess, tx, Concurrent::No);
let (_, result, _, _) = rx.recv().unwrap();
let result = rx.recv().unwrap().result;
result
}
@@ -658,9 +666,9 @@ fn should_sort_failures_before_printing_them() {
test_type: TestType::Unknown,
};
let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false, None);
let mut out = PrettyFormatter::new(OutputLocation::Raw(Vec::new()), false, 10, false, None);
let st = ConsoleTestState {
let st = console::ConsoleTestState {
log_out: None,
total: 0,
passed: 0,
@@ -678,8 +686,8 @@ fn should_sort_failures_before_printing_them() {
out.write_failures(&st).unwrap();
let s = match out.output_location() {
&Raw(ref m) => String::from_utf8_lossy(&m[..]),
&Pretty(_) => unreachable!(),
&OutputLocation::Raw(ref m) => String::from_utf8_lossy(&m[..]),
&OutputLocation::Pretty(_) => unreachable!(),
};
let apos = s.find("a").unwrap();

src/libtest/time.rs (new file)

@@ -0,0 +1,206 @@
//! Module `time` contains everything related to the time measurement of unit test
//! execution.
//! The two main purposes of this module are:
//! - Checking whether a test has timed out.
//! - Providing helpers for the `report-time` and `ensure-time` options.
use std::time::{Duration, Instant};
use std::str::FromStr;
use std::fmt;
use std::env;
use super::types::{TestDesc, TestType};
pub const TEST_WARN_TIMEOUT_S: u64 = 60;
/// This small module contains constants used by the `report-time` option.
/// These constant values are used if the corresponding environment variables are not set.
///
/// To override the values for unit tests, set the `RUST_TEST_TIME_UNIT` environment variable;
/// for integration tests, set `RUST_TEST_TIME_INTEGRATION`;
/// for doctests, set `RUST_TEST_TIME_DOCTEST`.
///
/// The expected format is `RUST_TEST_TIME_xxx=100,200`, where 100 is the warn time
/// and 200 is the critical time, both in milliseconds.
pub mod time_constants {
use std::time::Duration;
use super::TEST_WARN_TIMEOUT_S;
/// Environment variable for overriding default threshold for unit-tests.
pub const UNIT_ENV_NAME: &str = "RUST_TEST_TIME_UNIT";
// Unit tests are supposed to be really quick.
pub const UNIT_WARN: Duration = Duration::from_millis(50);
pub const UNIT_CRITICAL: Duration = Duration::from_millis(100);
/// Environment variable for overriding the default threshold for integration tests.
pub const INTEGRATION_ENV_NAME: &str = "RUST_TEST_TIME_INTEGRATION";
// Integration tests may have a lot of work, so they can take longer to execute.
pub const INTEGRATION_WARN: Duration = Duration::from_millis(500);
pub const INTEGRATION_CRITICAL: Duration = Duration::from_millis(1000);
/// Environment variable for overriding the default threshold for doctests.
pub const DOCTEST_ENV_NAME: &str = "RUST_TEST_TIME_DOCTEST";
// Doctests are similar to integration tests, because they can include a lot of
// initialization code.
pub const DOCTEST_WARN: Duration = INTEGRATION_WARN;
pub const DOCTEST_CRITICAL: Duration = INTEGRATION_CRITICAL;
// Do not assume anything about unknown tests; base the limits on the
// `TEST_WARN_TIMEOUT_S` constant.
pub const UNKNOWN_WARN: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S);
pub const UNKNOWN_CRITICAL: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S * 2);
}
/// Returns an `Instant` denoting when the test should be considered
/// timed out.
pub fn get_default_test_timeout() -> Instant {
Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S)
}
/// The measured execution time of a unit test.
#[derive(Debug, Clone, PartialEq)]
pub struct TestExecTime(pub Duration);
impl fmt::Display for TestExecTime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:.3}s", self.0.as_secs_f64())
}
}
/// Structure denoting time limits for test execution.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
pub struct TimeThreshold {
pub warn: Duration,
pub critical: Duration,
}
impl TimeThreshold {
/// Creates a new `TimeThreshold` instance with provided durations.
pub fn new(warn: Duration, critical: Duration) -> Self {
Self {
warn,
critical,
}
}
/// Attempts to create a `TimeThreshold` instance with values obtained
/// from the environment variable, and returns `None` if the variable
/// is not set.
/// Environment variable format is expected to match `\d+,\d+`.
///
/// # Panics
///
/// Panics if variable with provided name is set but contains inappropriate
/// value.
pub fn from_env_var(env_var_name: &str) -> Option<Self> {
let durations_str = env::var(env_var_name).ok()?;
// Split string into 2 substrings by comma and try to parse numbers.
let mut durations = durations_str
.splitn(2, ',')
.map(|v| {
u64::from_str(v).unwrap_or_else(|_| {
panic!(
"Duration value in variable {} is expected to be a number, but got {}",
env_var_name, v
)
})
});
// Callback to be called if the environment variable has unexpected structure.
let panic_on_incorrect_value = || {
panic!(
"Duration variable {} expected to have 2 numbers separated by comma, but got {}",
env_var_name, durations_str
);
};
let (warn, critical) = (
durations.next().unwrap_or_else(panic_on_incorrect_value),
durations.next().unwrap_or_else(panic_on_incorrect_value)
);
if warn > critical {
panic!("Test execution warn time should be less than or equal to the critical time");
}
Some(Self::new(Duration::from_millis(warn), Duration::from_millis(critical)))
}
}
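A hypothetical check of the parsing behaviour (the variable name is made up; the real names live in `time_constants`). As with any env-var test, it must not race with other tests:

```rust
#[cfg(test)]
mod time_threshold_tests {
    use super::TimeThreshold;
    use std::env;
    use std::time::Duration;

    #[test]
    fn parses_a_millisecond_pair() {
        env::set_var("DEMO_TEST_TIME", "50,100"); // hypothetical variable
        let threshold = TimeThreshold::from_env_var("DEMO_TEST_TIME").unwrap();
        assert_eq!(threshold.warn, Duration::from_millis(50));
        assert_eq!(threshold.critical, Duration::from_millis(100));
        // An unset variable yields None rather than panicking.
        assert!(TimeThreshold::from_env_var("DEMO_TEST_TIME_UNSET").is_none());
    }
}
```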
/// Structure with parameters for calculating test execution time.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
pub struct TestTimeOptions {
/// Denotes whether exceeding the critical execution time limit should be considered
/// a test failure.
pub error_on_excess: bool,
pub colored: bool,
pub unit_threshold: TimeThreshold,
pub integration_threshold: TimeThreshold,
pub doctest_threshold: TimeThreshold,
}
impl TestTimeOptions {
pub fn new_from_env(error_on_excess: bool, colored: bool) -> Self {
let unit_threshold =
TimeThreshold::from_env_var(time_constants::UNIT_ENV_NAME)
.unwrap_or_else(Self::default_unit);
let integration_threshold =
TimeThreshold::from_env_var(time_constants::INTEGRATION_ENV_NAME)
.unwrap_or_else(Self::default_integration);
let doctest_threshold =
TimeThreshold::from_env_var(time_constants::DOCTEST_ENV_NAME)
.unwrap_or_else(Self::default_doctest);
Self {
error_on_excess,
colored,
unit_threshold,
integration_threshold,
doctest_threshold,
}
}
pub fn is_warn(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool {
exec_time.0 >= self.warn_time(test)
}
pub fn is_critical(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool {
exec_time.0 >= self.critical_time(test)
}
fn warn_time(&self, test: &TestDesc) -> Duration {
match test.test_type {
TestType::UnitTest => self.unit_threshold.warn,
TestType::IntegrationTest => self.integration_threshold.warn,
TestType::DocTest => self.doctest_threshold.warn,
TestType::Unknown => time_constants::UNKNOWN_WARN,
}
}
fn critical_time(&self, test: &TestDesc) -> Duration {
match test.test_type {
TestType::UnitTest => self.unit_threshold.critical,
TestType::IntegrationTest => self.integration_threshold.critical,
TestType::DocTest => self.doctest_threshold.critical,
TestType::Unknown => time_constants::UNKNOWN_CRITICAL,
}
}
fn default_unit() -> TimeThreshold {
TimeThreshold::new(time_constants::UNIT_WARN, time_constants::UNIT_CRITICAL)
}
fn default_integration() -> TimeThreshold {
TimeThreshold::new(time_constants::INTEGRATION_WARN, time_constants::INTEGRATION_CRITICAL)
}
fn default_doctest() -> TimeThreshold {
TimeThreshold::new(time_constants::DOCTEST_WARN, time_constants::DOCTEST_CRITICAL)
}
}

src/libtest/types.rs (new file)

@@ -0,0 +1,145 @@
//! Common types used by `libtest`.
use std::fmt;
use std::borrow::Cow;
use super::options;
use super::bench::Bencher;
pub use NamePadding::*;
pub use TestName::*;
pub use TestFn::*;
/// Type of the test according to the [rust book](https://doc.rust-lang.org/cargo/guide/tests.html)
/// conventions.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum TestType {
/// Unit-tests are expected to be in the `src` folder of the crate.
UnitTest,
/// Integration-style tests are expected to be in the `tests` folder of the crate.
IntegrationTest,
/// Doctests are created by `librustdoc` itself, so they are a different type of test.
DocTest,
/// Tests for the sources that don't follow the project layout convention
/// (e.g. tests in raw `main.rs` compiled by calling `rustc --test` directly).
Unknown,
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum NamePadding {
PadNone,
PadOnRight,
}
// The name of a test. By convention this follows the rules for rust
// paths; i.e., it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub enum TestName {
StaticTestName(&'static str),
DynTestName(String),
AlignedTestName(Cow<'static, str>, NamePadding),
}
impl TestName {
pub fn as_slice(&self) -> &str {
match *self {
StaticTestName(s) => s,
DynTestName(ref s) => s,
AlignedTestName(ref s, _) => &*s,
}
}
pub fn padding(&self) -> NamePadding {
match self {
&AlignedTestName(_, p) => p,
_ => PadNone,
}
}
pub fn with_padding(&self, padding: NamePadding) -> TestName {
let name = match self {
&TestName::StaticTestName(name) => Cow::Borrowed(name),
&TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
&TestName::AlignedTestName(ref name, _) => name.clone(),
};
TestName::AlignedTestName(name, padding)
}
}
impl fmt::Display for TestName {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(self.as_slice(), f)
}
}
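A short sketch of the name and padding API (the function name is hypothetical; the variants are in scope through the `pub use` re-exports above):

```rust
fn test_name_sketch() {
    let name = StaticTestName("my_crate::my_test");
    assert_eq!(name.as_slice(), "my_crate::my_test");
    assert_eq!(name.padding(), PadNone);
    // with_padding records the requested alignment in an AlignedTestName.
    let aligned = name.with_padding(PadOnRight);
    assert_eq!(aligned.padding(), PadOnRight);
}
```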
/// Represents a benchmark function.
pub trait TDynBenchFn: Send {
fn run(&self, harness: &mut Bencher);
}
// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function panics then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into threads.
pub enum TestFn {
StaticTestFn(fn()),
StaticBenchFn(fn(&mut Bencher)),
DynTestFn(Box<dyn FnOnce() + Send>),
DynBenchFn(Box<dyn TDynBenchFn + 'static>),
}
impl TestFn {
pub fn padding(&self) -> NamePadding {
match *self {
StaticTestFn(..) => PadNone,
StaticBenchFn(..) => PadOnRight,
DynTestFn(..) => PadNone,
DynBenchFn(..) => PadOnRight,
}
}
}
impl fmt::Debug for TestFn {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match *self {
StaticTestFn(..) => "StaticTestFn(..)",
StaticBenchFn(..) => "StaticBenchFn(..)",
DynTestFn(..) => "DynTestFn(..)",
DynBenchFn(..) => "DynBenchFn(..)",
})
}
}
// The definition of a single test. A test runner will run a list of
// these.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct TestDesc {
pub name: TestName,
pub ignore: bool,
pub should_panic: options::ShouldPanic,
pub allow_fail: bool,
pub test_type: TestType,
}
impl TestDesc {
pub fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
let mut name = String::from(self.name.as_slice());
let fill = column_count.saturating_sub(name.len());
let pad = " ".repeat(fill);
match align {
PadNone => name,
PadOnRight => {
name.push_str(&pad);
name
}
}
}
}
#[derive(Debug)]
pub struct TestDescAndFn {
pub desc: TestDesc,
pub testfn: TestFn,
}