diff options
Diffstat (limited to 'utils/google-benchmark/tools/compare_bench.py')
| -rw-r--r-- | utils/google-benchmark/tools/compare_bench.py | 58 |
1 file changed, 48 insertions, 10 deletions
"""
compare_bench.py - Compare two benchmarks or their results and report the
                   difference.
"""
import argparse
from argparse import ArgumentParser
import sys

import gbench
from gbench import util, report
from gbench.util import *


def check_inputs(in1, in2, flags):
    """Sanity-check the two user-provided inputs and the benchmark flags.

    Args:
        in1, in2: paths to benchmark executables or JSON result files.
        flags: list of extra '--benchmark_*' flags to pass to executables.

    Warns (to stdout) when a flag combination is suspicious, and exits with
    status 1 when an unsupported '--benchmark_out_format' is requested.
    """
    in1_kind, in1_err = classify_input_file(in1)
    in2_kind, in2_err = classify_input_file(in2)
    output_file = find_benchmark_flag('--benchmark_out=', flags)
    output_type = find_benchmark_flag('--benchmark_out_format=', flags)
    if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
        # Both runs would write to the same output file; the second run wins.
        print(("WARNING: '--benchmark_out=%s' will be passed to both "
               "benchmarks causing it to be overwritten") % output_file)
    if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
        # JSON inputs are read back, not re-run, so runtime flags are ignored.
        print("WARNING: passing --benchmark flags has no effect since both "
              "inputs are JSON")
    if output_type is not None and output_type != 'json':
        # FIX: message previously closed the quote with a stray backtick
        # ("'compare_bench.py`"); quotes now match.
        print(("ERROR: passing '--benchmark_out_format=%s' to "
               "'compare_bench.py' is not supported.") % output_type)
        sys.exit(1)


def main():
    """Parse the command line, run/load both benchmarks, and print a diff report."""
    parser = ArgumentParser(
        description='compare the results of two benchmarks')
    parser.add_argument(
        'test1', metavar='test1', type=str, nargs=1,
        help='A benchmark executable or JSON output file')
    parser.add_argument(
        'test2', metavar='test2', type=str, nargs=1,
        help='A benchmark executable or JSON output file')
    # FIXME this is a dummy argument which will never actually match
    # any --benchmark flags but it helps generate a better usage message
    parser.add_argument(
        'benchmark_options', metavar='benchmark_option', nargs='*',
        help='Arguments to pass when running benchmark executables'
    )
    # Unknown '--benchmark_*' flags end up in unknown_args and are forwarded
    # to the benchmark binaries below.
    args, unknown_args = parser.parse_known_args()
    # Parse the command line flags
    test1 = args.test1[0]
    test2 = args.test2[0]
    if args.benchmark_options:
        # FIX: message previously read "positional argument arguments"
        # (duplicated word).
        print("Unrecognized positional arguments: '%s'"
              % args.benchmark_options)
        exit(1)
    benchmark_options = unknown_args
    check_inputs(test1, test2, benchmark_options)
    # Run the benchmarks and report the results
    json1 = gbench.util.run_or_load_benchmark(test1, benchmark_options)
    json2 = gbench.util.run_or_load_benchmark(test2, benchmark_options)
    output_lines = gbench.report.generate_difference_report(json1, json2)
    print('Comparing %s to %s' % (test1, test2))
    for ln in output_lines:
        print(ln)
