|
| 1 | +#!/usr/bin/env python |
1 | 2 | ''' |
2 | 3 | Created on 04.10.2019 |
3 | 4 |
|
4 | 5 | @author: gocht |
5 | 6 | ''' |
| 7 | +import argparse |
6 | 8 | import sys |
7 | 9 | import benchmark_helper |
8 | 10 | import pickle |
9 | 11 | import numpy as np |
10 | 12 |
|
| 13 | +# Available tests |
11 | 14 | tests = ["bm_baseline.py", "bm_simplefunc.py"] |
12 | 15 |
|
| 16 | +# Available instrumenters |
13 | 17 | instrumenters = ["profile", "trace", "dummy", "None"] |
14 | 18 | if sys.version_info.major >= 3: |
15 | 19 | instrumenters.extend(["cProfile", "cTrace"]) |
16 | 20 |
|
# Default values for: how many times the instrumented code is run during
# one test run (kept as strings because they are passed on the command
# line of the benchmark scripts).
reps_x = {
    "bm_baseline.py": [str(i * 1000000) for i in range(1, 6)],
    "bm_simplefunc.py": [str(i * 100000) for i in range(1, 6)],
}
24 | 26 |
|
25 | | -bench = benchmark_helper.BenchmarkEnv(repetitions=test_repetitions) |
| 27 | + |
def str_to_int(s):
    """Convert *s* to an int, accepting float/scientific notation like '1e6'."""
    # Going through float first allows inputs such as "2e5" or "1.5".
    value = float(s)
    return int(value)
| 30 | + |
| 31 | + |
# Command-line interface. All defaults reproduce the previously
# hard-coded behaviour of the script.
parser = argparse.ArgumentParser(
    description='Benchmark the instrumenters.',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
    '--test', '-t', metavar='TEST', nargs='+', default=tests, choices=tests,
    help='Which test(s) to run')
parser.add_argument(
    '--repetitions', '-r', default=51, type=str_to_int,
    help='How many times a test invocation is repeated (number of timings per test instance)')
parser.add_argument(
    '--loop-count', '-l', type=str_to_int, nargs='+',
    help=('How many times the instrumented code is run during 1 test run. '
          'Can be repeated and will create 1 test instance per argument'))
parser.add_argument(
    '--instrumenter', '-i', metavar='INST', nargs='+', default=instrumenters,
    choices=instrumenters, help='The instrumenter(s) to use')
parser.add_argument(
    '--output', '-o', default='results.pkl', help='Output file for the results')
parser.add_argument(
    '--dry-run', action='store_true', help='Print parsed arguments and exit')
args = parser.parse_args()

# --dry-run: show what would be executed, then stop without benchmarking.
if args.dry_run:
    print(args)
    sys.exit(0)
| 50 | + |
# Benchmark environment; each test instance is timed args.repetitions times.
bench = benchmark_helper.BenchmarkEnv(repetitions=args.repetitions)
# Nested result store: results[test][instrumenter][loop_count] -> timings
results = dict()
27 | 53 |
|
28 | | -for test in tests: |
| 54 | +for test in args.test: |
29 | 55 | results[test] = {} |
30 | 56 |
|
31 | | - for instrumenter in instrumenters: |
| 57 | + for instrumenter in args.instrumenter: |
32 | 58 | results[test][instrumenter] = {} |
33 | 59 |
|
34 | 60 | if instrumenter == "None": |
|
39 | 65 | print("#########") |
40 | 66 | print("{}: {}".format(test, scorep_settings)) |
41 | 67 | print("#########") |
42 | | - max_reps_width = len(str(max(reps_x[test]))) |
43 | | - for reps in reps_x[test]: |
44 | | - times = bench.call(test, [reps], |
| 68 | + loop_counts = args.loop_count if args.loop_count else reps_x[test] |
| 69 | + for reps in loop_counts: |
| 70 | + times = bench.call(test, [str(reps)], |
| 71 | + enable_scorep, |
45 | 72 | scorep_settings=scorep_settings) |
46 | 73 | times = np.array(times) |
47 | 74 | print("{:>{width}}: Range={:{prec}}-{:{prec}} Mean={:{prec}} Median={:{prec}}".format( |
48 | 75 | reps, times.min(), times.max(), times.mean(), np.median(times), width=max_reps_width, prec='5.4f')) |
49 | 76 | results[test][instrumenter][reps] = times |
50 | 77 |
|
# Persist all collected timings so they can be analysed later.
with open(args.output, "wb") as result_file:
    pickle.dump(results, result_file)
0 commit comments