|
3 | 3 |
|
4 | 4 | @author: gocht |
5 | 5 | ''' |
| 6 | +import argparse |
6 | 7 | import sys |
7 | 8 | import benchmark_helper |
8 | 9 | import pickle |
9 | 10 |
|
| 11 | +# Available tests |
# Available tests (benchmark scripts that benchmark_helper can invoke).
tests = ["bm_baseline.py", "bm_simplefunc.py"]

# Available instrumenters.  The C-accelerated variants only exist on
# Python 3, so they are appended conditionally.
instrumenters = ["profile", "trace", "dummy", "None"]
if sys.version_info.major >= 3:
    instrumenters.extend(["cProfile", "cTrace"])

# Default values for: how many times the instrumented code is run during
# one test run, keyed by benchmark script.  Values are strings because
# they are passed through as command-line arguments to the test script.
reps_x = {
    "bm_baseline.py": ["1000000", "2000000", "3000000", "4000000", "5000000"],
    "bm_simplefunc.py": ["100000", "200000", "300000", "400000", "500000"],
}
21 | | -# How many times a test invocation is repeated (number of timings per test instance) |
22 | | -test_repetitions = 51 |
23 | 24 |
|
24 | | -bench = benchmark_helper.BenchmarkEnv(repetitions=test_repetitions) |
def str_to_int(s):
    """Convert *s* to an int, also accepting float notation such as '1e6'.

    Used as an argparse ``type=`` converter so users can write loop counts
    in scientific notation.  A value with a fractional part is truncated
    towards zero (``int(float(s))``).  Raises ValueError when *s* is neither
    an int nor a float literal, which argparse reports as a usage error.
    """
    try:
        return int(s)
    except ValueError:
        return int(float(s))
| 32 | + |
# Command-line interface.  Every option defaults to the historic hard-coded
# behaviour (all tests, all instrumenters, 51 repetitions, results.pkl), so
# running the script without arguments stays backward compatible.
parser = argparse.ArgumentParser(description='Benchmark the instrumenters.',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--test', '-t', metavar='TEST', nargs='+', default=tests,
                    choices=tests, help='Which test(s) to run')
parser.add_argument('--repetitions', '-r', default=51, type=str_to_int,
                    help='How many times a test invocation is repeated (number of timings per test instance)')
parser.add_argument('--loop-count', '-l', type=str_to_int, nargs='+',
                    help=('How many times the instrumented code is run during 1 test run. '
                          'Can be repeated and will create 1 test instance per argument'))
parser.add_argument('--instrumenter', '-i', metavar='INST', nargs='+', default=instrumenters,
                    choices=instrumenters, help='The instrumenter(s) to use')
parser.add_argument('--output', '-o', default='results.pkl', help='Output file for the results')
parser.add_argument('--dry-run', action='store_true', help='Print parsed arguments and exit')
args = parser.parse_args()

# --dry-run: show the parsed configuration without running any benchmark.
if args.dry_run:
    print(args)
    sys.exit(0)

# Benchmark environment: repeats every test invocation args.repetitions
# times to collect that many timings per test instance.
bench = benchmark_helper.BenchmarkEnv(repetitions=args.repetitions)
results = {}
26 | 54 |
|
27 | | -for test in tests: |
| 55 | +for test in args.test: |
28 | 56 | results[test] = {} |
29 | 57 |
|
30 | | - for instrumenter in instrumenters: |
| 58 | + for instrumenter in args.instrumenter: |
31 | 59 | results[test][instrumenter] = {} |
32 | 60 |
|
33 | 61 | if instrumenter == "None": |
|
40 | 68 | print("#########") |
41 | 69 | print("{}: {}".format(test, scorep_settings)) |
42 | 70 | print("#########") |
43 | | - for reps in reps_x[test]: |
| 71 | + loop_counts = args.loop_count if args.loop_count else reps_x[test] |
| 72 | + for reps in loop_counts: |
44 | 73 | times = bench.call(test, [reps], |
45 | 74 | enable_scorep, |
46 | 75 | scorep_settings=scorep_settings) |
47 | 76 | print("{:<8}: {}".format(reps, times)) |
48 | 77 | results[test][instrumenter][reps] = times |
49 | 78 |
|
50 | | -with open("results.pkl", "wb") as f: |
| 79 | +with open(args.output, "wb") as f: |
51 | 80 | pickle.dump(results, f) |
0 commit comments