Skip to content

Commit 05feb24

Browse files
committed
Allow customization of benchmark runs via CLI
1 parent cd2f7fa commit 05feb24

1 file changed

Lines changed: 37 additions & 8 deletions

File tree

benchmark/benchmark.py

Lines changed: 37 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -3,31 +3,59 @@
33
44
@author: gocht
55
'''
6+
import argparse
67
import sys
78
import benchmark_helper
89
import pickle
910

11+
# Benchmark scripts that can be run.
tests = ["bm_baseline.py", "bm_simplefunc.py"]

# Instrumenters that can be benchmarked; the C implementations only
# exist on Python 3, so they are appended conditionally.
instrumenters = ["profile", "trace", "dummy", "None"]
if sys.version_info.major >= 3:
    instrumenters += ["cProfile", "cTrace"]

# Default values for: How many times the instrumented code is run
# during 1 test run (per-test list of loop counts, kept as strings
# because they are passed through as CLI arguments).
reps_x = {
    "bm_baseline.py": [
        "1000000", "2000000", "3000000", "4000000", "5000000",
    ],
    "bm_simplefunc.py": [
        "100000", "200000", "300000", "400000", "500000",
    ],
}
21-
# How many times a test invocation is repeated (number of timings per test instance)
22-
test_repetitions = 51
2324

24-
bench = benchmark_helper.BenchmarkEnv(repetitions=test_repetitions)
25+
26+
def str_to_int(s):
    """Convert *s* to an int, also accepting float notation such as '1e6'.

    Plain integer literals are parsed directly; anything else is parsed
    as a float first and truncated towards zero. Raises ValueError if
    *s* is not numeric at all.
    """
    try:
        result = int(s)
    except ValueError:
        result = int(float(s))
    return result
31+
32+
33+
# Command-line interface: every benchmark parameter can be overridden
# without editing this file. Defaults are shown in --help via
# ArgumentDefaultsHelpFormatter.
parser = argparse.ArgumentParser(
    description='Benchmark the instrumenters.',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
    '--test', '-t', metavar='TEST', nargs='+', default=tests, choices=tests,
    help='Which test(s) to run')
parser.add_argument(
    '--repetitions', '-r', default=51, type=str_to_int,
    help='How many times a test invocation is repeated (number of timings per test instance)')
parser.add_argument(
    '--loop-count', '-l', type=str_to_int, nargs='+',
    help=('How many times the instrumented code is run during 1 test run. '
          'Can be repeated and will create 1 test instance per argument'))
parser.add_argument(
    '--instrumenter', '-i', metavar='INST', nargs='+', default=instrumenters,
    choices=instrumenters,
    help='The instrumenter(s) to use')
parser.add_argument(
    '--output', '-o', default='results.pkl',
    help='Output file for the results')
parser.add_argument(
    '--dry-run', action='store_true',
    help='Print parsed arguments and exit')
args = parser.parse_args()

# --dry-run: echo the parsed configuration and quit without benchmarking.
if args.dry_run:
    print(args)
    sys.exit(0)
51+
52+
bench = benchmark_helper.BenchmarkEnv(repetitions=args.repetitions)
2553
results = {}
2654

27-
for test in tests:
55+
for test in args.test:
2856
results[test] = {}
2957

30-
for instrumenter in instrumenters:
58+
for instrumenter in args.instrumenter:
3159
results[test][instrumenter] = {}
3260

3361
if instrumenter == "None":
@@ -40,12 +68,13 @@
4068
print("#########")
4169
print("{}: {}".format(test, scorep_settings))
4270
print("#########")
43-
for reps in reps_x[test]:
71+
loop_counts = args.loop_count if args.loop_count else reps_x[test]
72+
for reps in loop_counts:
4473
times = bench.call(test, [reps],
4574
enable_scorep,
4675
scorep_settings=scorep_settings)
4776
print("{:<8}: {}".format(reps, times))
4877
results[test][instrumenter][reps] = times
4978

50-
with open("results.pkl", "wb") as f:
79+
with open(args.output, "wb") as f:
5180
pickle.dump(results, f)

0 commit comments

Comments
 (0)