Skip to content

Commit dff644b

Browse files
committed
Merge branch 'Flamefire-benchmark_args'
2 parents 6fa1cfc + c84f7b6 commit dff644b

1 file changed

Lines changed: 37 additions & 10 deletions

File tree

benchmark/benchmark.py

Lines changed: 37 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,34 +1,60 @@
1+
#!/usr/bin/env python
12
'''
23
Created on 04.10.2019
34
45
@author: gocht
56
'''
7+
import argparse
68
import sys
79
import benchmark_helper
810
import pickle
911
import numpy as np
1012

# Benchmark scripts that can be selected via --test.
tests = ["bm_baseline.py", "bm_simplefunc.py"]

# Instrumenters that can be selected via --instrumenter.
# The C-implemented variants only exist on Python 3.
instrumenters = ["profile", "trace", "dummy", "None"]
if sys.version_info.major >= 3:
    instrumenters += ["cProfile", "cTrace"]

# Default values for: how many times the instrumented code is run during
# one test run (can be overridden with --loop-count).
reps_x = {
    "bm_baseline.py": [str(i * 1000000) for i in range(1, 6)],
    "bm_simplefunc.py": [str(i * 100000) for i in range(1, 6)],
}
22-
# How many times a test invocation is repeated (number of timings per test instance)
23-
test_repetitions = 51
2426

25-
bench = benchmark_helper.BenchmarkEnv(repetitions=test_repetitions)
27+
28+
def str_to_int(s):
    """Convert a number string to an int, accepting float/scientific notation.

    Examples: "100" -> 100, "1e6" -> 1000000, "2.5e5" -> 250000.

    Plain integer strings are parsed exactly instead of being round-tripped
    through float, so very large loop counts do not silently lose precision.
    """
    try:
        return int(s)
    except ValueError:
        # Fall back for values like "1e6" or "2.5"; truncates toward zero.
        return int(float(s))
30+
31+
32+
# Command-line interface; defaults show up in --help via
# ArgumentDefaultsHelpFormatter.
parser = argparse.ArgumentParser(
    description='Benchmark the instrumenters.',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
add_arg = parser.add_argument
add_arg('--test', '-t', metavar='TEST', nargs='+', default=tests, choices=tests,
        help='Which test(s) to run')
add_arg('--repetitions', '-r', default=51, type=str_to_int,
        help='How many times a test invocation is repeated (number of timings per test instance)')
add_arg('--loop-count', '-l', type=str_to_int, nargs='+',
        help=('How many times the instrumented code is run during 1 test run. '
              'Can be repeated and will create 1 test instance per argument'))
add_arg('--instrumenter', '-i', metavar='INST', nargs='+', default=instrumenters,
        choices=instrumenters, help='The instrumenter(s) to use')
add_arg('--output', '-o', default='results.pkl', help='Output file for the results')
add_arg('--dry-run', action='store_true', help='Print parsed arguments and exit')
args = parser.parse_args()

# --dry-run lets the user inspect the parsed configuration without
# actually running any benchmark.
if args.dry_run:
    print(args)
    sys.exit(0)

bench = benchmark_helper.BenchmarkEnv(repetitions=args.repetitions)
2652
results = {}
2753

28-
for test in tests:
54+
for test in args.test:
2955
results[test] = {}
3056

31-
for instrumenter in instrumenters:
57+
for instrumenter in args.instrumenter:
3258
results[test][instrumenter] = {}
3359

3460
if instrumenter == "None":
@@ -39,14 +65,15 @@
3965
print("#########")
4066
print("{}: {}".format(test, scorep_settings))
4167
print("#########")
loop_counts = args.loop_count if args.loop_count else reps_x[test]
# Width of the widest loop count, for right-aligned result printing.
# BUG FIX: max_reps_width was deleted together with the old reps loop in this
# merge, but the print() below still references it, which would raise a
# NameError on the first iteration.  Recompute it from the loop counts that
# are actually used (this also measures numeric width instead of the old
# lexicographic max() over strings).
max_reps_width = max(len(str(r)) for r in loop_counts)
for reps in loop_counts:
    # reps may be an int (from --loop-count) or a str (from reps_x defaults);
    # the benchmark script expects its argument as a string.
    times = bench.call(test, [str(reps)],
                       enable_scorep,
                       scorep_settings=scorep_settings)
    times = np.array(times)
    print("{:>{width}}: Range={:{prec}}-{:{prec}} Mean={:{prec}} Median={:{prec}}".format(
        reps, times.min(), times.max(), times.mean(), np.median(times), width=max_reps_width, prec='5.4f'))
    results[test][instrumenter][reps] = times
5077

# Persist all collected timings so they can be analysed/plotted later.
with open(args.output, "wb") as result_file:
    pickle.dump(results, result_file)

0 commit comments

Comments
 (0)