Skip to content

Commit a0bd19e

Browse files
committed
Improve readability of benchmark script
1 parent dbdf367 commit a0bd19e

2 files changed

Lines changed: 23 additions & 16 deletions

File tree

benchmark/benchmark.py

Lines changed: 19 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -7,20 +7,29 @@
77
import benchmark_helper
88
import pickle
99

10-
bench = benchmark_helper.BenchmarkEnv(repetitions=51)
1110
tests = ["bm_baseline.py", "bm_simplefunc.py"]
12-
results = {}
1311

14-
reps_x = {}
15-
reps_x["bm_baseline.py"] = ["1000000", "2000000", "3000000", "4000000", "5000000"]
16-
reps_x["bm_simplefunc.py"] = ["100000", "200000", "300000", "400000", "500000"]
12+
instrumenters = ["profile", "trace", "dummy", "None"]
13+
if sys.version_info.major >= 3:
14+
instrumenters.extend(["cProfile", "cTrace"])
15+
16+
# How many times the instrumented code is run during 1 test run
17+
reps_x = {
18+
"bm_baseline.py": ["1000000", "2000000", "3000000", "4000000", "5000000"],
19+
"bm_simplefunc.py": ["100000", "200000", "300000", "400000", "500000"],
20+
}
21+
# How many times a test invocation is repeated (number of timings per test instance)
22+
test_repetitions = 51
23+
24+
bench = benchmark_helper.BenchmarkEnv(repetitions=test_repetitions)
25+
results = {}
1726

1827
for test in tests:
19-
results[test] = {"profile": {}, "trace": {}, "dummy": {}, "None": {}}
20-
if sys.version_info.major >= 3:
21-
results[test].update({"cProfile": {}, "cTrace": {}})
28+
results[test] = {}
29+
30+
for instrumenter in instrumenters:
31+
results[test][instrumenter] = {}
2232

23-
for instrumenter in results[test]:
2433
if instrumenter == "None":
2534
enable_scorep = False
2635
scorep_settings = []
@@ -35,8 +44,8 @@
3544
times = bench.call(test, [reps],
3645
enable_scorep,
3746
scorep_settings=scorep_settings)
38-
results[test][instrumenter][reps] = times
3947
print("{:<8}: {}".format(reps, times))
48+
results[test][instrumenter][reps] = times
4049

4150
with open("results.pkl", "wb") as f:
4251
pickle.dump(results, f)

benchmark/benchmark_helper.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -36,18 +36,16 @@ def call(self, script="", ops=[], enable_scorep=True, scorep_settings=[]):
3636
arguments.extend(scorep_settings)
3737
arguments.append(script)
3838
arguments.extend(ops)
39+
print(arguments)
3940

4041
runtimes = []
41-
for i in range(self.repetitions):
42+
for _ in range(self.repetitions):
4243
begin = time.time()
43-
print(arguments)
4444
out = subprocess.run(
4545
arguments,
46-
env=self.env,
47-
stdout=subprocess.PIPE,
48-
stderr=subprocess.PIPE)
46+
env=self.env)
4947
end = time.time()
50-
assert(out.returncode == 0)
48+
assert out.returncode == 0
5149

5250
runtime = end - begin
5351
runtimes.append(runtime)

0 commit comments

Comments (0)