@@ -7,20 +7,29 @@
 import benchmark_helper
 import pickle
 
-bench = benchmark_helper.BenchmarkEnv(repetitions=51)
 tests = ["bm_baseline.py", "bm_simplefunc.py"]
-results = {}
 
-reps_x = {}
-reps_x["bm_baseline.py"] = ["1000000", "2000000", "3000000", "4000000", "5000000"]
-reps_x["bm_simplefunc.py"] = ["100000", "200000", "300000", "400000", "500000"]
+instrumenters = ["profile", "trace", "dummy", "None"]
+if sys.version_info.major >= 3:
+    instrumenters.extend(["cProfile", "cTrace"])
+
+# How many times the instrumented code is run during 1 test run
+reps_x = {
+    "bm_baseline.py": ["1000000", "2000000", "3000000", "4000000", "5000000"],
+    "bm_simplefunc.py": ["100000", "200000", "300000", "400000", "500000"],
+}
+# How many times a test invocation is repeated (number of timings per test instance)
+test_repetitions = 51
+
+bench = benchmark_helper.BenchmarkEnv(repetitions=test_repetitions)
+results = {}
 
 for test in tests:
-    results[test] = {"profile": {}, "trace": {}, "dummy": {}, "None": {}}
-    if sys.version_info.major >= 3:
-        results[test].update({"cProfile": {}, "cTrace": {}})
+    results[test] = {}
+
+    for instrumenter in instrumenters:
+        results[test][instrumenter] = {}
 
-    for instrumenter in results[test]:
         if instrumenter == "None":
            enable_scorep = False
            scorep_settings = []
@@ -35,8 +44,8 @@
             times = bench.call(test, [reps],
                                enable_scorep,
                                scorep_settings=scorep_settings)
-            results[test][instrumenter][reps] = times
             print("{:<8}: {}".format(reps, times))
+            results[test][instrumenter][reps] = times
 
 with open("results.pkl", "wb") as f:
     pickle.dump(results, f)
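
After a run, results.pkl holds the raw timing lists in a nested dict keyed as results[test][instrumenter][reps]. A minimal sketch of a consumer follows; only that layout comes from the script above, while the statistics.median summary and the assumption that bench.call returns one timing per repetition are illustrative:

import pickle
import statistics

# Assumed layout (from the script above):
# results[test][instrumenter][reps] -> list of timings, one per repetition
with open("results.pkl", "rb") as f:
    results = pickle.load(f)

for test, by_instrumenter in results.items():
    for instrumenter, by_reps in by_instrumenter.items():
        # The reps keys are stored as strings, so sort them numerically.
        for reps, times in sorted(by_reps.items(), key=lambda kv: int(kv[0])):
            print("{:<18} {:<10} {:>8}: median={:.4f} min={:.4f}".format(
                test, instrumenter, reps,
                statistics.median(times), min(times)))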
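
Because the "None" instrumenter runs with enable_scorep = False, its timings can serve as the uninstrumented baseline. A hypothetical helper, not part of the repository, for estimating per-instrumenter overhead under the same assumptions:

import statistics

def overhead_factor(results, test, instrumenter, reps):
    # Hypothetical helper: median instrumented time divided by the median
    # uninstrumented ("None") time for the same repetition count.
    instrumented = statistics.median(results[test][instrumenter][reps])
    baseline = statistics.median(results[test]["None"][reps])
    return instrumented / baseline

# e.g. overhead_factor(results, "bm_simplefunc.py", "cProfile", "100000")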