From 0399e7d200bec35bb11232eca5f94106bbb66ed0 Mon Sep 17 00:00:00 2001
From: Alexander Grund
Date: Wed, 17 Jun 2020 13:22:13 +0200
Subject: [PATCH 1/2] Print summary of timings instead of individual times

---
 benchmark/benchmark.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py
index c44d88a..394dfd7 100644
--- a/benchmark/benchmark.py
+++ b/benchmark/benchmark.py
@@ -6,6 +6,7 @@
 import sys
 import benchmark_helper
 import pickle
+import numpy as np
 
 tests = ["bm_baseline.py", "bm_simplefunc.py"]
 
@@ -44,7 +45,9 @@
         times = bench.call(test, [reps],
                            enable_scorep,
                            scorep_settings=scorep_settings)
-        print("{:<8}: {}".format(reps, times))
+        times = np.array(times)
+        print("{:>8}: Range={:{prec}}-{:{prec}} Mean={:{prec}} Median={:{prec}}".format(
+            reps, times.min(), times.max(), times.mean(), np.median(times), prec='5.4f'))
         results[test][instrumenter][reps] = times
 
 with open("results.pkl", "wb") as f:

From 5c7b6bffd3849a33990df114136cd93b4acb00f1 Mon Sep 17 00:00:00 2001
From: Alexander Grund
Date: Wed, 17 Jun 2020 13:25:58 +0200
Subject: [PATCH 2/2] Don't print script invocation for every repetition

---
 benchmark/benchmark.py        | 10 ++++------
 benchmark/benchmark_helper.py |  7 ++-----
 2 files changed, 6 insertions(+), 11 deletions(-)

diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py
index 394dfd7..bccb8f6 100644
--- a/benchmark/benchmark.py
+++ b/benchmark/benchmark.py
@@ -32,22 +32,20 @@
         results[test][instrumenter] = {}
 
         if instrumenter == "None":
-            enable_scorep = False
             scorep_settings = []
         else:
-            enable_scorep = True
-            scorep_settings = ["--instrumenter-type={}".format(instrumenter)]
+            scorep_settings = ["-m", "scorep", "--instrumenter-type={}".format(instrumenter)]
 
         print("#########")
         print("{}: {}".format(test, scorep_settings))
         print("#########")
+        max_reps_width = len(str(max(reps_x[test])))
         for reps in reps_x[test]:
             times = bench.call(test, [reps],
-                               enable_scorep,
                                scorep_settings=scorep_settings)
             times = np.array(times)
-            print("{:>8}: Range={:{prec}}-{:{prec}} Mean={:{prec}} Median={:{prec}}".format(
-                reps, times.min(), times.max(), times.mean(), np.median(times), prec='5.4f'))
+            print("{:>{width}}: Range={:{prec}}-{:{prec}} Mean={:{prec}} Median={:{prec}}".format(
+                reps, times.min(), times.max(), times.mean(), np.median(times), width=max_reps_width, prec='5.4f'))
             results[test][instrumenter][reps] = times
 
 with open("results.pkl", "wb") as f:
diff --git a/benchmark/benchmark_helper.py b/benchmark/benchmark_helper.py
index 2ad83e6..b38036d 100644
--- a/benchmark/benchmark_helper.py
+++ b/benchmark/benchmark_helper.py
@@ -21,17 +21,14 @@ def __del__(self):
             self.exp_dir, ignore_errors=True)
 
-    def call(self, script="", ops=[], enable_scorep=True, scorep_settings=[]):
+    def call(self, script, ops=[], scorep_settings=[]):
         self.env["SCOREP_EXPERIMENT_DIRECTORY"] = self.exp_dir + \
             "/{}-{}-{}".format(script, ops, scorep_settings)
 
         arguments = [sys.executable]
-        if enable_scorep:
-            arguments.extend(["-m", "scorep"])
-            arguments.extend(scorep_settings)
+        arguments.extend(scorep_settings)
         arguments.append(script)
         arguments.extend(ops)
-        print(arguments)
 
         runtimes = []
         for _ in range(self.repetitions):