1 parent 0e9646c commit 22b2819
doc/usage.rst
@@ -102,8 +102,8 @@ Usage::
     pyperformance run [-h] [-r] [-f] [--debug-single-value] [-v] [-m]
                       [--affinity CPU_LIST] [-o FILENAME]
                       [--append FILENAME] [--manifest MANIFEST]
-                      [-b BM_LIST] [--inherit-environ VAR_LIST]
-                      [-p PYTHON]
+                      [--timeout TIMEOUT] [-b BM_LIST]
+                      [--inherit-environ VAR_LIST] [-p PYTHON]


 options::

@@ -124,6 +124,8 @@ options::
                         baseline_python, not changed_python.
   --append FILENAME     Add runs to an existing file, or create it if
                         it doesn't exist
+  --timeout TIMEOUT     Specify a timeout in seconds for a single
+                        benchmark run (default: disabled)
   --manifest MANIFEST   benchmark manifest file to use
   -b BM_LIST, --benchmarks BM_LIST
                         Comma-separated list of benchmarks to run. Can
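For example, the new flag caps each individual benchmark run at the given number of seconds (illustrative invocation; the benchmark name and output path are arbitrary):

    pyperformance run --timeout 300 -b json_dumps -o results.json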
pyperformance/_benchmark.py
@@ -233,7 +233,11 @@ def _run_perf_script(python, runscript, runid, *,
         sys.stderr.flush()
         sys.stderr.write(stderr)

-        raise RuntimeError("Benchmark died")
+        # pyperf returns exit code 124 if the benchmark execution times out
+        if ec == 124:
+            raise TimeoutError("Benchmark timed out")
+        else:
+            raise RuntimeError("Benchmark died")
     return pyperf.BenchmarkSuite.load(tmp)
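Exit status 124 matches the convention used by coreutils' timeout(1). A minimal standalone sketch of the same mapping as in _run_perf_script above (not pyperformance code; the subprocess command is a placeholder):

    import subprocess
    import sys

    # Run some worker process; "pass" stands in for a real benchmark script.
    proc = subprocess.run([sys.executable, "-c", "pass"])
    ec = proc.returncode
    if ec != 0:
        if ec == 124:
            # pyperf signals a timed-out benchmark with exit code 124
            raise TimeoutError("Benchmark timed out")
        raise RuntimeError("Benchmark died")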
pyperformance/cli.py
@@ -25,6 +25,13 @@ def comma_separated(values):
     return list(filter(None, values))


+def check_positive(value):
+    value = int(value)
+    if value <= 0:
+        raise argparse.ArgumentTypeError("Argument must be a positive integer.")
+    return value
+
+
 def filter_opts(cmd, *, allow_no_benchmarks=False):
     cmd.add_argument("--manifest", help="benchmark manifest file to use")
@@ -82,6 +89,10 @@ def parse_args():
                      help="Use the same number of loops as a previous run "
                           "(i.e., don't recalibrate). Should be a path to a "
                           ".json file from a previous run.")
+    cmd.add_argument("--timeout",
+                     help="Specify a timeout in seconds for a single "
+                          "benchmark run (default: disabled)",
+                     type=check_positive)
     filter_opts(cmd)

     # show
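A quick sketch of how the validator behaves under argparse (standalone, standard library only):

    import argparse

    def check_positive(value):
        value = int(value)
        if value <= 0:
            raise argparse.ArgumentTypeError("Argument must be a positive integer.")
        return value

    parser = argparse.ArgumentParser()
    parser.add_argument("--timeout", type=check_positive)

    print(parser.parse_args(["--timeout", "300"]))  # Namespace(timeout=300)
    # parser.parse_args(["--timeout", "0"]) exits with:
    # error: argument --timeout: Argument must be a positive integer.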
pyperformance/commands.py
@@ -191,8 +191,8 @@ def cmd_run(options, benchmarks):

     if errors:
         print("%s benchmarks failed:" % len(errors))
-        for name in errors:
-            print("- %s" % name)
+        for name, reason in errors:
+            print("- %s (%s)" % (name, reason))
         print()
         sys.exit(1)
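With (name, reason) pairs, the failure summary now reports a cause per benchmark; for example (hypothetical benchmark names and failures):

    2 benchmarks failed:
    - json_dumps (Benchmark timed out)
    - nbody (Install requirements error)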
pyperformance/requirements/requirements.txt
@@ -10,5 +10,5 @@ psutil==5.9.5
     # via
     #   -r requirements.in
     #   pyperf
-pyperf==2.7.0
+pyperf==2.8.0
     # via -r requirements.in
pyperformance/run.py
@@ -164,7 +164,7 @@ def add_bench(dest_suite, obj):
         bench_venv, bench_runid = benchmarks.get(bench)
         if bench_venv is None:
             print("ERROR: Benchmark %s failed: could not install requirements" % name)
-            errors.append(name)
+            errors.append((name, "Install requirements error"))
             continue
         try:
             result = bench.run(
@@ -174,10 +174,17 @@ def add_bench(dest_suite, obj):
                 venv=bench_venv,
                 verbose=options.verbose,
             )
+        except TimeoutError as exc:
+            print("ERROR: Benchmark %s timed out" % name)
+            errors.append((name, exc))
+        except RuntimeError as exc:
+            print("ERROR: Benchmark %s failed: %s" % (name, exc))
+            traceback.print_exc()
+
         except Exception as exc:
             print("ERROR: Benchmark %s failed: %s" % (name, exc))
             traceback.print_exc()

         else:
             suite = add_bench(suite, result)
@@ -233,5 +240,7 @@ def get_pyperf_opts(options):
         opts.append('--inherit-environ=%s' % ','.join(options.inherit_environ))
     if options.min_time:
         opts.append('--min-time=%s' % options.min_time)
+    if options.timeout:
+        opts.append('--timeout=%s' % options.timeout)

     return opts
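End to end, the validated --timeout value is simply forwarded to pyperf's own --timeout option. A standalone sketch of that forwarding (the options object here is a stand-in for pyperformance's parsed arguments, not the real class):

    from types import SimpleNamespace

    def timeout_opts(options):
        # Mirrors the branch added to get_pyperf_opts() above.
        opts = []
        if options.timeout:
            opts.append('--timeout=%s' % options.timeout)
        return opts

    print(timeout_opts(SimpleNamespace(timeout=300)))   # ['--timeout=300']
    print(timeout_opts(SimpleNamespace(timeout=None)))  # []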
pyperformance/tests/test_commands.py
@@ -399,7 +399,7 @@ def test_compare_single_value(self):
            Performance version: 0.2

            ### call_simple ###
-           7896.0 kB -> 7900.0 kB: 1.00x larger
+           7896.0 KiB -> 7900.0 KiB: 1.00x larger
        ''').lstrip())

    def test_compare_csv(self):
@@ -458,11 +458,11 @@ def test_compare_table_single_value(self):


-           +-------------+-----------+-----------+--------------+-------------------------------------------+
-           | Benchmark   | mem1.json | mem2.json | Change       | Significance                              |
-           +=============+===========+===========+==============+===========================================+
-           | call_simple | 7896.0 kB | 7900.0 kB | 1.00x larger | (benchmark only contains a single value)  |
+           +-------------+------------+------------+--------------+-------------------------------------------+
+           | Benchmark   | mem1.json  | mem2.json  | Change       | Significance                              |
+           +=============+============+============+==============+===========================================+
+           | call_simple | 7896.0 KiB | 7900.0 KiB | 1.00x larger | (benchmark only contains a single value)  |