8000 Pass --timeout flag to pyperf (#354) · python/pyperformance@22b2819 · GitHub
[go: up one dir, main page]

Skip to content

Commit 22b2819

Browse files
authored
Pass --timeout flag to pyperf (#354)
* Implement timeout mechanism for a benchmark run * Address feedback * Pin pyperf to 2.8.0 Update test_commands to reflect a change in how pyperf reports data.
1 parent 0e9646c commit 22b2819

File tree

7 files changed

+40
-14
lines changed

7 files changed

+40
-14
lines changed

doc/usage.rst

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -102,8 +102,8 @@ Usage::
102102
pyperformance run [-h] [-r] [-f] [--debug-single-value] [-v] [-m]
103103
[--affinity CPU_LIST] [-o FILENAME]
104104
[--append FILENAME] [--manifest MANIFEST]
105-
[-b BM_LIST] [--inherit-environ VAR_LIST]
106-
[-p PYTHON]
105+
[--timeout TIMEOUT] [-b BM_LIST]
106+
[--inherit-environ VAR_LIST] [-p PYTHON]
107107

108108
options::
109109

@@ -124,6 +124,8 @@ options::
124124
baseline_python, not changed_python.
125125
--append FILENAME Add runs to an existing file, or create it if
126126
it doesn't exist
127+
--timeout TIMEOUT Specify a timeout in seconds for a single
128+
benchmark run (default: disabled)
127129
--manifest MANIFEST benchmark manifest file to use
128130
-b BM_LIST, --benchmarks BM_LIST
129131
Comma-separated list of benchmarks to run. Can

pyperformance/_benchmark.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -233,7 +233,11 @@ def _run_perf_script(python, runscript, runid, *,
233233
sys.stderr.flush()
234234
sys.stderr.write(stderr)
235235
sys.stderr.flush()
236-
raise RuntimeError("Benchmark died")
236+
# pyperf returns exit code 124 if the benchmark execution times out
237+
if ec == 124:
238+
raise TimeoutError("Benchmark timed out")
239+
else:
240+
raise RuntimeError("Benchmark died")
237241
return pyperf.BenchmarkSuite.load(tmp)
238242

239243

pyperformance/cli.py

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,13 @@ def comma_separated(values):
2525
return list(filter(None, values))
2626

2727

28+
def check_positive(value):
29+
value = int(value)
30+
if value <= 0:
31+
raise argparse.ArgumentTypeError("Argument must a be positive integer.")
32+
return value
33+
34+
2835
def filter_opts(cmd, *, allow_no_benchmarks=False):
2936
cmd.add_argument("--manifest", help="benchmark manifest file to use")
3037

@@ -82,6 +89,10 @@ def parse_args():
8289
help="Use the same number of loops as a previous run "
8390
"(i.e., don't recalibrate). Should be a path to a "
8491
".json file from a previous run.")
92+
cmd.add_argument("--timeout",
93+
help="Specify a timeout in seconds for a single "
94+
"benchmark run (default: disabled)",
95+
type=check_positive)
8596
filter_opts(cmd)
8697

8798
# show

pyperformance/commands.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -191,8 +191,8 @@ def cmd_run(options, benchmarks):
191191

192192
if errors:
193193
print("%s benchmarks failed:" % len(errors))
194-
for name in errors:
195-
print("- %s" % name)
194+
for name, reason in errors:
195+
print("- %s (%s)" % (name, reason))
196196
print()
197197
sys.exit(1)
198198

pyperformance/requirements/requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,5 +10,5 @@ psutil==5.9.5
1010
# via
1111
# -r requirements.in
1212
# pyperf
13-
pyperf==2.7.0
13+
pyperf==2.8.0
1414
# via -r requirements.in

pyperformance/run.py

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -164,7 +164,7 @@ def add_bench(dest_suite, obj):
164164
bench_venv, bench_runid = benchmarks.get(bench)
165165
if bench_venv is None:
166166
print("ERROR: Benchmark %s failed: could not install requirements" % name)
167-
errors.append(name)
167+
errors.append((name, "Install requirements error"))
168168
continue
169169
try:
170170
result = bench.run(
@@ -174,10 +174,17 @@ def add_bench(dest_suite, obj):
174174
venv=bench_venv,
175175
verbose=options.verbose,
176176
)
177+
except TimeoutError as exc:
178+
print("ERROR: Benchmark %s timed out" % name)
179+
errors.append((name, exc))
180+
except RuntimeError as exc:
181+
print("ERROR: Benchmark %s failed: %s" % (name, exc))
182+
traceback.print_exc()
183+
errors.append((name, exc))
177184
except Exception as exc:
178185
print("ERROR: Benchmark %s failed: %s" % (name, exc))
179186
traceback.print_exc()
180-
errors.append(name)
187+
errors.append((name, exc))
181188
else:
182189
suite = add_bench(suite, result)
183190

@@ -233,5 +240,7 @@ def get_pyperf_opts(options):
233240
opts.append('--inherit-environ=%s' % ','.join(options.inherit_environ))
234241
if options.min_time:
235242
opts.append('--min-time=%s' % options.min_time)
243+
if options.timeout:
244+
opts.append('--timeout=%s' % options.timeout)
236245

237246
return opts

pyperformance/tests/test_commands.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -399,7 +399,7 @@ def test_compare_single_value(self):
399399
Performance version: 0.2
400400
401401
### call_simple ###
402-
7896.0 kB -> 7900.0 kB: 1.00x larger
402+
7896.0 KiB -> 7900.0 KiB: 1.00x larger
403403
''').lstrip())
404404

405405
def test_compare_csv(self):
@@ -458,11 +458,11 @@ def test_compare_table_single_value(self):
458458
459459
Performance version: 0.2
460460
461-
+-------------+-----------+-----------+--------------+------------------------------------------+
462-
| Benchmark | mem1.json | mem2.json | Change | Significance |
463-
+=============+===========+===========+==============+==========================================+
464-
| call_simple | 7896.0 kB | 7900.0 kB | 1.00x larger | (benchmark only contains a single value) |
465-
+-------------+-----------+-----------+--------------+------------------------------------------+
461+
+-------------+------------+------------+--------------+------------------------------------------+
462+
| Benchmark | mem1.json | mem2.json | Change | Significance |
463+
+=============+============+============+==============+==========================================+
464+
| call_simple | 7896.0 KiB | 7900.0 KiB | 1.00x larger | (benchmark only contains a single value) |
465+
+-------------+------------+------------+--------------+------------------------------------------+
466466
''').lstrip())
467467

468468

0 commit comments

Comments
 (0)
0