diff --git a/doc/benchmark.conf.sample b/doc/benchmark.conf.sample
index ea69746e..b1224460 100644
--- a/doc/benchmark.conf.sample
+++ b/doc/benchmark.conf.sample
@@ -4,10 +4,10 @@
 # - results of patched Python are written into json_dir/patch/
 json_dir = ~/json
 
-# If True, compile CPython is debug mode (LTO and PGO disabled),
+# If True, compile CPython in debug mode (LTO and PGO disabled),
 # run benchmarks with --debug-single-sample, and disable upload.
 #
-# Use this option used to quickly test a configuration.
+# Use this option to quickly test a configuration.
 debug = False
 
@@ -59,6 +59,9 @@ pkg_only =
 # really understand what you are doing!
 install = True
 
+# Specify '-j' parameter in 'make' command
+jobs = 8
+
 [run_benchmark]
 # Run "sudo python3 -m pyperf system tune" before running benchmarks?
@@ -79,9 +82,6 @@ affinity =
 # disabled.
 upload = False
 
-# Specify '-j' parameter in 'make' command
-jobs = 8
-
 # Configuration to upload results to a Codespeed website
 [upload]
diff --git a/doc/changelog.rst b/doc/changelog.rst
index de5c0126..3611e3c5 100644
--- a/doc/changelog.rst
+++ b/doc/changelog.rst
@@ -1,6 +1,14 @@
 Changelog
 =========
 
+Version 1.11.0 (2024-03-09)
+--------------
+* Add a --same-loops option to the run command to use the exact same number of
+  loops as a previous run (without recalibrating).
+* Bump pyperf to 2.6.3
+* Fix the django_template benchmark for compatibility with Python 3.13
+* Fix benchmark.conf.sample
+
 Version 1.10.0 (2023-10-22)
 --------------
 * Add benchmark for asyncio_webockets
diff --git a/doc/index.rst b/doc/index.rst
index 7a15e4ee..4f7cf71b 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -14,7 +14,7 @@ possible.
 
 pyperformance is distributed under the MIT license.
 
-Documenation:
+Documentation:
 
 .. toctree::
    :maxdepth: 2
diff --git a/doc/usage.rst b/doc/usage.rst
index c7336407..34706144 100644
--- a/doc/usage.rst
+++ b/doc/usage.rst
@@ -140,6 +140,10 @@ options::
   -p PYTHON, --python PYTHON
                         Python executable (default: use running Python)
+  --same-loops SAME_LOOPS
+                        Use the same number of loops as a previous run
+                        (i.e., don't recalibrate). Should be a path to a
+                        .json file from a previous run.
 
 show
 ----
diff --git a/pyperformance/__init__.py b/pyperformance/__init__.py
index 3941313b..b4efa911 100644
--- a/pyperformance/__init__.py
+++ b/pyperformance/__init__.py
@@ -2,7 +2,7 @@
 
 import sys
 
-VERSION = (1, 10, 0)
+VERSION = (1, 11, 0)
 
 __version__ = '.'.join(map(str, VERSION))
diff --git a/pyperformance/cli.py b/pyperformance/cli.py
index 34544a11..3d83772b 100644
--- a/pyperformance/cli.py
+++ b/pyperformance/cli.py
@@ -75,6 +75,10 @@ def parse_args():
     cmd.add_argument("--min-time", metavar="MIN_TIME",
                      help="Minimum duration in seconds of a single "
                           "value, used to calibrate the number of loops")
+    cmd.add_argument("--same-loops",
+                     help="Use the same number of loops as a previous run "
+                          "(i.e., don't recalibrate). Should be a path to a "
+                          ".json file from a previous run.")
     filter_opts(cmd)
 
     # show
diff --git a/pyperformance/compile.py b/pyperformance/compile.py
index 0a7c8332..8f26aded 100644
--- a/pyperformance/compile.py
+++ b/pyperformance/compile.py
@@ -543,6 +543,8 @@ def run_benchmark(self, python=None):
             cmd.extend(('--affinity', self.conf.affinity))
         if self.conf.debug:
             cmd.append('--debug-single-value')
+        if self.conf.same_loops:
+            cmd.append('--same-loops=%s' % self.conf.same_loops)
 
         exitcode = self.run_nocheck(*cmd)
 
         if os.path.exists(self.filename):
@@ -812,6 +814,7 @@ def getint(section, key, default=None):
     conf.benchmarks = getstr('run_benchmark', 'benchmarks', default='')
     conf.affinity = getstr('run_benchmark', 'affinity', default='')
     conf.upload = getboolean('run_benchmark', 'upload', False)
+    conf.same_loops = getfile('run_benchmark', 'same_loops', default='')
 
     # paths
     conf.build_dir = os.path.join(conf.directory, 'build')
diff --git a/pyperformance/data-files/benchmarks/MANIFEST b/pyperformance/data-files/benchmarks/MANIFEST
index b4d22f7b..3210b97f 100644
--- a/pyperformance/data-files/benchmarks/MANIFEST
+++ b/pyperformance/data-files/benchmarks/MANIFEST
@@ -77,6 +77,9 @@ spectral_norm
 sqlalchemy_declarative
 sqlalchemy_imperative
 sqlglot
+sqlglot_parse
+sqlglot_transpile
+sqlglot_optimize
 sqlite_synth
 sympy
 telco
diff --git a/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml b/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml
index 0b66d9d0..19772e54 100644
--- a/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml
+++ b/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml
@@ -1,9 +1,10 @@
 [project]
 name = "pyperformance_bm_django_template"
-requires-python = ">=3.8"
+requires-python = ">=3.10"
 dependencies = [
     "pyperf",
     "django",
+    "legacy-cgi",
 ]
 urls = {repository = "https://github.com/python/pyperformance"}
 dynamic = ["version"]
diff --git a/pyperformance/data-files/benchmarks/bm_django_template/requirements.txt b/pyperformance/data-files/benchmarks/bm_django_template/requirements.txt
index 4a3490bf..4b71dc07 100644
--- a/pyperformance/data-files/benchmarks/bm_django_template/requirements.txt
+++ b/pyperformance/data-files/benchmarks/bm_django_template/requirements.txt
@@ -2,3 +2,4 @@ asgiref==3.3.4
 django==3.2.4
 pytz==2021.1
 sqlparse==0.4.1
+legacy-cgi==2.6
\ No newline at end of file
diff --git a/pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_optimize.toml b/pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_optimize.toml
new file mode 100644
index 00000000..7f59f0b8
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_optimize.toml
@@ -0,0 +1,3 @@
+[tool.pyperformance]
+name = "sqlglot_optimize"
+extra_opts = ["optimize"]
diff --git a/pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_parse.toml b/pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_parse.toml
new file mode 100644
index 00000000..b886688a
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_parse.toml
@@ -0,0 +1,3 @@
+[tool.pyperformance]
+name = "sqlglot_parse"
+extra_opts = ["parse"]
diff --git a/pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_transpile.toml b/pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_transpile.toml
new file mode 100644
index 00000000..25a26a3f
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_sqlglot/bm_sqlglot_transpile.toml
@@ -0,0 +1,3 @@
+[tool.pyperformance]
+name = "sqlglot_transpile"
+extra_opts = ["transpile"]
["transpile"] diff --git a/pyperformance/data-files/benchmarks/bm_sqlglot/pyproject.toml b/pyperformance/data-files/benchmarks/bm_sqlglot/pyproject.toml index 6e59a668..cb8656a2 100644 --- a/pyperformance/data-files/benchmarks/bm_sqlglot/pyproject.toml +++ b/pyperformance/data-files/benchmarks/bm_sqlglot/pyproject.toml @@ -10,3 +10,4 @@ dynamic = ["version"] [tool.pyperformance] name = "sqlglot" +extra_opts = ["normalize"] diff --git a/pyperformance/data-files/benchmarks/bm_sqlglot/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_sqlglot/run_benchmark.py index fa7d9efb..f8fbb79a 100644 --- a/pyperformance/data-files/benchmarks/bm_sqlglot/run_benchmark.py +++ b/pyperformance/data-files/benchmarks/bm_sqlglot/run_benchmark.py @@ -164,10 +164,31 @@ def bench_normalize(loops): return elapsed +BENCHMARKS = { + "parse": bench_parse, + "transpile": bench_transpile, + "optimize": bench_optimize, + "normalize": bench_normalize +} + + +def add_cmdline_args(cmd, args): + cmd.append(args.benchmark) + + +def add_parser_args(parser): + parser.add_argument( + "benchmark", + choices=BENCHMARKS, + help="Which benchmark to run." + ) + + if __name__ == "__main__": - runner = pyperf.Runner() + runner = pyperf.Runner(add_cmdline_args=add_cmdline_args) runner.metadata['description'] = "SQLGlot benchmark" - runner.bench_time_func("sqlglot_parse", bench_parse) - runner.bench_time_func("sqlglot_transpile", bench_transpile) - runner.bench_time_func("sqlglot_optimize", bench_optimize) - runner.bench_time_func("sqlglot_normalize", bench_normalize) + add_parser_args(runner.argparser) + args = runner.parse_args() + benchmark = args.benchmark + + runner.bench_time_func(f"sqlglot_{benchmark}", BENCHMARKS[benchmark]) diff --git a/pyperformance/requirements/requirements.txt b/pyperformance/requirements/requirements.txt index d4aa6631..7936dbb9 100644 --- a/pyperformance/requirements/requirements.txt +++ b/pyperformance/requirements/requirements.txt @@ -10,5 +10,5 @@ psutil==5.9.5 # via # -r requirements.in # pyperf -pyperf==2.6.1 +pyperf==2.6.3 # via -r requirements.in diff --git a/pyperformance/run.py b/pyperformance/run.py index aa2b3744..f572181c 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -1,5 +1,6 @@ from collections import namedtuple import hashlib +import json import sys import time import traceback @@ -50,7 +51,28 @@ def get_run_id(python, bench=None): return RunID(py_id, compat_id, bench, ts) +def get_loops_from_file(filename): + with open(filename) as fd: + data = json.load(fd) + + loops = {} + for benchmark in data["benchmarks"]: + metadata = benchmark.get("metadata", data["metadata"]) + name = metadata["name"] + if name.endswith("_none"): + name = name[:-len("_none")] + if "loops" in metadata: + loops[name] = metadata["loops"] + + return loops + + def run_benchmarks(should_run, python, options): + if options.same_loops is not None: + loops = get_loops_from_file(options.same_loops) + else: + loops = {} + to_run = sorted(should_run) info = _pythoninfo.get_info(python) @@ -136,6 +158,9 @@ def add_bench(dest_suite, obj): return dest_suite + if name in loops: + pyperf_opts.append(f"--loops={loops[name]}") + bench_venv, bench_runid = benchmarks.get(bench) if bench_venv is None: print("ERROR: Benchmark %s failed: could not install requirements" % name)