diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..13c177fa --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,4 @@ +version: 2 +updates: + - package-ecosystem: "pip" + directory: "/pyperformance/requirements" diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 7e9b8243..becf1a59 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -17,6 +17,7 @@ jobs: permissions: pull-requests: write name: ${{ matrix.os }} - ${{ matrix.python }} + if: ${{ github.event_name != 'schedule' || (github.repository == 'python/pyperformance' && github.event_name == 'schedule') }} strategy: fail-fast: false matrix: diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml new file mode 100644 index 00000000..d9fb4a66 --- /dev/null +++ b/.github/workflows/mypy.yml @@ -0,0 +1,29 @@ +name: mypy + +on: + push: + branches: [main] + pull_request: + workflow_dispatch: + +permissions: + contents: read + +env: + FORCE_COLOR: 1 + TERM: xterm-256color # needed for FORCE_COLOR to work on mypy on Ubuntu, see https://github.com/python/mypy/issues/13817 + +jobs: + mypy: + name: Check code with mypy + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + cache: "pip" + cache-dependency-path: "pyproject.toml" + python-version: "3.11" + - run: pip install -e .[dev] + - run: pip freeze --all + - run: mypy diff --git a/MANIFEST.in b/MANIFEST.in index 9f89da35..f41e2b99 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -10,10 +10,11 @@ include doc/*.rst doc/images/*.png doc/images/*.jpg include doc/conf.py doc/Makefile doc/make.bat include pyperformance/*.py -include pyperformance/data-files/requirements.txt +include pyperformance/requirements/requirements.txt include pyperformance/data-files/benchmarks/MANIFEST include pyperformance/data-files/benchmarks/bm_*/*.toml include pyperformance/data-files/benchmarks/bm_*/*.py include 
pyperformance/data-files/benchmarks/bm_*/requirements.txt +include pyperformance/data-files/benchmarks/bm_*/*.pem recursive-include pyperformance/data-files/benchmarks/bm_*/data * recursive-exclude pyperformance/tests * diff --git a/doc/benchmarks.rst b/doc/benchmarks.rst index b8c330fd..b0cff367 100644 --- a/doc/benchmarks.rst +++ b/doc/benchmarks.rst @@ -66,12 +66,15 @@ Async workload benchmark, which calls ``asyncio.gather()`` on a tree (6 levels d * ``async_tree``: no actual async work at any leaf node. * ``async_tree_io``: all leaf nodes simulate async IO workload (async sleep 50ms). -* ``async_tree_memoization``: all leaf nodes simulate async IO workload with 90% of +* ``async_tree_memoization``: all leaf nodes simulate async IO workload with 90% of the data memoized. * ``async_tree_cpu_io_mixed``: half of the leaf nodes simulate CPU-bound workload - (``math.factorial(500)``) and the other half simulate the same workload as the + (``math.factorial(500)``) and the other half simulate the same workload as the ``async_tree_memoization`` variant. +These benchmarks also have an "eager" flavor that uses asyncio eager task factory, +if available. 
+ chameleon --------- diff --git a/doc/changelog.rst b/doc/changelog.rst index bcabdb9c..17dbb6ed 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,24 @@ Changelog ========= +Version 1.0.8 (2023-06-02) +------------- + +* Move the main requirements.txt file to pyperformance/requirements + so that dependabot can only run on that one file +* Update dependencies of benchmarks not to specify setuptools +* On older versions of Python, skip benchmarks that use features + introduced in newer Python versions +* Support ``--inherit-environ`` when reusing a venv +* Use tomllib/tomli over toml +* Update MANIFEST.in to include cert files for asyncio_tcp_ssl benchmark +* Fix undefined variable issue when raising VenvPipInstallFailedError +* Add mypy config; run mypy in CI +* Fix typo of str.partition from _pyproject_toml.py +* Add version of Richards benchmark that uses super() +* Add a benchmark for runtime-checkable protocols +* Extend async tree benchmarks to cover eager task execution + Version 1.0.7 (2023-04-22) ------------- diff --git a/pyperformance/__init__.py b/pyperformance/__init__.py index 647d1072..296e6539 100644 --- a/pyperformance/__init__.py +++ b/pyperformance/__init__.py @@ -2,7 +2,7 @@ import sys -VERSION = (1, 0, 7) +VERSION = (1, 0, 8) __version__ = '.'.join(map(str, VERSION)) @@ -34,10 +34,10 @@ def _is_devel_install(): # This means it creates a link back to the checkout instead # of copying the files. 
try: - import toml + import packaging except ModuleNotFoundError: return False - sitepackages = os.path.dirname(os.path.dirname(toml.__file__)) + sitepackages = os.path.dirname(os.path.dirname(packaging.__file__)) if os.path.isdir(os.path.join(sitepackages, 'pyperformance')): return False if not os.path.exists(os.path.join(sitepackages, 'pyperformance.egg-link')): diff --git a/pyperformance/_benchmark.py b/pyperformance/_benchmark.py index 398b2049..8ca5eaac 100644 --- a/pyperformance/_benchmark.py +++ b/pyperformance/_benchmark.py @@ -13,6 +13,7 @@ import sys import pyperf +from packaging.specifiers import SpecifierSet from . import _utils, _benchmark_metadata @@ -164,9 +165,12 @@ def runscript(self): def extra_opts(self): return self._get_metadata_value('extra_opts', ()) + @property + def python(self): + return SpecifierSet(self._get_metadata_value("python", "")) + # Other metadata keys: # * base - # * python # * dependencies # * requirements @@ -178,7 +182,7 @@ def run(self, python, runid=None, pyperf_opts=None, *, python = venv.python if not runid: - from ..run import get_run_id + from .run import get_run_id runid = get_run_id(python, self) runscript = self.runscript diff --git a/pyperformance/_pyproject_toml.py b/pyperformance/_pyproject_toml.py index e7c6563c..637c5888 100644 --- a/pyperformance/_pyproject_toml.py +++ b/pyperformance/_pyproject_toml.py @@ -18,7 +18,11 @@ import packaging.specifiers import packaging.utils import packaging.version -import toml + +try: + import tomllib # type: ignore[import] # tomllib doesn't exist on 3.7-3.10 +except ImportError: + import tomli as tomllib from ._utils import check_name @@ -40,7 +44,7 @@ def parse_entry_point(text): # See: # * https://packaging.python.org/specifications/entry-points/#data-model # * https://www.python.org/dev/peps/pep-0517/#source-trees - module, sep, qualname = text.parition(':') + module, sep, qualname = text.partition(':') if all(p.isidentifier() for p in module.split('.')): if not sep or 
all(p.isidentifier() for p in qualname.split('.')): return module, qualname @@ -52,7 +56,7 @@ def parse_pyproject_toml(text, rootdir, name=None, *, tools=None, requirefiles=True, ): - data = toml.loads(text) + data = tomllib.loads(text) unused = list(data) for section, normalize in SECTIONS.items(): diff --git a/pyperformance/_venv.py b/pyperformance/_venv.py index b6838c6c..34f6f82c 100644 --- a/pyperformance/_venv.py +++ b/pyperformance/_venv.py @@ -215,9 +215,9 @@ def ensure_pip(self, downloaddir=None, *, installer=True, upgrade=True): upgrade=upgrade, ) if ec != 0: - raise VenvPipInstallFailedError(root, ec) + raise VenvPipInstallFailedError(self.root, ec) elif not _pip.is_pip_installed(self.python, env=self._env): - raise VenvPipInstallFailedError(root, 0, "pip doesn't work") + raise VenvPipInstallFailedError(self.root, 0, "pip doesn't work") if installer: # Upgrade installer dependencies (setuptools, ...) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index 4fe3ca1a..330d6e57 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -241,11 +241,15 @@ def parse_entry(o, s): # Get the selections. 
selected = [] + this_python_version = ".".join(map(str, sys.version_info[:3])) for bench in _benchmark_selections.iter_selections(manifest, parsed_infos): if isinstance(bench, str): logging.warning(f"no benchmark named {bench!r}") continue - selected.append(bench) + # Filter out any benchmarks that can't be run on the Python version we're running + if this_python_version in bench.python: + selected.append(bench) + return selected diff --git a/pyperformance/data-files/benchmarks/MANIFEST b/pyperformance/data-files/benchmarks/MANIFEST index 7d62c5a0..0a338e44 100644 --- a/pyperformance/data-files/benchmarks/MANIFEST +++ b/pyperformance/data-files/benchmarks/MANIFEST @@ -7,6 +7,10 @@ async_tree async_tree_cpu_io_mixed async_tree_io async_tree_memoization +async_tree_eager +async_tree_eager_cpu_io_mixed +async_tree_eager_io +async_tree_eager_memoization asyncio_tcp asyncio_tcp_ssl concurrent_imap @@ -58,6 +62,7 @@ regex_dna regex_effbot regex_v8 richards +richards_super scimark spectral_norm sqlalchemy_declarative @@ -68,6 +73,7 @@ sympy telco tomli_loads tornado_http +typing_runtime_protocols unpack_sequence unpickle unpickle_list diff --git a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_cpu_io_mixed.toml b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_cpu_io_mixed.toml index 7fae0d4a..8303bc74 100644 --- a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_cpu_io_mixed.toml +++ b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_cpu_io_mixed.toml @@ -1,4 +1,3 @@ [tool.pyperformance] name = "async_tree_cpu_io_mixed" extra_opts = ["cpu_io_mixed"] - diff --git a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager.toml b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager.toml new file mode 100644 index 00000000..09d16ee8 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager.toml @@ -0,0 +1,3 @@ +[tool.pyperformance] +name = 
"async_tree_eager" +extra_opts = ["eager"] diff --git a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_cpu_io_mixed.toml b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_cpu_io_mixed.toml new file mode 100644 index 00000000..4766cb23 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_cpu_io_mixed.toml @@ -0,0 +1,3 @@ +[tool.pyperformance] +name = "async_tree_eager_cpu_io_mixed" +extra_opts = ["eager_cpu_io_mixed"] diff --git a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_io.toml b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_io.toml new file mode 100644 index 00000000..de1dfb2a --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_io.toml @@ -0,0 +1,3 @@ +[tool.pyperformance] +name = "async_tree_eager_io" +extra_opts = ["eager_io"] diff --git a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_memoization.toml b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_memoization.toml new file mode 100644 index 00000000..ec199382 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_memoization.toml @@ -0,0 +1,3 @@ +[tool.pyperformance] +name = "async_tree_eager_memoization" +extra_opts = ["eager_memoization"] diff --git a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_io.toml b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_io.toml index 86898965..c8fab8da 100644 --- a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_io.toml +++ b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_io.toml @@ -1,4 +1,3 @@ [tool.pyperformance] name = "async_tree_io" extra_opts = ["io"] - diff --git a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_memoization.toml b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_memoization.toml index e644c4ea..4d394e38 
100644 --- a/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_memoization.toml +++ b/pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_memoization.toml @@ -1,4 +1,3 @@ [tool.pyperformance] name = "async_tree_memoization" extra_opts = ["memoization"] - diff --git a/pyperformance/data-files/benchmarks/bm_async_tree/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_async_tree/run_benchmark.py index ebffd5de..72fc917c 100644 --- a/pyperformance/data-files/benchmarks/bm_async_tree/run_benchmark.py +++ b/pyperformance/data-files/benchmarks/bm_async_tree/run_benchmark.py @@ -6,11 +6,14 @@ 1) "none": No actual async work in the async tree. 2) "io": All leaf nodes simulate async IO workload (async sleep 50ms). -3) "memoization": All leaf nodes simulate async IO workload with 90% of +3) "memoization": All leaf nodes simulate async IO workload with 90% of the data memoized -4) "cpu_io_mixed": Half of the leaf nodes simulate CPU-bound workload and - the other half simulate the same workload as the +4) "cpu_io_mixed": Half of the leaf nodes simulate CPU-bound workload and + the other half simulate the same workload as the "memoization" variant. + +All variants also have an "eager" flavor that uses +the asyncio eager task factory (if available). 
""" @@ -57,16 +60,32 @@ async def run(self): await self.recurse(NUM_RECURSE_LEVELS) +class EagerMixin: + async def run(self): + loop = asyncio.get_running_loop() + if hasattr(asyncio, 'eager_task_factory'): + loop.set_task_factory(asyncio.eager_task_factory) + return await super().run() + + class NoneAsyncTree(AsyncTree): async def workload_func(self): return +class EagerAsyncTree(EagerMixin, NoneAsyncTree): + pass + + class IOAsyncTree(AsyncTree): async def workload_func(self): await self.mock_io_call() +class EagerIOAsyncTree(EagerMixin, IOAsyncTree): + pass + + class MemoizationAsyncTree(AsyncTree): async def workload_func(self): # deterministic random, seed set in AsyncTree.__init__() @@ -82,6 +101,10 @@ async def workload_func(self): return data +class EagerMemoizationAsyncTree(EagerMixin, MemoizationAsyncTree): + pass + + class CpuIoMixedAsyncTree(MemoizationAsyncTree): async def workload_func(self): # deterministic random, seed set in AsyncTree.__init__() @@ -92,6 +115,10 @@ async def workload_func(self): return await MemoizationAsyncTree.workload_func(self) +class EagerCpuIoMixedAsyncTree(EagerMixin, CpuIoMixedAsyncTree): + pass + + def add_metadata(runner): runner.metadata["description"] = "Async tree workloads." runner.metadata["async_tree_recurse_levels"] = NUM_RECURSE_LEVELS @@ -115,10 +142,10 @@ def add_parser_args(parser): Determines which benchmark to run. Options: 1) "none": No actual async work in the async tree. 2) "io": All leaf nodes simulate async IO workload (async sleep 50ms). -3) "memoization": All leaf nodes simulate async IO workload with 90% of +3) "memoization": All leaf nodes simulate async IO workload with 90% of the data memoized -4) "cpu_io_mixed": Half of the leaf nodes simulate CPU-bound workload and - the other half simulate the same workload as the +4) "cpu_io_mixed": Half of the leaf nodes simulate CPU-bound workload and + the other half simulate the same workload as the "memoization" variant. 
""", ) @@ -126,9 +153,13 @@ def add_parser_args(parser): BENCHMARKS = { "none": NoneAsyncTree, + "eager": EagerAsyncTree, "io": IOAsyncTree, + "eager_io": EagerIOAsyncTree, "memoization": MemoizationAsyncTree, + "eager_memoization": EagerMemoizationAsyncTree, "cpu_io_mixed": CpuIoMixedAsyncTree, + "eager_cpu_io_mixed": EagerCpuIoMixedAsyncTree, } @@ -142,4 +173,3 @@ def add_parser_args(parser): async_tree_class = BENCHMARKS[benchmark] async_tree = async_tree_class() runner.bench_async_func(f"async_tree_{benchmark}", async_tree.run) - diff --git a/pyperformance/data-files/benchmarks/bm_django_template/requirements.txt b/pyperformance/data-files/benchmarks/bm_django_template/requirements.txt index a22c8e05..4a3490bf 100644 --- a/pyperformance/data-files/benchmarks/bm_django_template/requirements.txt +++ b/pyperformance/data-files/benchmarks/bm_django_template/requirements.txt @@ -1,5 +1,4 @@ asgiref==3.3.4 django==3.2.4 pytz==2021.1 -setuptools==65.6.3 sqlparse==0.4.1 diff --git a/pyperformance/data-files/benchmarks/bm_richards_super/pyproject.toml b/pyperformance/data-files/benchmarks/bm_richards_super/pyproject.toml new file mode 100644 index 00000000..3157a818 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_richards_super/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_richards_super" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "richards_super" diff --git a/pyperformance/data-files/benchmarks/bm_richards_super/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_richards_super/run_benchmark.py new file mode 100644 index 00000000..271ebfb3 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_richards_super/run_benchmark.py @@ -0,0 +1,430 @@ +""" +based on a Java version: + Based on original version written in BCPL by Dr Martin Richards + in 1981 at Cambridge University Computer Laboratory, 
England + and a C++ version derived from a Smalltalk version written by + L Peter Deutsch. + Java version: Copyright (C) 1995 Sun Microsystems, Inc. + Translation from C++, Mario Wolczko + Outer loop added by Alex Jacoby + super() usage added by Carl Meyer +""" + +import pyperf + + +# Task IDs +I_IDLE = 1 +I_WORK = 2 +I_HANDLERA = 3 +I_HANDLERB = 4 +I_DEVA = 5 +I_DEVB = 6 + +# Packet types +K_DEV = 1000 +K_WORK = 1001 + +# Packet + +BUFSIZE = 4 + +BUFSIZE_RANGE = range(BUFSIZE) + + +class Packet: + + def __init__(self, l, i, k): + self.link = l + self.ident = i + self.kind = k + self.datum = 0 + self.data = [0] * BUFSIZE + + def append_to(self, lst): + self.link = None + if lst is None: + return self + else: + p = lst + next = p.link + while next is not None: + p = next + next = p.link + p.link = self + return lst + +# Task Records + + +class TaskRec: + pass + + +class DeviceTaskRec(TaskRec): + + def __init__(self): + self.pending = None + + +class IdleTaskRec(TaskRec): + + def __init__(self): + self.control = 1 + self.count = 10000 + + +class HandlerTaskRec(TaskRec): + + def __init__(self): + self.work_in = None + self.device_in = None + + def workInAdd(self, p): + self.work_in = p.append_to(self.work_in) + return self.work_in + + def deviceInAdd(self, p): + self.device_in = p.append_to(self.device_in) + return self.device_in + + +class WorkerTaskRec(TaskRec): + + def __init__(self): + self.destination = I_HANDLERA + self.count = 0 +# Task + + +class TaskState: + + def __init__(self): + self.packet_pending = True + self.task_waiting = False + self.task_holding = False + + def packetPending(self): + self.packet_pending = True + self.task_waiting = False + self.task_holding = False + return self + + def waiting(self): + self.packet_pending = False + self.task_waiting = True + self.task_holding = False + return self + + def running(self): + self.packet_pending = False + self.task_waiting = False + self.task_holding = False + return self + + def 
waitingWithPacket(self): + self.packet_pending = True + self.task_waiting = True + self.task_holding = False + return self + + def isPacketPending(self): + return self.packet_pending + + def isTaskWaiting(self): + return self.task_waiting + + def isTaskHolding(self): + return self.task_holding + + def isTaskHoldingOrWaiting(self): + return self.task_holding or (not self.packet_pending and self.task_waiting) + + def isWaitingWithPacket(self): + return self.packet_pending and self.task_waiting and not self.task_holding + + +tracing = False +layout = 0 + + +def trace(a): + global layout + layout -= 1 + if layout <= 0: + print() + layout = 50 + print(a, end='') + + +TASKTABSIZE = 10 + + +class TaskWorkArea: + + def __init__(self): + self.taskTab = [None] * TASKTABSIZE + + self.taskList = None + + self.holdCount = 0 + self.qpktCount = 0 + + +taskWorkArea = TaskWorkArea() + + +class Task(TaskState): + + def __init__(self, i, p, w, initialState, r): + self.link = taskWorkArea.taskList + self.ident = i + self.priority = p + self.input = w + + self.packet_pending = initialState.isPacketPending() + self.task_waiting = initialState.isTaskWaiting() + self.task_holding = initialState.isTaskHolding() + + self.handle = r + + taskWorkArea.taskList = self + taskWorkArea.taskTab[i] = self + + self.last_packet = None + + def fn(self, pkt, r): + self.last_packet = pkt + + def addPacket(self, p, old): + if self.input is None: + self.input = p + self.packet_pending = True + if self.priority > old.priority: + return self + else: + p.append_to(self.input) + return old + + def runTask(self): + if self.isWaitingWithPacket(): + msg = self.input + self.input = msg.link + if self.input is None: + self.running() + else: + self.packetPending() + else: + msg = None + + return self.fn(msg, self.handle) + + def waitTask(self): + self.task_waiting = True + return self + + def hold(self): + taskWorkArea.holdCount += 1 + self.task_holding = True + return self.link + + def release(self, i): + t = 
self.findtcb(i) + t.task_holding = False + if t.priority > self.priority: + return t + else: + return self + + def qpkt(self, pkt): + t = self.findtcb(pkt.ident) + taskWorkArea.qpktCount += 1 + pkt.link = None + pkt.ident = self.ident + return t.addPacket(pkt, self) + + def findtcb(self, id): + t = taskWorkArea.taskTab[id] + if t is None: + raise Exception("Bad task id %d" % id) + return t + + +# DeviceTask + + +class DeviceTask(Task): + + def __init__(self, i, p, w, s, r): + super().__init__(i, p, w, s, r) + + def fn(self, pkt, r): + d = r + assert isinstance(d, DeviceTaskRec) + super().fn(pkt, r) + if pkt is None: + pkt = d.pending + if pkt is None: + return self.waitTask() + else: + d.pending = None + return self.qpkt(pkt) + else: + d.pending = pkt + if tracing: + trace(pkt.datum) + return self.hold() + + +class HandlerTask(Task): + + def __init__(self, i, p, w, s, r): + super().__init__(i, p, w, s, r) + + def fn(self, pkt, r): + h = r + assert isinstance(h, HandlerTaskRec) + super().fn(pkt, r) + if pkt is not None: + if pkt.kind == K_WORK: + h.workInAdd(pkt) + else: + h.deviceInAdd(pkt) + work = h.work_in + if work is None: + return self.waitTask() + count = work.datum + if count >= BUFSIZE: + h.work_in = work.link + return self.qpkt(work) + + dev = h.device_in + if dev is None: + return self.waitTask() + + h.device_in = dev.link + dev.datum = work.data[count] + work.datum = count + 1 + return self.qpkt(dev) + +# IdleTask + + +class IdleTask(Task): + + def __init__(self, i, p, w, s, r): + super().__init__(i, 0, None, s, r) + + def fn(self, pkt, r): + i = r + assert isinstance(i, IdleTaskRec) + super().fn(pkt, r) + i.count -= 1 + if i.count == 0: + return self.hold() + elif i.control & 1 == 0: + i.control //= 2 + return self.release(I_DEVA) + else: + i.control = i.control // 2 ^ 0xd008 + return self.release(I_DEVB) + + +# WorkTask + + +A = ord('A') + + +class WorkTask(Task): + + def __init__(self, i, p, w, s, r): + super().__init__(i, p, w, s, r) + + def 
fn(self, pkt, r): + w = r + assert isinstance(w, WorkerTaskRec) + super().fn(pkt, r) + if pkt is None: + return self.waitTask() + + if w.destination == I_HANDLERA: + dest = I_HANDLERB + else: + dest = I_HANDLERA + + w.destination = dest + pkt.ident = dest + pkt.datum = 0 + + for i in BUFSIZE_RANGE: # range(BUFSIZE) + w.count += 1 + if w.count > 26: + w.count = 1 + pkt.data[i] = A + w.count - 1 + + return self.qpkt(pkt) + + +def schedule(): + t = taskWorkArea.taskList + while t is not None: + if tracing: + print("tcb =", t.ident) + + if t.isTaskHoldingOrWaiting(): + t = t.link + else: + if tracing: + trace(chr(ord("0") + t.ident)) + t = t.runTask() + + +class Richards: + + def run(self, iterations): + for i in range(iterations): + taskWorkArea.holdCount = 0 + taskWorkArea.qpktCount = 0 + + IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec()) + + wkq = Packet(None, 0, K_WORK) + wkq = Packet(wkq, 0, K_WORK) + WorkTask(I_WORK, 1000, wkq, TaskState( + ).waitingWithPacket(), WorkerTaskRec()) + + wkq = Packet(None, I_DEVA, K_DEV) + wkq = Packet(wkq, I_DEVA, K_DEV) + wkq = Packet(wkq, I_DEVA, K_DEV) + HandlerTask(I_HANDLERA, 2000, wkq, TaskState( + ).waitingWithPacket(), HandlerTaskRec()) + + wkq = Packet(None, I_DEVB, K_DEV) + wkq = Packet(wkq, I_DEVB, K_DEV) + wkq = Packet(wkq, I_DEVB, K_DEV) + HandlerTask(I_HANDLERB, 3000, wkq, TaskState( + ).waitingWithPacket(), HandlerTaskRec()) + + wkq = None + DeviceTask(I_DEVA, 4000, wkq, + TaskState().waiting(), DeviceTaskRec()) + DeviceTask(I_DEVB, 5000, wkq, + TaskState().waiting(), DeviceTaskRec()) + + schedule() + + if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246: + pass + else: + return False + + return True + + +if __name__ == "__main__": + runner = pyperf.Runner() + runner.metadata['description'] = "The Richards benchmark, with super()" + + richard = Richards() + runner.bench_func('richards_super', richard.run, 1) diff --git a/pyperformance/data-files/benchmarks/bm_sympy/requirements.txt 
b/pyperformance/data-files/benchmarks/bm_sympy/requirements.txt index da67e057..652d404d 100644 --- a/pyperformance/data-files/benchmarks/bm_sympy/requirements.txt +++ b/pyperformance/data-files/benchmarks/bm_sympy/requirements.txt @@ -1,3 +1,2 @@ mpmath==1.2.1 -setuptools==65.6.3 sympy==1.8 diff --git a/pyperformance/data-files/benchmarks/bm_typing_runtime_protocols/pyproject.toml b/pyperformance/data-files/benchmarks/bm_typing_runtime_protocols/pyproject.toml new file mode 100644 index 00000000..a69cf598 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_typing_runtime_protocols/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "pyperformance_bm_typing_runtime_protocols" +requires-python = ">=3.8" +dependencies = ["pyperf"] +urls = {repository = "https://github.com/python/pyperformance"} +dynamic = ["version"] + +[tool.pyperformance] +name = "typing_runtime_protocols" diff --git a/pyperformance/data-files/benchmarks/bm_typing_runtime_protocols/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_typing_runtime_protocols/run_benchmark.py new file mode 100644 index 00000000..dd8e7ef3 --- /dev/null +++ b/pyperformance/data-files/benchmarks/bm_typing_runtime_protocols/run_benchmark.py @@ -0,0 +1,180 @@ +""" +Test the performance of isinstance() checks against runtime-checkable protocols. + +For programmes that make extensive use of this feature, +these calls can easily become a bottleneck. +See https://github.com/python/cpython/issues/74690 + +The following situations all exercise different code paths +in typing._ProtocolMeta.__instancecheck__, +so each is tested in this benchmark: + + (1) Comparing an instance of a class that directly inherits + from a protocol to that protocol. + (2) Comparing an instance of a class that fulfils the interface + of a protocol using instance attributes. + (3) Comparing an instance of a class that fulfils the interface + of a protocol using class attributes. 
+ (4) Comparing an instance of a class that fulfils the interface + of a protocol using properties. + +Protocols with callable and non-callable members also +exercise different code paths in _ProtocolMeta.__instancecheck__, +so are also tested separately. +""" + +import time +from typing import Protocol, runtime_checkable + +import pyperf + + +################################################## +# Protocols to call isinstance() against +################################################## + + +@runtime_checkable +class HasX(Protocol): + """A runtime-checkable protocol with a single non-callable member""" + x: int + +@runtime_checkable +class HasManyAttributes(Protocol): + """A runtime-checkable protocol with many non-callable members""" + a: int + b: int + c: int + d: int + e: int + +@runtime_checkable +class SupportsInt(Protocol): + """A runtime-checkable protocol with a single callable member""" + def __int__(self) -> int: ... + +@runtime_checkable +class SupportsManyMethods(Protocol): + """A runtime-checkable protocol with many callable members""" + def one(self) -> int: ... + def two(self) -> str: ... + def three(self) -> bytes: ... + def four(self) -> memoryview: ... + def five(self) -> bytearray: ... + +@runtime_checkable +class SupportsIntAndX(Protocol): + """A runtime-checkable protocol with a mix of callable and non-callable members""" + x: int + def __int__(self) -> int: ... 
+ + +################################################## +# Classes for comparing against the protocols +################################################## + + +class Empty: + """Empty class with no attributes""" + +class PropertyX: + """Class with a property x""" + @property + def x(self) -> int: return 42 + +class HasIntMethod: + """Class with an __int__ method""" + def __int__(self): return 42 + +class HasManyMethods: + """Class with many methods""" + def one(self): return 42 + def two(self): return "42" + def three(self): return b"42" + def four(self): return memoryview(b"42") + def five(self): return bytearray(b"42") + +class PropertyXWithInt: + """Class with a property x and an __int__ method""" + @property + def x(self) -> int: return 42 + def __int__(self): return 42 + +class ClassVarX: + """Class with a ClassVar x""" + x = 42 + +class ClassVarXWithInt: + """Class with a ClassVar x and an __int__ method""" + x = 42 + def __int__(self): return 42 + +class InstanceVarX: + """Class with an instance var x""" + def __init__(self): + self.x = 42 + +class ManyInstanceVars: + """Class with many instance vars""" + def __init__(self): + for attr in 'abcde': + setattr(self, attr, 42) + +class InstanceVarXWithInt: + """Class with an instance var x and an __int__ method""" + def __init__(self): + self.x = 42 + def __int__(self): + return 42 + +class NominalX(HasX): + """Class that explicitly subclasses HasX""" + def __init__(self): + self.x = 42 + +class NominalSupportsInt(SupportsInt): + """Class that explicitly subclasses SupportsInt""" + def __int__(self): + return 42 + +class NominalXWithInt(SupportsIntAndX): + """Class that explicitly subclasses SupportsIntAndX""" + def __init__(self): + self.x = 42 + + +################################################## +# Time to test the performance of isinstance()! 
+################################################## + + +def bench_protocols(loops: int) -> float: + protocols = [ + HasX, HasManyAttributes, SupportsInt, SupportsManyMethods, SupportsIntAndX + ] + instances = [ + cls() + for cls in ( + Empty, PropertyX, HasIntMethod, HasManyMethods, PropertyXWithInt, + ClassVarX, ClassVarXWithInt, InstanceVarX, ManyInstanceVars, + InstanceVarXWithInt, NominalX, NominalSupportsInt, NominalXWithInt + ) + ] + + t0 = time.perf_counter() + + for _ in range(loops): + for protocol in protocols: + for instance in instances: + isinstance(instance, protocol) + + return time.perf_counter() - t0 + + +if __name__ == "__main__": + runner = pyperf.Runner() + runner.metadata["description"] = ( + "Test the performance of isinstance() checks " + "against runtime-checkable protocols" + ) + runner.bench_time_func("typing_runtime_protocols", bench_protocols) diff --git a/pyperformance/data-files/requirements.txt b/pyperformance/requirements/requirements.txt similarity index 55% rename from pyperformance/data-files/requirements.txt rename to pyperformance/requirements/requirements.txt index 9c1bbf8f..988c67b1 100644 --- a/pyperformance/data-files/requirements.txt +++ b/pyperformance/requirements/requirements.txt @@ -2,17 +2,13 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --output-file=requirements.txt requirements.in +# pip-compile --output-file=pyperformance/requirements/requirements.txt requirements.in # -packaging==21.3 +packaging==23.1 # via -r requirements.in -psutil==5.9.0 +psutil==5.9.5 # via # -r requirements.in # pyperf -pyparsing==3.0.8 - # via packaging pyperf==2.6.0 # via -r requirements.in -toml==0.10.2 - # via -r requirements.in diff --git a/pyperformance/tests/data/bm_local_wheel/pyproject.toml b/pyperformance/tests/data/bm_local_wheel/pyproject.toml index a3191642..2710ced7 100644 --- a/pyperformance/tests/data/bm_local_wheel/pyproject.toml +++ 
b/pyperformance/tests/data/bm_local_wheel/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "pyperformance_bm_local_wheel" -requires-python = ">=3.8" +requires-python = ">=3.7" dependencies = ["pyperf"] urls = {repository = "https://github.com/python/pyperformance"} version = "1.0" diff --git a/pyperformance/venv.py b/pyperformance/venv.py index 5a703024..17bd1bee 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -6,7 +6,9 @@ from . import _utils, _pip, _venv -REQUIREMENTS_FILE = os.path.join(pyperformance.DATA_DIR, 'requirements.txt') +REQUIREMENTS_FILE = os.path.join( + os.path.dirname(__file__), 'requirements', 'requirements.txt' +) PYPERF_OPTIONAL = ['psutil'] @@ -171,6 +173,7 @@ def create(cls, root=None, python=None, *, @classmethod def ensure(cls, root, python=None, *, + inherit_environ=None, upgrade=False, **kwargs ): @@ -184,6 +187,7 @@ def ensure(cls, root, python=None, *, if exists: self = super().ensure(root) + self.inherit_environ = inherit_environ if upgrade: self.upgrade_pip() else: @@ -193,6 +197,7 @@ def ensure(cls, root, python=None, *, return cls.create( root, python, + inherit_environ=inherit_environ, upgrade=upgrade, **kwargs ) diff --git a/pyproject.toml b/pyproject.toml index f69c39c4..e5ea4dd0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -59,11 +59,17 @@ classifiers = [ 'Programming Language :: Python', ] requires-python = ">=3.7" -dependencies = ["pyperf", "toml", "packaging"] +dependencies = [ + "pyperf", + "tomli; python_version < '3.11'", + "packaging", +] [project.optional-dependencies] dev = [ 'tox', + 'mypy==1.2.0', + 'tomli', # Needed even on 3.11+ for typechecking with mypy ] [project.scripts] @@ -77,3 +83,24 @@ find = {} # Scanning implicit namespaces is active by default [tool.setuptools.dynamic] version = {attr = "pyperformance.__version__"} + +[tool.mypy] +python_version = "3.7" +pretty = true +enable_error_code = "ignore-without-code" +disallow_any_generics = true +strict_concatenate = true 
+warn_redundant_casts = true +warn_unused_ignores = true +warn_unused_configs = true +files = [ + 'pyperformance/', +] +exclude = [ + 'pyperformance/data-files/', + 'pyperformance/tests/' +] + +[[tool.mypy.overrides]] +module = "pyperf" +ignore_missing_imports = true diff --git a/requirements.in b/requirements.in index 0d76bbd8..55f432ef 100644 --- a/requirements.in +++ b/requirements.in @@ -13,7 +13,7 @@ pyperf # for benchmark metadata: packaging -toml +tomli; python_version < '3.11' # Optional dependencies diff --git a/requirements.txt b/requirements.txt index 4ce56a00..b340e549 120000 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1 @@ -pyperformance/data-files/requirements.txt \ No newline at end of file +pyperformance/requirements/requirements.txt \ No newline at end of file diff --git a/tox.ini b/tox.ini index a38f48b2..f9030b3b 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py3, pypy3, doc, pep8 +envlist = py3, pypy3, doc, pep8, mypy isolated_build = True [testenv] @@ -27,3 +27,10 @@ commands = flake8 pyperformance runtests.py setup.py # E741 ambiguous variable name 'l' (don't modify benhcmarks just for that) # W503 line break before binary operator ignore = E501,E741,W503 + +[testenv:mypy] +basepython = python3 +deps= + mypy + tomli +commands = mypy