diff --git a/.editorconfig b/.editorconfig index f560af744..ae430ffd6 100644 --- a/.editorconfig +++ b/.editorconfig @@ -18,6 +18,9 @@ trim_trailing_whitespace = true [*.py] max_line_length = 100 +[*.pyi] +max_line_length = 100 + [*.c] max_line_length = 100 diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 5c471c32d..0721ddc0c 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -18,6 +18,7 @@ defaults: env: PIP_DISABLE_PIP_VERSION_CHECK: 1 + FORCE_COLOR: 1 # Get colored pytest output permissions: contents: read @@ -46,15 +47,25 @@ jobs: - "3.8" - "3.9" - "3.10" - - "3.11.0-rc.2" + - "3.11" - "pypy-3.7" + - "pypy-3.8" + - "pypy-3.9" exclude: # Windows PyPy doesn't seem to work? - os: windows-latest python-version: "pypy-3.7" + - os: windows-latest + python-version: "pypy-3.8" + - os: windows-latest + python-version: "pypy-3.9" # Mac PyPy always takes the longest, and doesn't add anything. - os: macos-latest python-version: "pypy-3.7" + - os: macos-latest + python-version: "pypy-3.8" + - os: macos-latest + python-version: "pypy-3.9" # If one job fails, stop the whole thing. fail-fast: true @@ -84,6 +95,13 @@ jobs: set -xe python -m tox + - name: "Combine data" + env: + COVERAGE_RCFILE: "metacov.ini" + run: | + python -m coverage combine + mv .metacov .metacov.${{ matrix.python-version }}.${{ matrix.os }} + - name: "Upload coverage data" uses: actions/upload-artifact@v3 with: @@ -94,6 +112,10 @@ jobs: name: "Combine coverage data" needs: coverage runs-on: ubuntu-latest + outputs: + total: ${{ steps.total.outputs.total }} + env: + COVERAGE_RCFILE: "metacov.ini" steps: - name: "Check out the repo" @@ -102,7 +124,7 @@ jobs: - name: "Set up Python" uses: "actions/setup-python@v4" with: - python-version: "3.8" + python-version: "3.7" # Minimum of PYVERSIONS cache: pip cache-dependency-path: 'requirements/*.pip' @@ -122,13 +144,10 @@ jobs: - name: "Combine and report" id: combine env: - COVERAGE_RCFILE: "metacov.ini" - COVERAGE_METAFILE: ".metacov" COVERAGE_CONTEXT: "yes" run: | set -xe - python -m igor combine_html - python -m coverage json + python igor.py combine_html - name: "Upload HTML report" uses: actions/upload-artifact@v3 @@ -136,11 +155,10 @@ jobs: name: html_report path: htmlcov - - name: "Upload JSON report" - uses: actions/upload-artifact@v3 - with: - name: json_report - path: coverage.json + - name: "Get total" + id: total + run: | + echo "total=$(python -m coverage report --format=total)" >> $GITHUB_OUTPUT publish: name: "Publish coverage report" @@ -148,45 +166,46 @@ jobs: runs-on: ubuntu-latest steps: - - name: "Checkout reports repo" - run: | - set -xe - git clone --depth=1 --no-checkout https://${{ secrets.COVERAGE_REPORTS_TOKEN }}@github.com/nedbat/coverage-reports reports_repo - cd reports_repo - git sparse-checkout init --cone - git sparse-checkout set --skip-checks '/*' '!/reports' - git config user.name nedbat - git config user.email ned@nedbatchelder.com - git checkout main - - - name: "Download coverage JSON report" - uses: actions/download-artifact@v3 - with: - name: json_report - - name: "Compute info for later steps" id: info run: | set -xe - export TOTAL=$(python -c "import json;print(json.load(open('coverage.json'))['totals']['percent_covered_display'])") export SHA10=$(echo ${{ github.sha }} | cut -c 1-10) export SLUG=$(date +'%Y%m%d')_$SHA10 export REPORT_DIR=reports/$SLUG/htmlcov export REF="${{ github.ref }}" - echo "total=$TOTAL" >> $GITHUB_ENV + echo "total=${{ needs.combine.outputs.total }}" >> 
$GITHUB_ENV echo "sha10=$SHA10" >> $GITHUB_ENV echo "slug=$SLUG" >> $GITHUB_ENV echo "report_dir=$REPORT_DIR" >> $GITHUB_ENV echo "url=https://nedbat.github.io/coverage-reports/$REPORT_DIR" >> $GITHUB_ENV echo "branch=${REF#refs/heads/}" >> $GITHUB_ENV + - name: "Summarize" + run: | + echo '### Total coverage: ${{ env.total }}%' >> $GITHUB_STEP_SUMMARY + + - name: "Checkout reports repo" + if: ${{ github.ref == 'refs/heads/master' }} + run: | + set -xe + git clone --depth=1 --no-checkout https://${{ secrets.COVERAGE_REPORTS_TOKEN }}@github.com/nedbat/coverage-reports reports_repo + cd reports_repo + git sparse-checkout init --cone + git sparse-checkout set --skip-checks '/*' '!/reports' + git config user.name nedbat + git config user.email ned@nedbatchelder.com + git checkout main + - name: "Download coverage HTML report" + if: ${{ github.ref == 'refs/heads/master' }} uses: actions/download-artifact@v3 with: name: html_report path: reports_repo/${{ env.report_dir }} - name: "Push to report repo" + if: ${{ github.ref == 'refs/heads/master' }} env: COMMIT_MESSAGE: ${{ github.event.head_commit.message }} run: | @@ -207,11 +226,12 @@ jobs: git add ${{ env.report_dir }} latest.html git commit --file=../commit.txt git push + echo '[${{ env.url }}](${{ env.url }})' >> $GITHUB_STEP_SUMMARY - name: "Create badge" + if: ${{ github.ref == 'refs/heads/master' }} # https://gist.githubusercontent.com/nedbat/8c6980f77988a327348f9b02bbaf67f5 - # uses: schneegans/dynamic-badges-action@v1.4.0 - uses: schneegans/dynamic-badges-action@54d929a33e7521ab6bf19d323d28fb7b876c53f7 + uses: schneegans/dynamic-badges-action@5d424ad4060f866e4d1dab8f8da0456e6b1c4f56 with: auth: ${{ secrets.METACOV_GIST_SECRET }} gistID: 8c6980f77988a327348f9b02bbaf67f5 @@ -221,8 +241,3 @@ jobs: minColorRange: 60 maxColorRange: 95 valColorRange: ${{ env.total }} - - - name: "Create summary" - run: | - echo '### Total coverage: ${{ env.total }}%' >> $GITHUB_STEP_SUMMARY - echo '[${{ env.url }}](${{ env.url }})' >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 845c763e8..34b14c395 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -17,4 +17,4 @@ jobs: - name: 'Checkout Repository' uses: actions/checkout@v3 - name: 'Dependency Review' - uses: actions/dependency-review-action@v2 + uses: actions/dependency-review-action@v3 diff --git a/.github/workflows/kit.yml b/.github/workflows/kit.yml index 9ee25fdb4..fd1b3a307 100644 --- a/.github/workflows/kit.yml +++ b/.github/workflows/kit.yml @@ -77,11 +77,13 @@ jobs: # } # # PYVERSIONS. Available versions: # # https://github.com/actions/python-versions/blob/main/versions-manifest.json + # # Include prereleases if they are at rc stage. + # # PyPy versions are handled further below in the "pypy" step. 
# pys = ["cp37", "cp38", "cp39", "cp310", "cp311"] # # # Some OS/arch combinations need overrides for the Python versions: # os_arch_pys = { - # ("macos", "arm64"): ["cp38", "cp39", "cp310"], + # ("macos", "arm64"): ["cp38", "cp39", "cp310", "cp311"], # } # # #----- ^^^ ---------------------- ^^^ ----- @@ -115,6 +117,7 @@ jobs: - {"os": "macos", "py": "cp38", "arch": "arm64"} - {"os": "macos", "py": "cp39", "arch": "arm64"} - {"os": "macos", "py": "cp310", "arch": "arm64"} + - {"os": "macos", "py": "cp311", "arch": "arm64"} - {"os": "macos", "py": "cp37", "arch": "x86_64"} - {"os": "macos", "py": "cp38", "arch": "x86_64"} - {"os": "macos", "py": "cp39", "arch": "x86_64"} @@ -130,14 +133,13 @@ jobs: - {"os": "windows", "py": "cp39", "arch": "AMD64"} - {"os": "windows", "py": "cp310", "arch": "AMD64"} - {"os": "windows", "py": "cp311", "arch": "AMD64"} - # [[[end]]] (checksum: 428e5138336453464dde968cc3149f4f) + # [[[end]]] (checksum: ded8a9f214bf59776562d91ae6828863) fail-fast: false steps: - name: "Setup QEMU" if: matrix.os == 'ubuntu' - # uses: docker/setup-qemu-action@v2 - uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 + uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 with: platforms: arm64 @@ -216,8 +218,7 @@ jobs: - name: "Install PyPy" uses: actions/setup-python@v4 with: - # PYVERSIONS - python-version: "pypy-3.7" + python-version: "pypy-3.7" # Minimum of PyPy PYVERSIONS cache: pip cache-dependency-path: 'requirements/*.pip' @@ -227,9 +228,9 @@ jobs: - name: "Build wheel" run: | - # One wheel works for all PyPy versions. + # One wheel works for all PyPy versions. PYVERSIONS # yes, this is weird syntax: https://github.com/pypa/build/issues/202 - pypy3 -m build -w -C="--global-option=--python-tag" -C="--global-option=pp36.pp37.pp38" + pypy3 -m build -w -C="--global-option=--python-tag" -C="--global-option=pp37.pp38.pp39" - name: "List wheels" run: | diff --git a/.github/workflows/python-nightly.yml b/.github/workflows/python-nightly.yml index 5743dfbb6..88b2b3897 100644 --- a/.github/workflows/python-nightly.yml +++ b/.github/workflows/python-nightly.yml @@ -32,7 +32,12 @@ concurrency: jobs: tests: name: "Python ${{ matrix.python-version }}" - runs-on: ubuntu-latest + # Choose a recent Ubuntu that deadsnakes still builds all the versions for. + # For example, deadsnakes doesn't provide 3.10 nightly for 22.04 (jammy) + # because jammy ships 3.10, and deadsnakes doesn't want to clobber it. + # https://launchpad.net/~deadsnakes/+archive/ubuntu/nightly/+packages + # https://github.com/deadsnakes/issues/issues/234 + runs-on: ubuntu-20.04 strategy: matrix: @@ -41,9 +46,9 @@ jobs: # tox.ini so that tox will run properly. 
PYVERSIONS # Available versions: # https://launchpad.net/~deadsnakes/+archive/ubuntu/nightly/+packages - - "3.9-dev" - "3.10-dev" - "3.11-dev" + - "3.12-dev" # https://github.com/actions/setup-python#available-versions-of-pypy - "pypy-3.7-nightly" - "pypy-3.8-nightly" @@ -55,8 +60,7 @@ jobs: uses: "actions/checkout@v3" - name: "Install ${{ matrix.python-version }} with deadsnakes" - # uses: deadsnakes/action@v2.1.1 - uses: deadsnakes/action@7ab8819e223c70d2bdedd692dfcea75824e0a617 + uses: deadsnakes/action@e3117c2981fd8afe4af79f3e1be80066c82b70f5 if: "!startsWith(matrix.python-version, 'pypy-')" with: python-version: "${{ matrix.python-version }}" diff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml index 5483f7b87..0901d5caa 100644 --- a/.github/workflows/quality.yml +++ b/.github/workflows/quality.yml @@ -46,15 +46,37 @@ jobs: - name: "Install dependencies" run: | - set -xe - python -VV - python -m site python -m pip install --require-hashes -r requirements/tox.pip - name: "Tox lint" run: | python -m tox -e lint + mypy: + name: "Check types" + runs-on: ubuntu-latest + + steps: + - name: "Check out the repo" + uses: "actions/checkout@v3" + + - name: "Install Python" + uses: "actions/setup-python@v4" + with: + python-version: "3.8" # Minimum of PYVERSIONS, but at least 3.8 + cache: pip + cache-dependency-path: 'requirements/*.pip' + + - name: "Install dependencies" + run: | + # We run on 3.8, but the pins were made on 3.7, so don't insist on + # hashes, which won't match. + python -m pip install -r requirements/tox.pip + + - name: "Tox mypy" + run: | + python -m tox -e mypy + doc: name: "Build docs" runs-on: ubuntu-latest diff --git a/.github/workflows/testsuite.yml b/.github/workflows/testsuite.yml index 81b9e1bb1..e07989630 100644 --- a/.github/workflows/testsuite.yml +++ b/.github/workflows/testsuite.yml @@ -18,6 +18,7 @@ defaults: env: PIP_DISABLE_PIP_VERSION_CHECK: 1 COVERAGE_IGOR_VERBOSE: 1 + FORCE_COLOR: 1 # Get colored pytest output permissions: contents: read @@ -47,9 +48,13 @@ jobs: - "3.8" - "3.9" - "3.10" - - "3.11.0-rc.2" + - "3.11" - "pypy-3.7" - "pypy-3.9" + exclude: + # Windows PyPy-3.9 always gets killed. + - os: windows + python-version: "pypy-3.9" fail-fast: false steps: @@ -73,31 +78,26 @@ jobs: # python -c "import urllib.request as r; exec(r.urlopen('https://bit.ly/pydoctor').read())" - name: "Run tox for ${{ matrix.python-version }}" - continue-on-error: true - id: tox1 run: | python -m tox -- -rfsEX - name: "Retry tox for ${{ matrix.python-version }}" - id: tox2 - if: steps.tox1.outcome == 'failure' + if: failure() run: | - python -m tox -- -rfsEX - - - name: "Set status" - if: always() - run: | - if ${{ steps.tox1.outcome != 'success' && steps.tox2.outcome != 'success' }}; then - exit 1 - fi + # `exit 1` makes sure that the job remains red with flaky runs + python -m tox -- -rfsEX --lf -vvvvv && exit 1 - # A final step to give a simple name for required status checks. + # This job aggregates test results. It's the required check for branch protection. 
+ # https://github.com/marketplace/actions/alls-green#why # https://github.com/orgs/community/discussions/33579 success: - needs: tests - runs-on: ubuntu-latest name: Tests successful + if: always() + needs: + - tests + runs-on: ubuntu-latest steps: - - name: "Success" - run: | - echo Tests successful + - name: Decide whether the needed jobs succeeded or failed + uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe + with: + jobs: ${{ toJSON(needs) }} diff --git a/CHANGES.rst b/CHANGES.rst index 6e80b1a98..f535d9d0e 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -17,13 +17,247 @@ development at the same time, such as 4.5.x and 5.0. .. Version 9.8.1 — 2027-07-27 .. -------------------------- +.. _changes_7-0-3: + +Version 7.0.3 — 2023-01-03 +-------------------------- + +- Fix: when using pytest-cov or pytest-xdist, or perhaps both, the combining + step could fail with ``assert row is not None`` using 7.0.2. This was due to + a race condition that has always been possible and is still possible. In + 7.0.1 and before, the error was silently swallowed by the combining code. + Now it will produce a message "Couldn't combine data file" and ignore the + data file as it used to do before 7.0.2. Closes `issue 1522`_. + +.. _issue 1522: https://github.com/nedbat/coveragepy/issues/1522 + + +.. _changes_7-0-2: + +Version 7.0.2 — 2023-01-02 +-------------------------- + +- Fix: when using the ``[run] relative_files = True`` setting, a relative + ``[paths]`` pattern was still being made absolute. This is now fixed, + closing `issue 1519`_. + +- Fix: if Python doesn't provide tomllib, then TOML configuration files can + only be read if coverage.py is installed with the ``[toml]`` extra. + Coverage.py will raise an error if TOML support is not installed when it sees + your settings are in a .toml file. But it didn't understand that + ``[tool.coverage]`` was a valid section header, so the error wasn't reported + if you used that header, and settings were silently ignored. This is now + fixed, closing `issue 1516`_. + +- Fix: adjusted how decorators are traced on PyPy 7.3.10, fixing `issue 1515`_. + +- Fix: the ``coverage lcov`` report did not properly implement the + ``--fail-under=MIN`` option. This has been fixed. + +- Refactor: added many type annotations, including a number of refactorings. + This should not affect outward behavior, but they were a bit invasive in some + places, so keep your eyes peeled for oddities. + +- Refactor: removed the vestigial and long untested support for Jython and + IronPython. + +.. _issue 1515: https://github.com/nedbat/coveragepy/issues/1515 +.. _issue 1516: https://github.com/nedbat/coveragepy/issues/1516 +.. _issue 1519: https://github.com/nedbat/coveragepy/issues/1519 + + +.. _changes_7-0-1: + +Version 7.0.1 — 2022-12-23 +-------------------------- + +- When checking if a file mapping resolved to a file that exists, we weren't + considering files in .whl files. This is now fixed, closing `issue 1511`_. + +- File pattern rules were too strict, forbidding plus signs and curly braces in + directory and file names. This is now fixed, closing `issue 1513`_. + +- Unusual Unicode or control characters in source files could prevent + reporting. This is now fixed, closing `issue 1512`_. + +- The PyPy wheel now installs on PyPy 3.7, 3.8, and 3.9, closing `issue 1510`_. + +.. _issue 1510: https://github.com/nedbat/coveragepy/issues/1510 +.. _issue 1511: https://github.com/nedbat/coveragepy/issues/1511 +..
_issue 1512: https://github.com/nedbat/coveragepy/issues/1512 +.. _issue 1513: https://github.com/nedbat/coveragepy/issues/1513 + + +.. _changes_7-0-0: + +Version 7.0.0 — 2022-12-18 +-------------------------- + +Nothing new beyond 7.0.0b1. + + +.. _changes_7-0-0b1: + +Version 7.0.0b1 — 2022-12-03 +---------------------------- + +A number of changes have been made to file path handling, including pattern +matching and path remapping with the ``[paths]`` setting (see +:ref:`config_paths`). These changes might affect you, and require you to +update your settings. + +(This release includes the changes from `6.6.0b1 <changes_6-6-0b1_>`_, since +6.6.0 was never released.) + +- Changes to file pattern matching, which might require updating your + configuration: + + - Previously, ``*`` would incorrectly match directory separators, making + precise matching difficult. This is now fixed, closing `issue 1407`_. + + - Now ``**`` matches any number of nested directories, including none. + +- Improvements to combining data files when using the + :ref:`config_run_relative_files` setting, which might require updating your + configuration: + + - During ``coverage combine``, relative file paths are implicitly combined + without needing a ``[paths]`` configuration setting. This also fixed + `issue 991`_. + + - A ``[paths]`` setting like ``*/foo`` will now match ``foo/bar.py`` so that + relative file paths can be combined more easily. + + - The :ref:`config_run_relative_files` setting is properly interpreted in + more places, fixing `issue 1280`_. + +- When remapping file paths with ``[paths]``, a path will be remapped only if + the resulting path exists. The documentation has long said the prefix had to + exist, but it was never enforced. This fixes `issue 608`_, improves `issue + 649`_, and closes `issue 757`_. + +- Reporting operations now implicitly use the ``[paths]`` setting to remap file + paths within a single data file. Combining multiple files still requires the + ``coverage combine`` step, but this simplifies some single-file situations. + Closes `issue 1212`_ and `issue 713`_. + +- The ``coverage report`` command now has a ``--format=`` option. The original + style is now ``--format=text``, and is the default. + + - Using ``--format=markdown`` will write the table in Markdown format, thanks + to `Steve Oswald <pull 1479_>`_, closing `issue 1418`_. + + - Using ``--format=total`` will write a single total number to the + output. This can be useful for making badges or writing status updates. + +- Combining data files with ``coverage combine`` now hashes the data files to + skip files that add no new information. This can reduce the time needed. + Many details affect the speed-up, but for coverage.py's own test suite, + combining is about 40% faster. Closes `issue 1483`_. + +- When searching for completely un-executed files, coverage.py uses the + presence of ``__init__.py`` files to determine which directories have source + that could have been imported. However, `implicit namespace packages`_ don't + require ``__init__.py``. A new setting ``[report] + include_namespace_packages`` tells coverage.py to consider these directories + during reporting. Thanks to `Felix Horvat <pull 1387_>`_ for the + contribution. Closes `issue 1383`_ and `issue 1024`_. + +- Fixed environment variable expansion in pyproject.toml files. It was overly + broad, causing errors outside of coverage.py settings, as described in `issue + 1481`_ and `issue 1345`_.
This is now fixed, but in rare cases will require + changing your pyproject.toml to quote non-string values that use environment + substitution. + +- An empty file has a coverage total of 100%, but used to fail with + ``--fail-under``. This has been fixed, closing `issue 1470`_. + +- The text report table no longer writes out two separator lines if there are + no files listed in the table. One is plenty. + +- Fixed a mis-measurement of a strange use of wildcard alternatives in + match/case statements, closing `issue 1421`_. + +- Fixed internal logic that prevented coverage.py from running on + implementations other than CPython or PyPy (`issue 1474`_). + +- The deprecated ``[run] note`` setting has been completely removed. + +.. _implicit namespace packages: https://peps.python.org/pep-0420/ +.. _issue 608: https://github.com/nedbat/coveragepy/issues/608 +.. _issue 649: https://github.com/nedbat/coveragepy/issues/649 +.. _issue 713: https://github.com/nedbat/coveragepy/issues/713 +.. _issue 757: https://github.com/nedbat/coveragepy/issues/757 +.. _issue 991: https://github.com/nedbat/coveragepy/issues/991 +.. _issue 1024: https://github.com/nedbat/coveragepy/issues/1024 +.. _issue 1212: https://github.com/nedbat/coveragepy/issues/1212 +.. _issue 1280: https://github.com/nedbat/coveragepy/issues/1280 +.. _issue 1345: https://github.com/nedbat/coveragepy/issues/1345 +.. _issue 1383: https://github.com/nedbat/coveragepy/issues/1383 +.. _issue 1407: https://github.com/nedbat/coveragepy/issues/1407 +.. _issue 1418: https://github.com/nedbat/coveragepy/issues/1418 +.. _issue 1421: https://github.com/nedbat/coveragepy/issues/1421 +.. _issue 1470: https://github.com/nedbat/coveragepy/issues/1470 +.. _issue 1474: https://github.com/nedbat/coveragepy/issues/1474 +.. _issue 1481: https://github.com/nedbat/coveragepy/issues/1481 +.. _issue 1483: https://github.com/nedbat/coveragepy/issues/1483 +.. _pull 1387: https://github.com/nedbat/coveragepy/pull/1387 +.. _pull 1479: https://github.com/nedbat/coveragepy/pull/1479 + + + +.. _changes_6-6-0b1: + +Version 6.6.0b1 — 2022-10-31 +---------------------------- + +(Note: 6.6.0 final was never released. These changes are part of `7.0.0b1 +<changes_7-0-0b1_>`_.) + +- Changes to file pattern matching, which might require updating your + configuration: + + - Previously, ``*`` would incorrectly match directory separators, making + precise matching difficult. This is now fixed, closing `issue 1407`_. + + - Now ``**`` matches any number of nested directories, including none. + +- Improvements to combining data files when using the + :ref:`config_run_relative_files` setting: + + - During ``coverage combine``, relative file paths are implicitly combined + without needing a ``[paths]`` configuration setting. This also fixed + `issue 991`_. + + - A ``[paths]`` setting like ``*/foo`` will now match ``foo/bar.py`` so that + relative file paths can be combined more easily. + + - The setting is properly interpreted in more places, fixing `issue 1280`_. + +- Fixed environment variable expansion in pyproject.toml files. It was overly + broad, causing errors outside of coverage.py settings, as described in `issue + 1481`_ and `issue 1345`_. This is now fixed, but in rare cases will require + changing your pyproject.toml to quote non-string values that use environment + substitution. + +- Fixed internal logic that prevented coverage.py from running on + implementations other than CPython or PyPy (`issue 1474`_). + +.. _issue 991: https://github.com/nedbat/coveragepy/issues/991 +..
_issue 1280: https://github.com/nedbat/coveragepy/issues/1280 +.. _issue 1345: https://github.com/nedbat/coveragepy/issues/1345 +.. _issue 1407: https://github.com/nedbat/coveragepy/issues/1407 +.. _issue 1474: https://github.com/nedbat/coveragepy/issues/1474 +.. _issue 1481: https://github.com/nedbat/coveragepy/issues/1481 + + .. _changes_6-5-0: Version 6.5.0 — 2022-09-29 -------------------------- - The JSON report now includes details of which branches were taken, and which - are missing for each file. Thanks, Christoph Blessing (`pull 1438`_). Closes + are missing for each file. Thanks, `Christoph Blessing <pull 1438_>`_. Closes `issue 1425`_. - Starting with coverage.py 6.2, ``class`` statements were marked as a branch. @@ -43,8 +277,8 @@ Version 6.5.0 — 2022-09-29 .. _PEP 517: https://peps.python.org/pep-0517/ .. _issue 1395: https://github.com/nedbat/coveragepy/issues/1395 .. _issue 1425: https://github.com/nedbat/coveragepy/issues/1425 -.. _pull 1438: https://github.com/nedbat/coveragepy/pull/1438 .. _issue 1449: https://github.com/nedbat/coveragepy/issues/1449 +.. _pull 1438: https://github.com/nedbat/coveragepy/pull/1438 .. _changes_6-4-4: @@ -60,29 +294,28 @@ Version 6.4.4 — 2022-08-16 Version 6.4.3 — 2022-08-06 -------------------------- -- Fix a failure when combining data files if the file names contained - glob-like patterns (`pull 1405`_). Thanks, Michael Krebs and Benjamin - Schubert. +- Fix a failure when combining data files if the file names contained glob-like + patterns. Thanks, `Michael Krebs and Benjamin Schubert <pull 1405_>`_. - Fix a messaging failure when combining Windows data files on a different - drive than the current directory. (`pull 1430`_, fixing `issue 1428`_). - Thanks, Lorenzo Micò. + drive than the current directory, closing `issue 1428`_. Thanks, `Lorenzo + Micò <pull 1430_>`_. - Fix path calculations when running in the root directory, as you might do in - a Docker container: `pull 1403`_, thanks Arthur Rio. + a Docker container. Thanks `Arthur Rio <pull 1403_>`_. - Filtering in the HTML report wouldn't work when reloading the index page. - This is now fixed (`pull 1413`_). Thanks, Marc Legendre. + This is now fixed. Thanks, `Marc Legendre <pull 1413_>`_. -- Fix a problem with Cython code measurement (`pull 1347`_, fixing `issue - 972`_). Thanks, Matus Valo. +- Fix a problem with Cython code measurement, closing `issue 972`_. Thanks, + `Matus Valo <pull 1347_>`_. .. _issue 972: https://github.com/nedbat/coveragepy/issues/972 +.. _issue 1428: https://github.com/nedbat/coveragepy/issues/1428 .. _pull 1347: https://github.com/nedbat/coveragepy/pull/1347 .. _pull 1403: https://github.com/nedbat/coveragepy/issues/1403 .. _pull 1405: https://github.com/nedbat/coveragepy/issues/1405 .. _pull 1413: https://github.com/nedbat/coveragepy/issues/1413 -.. _issue 1428: https://github.com/nedbat/coveragepy/issues/1428 .. _pull 1430: https://github.com/nedbat/coveragepy/pull/1430 @@ -92,17 +325,17 @@ Version 6.4.2 — 2022-07-12 -------------------------- - Updated for a small change in Python 3.11.0 beta 4: modules now start with a - line with line number 0, which is ignored. This line cannnot be executed, so + line with line number 0, which is ignored. This line cannot be executed, so coverage totals were thrown off. This line is now ignored by coverage.py, but this also means that truly empty modules (like ``__init__.py``) have no lines in them, rather than one phantom line. Fixes `issue 1419`_. - Internal debugging data added to sys.modules is now an actual module, to avoid confusing code that examines everything in sys.modules.
Thanks, - Yilei Yang (`pull 1399`_). + `Yilei Yang <pull 1399_>`_. -.. _pull 1399: https://github.com/nedbat/coveragepy/pull/1399 .. _issue 1419: https://github.com/nedbat/coveragepy/issues/1419 +.. _pull 1399: https://github.com/nedbat/coveragepy/pull/1399 .. _changes_6-4-1: @@ -143,7 +376,7 @@ Version 6.4 — 2022-05-22 ``?`` to open/close the help panel. Thanks, `J. M. F. Tsang `_. - - The timestamp and version are displayed at the top of the report. Thanks, + - The time stamp and version are displayed at the top of the report. Thanks, `Ammar Askar `_. Closes `issue 1351`_. - A new debug option ``debug=sqldata`` adds more detail to ``debug=sql``, @@ -432,7 +665,7 @@ Version 6.0.2 — 2021-10-11 - Packages named as "source packages" (with ``source``, or ``source_pkgs``, or pytest-cov's ``--cov``) might have been only partially measured. Their - top-level statements could be marked as unexecuted, because they were + top-level statements could be marked as un-executed, because they were imported by coverage.py before measurement began (`issue 1232`_). This is now fixed, but the package will be imported twice, once by coverage.py, then again by your test suite. This could cause problems if importing the package diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt index 8fbd4b176..af6c4c26a 100644 --- a/CONTRIBUTORS.txt +++ b/CONTRIBUTORS.txt @@ -68,6 +68,7 @@ Eli Skeggs Emil Madsen Éric Larivière Federico Bond +Felix Horvat Frazer McLean Geoff Bache George Paci @@ -116,6 +117,7 @@ Matus Valo Max Linke Michael Krebs Michał Bultrowicz +Michał Górny Mickie Betz Mike Fiedler Naveen Yadav @@ -149,7 +151,9 @@ Stephan Richter Stephen Finucane Steve Dower Steve Leonard +Steve Oswald Steve Peak +Sviatoslav Sydorenko S. Y. Lee Teake Nutma Ted Wexler diff --git a/MANIFEST.in b/MANIFEST.in index 1db4d7f6f..b1616dd0a 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -29,6 +29,7 @@ recursive-include ci * recursive-include lab * recursive-include .github * +recursive-include coverage *.pyi recursive-include coverage/fullcoverage *.py recursive-include coverage/ctracer *.c *.h diff --git a/Makefile b/Makefile index 7778a1447..d906554f6 100644 --- a/Makefile +++ b/Makefile @@ -83,7 +83,7 @@ metasmoke: .PHONY: upgrade -PIP_COMPILE = pip-compile --upgrade --allow-unsafe --generate-hashes +PIP_COMPILE = pip-compile --upgrade --allow-unsafe --generate-hashes --resolver=backtracking upgrade: export CUSTOM_COMPILE_COMMAND=make upgrade upgrade: ## Update the *.pip files with the latest packages satisfying *.in files. pip install -q -r requirements/pip-tools.pip @@ -96,7 +96,10 @@ upgrade: ## Update the *.pip files with the latest packages satisfying *.in $(PIP_COMPILE) -o requirements/light-threads.pip requirements/light-threads.in $(PIP_COMPILE) -o doc/requirements.pip doc/requirements.in $(PIP_COMPILE) -o requirements/lint.pip doc/requirements.in requirements/dev.in + $(PIP_COMPILE) -o requirements/mypy.pip requirements/mypy.in +diff_upgrade: ## Summarize the last `make upgrade` + @git diff -U0 | grep -v '^@' | grep == | sort -k1.2,1.99 -k1.1,1.1r -u ##@ Pre-builds for prepping the code @@ -146,6 +149,9 @@ sample_html_beta: _sample_cog_html ## Generate sample HTML report for a beta rel REPO_OWNER = nedbat/coveragepy +edit_for_release: ## Edit sources to insert release facts. + python igor.py edit_for_release + kit: ## Make the source distribution. python -m build @@ -181,6 +187,9 @@ update_stable: ## Set the stable branch to the latest release.
git branch -f stable $$(python setup.py --version) git push origin stable +bump_version: ## Edit sources to bump the version after a release. + python igor.py bump_version + ##@ Documentation diff --git a/NOTICE.txt b/NOTICE.txt index 4e589c8be..68810cd4e 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1,5 +1,5 @@ Copyright 2001 Gareth Rees. All rights reserved. -Copyright 2004-2022 Ned Batchelder. All rights reserved. +Copyright 2004-2023 Ned Batchelder. All rights reserved. Except where noted otherwise, this software is licensed under the Apache License, Version 2.0 (the "License"); you may not use this work except in diff --git a/README.rst b/README.rst index 16b8b849a..1ca0210db 100644 --- a/README.rst +++ b/README.rst @@ -17,8 +17,8 @@ Code coverage testing for Python. | |test-status| |quality-status| |docs| |metacov| | |kit| |downloads| |format| |repos| | |stars| |forks| |contributors| -| |tidelift| |core-infrastructure| |open-ssf| -| |sponsor| |twitter-coveragepy| |twitter-nedbat| +| |core-infrastructure| |open-ssf| |snyk| +| |tidelift| |sponsor| |twitter-coveragepy| |twitter-nedbat| |mastodon-nedbat| Coverage.py measures code coverage, typically during test execution. It uses the code analysis tools and tracing hooks provided in the Python standard @@ -28,8 +28,8 @@ Coverage.py runs on these versions of Python: .. PYVERSIONS -* CPython 3.7 through 3.11.0 rc2. -* PyPy3 7.3.8. +* CPython 3.7 through 3.12.0a3 +* PyPy3 7.3.11. Documentation is on `Read the Docs`_. Code repository and issue tracker are on `GitHub`_. @@ -37,8 +37,12 @@ Documentation is on `Read the Docs`_. Code repository and issue tracker are on .. _Read the Docs: https://coverage.readthedocs.io/ .. _GitHub: https://github.com/nedbat/coveragepy +**New in 7.x:** +improved data combining; +``report --format=``. -**New in 6.x:** dropped support for Python 2.7, 3.5, and 3.6; +**New in 6.x:** +dropped support for Python 2.7, 3.5, and 3.6; write data on SIGTERM; added support for 3.10 match/case statements. @@ -159,6 +163,9 @@ Licensed under the `Apache 2.0 License`_. For details, see `NOTICE.txt`_. .. |contributors| image:: https://img.shields.io/github/contributors/nedbat/coveragepy.svg?logo=github :target: https://github.com/nedbat/coveragepy/graphs/contributors :alt: Contributors +.. |mastodon-nedbat| image:: https://img.shields.io/badge/dynamic/json?style=flat&labelColor=450657&logo=mastodon&logoColor=ffffff&link=https%3A%2F%2Fhachyderm.io%2F%40nedbat&url=https%3A%2F%2Fhachyderm.io%2Fusers%2Fnedbat%2Ffollowers.json&query=totalItems&label=Mastodon + :target: https://hachyderm.io/@nedbat + :alt: nedbat on Mastodon .. |twitter-coveragepy| image:: https://img.shields.io/twitter/follow/coveragepy.svg?label=coveragepy&style=flat&logo=twitter&logoColor=4FADFF :target: https://twitter.com/coveragepy :alt: coverage.py on Twitter @@ -174,3 +181,6 @@ Licensed under the `Apache 2.0 License`_. For details, see `NOTICE.txt`_. .. |open-ssf| image:: https://api.securityscorecards.dev/projects/github.com/nedbat/coveragepy/badge :target: https://deps.dev/pypi/coverage :alt: OpenSSF Scorecard +.. 
|snyk| image:: https://snyk.io/advisor/python/coverage/badge.svg + :target: https://snyk.io/advisor/python/coverage + :alt: Snyk package health diff --git a/ci/github_releases.py b/ci/github_releases.py index 166011fb3..5ba3d5229 100644 --- a/ci/github_releases.py +++ b/ci/github_releases.py @@ -74,35 +74,42 @@ def get_releases(session, repo): releases = { r['tag_name']: r for r in github_paginated(session, url) } return releases +RELEASE_BODY_FMT = """\ +{relnote_text} + +:arrow_right:\xa0 PyPI page: [coverage {version}](https://pypi.org/project/coverage/{version}). +:arrow_right:\xa0 To install: `python3 -m pip install coverage=={version}` +""" + def release_for_relnote(relnote): """ Turn a release note dict into the data needed by GitHub for a release. """ - tag = relnote['version'] + relnote_text = relnote["text"] + tag = version = relnote["version"] + body = RELEASE_BODY_FMT.format(relnote_text=relnote_text, version=version) return { "tag_name": tag, - "name": tag, - "body": relnote["text"], + "name": version, + "body": body, "draft": False, "prerelease": relnote["prerelease"], } -def create_release(session, repo, relnote): +def create_release(session, repo, release_data): """ Create a new GitHub release. """ - print(f"Creating {relnote['version']}") - data = release_for_relnote(relnote) - resp = session.post(RELEASES_URL.format(repo=repo), json=data) + print(f"Creating {release_data['name']}") + resp = session.post(RELEASES_URL.format(repo=repo), json=release_data) check_ok(resp) -def update_release(session, url, relnote): +def update_release(session, url, release_data): """ Update an existing GitHub release. """ - print(f"Updating {relnote['version']}") - data = release_for_relnote(relnote) - resp = session.patch(url, json=data) + print(f"Updating {release_data['name']}") + resp = session.patch(url, json=release_data) check_ok(resp) def update_github_releases(json_filename, repo): @@ -125,14 +132,15 @@ def update_github_releases(json_filename, repo): tag = relnote["version"] if not does_tag_exist(tag): continue + release_data = release_for_relnote(relnote) exists = tag in releases if not exists: - create_release(gh_session, repo, relnote) + create_release(gh_session, repo, release_data) else: release = releases[tag] - if release["body"] != relnote["text"]: + if release["body"] != release_data["body"]: url = release["url"] - update_release(gh_session, url, relnote) + update_release(gh_session, url, release_data) if __name__ == "__main__": update_github_releases(*sys.argv[1:3]) diff --git a/coverage/annotate.py b/coverage/annotate.py index 07ff644dd..c92c29b7e 100644 --- a/coverage/annotate.py +++ b/coverage/annotate.py @@ -3,12 +3,22 @@ """Source file annotation for coverage.py.""" +from __future__ import annotations + import os import re +from typing import Iterable, Optional, TYPE_CHECKING + from coverage.files import flat_rootname from coverage.misc import ensure_dir, isolate_module +from coverage.plugin import FileReporter from coverage.report import get_analysis_to_report +from coverage.results import Analysis +from coverage.types import TMorf + +if TYPE_CHECKING: + from coverage import Coverage os = isolate_module(os) @@ -35,15 +45,15 @@ class AnnotateReporter: """ - def __init__(self, coverage): + def __init__(self, coverage: Coverage) -> None: self.coverage = coverage self.config = self.coverage.config - self.directory = None + self.directory: Optional[str] = None blank_re = re.compile(r"\s*(#|$)") else_re = re.compile(r"\s*else\s*:\s*(#|$)") - def report(self, morfs, 
directory=None): + def report(self, morfs: Optional[Iterable[TMorf]], directory: Optional[str]=None) -> None: """Run the report. See `coverage.report()` for arguments. @@ -54,7 +64,7 @@ def report(self, morfs, directory=None): for fr, analysis in get_analysis_to_report(self.coverage, morfs): self.annotate_file(fr, analysis) - def annotate_file(self, fr, analysis): + def annotate_file(self, fr: FileReporter, analysis: Analysis) -> None: """Annotate a single file. `fr` is the FileReporter for the file to annotate. diff --git a/coverage/bytecode.py b/coverage/bytecode.py index ceb18cf37..15bf755b6 100644 --- a/coverage/bytecode.py +++ b/coverage/bytecode.py @@ -3,10 +3,11 @@ """Bytecode manipulation for coverage.py""" -import types +from types import CodeType +from typing import Generator -def code_objects(code): +def code_objects(code: CodeType) -> Generator[CodeType, None, None]: """Iterate over all the code objects in `code`.""" stack = [code] while stack: @@ -14,6 +15,6 @@ def code_objects(code): # push its children for later returning. code = stack.pop() for c in code.co_consts: - if isinstance(c, types.CodeType): + if isinstance(c, CodeType): stack.append(c) yield code diff --git a/coverage/cmdline.py b/coverage/cmdline.py index dbf66e0a8..b8ca2e7e0 100644 --- a/coverage/cmdline.py +++ b/coverage/cmdline.py @@ -12,10 +12,12 @@ import textwrap import traceback +from typing import cast, Any, List, NoReturn, Optional, Tuple + import coverage from coverage import Coverage from coverage import env -from coverage.collector import CTracer +from coverage.collector import HAS_CTRACER from coverage.config import CoverageConfig from coverage.control import DEFAULT_DATAFILE from coverage.data import combinable_files, debug_data_file @@ -96,6 +98,10 @@ class Opts: '', '--fail-under', action='store', metavar="MIN", type="float", help="Exit with a status of 2 if the total coverage is less than MIN.", ) + format = optparse.make_option( + '', '--format', action='store', metavar="FORMAT", + help="Output format, either text (default), markdown, or total.", + ) help = optparse.make_option( '-h', '--help', action='store_true', help="Get help on this command.", @@ -231,8 +237,9 @@ class CoverageOptionParser(optparse.OptionParser): """ - def __init__(self, *args, **kwargs): - super().__init__(add_help_option=False, *args, **kwargs) + def __init__(self, *args: Any, **kwargs: Any) -> None: + kwargs["add_help_option"] = False + super().__init__(*args, **kwargs) self.set_defaults( # Keep these arguments alphabetized by their names. 
action=None, @@ -245,6 +252,7 @@ def __init__(self, *args, **kwargs): debug=None, directory=None, fail_under=None, + format=None, help=None, ignore_errors=None, include=None, @@ -273,19 +281,19 @@ class OptionParserError(Exception): """Used to stop the optparse error handler ending the process.""" pass - def parse_args_ok(self, args=None, options=None): + def parse_args_ok(self, args: List[str]) -> Tuple[bool, Optional[optparse.Values], List[str]]: """Call optparse.parse_args, but return a triple: (ok, options, args) """ try: - options, args = super().parse_args(args, options) + options, args = super().parse_args(args) except self.OptionParserError: - return False, None, None + return False, None, [] return True, options, args - def error(self, msg): + def error(self, msg: str) -> NoReturn: """Override optparse.error so sys.exit doesn't get called.""" show_help(msg) raise self.OptionParserError @@ -294,7 +302,7 @@ class GlobalOptionParser(CoverageOptionParser): """Command-line parser for coverage.py global option arguments.""" - def __init__(self): + def __init__(self) -> None: super().__init__() self.add_options([ @@ -306,14 +314,19 @@ def __init__(self): class CmdOptionParser(CoverageOptionParser): """Parse one of the new-style commands for coverage.py.""" - def __init__(self, action, options, defaults=None, usage=None, description=None): + def __init__( + self, + action: str, + options: List[optparse.Option], + description: str, + usage: Optional[str]=None, + ): """Create an OptionParser for a coverage.py command. `action` is the slug to put into `options.action`. `options` is a list of Option's for the command. - `defaults` is a dict of default value for options. - `usage` is the usage string to display in help. `description` is the description of the command, for the help text. + `usage` is the usage string to display in help. """ if usage: @@ -322,18 +335,18 @@ def __init__(self, action, options, defaults=None, usage=None, description=None) usage=usage, description=description, ) - self.set_defaults(action=action, **(defaults or {})) + self.set_defaults(action=action) self.add_options(options) self.cmd = action - def __eq__(self, other): + def __eq__(self, other: str) -> bool: # type: ignore[override] # A convenience equality, so that I can put strings in unit test # results, and they will compare equal to objects. return (other == f"<CmdOptionParser:{self.cmd}>") - __hash__ = None # This object doesn't need to be hashed. + __hash__ = None # type: ignore[assignment] - def get_prog_name(self): + def get_prog_name(self) -> str: """Override of an undocumented function in optparse.OptionParser.""" program_name = super().get_prog_name() @@ -379,8 +392,8 @@ def get_prog_name(self): ] + GLOBAL_ARGS, usage="[options] <path1> <path2> ... <pathN>", description=( - "Combine data from multiple coverage files collected " + - "with 'run -p'. The combined results are written to a single " + + "Combine data from multiple coverage files. " + + "The combined results are written to a single " + "file representing the union of the data. The positional " + "arguments are data files or directories containing data files.
" + "If no paths are provided, data files in the default data file's " + @@ -482,6 +495,7 @@ def get_prog_name(self): Opts.contexts, Opts.input_datafile, Opts.fail_under, + Opts.format, Opts.ignore_errors, Opts.include, Opts.omit, @@ -534,7 +548,11 @@ def get_prog_name(self): } -def show_help(error=None, topic=None, parser=None): +def show_help( + error: Optional[str]=None, + topic: Optional[str]=None, + parser: Optional[optparse.OptionParser]=None, +) -> None: """Display an error message, or the named topic.""" assert error or topic or parser @@ -555,7 +573,7 @@ def show_help(error=None, topic=None, parser=None): help_params = dict(coverage.__dict__) help_params['program_name'] = program_name - if CTracer is not None: + if HAS_CTRACER: help_params['extension_modifier'] = 'with C extension' else: help_params['extension_modifier'] = 'without C extension' @@ -567,6 +585,7 @@ def show_help(error=None, topic=None, parser=None): print(parser.format_help().strip()) print() else: + assert topic is not None help_msg = textwrap.dedent(HELP_TOPICS.get(topic, '')).strip() if help_msg: print(help_msg.format(**help_params)) @@ -581,11 +600,11 @@ def show_help(error=None, topic=None, parser=None): class CoverageScript: """The command-line interface to coverage.py.""" - def __init__(self): + def __init__(self) -> None: self.global_option = False - self.coverage = None + self.coverage: Coverage - def command_line(self, argv): + def command_line(self, argv: List[str]) -> int: """The bulk of the command line interface to coverage.py. `argv` is the argument list to process. @@ -600,6 +619,7 @@ def command_line(self, argv): # The command syntax we parse depends on the first argument. Global # switch syntax always starts with an option. + parser: Optional[optparse.OptionParser] self.global_option = argv[0].startswith('-') if self.global_option: parser = GlobalOptionParser() @@ -613,6 +633,7 @@ def command_line(self, argv): ok, options, args = parser.parse_args_ok(argv) if not ok: return ERR + assert options is not None # Handle help and version. if self.do_help(options, args, parser): @@ -689,6 +710,7 @@ def command_line(self, argv): skip_covered=options.skip_covered, skip_empty=options.skip_empty, sort=options.sort, + output_format=options.format, **report_args ) elif options.action == "annotate": @@ -733,8 +755,8 @@ def command_line(self, argv): if options.precision is not None: self.coverage.set_option("report:precision", options.precision) - fail_under = self.coverage.get_option("report:fail_under") - precision = self.coverage.get_option("report:precision") + fail_under = cast(float, self.coverage.get_option("report:fail_under")) + precision = cast(int, self.coverage.get_option("report:precision")) if should_fail_under(total, fail_under, precision): msg = "total of {total} is less than fail-under={fail_under:.{p}f}".format( total=Numbers(precision=precision).display_covered(total), @@ -746,7 +768,12 @@ def command_line(self, argv): return OK - def do_help(self, options, args, parser): + def do_help( + self, + options: optparse.Values, + args: List[str], + parser: optparse.OptionParser, + ) -> bool: """Deal with help requests. Return True if it handled the request, False if not. 
@@ -763,9 +790,9 @@ def do_help(self, options, args, parser): if options.action == "help": if args: for a in args: - parser = COMMANDS.get(a) - if parser: - show_help(parser=parser) + parser_maybe = COMMANDS.get(a) + if parser_maybe is not None: + show_help(parser=parser_maybe) else: show_help(topic=a) else: @@ -779,7 +806,7 @@ def do_help(self, options, args, parser): return False - def do_run(self, options, args): + def do_run(self, options: optparse.Values, args: List[str]) -> int: """Implementation of 'coverage run'.""" if not args: @@ -787,7 +814,7 @@ def do_run(self, options, args): # Specified -m with nothing else. show_help("No module specified for -m") return ERR - command_line = self.coverage.get_option("run:command_line") + command_line = cast(str, self.coverage.get_option("run:command_line")) if command_line is not None: args = shlex.split(command_line) if args and args[0] in {"-m", "--module"}: @@ -838,7 +865,7 @@ def do_run(self, options, args): return OK - def do_debug(self, args): + def do_debug(self, args: List[str]) -> int: """Implementation of 'coverage debug'.""" if not args: @@ -871,7 +898,7 @@ def do_debug(self, args): return OK -def unshell_list(s): +def unshell_list(s: str) -> Optional[List[str]]: """Turn a command-line argument into a list.""" if not s: return None @@ -885,7 +912,7 @@ def unshell_list(s): return s.split(',') -def unglob_args(args): +def unglob_args(args: List[str]) -> List[str]: """Interpret shell wildcards for platforms that need it.""" if env.WINDOWS: globbed = [] @@ -931,7 +958,7 @@ def unglob_args(args): } -def main(argv=None): +def main(argv: Optional[List[str]]=None) -> Optional[int]: """The main entry point to coverage.py. This is installed as the script entry point. @@ -969,7 +996,9 @@ def main(argv=None): from ox_profile.core.launchers import SimpleLauncher # pylint: disable=import-error original_main = main - def main(argv=None): # pylint: disable=function-redefined + def main( # pylint: disable=function-redefined + argv: Optional[List[str]]=None, + ) -> Optional[int]: """A wrapper around main that profiles.""" profiler = SimpleLauncher.launch() try: diff --git a/coverage/collector.py b/coverage/collector.py index 241de05ea..a3c537d66 100644 --- a/coverage/collector.py +++ b/coverage/collector.py @@ -3,16 +3,29 @@ """Raw data collector for coverage.py.""" +from __future__ import annotations + +import functools import os import sys +from types import FrameType +from typing import ( + cast, Any, Callable, Dict, List, Mapping, Optional, Set, Tuple, Type, TypeVar, +) + from coverage import env from coverage.config import CoverageConfig +from coverage.data import CoverageData from coverage.debug import short_stack from coverage.disposition import FileDisposition from coverage.exceptions import ConfigError -from coverage.misc import human_sorted, isolate_module +from coverage.misc import human_sorted_items, isolate_module +from coverage.plugin import CoveragePlugin from coverage.pytracer import PyTracer +from coverage.types import ( + TArc, TFileDisposition, TLineNo, TTraceData, TTraceFn, TTracer, TWarnFn, +) os = isolate_module(os) @@ -20,6 +33,7 @@ try: # Use the C extension code when we can, for speed. from coverage.tracer import CTracer, CFileDisposition + HAS_CTRACER = True except ImportError: # Couldn't import the C extension, maybe it isn't built. if os.getenv('COVERAGE_TEST_TRACER') == 'c': # pragma: part covered @@ -31,8 +45,9 @@ # exception here causes all sorts of other noise in unittest. 
sys.stderr.write("*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n") sys.exit(1) - CTracer = None + HAS_CTRACER = False +T = TypeVar("T") class Collector: """Collects trace data. @@ -53,15 +68,22 @@ class Collector: # The stack of active Collectors. Collectors are added here when started, # and popped when stopped. Collectors on the stack are paused when not # the top, and resumed when they become the top again. - _collectors = [] + _collectors: List[Collector] = [] # The concurrency settings we support here. LIGHT_THREADS = {"greenlet", "eventlet", "gevent"} def __init__( - self, should_trace, check_include, should_start_context, file_mapper, - timid, branch, warn, concurrency, - ): + self, + should_trace: Callable[[str, FrameType], TFileDisposition], + check_include: Callable[[str, FrameType], bool], + should_start_context: Optional[Callable[[FrameType], Optional[str]]], + file_mapper: Callable[[str], str], + timid: bool, + branch: bool, + warn: TWarnFn, + concurrency: List[str], + ) -> None: """Create a collector. `should_trace` is a function, taking a file name and a frame, and @@ -107,28 +129,29 @@ def __init__( self.concurrency = concurrency assert isinstance(self.concurrency, list), f"Expected a list: {self.concurrency!r}" + self.covdata: CoverageData self.threading = None - self.covdata = None - self.static_context = None + self.static_context: Optional[str] = None self.origin = short_stack() self.concur_id_func = None - self.mapped_file_cache = {} - if timid: - # Being timid: use the simple Python trace function. - self._trace_class = PyTracer - else: - # Being fast: use the C Tracer if it is available, else the Python - # trace function. - self._trace_class = CTracer or PyTracer + self._trace_class: Type[TTracer] + self.file_disposition_class: Type[TFileDisposition] + + use_ctracer = False + if HAS_CTRACER and not timid: + use_ctracer = True - if self._trace_class is CTracer: + #if HAS_CTRACER and self._trace_class is CTracer: + if use_ctracer: + self._trace_class = CTracer self.file_disposition_class = CFileDisposition self.supports_plugins = True self.packed_arcs = True else: + self._trace_class = PyTracer self.file_disposition_class = FileDisposition self.supports_plugins = False self.packed_arcs = False @@ -182,22 +205,22 @@ def __init__( self.reset() - def __repr__(self): + def __repr__(self) -> str: return f"<Collector at 0x{id(self):x}: {self.tracer_name()}>" - def use_data(self, covdata, context): + def use_data(self, covdata: CoverageData, context: Optional[str]) -> None: """Use `covdata` for recording data.""" self.covdata = covdata self.static_context = context self.covdata.set_context(self.static_context) - def tracer_name(self): + def tracer_name(self) -> str: """Return the class name of the tracer we're using.""" return self._trace_class.__name__ - def _clear_data(self): + def _clear_data(self) -> None: """Clear out existing data, but stay ready for more collection.""" - # We used to used self.data.clear(), but that would remove filename + # We used to use self.data.clear(), but that would remove filename # keys and data values that were still in use higher up the stack # when we are called as part of switch_context.
for d in self.data.values(): @@ -206,18 +229,16 @@ def _clear_data(self): for tracer in self.tracers: tracer.reset_activity() - def reset(self): + def reset(self) -> None: """Clear collected data, and prepare to collect more.""" - # A dictionary mapping file names to dicts with line number keys (if not - # branch coverage), or mapping file names to dicts with line number - # pairs as keys (if branch coverage). - self.data = {} + # The trace data we are collecting. + self.data: TTraceData = {} # type: ignore[assignment] # A dictionary mapping file names to file tracer plugin names that will # handle them. - self.file_tracers = {} + self.file_tracers: Dict[str, str] = {} - self.disabled_plugins = set() + self.disabled_plugins: Set[str] = set() # The .should_trace_cache attribute is a cache from file names to # coverage.FileDisposition objects, or None. When a file is first @@ -248,11 +269,11 @@ def reset(self): self.should_trace_cache = {} # Our active Tracers. - self.tracers = [] + self.tracers: List[TTracer] = [] self._clear_data() - def _start_tracer(self): + def _start_tracer(self) -> TTraceFn: """Start a new Tracer object, and store it in self.tracers.""" tracer = self._trace_class() tracer.data = self.data @@ -271,6 +292,7 @@ def _start_tracer(self): tracer.check_include = self.check_include if hasattr(tracer, 'should_start_context'): tracer.should_start_context = self.should_start_context + if hasattr(tracer, 'switch_context'): tracer.switch_context = self.switch_context if hasattr(tracer, 'disable_plugin'): tracer.disable_plugin = self.disable_plugin @@ -288,7 +310,7 @@ def _start_tracer(self): # # New in 3.12: threading.settrace_all_threads: https://github.com/python/cpython/pull/96681 - def _installation_trace(self, frame, event, arg): + def _installation_trace(self, frame: FrameType, event: str, arg: Any) -> TTraceFn: """Called on new threads, installs the real tracer.""" # Remove ourselves as the trace function. sys.settrace(None) @@ -301,7 +323,7 @@ def _installation_trace(self, frame, event, arg): # Return the new trace function to continue tracing in this scope. return fn - def start(self): + def start(self) -> None: """Start collecting trace information.""" if self._collectors: self._collectors[-1].pause() @@ -310,7 +332,7 @@ def start(self): # Check to see whether we had a fullcoverage tracer installed. If so, # get the stack frames it stashed away for us. 
- traces0 = [] + traces0: List[Tuple[Tuple[FrameType, str, Any], TLineNo]] = [] fn0 = sys.gettrace() if fn0: tracer0 = getattr(fn0, '__self__', None) @@ -341,7 +363,7 @@ def start(self): if self.threading: self.threading.settrace(self._installation_trace) - def stop(self): + def stop(self) -> None: """Stop collecting trace information.""" assert self._collectors if self._collectors[-1] is not self: @@ -360,19 +382,19 @@ def stop(self): if self._collectors: self._collectors[-1].resume() - def pause(self): + def pause(self) -> None: """Pause tracing, but be prepared to `resume`.""" for tracer in self.tracers: tracer.stop() stats = tracer.get_stats() if stats: print("\nCoverage.py tracer stats:") - for k in human_sorted(stats.keys()): - print(f"{k:>20}: {stats[k]}") + for k, v in human_sorted_items(stats.items()): + print(f"{k:>20}: {v}") if self.threading: self.threading.settrace(None) - def resume(self): + def resume(self) -> None: """Resume tracing after a `pause`.""" for tracer in self.tracers: tracer.start() @@ -381,7 +403,7 @@ def resume(self): else: self._start_tracer() - def _activity(self): + def _activity(self) -> bool: """Has any activity been traced? Returns a boolean, True if any trace function was invoked. @@ -389,8 +411,9 @@ def _activity(self): """ return any(tracer.activity() for tracer in self.tracers) - def switch_context(self, new_context): + def switch_context(self, new_context: Optional[str]) -> None: """Switch to a new dynamic context.""" + context: Optional[str] self.flush_data() if self.static_context: context = self.static_context @@ -400,24 +423,22 @@ def switch_context(self, new_context): context = new_context self.covdata.set_context(context) - def disable_plugin(self, disposition): + def disable_plugin(self, disposition: TFileDisposition) -> None: """Disable the plugin mentioned in `disposition`.""" file_tracer = disposition.file_tracer + assert file_tracer is not None plugin = file_tracer._coverage_plugin plugin_name = plugin._coverage_plugin_name self.warn(f"Disabling plug-in {plugin_name!r} due to previous exception") plugin._coverage_enabled = False disposition.trace = False - def cached_mapped_file(self, filename): + @functools.lru_cache(maxsize=0) + def cached_mapped_file(self, filename: str) -> str: """A locally cached version of file names mapped through file_mapper.""" - key = (type(filename), filename) - try: - return self.mapped_file_cache[key] - except KeyError: - return self.mapped_file_cache.setdefault(key, self.file_mapper(filename)) + return self.file_mapper(filename) - def mapped_file_dict(self, d): + def mapped_file_dict(self, d: Mapping[str, T]) -> Dict[str, T]: """Return a dict like d, but with keys modified by file_mapper.""" # The call to list(items()) ensures that the GIL protects the dictionary # iterator against concurrent modifications by tracers running @@ -431,16 +452,17 @@ def mapped_file_dict(self, d): runtime_err = ex else: break - else: - raise runtime_err # pragma: cant happen + else: # pragma: cant happen + assert isinstance(runtime_err, Exception) + raise runtime_err return {self.cached_mapped_file(k): v for k, v in items} - def plugin_was_disabled(self, plugin): + def plugin_was_disabled(self, plugin: CoveragePlugin) -> None: """Record that `plugin` was disabled during the run.""" self.disabled_plugins.add(plugin._coverage_plugin_name) - def flush_data(self): + def flush_data(self) -> bool: """Save the collected data to our associated `CoverageData`. Data may have also been saved along the way. 
This forces the @@ -456,8 +478,9 @@ def flush_data(self): # Unpack the line number pairs packed into integers. See # tracer.c:CTracer_record_pair for the C code that creates # these packed ints. - data = {} - for fname, packeds in self.data.items(): + arc_data: Dict[str, List[TArc]] = {} + packed_data = cast(Dict[str, Set[int]], self.data) + for fname, packeds in packed_data.items(): tuples = [] for packed in packeds: l1 = packed & 0xFFFFF @@ -467,12 +490,13 @@ def flush_data(self): if packed & (1 << 41): l2 *= -1 tuples.append((l1, l2)) - data[fname] = tuples + arc_data[fname] = tuples else: - data = self.data - self.covdata.add_arcs(self.mapped_file_dict(data)) + arc_data = cast(Dict[str, List[TArc]], self.data) + self.covdata.add_arcs(self.mapped_file_dict(arc_data)) else: - self.covdata.add_lines(self.mapped_file_dict(self.data)) + line_data = cast(Dict[str, Set[int]], self.data) + self.covdata.add_lines(self.mapped_file_dict(line_data)) file_tracers = { k: v for k, v in self.file_tracers.items() diff --git a/coverage/config.py b/coverage/config.py index 1ad46597c..3e5359490 100644 --- a/coverage/config.py +++ b/coverage/config.py @@ -3,6 +3,7 @@ """Config file for coverage.py""" +from __future__ import annotations import collections import configparser import copy @@ -10,18 +11,22 @@ import os.path import re -from coverage.exceptions import ConfigError -from coverage.misc import contract, isolate_module, human_sorted_items, substitute_variables +from typing import ( + Any, Callable, Dict, Iterable, List, Optional, Tuple, Union, +) +from coverage.exceptions import ConfigError +from coverage.misc import isolate_module, human_sorted_items, substitute_variables from coverage.tomlconfig import TomlConfigParser, TomlDecodeError +from coverage.types import TConfigurable, TConfigSection, TConfigValue os = isolate_module(os) -class HandyConfigParser(configparser.RawConfigParser): +class HandyConfigParser(configparser.ConfigParser): """Our specialization of ConfigParser.""" - def __init__(self, our_file): + def __init__(self, our_file: bool) -> None: """Create the HandyConfigParser. `our_file` is True if this config file is specifically for coverage, @@ -29,49 +34,54 @@ def __init__(self, our_file): for possible settings. 
""" - configparser.RawConfigParser.__init__(self) + super().__init__(interpolation=None) self.section_prefixes = ["coverage:"] if our_file: self.section_prefixes.append("") - def read(self, filenames, encoding_unused=None): + def read( # type: ignore[override] + self, + filenames: Iterable[str], + encoding_unused: Optional[str]=None, + ) -> List[str]: """Read a file name as UTF-8 configuration data.""" - return configparser.RawConfigParser.read(self, filenames, encoding="utf-8") - - def has_option(self, section, option): - for section_prefix in self.section_prefixes: - real_section = section_prefix + section - has = configparser.RawConfigParser.has_option(self, real_section, option) - if has: - return has - return False + return super().read(filenames, encoding="utf-8") - def has_section(self, section): + def real_section(self, section: str) -> Optional[str]: + """Get the actual name of a section.""" for section_prefix in self.section_prefixes: real_section = section_prefix + section - has = configparser.RawConfigParser.has_section(self, real_section) + has = super().has_section(real_section) if has: return real_section + return None + + def has_option(self, section: str, option: str) -> bool: + real_section = self.real_section(section) + if real_section is not None: + return super().has_option(real_section, option) return False - def options(self, section): - for section_prefix in self.section_prefixes: - real_section = section_prefix + section - if configparser.RawConfigParser.has_section(self, real_section): - return configparser.RawConfigParser.options(self, real_section) + def has_section(self, section: str) -> bool: + return bool(self.real_section(section)) + + def options(self, section: str) -> List[str]: + real_section = self.real_section(section) + if real_section is not None: + return super().options(real_section) raise ConfigError(f"No section: {section!r}") - def get_section(self, section): + def get_section(self, section: str) -> TConfigSection: """Get the contents of a section, as a dictionary.""" - d = {} + d: Dict[str, TConfigValue] = {} for opt in self.options(section): d[opt] = self.get(section, opt) return d - def get(self, section, option, *args, **kwargs): + def get(self, section: str, option: str, *args: Any, **kwargs: Any) -> str: # type: ignore """Get a value, replacing environment variables also. - The arguments are the same as `RawConfigParser.get`, but in the found + The arguments are the same as `ConfigParser.get`, but in the found value, ``$WORD`` or ``${WORD}`` are replaced by the value of the environment variable ``WORD``. @@ -80,20 +90,20 @@ def get(self, section, option, *args, **kwargs): """ for section_prefix in self.section_prefixes: real_section = section_prefix + section - if configparser.RawConfigParser.has_option(self, real_section, option): + if super().has_option(real_section, option): break else: raise ConfigError(f"No option {option!r} in section: {section!r}") - v = configparser.RawConfigParser.get(self, real_section, option, *args, **kwargs) + v: str = super().get(real_section, option, *args, **kwargs) v = substitute_variables(v, os.environ) return v - def getlist(self, section, option): + def getlist(self, section: str, option: str) -> List[str]: """Read a list of strings. The value of `section` and `option` is treated as a comma- and newline- - separated list of strings. Each value is stripped of whitespace. + separated list of strings. Each value is stripped of white space. Returns the list of strings. 
@@ -107,11 +117,11 @@ def getlist(self, section, option): values.append(value) return values - def getregexlist(self, section, option): + def getregexlist(self, section: str, option: str) -> List[str]: """Read a list of full-line regexes. The value of `section` and `option` is treated as a newline-separated - list of regexes. Each value is stripped of whitespace. + list of regexes. Each value is stripped of white space. Returns the list of strings. @@ -131,6 +141,9 @@ def getregexlist(self, section, option): return value_list +TConfigParser = Union[HandyConfigParser, TomlConfigParser] + + # The default line exclusion regexes. DEFAULT_EXCLUDE = [ r'#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(cover|COVER)', @@ -150,7 +163,7 @@ def getregexlist(self, section, option): ] -class CoverageConfig: +class CoverageConfig(TConfigurable): """Coverage.py configuration. The attributes of this class are the various settings that control the @@ -159,16 +172,16 @@ class CoverageConfig: """ # pylint: disable=too-many-instance-attributes - def __init__(self): + def __init__(self) -> None: """Initialize the configuration attributes to their defaults.""" # Metadata about the config. # We tried to read these config files. - self.attempted_config_files = [] + self.attempted_config_files: List[str] = [] # We did read these config files, but maybe didn't find any content for us. - self.config_files_read = [] + self.config_files_read: List[str] = [] # The file that gave us our configuration. - self.config_file = None - self._config_contents = None + self.config_file: Optional[str] = None + self._config_contents: Optional[bytes] = None # Defaults for [run] and [report] self._include = None @@ -176,46 +189,47 @@ def __init__(self): # Defaults for [run] self.branch = False - self.command_line = None - self.concurrency = None - self.context = None + self.command_line: Optional[str] = None + self.concurrency: List[str] = [] + self.context: Optional[str] = None self.cover_pylib = False self.data_file = ".coverage" - self.debug = [] - self.disable_warnings = [] - self.dynamic_context = None - self.note = None + self.debug: List[str] = [] + self.disable_warnings: List[str] = [] + self.dynamic_context: Optional[str] = None self.parallel = False - self.plugins = [] + self.plugins: List[str] = [] self.relative_files = False - self.run_include = None - self.run_omit = None + self.run_include: List[str] = [] + self.run_omit: List[str] = [] self.sigterm = False - self.source = None - self.source_pkgs = [] + self.source: Optional[List[str]] = None + self.source_pkgs: List[str] = [] self.timid = False - self._crash = None + self._crash: Optional[str] = None # Defaults for [report] self.exclude_list = DEFAULT_EXCLUDE[:] self.fail_under = 0.0 + self.format: Optional[str] = None self.ignore_errors = False - self.report_include = None - self.report_omit = None + self.include_namespace_packages = False + self.report_include: Optional[List[str]] = None + self.report_omit: Optional[List[str]] = None self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:] self.partial_list = DEFAULT_PARTIAL[:] self.precision = 0 - self.report_contexts = None + self.report_contexts: Optional[List[str]] = None self.show_missing = False self.skip_covered = False self.skip_empty = False - self.sort = None + self.sort: Optional[str] = None # Defaults for [html] - self.extra_css = None + self.extra_css: Optional[str] = None self.html_dir = "htmlcov" - self.html_skip_covered = None - self.html_skip_empty = None + self.html_skip_covered: Optional[bool] = None + 
self.html_skip_empty: Optional[bool] = None self.html_title = "Coverage report" self.show_contexts = False @@ -232,10 +246,10 @@ def __init__(self): self.lcov_output = "coverage.lcov" # Defaults for [paths] - self.paths = collections.OrderedDict() + self.paths: Dict[str, List[str]] = {} # Options for plugins - self.plugin_options = {} + self.plugin_options: Dict[str, TConfigSection] = {} MUST_BE_LIST = { "debug", "concurrency", "plugins", @@ -243,7 +257,7 @@ def __init__(self): "run_omit", "run_include", } - def from_args(self, **kwargs): + def from_args(self, **kwargs: TConfigValue) -> None: """Read config values from `kwargs`.""" for k, v in kwargs.items(): if v is not None: @@ -251,8 +265,7 @@ def from_args(self, **kwargs): v = [v] setattr(self, k, v) - @contract(filename=str) - def from_file(self, filename, warn, our_file): + def from_file(self, filename: str, warn: Callable[[str], None], our_file: bool) -> bool: """Read configuration from a .rc file. `filename` is a file name to read. @@ -266,6 +279,7 @@ def from_file(self, filename, warn, our_file): """ _, ext = os.path.splitext(filename) + cp: TConfigParser if ext == '.toml': cp = TomlConfigParser(our_file) else: @@ -298,7 +312,7 @@ def from_file(self, filename, warn, our_file): all_options[section].add(option) for section, options in all_options.items(): - real_section = cp.has_section(section) + real_section = cp.real_section(section) if real_section: for unknown in set(cp.options(section)) - options: warn( @@ -334,7 +348,7 @@ def from_file(self, filename, warn, our_file): return used - def copy(self): + def copy(self) -> CoverageConfig: """Return a copy of the configuration.""" return copy.deepcopy(self) @@ -359,7 +373,6 @@ def copy(self): ('debug', 'run:debug', 'list'), ('disable_warnings', 'run:disable_warnings', 'list'), ('dynamic_context', 'run:dynamic_context'), - ('note', 'run:note'), ('parallel', 'run:parallel', 'boolean'), ('plugins', 'run:plugins', 'list'), ('relative_files', 'run:relative_files', 'boolean'), @@ -374,7 +387,9 @@ def copy(self): # [report] ('exclude_list', 'report:exclude_lines', 'regexlist'), ('fail_under', 'report:fail_under', 'float'), + ('format', 'report:format'), ('ignore_errors', 'report:ignore_errors', 'boolean'), + ('include_namespace_packages', 'report:include_namespace_packages', 'boolean'), ('partial_always_list', 'report:partial_branches_always', 'regexlist'), ('partial_list', 'report:partial_branches', 'regexlist'), ('precision', 'report:precision', 'int'), @@ -407,7 +422,13 @@ def copy(self): ('lcov_output', 'lcov:output'), ] - def _set_attr_from_config_option(self, cp, attr, where, type_=''): + def _set_attr_from_config_option( + self, + cp: TConfigParser, + attr: str, + where: str, + type_: str='', + ) -> bool: """Set an attribute on self if it exists in the ConfigParser. Returns True if the attribute was set. @@ -420,11 +441,11 @@ def _set_attr_from_config_option(self, cp, attr, where, type_=''): return True return False - def get_plugin_options(self, plugin): + def get_plugin_options(self, plugin: str) -> TConfigSection: """Get a dictionary of options for the plugin named `plugin`.""" return self.plugin_options.get(plugin, {}) - def set_option(self, option_name, value): + def set_option(self, option_name: str, value: Union[TConfigValue, TConfigSection]) -> None: """Set an option in the configuration. `option_name` is a colon-separated string indicating the section and @@ -436,7 +457,7 @@ def set_option(self, option_name, value): """ # Special-cased options.
if option_name == "paths": - self.paths = value + self.paths = value # type: ignore return # Check all the hard-coded options. @@ -449,13 +470,13 @@ def set_option(self, option_name, value): # See if it's a plugin option. plugin_name, _, key = option_name.partition(":") if key and plugin_name in self.plugins: - self.plugin_options.setdefault(plugin_name, {})[key] = value + self.plugin_options.setdefault(plugin_name, {})[key] = value # type: ignore return # If we get here, we didn't find the option. raise ConfigError(f"No such option: {option_name!r}") - def get_option(self, option_name): + def get_option(self, option_name: str) -> Optional[TConfigValue]: """Get an option from the configuration. `option_name` is a colon-separated string indicating the section and @@ -467,13 +488,13 @@ def get_option(self, option_name): """ # Special-cased options. if option_name == "paths": - return self.paths + return self.paths # type: ignore # Check all the hard-coded options. for option_spec in self.CONFIG_FILE_OPTIONS: attr, where = option_spec[:2] if where == option_name: - return getattr(self, attr) + return getattr(self, attr) # type: ignore # See if it's a plugin option. plugin_name, _, key = option_name.partition(":") @@ -483,28 +504,28 @@ def get_option(self, option_name): # If we get here, we didn't find the option. raise ConfigError(f"No such option: {option_name!r}") - def post_process_file(self, path): + def post_process_file(self, path: str) -> str: """Make final adjustments to a file path to make it usable.""" return os.path.expanduser(path) - def post_process(self): + def post_process(self) -> None: """Make final adjustments to settings to make them usable.""" self.data_file = self.post_process_file(self.data_file) self.html_dir = self.post_process_file(self.html_dir) self.xml_output = self.post_process_file(self.xml_output) - self.paths = collections.OrderedDict( + self.paths = dict( (k, [self.post_process_file(f) for f in v]) for k, v in self.paths.items() ) - def debug_info(self): + def debug_info(self) -> Iterable[Tuple[str, Any]]: """Make a list of (name, value) pairs for writing debug info.""" return human_sorted_items( (k, v) for k, v in self.__dict__.items() if not k.startswith("_") ) -def config_files_to_try(config_file): +def config_files_to_try(config_file: Union[bool, str]) -> List[Tuple[str, bool, bool]]: """What config files should we try to read? Returns a list of tuples: @@ -518,12 +539,14 @@ def config_files_to_try(config_file): specified_file = (config_file is not True) if not specified_file: # No file was specified. Check COVERAGE_RCFILE. - config_file = os.environ.get('COVERAGE_RCFILE') - if config_file: + rcfile = os.environ.get('COVERAGE_RCFILE') + if rcfile: + config_file = rcfile specified_file = True if not specified_file: # Still no file specified. Default to .coveragerc config_file = ".coveragerc" + assert isinstance(config_file, str) files_to_try = [ (config_file, True, specified_file), ("setup.cfg", False, False), @@ -533,7 +556,11 @@ def config_files_to_try(config_file): return files_to_try -def read_coverage_config(config_file, warn, **kwargs): +def read_coverage_config( + config_file: Union[bool, str], + warn: Callable[[str], None], + **kwargs: TConfigValue, +) -> CoverageConfig: """Read the coverage.py configuration. 
Arguments: diff --git a/coverage/context.py b/coverage/context.py index 6bb1f1ee1..3b8bc10f6 100644 --- a/coverage/context.py +++ b/coverage/context.py @@ -3,8 +3,13 @@ """Determine contexts for coverage.py""" +from types import FrameType +from typing import cast, Callable, Optional, Sequence -def combine_context_switchers(context_switchers): + +def combine_context_switchers( + context_switchers: Sequence[Callable[[FrameType], Optional[str]]], +) -> Optional[Callable[[FrameType], Optional[str]]]: """Create a single context switcher from multiple switchers. `context_switchers` is a list of functions that take a frame as an @@ -23,7 +28,7 @@ def combine_context_switchers(context_switchers): if len(context_switchers) == 1: return context_switchers[0] - def should_start_context(frame): + def should_start_context(frame: FrameType) -> Optional[str]: """The combiner for multiple context switchers.""" for switcher in context_switchers: new_context = switcher(frame) @@ -34,7 +39,7 @@ def should_start_context(frame): return should_start_context -def should_start_context_test_function(frame): +def should_start_context_test_function(frame: FrameType) -> Optional[str]: """Is this frame calling a test_* function?""" co_name = frame.f_code.co_name if co_name.startswith("test") or co_name == "runTest": @@ -42,7 +47,7 @@ def should_start_context_test_function(frame): return None -def qualname_from_frame(frame): +def qualname_from_frame(frame: FrameType) -> Optional[str]: """Get a qualified name for the code running in `frame`.""" co = frame.f_code fname = co.co_name @@ -55,11 +60,11 @@ def qualname_from_frame(frame): func = frame.f_globals.get(fname) if func is None: return None - return func.__module__ + "." + fname + return cast(str, func.__module__ + "." + fname) func = getattr(method, "__func__", None) if func is None: cls = self.__class__ - return cls.__module__ + "." + cls.__name__ + "." + fname + return cast(str, cls.__module__ + "." + cls.__name__ + "." + fname) - return func.__module__ + "." + func.__qualname__ + return cast(str, func.__module__ + "." 
+ func.__qualname__) diff --git a/coverage/control.py b/coverage/control.py index 5e1e54bf3..8ac6781ee 100644 --- a/coverage/control.py +++ b/coverage/control.py @@ -3,6 +3,8 @@ """Core control stuff for coverage.py.""" +from __future__ import annotations + import atexit import collections import contextlib @@ -15,9 +17,15 @@ import time import warnings +from types import FrameType +from typing import ( + cast, + Any, Callable, Dict, Generator, IO, Iterable, List, Optional, Tuple, Union, +) + from coverage import env from coverage.annotate import AnnotateReporter -from coverage.collector import Collector, CTracer +from coverage.collector import Collector, HAS_CTRACER from coverage.config import read_coverage_config from coverage.context import should_start_context_test_function, combine_context_switchers from coverage.data import CoverageData, combine_parallel_data @@ -31,24 +39,23 @@ from coverage.lcovreport import LcovReporter from coverage.misc import bool_or_none, join_regex, human_sorted from coverage.misc import DefaultValue, ensure_dir_for_file, isolate_module +from coverage.multiproc import patch_multiprocessing from coverage.plugin import FileReporter from coverage.plugin_support import Plugins from coverage.python import PythonFileReporter from coverage.report import render_report from coverage.results import Analysis from coverage.summary import SummaryReporter +from coverage.types import ( + TConfigurable, TConfigSection, TConfigValue, TFileDisposition, TLineNo, TMorf, +) from coverage.xmlreport import XmlReporter -try: - from coverage.multiproc import patch_multiprocessing -except ImportError: # pragma: only jython - # Jython has no multiprocessing module. - patch_multiprocessing = None os = isolate_module(os) @contextlib.contextmanager -def override_config(cov, **kwargs): +def override_config(cov: Coverage, **kwargs: TConfigValue) -> Generator[None, None, None]: """Temporarily tweak the configuration of `cov`. The arguments are applied to `cov.config` with the `from_args` method. @@ -66,7 +73,7 @@ def override_config(cov, **kwargs): DEFAULT_DATAFILE = DefaultValue("MISSING") _DEFAULT_DATAFILE = DEFAULT_DATAFILE # Just in case, for backwards compatibility -class Coverage: +class Coverage(TConfigurable): """Programmatic access to coverage.py. To use:: @@ -88,10 +95,10 @@ class Coverage: """ # The stack of started Coverage instances. - _instances = [] + _instances: List[Coverage] = [] @classmethod - def current(cls): + def current(cls) -> Optional[Coverage]: """Get the latest started `Coverage` instance, if any. Returns: a `Coverage` instance, or None. 
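# A small, hedged usage sketch of the Coverage.current() classmethod
# documented above: the most recently start()ed instance is "current".
# This assumes no other Coverage instance is already active in the process.
import coverage

cov = coverage.Coverage()
cov.start()
assert coverage.Coverage.current() is cov    # top of the _instances stack
cov.stop()
assert coverage.Coverage.current() is None   # the stack is empty again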
@@ -104,13 +111,25 @@ def current(cls): else: return None - def __init__( - self, data_file=DEFAULT_DATAFILE, data_suffix=None, cover_pylib=None, - auto_data=False, timid=None, branch=None, config_file=True, - source=None, source_pkgs=None, omit=None, include=None, debug=None, - concurrency=None, check_preimported=False, context=None, - messages=False, - ): # pylint: disable=too-many-arguments + def __init__( # pylint: disable=too-many-arguments + self, + data_file: Optional[Union[str, DefaultValue]]=DEFAULT_DATAFILE, + data_suffix: Optional[Union[str, bool]]=None, + cover_pylib: Optional[bool]=None, + auto_data: bool=False, + timid: Optional[bool]=None, + branch: Optional[bool]=None, + config_file: Union[str, bool]=True, + source: Optional[List[str]]=None, + source_pkgs: Optional[List[str]]=None, + omit: Optional[List[str]]=None, + include: Optional[List[str]]=None, + debug: Optional[List[str]]=None, + concurrency: Optional[Union[str, List[str]]]=None, + check_preimported: bool=False, + context: Optional[str]=None, + messages: bool=False, + ) -> None: """ Many of these arguments duplicate and override values that can be provided in a configuration file. Parameters that are missing here @@ -202,13 +221,11 @@ def __init__( # data_file=None means no disk file at all. data_file missing means # use the value from the config file. self._no_disk = data_file is None - if data_file is DEFAULT_DATAFILE: + if isinstance(data_file, DefaultValue): data_file = None - self.config = None - # This is injectable by tests. - self._debug_file = None + self._debug_file: Optional[IO[str]] = None self._auto_load = self._auto_save = auto_data self._data_suffix_specified = data_suffix @@ -217,21 +234,23 @@ def __init__( self._warn_no_data = True self._warn_unimported_source = True self._warn_preimported_source = check_preimported - self._no_warn_slugs = None + self._no_warn_slugs: List[str] = [] self._messages = messages # A record of all the warnings that have been issued. - self._warnings = [] + self._warnings: List[str] = [] # Other instance attributes, set later. - self._data = self._collector = None - self._plugins = None - self._inorout = None + self._debug: DebugControl + self._plugins: Plugins + self._inorout: InOrOut + self._data: CoverageData + self._collector: Collector + self._file_mapper: Callable[[str], str] + self._data_suffix = self._run_suffix = None - self._exclude_re = None - self._debug = None - self._file_mapper = None - self._old_sigterm = None + self._exclude_re: Dict[str, str] = {} + self._old_sigterm: Optional[Callable[[int, Optional[FrameType]], Any]] = None # State machine variables: # Have we initialized everything? @@ -244,12 +263,22 @@ def __init__( # Build our configuration from a number of sources. 
self.config = read_coverage_config( - config_file=config_file, warn=self._warn, - data_file=data_file, cover_pylib=cover_pylib, timid=timid, - branch=branch, parallel=bool_or_none(data_suffix), - source=source, source_pkgs=source_pkgs, run_omit=omit, run_include=include, debug=debug, - report_omit=omit, report_include=include, - concurrency=concurrency, context=context, + config_file=config_file, + warn=self._warn, + data_file=data_file, + cover_pylib=cover_pylib, + timid=timid, + branch=branch, + parallel=bool_or_none(data_suffix), + source=source, + source_pkgs=source_pkgs, + run_omit=omit, + run_include=include, + debug=debug, + report_omit=omit, + report_include=include, + concurrency=concurrency, + context=context, ) # If we have sub-process measurement happening automatically, then we @@ -260,7 +289,7 @@ def __init__( if not env.METACOV: _prevent_sub_process_measurement() - def _init(self): + def _init(self) -> None: """Set all the initial state. This is called by the public methods to initialize state. This lets us @@ -300,7 +329,7 @@ def _init(self): # this is a bit childish. :) plugin.configure([self, self.config][int(time.time()) % 2]) - def _post_init(self): + def _post_init(self) -> None: """Stuff to do after everything is initialized.""" if self._should_write_debug: self._should_write_debug = False @@ -311,7 +340,7 @@ def _post_init(self): if self.config._crash and self.config._crash in short_stack(limit=4): raise Exception(f"Crashing because called by {self.config._crash}") - def _write_startup_debug(self): + def _write_startup_debug(self) -> None: """Write out debug info at startup if needed.""" wrote_any = False with self._debug.without_callers(): @@ -335,7 +364,7 @@ def _write_startup_debug(self): if wrote_any: write_formatted_info(self._debug.write, "end", ()) - def _should_trace(self, filename, frame): + def _should_trace(self, filename: str, frame: FrameType) -> TFileDisposition: """Decide whether to trace execution in `filename`. Calls `_should_trace_internal`, and returns the FileDisposition. @@ -346,7 +375,7 @@ def _should_trace(self, filename, frame): self._debug.write(disposition_debug_msg(disp)) return disp - def _check_include_omit_etc(self, filename, frame): + def _check_include_omit_etc(self, filename: str, frame: FrameType) -> bool: """Check a file name against the include/omit/etc, rules, verbosely. Returns a boolean: True if the file should be traced, False if not. @@ -362,7 +391,7 @@ def _check_include_omit_etc(self, filename, frame): return not reason - def _warn(self, msg, slug=None, once=False): + def _warn(self, msg: str, slug: Optional[str]=None, once: bool=False) -> None: """Use `msg` as a warning. For warning suppression, use `slug` as the shorthand. @@ -371,31 +400,32 @@ def _warn(self, msg, slug=None, once=False): slug.) """ - if self._no_warn_slugs is None: - if self.config is not None: + if not self._no_warn_slugs: + # _warn() can be called before self.config is set in __init__... 
+ if hasattr(self, "config"): self._no_warn_slugs = list(self.config.disable_warnings) - if self._no_warn_slugs is not None: - if slug in self._no_warn_slugs: - # Don't issue the warning - return + if slug in self._no_warn_slugs: + # Don't issue the warning + return self._warnings.append(msg) if slug: msg = f"{msg} ({slug})" - if self._debug is not None and self._debug.should('pid'): + if hasattr(self, "_debug") and self._debug.should('pid'): msg = f"[{os.getpid()}] {msg}" warnings.warn(msg, category=CoverageWarning, stacklevel=2) if once: + assert slug is not None self._no_warn_slugs.append(slug) - def _message(self, msg): + def _message(self, msg: str) -> None: """Write a message to the user, if configured to do so.""" if self._messages: print(msg) - def get_option(self, option_name): + def get_option(self, option_name: str) -> Optional[TConfigValue]: """Get an option from the configuration. `option_name` is a colon-separated string indicating the section and @@ -406,14 +436,14 @@ def get_option(self, option_name): selected. - As a special case, an `option_name` of ``"paths"`` will return an - OrderedDict with the entire ``[paths]`` section value. + As a special case, an `option_name` of ``"paths"`` will return a + dictionary with the entire ``[paths]`` section value. .. versionadded:: 4.0 """ return self.config.get_option(option_name) - def set_option(self, option_name, value): + def set_option(self, option_name: str, value: Union[TConfigValue, TConfigSection]) -> None: """Set an option in the configuration. `option_name` is a colon-separated string indicating the section and @@ -438,17 +468,17 @@ def set_option(self, option_name, value): branch = True As a special case, an `option_name` of ``"paths"`` will replace the - entire ``[paths]`` section. The value should be an OrderedDict. + entire ``[paths]`` section. The value should be a dictionary. .. versionadded:: 4.0 """ self.config.set_option(option_name, value) - def load(self): + def load(self) -> None: """Load previously-collected coverage data from the data file.""" self._init() - if self._collector: + if hasattr(self, "_collector"): self._collector.reset() should_skip = self.config.parallel and not os.path.exists(self.config.data_file) if not should_skip: @@ -457,15 +487,11 @@ def load(self): if not should_skip: self._data.read() - def _init_for_start(self): + def _init_for_start(self) -> None: """Initialization for start()""" # Construct the collector. - concurrency = self.config.concurrency or [] + concurrency: List[str] = self.config.concurrency or [] if "multiprocessing" in concurrency: - if not patch_multiprocessing: - raise ConfigError( # pragma: only jython - "multiprocessing is not supported on this Python" - ) if self.config.config_file is None: raise ConfigError("multiprocessing requires a configuration file") patch_multiprocessing(rcfile=self.config.config_file) @@ -528,10 +554,11 @@ def _init_for_start(self): # Create the file classifying substructure.
self._inorout = InOrOut( + config=self.config, warn=self._warn, debug=(self._debug if self._debug.should('trace') else None), + include_namespace_packages=self.config.include_namespace_packages, ) - self._inorout.configure(self.config) self._inorout.plugins = self._plugins self._inorout.disp_class = self._collector.file_disposition_class @@ -546,11 +573,13 @@ def _init_for_start(self): # The Python docs seem to imply that SIGTERM works uniformly even # on Windows, but that's not my experience, and this agrees: # https://stackoverflow.com/questions/35772001/x/35792192#35792192 - self._old_sigterm = signal.signal(signal.SIGTERM, self._on_sigterm) + self._old_sigterm = signal.signal( # type: ignore[assignment] + signal.SIGTERM, self._on_sigterm, + ) - def _init_data(self, suffix): + def _init_data(self, suffix: Optional[Union[str, bool]]) -> None: """Create a data file if we don't have one yet.""" - if self._data is None: + if not hasattr(self, "_data"): # Create the data file. We do this at construction time so that the # data file will be written into the directory where the process # started rather than wherever the process eventually chdir'd to. @@ -563,7 +592,7 @@ def _init_data(self, suffix): no_disk=self._no_disk, ) - def start(self): + def start(self) -> None: """Start measuring code coverage. Coverage measurement only occurs in functions called after @@ -595,7 +624,7 @@ def start(self): self._started = True self._instances.append(self) - def stop(self): + def stop(self) -> None: """Stop measuring code coverage.""" if self._instances: if self._instances[-1] is self: @@ -604,7 +633,7 @@ def stop(self): self._collector.stop() self._started = False - def _atexit(self, event="atexit"): + def _atexit(self, event: str="atexit") -> None: """Clean up on process shutdown.""" if self._debug.should("process"): self._debug.write(f"{event}: pid: {os.getpid()}, instance: {self!r}") @@ -613,7 +642,7 @@ def _atexit(self, event="atexit"): if self._auto_save: self.save() - def _on_sigterm(self, signum_unused, frame_unused): + def _on_sigterm(self, signum_unused: int, frame_unused: Optional[FrameType]) -> None: """A handler for signal.SIGTERM.""" self._atexit("sigterm") # Statements after here won't be seen by metacov because we just wrote @@ -621,7 +650,7 @@ def _on_sigterm(self, signum_unused, frame_unused): signal.signal(signal.SIGTERM, self._old_sigterm) # pragma: not covered os.kill(os.getpid(), signal.SIGTERM) # pragma: not covered - def erase(self): + def erase(self) -> None: """Erase previously collected coverage data. This removes the in-memory data collected in this session as well as @@ -630,14 +659,14 @@ def erase(self): """ self._init() self._post_init() - if self._collector: + if hasattr(self, "_collector"): self._collector.reset() self._init_data(suffix=None) self._data.erase(parallel=self.config.parallel) - self._data = None + del self._data self._inited_for_start = False - def switch_context(self, new_context): + def switch_context(self, new_context: str) -> None: """Switch to a new dynamic context. 
`new_context` is a string to use as the :ref:`dynamic context @@ -658,13 +687,13 @@ def switch_context(self, new_context): self._collector.switch_context(new_context) - def clear_exclude(self, which='exclude'): + def clear_exclude(self, which: str='exclude') -> None: """Clear the exclude list.""" self._init() setattr(self.config, which + "_list", []) self._exclude_regex_stale() - def exclude(self, regex, which='exclude'): + def exclude(self, regex: str, which: str='exclude') -> None: """Exclude source lines from execution consideration. A number of lists of regular expressions are maintained. Each list @@ -681,36 +710,54 @@ def exclude(self, regex, which='exclude'): """ self._init() excl_list = getattr(self.config, which + "_list") + assert isinstance(regex, str) excl_list.append(regex) self._exclude_regex_stale() - def _exclude_regex_stale(self): + def _exclude_regex_stale(self) -> None: """Drop all the compiled exclusion regexes, a list was modified.""" self._exclude_re.clear() - def _exclude_regex(self, which): - """Return a compiled regex for the given exclusion list.""" + def _exclude_regex(self, which: str) -> str: + """Return a regex string for the given exclusion list.""" if which not in self._exclude_re: excl_list = getattr(self.config, which + "_list") self._exclude_re[which] = join_regex(excl_list) return self._exclude_re[which] - def get_exclude_list(self, which='exclude'): - """Return a list of excluded regex patterns. + def get_exclude_list(self, which: str='exclude') -> List[str]: + """Return a list of excluded regex strings. `which` indicates which list is desired. See :meth:`exclude` for the lists that are available, and their meaning. """ self._init() - return getattr(self.config, which + "_list") + return cast(List[str], getattr(self.config, which + "_list")) - def save(self): + def save(self) -> None: """Save the collected coverage data to the data file.""" data = self.get_data() data.write() - def combine(self, data_paths=None, strict=False, keep=False): + def _make_aliases(self) -> PathAliases: + """Create a PathAliases from our configuration.""" + aliases = PathAliases( + debugfn=(self._debug.write if self._debug.should("pathmap") else None), + relative=self.config.relative_files, + ) + for paths in self.config.paths.values(): + result = paths[0] + for pattern in paths[1:]: + aliases.add(pattern, result) + return aliases + + def combine( + self, + data_paths: Optional[Iterable[str]]=None, + strict: bool=False, + keep: bool=False + ) -> None: """Combine together a number of similarly-named coverage data files. All coverage data files whose name starts with `data_file` (from the @@ -741,27 +788,16 @@ def combine(self, data_paths=None, strict=False, keep=False): self._post_init() self.get_data() - aliases = None - if self.config.paths: - aliases = PathAliases( - debugfn=(self._debug.write if self._debug.should("pathmap") else None), - relative=self.config.relative_files, - ) - for paths in self.config.paths.values(): - result = paths[0] - for pattern in paths[1:]: - aliases.add(pattern, result) - combine_parallel_data( self._data, - aliases=aliases, + aliases=self._make_aliases(), data_paths=data_paths, strict=strict, keep=keep, message=self._message, ) - def get_data(self): + def get_data(self) -> CoverageData: """Get the collected data. Also warn about various problems collecting data. 
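# A plain-Python sketch (hypothetical data, no coverage internals) of how
# _make_aliases() above turns a [paths] config section into (pattern, result)
# rules: the first entry of each list is the canonical result, and every
# later entry is a pattern remapped onto it.
paths_section = {
    "source": ["src/", "/jenkins/build/*/src"],
}
alias_rules = []
for paths in paths_section.values():
    result = paths[0]
    for pattern in paths[1:]:
        alias_rules.append((pattern, result))
assert alias_rules == [("/jenkins/build/*/src", "src/")]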
@@ -779,16 +815,16 @@ def get_data(self): if not plugin._coverage_enabled: self._collector.plugin_was_disabled(plugin) - if self._collector and self._collector.flush_data(): + if hasattr(self, "_collector") and self._collector.flush_data(): self._post_save_work() return self._data - def _post_save_work(self): + def _post_save_work(self) -> None: """After saving data, look for warnings, post-work, etc. Warn about things that should have happened but didn't. - Look for unexecuted files. + Look for un-executed files. """ # If there are still entries in the source_pkgs_unmatched list, @@ -801,7 +837,7 @@ def _post_save_work(self): self._warn("No data was collected.", slug="no-data-collected") # Touch all the files that could have executed, so that we can - # mark completely unexecuted files as 0% covered. + # mark completely un-executed files as 0% covered. if self._data is not None: file_paths = collections.defaultdict(list) for file_path, plugin_name in self._inorout.find_possibly_unexecuted_files(): @@ -810,16 +846,16 @@ def _post_save_work(self): for plugin_name, paths in file_paths.items(): self._data.touch_files(paths, plugin_name) - if self.config.note: - self._warn("The '[run] note' setting is no longer supported.") - # Backward compatibility with version 1. - def analysis(self, morf): + def analysis(self, morf: TMorf) -> Tuple[str, List[TLineNo], List[TLineNo], str]: """Like `analysis2` but doesn't return excluded line numbers.""" f, s, _, m, mf = self.analysis2(morf) return f, s, m, mf - def analysis2(self, morf): + def analysis2( + self, + morf: TMorf, + ) -> Tuple[str, List[TLineNo], List[TLineNo], List[TLineNo], str]: """Analyze a module. `morf` is a module or a file name. It will be analyzed to determine @@ -845,7 +881,7 @@ def analysis2(self, morf): analysis.missing_formatted(), ) - def _analyze(self, it): + def _analyze(self, it: Union[FileReporter, TMorf]) -> Analysis: """Analyze a single morf or code unit. Returns an `Analysis` object. @@ -856,15 +892,17 @@ def _analyze(self, it): self._post_init() data = self.get_data() - if not isinstance(it, FileReporter): - it = self._get_file_reporter(it) + if isinstance(it, FileReporter): + fr = it + else: + fr = self._get_file_reporter(it) - return Analysis(data, self.config.precision, it, self._file_mapper) + return Analysis(data, self.config.precision, fr, self._file_mapper) - def _get_file_reporter(self, morf): + def _get_file_reporter(self, morf: TMorf) -> FileReporter: """Get a FileReporter for a module or file name.""" plugin = None - file_reporter = "python" + file_reporter: Union[str, FileReporter] = "python" if isinstance(morf, str): mapped_morf = self._file_mapper(morf) @@ -884,9 +922,10 @@ def _get_file_reporter(self, morf): if file_reporter == "python": file_reporter = PythonFileReporter(morf, self) + assert isinstance(file_reporter, FileReporter) return file_reporter - def _get_file_reporters(self, morfs=None): + def _get_file_reporters(self, morfs: Optional[Iterable[TMorf]]=None) -> List[FileReporter]: """Get a list of FileReporters for a list of modules or file names. For each module or file name in `morfs`, find a FileReporter. Return @@ -902,16 +941,33 @@ def _get_file_reporters(self, morfs=None): # Be sure we have a collection. 
if not isinstance(morfs, (list, tuple, set)): - morfs = [morfs] + morfs = [morfs] # type: ignore[list-item] file_reporters = [self._get_file_reporter(morf) for morf in morfs] return file_reporters + def _prepare_data_for_reporting(self) -> None: + """Re-map data before reporting, to get implicit 'combine' behavior.""" + if self.config.paths: + mapped_data = CoverageData(warn=self._warn, debug=self._debug, no_disk=True) + mapped_data.update(self._data, aliases=self._make_aliases()) + self._data = mapped_data + def report( - self, morfs=None, show_missing=None, ignore_errors=None, - file=None, omit=None, include=None, skip_covered=None, - contexts=None, skip_empty=None, precision=None, sort=None - ): + self, + morfs: Optional[Iterable[TMorf]]=None, + show_missing: Optional[bool]=None, + ignore_errors: Optional[bool]=None, + file: Optional[IO[str]]=None, + omit: Optional[List[str]]=None, + include: Optional[List[str]]=None, + skip_covered: Optional[bool]=None, + contexts: Optional[List[str]]=None, + skip_empty: Optional[bool]=None, + precision: Optional[int]=None, + sort: Optional[str]=None, + output_format: Optional[str]=None, + ) -> float: """Write a textual summary report to `file`. Each module in `morfs` is listed, with counts of statements, executed @@ -924,6 +980,9 @@ def report( `file` is a file-like object, suitable for writing. + `output_format` determines the format, either "text" (the default), + "markdown", or "total". + `include` is a list of file name patterns. Files that match will be included in the report. Files matching `omit` will not be included in the report. @@ -933,7 +992,7 @@ def report( If `skip_empty` is true, don't report on empty files (those that have no statements). - `contexts` is a list of regular expressions. Only data from + `contexts` is a list of regular expression strings. Only data from :ref:`dynamic contexts <dynamic_contexts>` that match one of those expressions (using :func:`re.search <python:re.search>`) will be included in the report. @@ -955,21 +1014,36 @@ def report( .. versionadded:: 5.2 The `precision` parameter. + .. versionadded:: 7.0 + The `output_format` parameter. + """ + self._prepare_data_for_reporting() with override_config( self, - ignore_errors=ignore_errors, report_omit=omit, report_include=include, - show_missing=show_missing, skip_covered=skip_covered, - report_contexts=contexts, skip_empty=skip_empty, precision=precision, - sort=sort + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + show_missing=show_missing, + skip_covered=skip_covered, + report_contexts=contexts, + skip_empty=skip_empty, + precision=precision, + sort=sort, + format=output_format, ): reporter = SummaryReporter(self) return reporter.report(morfs, outfile=file) def annotate( - self, morfs=None, directory=None, ignore_errors=None, - omit=None, include=None, contexts=None, - ): + self, + morfs: Optional[Iterable[TMorf]]=None, + directory: Optional[str]=None, + ignore_errors: Optional[bool]=None, + omit: Optional[List[str]]=None, + include: Optional[List[str]]=None, + contexts: Optional[List[str]]=None, + ) -> None: """Annotate a list of modules. ..
note:: @@ -989,19 +1063,32 @@ def annotate( print("The annotate command will be removed in a future version.") print("Get in touch if you still use it: ned@nedbatchelder.com") - with override_config(self, - ignore_errors=ignore_errors, report_omit=omit, - report_include=include, report_contexts=contexts, + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + report_contexts=contexts, ): reporter = AnnotateReporter(self) reporter.report(morfs, directory=directory) def html_report( - self, morfs=None, directory=None, ignore_errors=None, - omit=None, include=None, extra_css=None, title=None, - skip_covered=None, show_contexts=None, contexts=None, - skip_empty=None, precision=None, - ): + self, + morfs: Optional[Iterable[TMorf]]=None, + directory: Optional[str]=None, + ignore_errors: Optional[bool]=None, + omit: Optional[List[str]]=None, + include: Optional[List[str]]=None, + extra_css: Optional[str]=None, + title: Optional[str]=None, + skip_covered: Optional[bool]=None, + show_contexts: Optional[bool]=None, + contexts: Optional[List[str]]=None, + skip_empty: Optional[bool]=None, + precision: Optional[int]=None, + ) -> float: """Generate an HTML report. The HTML is written to `directory`. The file "index.html" is the @@ -1026,20 +1113,35 @@ def html_report( changing the files in the report folder. """ - with override_config(self, - ignore_errors=ignore_errors, report_omit=omit, report_include=include, - html_dir=directory, extra_css=extra_css, html_title=title, - html_skip_covered=skip_covered, show_contexts=show_contexts, report_contexts=contexts, - html_skip_empty=skip_empty, precision=precision, + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + html_dir=directory, + extra_css=extra_css, + html_title=title, + html_skip_covered=skip_covered, + show_contexts=show_contexts, + report_contexts=contexts, + html_skip_empty=skip_empty, + precision=precision, ): reporter = HtmlReporter(self) ret = reporter.report(morfs) return ret def xml_report( - self, morfs=None, outfile=None, ignore_errors=None, - omit=None, include=None, contexts=None, skip_empty=None, - ): + self, + morfs: Optional[Iterable[TMorf]]=None, + outfile: Optional[str]=None, + ignore_errors: Optional[bool]=None, + omit: Optional[List[str]]=None, + include: Optional[List[str]]=None, + contexts: Optional[List[str]]=None, + skip_empty: Optional[bool]=None, + ) -> float: """Generate an XML report of coverage results. The report is compatible with Cobertura reports. @@ -1052,22 +1154,36 @@ def xml_report( Returns a float, the total percentage covered. 
""" - with override_config(self, - ignore_errors=ignore_errors, report_omit=omit, report_include=include, - xml_output=outfile, report_contexts=contexts, skip_empty=skip_empty, + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + xml_output=outfile, + report_contexts=contexts, + skip_empty=skip_empty, ): return render_report(self.config.xml_output, XmlReporter(self), morfs, self._message) def json_report( - self, morfs=None, outfile=None, ignore_errors=None, - omit=None, include=None, contexts=None, pretty_print=None, - show_contexts=None - ): + self, + morfs: Optional[Iterable[TMorf]]=None, + outfile: Optional[str]=None, + ignore_errors: Optional[bool]=None, + omit: Optional[List[str]]=None, + include: Optional[List[str]]=None, + contexts: Optional[List[str]]=None, + pretty_print: Optional[bool]=None, + show_contexts: Optional[bool]=None, + ) -> float: """Generate a JSON report of coverage results. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. + `pretty_print` is a boolean, whether to pretty-print the JSON output or not. + See :meth:`report` for other arguments. Returns a float, the total percentage covered. @@ -1075,17 +1191,28 @@ def json_report( .. versionadded:: 5.0 """ - with override_config(self, - ignore_errors=ignore_errors, report_omit=omit, report_include=include, - json_output=outfile, report_contexts=contexts, json_pretty_print=pretty_print, - json_show_contexts=show_contexts + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + json_output=outfile, + report_contexts=contexts, + json_pretty_print=pretty_print, + json_show_contexts=show_contexts, ): return render_report(self.config.json_output, JsonReporter(self), morfs, self._message) def lcov_report( - self, morfs=None, outfile=None, ignore_errors=None, - omit=None, include=None, contexts=None, - ): + self, + morfs: Optional[Iterable[TMorf]]=None, + outfile: Optional[str]=None, + ignore_errors: Optional[bool]=None, + omit: Optional[List[str]]=None, + include: Optional[List[str]]=None, + contexts: Optional[List[str]]=None, + ) -> float: """Generate an LCOV report of coverage results. Each module in 'morfs' is included in the report. 'outfile' is the @@ -1095,13 +1222,18 @@ def lcov_report( .. 
versionadded:: 6.3 """ - with override_config(self, - ignore_errors=ignore_errors, report_omit=omit, report_include=include, - lcov_output=outfile, report_contexts=contexts, + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + lcov_output=outfile, + report_contexts=contexts, ): return render_report(self.config.lcov_output, LcovReporter(self), morfs, self._message) - def sys_info(self): + def sys_info(self) -> Iterable[Tuple[str, Any]]: """Return a list of (key, value) pairs showing internal information.""" import coverage as covmod @@ -1109,7 +1241,7 @@ def sys_info(self): self._init() self._post_init() - def plugin_info(plugins): + def plugin_info(plugins: List[Any]) -> List[str]: """Make an entry for the sys_info from a list of plug-ins.""" entries = [] for plugin in plugins: @@ -1122,8 +1254,8 @@ def plugin_info(plugins): info = [ ('coverage_version', covmod.__version__), ('coverage_module', covmod.__file__), - ('tracer', self._collector.tracer_name() if self._collector else "-none-"), - ('CTracer', 'available' if CTracer else "unavailable"), + ('tracer', self._collector.tracer_name() if hasattr(self, "_collector") else "-none-"), + ('CTracer', 'available' if HAS_CTRACER else "unavailable"), ('plugins.file_tracers', plugin_info(self._plugins.file_tracers)), ('plugins.configurers', plugin_info(self._plugins.configurers)), ('plugins.context_switchers', plugin_info(self._plugins.context_switchers)), @@ -1131,11 +1263,9 @@ def plugin_info(plugins): ('configs_read', self.config.config_files_read), ('config_file', self.config.config_file), ('config_contents', - repr(self.config._config_contents) - if self.config._config_contents - else '-none-' + repr(self.config._config_contents) if self.config._config_contents else '-none-' ), - ('data_file', self._data.data_filename() if self._data is not None else "-none-"), + ('data_file', self._data.data_filename() if hasattr(self, "_data") else "-none-"), ('python', sys.version.replace('\n', '')), ('platform', platform.platform()), ('implementation', platform.python_implementation()), @@ -1156,7 +1286,7 @@ def plugin_info(plugins): ('command_line', " ".join(getattr(sys, 'argv', ['-none-']))), ] - if self._inorout: + if hasattr(self, "_inorout"): info.extend(self._inorout.sys_info()) info.extend(CoverageData.sys_info()) @@ -1169,10 +1299,13 @@ def plugin_info(plugins): if int(os.environ.get("COVERAGE_DEBUG_CALLS", 0)): # pragma: debugging from coverage.debug import decorate_methods, show_calls - Coverage = decorate_methods(show_calls(show_args=True), butnot=['get_data'])(Coverage) + Coverage = decorate_methods( # type: ignore[misc] + show_calls(show_args=True), + butnot=['get_data'] + )(Coverage) -def process_startup(): +def process_startup() -> Optional[Coverage]: """Call this at Python start-up to perhaps measure coverage. 
If the environment variable COVERAGE_PROCESS_START is defined, coverage @@ -1215,7 +1348,7 @@ def process_startup(): return None cov = Coverage(config_file=cps) - process_startup.coverage = cov + process_startup.coverage = cov # type: ignore[attr-defined] cov._warn_no_data = False cov._warn_unimported_source = False cov._warn_preimported_source = False @@ -1225,7 +1358,7 @@ def process_startup(): return cov -def _prevent_sub_process_measurement(): +def _prevent_sub_process_measurement() -> None: """Stop any subprocess auto-measurement from writing data.""" auto_created_coverage = getattr(process_startup, "coverage", None) if auto_created_coverage is not None: diff --git a/coverage/data.py b/coverage/data.py index 4bdfe3010..baddadddf 100644 --- a/coverage/data.py +++ b/coverage/data.py @@ -11,14 +11,18 @@ """ import glob +import hashlib import os.path +from typing import Callable, Dict, Iterable, List, Optional + from coverage.exceptions import CoverageException, NoDataError -from coverage.misc import file_be_gone, human_sorted, plural +from coverage.files import PathAliases +from coverage.misc import Hasher, file_be_gone, human_sorted, plural from coverage.sqldata import CoverageData -def line_counts(data, fullpath=False): +def line_counts(data: CoverageData, fullpath: bool=False) -> Dict[str, int]: """Return a dict summarizing the line coverage data. Keys are based on the file names, and values are the number of executed @@ -35,11 +39,13 @@ def line_counts(data, fullpath=False): else: filename_fn = os.path.basename for filename in data.measured_files(): - summ[filename_fn(filename)] = len(data.lines(filename)) + lines = data.lines(filename) + assert lines is not None + summ[filename_fn(filename)] = len(lines) return summ -def add_data_to_hash(data, filename, hasher): +def add_data_to_hash(data: CoverageData, filename: str, hasher: Hasher) -> None: """Contribute `filename`'s data to the `hasher`. `hasher` is a `coverage.misc.Hasher` instance to be updated with @@ -50,11 +56,11 @@ def add_data_to_hash(data, filename, hasher): if data.has_arcs(): hasher.update(sorted(data.arcs(filename) or [])) else: - hasher.update(sorted(data.lines(filename) or [])) + hasher.update(sorted_lines(data, filename)) hasher.update(data.file_tracer(filename)) -def combinable_files(data_file, data_paths=None): +def combinable_files(data_file: str, data_paths: Optional[Iterable[str]]=None) -> List[str]: """Make a list of data files to be combined. `data_file` is a path to a data file. `data_paths` is a list of files or @@ -78,8 +84,13 @@ def combinable_files(data_file, data_paths=None): def combine_parallel_data( - data, aliases=None, data_paths=None, strict=False, keep=False, message=None, -): + data: CoverageData, + aliases: Optional[PathAliases]=None, + data_paths: Optional[Iterable[str]]=None, + strict: bool=False, + keep: bool=False, + message: Optional[Callable[[str], None]]=None, +) -> None: """Combine a number of data files together. `data` is a CoverageData. @@ -97,20 +108,24 @@ def combine_parallel_data( If `data_paths` is not provided, then the directory portion of `data.filename` is used as the directory to search for data files. - Unless `keep` is True every data file found and combined is then deleted from disk. If a file - cannot be read, a warning will be issued, and the file will not be - deleted. + Unless `keep` is True every data file found and combined is then deleted + from disk. If a file cannot be read, a warning will be issued, and the + file will not be deleted. 
If `strict` is true, and no files are found to combine, an error is raised. + `message` is a function to use for printing messages to the user. + """ files_to_combine = combinable_files(data.base_filename(), data_paths) if strict and not files_to_combine: raise NoDataError("No data to combine") - files_combined = 0 + file_hashes = set() + combined_any = False + for f in files_to_combine: if f == data.data_filename(): # Sometimes we are combining into a file which is one of the @@ -118,38 +133,56 @@ def combine_parallel_data( if data._debug.should('dataio'): data._debug.write(f"Skipping combining ourself: {f!r}") continue - if data._debug.should('dataio'): - data._debug.write(f"Combining data file {f!r}") + try: - new_data = CoverageData(f, debug=data._debug) - new_data.read() - except CoverageException as exc: - if data._warn: - # The CoverageException has the file name in it, so just - # use the message as the warning. - data._warn(str(exc)) + rel_file_name = os.path.relpath(f) + except ValueError: + # ValueError can be raised under Windows when os.getcwd() returns a + # folder from a different drive than the drive of f, in which case + # we print the original value of f instead of its relative path + rel_file_name = f + + with open(f, "rb") as fobj: + hasher = hashlib.new("sha3_256") + hasher.update(fobj.read()) + sha = hasher.digest() + combine_this_one = sha not in file_hashes + + delete_this_one = not keep + if combine_this_one: + if data._debug.should('dataio'): + data._debug.write(f"Combining data file {f!r}") + file_hashes.add(sha) + try: + new_data = CoverageData(f, debug=data._debug) + new_data.read() + except CoverageException as exc: + if data._warn: + # The CoverageException has the file name in it, so just + # use the message as the warning. 
+ data._warn(str(exc)) + if message: + message(f"Couldn't combine data file {rel_file_name}: {exc}") + delete_this_one = False + else: + data.update(new_data, aliases=aliases) + combined_any = True + if message: + message(f"Combined data file {rel_file_name}") else: - data.update(new_data, aliases=aliases) - files_combined += 1 if message: - try: - file_name = os.path.relpath(f) - except ValueError: - # ValueError can be raised under Windows when os.getcwd() returns a - # folder from a different drive than the drive of f, in which case - # we print the original value of f instead of its relative path - file_name = f - message(f"Combined data file {file_name}") - if not keep: - if data._debug.should('dataio'): - data._debug.write(f"Deleting combined data file {f!r}") - file_be_gone(f) - - if strict and not files_combined: + message(f"Skipping duplicate data {rel_file_name}") + + if delete_this_one: + if data._debug.should('dataio'): + data._debug.write(f"Deleting data file {f!r}") + file_be_gone(f) + + if strict and not combined_any: raise NoDataError("No usable data files") -def debug_data_file(filename): +def debug_data_file(filename: str) -> None: """Implementation of 'coverage debug data'.""" data = CoverageData(filename) filename = data.data_filename() @@ -169,3 +202,9 @@ def debug_data_file(filename): if plugin: line += f" [{plugin}]" print(line) + + +def sorted_lines(data: CoverageData, filename: str) -> List[int]: + """Get the sorted lines for a file, for tests.""" + lines = data.lines(filename) + return sorted(lines or []) diff --git a/coverage/debug.py b/coverage/debug.py index 4286bc501..82de3c298 100644 --- a/coverage/debug.py +++ b/coverage/debug.py @@ -3,6 +3,8 @@ """Control of and utilities for debugging.""" +from __future__ import annotations + import contextlib import functools import inspect @@ -15,6 +17,11 @@ import types import _thread +from typing import ( + Any, Callable, Generator, IO, Iterable, Iterator, Optional, List, Tuple, + cast, +) + from coverage.misc import isolate_module os = isolate_module(os) @@ -23,16 +30,16 @@ # When debugging, it can be helpful to force some options, especially when # debugging the configuration mechanisms you usually use to control debugging! # This is a list of forced debugging options. 
-FORCED_DEBUG = []
+FORCED_DEBUG: List[str] = []
 FORCED_DEBUG_FILE = None
 
 
 class DebugControl:
     """Control and output for debugging."""
 
-    show_repr_attr = False          # For SimpleReprMixin
+    show_repr_attr = False          # For AutoReprMixin
 
-    def __init__(self, options, output):
+    def __init__(self, options: Iterable[str], output: Optional[IO[str]]) -> None:
         """Configure the options and output file for debugging."""
         self.options = list(options) + FORCED_DEBUG
         self.suppress_callers = False
@@ -47,17 +54,17 @@ def __init__(self, options, output):
         )
         self.raw_output = self.output.outfile
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return f"<DebugControl options={self.options!r} raw_output={self.raw_output!r}>"
 
-    def should(self, option):
+    def should(self, option: str) -> bool:
         """Decide whether to output debug information in category `option`."""
         if option == "callers" and self.suppress_callers:
             return False
         return (option in self.options)
 
     @contextlib.contextmanager
-    def without_callers(self):
+    def without_callers(self) -> Generator[None, None, None]:
         """A context manager to prevent call stacks from being logged."""
         old = self.suppress_callers
         self.suppress_callers = True
@@ -66,7 +73,7 @@ def without_callers(self):
         finally:
             self.suppress_callers = old
 
-    def write(self, msg):
+    def write(self, msg: str) -> None:
         """Write a line of debug output.
 
         `msg` is the line to write. A newline will be appended.
@@ -84,27 +91,31 @@ def write(self, msg):
 
 class DebugControlString(DebugControl):
     """A `DebugControl` that writes to a StringIO, for testing."""
-    def __init__(self, options):
+    def __init__(self, options: Iterable[str]) -> None:
         super().__init__(options, io.StringIO())
 
-    def get_output(self):
+    def get_output(self) -> str:
         """Get the output text from the `DebugControl`."""
-        return self.raw_output.getvalue()
+        return cast(str, self.raw_output.getvalue())
 
 
 class NoDebugging:
     """A replacement for DebugControl that will never try to do anything."""
-    def should(self, option):               # pylint: disable=unused-argument
+    def should(self, option: str) -> bool:  # pylint: disable=unused-argument
         """Should we write debug messages?  Never."""
         return False
 
+    def write(self, msg: str) -> None:
+        """This will never be called."""
+        raise AssertionError("NoDebugging.write should never be called.")
+
 
-def info_header(label):
+def info_header(label: str) -> str:
     """Make a nice header string."""
     return "--{:-<60s}".format(" "+label+" ")
 
 
-def info_formatter(info):
+def info_formatter(info: Iterable[Tuple[str, Any]]) -> Iterator[str]:
     """Produce a sequence of formatted lines from info.
 
     `info` is a sequence of pairs (label, data).  The produced lines are
@@ -131,7 +142,11 @@ def info_formatter(info):
         yield "%*s: %s" % (label_len, label, data)
 
 
-def write_formatted_info(write, header, info):
+def write_formatted_info(
+    write: Callable[[str], None],
+    header: str,
+    info: Iterable[Tuple[str, Any]],
+) -> None:
     """Write a sequence of (label,data) pairs nicely.
 
     `write` is a function write(str) that accepts each line of output.
@@ -145,7 +160,7 @@ def write_formatted_info(write, header, info):
         write(f" {line}")
 
 
-def short_stack(limit=None, skip=0):
+def short_stack(limit: Optional[int]=None, skip: int=0) -> str:
     """Return a string summarizing the call stack.
 
     The string is multi-line, with one line per stack frame.
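The DebugControlString subclass above exists so tests can capture debug output. The same capture pattern in miniature (the class and option names here are illustrative, not coverage.py's):

    import io

    class StringDebug:
        """Collect categorized debug messages into a StringIO."""
        def __init__(self, options):
            self.options = list(options)
            self.output = io.StringIO()

        def should(self, option):
            return option in self.options

        def write(self, msg):
            self.output.write(msg + "\n")

    debug = StringDebug(["dataio"])
    if debug.should("dataio"):
        debug.write("Combining data file '.coverage.123'")
    print(debug.output.getvalue())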
Each line shows @@ -167,21 +182,25 @@ def short_stack(limit=None, skip=0): return "\n".join("%30s : %s:%d" % (t[3], t[1], t[2]) for t in stack) -def dump_stack_frames(limit=None, out=None, skip=0): +def dump_stack_frames( + limit: Optional[int]=None, + out: Optional[IO[str]]=None, + skip: int=0 +) -> None: """Print a summary of the stack to stdout, or someplace else.""" out = out or sys.stdout out.write(short_stack(limit=limit, skip=skip+1)) out.write("\n") -def clipped_repr(text, numchars=50): +def clipped_repr(text: str, numchars: int=50) -> str: """`repr(text)`, but limited to `numchars`.""" r = reprlib.Repr() r.maxstring = numchars return r.repr(text) -def short_id(id64): +def short_id(id64: int) -> int: """Given a 64-bit id, make a shorter 16-bit one.""" id16 = 0 for offset in range(0, 64, 16): @@ -189,7 +208,7 @@ def short_id(id64): return id16 & 0xFFFF -def add_pid_and_tid(text): +def add_pid_and_tid(text: str) -> str: """A filter to add pid and tid to debug messages.""" # Thread ids are useful, but too long. Make a shorter one. tid = f"{short_id(_thread.get_ident()):04x}" @@ -197,16 +216,16 @@ def add_pid_and_tid(text): return text -class SimpleReprMixin: - """A mixin implementing a simple __repr__.""" - simple_repr_ignore = ['simple_repr_ignore', '$coverage.object_id'] +class AutoReprMixin: + """A mixin implementing an automatic __repr__ for debugging.""" + auto_repr_ignore = ['auto_repr_ignore', '$coverage.object_id'] - def __repr__(self): + def __repr__(self) -> str: show_attrs = ( (k, v) for k, v in self.__dict__.items() if getattr(v, "show_repr_attr", True) and not callable(v) - and k not in self.simple_repr_ignore + and k not in self.auto_repr_ignore ) return "<{klass} @0x{id:x} {attrs}>".format( klass=self.__class__.__name__, @@ -215,7 +234,7 @@ def __repr__(self): ) -def simplify(v): # pragma: debugging +def simplify(v: Any) -> Any: # pragma: debugging """Turn things which are nearly dict/list/etc into dict/list/etc.""" if isinstance(v, dict): return {k:simplify(vv) for k, vv in v.items()} @@ -227,13 +246,13 @@ def simplify(v): # pragma: debugging return v -def pp(v): # pragma: debugging +def pp(v: Any) -> None: # pragma: debugging """Debug helper to pretty-print data, including SimpleNamespace objects.""" # Might not be needed in 3.9+ pprint.pprint(simplify(v)) -def filter_text(text, filters): +def filter_text(text: str, filters: Iterable[Callable[[str], str]]) -> str: """Run `text` through a series of filters. `filters` is a list of functions. 
    Each takes a string and returns a
@@ -256,10 +275,10 @@ def filter_text(text, filters):
 
 class CwdTracker:                                   # pragma: debugging
     """A class to add cwd info to debug messages."""
-    def __init__(self):
-        self.cwd = None
+    def __init__(self) -> None:
+        self.cwd: Optional[str] = None
 
-    def filter(self, text):
+    def filter(self, text: str) -> str:
         """Add a cwd message for each new cwd."""
         cwd = os.getcwd()
         if cwd != self.cwd:
@@ -270,7 +289,12 @@ def filter(self, text):
 
 class DebugOutputFile:                              # pragma: debugging
     """A file-like object that includes pid and cwd information."""
-    def __init__(self, outfile, show_process, filters):
+    def __init__(
+        self,
+        outfile: Optional[IO[str]],
+        show_process: bool,
+        filters: Iterable[Callable[[str], str]],
+    ):
         self.outfile = outfile
         self.show_process = show_process
         self.filters = list(filters)
@@ -286,7 +310,13 @@ def __init__(self, outfile, show_process, filters):
     SINGLETON_ATTR = 'the_one_and_is_interim'
 
     @classmethod
-    def get_one(cls, fileobj=None, show_process=True, filters=(), interim=False):
+    def get_one(
+        cls,
+        fileobj: Optional[IO[str]]=None,
+        show_process: bool=True,
+        filters: Iterable[Callable[[str], str]]=(),
+        interim: bool=False,
+    ) -> DebugOutputFile:
         """Get a DebugOutputFile.
 
         If `fileobj` is provided, then a new DebugOutputFile is made with it.
@@ -329,13 +359,15 @@ def get_one(cls, fileobj=None, show_process=True, filters=(), interim=False):
             sys.modules[cls.SYS_MOD_NAME] = singleton_module
         return the_one
 
-    def write(self, text):
+    def write(self, text: str) -> None:
         """Just like file.write, but filter through all our filters."""
+        assert self.outfile is not None
         self.outfile.write(filter_text(text, self.filters))
         self.outfile.flush()
 
-    def flush(self):
+    def flush(self) -> None:
         """Flush our file."""
+        assert self.outfile is not None
         self.outfile.flush()
 
@@ -378,7 +410,11 @@ def _wrapper(*args, **kwargs):
 CALLS = itertools.count()
 OBJ_ID_ATTR = "$coverage.object_id"
 
-def show_calls(show_args=True, show_stack=False, show_return=False):   # pragma: debugging
+def show_calls(
+    show_args: bool=True,
+    show_stack: bool=False,
+    show_return: bool=False,
+) -> Callable[..., Any]:                            # pragma: debugging
     """A method decorator to debug-log each call to the function."""
     def _decorator(func):
         @functools.wraps(func)
@@ -412,7 +448,7 @@ def _wrapper(self, *args, **kwargs):
     return _decorator
 
 
-def _clean_stack_line(s):                           # pragma: debugging
+def _clean_stack_line(s: str) -> str:               # pragma: debugging
     """Simplify some paths in a stack trace, for compactness."""
     s = s.strip()
     s = s.replace(os.path.dirname(__file__) + '/', '')
diff --git a/coverage/disposition.py b/coverage/disposition.py
index 34819f428..3cc6c8d68 100644
--- a/coverage/disposition.py
+++ b/coverage/disposition.py
@@ -3,11 +3,28 @@
 
 """Simple value objects for tracking what to do with files."""
 
+from __future__ import annotations
+
+from typing import Optional, Type, TYPE_CHECKING
+
+from coverage.types import TFileDisposition
+
+if TYPE_CHECKING:
+    from coverage.plugin import FileTracer
+
 
 class FileDisposition:
     """A simple value type for recording what to do with a file."""
 
-    def __repr__(self):
+    original_filename: str
+    canonical_filename: str
+    source_filename: Optional[str]
+    trace: bool
+    reason: str
+    file_tracer: Optional[FileTracer]
+    has_dynamic_filename: bool
+
+    def __repr__(self) -> str:
         return f"<FileDisposition {self.original_filename!r}>"
 
 
@@ -15,7 +32,7 @@ def __repr__(self):
 # be implemented in either C or Python.  Acting on them is done with these
 # functions.
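show_calls above is a debugging decorator; a bare-bones version of the same idea, without the pid/tid plumbing (the log_calls name is mine, not coverage.py's):

    import functools

    def log_calls(func):
        """Print each call with its arguments before delegating to func."""
        @functools.wraps(func)
        def _wrapper(*args, **kwargs):
            print(f"{func.__name__} args={args!r} kwargs={kwargs!r}")
            return func(*args, **kwargs)
        return _wrapper

    @log_calls
    def add(a, b):
        return a + b

    add(2, 3)       # prints: add args=(2, 3) kwargs={}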
-def disposition_init(cls, original_filename): +def disposition_init(cls: Type[TFileDisposition], original_filename: str) -> TFileDisposition: """Construct and initialize a new FileDisposition object.""" disp = cls() disp.original_filename = original_filename @@ -28,7 +45,7 @@ def disposition_init(cls, original_filename): return disp -def disposition_debug_msg(disp): +def disposition_debug_msg(disp: TFileDisposition) -> str: """Make a nice debug message of what the FileDisposition is doing.""" if disp.trace: msg = f"Tracing {disp.original_filename!r}" diff --git a/coverage/env.py b/coverage/env.py index 13411699a..c6c1ed13e 100644 --- a/coverage/env.py +++ b/coverage/env.py @@ -7,6 +7,15 @@ import platform import sys +from typing import Any, Iterable, Tuple + +# debug_info() at the bottom wants to show all the globals, but not imports. +# Grab the global names here to know which names to not show. Nothing defined +# above this line will be in the output. +_UNINTERESTING_GLOBALS = list(globals()) +# These names also shouldn't be shown. +_UNINTERESTING_GLOBALS += ["PYBEHAVIOR", "debug_info"] + # Operating systems. WINDOWS = sys.platform == "win32" LINUX = sys.platform.startswith("linux") @@ -15,15 +24,13 @@ # Python implementations. CPYTHON = (platform.python_implementation() == "CPython") PYPY = (platform.python_implementation() == "PyPy") -JYTHON = (platform.python_implementation() == "Jython") -IRONPYTHON = (platform.python_implementation() == "IronPython") # Python versions. We amend version_info with one more value, a zero if an # official version, or 1 if built from source beyond an official version. PYVERSION = sys.version_info + (int(platform.python_version()[-1] == "+"),) if PYPY: - PYPYVERSION = sys.pypy_version_info + PYPYVERSION = sys.pypy_version_info # type: ignore[attr-defined] # Python behavior. class PYBEHAVIOR: @@ -67,11 +74,16 @@ class PYBEHAVIOR: # does the finally jump back to the break/continue/return (3.8) to do the # work? finally_jumps_back = ((3, 8) <= PYVERSION < (3, 10)) + if PYPY and PYPYVERSION < (7, 3, 7): + finally_jumps_back = False # When a function is decorated, does the trace function get called for the # @-line and also the def-line (new behavior in 3.8)? Or just the @-line # (old behavior)? - trace_decorated_def = (CPYTHON and PYVERSION >= (3, 8)) or (PYPY and PYVERSION >= (3, 9)) + trace_decorated_def = ( + (PYVERSION >= (3, 8)) and + (CPYTHON or (PYVERSION > (3, 8)) or (PYPYVERSION > (7, 3, 9))) + ) # Functions are no longer claimed to start at their earliest decorator even though # the decorators are traced? @@ -85,7 +97,10 @@ class PYBEHAVIOR: nix_while_true = (PYVERSION >= (3, 8)) # CPython 3.9a1 made sys.argv[0] and other reported files absolute paths. - report_absolute_files = ((CPYTHON or (PYPYVERSION >= (7, 3, 10))) and PYVERSION >= (3, 9)) + report_absolute_files = ( + (CPYTHON or (PYPY and PYPYVERSION >= (7, 3, 10))) + and PYVERSION >= (3, 9) + ) # Lines after break/continue/return/raise are no longer compiled into the # bytecode. They used to be marked as missing, now they aren't executable. @@ -124,25 +139,15 @@ class PYBEHAVIOR: # Are we running our test suite? # Even when running tests, you can use COVERAGE_TESTING=0 to disable the -# test-specific behavior like contracts. +# test-specific behavior like AST checking. TESTING = os.getenv('COVERAGE_TESTING', '') == 'True' -# Environment COVERAGE_NO_CONTRACTS=1 can turn off contracts while debugging -# tests to remove noise from stack traces. 
-# $set_env.py: COVERAGE_NO_CONTRACTS - Disable PyContracts to simplify stack traces. -USE_CONTRACTS = ( - TESTING - and not bool(int(os.environ.get("COVERAGE_NO_CONTRACTS", 0))) - and (PYVERSION < (3, 11)) -) -def debug_info(): +def debug_info() -> Iterable[Tuple[str, Any]]: """Return a list of (name, value) pairs for printing debug information.""" info = [ (name, value) for name, value in globals().items() - if not name.startswith("_") and - name not in {"PYBEHAVIOR", "debug_info"} and - not isinstance(value, type(os)) + if not name.startswith("_") and name not in _UNINTERESTING_GLOBALS ] info += [ (name, value) for name, value in PYBEHAVIOR.__dict__.items() diff --git a/coverage/exceptions.py b/coverage/exceptions.py index c6a7f3da0..43dc00477 100644 --- a/coverage/exceptions.py +++ b/coverage/exceptions.py @@ -57,16 +57,6 @@ class _ExceptionDuringRun(CoverageException): pass -class _StopEverything(_BaseCoverageException): - """An exception that means everything should stop. - - The CoverageTest class converts these to SkipTest, so that when running - tests, raising this exception will automatically skip the test. - - """ - pass - - class CoverageWarning(Warning): """A warning from Coverage.py.""" pass diff --git a/coverage/execfile.py b/coverage/execfile.py index b5d3a65fd..93dffcd11 100644 --- a/coverage/execfile.py +++ b/coverage/execfile.py @@ -16,7 +16,6 @@ from coverage.exceptions import CoverageException, _ExceptionDuringRun, NoCode, NoSource from coverage.files import canonical_filename, python_reported_file from coverage.misc import isolate_module -from coverage.phystokens import compile_unicode from coverage.python import get_python_source os = isolate_module(os) @@ -274,8 +273,7 @@ def make_code_from_py(filename): except (OSError, NoSource) as exc: raise NoSource(f"No file to run: '{filename}'") from exc - code = compile_unicode(source, filename, "exec") - return code + return compile(source, filename, "exec") def make_code_from_pyc(filename): diff --git a/coverage/files.py b/coverage/files.py index 4d0c1a2b1..2aca85ed4 100644 --- a/coverage/files.py +++ b/coverage/files.py @@ -3,7 +3,8 @@ """File wrangling.""" -import fnmatch +from __future__ import annotations + import hashlib import ntpath import os @@ -12,15 +13,20 @@ import re import sys +from typing import Callable, Dict, Iterable, List, Optional, Tuple + from coverage import env from coverage.exceptions import ConfigError -from coverage.misc import contract, human_sorted, isolate_module, join_regex +from coverage.misc import human_sorted, isolate_module, join_regex os = isolate_module(os) -def set_relative_directory(): +RELATIVE_DIR: str = "" +CANONICAL_FILENAME_CACHE: Dict[str, str] = {} + +def set_relative_directory() -> None: """Set the directory that `relative_filename` will be relative to.""" global RELATIVE_DIR, CANONICAL_FILENAME_CACHE @@ -38,13 +44,12 @@ def set_relative_directory(): CANONICAL_FILENAME_CACHE = {} -def relative_directory(): +def relative_directory() -> str: """Return the directory that `relative_filename` is relative to.""" return RELATIVE_DIR -@contract(returns='unicode') -def relative_filename(filename): +def relative_filename(filename: str) -> str: """Return the relative form of `filename`. The file name will be relative to the current directory when the @@ -57,8 +62,7 @@ def relative_filename(filename): return filename -@contract(returns='unicode') -def canonical_filename(filename): +def canonical_filename(filename: str) -> str: """Return a canonical file name for `filename`. 
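The _UNINTERESTING_GLOBALS device introduced in env.py above snapshots the module's names early so that debug_info() reports only values defined afterwards. A tiny standalone version (the WINDOWS/TIMEOUT names are just examples):

    _BORING = list(globals())       # names that existed before our settings
    _BORING += ["debug_info"]

    WINDOWS = False
    TIMEOUT = 30

    def debug_info():
        """Return (name, value) pairs for names defined after the snapshot."""
        return [
            (name, value)
            for name, value in globals().items()
            if not name.startswith("_") and name not in _BORING
        ]

    print(debug_info())             # [('WINDOWS', False), ('TIMEOUT', 30)]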
An absolute path with no redundant components and normalized case. @@ -69,7 +73,7 @@ def canonical_filename(filename): if not os.path.isabs(filename): for path in [os.curdir] + sys.path: if path is None: - continue + continue # type: ignore f = os.path.join(path, filename) try: exists = os.path.exists(f) @@ -85,8 +89,7 @@ def canonical_filename(filename): MAX_FLAT = 100 -@contract(filename='unicode', returns='unicode') -def flat_rootname(filename): +def flat_rootname(filename: str) -> str: """A base for a flat file name to correspond to this file. Useful for writing files about the code where you want all the files in @@ -107,10 +110,10 @@ def flat_rootname(filename): if env.WINDOWS: - _ACTUAL_PATH_CACHE = {} - _ACTUAL_PATH_LIST_CACHE = {} + _ACTUAL_PATH_CACHE: Dict[str, str] = {} + _ACTUAL_PATH_LIST_CACHE: Dict[str, List[str]] = {} - def actual_path(path): + def actual_path(path: str) -> str: """Get the actual path of `path`, including the correct case.""" if path in _ACTUAL_PATH_CACHE: return _ACTUAL_PATH_CACHE[path] @@ -143,36 +146,59 @@ def actual_path(path): return actpath else: - def actual_path(path): + def actual_path(path: str) -> str: """The actual path for non-Windows platforms.""" return path -@contract(returns='unicode') -def abs_file(path): +def abs_file(path: str) -> str: """Return the absolute normalized form of `path`.""" return actual_path(os.path.abspath(os.path.realpath(path))) -def python_reported_file(filename): +def zip_location(filename: str) -> Optional[Tuple[str, str]]: + """Split a filename into a zipfile / inner name pair. + + Only return a pair if the zipfile exists. No check is made if the inner + name is in the zipfile. + + """ + for ext in ['.zip', '.whl', '.egg', '.pex']: + zipbase, extension, inner = filename.partition(ext + sep(filename)) + if extension: + zipfile = zipbase + ext + if os.path.exists(zipfile): + return zipfile, inner + return None + + +def source_exists(path: str) -> bool: + """Determine if a source file path exists.""" + if os.path.exists(path): + return True + + if zip_location(path): + # If zip_location returns anything, then it's a zipfile that + # exists. That's good enough for us. + return True + + return False + + +def python_reported_file(filename: str) -> str: """Return the string as Python would describe this file name.""" if env.PYBEHAVIOR.report_absolute_files: filename = os.path.abspath(filename) return filename -RELATIVE_DIR = None -CANONICAL_FILENAME_CACHE = None -set_relative_directory() - - -def isabs_anywhere(filename): +def isabs_anywhere(filename: str) -> bool: """Is `filename` an absolute path on any OS?""" return ntpath.isabs(filename) or posixpath.isabs(filename) -def prep_patterns(patterns): - """Prepare the file patterns for use in a `FnmatchMatcher`. +def prep_patterns(patterns: Iterable[str]) -> List[str]: + """Prepare the file patterns for use in a `GlobMatcher`. If a pattern starts with a wildcard, it is used as a pattern as-is. If it does not start with a wildcard, then it is made @@ -198,19 +224,20 @@ class TreeMatcher: somewhere in a subtree rooted at one of the directories. 
""" - def __init__(self, paths, name="unknown"): - self.original_paths = human_sorted(paths) - self.paths = list(map(os.path.normcase, paths)) + def __init__(self, paths: Iterable[str], name: str="unknown") -> None: + self.original_paths: List[str] = human_sorted(paths) + #self.paths = list(map(os.path.normcase, paths)) + self.paths = [os.path.normcase(p) for p in paths] self.name = name - def __repr__(self): + def __repr__(self) -> str: return f"" - def info(self): + def info(self) -> List[str]: """A list of strings for displaying when dumping state.""" return self.original_paths - def match(self, fpath): + def match(self, fpath: str) -> bool: """Does `fpath` indicate a file in one of our trees?""" fpath = os.path.normcase(fpath) for p in self.paths: @@ -226,18 +253,18 @@ def match(self, fpath): class ModuleMatcher: """A matcher for modules in a tree.""" - def __init__(self, module_names, name="unknown"): + def __init__(self, module_names: Iterable[str], name:str = "unknown") -> None: self.modules = list(module_names) self.name = name - def __repr__(self): + def __repr__(self) -> str: return f"" - def info(self): + def info(self) -> List[str]: """A list of strings for displaying when dumping state.""" return self.modules - def match(self, module_name): + def match(self, module_name: str) -> bool: """Does `module_name` indicate a module in one of our packages?""" if not module_name: return False @@ -253,26 +280,26 @@ def match(self, module_name): return False -class FnmatchMatcher: +class GlobMatcher: """A matcher for files by file name pattern.""" - def __init__(self, pats, name="unknown"): + def __init__(self, pats: Iterable[str], name: str="unknown") -> None: self.pats = list(pats) - self.re = fnmatches_to_regex(self.pats, case_insensitive=env.WINDOWS) + self.re = globs_to_regex(self.pats, case_insensitive=env.WINDOWS) self.name = name - def __repr__(self): - return f"" + def __repr__(self) -> str: + return f"" - def info(self): + def info(self) -> List[str]: """A list of strings for displaying when dumping state.""" return self.pats - def match(self, fpath): + def match(self, fpath: str) -> bool: """Does `fpath` match one of our file name patterns?""" return self.re.match(fpath) is not None -def sep(s): +def sep(s: str) -> str: """Find the path separator used in this string, or os.sep if none.""" sep_match = re.search(r"[\\/]", s) if sep_match: @@ -282,12 +309,59 @@ def sep(s): return the_sep -def fnmatches_to_regex(patterns, case_insensitive=False, partial=False): - """Convert fnmatch patterns to a compiled regex that matches any of them. +# Tokenizer for _glob_to_regex. +# None as a sub means disallowed. +G2RX_TOKENS = [(re.compile(rx), sub) for rx, sub in [ + (r"\*\*\*+", None), # Can't have *** + (r"[^/]+\*\*+", None), # Can't have x** + (r"\*\*+[^/]+", None), # Can't have **x + (r"\*\*/\*\*", None), # Can't have **/** + (r"^\*+/", r"(.*[/\\\\])?"), # ^*/ matches any prefix-slash, or nothing. + (r"/\*+$", r"[/\\\\].*"), # /*$ matches any slash-suffix. + (r"\*\*/", r"(.*[/\\\\])?"), # **/ matches any subdirs, including none + (r"/", r"[/\\\\]"), # / matches either slash or backslash + (r"\*", r"[^/\\\\]*"), # * matches any number of non slash-likes + (r"\?", r"[^/\\\\]"), # ? 
matches one non slash-like + (r"\[.*?\]", r"\g<0>"), # [a-f] matches [a-f] + (r"[a-zA-Z0-9_-]+", r"\g<0>"), # word chars match themselves + (r"[\[\]]", None), # Can't have single square brackets + (r".", r"\\\g<0>"), # Anything else is escaped to be safe +]] + +def _glob_to_regex(pattern: str) -> str: + """Convert a file-path glob pattern into a regex.""" + # Turn all backslashes into slashes to simplify the tokenizer. + pattern = pattern.replace("\\", "/") + if "/" not in pattern: + pattern = "**/" + pattern + path_rx = [] + pos = 0 + while pos < len(pattern): + for rx, sub in G2RX_TOKENS: # pragma: always breaks + m = rx.match(pattern, pos=pos) + if m: + if sub is None: + raise ConfigError(f"File pattern can't include {m[0]!r}") + path_rx.append(m.expand(sub)) + pos = m.end() + break + return "".join(path_rx) + + +def globs_to_regex( + patterns: Iterable[str], + case_insensitive: bool=False, + partial: bool=False +) -> re.Pattern[str]: + """Convert glob patterns to a compiled regex that matches any of them. Slashes are always converted to match either slash or backslash, for Windows support, even when running elsewhere. + If the pattern has no slash or backslash, then it is interpreted as + matching a file name anywhere it appears in the tree. Otherwise, the glob + pattern must match the whole file path. + If `partial` is true, then the pattern will match if the target string starts with the pattern. Otherwise, it must match the entire string. @@ -295,23 +369,13 @@ def fnmatches_to_regex(patterns, case_insensitive=False, partial=False): strings. """ - regexes = (fnmatch.translate(pattern) for pattern in patterns) - # Python3.7 fnmatch translates "/" as "/". Before that, it translates as "\/", - # so we have to deal with maybe a backslash. - regexes = (re.sub(r"\\?/", r"[\\\\/]", regex) for regex in regexes) - - if partial: - # fnmatch always adds a \Z to match the whole string, which we don't - # want, so we remove the \Z. While removing it, we only replace \Z if - # followed by paren (introducing flags), or at end, to keep from - # destroying a literal \Z in the pattern. - regexes = (re.sub(r'\\Z(\(\?|$)', r'\1', regex) for regex in regexes) - flags = 0 if case_insensitive: flags |= re.IGNORECASE - compiled = re.compile(join_regex(regexes), flags=flags) - + rx = join_regex(map(_glob_to_regex, patterns)) + if not partial: + rx = rf"(?:{rx})\Z" + compiled = re.compile(rx, flags=flags) return compiled @@ -326,22 +390,27 @@ class PathAliases: map a path through those aliases to produce a unified path. """ - def __init__(self, debugfn=None, relative=False): - self.aliases = [] # A list of (original_pattern, regex, result) + def __init__( + self, + debugfn: Optional[Callable[[str], None]]=None, + relative: bool=False, + ) -> None: + # A list of (original_pattern, regex, result) + self.aliases: List[Tuple[str, re.Pattern[str], str]] = [] self.debugfn = debugfn or (lambda msg: 0) self.relative = relative self.pprinted = False - def pprint(self): + def pprint(self) -> None: """Dump the important parts of the PathAliases, for debugging.""" self.debugfn(f"Aliases (relative={self.relative}):") for original_pattern, regex, result in self.aliases: self.debugfn(f" Rule: {original_pattern!r} -> {result!r} using regex {regex.pattern!r}") - def add(self, pattern, result): + def add(self, pattern: str, result: str) -> None: """Add the `pattern`/`result` pair to the list of aliases. - `pattern` is an `fnmatch`-style pattern. `result` is a simple + `pattern` is an `glob`-style pattern. 
`result` is a simple string. When mapping paths, if a path starts with a match against `pattern`, then that match is replaced with `result`. This models isomorphic source trees being rooted at different places on two @@ -361,22 +430,23 @@ def add(self, pattern, result): if pattern.endswith("*"): raise ConfigError("Pattern must not end with wildcards.") - # The pattern is meant to match a filepath. Let's make it absolute + # The pattern is meant to match a file path. Let's make it absolute # unless it already is, or is meant to match any prefix. - if not pattern.startswith('*') and not isabs_anywhere(pattern + pattern_sep): - pattern = abs_file(pattern) + if not self.relative: + if not pattern.startswith('*') and not isabs_anywhere(pattern + pattern_sep): + pattern = abs_file(pattern) if not pattern.endswith(pattern_sep): pattern += pattern_sep # Make a regex from the pattern. - regex = fnmatches_to_regex([pattern], case_insensitive=True, partial=True) + regex = globs_to_regex([pattern], case_insensitive=True, partial=True) # Normalize the result: it must end with a path separator. result_sep = sep(result) result = result.rstrip(r"\/") + result_sep self.aliases.append((original_pattern, regex, result)) - def map(self, path): + def map(self, path: str, exists:Callable[[str], bool]=source_exists) -> str: """Map `path` through the aliases. `path` is checked against all of the patterns. The first pattern to @@ -387,6 +457,9 @@ def map(self, path): The separator style in the result is made to match that of the result in the alias. + `exists` is a function to determine if the resulting path actually + exists. + Returns the mapped path. If a mapping has happened, this is a canonical path. If no mapping has happened, it is the original value of `path` unchanged. @@ -403,16 +476,44 @@ def map(self, path): new = new.replace(sep(path), sep(result)) if not self.relative: new = canonical_filename(new) + dot_start = result.startswith(("./", ".\\")) and len(result) > 2 + if new.startswith(("./", ".\\")) and not dot_start: + new = new[2:] + if not exists(new): + self.debugfn( + f"Rule {original_pattern!r} changed {path!r} to {new!r} " + + "which doesn't exist, continuing" + ) + continue self.debugfn( f"Matched path {path!r} to rule {original_pattern!r} -> {result!r}, " + f"producing {new!r}" ) return new + + # If we get here, no pattern matched. + + if self.relative and not isabs_anywhere(path): + # Auto-generate a pattern to implicitly match relative files + parts = re.split(r"[/\\]", path) + if len(parts) > 1: + dir1 = parts[0] + pattern = f"*/{dir1}" + regex_pat = rf"^(.*[\\/])?{re.escape(dir1)}[\\/]" + result = f"{dir1}{os.sep}" + # Only add a new pattern if we don't already have this pattern. + if not any(p == pattern for p, _, _ in self.aliases): + self.debugfn( + f"Generating rule: {pattern!r} -> {result!r} using regex {regex_pat!r}" + ) + self.aliases.append((pattern, re.compile(regex_pat), result)) + return self.map(path, exists=exists) + self.debugfn(f"No rules match, path {path!r} is unchanged") return path -def find_python_files(dirname): +def find_python_files(dirname: str, include_namespace_packages: bool) -> Iterable[str]: """Yield all of the importable Python files in `dirname`, recursively. To be importable, the files have to be in a directory with a __init__.py, @@ -421,16 +522,27 @@ def find_python_files(dirname): best, but sub-directories are checked for a __init__.py to be sure we only find the importable files. 
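PathAliases.map above applies the first matching rule and now verifies the result exists before accepting it. A much-reduced sketch of the rule-application step (the rule values are invented for illustration):

    import re

    # One alias rule: compiled pattern -> replacement root (illustrative).
    RULE = (re.compile(r"^/remote/src/", re.IGNORECASE), "src/")

    def map_path(path):
        """Rewrite `path` if the rule's pattern matches its start."""
        m = RULE[0].match(path)
        if m:
            return RULE[1] + path[m.end():]
        return path                     # no rule matched: unchanged

    print(map_path("/remote/src/app/main.py"))      # src/app/main.py
    print(map_path("docs/conf.py"))                 # docs/conf.py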
+ If `include_namespace_packages` is True, then the check for __init__.py + files is skipped. + + Files with strange characters are skipped, since they couldn't have been + imported, and are probably editor side-files. + """ for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)): - if i > 0 and '__init__.py' not in filenames: - # If a directory doesn't have __init__.py, then it isn't - # importable and neither are its files - del dirnames[:] - continue + if not include_namespace_packages: + if i > 0 and "__init__.py" not in filenames: + # If a directory doesn't have __init__.py, then it isn't + # importable and neither are its files + del dirnames[:] + continue for filename in filenames: # We're only interested in files that look like reasonable Python # files: Must end with .py or .pyw, and must not have certain funny # characters that probably mean they are editor junk. if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename): yield os.path.join(dirpath, filename) + + +# Globally set the relative directory. +set_relative_directory() diff --git a/coverage/html.py b/coverage/html.py index 21b5189e3..b10bab245 100644 --- a/coverage/html.py +++ b/coverage/html.py @@ -3,12 +3,16 @@ """HTML reporting for coverage.py.""" +from __future__ import annotations + import datetime import json import os import re import shutil -import types + +from dataclasses import dataclass +from typing import Iterable, List, Optional, TYPE_CHECKING import coverage from coverage.data import add_data_to_hash @@ -17,13 +21,18 @@ from coverage.misc import ensure_dir, file_be_gone, Hasher, isolate_module, format_local_datetime from coverage.misc import human_sorted, plural from coverage.report import get_analysis_to_report -from coverage.results import Numbers +from coverage.results import Analysis, Numbers from coverage.templite import Templite +from coverage.types import TLineNo, TMorf + +if TYPE_CHECKING: + from coverage import Coverage + from coverage.plugins import FileReporter os = isolate_module(os) -def data_filename(fname): +def data_filename(fname: str) -> str: """Return the path to an "htmlfiles" data file of ours. 
""" static_dir = os.path.join(os.path.dirname(__file__), "htmlfiles") @@ -31,25 +40,47 @@ def data_filename(fname): return static_filename -def read_data(fname): +def read_data(fname: str) -> str: """Return the contents of a data file of ours.""" with open(data_filename(fname)) as data_file: return data_file.read() -def write_html(fname, html): +def write_html(fname: str, html: str) -> None: """Write `html` to `fname`, properly encoded.""" html = re.sub(r"(\A\s+)|(\s+$)", "", html, flags=re.MULTILINE) + "\n" with open(fname, "wb") as fout: fout.write(html.encode('ascii', 'xmlcharrefreplace')) +@dataclass +class LineData: + """The data for each source line of HTML output.""" + tokens: str + number: TLineNo + category: str + statement: bool + contexts: List[str] + contexts_label: str + context_list: List[str] + short_annotations: List[str] + long_annotations: List[str] + + +@dataclass +class FileData: + """The data for each source file of HTML output.""" + relative_filename: str + nums: Numbers + lines: List[LineData] + + class HtmlDataGeneration: """Generate structured data to be turned into HTML reports.""" EMPTY = "(empty)" - def __init__(self, cov): + def __init__(self, cov: Coverage) -> None: self.coverage = cov self.config = self.coverage.config data = self.coverage.get_data() @@ -59,7 +90,7 @@ def __init__(self, cov): self.coverage._warn("No contexts were measured") data.set_query_contexts(self.config.report_contexts) - def data_for_file(self, fr, analysis): + def data_for_file(self, fr: FileReporter, analysis: Analysis) -> FileData: """Produce the data needed for one file's report.""" if self.has_arcs: missing_branch_arcs = analysis.missing_branch_arcs() @@ -72,7 +103,7 @@ def data_for_file(self, fr, analysis): for lineno, tokens in enumerate(fr.source_token_lines(), start=1): # Figure out how to mark this line. 
- category = None + category = "" short_annotations = [] long_annotations = [] @@ -86,13 +117,14 @@ def data_for_file(self, fr, analysis): if b < 0: short_annotations.append("exit") else: - short_annotations.append(b) + short_annotations.append(str(b)) long_annotations.append(fr.missing_arc_description(lineno, b, arcs_executed)) elif lineno in analysis.statements: category = 'run' - contexts = contexts_label = None - context_list = None + contexts = [] + contexts_label = "" + context_list = [] if category and self.config.show_contexts: contexts = human_sorted(c or self.EMPTY for c in contexts_by_lineno.get(lineno, ())) if contexts == [self.EMPTY]: @@ -101,7 +133,7 @@ def data_for_file(self, fr, analysis): contexts_label = f"{len(contexts)} ctx" context_list = contexts - lines.append(types.SimpleNamespace( + lines.append(LineData( tokens=tokens, number=lineno, category=category, @@ -113,7 +145,7 @@ def data_for_file(self, fr, analysis): long_annotations=long_annotations, )) - file_data = types.SimpleNamespace( + file_data = FileData( relative_filename=fr.relative_filename(), nums=analysis.numbers, lines=lines, @@ -124,7 +156,7 @@ def data_for_file(self, fr, analysis): class FileToReport: """A file we're considering reporting.""" - def __init__(self, fr, analysis): + def __init__(self, fr: FileReporter, analysis: Analysis) -> None: self.fr = fr self.analysis = analysis self.rootname = flat_rootname(fr.relative_filename()) @@ -144,7 +176,7 @@ class HtmlReporter: "favicon_32.png", ] - def __init__(self, cov): + def __init__(self, cov: Coverage) -> None: self.coverage = cov self.config = self.coverage.config self.directory = self.config.html_dir @@ -160,6 +192,7 @@ def __init__(self, cov): title = self.config.html_title + self.extra_css: Optional[str] if self.config.extra_css: self.extra_css = os.path.basename(self.config.extra_css) else: @@ -204,7 +237,7 @@ def __init__(self, cov): self.pyfile_html_source = read_data("pyfile.html") self.source_tmpl = Templite(self.pyfile_html_source, self.template_globals) - def report(self, morfs): + def report(self, morfs: Optional[Iterable[TMorf]]) -> float: """Generate an HTML report for `morfs`. `morfs` is a list of modules or file names. @@ -254,13 +287,13 @@ def report(self, morfs): self.make_local_static_report_files() return self.totals.n_statements and self.totals.pc_covered - def make_directory(self): + def make_directory(self) -> None: """Make sure our htmlcov directory exists.""" ensure_dir(self.directory) if not os.listdir(self.directory): self.directory_was_empty = True - def make_local_static_report_files(self): + def make_local_static_report_files(self) -> None: """Make local instances of static files for HTML report.""" # The files we provide must always be copied. for static in self.STATIC_FILES: @@ -439,12 +472,12 @@ def __init__(self, directory): self.directory = directory self.reset() - def reset(self): + def reset(self) -> None: """Initialize to empty. 
Causes all files to be reported.""" self.globals = '' self.files = {} - def read(self): + def read(self) -> None: """Read the information we stored last time.""" usable = False try: @@ -469,7 +502,7 @@ def read(self): else: self.reset() - def write(self): + def write(self) -> None: """Write the current status.""" status_file = os.path.join(self.directory, self.STATUS_FILE) files = {} diff --git a/coverage/inorout.py b/coverage/inorout.py index ec89d1b49..c43e43a4f 100644 --- a/coverage/inorout.py +++ b/coverage/inorout.py @@ -3,6 +3,8 @@ """Determining whether files are being measured/reported or not.""" +from __future__ import annotations + import importlib.util import inspect import itertools @@ -13,33 +15,48 @@ import sysconfig import traceback +from types import FrameType, ModuleType +from typing import ( + cast, Any, Iterable, List, Optional, Set, Tuple, Type, TYPE_CHECKING, +) + from coverage import env from coverage.disposition import FileDisposition, disposition_init from coverage.exceptions import CoverageException, PluginError -from coverage.files import TreeMatcher, FnmatchMatcher, ModuleMatcher +from coverage.files import TreeMatcher, GlobMatcher, ModuleMatcher from coverage.files import prep_patterns, find_python_files, canonical_filename from coverage.misc import sys_modules_saved from coverage.python import source_for_file, source_for_morf +from coverage.types import TFileDisposition, TMorf, TWarnFn, TDebugCtl + +if TYPE_CHECKING: + from coverage.config import CoverageConfig + from coverage.plugin_support import Plugins # Pypy has some unusual stuff in the "stdlib". Consider those locations # when deciding where the stdlib is. These modules are not used for anything, # they are modules importable from the pypy lib directories, so that we can # find those directories. -_structseq = _pypy_irc_topic = None +modules_we_happen_to_have: List[ModuleType] = [ + inspect, itertools, os, platform, re, sysconfig, traceback, +] + if env.PYPY: try: import _structseq + modules_we_happen_to_have.append(_structseq) except ImportError: pass try: import _pypy_irc_topic + modules_we_happen_to_have.append(_pypy_irc_topic) except ImportError: pass -def canonical_path(morf, directory=False): +def canonical_path(morf: TMorf, directory: bool=False) -> str: """Return the canonical path of the module or file `morf`. If the module is a package, then return its directory. If it is a @@ -53,7 +70,7 @@ def canonical_path(morf, directory=False): return morf_path -def name_for_module(filename, frame): +def name_for_module(filename: str, frame: Optional[FrameType]) -> str: """Get the name of the module for a filename and frame. For configurability's sake, we allow __main__ modules to be matched by @@ -66,11 +83,7 @@ def name_for_module(filename, frame): """ module_globals = frame.f_globals if frame is not None else {} - if module_globals is None: # pragma: only ironpython - # IronPython doesn't provide globals: https://github.com/IronLanguages/main/issues/1296 - module_globals = {} - - dunder_name = module_globals.get('__name__', None) + dunder_name: str = module_globals.get('__name__', None) if isinstance(dunder_name, str) and dunder_name != '__main__': # This is the usual case: an imported module. 
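name_for_module above prefers the frame's __name__ and falls back to the file name; the core of that logic, stripped of the __main__ special cases handled in the real function:

    import os

    def module_name(module_globals, filename):
        """Prefer __name__ from the globals; else use the file's base name."""
        dunder_name = module_globals.get("__name__", None)
        if isinstance(dunder_name, str) and dunder_name != "__main__":
            return dunder_name                          # the usual case
        base = os.path.basename(filename)
        return os.path.splitext(base)[0]                # "prog.py" -> "prog"

    print(module_name({"__name__": "pkg.mod"}, "pkg/mod.py"))   # pkg.mod
    print(module_name({"__name__": "__main__"}, "prog.py"))     # prog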
@@ -95,12 +108,12 @@ def name_for_module(filename, frame): return dunder_name -def module_is_namespace(mod): +def module_is_namespace(mod: ModuleType) -> bool: """Is the module object `mod` a PEP420 namespace module?""" return hasattr(mod, '__path__') and getattr(mod, '__file__', None) is None -def module_has_file(mod): +def module_has_file(mod: ModuleType) -> bool: """Does the module object `mod` have an existing __file__ ?""" mod__file__ = getattr(mod, '__file__', None) if mod__file__ is None: @@ -108,7 +121,7 @@ def module_has_file(mod): return os.path.exists(mod__file__) -def file_and_path_for_module(modulename): +def file_and_path_for_module(modulename: str) -> Tuple[Optional[str], List[str]]: """Find the file and search path for `modulename`. Returns: @@ -129,32 +142,19 @@ def file_and_path_for_module(modulename): return filename, path -def add_stdlib_paths(paths): +def add_stdlib_paths(paths: Set[str]) -> None: """Add paths where the stdlib can be found to the set `paths`.""" # Look at where some standard modules are located. That's the # indication for "installed with the interpreter". In some # environments (virtualenv, for example), these modules may be # spread across a few locations. Look at all the candidate modules # we've imported, and take all the different ones. - modules_we_happen_to_have = [ - inspect, itertools, os, platform, re, sysconfig, traceback, - _pypy_irc_topic, _structseq, - ] for m in modules_we_happen_to_have: - if m is not None and hasattr(m, "__file__"): + if hasattr(m, "__file__"): paths.add(canonical_path(m, directory=True)) - if _structseq and not hasattr(_structseq, '__file__'): - # PyPy 2.4 has no __file__ in the builtin modules, but the code - # objects still have the file names. So dig into one to find - # the path to exclude. The "filename" might be synthetic, - # don't be fooled by those. - structseq_file = _structseq.structseq_new.__code__.co_filename - if not structseq_file.startswith("<"): - paths.add(canonical_path(structseq_file)) - -def add_third_party_paths(paths): +def add_third_party_paths(paths: Set[str]) -> None: """Add locations for third-party packages to the set `paths`.""" # Get the paths that sysconfig knows about. scheme_names = set(sysconfig.get_scheme_names()) @@ -168,7 +168,7 @@ def add_third_party_paths(paths): paths.add(config_paths[path_name]) -def add_coverage_paths(paths): +def add_coverage_paths(paths: Set[str]) -> None: """Add paths where coverage.py code can be found to the set `paths`.""" cover_path = canonical_path(__file__, directory=True) paths.add(cover_path) @@ -176,43 +176,23 @@ def add_coverage_paths(paths): # Don't include our own test code. paths.add(os.path.join(cover_path, "tests")) - # When testing, we use PyContracts, which should be considered - # part of coverage.py, and it uses six. Exclude those directories - # just as we exclude ourselves. - if env.USE_CONTRACTS: - import contracts - import six - for mod in [contracts, six]: - paths.add(canonical_path(mod)) - class InOrOut: """Machinery for determining what files to measure.""" - def __init__(self, warn, debug): + def __init__( + self, + config: CoverageConfig, + warn: TWarnFn, + debug: Optional[TDebugCtl], + include_namespace_packages: bool, + ) -> None: self.warn = warn self.debug = debug + self.include_namespace_packages = include_namespace_packages - # The matchers for should_trace. 
- self.source_match = None - self.source_pkgs_match = None - self.pylib_paths = self.cover_paths = self.third_paths = None - self.pylib_match = self.cover_match = self.third_match = None - self.include_match = self.omit_match = None - self.plugins = [] - self.disp_class = FileDisposition - - # The source argument can be directories or package names. - self.source = [] - self.source_pkgs = [] - self.source_pkgs_unmatched = [] - self.omit = self.include = None - - # Is the source inside a third-party area? - self.source_in_third = False - - def configure(self, config): - """Apply the configuration to get ready for decision-time.""" + self.source: List[str] = [] + self.source_pkgs: List[str] = [] self.source_pkgs.extend(config.source_pkgs) for src in config.source or []: if os.path.isdir(src): @@ -221,31 +201,38 @@ def configure(self, config): self.source_pkgs.append(src) self.source_pkgs_unmatched = self.source_pkgs[:] - self.omit = prep_patterns(config.run_omit) self.include = prep_patterns(config.run_include) + self.omit = prep_patterns(config.run_omit) # The directories for files considered "installed with the interpreter". - self.pylib_paths = set() + self.pylib_paths: Set[str] = set() if not config.cover_pylib: add_stdlib_paths(self.pylib_paths) # To avoid tracing the coverage.py code itself, we skip anything # located where we are. - self.cover_paths = set() + self.cover_paths: Set[str] = set() add_coverage_paths(self.cover_paths) # Find where third-party packages are installed. - self.third_paths = set() + self.third_paths: Set[str] = set() add_third_party_paths(self.third_paths) - def debug(msg): + def _debug(msg: str) -> None: if self.debug: self.debug.write(msg) + # The matchers for should_trace. + # Generally useful information - debug("sys.path:" + "".join(f"\n {p}" for p in sys.path)) + _debug("sys.path:" + "".join(f"\n {p}" for p in sys.path)) # Create the matchers we need for should_trace + self.source_match = None + self.source_pkgs_match = None + self.pylib_match = None + self.include_match = self.omit_match = None + if self.source or self.source_pkgs: against = [] if self.source: @@ -254,44 +241,46 @@ def debug(msg): if self.source_pkgs: self.source_pkgs_match = ModuleMatcher(self.source_pkgs, "source_pkgs") against.append(f"modules {self.source_pkgs_match!r}") - debug("Source matching against " + " and ".join(against)) + _debug("Source matching against " + " and ".join(against)) else: if self.pylib_paths: self.pylib_match = TreeMatcher(self.pylib_paths, "pylib") - debug(f"Python stdlib matching: {self.pylib_match!r}") + _debug(f"Python stdlib matching: {self.pylib_match!r}") if self.include: - self.include_match = FnmatchMatcher(self.include, "include") - debug(f"Include matching: {self.include_match!r}") + self.include_match = GlobMatcher(self.include, "include") + _debug(f"Include matching: {self.include_match!r}") if self.omit: - self.omit_match = FnmatchMatcher(self.omit, "omit") - debug(f"Omit matching: {self.omit_match!r}") + self.omit_match = GlobMatcher(self.omit, "omit") + _debug(f"Omit matching: {self.omit_match!r}") self.cover_match = TreeMatcher(self.cover_paths, "coverage") - debug(f"Coverage code matching: {self.cover_match!r}") + _debug(f"Coverage code matching: {self.cover_match!r}") self.third_match = TreeMatcher(self.third_paths, "third") - debug(f"Third-party lib matching: {self.third_match!r}") + _debug(f"Third-party lib matching: {self.third_match!r}") # Check if the source we want to measure has been installed as a # third-party package. 
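The include/omit matchers wired up above boil down to "trace a file only if it matches include and does not match omit". A rough sketch with a deliberately simplified glob-to-regex step (real pattern handling is globs_to_regex in files.py):

    import re

    def make_matcher(pats):
        """Compile simplified glob patterns ('*' stays within one directory)."""
        rx = "|".join("(?:%s)" % re.escape(p).replace(r"\*", "[^/]*") for p in pats)
        return re.compile(r"(?:%s)\Z" % rx)

    include = make_matcher(["src/*.py"])
    omit = make_matcher(["src/vendored_*.py"])

    for f in ["src/app.py", "src/vendored_lib.py", "docs/conf.py"]:
        traced = bool(include.match(f)) and not omit.match(f)
        print(f, traced)    # True, False, False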
+ # Is the source inside a third-party area? + self.source_in_third = False with sys_modules_saved(): for pkg in self.source_pkgs: try: modfile, path = file_and_path_for_module(pkg) - debug(f"Imported source package {pkg!r} as {modfile!r}") + _debug(f"Imported source package {pkg!r} as {modfile!r}") except CoverageException as exc: - debug(f"Couldn't import source package {pkg!r}: {exc}") + _debug(f"Couldn't import source package {pkg!r}: {exc}") continue if modfile: if self.third_match.match(modfile): - debug( + _debug( f"Source is in third-party because of source_pkg {pkg!r} at {modfile!r}" ) self.source_in_third = True else: for pathdir in path: if self.third_match.match(pathdir): - debug( + _debug( f"Source is in third-party because of {pkg!r} path directory " + f"at {pathdir!r}" ) @@ -299,10 +288,13 @@ def debug(msg): for src in self.source: if self.third_match.match(src): - debug(f"Source is in third-party because of source directory {src!r}") + _debug(f"Source is in third-party because of source directory {src!r}") self.source_in_third = True - def should_trace(self, filename, frame=None): + self.plugins: Plugins + self.disp_class: Type[TFileDisposition] = FileDisposition + + def should_trace(self, filename: str, frame: Optional[FrameType]=None) -> TFileDisposition: """Decide whether to trace execution in `filename`, with a reason. This function is called from the trace function. As each new file name @@ -314,7 +306,7 @@ def should_trace(self, filename, frame=None): original_filename = filename disp = disposition_init(self.disp_class, filename) - def nope(disp, reason): + def nope(disp: TFileDisposition, reason: str) -> TFileDisposition: """Simple helper to make it easy to return NO.""" disp.trace = False disp.reason = reason @@ -355,10 +347,6 @@ def nope(disp, reason): # can't do anything with the data later anyway. return nope(disp, "not a real file name") - # Jython reports the .class file to the tracer, use the source file. - if filename.endswith("$py.class"): - filename = filename[:-9] + ".py" - canonical = canonical_filename(filename) disp.canonical_filename = canonical @@ -403,7 +391,7 @@ def nope(disp, reason): return disp - def check_include_omit_etc(self, filename, frame): + def check_include_omit_etc(self, filename: str, frame: Optional[FrameType]) -> Optional[str]: """Check a file name against the include, omit, etc, rules. Returns a string or None. String means, don't trace, and is the reason @@ -465,13 +453,13 @@ def check_include_omit_etc(self, filename, frame): # No reason found to skip this file. return None - def warn_conflicting_settings(self): + def warn_conflicting_settings(self) -> None: """Warn if there are settings that conflict.""" if self.include: if self.source or self.source_pkgs: self.warn("--include is ignored because --source is set", slug="include-ignored") - def warn_already_imported_files(self): + def warn_already_imported_files(self) -> None: """Warn if files have already been imported that we will be measuring.""" if self.include or self.source or self.source_pkgs: warned = set() @@ -503,12 +491,12 @@ def warn_already_imported_files(self): ) ) - def warn_unimported_source(self): + def warn_unimported_source(self) -> None: """Warn about source packages that were of interest, but never traced.""" for pkg in self.source_pkgs_unmatched: self._warn_about_unmeasured_code(pkg) - def _warn_about_unmeasured_code(self, pkg): + def _warn_about_unmeasured_code(self, pkg: str) -> None: """Warn about a package or module that we never traced. 
`pkg` is a string, the name of the package or module. @@ -534,7 +522,7 @@ def _warn_about_unmeasured_code(self, pkg): msg = f"Module {pkg} was previously imported, but not measured" self.warn(msg, slug="module-not-measured") - def find_possibly_unexecuted_files(self): + def find_possibly_unexecuted_files(self) -> Iterable[Tuple[str, Optional[str]]]: """Find files in the areas of interest that might be untraced. Yields pairs: file path, and responsible plug-in name. @@ -543,19 +531,19 @@ def find_possibly_unexecuted_files(self): if (not pkg in sys.modules or not module_has_file(sys.modules[pkg])): continue - pkg_file = source_for_file(sys.modules[pkg].__file__) + pkg_file = source_for_file(cast(str, sys.modules[pkg].__file__)) yield from self._find_executable_files(canonical_path(pkg_file)) for src in self.source: yield from self._find_executable_files(src) - def _find_plugin_files(self, src_dir): + def _find_plugin_files(self, src_dir: str) -> Iterable[Tuple[str, str]]: """Get executable files from the plugins.""" for plugin in self.plugins.file_tracers: for x_file in plugin.find_executable_files(src_dir): yield x_file, plugin._coverage_plugin_name - def _find_executable_files(self, src_dir): + def _find_executable_files(self, src_dir: str) -> Iterable[Tuple[str, Optional[str]]]: """Find executable files in `src_dir`. Search for files in `src_dir` that can be executed because they @@ -565,18 +553,21 @@ def _find_executable_files(self, src_dir): Yield the file path, and the plugin name that handles the file. """ - py_files = ((py_file, None) for py_file in find_python_files(src_dir)) + py_files = ( + (py_file, None) for py_file in + find_python_files(src_dir, self.include_namespace_packages) + ) plugin_files = self._find_plugin_files(src_dir) for file_path, plugin_name in itertools.chain(py_files, plugin_files): file_path = canonical_filename(file_path) if self.omit_match and self.omit_match.match(file_path): # Turns out this file was omitted, so don't pull it back - # in as unexecuted. + # in as un-executed. continue yield file_path, plugin_name - def sys_info(self): + def sys_info(self) -> Iterable[Tuple[str, Any]]: """Our information for Coverage.sys_info. Returns a list of (key, value) pairs. diff --git a/coverage/jsonreport.py b/coverage/jsonreport.py index 3afae2ccd..7ee1fb99f 100644 --- a/coverage/jsonreport.py +++ b/coverage/jsonreport.py @@ -3,13 +3,22 @@ """Json reporting for coverage.py""" +from __future__ import annotations + import datetime import json import sys +from typing import Any, Dict, IO, Iterable, List, Optional, Tuple, TYPE_CHECKING + from coverage import __version__ from coverage.report import get_analysis_to_report -from coverage.results import Numbers +from coverage.results import Analysis, Numbers +from coverage.types import TMorf, TLineNo + +if TYPE_CHECKING: + from coverage import Coverage + from coverage.data import CoverageData class JsonReporter: @@ -17,13 +26,13 @@ class JsonReporter: report_type = "JSON report" - def __init__(self, coverage): + def __init__(self, coverage: Coverage) -> None: self.coverage = coverage self.config = self.coverage.config self.total = Numbers(self.config.precision) - self.report_data = {} + self.report_data: Dict[str, Any] = {} - def report(self, morfs, outfile=None): + def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float: """Generate a json report for `morfs`. `morfs` is a list of modules or file names. 
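The JSON reporter above accumulates per-file Numbers into a total; the shape of that computation, with made-up figures standing in for real measured data:

    import json

    # Hypothetical (statements, missing) counts per file.
    files = {"app.py": (100, 10), "util.py": (50, 0)}

    report = {"files": {}, "totals": {}}
    total_statements = total_missing = 0
    for name, (statements, missing) in files.items():
        total_statements += statements
        total_missing += missing
        report["files"][name] = {
            "num_statements": statements,
            "covered_lines": statements - missing,
            "percent_covered": 100.0 * (statements - missing) / statements,
        }
    report["totals"]["percent_covered"] = (
        100.0 * (total_statements - total_missing) / total_statements
    )
    print(json.dumps(report, indent=4))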
@@ -75,7 +84,7 @@ def report(self, morfs, outfile=None): return self.total.n_statements and self.total.pc_covered - def report_one_file(self, coverage_data, analysis): + def report_one_file(self, coverage_data: CoverageData, analysis: Analysis) -> Dict[str, Any]: """Extract the relevant report data for a single file.""" nums = analysis.numbers self.total += nums @@ -96,7 +105,7 @@ def report_one_file(self, coverage_data, analysis): if self.config.json_show_contexts: reported_file['contexts'] = analysis.data.contexts_by_lineno(analysis.filename) if coverage_data.has_arcs(): - reported_file['summary'].update({ + summary.update({ 'num_branches': nums.n_branches, 'num_partial_branches': nums.n_partial_branches, 'covered_branches': nums.n_executed_branches, @@ -111,7 +120,9 @@ def report_one_file(self, coverage_data, analysis): return reported_file -def _convert_branch_arcs(branch_arcs): +def _convert_branch_arcs( + branch_arcs: Dict[TLineNo, List[TLineNo]], +) -> Iterable[Tuple[TLineNo, TLineNo]]: """Convert branch arcs to a list of two-element tuples.""" for source, targets in branch_arcs.items(): for target in targets: diff --git a/coverage/lcovreport.py b/coverage/lcovreport.py index 4dc73c297..5a84f0f26 100644 --- a/coverage/lcovreport.py +++ b/coverage/lcovreport.py @@ -3,11 +3,22 @@ """LCOV reporting for coverage.py.""" +from __future__ import annotations + import sys import base64 from hashlib import md5 +from typing import IO, Iterable, Optional, TYPE_CHECKING + +from coverage.plugin import FileReporter from coverage.report import get_analysis_to_report +from coverage.results import Analysis, Numbers +from coverage.types import TMorf + +if TYPE_CHECKING: + from coverage import Coverage + from coverage.data import CoverageData class LcovReporter: @@ -15,11 +26,11 @@ class LcovReporter: report_type = "LCOV report" - def __init__(self, coverage): + def __init__(self, coverage: Coverage) -> None: self.coverage = coverage - self.config = self.coverage.config + self.total = Numbers(self.coverage.config.precision) - def report(self, morfs, outfile=None): + def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float: """Renders the full lcov report. 'morfs' is a list of modules or filenames @@ -33,12 +44,16 @@ def report(self, morfs, outfile=None): for fr, analysis in get_analysis_to_report(self.coverage, morfs): self.get_lcov(fr, analysis, outfile) - def get_lcov(self, fr, analysis, outfile=None): + return self.total.n_statements and self.total.pc_covered + + def get_lcov(self, fr: FileReporter, analysis: Analysis, outfile: IO[str]) -> None: """Produces the lcov data for a single file. This currently supports both line and branch coverage, however function coverage is not supported. 
""" + self.total += analysis.numbers + outfile.write("TN:\n") outfile.write(f"SF:{fr.relative_filename()}\n") source_lines = fr.source().splitlines() diff --git a/coverage/misc.py b/coverage/misc.py index e9b1b8eba..a2ac2fed4 100644 --- a/coverage/misc.py +++ b/coverage/misc.py @@ -3,7 +3,10 @@ """Miscellaneous stuff for coverage.py.""" +from __future__ import annotations + import contextlib +import datetime import errno import hashlib import importlib @@ -16,18 +19,25 @@ import sys import types +from types import ModuleType +from typing import ( + Any, Callable, Dict, Generator, IO, Iterable, List, Mapping, Optional, + Tuple, TypeVar, Union, +) + from coverage import env from coverage.exceptions import CoverageException +from coverage.types import TArc # In 6.0, the exceptions moved from misc.py to exceptions.py. But a number of # other packages were importing the exceptions from misc, so import them here. # pylint: disable=unused-wildcard-import from coverage.exceptions import * # pylint: disable=wildcard-import -ISOLATED_MODULES = {} +ISOLATED_MODULES: Dict[ModuleType, ModuleType] = {} -def isolate_module(mod): +def isolate_module(mod: ModuleType) -> ModuleType: """Copy a module so that we are isolated from aggressive mocking. If a test suite mocks os.path.exists (for example), and then we need to use @@ -50,10 +60,10 @@ def isolate_module(mod): class SysModuleSaver: """Saves the contents of sys.modules, and removes new modules later.""" - def __init__(self): + def __init__(self) -> None: self.old_modules = set(sys.modules) - def restore(self): + def restore(self) -> None: """Remove any modules imported since this object started.""" new_modules = set(sys.modules) - self.old_modules for m in new_modules: @@ -61,7 +71,7 @@ def restore(self): @contextlib.contextmanager -def sys_modules_saved(): +def sys_modules_saved() -> Generator[None, None, None]: """A context manager to remove any modules imported during a block.""" saver = SysModuleSaver() try: @@ -70,7 +80,7 @@ def sys_modules_saved(): saver.restore() -def import_third_party(modname): +def import_third_party(modname: str) -> Tuple[ModuleType, bool]: """Import a third-party module we need, but might not be installed. This also cleans out the module after the import, so that coverage won't @@ -81,64 +91,19 @@ def import_third_party(modname): modname (str): the name of the module to import. Returns: - The imported module, or None if the module couldn't be imported. + The imported module, and a boolean indicating if the module could be imported. + + If the boolean is False, the module returned is not the one you want: don't use it. """ with sys_modules_saved(): try: - return importlib.import_module(modname) + return importlib.import_module(modname), True except ImportError: - return None - + return sys, False -def dummy_decorator_with_args(*args_unused, **kwargs_unused): - """Dummy no-op implementation of a decorator with arguments.""" - def _decorator(func): - return func - return _decorator - -# Use PyContracts for assertion testing on parameters and returns, but only if -# we are running our own test suite. 
-if env.USE_CONTRACTS: - from contracts import contract # pylint: disable=unused-import - from contracts import new_contract as raw_new_contract - - def new_contract(*args, **kwargs): - """A proxy for contracts.new_contract that doesn't mind happening twice.""" - try: - raw_new_contract(*args, **kwargs) - except ValueError: - # During meta-coverage, this module is imported twice, and - # PyContracts doesn't like redefining contracts. It's OK. - pass - - # Define contract words that PyContract doesn't have. - new_contract('bytes', lambda v: isinstance(v, bytes)) - new_contract('unicode', lambda v: isinstance(v, str)) - - def one_of(argnames): - """Ensure that only one of the argnames is non-None.""" - def _decorator(func): - argnameset = {name.strip() for name in argnames.split(",")} - def _wrapper(*args, **kwargs): - vals = [kwargs.get(name) for name in argnameset] - assert sum(val is not None for val in vals) == 1 - return func(*args, **kwargs) - return _wrapper - return _decorator -else: # pragma: not testing - # We aren't using real PyContracts, so just define our decorators as - # stunt-double no-ops. - contract = dummy_decorator_with_args - one_of = dummy_decorator_with_args - - def new_contract(*args_unused, **kwargs_unused): - """Dummy no-op implementation of `new_contract`.""" - pass - - -def nice_pair(pair): +def nice_pair(pair: TArc) -> str: """Make a nice string representation of a pair of numbers. If the numbers are equal, just return the number, otherwise return the pair @@ -152,7 +117,10 @@ def nice_pair(pair): return "%d-%d" % (start, end) -def expensive(fn): +TSelf = TypeVar("TSelf") +TRetVal = TypeVar("TRetVal") + +def expensive(fn: Callable[[TSelf], TRetVal]) -> Callable[[TSelf], TRetVal]: """A decorator to indicate that a method shouldn't be called more than once. Normally, this does nothing. During testing, this raises an exception if @@ -162,7 +130,7 @@ def expensive(fn): if env.TESTING: attr = "_once_" + fn.__name__ - def _wrapper(self): + def _wrapper(self: TSelf) -> TRetVal: if hasattr(self, attr): raise AssertionError(f"Shouldn't have called {fn.__name__} more than once") setattr(self, attr, True) @@ -172,7 +140,7 @@ def _wrapper(self): return fn # pragma: not testing -def bool_or_none(b): +def bool_or_none(b: Any) -> Optional[bool]: """Return bool(b), but preserve None.""" if b is None: return None @@ -180,12 +148,16 @@ def bool_or_none(b): return bool(b) -def join_regex(regexes): - """Combine a list of regexes into one that matches any of them.""" - return "|".join(f"(?:{r})" for r in regexes) +def join_regex(regexes: Iterable[str]) -> str: + """Combine a series of regex strings into one that matches any of them.""" + regexes = list(regexes) + if len(regexes) == 1: + return regexes[0] + else: + return "|".join(f"(?:{r})" for r in regexes) -def file_be_gone(path): +def file_be_gone(path: str) -> None: """Remove a file, and don't get annoyed if it doesn't exist.""" try: os.remove(path) @@ -194,7 +166,7 @@ def file_be_gone(path): raise -def ensure_dir(directory): +def ensure_dir(directory: str) -> None: """Make sure the directory exists. If `directory` is None or empty, do nothing. 
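One behavior change worth noting in this file: `join_regex` (earlier in this
diff) now passes a lone pattern through untouched instead of wrapping it in
`(?:...)`. A quick sketch with made-up patterns:

from coverage.misc import join_regex

assert join_regex(["abc"]) == "abc"                      # single: unchanged
assert join_regex(["abc", "def"]) == "(?:abc)|(?:def)"   # several: grouped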
@@ -203,12 +175,12 @@ def ensure_dir(directory): os.makedirs(directory, exist_ok=True) -def ensure_dir_for_file(path): +def ensure_dir_for_file(path: str) -> None: """Make sure the directory for the path exists.""" ensure_dir(os.path.dirname(path)) -def output_encoding(outfile=None): +def output_encoding(outfile: Optional[IO[str]]=None) -> str: """Determine the encoding to use for output written to `outfile` or stdout.""" if outfile is None: outfile = sys.stdout @@ -222,10 +194,10 @@ def output_encoding(outfile=None): class Hasher: """Hashes Python data for fingerprinting.""" - def __init__(self): + def __init__(self) -> None: self.hash = hashlib.new("sha3_256") - def update(self, v): + def update(self, v: Any) -> None: """Add `v` to the hash, recursively if needed.""" self.hash.update(str(type(v)).encode("utf-8")) if isinstance(v, str): @@ -255,12 +227,12 @@ def update(self, v): self.update(a) self.hash.update(b'.') - def hexdigest(self): + def hexdigest(self) -> str: """Retrieve the hex digest of the hash.""" return self.hash.hexdigest()[:32] -def _needs_to_implement(that, func_name): +def _needs_to_implement(that: Any, func_name: str) -> None: """Helper to raise NotImplementedError in interface stubs.""" if hasattr(that, "_coverage_plugin_name"): thing = "Plugin" @@ -282,14 +254,14 @@ class DefaultValue: and Sphinx output. """ - def __init__(self, display_as): + def __init__(self, display_as: str) -> None: self.display_as = display_as - def __repr__(self): + def __repr__(self) -> str: return self.display_as -def substitute_variables(text, variables): +def substitute_variables(text: str, variables: Mapping[str, str]) -> str: """Substitute ``${VAR}`` variables in `text` with their values. Variables in the text can take a number of shell-inspired forms:: @@ -322,7 +294,7 @@ def substitute_variables(text, variables): dollar_groups = ('dollar', 'word1', 'word2') - def dollar_replace(match): + def dollar_replace(match: re.Match[str]) -> str: """Called for each $replacement.""" # Only one of the dollar_groups will have matched, just get its text. word = next(g for g in match.group(*dollar_groups) if g) # pragma: always breaks @@ -340,13 +312,13 @@ def dollar_replace(match): return text -def format_local_datetime(dt): +def format_local_datetime(dt: datetime.datetime) -> str: """Return a string with local timezone representing the date. """ return dt.astimezone().strftime('%Y-%m-%d %H:%M %z') -def import_local_file(modname, modfile=None): +def import_local_file(modname: str, modfile: Optional[str]=None) -> ModuleType: """Import a local file as a module. Opens a file in the current directory named `modname`.py, imports it @@ -357,18 +329,20 @@ def import_local_file(modname, modfile=None): if modfile is None: modfile = modname + '.py' spec = importlib.util.spec_from_file_location(modname, modfile) + assert spec is not None mod = importlib.util.module_from_spec(spec) sys.modules[modname] = mod + assert spec.loader is not None spec.loader.exec_module(mod) return mod -def human_key(s): +def _human_key(s: str) -> List[Union[str, int]]: """Turn a string into a list of string and number chunks. "z23a" -> ["z", 23, "a"] """ - def tryint(s): + def tryint(s: str) -> Union[str, int]: """If `s` is a number, return an int, else `s` unchanged.""" try: return int(s) @@ -377,7 +351,7 @@ def tryint(s): return [tryint(c) for c in re.split(r"(\d+)", s)] -def human_sorted(strings): +def human_sorted(strings: Iterable[str]) -> List[str]: """Sort the given iterable of strings the way that humans expect. 
Numeric components in the strings are sorted as numbers. @@ -385,17 +359,23 @@ def human_sorted(strings): Returns the sorted list. """ - return sorted(strings, key=human_key) + return sorted(strings, key=_human_key) + +def human_sorted_items( + items: Iterable[Tuple[str, Any]], + reverse: bool=False, +) -> List[Tuple[str, Any]]: + """Sort (string, ...) items the way humans expect. -def human_sorted_items(items, reverse=False): - """Sort the (string, value) items the way humans expect. + The elements of `items` can be any tuple/list. They'll be sorted by the + first element (a string), with ties broken by the remaining elements. Returns the sorted list of items. """ - return sorted(items, key=lambda pair: (human_key(pair[0]), pair[1]), reverse=reverse) + return sorted(items, key=lambda item: (_human_key(item[0]), *item[1:]), reverse=reverse) -def plural(n, thing="", things=""): +def plural(n: int, thing: str="", things: str="") -> str: """Pluralize a word. If n is 1, return thing. Otherwise return things, or thing+s. diff --git a/coverage/multiproc.py b/coverage/multiproc.py index 3a9bd6339..e11ca7b70 100644 --- a/coverage/multiproc.py +++ b/coverage/multiproc.py @@ -10,7 +10,8 @@ import sys import traceback -from coverage.misc import contract +from typing import Any, Dict + # An attribute that will be set on the module to indicate that it has been # monkey-patched. @@ -18,12 +19,12 @@ OriginalProcess = multiprocessing.process.BaseProcess -original_bootstrap = OriginalProcess._bootstrap +original_bootstrap = OriginalProcess._bootstrap # type: ignore[attr-defined] class ProcessWithCoverage(OriginalProcess): # pylint: disable=abstract-method """A replacement for multiprocess.Process that starts coverage.""" - def _bootstrap(self, *args, **kwargs): + def _bootstrap(self, *args, **kwargs): # type: ignore[no-untyped-def] """Wrapper around _bootstrap to start coverage.""" try: from coverage import Coverage # avoid circular import @@ -31,6 +32,7 @@ def _bootstrap(self, *args, **kwargs): cov._warn_preimported_source = False cov.start() debug = cov._debug + assert debug is not None if debug.should("multiproc"): debug.write("Calling multiprocessing bootstrap") except Exception: @@ -50,18 +52,17 @@ def _bootstrap(self, *args, **kwargs): class Stowaway: """An object to pickle, so when it is unpickled, it can apply the monkey-patch.""" - def __init__(self, rcfile): + def __init__(self, rcfile: str) -> None: self.rcfile = rcfile - def __getstate__(self): + def __getstate__(self) -> Dict[str, str]: return {'rcfile': self.rcfile} - def __setstate__(self, state): + def __setstate__(self, state: Dict[str, str]) -> None: patch_multiprocessing(state['rcfile']) -@contract(rcfile=str) -def patch_multiprocessing(rcfile): +def patch_multiprocessing(rcfile: str) -> None: """Monkey-patch the multiprocessing module. This enables coverage measurement of processes started by multiprocessing. @@ -74,7 +75,7 @@ def patch_multiprocessing(rcfile): if hasattr(multiprocessing, PATCHED_MARKER): return - OriginalProcess._bootstrap = ProcessWithCoverage._bootstrap + OriginalProcess._bootstrap = ProcessWithCoverage._bootstrap # type: ignore[attr-defined] # Set the value in ProcessWithCoverage that will be pickled into the child # process. 
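The `Stowaway` above is a pickling trick: the object's only job is to run
`patch_multiprocessing` when it is unpickled in the child process. A
self-contained sketch of the same pattern, with hypothetical names:

import pickle

class Sideload:
    """Unpickling this object triggers a side effect in the receiving process."""
    def __init__(self, rcfile: str) -> None:
        self.rcfile = rcfile
    def __getstate__(self):
        return {"rcfile": self.rcfile}
    def __setstate__(self, state):
        # Stand-in for patch_multiprocessing(state['rcfile']).
        print("child configured from", state["rcfile"])

blob = pickle.dumps(Sideload("setup.cfg"))
pickle.loads(blob)   # the side effect fires here, on load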
@@ -92,7 +93,7 @@ def patch_multiprocessing(rcfile): except (ImportError, AttributeError): pass else: - def get_preparation_data_with_stowaway(name): + def get_preparation_data_with_stowaway(name: str) -> Dict[str, Any]: """Get the original preparation data, and also insert our stowaway.""" d = original_get_preparation_data(name) d['stowaway'] = Stowaway(rcfile) diff --git a/coverage/numbits.py b/coverage/numbits.py index 297795d9d..99d538787 100644 --- a/coverage/numbits.py +++ b/coverage/numbits.py @@ -16,18 +16,12 @@ import json from itertools import zip_longest +from typing import Iterable, List -from coverage.misc import contract, new_contract +import sqlite3 -def _to_blob(b): - """Convert a bytestring into a type SQLite will accept for a blob.""" - return b -new_contract('blob', lambda v: isinstance(v, bytes)) - - -@contract(nums='Iterable', returns='blob') -def nums_to_numbits(nums): +def nums_to_numbits(nums: Iterable[int]) -> bytes: """Convert `nums` into a numbits. Arguments: @@ -40,15 +34,14 @@ def nums_to_numbits(nums): nbytes = max(nums) // 8 + 1 except ValueError: # nums was empty. - return _to_blob(b'') + return b'' b = bytearray(nbytes) for num in nums: b[num//8] |= 1 << num % 8 - return _to_blob(bytes(b)) + return bytes(b) -@contract(numbits='blob', returns='list[int]') -def numbits_to_nums(numbits): +def numbits_to_nums(numbits: bytes) -> List[int]: """Convert a numbits into a list of numbers. Arguments: @@ -69,19 +62,17 @@ def numbits_to_nums(numbits): return nums -@contract(numbits1='blob', numbits2='blob', returns='blob') -def numbits_union(numbits1, numbits2): +def numbits_union(numbits1: bytes, numbits2: bytes) -> bytes: """Compute the union of two numbits. Returns: A new numbits, the union of `numbits1` and `numbits2`. """ byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0) - return _to_blob(bytes(b1 | b2 for b1, b2 in byte_pairs)) + return bytes(b1 | b2 for b1, b2 in byte_pairs) -@contract(numbits1='blob', numbits2='blob', returns='blob') -def numbits_intersection(numbits1, numbits2): +def numbits_intersection(numbits1: bytes, numbits2: bytes) -> bytes: """Compute the intersection of two numbits. Returns: @@ -89,11 +80,10 @@ def numbits_intersection(numbits1, numbits2): """ byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0) intersection_bytes = bytes(b1 & b2 for b1, b2 in byte_pairs) - return _to_blob(intersection_bytes.rstrip(b'\0')) + return intersection_bytes.rstrip(b'\0') -@contract(numbits1='blob', numbits2='blob', returns='bool') -def numbits_any_intersection(numbits1, numbits2): +def numbits_any_intersection(numbits1: bytes, numbits2: bytes) -> bool: """Is there any number that appears in both numbits? Determine whether two number sets have a non-empty intersection. This is @@ -106,8 +96,7 @@ def numbits_any_intersection(numbits1, numbits2): return any(b1 & b2 for b1, b2 in byte_pairs) -@contract(num='int', numbits='blob', returns='bool') -def num_in_numbits(num, numbits): +def num_in_numbits(num: int, numbits: bytes) -> bool: """Does the integer `num` appear in `numbits`? Returns: @@ -119,7 +108,7 @@ def num_in_numbits(num, numbits): return bool(numbits[nbyte] & (1 << nbit)) -def register_sqlite_functions(connection): +def register_sqlite_functions(connection: sqlite3.Connection) -> None: """ Define numbits functions in a SQLite connection. 
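With the `blob` contracts gone, the numbits helpers are plain `bytes`-in,
`bytes`-out functions, easy to exercise directly. A round-trip with toy data:

from coverage.numbits import (
    nums_to_numbits, numbits_to_nums, numbits_union, num_in_numbits,
)

nb = nums_to_numbits([1, 3, 5])          # one bit per line number
assert numbits_to_nums(nb) == [1, 3, 5]  # and back again
assert num_in_numbits(3, nb)
assert not num_in_numbits(4, nb)
both = numbits_union(nb, nums_to_numbits([4]))
assert numbits_to_nums(both) == [1, 3, 4, 5]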
diff --git a/coverage/parser.py b/coverage/parser.py index 8b2a9ac54..2a8d0a50e 100644 --- a/coverage/parser.py +++ b/coverage/parser.py @@ -3,19 +3,28 @@ """Code parsing for coverage.py.""" +from __future__ import annotations + import ast import collections import os import re +import sys import token import tokenize +from types import CodeType +from typing import ( + cast, Any, Callable, Dict, Iterable, List, Optional, Sequence, Set, Tuple, +) + from coverage import env from coverage.bytecode import code_objects from coverage.debug import short_stack -from coverage.exceptions import NoSource, NotPython, _StopEverything -from coverage.misc import contract, join_regex, new_contract, nice_pair, one_of -from coverage.phystokens import compile_unicode, generate_tokens, neuter_encoding_declaration +from coverage.exceptions import NoSource, NotPython +from coverage.misc import join_regex, nice_pair +from coverage.phystokens import generate_tokens +from coverage.types import Protocol, TArc, TLineNo class PythonParser: @@ -25,18 +34,23 @@ class PythonParser: involved. """ - @contract(text='unicode|None') - def __init__(self, text=None, filename=None, exclude=None): + def __init__( + self, + text: Optional[str]=None, + filename: Optional[str]=None, + exclude: Optional[str]=None, + ) -> None: """ Source can be provided as `text`, the text itself, or `filename`, from which the text will be read. Excluded lines are those that match - `exclude`, a regex. + `exclude`, a regex string. """ assert text or filename, "PythonParser needs either text or filename" self.filename = filename or "" - self.text = text - if not self.text: + if text is not None: + self.text: str = text + else: from coverage.python import get_python_source try: self.text = get_python_source(self.filename) @@ -46,45 +60,45 @@ def __init__(self, text=None, filename=None, exclude=None): self.exclude = exclude # The text lines of the parsed code. - self.lines = self.text.split('\n') + self.lines: List[str] = self.text.split('\n') # The normalized line numbers of the statements in the code. Exclusions # are taken into account, and statements are adjusted to their first # lines. - self.statements = set() + self.statements: Set[TLineNo] = set() # The normalized line numbers of the excluded lines in the code, # adjusted to their first lines. - self.excluded = set() + self.excluded: Set[TLineNo] = set() # The raw_* attributes are only used in this class, and in # lab/parser.py to show how this class is working. # The line numbers that start statements, as reported by the line # number table in the bytecode. - self.raw_statements = set() + self.raw_statements: Set[TLineNo] = set() # The raw line numbers of excluded lines of code, as marked by pragmas. - self.raw_excluded = set() + self.raw_excluded: Set[TLineNo] = set() # The line numbers of class definitions. - self.raw_classdefs = set() + self.raw_classdefs: Set[TLineNo] = set() # The line numbers of docstring lines. - self.raw_docstrings = set() + self.raw_docstrings: Set[TLineNo] = set() # Internal detail, used by lab/parser.py. self.show_tokens = False # A dict mapping line numbers to lexical statement starts for # multi-line statements. - self._multiline = {} + self._multiline: Dict[TLineNo, TLineNo] = {} # Lazily-created arc data, and missing arc descriptions. 
- self._all_arcs = None - self._missing_arc_fragments = None + self._all_arcs: Optional[Set[TArc]] = None + self._missing_arc_fragments: Optional[TArcFragments] = None - def lines_matching(self, *regexes): + def lines_matching(self, *regexes: str) -> Set[TLineNo]: """Find the lines matching one of a list of regexes. Returns a set of line numbers, the lines that contain a match for one @@ -100,7 +114,7 @@ def lines_matching(self, *regexes): matches.add(i) return matches - def _raw_parse(self): + def _raw_parse(self) -> None: """Parse the source to find the interesting facts about its lines. A handful of attributes are updated. @@ -122,6 +136,7 @@ def _raw_parse(self): first_on_line = True nesting = 0 + assert self.text is not None tokgen = generate_tokens(self.text) for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen: if self.show_tokens: # pragma: debugging @@ -167,21 +182,20 @@ def _raw_parse(self): # http://stackoverflow.com/questions/1769332/x/1769794#1769794 self.raw_docstrings.update(range(slineno, elineno+1)) elif toktype == token.NEWLINE: - if first_line is not None and elineno != first_line: + if first_line is not None and elineno != first_line: # type: ignore[unreachable] # We're at the end of a line, and we've ended on a # different line than the first line of the statement, # so record a multi-line range. - for l in range(first_line, elineno+1): + for l in range(first_line, elineno+1): # type: ignore[unreachable] self._multiline[l] = first_line first_line = None first_on_line = True if ttext.strip() and toktype != tokenize.COMMENT: - # A non-whitespace token. + # A non-white-space token. empty = False if first_line is None: - # The token is not whitespace, and is the first in a - # statement. + # The token is not white space, and is the first in a statement. first_line = slineno # Check whether to end an excluded suite. if excluding and indent <= exclude_indent: @@ -203,32 +217,32 @@ def _raw_parse(self): if env.PYBEHAVIOR.module_firstline_1 and self._multiline: self._multiline[1] = min(self.raw_statements) - def first_line(self, line): - """Return the first line number of the statement including `line`.""" - if line < 0: - line = -self._multiline.get(-line, -line) + def first_line(self, lineno: TLineNo) -> TLineNo: + """Return the first line number of the statement including `lineno`.""" + if lineno < 0: + lineno = -self._multiline.get(-lineno, -lineno) else: - line = self._multiline.get(line, line) - return line + lineno = self._multiline.get(lineno, lineno) + return lineno - def first_lines(self, lines): - """Map the line numbers in `lines` to the correct first line of the + def first_lines(self, linenos: Iterable[TLineNo]) -> Set[TLineNo]: + """Map the line numbers in `linenos` to the correct first line of the statement. Returns a set of the first lines. """ - return {self.first_line(l) for l in lines} + return {self.first_line(l) for l in linenos} - def translate_lines(self, lines): + def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]: """Implement `FileReporter.translate_lines`.""" return self.first_lines(lines) - def translate_arcs(self, arcs): + def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]: """Implement `FileReporter.translate_arcs`.""" - return [(self.first_line(a), self.first_line(b)) for (a, b) in arcs] + return {(self.first_line(a), self.first_line(b)) for (a, b) in arcs} - def parse_source(self): + def parse_source(self) -> None: """Parse source text to find executable lines, excluded lines, etc. 
Sets the .excluded and .statements attributes, normalized to the first @@ -253,7 +267,7 @@ def parse_source(self): starts = self.raw_statements - ignore self.statements = self.first_lines(starts) - ignore - def arcs(self): + def arcs(self) -> Set[TArc]: """Get information about the arcs available in the code. Returns a set of line number pairs. Line numbers have been normalized @@ -262,9 +276,10 @@ def arcs(self): """ if self._all_arcs is None: self._analyze_ast() + assert self._all_arcs is not None return self._all_arcs - def _analyze_ast(self): + def _analyze_ast(self) -> None: """Run the AstArcAnalyzer and save its results. `_all_arcs` is the set of arcs in the code. @@ -282,13 +297,13 @@ def _analyze_ast(self): self._missing_arc_fragments = aaa.missing_arc_fragments - def exit_counts(self): + def exit_counts(self) -> Dict[TLineNo, int]: """Get a count of exits from that each line. Excluded lines are excluded. """ - exit_counts = collections.defaultdict(int) + exit_counts: Dict[TLineNo, int] = collections.defaultdict(int) for l1, l2 in self.arcs(): if l1 < 0: # Don't ever report -1 as a line number @@ -309,10 +324,16 @@ def exit_counts(self): return exit_counts - def missing_arc_description(self, start, end, executed_arcs=None): + def missing_arc_description( + self, + start: TLineNo, + end: TLineNo, + executed_arcs: Optional[Set[TArc]]=None, + ) -> str: """Provide an English sentence describing a missing arc.""" if self._missing_arc_fragments is None: self._analyze_ast() + assert self._missing_arc_fragments is not None actual_start = start @@ -352,31 +373,27 @@ def missing_arc_description(self, start, end, executed_arcs=None): class ByteParser: """Parse bytecode to understand the structure of code.""" - @contract(text='unicode') - def __init__(self, text, code=None, filename=None): + def __init__( + self, + text: str, + code: Optional[CodeType]=None, + filename: Optional[str]=None, + ) -> None: self.text = text - if code: + if code is not None: self.code = code else: + assert filename is not None try: - self.code = compile_unicode(text, filename, "exec") + self.code = compile(text, filename, "exec") except SyntaxError as synerr: raise NotPython( "Couldn't parse '%s' as Python source: '%s' at line %d" % ( - filename, synerr.msg, synerr.lineno + filename, synerr.msg, synerr.lineno or 0 ) ) from synerr - # Alternative Python implementations don't always provide all the - # attributes on code objects that we need to do the analysis. - for attr in ['co_lnotab', 'co_firstlineno']: - if not hasattr(self.code, attr): - raise _StopEverything( # pragma: only jython - "This implementation of Python doesn't support code analysis.\n" + - "Run coverage.py under another Python for this command." - ) - - def child_parsers(self): + def child_parsers(self) -> Iterable[ByteParser]: """Iterate over all the code objects nested within this one. The iteration includes `self` as its first value. @@ -384,7 +401,7 @@ def child_parsers(self): """ return (ByteParser(self.text, code=c) for c in code_objects(self.code)) - def _line_numbers(self): + def _line_numbers(self) -> Iterable[TLineNo]: """Yield the line numbers possible in this code object. Uses co_lnotab described in Python/compile.c to find the @@ -414,7 +431,7 @@ def _line_numbers(self): if line_num != last_line_num: yield line_num - def _find_statements(self): + def _find_statements(self) -> Iterable[TLineNo]: """Find the statements in `self.code`. Produce a sequence of line numbers that start statements. 
Recurses @@ -430,7 +447,36 @@ def _find_statements(self): # AST analysis # -class BlockBase: +class ArcStart(collections.namedtuple("Arc", "lineno, cause")): + """The information needed to start an arc. + + `lineno` is the line number the arc starts from. + + `cause` is an English text fragment used as the `startmsg` for + AstArcAnalyzer.missing_arc_fragments. It will be used to describe why an + arc wasn't executed, so should fit well into a sentence of the form, + "Line 17 didn't run because {cause}." The fragment can include "{lineno}" + to have `lineno` interpolated into it. + + """ + def __new__(cls, lineno: TLineNo, cause: Optional[str]=None) -> ArcStart: + return super().__new__(cls, lineno, cause) + + +class TAddArcFn(Protocol): + """The type for AstArcAnalyzer.add_arc().""" + def __call__( + self, + start: TLineNo, + end: TLineNo, + smsg: Optional[str]=None, + emsg: Optional[str]=None, + ) -> None: + ... + +TArcFragments = Dict[TArc, List[Tuple[Optional[str], Optional[str]]]] + +class Block: """ Blocks need to handle various exiting statements in their own ways. @@ -440,56 +486,54 @@ class BlockBase: stack. """ # pylint: disable=unused-argument - def process_break_exits(self, exits, add_arc): + def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: """Process break exits.""" # Because break can only appear in loops, and most subclasses # implement process_break_exits, this function is never reached. raise AssertionError - def process_continue_exits(self, exits, add_arc): + def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: """Process continue exits.""" # Because continue can only appear in loops, and most subclasses # implement process_continue_exits, this function is never reached. raise AssertionError - def process_raise_exits(self, exits, add_arc): + def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: """Process raise exits.""" return False - def process_return_exits(self, exits, add_arc): + def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: """Process return exits.""" return False -class LoopBlock(BlockBase): +class LoopBlock(Block): """A block on the block stack representing a `for` or `while` loop.""" - @contract(start=int) - def __init__(self, start): + def __init__(self, start: TLineNo) -> None: # The line number where the loop starts. self.start = start # A set of ArcStarts, the arcs from break statements exiting this loop. - self.break_exits = set() + self.break_exits: Set[ArcStart] = set() - def process_break_exits(self, exits, add_arc): + def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: self.break_exits.update(exits) return True - def process_continue_exits(self, exits, add_arc): + def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: for xit in exits: add_arc(xit.lineno, self.start, xit.cause) return True -class FunctionBlock(BlockBase): +class FunctionBlock(Block): """A block on the block stack representing a function definition.""" - @contract(start=int, name=str) - def __init__(self, start, name): + def __init__(self, start: TLineNo, name: str) -> None: # The line number where the function starts. self.start = start # The name of the function. 
self.name = name - def process_raise_exits(self, exits, add_arc): + def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: for xit in exits: add_arc( xit.lineno, -self.start, xit.cause, @@ -497,7 +541,7 @@ def process_raise_exits(self, exits, add_arc): ) return True - def process_return_exits(self, exits, add_arc): + def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: for xit in exits: add_arc( xit.lineno, -self.start, xit.cause, @@ -506,10 +550,9 @@ def process_return_exits(self, exits, add_arc): return True -class TryBlock(BlockBase): +class TryBlock(Block): """A block on the block stack representing a `try` block.""" - @contract(handler_start='int|None', final_start='int|None') - def __init__(self, handler_start, final_start): + def __init__(self, handler_start: Optional[TLineNo], final_start: Optional[TLineNo]) -> None: # The line number of the first "except" handler, if any. self.handler_start = handler_start # The line number of the "finally:" clause, if any. @@ -517,24 +560,24 @@ def __init__(self, handler_start, final_start): # The ArcStarts for breaks/continues/returns/raises inside the "try:" # that need to route through the "finally:" clause. - self.break_from = set() - self.continue_from = set() - self.raise_from = set() - self.return_from = set() + self.break_from: Set[ArcStart] = set() + self.continue_from: Set[ArcStart] = set() + self.raise_from: Set[ArcStart] = set() + self.return_from: Set[ArcStart] = set() - def process_break_exits(self, exits, add_arc): + def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: if self.final_start is not None: self.break_from.update(exits) return True return False - def process_continue_exits(self, exits, add_arc): + def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: if self.final_start is not None: self.continue_from.update(exits) return True return False - def process_raise_exits(self, exits, add_arc): + def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: if self.handler_start is not None: for xit in exits: add_arc(xit.lineno, self.handler_start, xit.cause) @@ -543,17 +586,16 @@ def process_raise_exits(self, exits, add_arc): self.raise_from.update(exits) return True - def process_return_exits(self, exits, add_arc): + def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: if self.final_start is not None: self.return_from.update(exits) return True return False -class WithBlock(BlockBase): +class WithBlock(Block): """A block on the block stack representing a `with` block.""" - @contract(start=int) - def __init__(self, start): + def __init__(self, start: TLineNo) -> None: # We only ever use this block if it is needed, so that we don't have to # check this setting in all the methods. assert env.PYBEHAVIOR.exit_through_with @@ -563,11 +605,16 @@ def __init__(self, start): # The ArcStarts for breaks/continues/returns/raises inside the "with:" # that need to go through the with-statement while exiting. 
- self.break_from = set() - self.continue_from = set() - self.return_from = set() - - def _process_exits(self, exits, add_arc, from_set=None): + self.break_from: Set[ArcStart] = set() + self.continue_from: Set[ArcStart] = set() + self.return_from: Set[ArcStart] = set() + + def _process_exits( + self, + exits: Set[ArcStart], + add_arc: TAddArcFn, + from_set: Optional[Set[ArcStart]]=None, + ) -> bool: """Helper to process the four kinds of exits.""" for xit in exits: add_arc(xit.lineno, self.start, xit.cause) @@ -575,48 +622,27 @@ def _process_exits(self, exits, add_arc, from_set=None): from_set.update(exits) return True - def process_break_exits(self, exits, add_arc): + def process_break_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: return self._process_exits(exits, add_arc, self.break_from) - def process_continue_exits(self, exits, add_arc): + def process_continue_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: return self._process_exits(exits, add_arc, self.continue_from) - def process_raise_exits(self, exits, add_arc): + def process_raise_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: return self._process_exits(exits, add_arc) - def process_return_exits(self, exits, add_arc): + def process_return_exits(self, exits: Set[ArcStart], add_arc: TAddArcFn) -> bool: return self._process_exits(exits, add_arc, self.return_from) -class ArcStart(collections.namedtuple("Arc", "lineno, cause")): - """The information needed to start an arc. - - `lineno` is the line number the arc starts from. - - `cause` is an English text fragment used as the `startmsg` for - AstArcAnalyzer.missing_arc_fragments. It will be used to describe why an - arc wasn't executed, so should fit well into a sentence of the form, - "Line 17 didn't run because {cause}." The fragment can include "{lineno}" - to have `lineno` interpolated into it. - - """ - def __new__(cls, lineno, cause=None): - return super().__new__(cls, lineno, cause) - - -# Define contract words that PyContract doesn't have. -# ArcStarts is for a list or set of ArcStart's. -new_contract('ArcStarts', lambda seq: all(isinstance(x, ArcStart) for x in seq)) - - -class NodeList: +class NodeList(ast.AST): """A synthetic fictitious node, containing a sequence of nodes. This is used when collapsing optimized if-statements, to represent the unconditional execution of one of the clauses. """ - def __init__(self, body): + def __init__(self, body: Sequence[ast.AST]) -> None: self.body = body self.lineno = body[0].lineno @@ -624,17 +650,25 @@ def __init__(self, body): # TODO: the cause messages have too many commas. # TODO: Shouldn't the cause messages join with "and" instead of "or"? 
-def ast_parse(text): - """How we create an AST parse.""" - return ast.parse(neuter_encoding_declaration(text)) +def _make_expression_code_method(noun: str) -> Callable[[AstArcAnalyzer, ast.AST], None]: + """A function to make methods for expression-based callable _code_object__ methods.""" + def _code_object__expression_callable(self: AstArcAnalyzer, node: ast.AST) -> None: + start = self.line_for_node(node) + self.add_arc(-start, start, None, f"didn't run the {noun} on line {start}") + self.add_arc(start, -start, None, f"didn't finish the {noun} on line {start}") + return _code_object__expression_callable class AstArcAnalyzer: """Analyze source text with an AST to find executable code paths.""" - @contract(text='unicode', statements=set) - def __init__(self, text, statements, multiline): - self.root_node = ast_parse(text) + def __init__( + self, + text: str, + statements: Set[TLineNo], + multiline: Dict[TLineNo, TLineNo], + ) -> None: + self.root_node = ast.parse(text) # TODO: I think this is happening in too many places. self.statements = {multiline.get(l, l) for l in statements} self.multiline = multiline @@ -649,20 +683,20 @@ def __init__(self, text, statements, multiline): print(f"Multiline map: {self.multiline}") ast_dump(self.root_node) - self.arcs = set() + self.arcs: Set[TArc] = set() # A map from arc pairs to a list of pairs of sentence fragments: # { (start, end): [(startmsg, endmsg), ...], } # # For an arc from line 17, they should be usable like: # "Line 17 {endmsg}, because {startmsg}" - self.missing_arc_fragments = collections.defaultdict(list) - self.block_stack = [] + self.missing_arc_fragments: TArcFragments = collections.defaultdict(list) + self.block_stack: List[Block] = [] # $set_env.py: COVERAGE_TRACK_ARCS - Trace possible arcs added while parsing code. self.debug = bool(int(os.environ.get("COVERAGE_TRACK_ARCS", 0))) - def analyze(self): + def analyze(self) -> None: """Examine the AST tree from `root_node` to determine possible arcs. This sets the `arcs` attribute to be a set of (from, to) line number @@ -675,8 +709,13 @@ def analyze(self): if code_object_handler is not None: code_object_handler(node) - @contract(start=int, end=int) - def add_arc(self, start, end, smsg=None, emsg=None): + def add_arc( + self, + start: TLineNo, + end: TLineNo, + smsg: Optional[str]=None, + emsg: Optional[str]=None, + ) -> None: """Add an arc, including message fragments to use if it is missing.""" if self.debug: # pragma: debugging print(f"\nAdding possible arc: ({start}, {end}): {smsg!r}, {emsg!r}") @@ -686,25 +725,27 @@ def add_arc(self, start, end, smsg=None, emsg=None): if smsg is not None or emsg is not None: self.missing_arc_fragments[(start, end)].append((smsg, emsg)) - def nearest_blocks(self): + def nearest_blocks(self) -> Iterable[Block]: """Yield the blocks in nearest-to-farthest order.""" return reversed(self.block_stack) - @contract(returns=int) - def line_for_node(self, node): + def line_for_node(self, node: ast.AST) -> TLineNo: """What is the right line number to use for this node? This dispatches to _line__Node functions where needed. 
""" node_name = node.__class__.__name__ - handler = getattr(self, "_line__" + node_name, None) + handler = cast( + Optional[Callable[[ast.AST], TLineNo]], + getattr(self, "_line__" + node_name, None) + ) if handler is not None: return handler(node) else: return node.lineno - def _line_decorated(self, node): + def _line_decorated(self, node: ast.FunctionDef) -> TLineNo: """Compute first line number for things that can be decorated (classes and functions).""" lineno = node.lineno if env.PYBEHAVIOR.trace_decorated_def or env.PYBEHAVIOR.def_ast_no_decorator: @@ -712,12 +753,12 @@ def _line_decorated(self, node): lineno = node.decorator_list[0].lineno return lineno - def _line__Assign(self, node): + def _line__Assign(self, node: ast.Assign) -> TLineNo: return self.line_for_node(node.value) _line__ClassDef = _line_decorated - def _line__Dict(self, node): + def _line__Dict(self, node: ast.Dict) -> TLineNo: if node.keys: if node.keys[0] is not None: return node.keys[0].lineno @@ -731,13 +772,13 @@ def _line__Dict(self, node): _line__FunctionDef = _line_decorated _line__AsyncFunctionDef = _line_decorated - def _line__List(self, node): + def _line__List(self, node: ast.List) -> TLineNo: if node.elts: return self.line_for_node(node.elts[0]) else: return node.lineno - def _line__Module(self, node): + def _line__Module(self, node: ast.Module) -> TLineNo: if env.PYBEHAVIOR.module_firstline_1: return 1 elif node.body: @@ -752,8 +793,7 @@ def _line__Module(self, node): "Import", "ImportFrom", "Nonlocal", "Pass", } - @contract(returns='ArcStarts') - def add_arcs(self, node): + def add_arcs(self, node: ast.AST) -> Set[ArcStart]: """Add the arcs for `node`. Return a set of ArcStarts, exits from this node to the next. Because a @@ -770,7 +810,10 @@ def add_arcs(self, node): """ node_name = node.__class__.__name__ - handler = getattr(self, "_handle__" + node_name, None) + handler = cast( + Optional[Callable[[ast.AST], Set[ArcStart]]], + getattr(self, "_handle__" + node_name, None) + ) if handler is not None: return handler(node) else: @@ -783,9 +826,12 @@ def add_arcs(self, node): # Default for simple statements: one exit from this node. return {ArcStart(self.line_for_node(node))} - @one_of("from_start, prev_starts") - @contract(returns='ArcStarts') - def add_body_arcs(self, body, from_start=None, prev_starts=None): + def add_body_arcs( + self, + body: Sequence[ast.AST], + from_start: Optional[ArcStart]=None, + prev_starts: Optional[Set[ArcStart]]=None + ) -> Set[ArcStart]: """Add arcs for the body of a compound statement. `body` is the body node. `from_start` is a single `ArcStart` that can @@ -797,21 +843,23 @@ def add_body_arcs(self, body, from_start=None, prev_starts=None): """ if prev_starts is None: + assert from_start is not None prev_starts = {from_start} for body_node in body: lineno = self.line_for_node(body_node) first_line = self.multiline.get(lineno, lineno) if first_line not in self.statements: - body_node = self.find_non_missing_node(body_node) - if body_node is None: + maybe_body_node = self.find_non_missing_node(body_node) + if maybe_body_node is None: continue + body_node = maybe_body_node lineno = self.line_for_node(body_node) for prev_start in prev_starts: self.add_arc(prev_start.lineno, lineno, prev_start.cause) prev_starts = self.add_arcs(body_node) return prev_starts - def find_non_missing_node(self, node): + def find_non_missing_node(self, node: ast.AST) -> Optional[ast.AST]: """Search `node` looking for a child that has not been optimized away. 
This might return the node you started with, or it will work recursively @@ -828,12 +876,15 @@ def find_non_missing_node(self, node): if first_line in self.statements: return node - missing_fn = getattr(self, "_missing__" + node.__class__.__name__, None) - if missing_fn: - node = missing_fn(node) + missing_fn = cast( + Optional[Callable[[ast.AST], Optional[ast.AST]]], + getattr(self, "_missing__" + node.__class__.__name__, None) + ) + if missing_fn is not None: + ret_node = missing_fn(node) else: - node = None - return node + ret_node = None + return ret_node # Missing nodes: _missing__* # @@ -842,7 +893,7 @@ def find_non_missing_node(self, node): # find_non_missing_node) to find a node to use instead of the missing # node. They can return None if the node should truly be gone. - def _missing__If(self, node): + def _missing__If(self, node: ast.If) -> Optional[ast.AST]: # If the if-node is missing, then one of its children might still be # here, but not both. So return the first of the two that isn't missing. # Use a NodeList to hold the clauses as a single node. @@ -853,14 +904,14 @@ def _missing__If(self, node): return self.find_non_missing_node(NodeList(node.orelse)) return None - def _missing__NodeList(self, node): + def _missing__NodeList(self, node: NodeList) -> Optional[ast.AST]: # A NodeList might be a mixture of missing and present nodes. Find the # ones that are present. non_missing_children = [] for child in node.body: - child = self.find_non_missing_node(child) - if child is not None: - non_missing_children.append(child) + maybe_child = self.find_non_missing_node(child) + if maybe_child is not None: + non_missing_children.append(maybe_child) # Return the simplest representation of the present children. if not non_missing_children: @@ -869,7 +920,7 @@ def _missing__NodeList(self, node): return non_missing_children[0] return NodeList(non_missing_children) - def _missing__While(self, node): + def _missing__While(self, node: ast.While) -> Optional[ast.AST]: body_nodes = self.find_non_missing_node(NodeList(node.body)) if not body_nodes: return None @@ -879,16 +930,17 @@ def _missing__While(self, node): new_while.test = ast.Name() new_while.test.lineno = body_nodes.lineno new_while.test.id = "True" + assert hasattr(body_nodes, "body") new_while.body = body_nodes.body - new_while.orelse = None + new_while.orelse = [] return new_while - def is_constant_expr(self, node): + def is_constant_expr(self, node: ast.AST) -> Optional[str]: """Is this a compile-time constant?""" node_name = node.__class__.__name__ if node_name in ["Constant", "NameConstant", "Num"]: return "Num" - elif node_name == "Name": + elif isinstance(node, ast.Name): if node.id in ["True", "False", "None", "__debug__"]: return "Name" return None @@ -900,7 +952,6 @@ def is_constant_expr(self, node): # listcomps hidden in lists: x = [[i for i in range(10)]] # nested function definitions - # Exit processing: process_*_exits # # These functions process the four kinds of jump exits: break, continue, @@ -909,29 +960,25 @@ def is_constant_expr(self, node): # enclosing loop block, or the nearest enclosing finally block, whichever # is nearer. 
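# (An illustrative trace, with invented line numbers.) Given:
#
#     1  for i in range(3):
#     2      if i == 1:
#     3          break
#     4  print("done")
#
# _handle__Break creates ArcStart(3, cause="the break on line {lineno}
# wasn't executed") and calls process_break_exits. The nearest LoopBlock
# stashes it in break_exits, and when _handle__For pops that block it
# records the arc (3, 4): the jump from the break to the line after the
# loop.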
- @contract(exits='ArcStarts') - def process_break_exits(self, exits): + def process_break_exits(self, exits: Set[ArcStart]) -> None: """Add arcs due to jumps from `exits` being breaks.""" for block in self.nearest_blocks(): # pragma: always breaks if block.process_break_exits(exits, self.add_arc): break - @contract(exits='ArcStarts') - def process_continue_exits(self, exits): + def process_continue_exits(self, exits: Set[ArcStart]) -> None: """Add arcs due to jumps from `exits` being continues.""" for block in self.nearest_blocks(): # pragma: always breaks if block.process_continue_exits(exits, self.add_arc): break - @contract(exits='ArcStarts') - def process_raise_exits(self, exits): + def process_raise_exits(self, exits: Set[ArcStart]) -> None: """Add arcs due to jumps from `exits` being raises.""" for block in self.nearest_blocks(): if block.process_raise_exits(exits, self.add_arc): break - @contract(exits='ArcStarts') - def process_return_exits(self, exits): + def process_return_exits(self, exits: Set[ArcStart]) -> None: """Add arcs due to jumps from `exits` being returns.""" for block in self.nearest_blocks(): # pragma: always breaks if block.process_return_exits(exits, self.add_arc): @@ -948,17 +995,16 @@ def process_return_exits(self, exits): # Every node type that represents a statement should have a handler, or it # should be listed in OK_TO_DEFAULT. - @contract(returns='ArcStarts') - def _handle__Break(self, node): + def _handle__Break(self, node: ast.Break) -> Set[ArcStart]: here = self.line_for_node(node) break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed") - self.process_break_exits([break_start]) + self.process_break_exits({break_start}) return set() - @contract(returns='ArcStarts') - def _handle_decorated(self, node): + def _handle_decorated(self, node: ast.FunctionDef) -> Set[ArcStart]: """Add arcs for things that can be decorated (classes and functions).""" - main_line = last = node.lineno + main_line: TLineNo = node.lineno + last: Optional[TLineNo] = node.lineno decs = node.decorator_list if decs: if env.PYBEHAVIOR.trace_decorated_def or env.PYBEHAVIOR.def_ast_no_decorator: @@ -968,6 +1014,7 @@ def _handle_decorated(self, node): if last is not None and dec_start != last: self.add_arc(last, dec_start) last = dec_start + assert last is not None if env.PYBEHAVIOR.trace_decorated_def: self.add_arc(last, main_line) last = main_line @@ -988,19 +1035,18 @@ def _handle_decorated(self, node): self.add_arc(last, lineno) last = lineno # The body is handled in collect_arcs. 
+ assert last is not None return {ArcStart(last)} _handle__ClassDef = _handle_decorated - @contract(returns='ArcStarts') - def _handle__Continue(self, node): + def _handle__Continue(self, node: ast.Continue) -> Set[ArcStart]: here = self.line_for_node(node) continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed") - self.process_continue_exits([continue_start]) + self.process_continue_exits({continue_start}) return set() - @contract(returns='ArcStarts') - def _handle__For(self, node): + def _handle__For(self, node: ast.For) -> Set[ArcStart]: start = self.line_for_node(node.iter) self.block_stack.append(LoopBlock(start=start)) from_start = ArcStart(start, cause="the loop on line {lineno} never started") @@ -1009,6 +1055,7 @@ def _handle__For(self, node): for xit in exits: self.add_arc(xit.lineno, start, xit.cause) my_block = self.block_stack.pop() + assert isinstance(my_block, LoopBlock) exits = my_block.break_exits from_start = ArcStart(start, cause="the loop on line {lineno} didn't complete") if node.orelse: @@ -1024,8 +1071,7 @@ def _handle__For(self, node): _handle__FunctionDef = _handle_decorated _handle__AsyncFunctionDef = _handle_decorated - @contract(returns='ArcStarts') - def _handle__If(self, node): + def _handle__If(self, node: ast.If) -> Set[ArcStart]: start = self.line_for_node(node.test) from_start = ArcStart(start, cause="the condition on line {lineno} was never true") exits = self.add_body_arcs(node.body, from_start=from_start) @@ -1033,48 +1079,50 @@ def _handle__If(self, node): exits |= self.add_body_arcs(node.orelse, from_start=from_start) return exits - @contract(returns='ArcStarts') - def _handle__Match(self, node): - start = self.line_for_node(node) - last_start = start - exits = set() - had_wildcard = False - for case in node.cases: - case_start = self.line_for_node(case.pattern) - if isinstance(case.pattern, ast.MatchAs): - had_wildcard = True - self.add_arc(last_start, case_start, "the pattern on line {lineno} always matched") - from_start = ArcStart(case_start, cause="the pattern on line {lineno} never matched") - exits |= self.add_body_arcs(case.body, from_start=from_start) - last_start = case_start - if not had_wildcard: - exits.add(from_start) - return exits + if sys.version_info >= (3, 10): + def _handle__Match(self, node: ast.Match) -> Set[ArcStart]: + start = self.line_for_node(node) + last_start = start + exits = set() + had_wildcard = False + for case in node.cases: + case_start = self.line_for_node(case.pattern) + pattern = case.pattern + while isinstance(pattern, ast.MatchOr): + pattern = pattern.patterns[-1] + if isinstance(pattern, ast.MatchAs): + had_wildcard = True + self.add_arc(last_start, case_start, "the pattern on line {lineno} always matched") + from_start = ArcStart( + case_start, + cause="the pattern on line {lineno} never matched", + ) + exits |= self.add_body_arcs(case.body, from_start=from_start) + last_start = case_start + if not had_wildcard: + exits.add(from_start) + return exits - @contract(returns='ArcStarts') - def _handle__NodeList(self, node): + def _handle__NodeList(self, node: NodeList) -> Set[ArcStart]: start = self.line_for_node(node) exits = self.add_body_arcs(node.body, from_start=ArcStart(start)) return exits - @contract(returns='ArcStarts') - def _handle__Raise(self, node): + def _handle__Raise(self, node: ast.Raise) -> Set[ArcStart]: here = self.line_for_node(node) raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed") - self.process_raise_exits([raise_start]) + 
self.process_raise_exits({raise_start}) # `raise` statement jumps away, no exits from here. return set() - @contract(returns='ArcStarts') - def _handle__Return(self, node): + def _handle__Return(self, node: ast.Return) -> Set[ArcStart]: here = self.line_for_node(node) return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed") - self.process_return_exits([return_start]) + self.process_return_exits({return_start}) # `return` statement jumps away, no exits from here. return set() - @contract(returns='ArcStarts') - def _handle__Try(self, node): + def _handle__Try(self, node: ast.Try) -> Set[ArcStart]: if node.handlers: handler_start = self.line_for_node(node.handlers[0]) else: @@ -1107,10 +1155,10 @@ def _handle__Try(self, node): else: self.block_stack.pop() - handler_exits = set() + handler_exits: Set[ArcStart] = set() if node.handlers: - last_handler_start = None + last_handler_start: Optional[TLineNo] = None for handler_node in node.handlers: handler_start = self.line_for_node(handler_node) if last_handler_start is not None: @@ -1185,8 +1233,7 @@ def _handle__Try(self, node): return exits - @contract(starts='ArcStarts', exits='ArcStarts', returns='ArcStarts') - def _combine_finally_starts(self, starts, exits): + def _combine_finally_starts(self, starts: Set[ArcStart], exits: Set[ArcStart]) -> Set[ArcStart]: """Helper for building the cause of `finally` branches. "finally" clauses might not execute their exits, and the causes could @@ -1201,8 +1248,7 @@ def _combine_finally_starts(self, starts, exits): exits = {ArcStart(xit.lineno, cause) for xit in exits} return exits - @contract(returns='ArcStarts') - def _handle__While(self, node): + def _handle__While(self, node: ast.While) -> Set[ArcStart]: start = to_top = self.line_for_node(node.test) constant_test = self.is_constant_expr(node.test) top_is_body0 = False @@ -1219,6 +1265,7 @@ def _handle__While(self, node): self.add_arc(xit.lineno, to_top, xit.cause) exits = set() my_block = self.block_stack.pop() + assert isinstance(my_block, LoopBlock) exits.update(my_block.break_exits) from_start = ArcStart(start, cause="the condition on line {lineno} was never false") if node.orelse: @@ -1230,14 +1277,14 @@ def _handle__While(self, node): exits.add(from_start) return exits - @contract(returns='ArcStarts') - def _handle__With(self, node): + def _handle__With(self, node: ast.With) -> Set[ArcStart]: start = self.line_for_node(node) if env.PYBEHAVIOR.exit_through_with: self.block_stack.append(WithBlock(start=start)) exits = self.add_body_arcs(node.body, from_start=ArcStart(start)) if env.PYBEHAVIOR.exit_through_with: with_block = self.block_stack.pop() + assert isinstance(with_block, WithBlock) with_exit = {ArcStart(start)} if exits: for xit in exits: @@ -1264,7 +1311,7 @@ def _handle__With(self, node): # These methods are used by analyze() as the start of the analysis. # There is one for each construct with a code object. 
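# (A sketch with invented line numbers.) For a two-line module:
#
#     1  a = 1
#     2  b = 2
#
# _code_object__Module adds roughly these arcs, using the negative
# pseudo-line -1 for module entry and exit:
#
#     (-1, 1)   entering the module executes line 1
#     ( 1, 2)   line 1 flows into line 2
#     ( 2, -1)  line 2 falls off the end of the module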
- def _code_object__Module(self, node): + def _code_object__Module(self, node: ast.Module) -> None: start = self.line_for_node(node) if node.body: exits = self.add_body_arcs(node.body, from_start=ArcStart(-start)) @@ -1275,7 +1322,7 @@ def _code_object__Module(self, node): self.add_arc(-start, start) self.add_arc(start, -start) - def _code_object__FunctionDef(self, node): + def _code_object__FunctionDef(self, node: ast.FunctionDef) -> None: start = self.line_for_node(node) self.block_stack.append(FunctionBlock(start=start, name=node.name)) exits = self.add_body_arcs(node.body, from_start=ArcStart(-start)) @@ -1284,7 +1331,7 @@ def _code_object__FunctionDef(self, node): _code_object__AsyncFunctionDef = _code_object__FunctionDef - def _code_object__ClassDef(self, node): + def _code_object__ClassDef(self, node: ast.ClassDef) -> None: start = self.line_for_node(node) self.add_arc(-start, start) exits = self.add_body_arcs(node.body, from_start=ArcStart(start)) @@ -1294,14 +1341,6 @@ def _code_object__ClassDef(self, node): f"didn't exit the body of class {node.name!r}", ) - def _make_expression_code_method(noun): # pylint: disable=no-self-argument - """A function to make methods for expression-based callable _code_object__ methods.""" - def _code_object__expression_callable(self, node): - start = self.line_for_node(node) - self.add_arc(-start, start, None, f"didn't run the {noun} on line {start}") - self.add_arc(start, -start, None, f"didn't finish the {noun} on line {start}") - return _code_object__expression_callable - _code_object__Lambda = _make_expression_code_method("lambda") _code_object__GeneratorExp = _make_expression_code_method("generator expression") _code_object__DictComp = _make_expression_code_method("dictionary comprehension") @@ -1313,14 +1352,18 @@ def _code_object__expression_callable(self, node): SKIP_DUMP_FIELDS = ["ctx"] -def _is_simple_value(value): +def _is_simple_value(value: Any) -> bool: """Is `value` simple enough to be displayed on a single line?""" return ( - value in [None, [], (), {}, set()] or + value in [None, [], (), {}, set(), frozenset(), Ellipsis] or isinstance(value, (bytes, int, float, str)) ) -def ast_dump(node, depth=0, print=print): # pylint: disable=redefined-builtin +def ast_dump( + node: ast.AST, + depth: int = 0, + print: Callable[[str], None]=print, # pylint: disable=redefined-builtin +) -> None: """Dump the AST for `node`. This recursively walks the AST, printing a readable version. 
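Since `ast_dump` is a module-level debugging helper, it can be tried straight
from a REPL. A minimal sketch:

import ast
from coverage.parser import ast_dump

# Prints an indented tree, one node per line, with @ line,column marks.
ast_dump(ast.parse("x = 1\n"))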
@@ -1331,6 +1374,7 @@ def ast_dump(node, depth=0, print=print): # pylint: disable=redefined-builtin if lineno is not None: linemark = f" @ {node.lineno},{node.col_offset}" if hasattr(node, "end_lineno"): + assert hasattr(node, "end_col_offset") linemark += ":" if node.end_lineno != node.lineno: linemark += f"{node.end_lineno}," @@ -1352,7 +1396,7 @@ def ast_dump(node, depth=0, print=print): # pylint: disable=redefined-builtin else: print(head) if 0: - print("{}# mro: {}".format( + print("{}# mro: {}".format( # type: ignore[unreachable] indent, ", ".join(c.__name__ for c in node.__class__.__mro__[1:]), )) next_indent = indent + " " diff --git a/coverage/phystokens.py b/coverage/phystokens.py index c6dc1e0a9..a45242fdb 100644 --- a/coverage/phystokens.py +++ b/coverage/phystokens.py @@ -4,16 +4,23 @@ """Better tokenizing for coverage.py.""" import ast +import io import keyword import re +import sys import token import tokenize +from typing import Iterable, List, Optional, Set, Tuple + from coverage import env -from coverage.misc import contract +from coverage.types import TLineNo, TSourceTokenLines + + +TokenInfos = Iterable[tokenize.TokenInfo] -def phys_tokens(toks): +def _phys_tokens(toks: TokenInfos) -> TokenInfos: """Return all physical tokens, even line continuations. tokenize.generate_tokens() doesn't return a token for the backslash that @@ -23,9 +30,9 @@ def phys_tokens(toks): Returns the same values as generate_tokens() """ - last_line = None + last_line: Optional[str] = None last_lineno = -1 - last_ttext = None + last_ttext: str = "" for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks: if last_lineno != elineno: if last_line and last_line.endswith("\\\n"): @@ -56,7 +63,7 @@ def phys_tokens(toks): # Figure out what column the backslash is in. ccol = len(last_line.split("\n")[-2]) - 1 # Yield the token, with a fake token type. - yield ( + yield tokenize.TokenInfo( 99999, "\\\n", (slineno, ccol), (slineno, ccol+2), last_line @@ -64,27 +71,27 @@ def phys_tokens(toks): last_line = ltext if ttype not in (tokenize.NEWLINE, tokenize.NL): last_ttext = ttext - yield ttype, ttext, (slineno, scol), (elineno, ecol), ltext + yield tokenize.TokenInfo(ttype, ttext, (slineno, scol), (elineno, ecol), ltext) last_lineno = elineno class MatchCaseFinder(ast.NodeVisitor): """Helper for finding match/case lines.""" - def __init__(self, source): + def __init__(self, source: str) -> None: # This will be the set of line numbers that start match or case statements. - self.match_case_lines = set() + self.match_case_lines: Set[TLineNo] = set() self.visit(ast.parse(source)) - def visit_Match(self, node): - """Invoked by ast.NodeVisitor.visit""" - self.match_case_lines.add(node.lineno) - for case in node.cases: - self.match_case_lines.add(case.pattern.lineno) - self.generic_visit(node) + if sys.version_info >= (3, 10): + def visit_Match(self, node: ast.Match) -> None: + """Invoked by ast.NodeVisitor.visit""" + self.match_case_lines.add(node.lineno) + for case in node.cases: + self.match_case_lines.add(case.pattern.lineno) + self.generic_visit(node) -@contract(source='unicode') -def source_token_lines(source): +def source_token_lines(source: str) -> TSourceTokenLines: """Generate a series of lines, one for each line in `source`. 
Each line is a list of pairs, each pair is a token:: @@ -95,13 +102,13 @@ def source_token_lines(source): If you concatenate all the token texts, and then join them with newlines, you should have your original `source` back, with two differences: - trailing whitespace is not preserved, and a final line with no newline + trailing white space is not preserved, and a final line with no newline is indistinguishable from a final line with a newline. """ ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL} - line = [] + line: List[Tuple[str, str]] = [] col = 0 source = source.expandtabs(8).replace('\r\n', '\n') @@ -110,7 +117,7 @@ def source_token_lines(source): if env.PYBEHAVIOR.soft_keywords: match_case_lines = MatchCaseFinder(source).match_case_lines - for ttype, ttext, (sline, scol), (_, ecol), _ in phys_tokens(tokgen): + for ttype, ttext, (sline, scol), (_, ecol), _ in _phys_tokens(tokgen): mark_start = True for part in re.split('(\n)', ttext): if part == '\n': @@ -131,17 +138,20 @@ def source_token_lines(source): if keyword.iskeyword(ttext): # Hard keywords are always keywords. tok_class = "key" - elif env.PYBEHAVIOR.soft_keywords and keyword.issoftkeyword(ttext): - # Soft keywords appear at the start of the line, on lines that start - # match or case statements. - if len(line) == 0: - is_start_of_line = True - elif (len(line) == 1) and line[0][0] == "ws": - is_start_of_line = True - else: - is_start_of_line = False - if is_start_of_line and sline in match_case_lines: - tok_class = "key" + elif sys.version_info >= (3, 10): # PYVERSIONS + # Need the version_info check to keep mypy from borking + # on issoftkeyword here. + if env.PYBEHAVIOR.soft_keywords and keyword.issoftkeyword(ttext): + # Soft keywords appear at the start of the line, + # on lines that start match or case statements. + if len(line) == 0: + is_start_of_line = True + elif (len(line) == 1) and line[0][0] == "ws": + is_start_of_line = True + else: + is_start_of_line = False + if is_start_of_line and sline in match_case_lines: + tok_class = "key" line.append((tok_class, part)) mark_end = True scol = 0 @@ -163,16 +173,15 @@ class CachedTokenizer: actually tokenize twice. """ - def __init__(self): - self.last_text = None - self.last_tokens = None + def __init__(self) -> None: + self.last_text: Optional[str] = None + self.last_tokens: List[tokenize.TokenInfo] = [] - @contract(text='unicode') - def generate_tokens(self, text): + def generate_tokens(self, text: str) -> TokenInfos: """A stand-in for `tokenize.generate_tokens`.""" if text != self.last_text: self.last_text = text - readline = iter(text.splitlines(True)).__next__ + readline = io.StringIO(text).readline try: self.last_tokens = list(tokenize.generate_tokens(readline)) except: @@ -184,10 +193,7 @@ def generate_tokens(self, text): generate_tokens = CachedTokenizer().generate_tokens -COOKIE_RE = re.compile(r"^[ \t]*#.*coding[:=][ \t]*([-\w.]+)", flags=re.MULTILINE) - -@contract(source='bytes') -def source_encoding(source): +def source_encoding(source: bytes) -> str: """Determine the encoding for `source`, according to PEP 263. `source` is a byte string: the text of the program. @@ -197,31 +203,3 @@ def source_encoding(source): """ readline = iter(source.splitlines(True)).__next__ return tokenize.detect_encoding(readline)[0] - - -@contract(source='unicode') -def compile_unicode(source, filename, mode): - """Just like the `compile` builtin, but works on any Unicode string. 
- - Python 2's compile() builtin has a stupid restriction: if the source string - is Unicode, then it may not have a encoding declaration in it. Why not? - Who knows! It also decodes to utf-8, and then tries to interpret those - utf-8 bytes according to the encoding declaration. Why? Who knows! - - This function neuters the coding declaration, and compiles it. - - """ - source = neuter_encoding_declaration(source) - code = compile(source, filename, mode) - return code - - -@contract(source='unicode', returns='unicode') -def neuter_encoding_declaration(source): - """Return `source`, with any encoding declaration neutered.""" - if COOKIE_RE.search(source): - source_lines = source.splitlines(True) - for lineno in range(min(2, len(source_lines))): - source_lines[lineno] = COOKIE_RE.sub("# (deleted declaration)", source_lines[lineno]) - source = "".join(source_lines) - return source diff --git a/coverage/plugin.py b/coverage/plugin.py index bf30b1b73..ccc33337a 100644 --- a/coverage/plugin.py +++ b/coverage/plugin.py @@ -112,16 +112,25 @@ def coverage_init(reg, options): """ +from __future__ import annotations + import functools +from types import FrameType +from typing import Any, Dict, Iterable, Optional, Set, Tuple, Union + from coverage import files -from coverage.misc import contract, _needs_to_implement +from coverage.misc import _needs_to_implement +from coverage.types import TArc, TConfigurable, TLineNo, TSourceTokenLines class CoveragePlugin: """Base class for coverage.py plug-ins.""" - def file_tracer(self, filename): # pylint: disable=unused-argument + _coverage_plugin_name: str + _coverage_enabled: bool + + def file_tracer(self, filename: str) -> Optional[FileTracer]: # pylint: disable=unused-argument """Get a :class:`FileTracer` object for a file. Plug-in type: file tracer. @@ -161,7 +170,10 @@ def file_tracer(self, filename): # pylint: disable=unused-argument """ return None - def file_reporter(self, filename): # pylint: disable=unused-argument + def file_reporter( # type: ignore[return] + self, + filename: str, # pylint: disable=unused-argument + ) -> Union[FileReporter, str]: # str should be Literal["python"] """Get the :class:`FileReporter` class to use for a file. Plug-in type: file tracer. @@ -175,7 +187,10 @@ def file_reporter(self, filename): # pylint: disable=unused-argument """ _needs_to_implement(self, "file_reporter") - def dynamic_context(self, frame): # pylint: disable=unused-argument + def dynamic_context( + self, + frame: FrameType, # pylint: disable=unused-argument + ) -> Optional[str]: """Get the dynamically computed context label for `frame`. Plug-in type: dynamic context. @@ -191,7 +206,10 @@ def dynamic_context(self, frame): # pylint: disable=unused-argument """ return None - def find_executable_files(self, src_dir): # pylint: disable=unused-argument + def find_executable_files( + self, + src_dir: str, # pylint: disable=unused-argument + ) -> Iterable[str]: """Yield all of the executable files in `src_dir`, recursively. Plug-in type: file tracer. @@ -206,7 +224,7 @@ def find_executable_files(self, src_dir): # pylint: disable=unused-argumen """ return [] - def configure(self, config): + def configure(self, config: TConfigurable) -> None: """Modify the configuration of coverage.py. Plug-in type: configurer. @@ -220,7 +238,7 @@ def configure(self, config): """ pass - def sys_info(self): + def sys_info(self) -> Iterable[Tuple[str, Any]]: """Get a list of information useful for debugging. Plug-in type: any. 
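To make the annotated hook contract concrete, here is a hypothetical configurer plug-in; only the hook names and signatures (`configure`, `sys_info`, `coverage_init`, `add_configurer`) come from the API above, while the class name, the option touched, and the regex are invented for illustration:

```python
# Hypothetical configurer plug-in; only the hook names and signatures come
# from the annotated API above (coverage.plugin, coverage.types).
from typing import Any, Iterable, Tuple

from coverage.plugin import CoveragePlugin
from coverage.types import TConfigurable


class ExcludeMarkerPlugin(CoveragePlugin):
    """Append one (made-up) exclusion regex via the configurer hook."""

    def configure(self, config: TConfigurable) -> None:
        exclude = config.get_option("report:exclude_lines")
        assert isinstance(exclude, list)
        exclude.append(r"pragma: never")  # hypothetical marker
        config.set_option("report:exclude_lines", exclude)

    def sys_info(self) -> Iterable[Tuple[str, Any]]:
        return [("exclude_marker", "active")]


def coverage_init(reg: Any, options: Any) -> None:
    reg.add_configurer(ExcludeMarkerPlugin())
```

The `TConfigurable` protocol is what `configure` receives; `get_option`/`set_option` take the same `"section:name"` strings as the configuration file.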
@@ -234,7 +252,12 @@ def sys_info(self): return [] -class FileTracer: +class CoveragePluginBase: + """Plugins produce specialized objects, which point back to the original plugin.""" + _coverage_plugin: CoveragePlugin + + +class FileTracer(CoveragePluginBase): """Support needed for files during the execution phase. File tracer plug-ins implement subclasses of FileTracer to return from @@ -251,7 +274,7 @@ class FileTracer: """ - def source_filename(self): + def source_filename(self) -> str: # type: ignore[return] """The source file name for this file. This may be any file name you like. A key responsibility of a plug-in @@ -266,7 +289,7 @@ def source_filename(self): """ _needs_to_implement(self, "source_filename") - def has_dynamic_source_filename(self): + def has_dynamic_source_filename(self) -> bool: """Does this FileTracer have dynamic source file names? FileTracers can provide dynamically determined file names by @@ -284,7 +307,11 @@ def has_dynamic_source_filename(self): """ return False - def dynamic_source_filename(self, filename, frame): # pylint: disable=unused-argument + def dynamic_source_filename( + self, + filename: str, # pylint: disable=unused-argument + frame: FrameType, # pylint: disable=unused-argument + ) -> Optional[str]: """Get a dynamically computed source file name. Some plug-ins need to compute the source file name dynamically for each @@ -299,7 +326,7 @@ def dynamic_source_filename(self, filename, frame): # pylint: disable=unused """ return None - def line_number_range(self, frame): + def line_number_range(self, frame: FrameType) -> Tuple[TLineNo, TLineNo]: """Get the range of source line numbers for a given a call frame. The call frame is examined, and the source line number in the original @@ -318,7 +345,7 @@ def line_number_range(self, frame): @functools.total_ordering -class FileReporter: +class FileReporter(CoveragePluginBase): """Support needed for files during the analysis and reporting phases. File tracer plug-ins implement a subclass of `FileReporter`, and return @@ -331,7 +358,7 @@ class FileReporter: """ - def __init__(self, filename): + def __init__(self, filename: str) -> None: """Simple initialization of a `FileReporter`. The `filename` argument is the path to the file being reported. This @@ -341,10 +368,10 @@ def __init__(self, filename): """ self.filename = filename - def __repr__(self): + def __repr__(self) -> str: return "<{0.__class__.__name__} filename={0.filename!r}>".format(self) - def relative_filename(self): + def relative_filename(self) -> str: """Get the relative file name for this file. This file path will be displayed in reports. The default @@ -355,8 +382,7 @@ def relative_filename(self): """ return files.relative_filename(self.filename) - @contract(returns='unicode') - def source(self): + def source(self) -> str: """Get the source for the file. Returns a Unicode string. @@ -366,10 +392,10 @@ def source(self): as a text file, or if you need other encoding support. """ - with open(self.filename, "rb") as f: - return f.read().decode("utf-8") + with open(self.filename, encoding="utf-8") as f: + return f.read() - def lines(self): + def lines(self) -> Set[TLineNo]: # type: ignore[return] """Get the executable lines in this file. Your plug-in must determine which lines in the file were possibly @@ -380,7 +406,7 @@ def lines(self): """ _needs_to_implement(self, "lines") - def excluded_lines(self): + def excluded_lines(self) -> Set[TLineNo]: """Get the excluded executable lines in this file. 
Your plug-in can use any method it likes to allow the user to exclude @@ -393,7 +419,7 @@ def excluded_lines(self): """ return set() - def translate_lines(self, lines): + def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]: """Translate recorded lines into reported lines. Some file formats will want to report lines slightly differently than @@ -413,7 +439,7 @@ def translate_lines(self, lines): """ return set(lines) - def arcs(self): + def arcs(self) -> Set[TArc]: """Get the executable arcs in this file. To support branch coverage, your plug-in needs to be able to indicate @@ -427,7 +453,7 @@ def arcs(self): """ return set() - def no_branch_lines(self): + def no_branch_lines(self) -> Set[TLineNo]: """Get the lines excused from branch coverage in this file. Your plug-in can use any method it likes to allow the user to exclude @@ -440,7 +466,7 @@ def no_branch_lines(self): """ return set() - def translate_arcs(self, arcs): + def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]: """Translate recorded arcs into reported arcs. Similar to :meth:`translate_lines`, but for arcs. `arcs` is a set of @@ -451,9 +477,9 @@ def translate_arcs(self, arcs): The default implementation returns `arcs` unchanged. """ - return arcs + return set(arcs) - def exit_counts(self): + def exit_counts(self) -> Dict[TLineNo, int]: """Get a count of exits from that each line. To determine which lines are branches, coverage.py looks for lines that @@ -466,7 +492,12 @@ def exit_counts(self): """ return {} - def missing_arc_description(self, start, end, executed_arcs=None): # pylint: disable=unused-argument + def missing_arc_description( + self, + start: TLineNo, + end: TLineNo, + executed_arcs: Optional[Set[TArc]]=None, # pylint: disable=unused-argument + ) -> str: """Provide an English sentence describing a missing arc. The `start` and `end` arguments are the line numbers of the missing @@ -481,7 +512,7 @@ def missing_arc_description(self, start, end, executed_arcs=None): # pylint: """ return f"Line {start} didn't jump to line {end}" - def source_token_lines(self): + def source_token_lines(self) -> TSourceTokenLines: """Generate a series of tokenized lines, one for each line in `source`. These tokens are used for syntax-colored reports. @@ -512,10 +543,11 @@ def source_token_lines(self): for line in self.source().splitlines(): yield [('txt', line)] - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: return isinstance(other, FileReporter) and self.filename == other.filename - def __lt__(self, other): + def __lt__(self, other: Any) -> bool: return isinstance(other, FileReporter) and self.filename < other.filename - __hash__ = None # This object doesn't need to be hashed. + # This object doesn't need to be hashed. 
+ __hash__ = None # type: ignore[assignment] diff --git a/coverage/plugin_support.py b/coverage/plugin_support.py index 0b8923918..8ac424913 100644 --- a/coverage/plugin_support.py +++ b/coverage/plugin_support.py @@ -3,13 +3,20 @@ """Support for plugins.""" +from __future__ import annotations + import os import os.path import sys +from types import FrameType +from typing import Any, Dict, Iterable, Iterator, List, Optional, Set, Tuple, Union + +from coverage.config import CoverageConfig from coverage.exceptions import PluginError from coverage.misc import isolate_module from coverage.plugin import CoveragePlugin, FileTracer, FileReporter +from coverage.types import TArc, TConfigurable, TDebugCtl, TLineNo, TSourceTokenLines os = isolate_module(os) @@ -17,18 +24,23 @@ class Plugins: """The currently loaded collection of coverage.py plugins.""" - def __init__(self): - self.order = [] - self.names = {} - self.file_tracers = [] - self.configurers = [] - self.context_switchers = [] + def __init__(self) -> None: + self.order: List[CoveragePlugin] = [] + self.names: Dict[str, CoveragePlugin] = {} + self.file_tracers: List[CoveragePlugin] = [] + self.configurers: List[CoveragePlugin] = [] + self.context_switchers: List[CoveragePlugin] = [] - self.current_module = None - self.debug = None + self.current_module: Optional[str] = None + self.debug: Optional[TDebugCtl] @classmethod - def load_plugins(cls, modules, config, debug=None): + def load_plugins( + cls, + modules: Iterable[str], + config: CoverageConfig, + debug: Optional[TDebugCtl]=None, + ) -> Plugins: """Load plugins from `modules`. Returns a Plugins object with the loaded and configured plugins. @@ -54,7 +66,7 @@ def load_plugins(cls, modules, config, debug=None): plugins.current_module = None return plugins - def add_file_tracer(self, plugin): + def add_file_tracer(self, plugin: CoveragePlugin) -> None: """Add a file tracer plugin. `plugin` is an instance of a third-party plugin class. It must @@ -63,7 +75,7 @@ def add_file_tracer(self, plugin): """ self._add_plugin(plugin, self.file_tracers) - def add_configurer(self, plugin): + def add_configurer(self, plugin: CoveragePlugin) -> None: """Add a configuring plugin. `plugin` is an instance of a third-party plugin class. It must @@ -72,7 +84,7 @@ def add_configurer(self, plugin): """ self._add_plugin(plugin, self.configurers) - def add_dynamic_context(self, plugin): + def add_dynamic_context(self, plugin: CoveragePlugin) -> None: """Add a dynamic context plugin. `plugin` is an instance of a third-party plugin class. It must @@ -81,7 +93,7 @@ def add_dynamic_context(self, plugin): """ self._add_plugin(plugin, self.context_switchers) - def add_noop(self, plugin): + def add_noop(self, plugin: CoveragePlugin) -> None: """Add a plugin that does nothing. This is only useful for testing the plugin support. @@ -89,7 +101,11 @@ def add_noop(self, plugin): """ self._add_plugin(plugin, None) - def _add_plugin(self, plugin, specialized): + def _add_plugin( + self, + plugin: CoveragePlugin, + specialized: Optional[List[CoveragePlugin]], + ) -> None: """Add a plugin object. `plugin` is a :class:`CoveragePlugin` instance to add. 
`specialized` @@ -102,7 +118,6 @@ def _add_plugin(self, plugin, specialized): labelled = LabelledDebug(f"plugin {self.current_module!r}", self.debug) plugin = DebugPluginWrapper(plugin, labelled) - # pylint: disable=attribute-defined-outside-init plugin._coverage_plugin_name = plugin_name plugin._coverage_enabled = True self.order.append(plugin) @@ -110,13 +125,13 @@ def _add_plugin(self, plugin, specialized): if specialized is not None: specialized.append(plugin) - def __bool__(self): + def __bool__(self) -> bool: return bool(self.order) - def __iter__(self): + def __iter__(self) -> Iterator[CoveragePlugin]: return iter(self.order) - def get(self, plugin_name): + def get(self, plugin_name: str) -> CoveragePlugin: """Return a plugin by name.""" return self.names[plugin_name] @@ -124,20 +139,20 @@ def get(self, plugin_name): class LabelledDebug: """A Debug writer, but with labels for prepending to the messages.""" - def __init__(self, label, debug, prev_labels=()): + def __init__(self, label: str, debug: TDebugCtl, prev_labels: Iterable[str]=()): self.labels = list(prev_labels) + [label] self.debug = debug - def add_label(self, label): + def add_label(self, label: str) -> LabelledDebug: """Add a label to the writer, and return a new `LabelledDebug`.""" return LabelledDebug(label, self.debug, self.labels) - def message_prefix(self): + def message_prefix(self) -> str: """The prefix to use on messages, combining the labels.""" prefixes = self.labels + [''] return ":\n".join(" "*i+label for i, label in enumerate(prefixes)) - def write(self, message): + def write(self, message: str) -> None: """Write `message`, but with the labels prepended.""" self.debug.write(f"{self.message_prefix()}{message}") @@ -145,12 +160,12 @@ def write(self, message): class DebugPluginWrapper(CoveragePlugin): """Wrap a plugin, and use debug to report on what it's doing.""" - def __init__(self, plugin, debug): + def __init__(self, plugin: CoveragePlugin, debug: LabelledDebug) -> None: super().__init__() self.plugin = plugin self.debug = debug - def file_tracer(self, filename): + def file_tracer(self, filename: str) -> Optional[FileTracer]: tracer = self.plugin.file_tracer(filename) self.debug.write(f"file_tracer({filename!r}) --> {tracer!r}") if tracer: @@ -158,64 +173,65 @@ def file_tracer(self, filename): tracer = DebugFileTracerWrapper(tracer, debug) return tracer - def file_reporter(self, filename): + def file_reporter(self, filename: str) -> Union[FileReporter, str]: reporter = self.plugin.file_reporter(filename) + assert isinstance(reporter, FileReporter) self.debug.write(f"file_reporter({filename!r}) --> {reporter!r}") if reporter: debug = self.debug.add_label(f"file {filename!r}") reporter = DebugFileReporterWrapper(filename, reporter, debug) return reporter - def dynamic_context(self, frame): + def dynamic_context(self, frame: FrameType) -> Optional[str]: context = self.plugin.dynamic_context(frame) self.debug.write(f"dynamic_context({frame!r}) --> {context!r}") return context - def find_executable_files(self, src_dir): + def find_executable_files(self, src_dir: str) -> Iterable[str]: executable_files = self.plugin.find_executable_files(src_dir) self.debug.write(f"find_executable_files({src_dir!r}) --> {executable_files!r}") return executable_files - def configure(self, config): + def configure(self, config: TConfigurable) -> None: self.debug.write(f"configure({config!r})") self.plugin.configure(config) - def sys_info(self): + def sys_info(self) -> Iterable[Tuple[str, Any]]: return self.plugin.sys_info() class 
DebugFileTracerWrapper(FileTracer): """A debugging `FileTracer`.""" - def __init__(self, tracer, debug): + def __init__(self, tracer: FileTracer, debug: LabelledDebug) -> None: self.tracer = tracer self.debug = debug - def _show_frame(self, frame): + def _show_frame(self, frame: FrameType) -> str: """A short string identifying a frame, for debug messages.""" return "%s@%d" % ( os.path.basename(frame.f_code.co_filename), frame.f_lineno, ) - def source_filename(self): + def source_filename(self) -> str: sfilename = self.tracer.source_filename() self.debug.write(f"source_filename() --> {sfilename!r}") return sfilename - def has_dynamic_source_filename(self): + def has_dynamic_source_filename(self) -> bool: has = self.tracer.has_dynamic_source_filename() self.debug.write(f"has_dynamic_source_filename() --> {has!r}") return has - def dynamic_source_filename(self, filename, frame): + def dynamic_source_filename(self, filename: str, frame: FrameType) -> Optional[str]: dyn = self.tracer.dynamic_source_filename(filename, frame) self.debug.write("dynamic_source_filename({!r}, {}) --> {!r}".format( filename, self._show_frame(frame), dyn, )) return dyn - def line_number_range(self, frame): + def line_number_range(self, frame: FrameType) -> Tuple[TLineNo, TLineNo]: pair = self.tracer.line_number_range(frame) self.debug.write(f"line_number_range({self._show_frame(frame)}) --> {pair!r}") return pair @@ -224,57 +240,57 @@ def line_number_range(self, frame): class DebugFileReporterWrapper(FileReporter): """A debugging `FileReporter`.""" - def __init__(self, filename, reporter, debug): + def __init__(self, filename: str, reporter: FileReporter, debug: LabelledDebug) -> None: super().__init__(filename) self.reporter = reporter self.debug = debug - def relative_filename(self): + def relative_filename(self) -> str: ret = self.reporter.relative_filename() self.debug.write(f"relative_filename() --> {ret!r}") return ret - def lines(self): + def lines(self) -> Set[TLineNo]: ret = self.reporter.lines() self.debug.write(f"lines() --> {ret!r}") return ret - def excluded_lines(self): + def excluded_lines(self) -> Set[TLineNo]: ret = self.reporter.excluded_lines() self.debug.write(f"excluded_lines() --> {ret!r}") return ret - def translate_lines(self, lines): + def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]: ret = self.reporter.translate_lines(lines) self.debug.write(f"translate_lines({lines!r}) --> {ret!r}") return ret - def translate_arcs(self, arcs): + def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]: ret = self.reporter.translate_arcs(arcs) self.debug.write(f"translate_arcs({arcs!r}) --> {ret!r}") return ret - def no_branch_lines(self): + def no_branch_lines(self) -> Set[TLineNo]: ret = self.reporter.no_branch_lines() self.debug.write(f"no_branch_lines() --> {ret!r}") return ret - def exit_counts(self): + def exit_counts(self) -> Dict[TLineNo, int]: ret = self.reporter.exit_counts() self.debug.write(f"exit_counts() --> {ret!r}") return ret - def arcs(self): + def arcs(self) -> Set[TArc]: ret = self.reporter.arcs() self.debug.write(f"arcs() --> {ret!r}") return ret - def source(self): + def source(self) -> str: ret = self.reporter.source() self.debug.write("source() --> %d chars" % (len(ret),)) return ret - def source_token_lines(self): + def source_token_lines(self) -> TSourceTokenLines: ret = list(self.reporter.source_token_lines()) self.debug.write("source_token_lines() --> %d tokens" % (len(ret),)) return ret diff --git a/coverage/python.py b/coverage/python.py index 
da43e6e8b..2d2faa149 100644 --- a/coverage/python.py +++ b/coverage/python.py @@ -3,23 +3,30 @@ """Python source expertise for coverage.py""" +from __future__ import annotations + import os.path import types import zipimport +from typing import cast, Dict, Iterable, Optional, Set, TYPE_CHECKING + from coverage import env from coverage.exceptions import CoverageException, NoSource -from coverage.files import canonical_filename, relative_filename -from coverage.misc import contract, expensive, isolate_module, join_regex +from coverage.files import canonical_filename, relative_filename, zip_location +from coverage.misc import expensive, isolate_module, join_regex from coverage.parser import PythonParser from coverage.phystokens import source_token_lines, source_encoding from coverage.plugin import FileReporter +from coverage.types import TArc, TLineNo, TMorf, TSourceTokenLines + +if TYPE_CHECKING: + from coverage import Coverage os = isolate_module(os) -@contract(returns='bytes') -def read_python_source(filename): +def read_python_source(filename: str) -> bytes: """Read the Python source text from `filename`. Returns bytes. @@ -28,15 +35,10 @@ def read_python_source(filename): with open(filename, "rb") as f: source = f.read() - if env.IRONPYTHON: - # IronPython reads Unicode strings even for "rb" files. - source = bytes(source) - return source.replace(b"\r\n", b"\n").replace(b"\r", b"\n") -@contract(returns='unicode') -def get_python_source(filename): +def get_python_source(filename: str) -> str: """Return the source code, as unicode.""" base, ext = os.path.splitext(filename) if ext == ".py" and env.WINDOWS: @@ -44,24 +46,25 @@ def get_python_source(filename): else: exts = [ext] + source_bytes: Optional[bytes] for ext in exts: try_filename = base + ext if os.path.exists(try_filename): # A regular text file: open it. - source = read_python_source(try_filename) + source_bytes = read_python_source(try_filename) break # Maybe it's in a zip file? - source = get_zip_bytes(try_filename) - if source is not None: + source_bytes = get_zip_bytes(try_filename) + if source_bytes is not None: break else: # Couldn't find source. raise NoSource(f"No source for code: '{filename}'.") # Replace \f because of http://bugs.python.org/issue19035 - source = source.replace(b'\f', b' ') - source = source.decode(source_encoding(source), "replace") + source_bytes = source_bytes.replace(b'\f', b' ') + source = source_bytes.decode(source_encoding(source_bytes), "replace") # Python code should always end with a line with a newline. if source and source[-1] != '\n': @@ -70,8 +73,7 @@ def get_python_source(filename): return source -@contract(returns='bytes|None') -def get_zip_bytes(filename): +def get_zip_bytes(filename: str) -> Optional[bytes]: """Get data from `filename` if it is a zip file path. Returns the bytestring data read from the zip file, or None if no zip file @@ -79,23 +81,23 @@ def get_zip_bytes(filename): an empty string if the file is empty. 
""" - markers = ['.zip'+os.sep, '.egg'+os.sep, '.pex'+os.sep] - for marker in markers: - if marker in filename: - parts = filename.split(marker) - try: - zi = zipimport.zipimporter(parts[0]+marker[:-1]) - except zipimport.ZipImportError: - continue - try: - data = zi.get_data(parts[1]) - except OSError: - continue - return data + zipfile_inner = zip_location(filename) + if zipfile_inner is not None: + zipfile, inner = zipfile_inner + try: + zi = zipimport.zipimporter(zipfile) + except zipimport.ZipImportError: + return None + try: + # typeshed is wrong for get_data: https://github.com/python/typeshed/pull/9428 + data = cast(bytes, zi.get_data(inner)) + except OSError: + return None + return data return None -def source_for_file(filename): +def source_for_file(filename: str) -> str: """Return the source filename for `filename`. Given a file name being traced, return the best guess as to the source @@ -120,15 +122,11 @@ def source_for_file(filename): # Didn't find source, but it's probably the .py file we want. return py_filename - elif filename.endswith("$py.class"): - # Jython is easy to guess. - return filename[:-9] + ".py" - # No idea, just use the file name as-is. return filename -def source_for_morf(morf): +def source_for_morf(morf: TMorf) -> str: """Get the source filename for the module-or-file `morf`.""" if hasattr(morf, '__file__') and morf.__file__: filename = morf.__file__ @@ -146,12 +144,19 @@ def source_for_morf(morf): class PythonFileReporter(FileReporter): """Report support for a Python file.""" - def __init__(self, morf, coverage=None): + def __init__(self, morf: TMorf, coverage: Optional[Coverage]=None) -> None: self.coverage = coverage filename = source_for_morf(morf) - super().__init__(canonical_filename(filename)) + fname = filename + canonicalize = True + if self.coverage is not None: + if self.coverage.config.relative_files: + canonicalize = False + if canonicalize: + fname = canonical_filename(filename) + super().__init__(fname) if hasattr(morf, '__name__'): name = morf.__name__.replace(".", os.sep) @@ -162,20 +167,20 @@ def __init__(self, morf, coverage=None): name = relative_filename(filename) self.relname = name - self._source = None - self._parser = None + self._source: Optional[str] = None + self._parser: Optional[PythonParser] = None self._excluded = None - def __repr__(self): + def __repr__(self) -> str: return f"" - @contract(returns='unicode') - def relative_filename(self): + def relative_filename(self) -> str: return self.relname @property - def parser(self): + def parser(self) -> PythonParser: """Lazily create a :class:`PythonParser`.""" + assert self.coverage is not None if self._parser is None: self._parser = PythonParser( filename=self.filename, @@ -184,22 +189,23 @@ def parser(self): self._parser.parse_source() return self._parser - def lines(self): + def lines(self) -> Set[TLineNo]: """Return the line numbers of statements in the file.""" return self.parser.statements - def excluded_lines(self): + def excluded_lines(self) -> Set[TLineNo]: """Return the line numbers of statements in the file.""" return self.parser.excluded - def translate_lines(self, lines): + def translate_lines(self, lines: Iterable[TLineNo]) -> Set[TLineNo]: return self.parser.translate_lines(lines) - def translate_arcs(self, arcs): + def translate_arcs(self, arcs: Iterable[TArc]) -> Set[TArc]: return self.parser.translate_arcs(arcs) @expensive - def no_branch_lines(self): + def no_branch_lines(self) -> Set[TLineNo]: + assert self.coverage is not None no_branch = 
self.parser.lines_matching( join_regex(self.coverage.config.partial_list), join_regex(self.coverage.config.partial_always_list), @@ -207,23 +213,27 @@ def no_branch_lines(self): return no_branch @expensive - def arcs(self): + def arcs(self) -> Set[TArc]: return self.parser.arcs() @expensive - def exit_counts(self): + def exit_counts(self) -> Dict[TLineNo, int]: return self.parser.exit_counts() - def missing_arc_description(self, start, end, executed_arcs=None): + def missing_arc_description( + self, + start: TLineNo, + end: TLineNo, + executed_arcs: Optional[Set[TArc]]=None, + ) -> str: return self.parser.missing_arc_description(start, end, executed_arcs) - @contract(returns='unicode') - def source(self): + def source(self) -> str: if self._source is None: self._source = get_python_source(self.filename) return self._source - def should_be_python(self): + def should_be_python(self) -> bool: """Does it seem like this file should contain Python? This is used to decide if a file reported as part of the execution of @@ -243,5 +253,5 @@ def should_be_python(self): # Everything else is probably not Python. return False - def source_token_lines(self): + def source_token_lines(self) -> TSourceTokenLines: return source_token_lines(self.source()) diff --git a/coverage/pytracer.py b/coverage/pytracer.py index 4f138074b..027e8e7e0 100644 --- a/coverage/pytracer.py +++ b/coverage/pytracer.py @@ -7,7 +7,11 @@ import dis import sys +from types import FrameType +from typing import Any, Callable, Dict, Optional + from coverage import env +from coverage.types import TFileDisposition, TTraceData, TTraceFn, TTracer, TWarnFn # We need the YIELD_VALUE opcode below, in a comparison-friendly form. RESUME = dis.opmap.get('RESUME') @@ -22,7 +26,7 @@ THIS_FILE = __file__.rstrip("co") -class PyTracer: +class PyTracer(TTracer): """Python implementation of the raw data tracer.""" # Because of poor implementations of trace-function-manipulating tools, @@ -41,21 +45,24 @@ class PyTracer: # PyTracer to get accurate results. The command-line --timid argument is # used to force the use of this tracer. - def __init__(self): + def __init__(self) -> None: + # pylint: disable=super-init-not-called # Attributes set from the collector: - self.data = None + self.data: TTraceData self.trace_arcs = False - self.should_trace = None - self.should_trace_cache = None - self.should_start_context = None - self.warn = None + self.should_trace: Callable[[str, FrameType], TFileDisposition] + self.should_trace_cache: Dict[str, Optional[TFileDisposition]] + self.should_start_context: Optional[Callable[[FrameType], Optional[str]]] = None + self.switch_context: Optional[Callable[[Optional[str]], None]] = None + self.warn: TWarnFn + # The threading module to use, if any. self.threading = None self.cur_file_data = None self.last_line = 0 # int, but uninitialized. - self.cur_file_name = None - self.context = None + self.cur_file_name: Optional[str] = None + self.context: Optional[str] = None self.started_context = False self.data_stack = [] @@ -71,14 +78,13 @@ def __init__(self): # re-create a bound method object all the time. 
self._cached_bound_method_trace = self._trace - def __repr__(self): - return "<PyTracer at {}: {} lines in {} files>".format( - id(self), - sum(len(v) for v in self.data.values()), - len(self.data), - ) + def __repr__(self) -> str: + me = id(self) + points = sum(len(v) for v in self.data.values()) + files = len(self.data) + return f"<PyTracer at 0x{me:x}: {points} data points in {files} files>" - def log(self, marker, *args): + def log(self, marker: str, *args: Any) -> None: """For hard-core logging of what this tracer is doing.""" with open("/tmp/debug_trace.txt", "a") as f: f.write("{} {}[{}]".format( @@ -87,13 +93,13 @@ def log(self, marker, *args): len(self.data_stack), )) if 0: # if you want thread ids.. - f.write(".{:x}.{:x}".format( + f.write(".{:x}.{:x}".format( # type: ignore[unreachable] self.thread.ident, self.threading.current_thread().ident, )) f.write(" {}".format(" ".join(map(str, args)))) if 0: # if you want callers.. - f.write(" | ") + f.write(" | ") # type: ignore[unreachable] stack = " / ".join( (fname or "???").rpartition("/")[-1] for _, fname, _, _ in self.data_stack @@ -101,7 +107,7 @@ def log(self, marker, *args): f.write(stack) f.write("\n") - def _trace(self, frame, event, arg_unused): + def _trace(self, frame: FrameType, event: str, arg_unused: Any) -> Optional[TTraceFn]: """The trace function passed to sys.settrace.""" if THIS_FILE in frame.f_code.co_filename: @@ -113,8 +119,8 @@ def _trace(self, frame, event, arg_unused): # The PyTrace.stop() method has been called, possibly by another # thread, let's deactivate ourselves now. if 0: - self.log("---\nX", frame.f_code.co_filename, frame.f_lineno) - f = frame + f = frame # type: ignore[unreachable] + self.log("---\nX", f.f_code.co_filename, f.f_lineno) while f: self.log(">", f.f_code.co_filename, f.f_lineno, f.f_code.co_name, f.f_trace) f = f.f_back @@ -134,6 +140,7 @@ def _trace(self, frame, event, arg_unused): if context_maybe is not None: self.context = context_maybe started_context = True + assert self.switch_context is not None self.switch_context(self.context) else: started_context = False @@ -169,6 +176,7 @@ def _trace(self, frame, event, arg_unused): self.cur_file_data = None if disp.trace: tracename = disp.source_filename + assert tracename is not None if tracename not in self.data: self.data[tracename] = set() self.cur_file_data = self.data[tracename] @@ -242,7 +250,7 @@ def _trace(self, frame, event, arg_unused): self.switch_context(None) return self._cached_bound_method_trace - def start(self): + def start(self) -> TTraceFn: """Start this Tracer. Return a Python function suitable for use with sys.settrace(). @@ -263,7 +271,7 @@ def start(self): sys.settrace(self._cached_bound_method_trace) return self._cached_bound_method_trace - def stop(self): + def stop(self) -> None: """Stop this Tracer.""" # Get the active tracer callback before setting the stop flag to be # able to detect if the tracer was changed prior to stopping it. 
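For readers new to tracing: everything above is an industrial-strength implementation of the `sys.settrace` protocol. A toy tracer, independent of coverage.py and much simpler than PyTracer, shows the bare mechanism the annotations describe:

```python
# Toy stand-in for the sys.settrace protocol PyTracer implements above:
# record every (filename, line) executed, nothing more. No dispositions,
# contexts, or arc tracing.
import sys
from types import FrameType
from typing import Any, Dict, Optional, Set

lines_seen: Dict[str, Set[int]] = {}

def trace(frame: FrameType, event: str, arg: Any) -> Optional[Any]:
    if event == "line":
        lines_seen.setdefault(frame.f_code.co_filename, set()).add(frame.f_lineno)
    # Returning the trace function keeps line events coming for this frame.
    return trace

def demo() -> int:
    x = 1
    return x + 1

sys.settrace(trace)
demo()
sys.settrace(None)
assert any(lines_seen.values())  # demo()'s lines were recorded
```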
@@ -293,14 +301,14 @@ def stop(self): slug="trace-changed", ) - def activity(self): + def activity(self) -> bool: """Has there been any activity?""" return self._activity - def reset_activity(self): + def reset_activity(self) -> None: """Reset the activity() flag.""" self._activity = False - def get_stats(self): + def get_stats(self) -> Optional[Dict[str, int]]: """Return a dictionary of statistics, or None.""" return None diff --git a/coverage/report.py b/coverage/report.py index 6382eb515..74ae18175 100644 --- a/coverage/report.py +++ b/coverage/report.py @@ -3,14 +3,38 @@ """Reporter foundation for coverage.py.""" +from __future__ import annotations + import sys +from typing import Callable, Iterable, Iterator, IO, Optional, Tuple, TYPE_CHECKING + from coverage.exceptions import CoverageException, NoDataError, NotPython -from coverage.files import prep_patterns, FnmatchMatcher +from coverage.files import prep_patterns, GlobMatcher from coverage.misc import ensure_dir_for_file, file_be_gone +from coverage.plugin import FileReporter +from coverage.results import Analysis +from coverage.types import Protocol, TMorf + +if TYPE_CHECKING: + from coverage import Coverage + + +class Reporter(Protocol): + """What we expect of reporters.""" + + report_type: str + + def report(self, morfs: Optional[Iterable[TMorf]], outfile: IO[str]) -> float: + """Generate a report of `morfs`, written to `outfile`.""" -def render_report(output_path, reporter, morfs, msgfn): +def render_report( + output_path: str, + reporter: Reporter, + morfs: Optional[Iterable[TMorf]], + msgfn: Callable[[str], None], +) -> float: """Run a one-file report generator, managing the output file. This function ensures the output file is ready to be written to. Then writes @@ -45,7 +69,10 @@ def render_report(output_path, reporter, morfs, msgfn): msgfn(f"Wrote {reporter.report_type} to {output_path}") -def get_analysis_to_report(coverage, morfs): +def get_analysis_to_report( + coverage: Coverage, + morfs: Optional[Iterable[TMorf]], +) -> Iterator[Tuple[FileReporter, Analysis]]: """Get the files to report on. For each morf in `morfs`, if it should be reported on (based on the omit @@ -57,11 +84,11 @@ def get_analysis_to_report(coverage, morfs): config = coverage.config if config.report_include: - matcher = FnmatchMatcher(prep_patterns(config.report_include), "report_include") + matcher = GlobMatcher(prep_patterns(config.report_include), "report_include") file_reporters = [fr for fr in file_reporters if matcher.match(fr.filename)] if config.report_omit: - matcher = FnmatchMatcher(prep_patterns(config.report_omit), "report_omit") + matcher = GlobMatcher(prep_patterns(config.report_omit), "report_omit") file_reporters = [fr for fr in file_reporters if not matcher.match(fr.filename)] if not file_reporters: @@ -75,7 +102,7 @@ def get_analysis_to_report(coverage, morfs): # explicitly suppress those errors. # NotPython is only raised by PythonFileReporter, which has a # should_be_python() method. 
- if fr.should_be_python(): + if fr.should_be_python(): # type: ignore[attr-defined] if config.ignore_errors: msg = f"Couldn't parse Python file '{fr.filename}'" coverage._warn(msg, slug="couldnt-parse") diff --git a/coverage/results.py b/coverage/results.py index 79439fd9b..4990d4359 100644 --- a/coverage/results.py +++ b/coverage/results.py @@ -3,17 +3,32 @@ """Results of coverage measurement.""" +from __future__ import annotations + import collections -from coverage.debug import SimpleReprMixin +from typing import Callable, Dict, Iterable, List, Optional, Tuple, TYPE_CHECKING + +from coverage.debug import AutoReprMixin from coverage.exceptions import ConfigError -from coverage.misc import contract, nice_pair +from coverage.misc import nice_pair +from coverage.types import TArc, TLineNo + +if TYPE_CHECKING: + from coverage.data import CoverageData + from coverage.plugin import FileReporter class Analysis: """The results of analyzing a FileReporter.""" - def __init__(self, data, precision, file_reporter, file_mapper): + def __init__( + self, + data: CoverageData, + precision: int, + file_reporter: FileReporter, + file_mapper: Callable[[str], str], + ) -> None: self.data = data self.file_reporter = file_reporter self.filename = file_mapper(self.file_reporter.filename) @@ -21,6 +36,7 @@ def __init__(self, data, precision, file_reporter, file_mapper): self.excluded = self.file_reporter.excluded_lines() # Identify missing statements. + executed: Iterable[TLineNo] executed = self.data.lines(self.filename) or [] executed = self.file_reporter.translate_lines(executed) self.executed = executed @@ -51,7 +67,7 @@ def __init__(self, data, precision, file_reporter, file_mapper): n_missing_branches=n_missing_branches, ) - def missing_formatted(self, branches=False): + def missing_formatted(self, branches: bool=False) -> str: """The missing line numbers, formatted nicely. Returns a string like "1-2, 5-11, 13-14". 
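The "1-2, 5-11, 13-14" shape comes from `format_lines`, whose annotated signature appears further below in this file; a quick illustration, not part of the patch:

```python
# Reproducing the "1-2, 5-11, 13-14" format described above with
# format_lines from coverage.results.
from coverage.results import format_lines

statements = set(range(1, 15))                    # lines 1..14 hold statements
missing = {1, 2, 5, 6, 7, 8, 9, 10, 11, 13, 14}   # never executed
assert format_lines(statements, missing) == "1-2, 5-11, 13-14"
```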
@@ -66,25 +82,23 @@ def missing_formatted(self, branches=False): return format_lines(self.statements, self.missing, arcs=arcs) - def has_arcs(self): + def has_arcs(self) -> bool: """Were arcs measured in this result?""" return self.data.has_arcs() - @contract(returns='list(tuple(int, int))') - def arc_possibilities(self): + def arc_possibilities(self) -> List[TArc]: """Returns a sorted list of the arcs in the code.""" return self._arc_possibilities - @contract(returns='list(tuple(int, int))') - def arcs_executed(self): + def arcs_executed(self) -> List[TArc]: """Returns a sorted list of the arcs actually executed in the code.""" + executed: Iterable[TArc] executed = self.data.arcs(self.filename) or [] executed = self.file_reporter.translate_arcs(executed) return sorted(executed) - @contract(returns='list(tuple(int, int))') - def arcs_missing(self): - """Returns a sorted list of the unexecuted arcs in the code.""" + def arcs_missing(self) -> List[TArc]: + """Returns a sorted list of the un-executed arcs in the code.""" possible = self.arc_possibilities() executed = self.arcs_executed() missing = ( @@ -95,8 +109,7 @@ def arcs_missing(self): ) return sorted(missing) - @contract(returns='list(tuple(int, int))') - def arcs_unpredicted(self): + def arcs_unpredicted(self) -> List[TArc]: """Returns a sorted list of the executed arcs missing from the code.""" possible = self.arc_possibilities() executed = self.arcs_executed() @@ -113,16 +126,15 @@ def arcs_unpredicted(self): ) return sorted(unpredicted) - def _branch_lines(self): + def _branch_lines(self) -> List[TLineNo]: """Returns a list of line numbers that have more than one exit.""" return [l1 for l1,count in self.exit_counts.items() if count > 1] - def _total_branches(self): + def _total_branches(self) -> int: """How many total branches are there?""" return sum(count for count in self.exit_counts.values() if count > 1) - @contract(returns='dict(int: list(int))') - def missing_branch_arcs(self): + def missing_branch_arcs(self) -> Dict[TLineNo, List[TLineNo]]: """Return arcs that weren't executed from branch lines. Returns {l1:[l2a,l2b,...], ...} @@ -136,8 +148,7 @@ def missing_branch_arcs(self): mba[l1].append(l2) return mba - @contract(returns='dict(int: list(int))') - def executed_branch_arcs(self): + def executed_branch_arcs(self) -> Dict[TLineNo, List[TLineNo]]: """Return arcs that were executed from branch lines. Returns {l1:[l2a,l2b,...], ...} @@ -151,8 +162,7 @@ def executed_branch_arcs(self): eba[l1].append(l2) return eba - @contract(returns='dict(int: tuple(int, int))') - def branch_stats(self): + def branch_stats(self) -> Dict[TLineNo, Tuple[int, int]]: """Get stats about branches. Returns a dict mapping line numbers to a tuple: @@ -168,7 +178,7 @@ def branch_stats(self): return stats -class Numbers(SimpleReprMixin): +class Numbers(AutoReprMixin): """The numerical results of measuring coverage. 
This holds the basic statistics from `Analysis`, and is used to roll @@ -176,11 +186,17 @@ class Numbers(SimpleReprMixin): """ - def __init__(self, - precision=0, - n_files=0, n_statements=0, n_excluded=0, n_missing=0, - n_branches=0, n_partial_branches=0, n_missing_branches=0 - ): + def __init__( + self, + precision: int=0, + n_files: int=0, + n_statements: int=0, + n_excluded: int=0, + n_missing: int=0, + n_branches: int=0, + n_partial_branches: int=0, + n_missing_branches: int=0, + ) -> None: assert 0 <= precision < 10 self._precision = precision self._near0 = 1.0 / 10**precision @@ -193,7 +209,7 @@ def __init__(self, self.n_partial_branches = n_partial_branches self.n_missing_branches = n_missing_branches - def init_args(self): + def init_args(self) -> List[int]: """Return a list for __init__(*args) to recreate this object.""" return [ self._precision, @@ -202,17 +218,17 @@ def init_args(self): ] @property - def n_executed(self): + def n_executed(self) -> int: """Returns the number of executed statements.""" return self.n_statements - self.n_missing @property - def n_executed_branches(self): + def n_executed_branches(self) -> int: """Returns the number of executed branches.""" return self.n_branches - self.n_missing_branches @property - def pc_covered(self): + def pc_covered(self) -> float: """Returns a single percentage value for coverage.""" if self.n_statements > 0: numerator, denominator = self.ratio_covered @@ -222,7 +238,7 @@ def pc_covered(self): return pc_cov @property - def pc_covered_str(self): + def pc_covered_str(self) -> str: """Returns the percent covered, as a string, without a percent sign. Note that "0" is only returned when the value is truly zero, and "100" @@ -232,7 +248,7 @@ def pc_covered_str(self): """ return self.display_covered(self.pc_covered) - def display_covered(self, pc): + def display_covered(self, pc: float) -> str: """Return a displayable total percentage, as a string. Note that "0" is only returned when the value is truly zero, and "100" @@ -248,7 +264,7 @@ def display_covered(self, pc): pc = round(pc, self._precision) return "%.*f" % (self._precision, pc) - def pc_str_width(self): + def pc_str_width(self) -> int: """How many characters wide can pc_covered_str be?""" width = 3 # "100" if self._precision > 0: @@ -256,13 +272,13 @@ def pc_str_width(self): return width @property - def ratio_covered(self): + def ratio_covered(self) -> Tuple[int, int]: """Return a numerator and denominator for the coverage ratio.""" numerator = self.n_executed + self.n_executed_branches denominator = self.n_statements + self.n_branches return numerator, denominator - def __add__(self, other): + def __add__(self, other: Numbers) -> Numbers: nums = Numbers(precision=self._precision) nums.n_files = self.n_files + other.n_files nums.n_statements = self.n_statements + other.n_statements @@ -277,13 +293,16 @@ def __add__(self, other): ) return nums - def __radd__(self, other): + def __radd__(self, other: int) -> Numbers: # Implementing 0+Numbers allows us to sum() a list of Numbers. assert other == 0 # we only ever call it this way. 
return self -def _line_ranges(statements, lines): +def _line_ranges( + statements: Iterable[TLineNo], + lines: Iterable[TLineNo], +) -> List[Tuple[TLineNo, TLineNo]]: """Produce a list of ranges for `format_lines`.""" statements = sorted(statements) lines = sorted(lines) @@ -307,7 +326,11 @@ def _line_ranges(statements, lines): return pairs -def format_lines(statements, lines, arcs=None): +def format_lines( + statements: Iterable[TLineNo], + lines: Iterable[TLineNo], + arcs: Optional[Iterable[Tuple[TLineNo, List[TLineNo]]]]=None, +) -> str: """Nicely format a list of line numbers. Format a list of line numbers for printing by coalescing groups of lines as @@ -326,7 +349,7 @@ def format_lines(statements, lines, arcs=None): """ line_items = [(pair[0], nice_pair(pair)) for pair in _line_ranges(statements, lines)] - if arcs: + if arcs is not None: line_exits = sorted(arcs) for line, exits in line_exits: for ex in sorted(exits): @@ -338,8 +361,7 @@ def format_lines(statements, lines, arcs=None): return ret -@contract(total='number', fail_under='number', precision=int, returns=bool) -def should_fail_under(total, fail_under, precision): +def should_fail_under(total: float, fail_under: float, precision: int) -> bool: """Determine if a total should fail due to fail-under. `total` is a float, the coverage measurement total. `fail_under` is the diff --git a/coverage/sqldata.py b/coverage/sqldata.py index 5d62b15b1..4baea3319 100644 --- a/coverage/sqldata.py +++ b/coverage/sqldata.py @@ -3,7 +3,10 @@ """SQLite coverage data.""" +from __future__ import annotations + import collections +import contextlib import datetime import functools import glob @@ -18,16 +21,22 @@ import threading import zlib -from coverage.debug import NoDebugging, SimpleReprMixin, clipped_repr +from typing import ( + cast, Any, Callable, Collection, Dict, Generator, Iterable, List, Mapping, Optional, + Sequence, Set, Tuple, TypeVar, Union, +) + +from coverage.debug import NoDebugging, AutoReprMixin, clipped_repr from coverage.exceptions import CoverageException, DataError from coverage.files import PathAliases -from coverage.misc import contract, file_be_gone, isolate_module +from coverage.misc import file_be_gone, isolate_module from coverage.numbits import numbits_to_nums, numbits_union, nums_to_numbits +from coverage.types import TArc, TDebugCtl, TLineNo, TWarnFn from coverage.version import __version__ os = isolate_module(os) -# If you change the schema, increment the SCHEMA_VERSION, and update the +# If you change the schema: increment the SCHEMA_VERSION and update the # docs in docs/dbschema.rst by running "make cogdoc". SCHEMA_VERSION = 7 @@ -52,7 +61,7 @@ key text, value text, unique (key) - -- Keys: + -- Possible keys: -- 'has_arcs' boolean -- Is this data recording branches? -- 'sys_argv' text -- The coverage command line that recorded the data. -- 'version' text -- The version of coverage.py that made the file. 
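A worked note on `should_fail_under`, typed earlier in this section: the total is rounded to the configured precision before the comparison, and `fail_under=100` demands an exact 100. A sketch assuming that implementation:

```python
# Sketch of should_fail_under behavior, assuming the comparison is
# round(total, precision) < fail_under plus an exact-match rule for 100.
from coverage.results import should_fail_under

assert not should_fail_under(total=79.99, fail_under=80, precision=0)  # rounds to 80
assert should_fail_under(total=79.99, fail_under=80, precision=2)      # stays 79.99
assert should_fail_under(total=99.999, fail_under=100, precision=3)    # 100 means exactly 100
```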
@@ -103,7 +112,22 @@ ); """ -class CoverageData(SimpleReprMixin): +TMethod = TypeVar("TMethod", bound=Callable[..., Any]) + +def _locked(method: TMethod) -> TMethod: + """A decorator for methods that should hold self._lock.""" + @functools.wraps(method) + def _wrapped(self: CoverageData, *args: Any, **kwargs: Any) -> Any: + if self._debug.should("lock"): + self._debug.write(f"Locking {self._lock!r} for {method.__name__}") + with self._lock: + if self._debug.should("lock"): + self._debug.write(f"Locked {self._lock!r} for {method.__name__}") + return method(self, *args, **kwargs) + return _wrapped # type: ignore[return-value] + + +class CoverageData(AutoReprMixin): """Manages collected coverage data, including file storage. This class is the public supported API to the data that coverage.py @@ -186,7 +210,14 @@ class CoverageData(SimpleReprMixin): """ - def __init__(self, basename=None, suffix=None, no_disk=False, warn=None, debug=None): + def __init__( + self, + basename: Optional[str]=None, + suffix: Optional[Union[str, bool]]=None, + no_disk: bool=False, + warn: Optional[TWarnFn]=None, + debug: Optional[TDebugCtl]=None, + ) -> None: """Create a :class:`CoverageData` object to hold coverage-measured data. Arguments: @@ -208,9 +239,10 @@ def __init__(self, basename=None, suffix=None, no_disk=False, warn=None, debug=N self._debug = debug or NoDebugging() self._choose_filename() - self._file_map = {} + # Maps filenames to row ids. + self._file_map: Dict[str, int] = {} # Maps thread ids to SqliteDb objects. - self._dbs = {} + self._dbs: Dict[int, SqliteDb] = {} self._pid = os.getpid() # Synchronize the operations used during collection. self._lock = threading.RLock() @@ -221,24 +253,11 @@ def __init__(self, basename=None, suffix=None, no_disk=False, warn=None, debug=N self._has_lines = False self._has_arcs = False - self._current_context = None - self._current_context_id = None - self._query_context_ids = None + self._current_context: Optional[str] = None + self._current_context_id: Optional[int] = None + self._query_context_ids: Optional[List[int]] = None - def _locked(method): # pylint: disable=no-self-argument - """A decorator for methods that should hold self._lock.""" - @functools.wraps(method) - def _wrapped(self, *args, **kwargs): - if self._debug.should("lock"): - self._debug.write(f"Locking {self._lock!r} for {method.__name__}") - with self._lock: - if self._debug.should("lock"): - self._debug.write(f"Locked {self._lock!r} for {method.__name__}") - # pylint: disable=not-callable - return method(self, *args, **kwargs) - return _wrapped - - def _choose_filename(self): + def _choose_filename(self) -> None: """Set self._filename based on inited attributes.""" if self._no_disk: self._filename = ":memory:" @@ -248,7 +267,7 @@ def _choose_filename(self): if suffix: self._filename += "." 
+ suffix

-    def _reset(self):
+    def _reset(self) -> None:
         """Reset our attributes."""
         if not self._no_disk:
             for db in self._dbs.values():
@@ -258,18 +277,19 @@ def _reset(self):
         self._have_used = False
         self._current_context_id = None

-    def _open_db(self):
+    def _open_db(self) -> None:
         """Open an existing db file, and read its metadata."""
         if self._debug.should("dataio"):
             self._debug.write(f"Opening data file {self._filename!r}")
         self._dbs[threading.get_ident()] = SqliteDb(self._filename, self._debug)
         self._read_db()

-    def _read_db(self):
+    def _read_db(self) -> None:
         """Read the metadata from a database so that we are ready to use it."""
         with self._dbs[threading.get_ident()] as db:
             try:
-                schema_version, = db.execute_one("select version from coverage_schema")
+                row = db.execute_one("select version from coverage_schema")
+                assert row is not None
             except Exception as exc:
                 if "no such table: coverage_schema" in str(exc):
                     self._init_db(db)
@@ -280,6 +300,7 @@ def _read_db(self):
                         )
                     ) from exc
             else:
+                schema_version = row[0]
                 if schema_version != SCHEMA_VERSION:
                     raise DataError(
                         "Couldn't use data file {!r}: wrong schema: {} instead of {}".format(
@@ -287,46 +308,51 @@ def _read_db(self):
                         )
                     )

-            for row in db.execute("select value from meta where key = 'has_arcs'"):
+            row = db.execute_one("select value from meta where key = 'has_arcs'")
+            if row is not None:
                 self._has_arcs = bool(int(row[0]))
                 self._has_lines = not self._has_arcs

-            for file_id, path in db.execute("select id, path from file"):
-                self._file_map[path] = file_id
+            with db.execute("select id, path from file") as cur:
+                for file_id, path in cur:
+                    self._file_map[path] = file_id

-    def _init_db(self, db):
+    def _init_db(self, db: SqliteDb) -> None:
         """Write the initial contents of the database."""
         if self._debug.should("dataio"):
             self._debug.write(f"Initing data file {self._filename!r}")
         db.executescript(SCHEMA)
-        db.execute("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,))
-        db.executemany(
-            "insert or ignore into meta (key, value) values (?, ?)",
-            [
+        db.execute_void("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,))
+
+        # When writing metadata, avoid information that will needlessly change
+        # the hash of the data file, unless we're debugging processes.
+        meta_data = [
+            ("version", __version__),
+        ]
+        if self._debug.should("process"):
+            meta_data.extend([
                 ("sys_argv", str(getattr(sys, "argv", None))),
-                ("version", __version__),
                 ("when", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
-            ]
-        )
+            ])
+        db.executemany_void("insert or ignore into meta (key, value) values (?, ?)", meta_data)

-    def _connect(self):
+    def _connect(self) -> SqliteDb:
         """Get the SqliteDb object to use."""
         if threading.get_ident() not in self._dbs:
             self._open_db()
         return self._dbs[threading.get_ident()]

-    def __bool__(self):
+    def __bool__(self) -> bool:
         if (threading.get_ident() not in self._dbs and
             not os.path.exists(self._filename)):
             return False
         try:
             with self._connect() as con:
-                rows = con.execute("select * from file limit 1")
-                return bool(list(rows))
+                with con.execute("select * from file limit 1") as cur:
+                    return bool(list(cur))
         except CoverageException:
             return False

-    @contract(returns="bytes")
-    def dumps(self):
+    def dumps(self) -> bytes:
         """Serialize the current data to a byte string.

         The format of the serialized data is not documented. It is only
@@ -349,8 +375,7 @@ def dumps(self):
             script = con.dump()
         return b"z" + zlib.compress(script.encode("utf-8"))

-    @contract(data="bytes")
-    def loads(self, data):
+    def loads(self, data: bytes) -> None:
         """Deserialize data from :meth:`dumps`.

         Use with a newly-created empty :class:`CoverageData` object.  It's
@@ -378,7 +403,7 @@ def loads(self, data):
         self._read_db()
         self._have_used = True

-    def _file_id(self, filename, add=False):
+    def _file_id(self, filename: str, add: bool=False) -> Optional[int]:
         """Get the file id for `filename`.

         If filename is not in the database yet, add it if `add` is True.
@@ -393,19 +418,19 @@ def _file_id(self, filename, add=False):
                     )
         return self._file_map.get(filename)

-    def _context_id(self, context):
+    def _context_id(self, context: str) -> Optional[int]:
         """Get the id for a context."""
         assert context is not None
         self._start_using()
         with self._connect() as con:
             row = con.execute_one("select id from context where context = ?", (context,))
             if row is not None:
-                return row[0]
+                return cast(int, row[0])
             else:
                 return None

     @_locked
-    def set_context(self, context):
+    def set_context(self, context: Optional[str]) -> None:
         """Set the current context for future :meth:`add_lines` etc.

         `context` is a str, the name of the context to use for the next data
@@ -419,7 +444,7 @@ def set_context(self, context):
         self._current_context = context
         self._current_context_id = None

-    def _set_context_id(self):
+    def _set_context_id(self) -> None:
         """Use the _current_context to set _current_context_id."""
         context = self._current_context or ""
         context_id = self._context_id(context)
@@ -432,7 +457,7 @@ def _set_context_id(self):
                 (context,)
             )

-    def base_filename(self):
+    def base_filename(self) -> str:
         """The base filename for storing data.

         .. versionadded:: 5.0
@@ -440,7 +465,7 @@ def base_filename(self):
         """
         return self._basename

-    def data_filename(self):
+    def data_filename(self) -> str:
         """Where is the data stored?

         .. versionadded:: 5.0
@@ -449,7 +474,7 @@ def data_filename(self):
         return self._filename

     @_locked
-    def add_lines(self, line_data):
+    def add_lines(self, line_data: Mapping[str, Collection[TLineNo]]) -> None:
         """Add measured line data.

         `line_data` is a dictionary mapping file names to iterables of ints::
@@ -459,7 +484,7 @@ def add_lines(self, line_data):
         """
         if self._debug.should("dataop"):
             self._debug.write("Adding lines: %d files, %d lines total" % (
                 len(line_data), sum(len(lines) for lines in line_data.values())
             ))
         self._start_using()
         self._choose_lines_or_arcs(lines=True)
@@ -471,18 +496,19 @@ def add_lines(self, line_data):
                 linemap = nums_to_numbits(linenos)
                 file_id = self._file_id(filename, add=True)
                 query = "select numbits from line_bits where file_id = ? and context_id = ?"
-                existing = list(con.execute(query, (file_id, self._current_context_id)))
+                with con.execute(query, (file_id, self._current_context_id)) as cur:
+                    existing = list(cur)
                 if existing:
                     linemap = numbits_union(linemap, existing[0][0])

-                con.execute(
+                con.execute_void(
                     "insert or replace into line_bits " +
                     " (file_id, context_id, numbits) values (?, ?, ?)",
                     (file_id, self._current_context_id, linemap),
                 )

     @_locked
-    def add_arcs(self, arc_data):
+    def add_arcs(self, arc_data: Mapping[str, Collection[TArc]]) -> None:
         """Add measured arc data.

         `arc_data` is a dictionary mapping file names to iterables of pairs of
@@ -504,13 +530,13 @@ def add_arcs(self, arc_data):
             for filename, arcs in arc_data.items():
                 file_id = self._file_id(filename, add=True)
                 data = [(file_id, self._current_context_id, fromno, tono) for fromno, tono in arcs]
-                con.executemany(
+                con.executemany_void(
                     "insert or ignore into arc " +
                     "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)",
                     data,
                 )

-    def _choose_lines_or_arcs(self, lines=False, arcs=False):
+    def _choose_lines_or_arcs(self, lines: bool=False, arcs: bool=False) -> None:
         """Force the data file to choose between lines and arcs."""
         assert lines or arcs
         assert not (lines and arcs)
@@ -526,13 +552,13 @@ def _choose_lines_or_arcs(self, lines=False, arcs=False):
             self._has_lines = lines
             self._has_arcs = arcs
             with self._connect() as con:
-                con.execute(
+                con.execute_void(
                     "insert or ignore into meta (key, value) values (?, ?)",
                     ("has_arcs", str(int(arcs)))
                 )

     @_locked
-    def add_file_tracers(self, file_tracers):
+    def add_file_tracers(self, file_tracers: Mapping[str, str]) -> None:
         """Add per-file plugin information.

         `file_tracers` is { filename: plugin_name, ... }
@@ -560,24 +586,24 @@ def add_file_tracers(self, file_tracers):
                         )
                     )
                 elif plugin_name:
-                    con.execute(
+                    con.execute_void(
                         "insert into tracer (file_id, tracer) values (?, ?)",
                         (file_id, plugin_name)
                     )

-    def touch_file(self, filename, plugin_name=""):
+    def touch_file(self, filename: str, plugin_name: str="") -> None:
         """Ensure that `filename` appears in the data, empty if needed.

-        `plugin_name` is the name of the plugin responsible for this file. It is used
-        to associate the right filereporter, etc.
+        `plugin_name` is the name of the plugin responsible for this file.
+        It is used to associate the right filereporter, etc.
         """
         self.touch_files([filename], plugin_name)

-    def touch_files(self, filenames, plugin_name=""):
+    def touch_files(self, filenames: Iterable[str], plugin_name: Optional[str]=None) -> None:
         """Ensure that `filenames` appear in the data, empty if needed.

-        `plugin_name` is the name of the plugin responsible for these files. It is used
-        to associate the right filereporter, etc.
+        `plugin_name` is the name of the plugin responsible for these files.
+        It is used to associate the right filereporter, etc.
         """
         if self._debug.should("dataop"):
             self._debug.write(f"Touching {filenames!r}")
@@ -592,11 +618,13 @@ def touch_files(self, filenames, plugin_name=""):
                     # Set the tracer for this file
                    self.add_file_tracers({filename: plugin_name})

-    def update(self, other_data, aliases=None):
+    def update(self, other_data: CoverageData, aliases: Optional[PathAliases]=None) -> None:
         """Update this data with data from several other :class:`CoverageData`
         instances.

         If `aliases` is provided, it's a `PathAliases` object that is used to
-        re-map paths to match the local machine's.
+        re-map paths to match the local machine's. Note: `aliases` is None
+        only when called directly from the test suite.
+
         """
         if self._debug.should("dataop"):
             self._debug.write("Updating with data from {!r}".format(
@@ -616,78 +644,80 @@ def update(self, other_data, aliases=None):
         other_data.read()
         with other_data._connect() as con:
             # Get files data.
-            cur = con.execute("select path from file")
-            files = {path: aliases.map(path) for (path,) in cur}
-            cur.close()
+            with con.execute("select path from file") as cur:
+                files = {path: aliases.map(path) for (path,) in cur}

             # Get contexts data.
-            cur = con.execute("select context from context")
-            contexts = [context for (context,) in cur]
-            cur.close()
+            with con.execute("select context from context") as cur:
+                contexts = [context for (context,) in cur]

             # Get arc data.
-            cur = con.execute(
+            with con.execute(
                 "select file.path, context.context, arc.fromno, arc.tono " +
                 "from arc " +
                 "inner join file on file.id = arc.file_id " +
                 "inner join context on context.id = arc.context_id"
-            )
-            arcs = [(files[path], context, fromno, tono) for (path, context, fromno, tono) in cur]
-            cur.close()
+            ) as cur:
+                arcs = [
+                    (files[path], context, fromno, tono)
+                    for (path, context, fromno, tono) in cur
+                ]

             # Get line data.
-            cur = con.execute(
+            with con.execute(
                 "select file.path, context.context, line_bits.numbits " +
                 "from line_bits " +
                 "inner join file on file.id = line_bits.file_id " +
                 "inner join context on context.id = line_bits.context_id"
-            )
-            lines = {(files[path], context): numbits for (path, context, numbits) in cur}
-            cur.close()
+            ) as cur:
+                lines: Dict[Tuple[str, str], bytes] = {}
+                for path, context, numbits in cur:
+                    key = (files[path], context)
+                    if key in lines:
+                        numbits = numbits_union(lines[key], numbits)
+                    lines[key] = numbits

             # Get tracer data.
-            cur = con.execute(
+            with con.execute(
                 "select file.path, tracer " +
                 "from tracer " +
                 "inner join file on file.id = tracer.file_id"
-            )
-            tracers = {files[path]: tracer for (path, tracer) in cur}
-            cur.close()
+            ) as cur:
+                tracers = {files[path]: tracer for (path, tracer) in cur}

         with self._connect() as con:
+            assert con.con is not None
             con.con.isolation_level = "IMMEDIATE"

             # Get all tracers in the DB. Files not in the tracers are assumed
             # to have an empty string tracer. Since Sqlite does not support
             # full outer joins, we have to make two queries to fill the
             # dictionary.
-            this_tracers = {path: "" for path, in con.execute("select path from file")}
-            this_tracers.update({
-                aliases.map(path): tracer
-                for path, tracer in con.execute(
-                    "select file.path, tracer from tracer " +
-                    "inner join file on file.id = tracer.file_id"
-                )
-            })
+            with con.execute("select path from file") as cur:
+                this_tracers = {path: "" for path, in cur}
+            with con.execute(
+                "select file.path, tracer from tracer " +
+                "inner join file on file.id = tracer.file_id"
+            ) as cur:
+                this_tracers.update({
+                    aliases.map(path): tracer
+                    for path, tracer in cur
+                })

             # Create all file and context rows in the DB.
-            con.executemany(
+            con.executemany_void(
                 "insert or ignore into file (path) values (?)",
                 ((file,) for file in files.values())
             )
-            file_ids = {
-                path: id
-                for id, path in con.execute("select id, path from file")
-            }
+            with con.execute("select id, path from file") as cur:
+                file_ids = {path: id for id, path in cur}
             self._file_map.update(file_ids)
-            con.executemany(
+            con.executemany_void(
                 "insert or ignore into context (context) values (?)",
                 ((context,) for context in contexts)
             )
-            context_ids = {
-                context: id
-                for id, context in con.execute("select id, context from context")
-            }
+            with con.execute("select id, context from context") as cur:
+                context_ids = {context: id for id, context in cur}

             # Prepare tracers and fail, if a conflict is found.
             # tracer_paths is used to ensure consistency over the tracer data
@@ -714,24 +744,23 @@ def update(self, other_data, aliases=None):
             )

             # Get line data.
-            cur = con.execute(
+            with con.execute(
                 "select file.path, context.context, line_bits.numbits " +
                 "from line_bits " +
                 "inner join file on file.id = line_bits.file_id " +
                 "inner join context on context.id = line_bits.context_id"
-            )
-            for path, context, numbits in cur:
-                key = (aliases.map(path), context)
-                if key in lines:
-                    numbits = numbits_union(lines[key], numbits)
-                lines[key] = numbits
-            cur.close()
+            ) as cur:
+                for path, context, numbits in cur:
+                    key = (aliases.map(path), context)
+                    if key in lines:
+                        numbits = numbits_union(lines[key], numbits)
+                    lines[key] = numbits

             if arcs:
                 self._choose_lines_or_arcs(arcs=True)

                 # Write the combined data.
-                con.executemany(
+                con.executemany_void(
                     "insert or ignore into arc " +
                     "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)",
                     arc_rows
@@ -739,8 +768,8 @@ def update(self, other_data, aliases=None):

             if lines:
                 self._choose_lines_or_arcs(lines=True)
-                con.execute("delete from line_bits")
-                con.executemany(
+                con.execute_void("delete from line_bits")
+                con.executemany_void(
                     "insert into line_bits " +
                     "(file_id, context_id, numbits) values (?, ?, ?)",
                     [
@@ -748,7 +777,7 @@ def update(self, other_data, aliases=None):
                         for (file, context), numbits in lines.items()
                     ]
                 )
-            con.executemany(
+            con.executemany_void(
                 "insert or ignore into tracer (file_id, tracer) values (?, ?)",
                 ((file_ids[filename], tracer) for filename, tracer in tracer_map.items())
             )
@@ -758,7 +787,7 @@ def update(self, other_data, aliases=None):
             self._reset()
             self.read()

-    def erase(self, parallel=False):
+    def erase(self, parallel: bool=False) -> None:
         """Erase the data in this object.

         If `parallel` is true, then also deletes data files created from the
@@ -780,17 +809,17 @@ def erase(self, parallel=False):
                     self._debug.write(f"Erasing parallel data file {filename!r}")
                 file_be_gone(filename)

-    def read(self):
+    def read(self) -> None:
         """Start using an existing data file."""
         if os.path.exists(self._filename):
             with self._connect():
                 self._have_used = True

-    def write(self):
+    def write(self) -> None:
         """Ensure the data is written to the data file."""
         pass

-    def _start_using(self):
+    def _start_using(self) -> None:
         """Call this before using the database at all."""
         if self._pid != os.getpid():
             # Looks like we forked! Have to start a new data file.
@@ -801,15 +830,15 @@ def _start_using(self):
             self.erase()
         self._have_used = True

-    def has_arcs(self):
+    def has_arcs(self) -> bool:
         """Does the database have arcs (True) or lines (False)."""
         return bool(self._has_arcs)

-    def measured_files(self):
+    def measured_files(self) -> Set[str]:
         """A set of all files that had been measured."""
         return set(self._file_map)

-    def measured_contexts(self):
+    def measured_contexts(self) -> Set[str]:
         """A set of all contexts that have been measured.

         .. versionadded:: 5.0
@@ -817,10 +846,11 @@ def measured_contexts(self):
         """
         self._start_using()
         with self._connect() as con:
-            contexts = {row[0] for row in con.execute("select distinct(context) from context")}
+            with con.execute("select distinct(context) from context") as cur:
+                contexts = {row[0] for row in cur}
         return contexts

-    def file_tracer(self, filename):
+    def file_tracer(self, filename: str) -> Optional[str]:
         """Get the plugin name of the file tracer for a file.

         Returns the name of the plugin that handles this file.  If the file was
@@ -838,7 +868,7 @@ def file_tracer(self, filename):
                 return row[0] or ""
         return ""  # File was measured, but no tracer associated.

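The hunks above finish converting ``CoverageData``'s internals to explicit, cursor-managed SQL access while leaving the public query API intact. As a quick orientation, and not part of the patch itself, here is a minimal sketch of that query API; it assumes a ``.coverage`` data file already produced by an ordinary run::

    # Not part of the patch: a minimal sketch of the read-only query API
    # annotated above.  Assumes a ".coverage" file exists in the current
    # directory.
    from coverage import CoverageData

    data = CoverageData()   # defaults to the ".coverage" basename
    data.read()             # open an existing data file

    for filename in sorted(data.measured_files()):      # Set[str]
        lines = data.lines(filename)                    # Optional[List[TLineNo]]
        tracer = data.file_tracer(filename)             # Optional[str]
        print(f"{filename}: {len(lines or [])} lines, tracer={tracer!r}")
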
-    def set_query_context(self, context):
+    def set_query_context(self, context: str) -> None:
         """Set a context for subsequent querying.

         The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno`
@@ -851,10 +881,10 @@ def set_query_context(self, context):
         """
         self._start_using()
         with self._connect() as con:
-            cur = con.execute("select id from context where context = ?", (context,))
-            self._query_context_ids = [row[0] for row in cur.fetchall()]
+            with con.execute("select id from context where context = ?", (context,)) as cur:
+                self._query_context_ids = [row[0] for row in cur.fetchall()]

-    def set_query_contexts(self, contexts):
+    def set_query_contexts(self, contexts: Optional[Sequence[str]]) -> None:
         """Set a number of contexts for subsequent querying.

         The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno`
@@ -870,12 +900,12 @@ def set_query_contexts(self, contexts):
         if contexts:
             with self._connect() as con:
                 context_clause = " or ".join(["context regexp ?"] * len(contexts))
-                cur = con.execute("select id from context where " + context_clause, contexts)
-                self._query_context_ids = [row[0] for row in cur.fetchall()]
+                with con.execute("select id from context where " + context_clause, contexts) as cur:
+                    self._query_context_ids = [row[0] for row in cur.fetchall()]
         else:
             self._query_context_ids = None

-    def lines(self, filename):
+    def lines(self, filename: str) -> Optional[List[TLineNo]]:
         """Get the list of lines executed for a source file.

         If the file was not measured, returns None.  A file might be measured,
@@ -903,13 +933,14 @@ def lines(self, filename):
                 ids_array = ", ".join("?" * len(self._query_context_ids))
                 query += " and context_id in (" + ids_array + ")"
                 data += self._query_context_ids
-            bitmaps = list(con.execute(query, data))
+            with con.execute(query, data) as cur:
+                bitmaps = list(cur)
             nums = set()
             for row in bitmaps:
                 nums.update(numbits_to_nums(row[0]))
             return list(nums)

-    def arcs(self, filename):
+    def arcs(self, filename: str) -> Optional[List[TArc]]:
         """Get the list of arcs executed for a file.

         If the file was not measured, returns None.  A file might be measured,
@@ -938,10 +969,10 @@ def arcs(self, filename):
                 ids_array = ", ".join("?" * len(self._query_context_ids))
                 query += " and context_id in (" + ids_array + ")"
                 data += self._query_context_ids
-            arcs = con.execute(query, data)
-            return list(arcs)
+            with con.execute(query, data) as cur:
+                return list(cur)

-    def contexts_by_lineno(self, filename):
+    def contexts_by_lineno(self, filename: str) -> Dict[TLineNo, List[str]]:
         """Get the contexts for each line in a file.

         Returns:
@@ -968,11 +999,12 @@ def contexts_by_lineno(self, filename):
                     ids_array = ", ".join("?" * len(self._query_context_ids))
                     query += " and arc.context_id in (" + ids_array + ")"
                     data += self._query_context_ids
-                for fromno, tono, context in con.execute(query, data):
-                    if fromno > 0:
-                        lineno_contexts_map[fromno].add(context)
-                    if tono > 0:
-                        lineno_contexts_map[tono].add(context)
+                with con.execute(query, data) as cur:
+                    for fromno, tono, context in cur:
+                        if fromno > 0:
+                            lineno_contexts_map[fromno].add(context)
+                        if tono > 0:
+                            lineno_contexts_map[tono].add(context)
             else:
                 query = (
                     "select l.numbits, c.context from line_bits l, context c " +
@@ -984,33 +1016,35 @@ def contexts_by_lineno(self, filename):
                     ids_array = ", ".join("?" * len(self._query_context_ids))
                     query += " and l.context_id in (" + ids_array + ")"
                    data += self._query_context_ids
-                for numbits, context in con.execute(query, data):
-                    for lineno in numbits_to_nums(numbits):
-                        lineno_contexts_map[lineno].add(context)
+                with con.execute(query, data) as cur:
+                    for numbits, context in cur:
+                        for lineno in numbits_to_nums(numbits):
+                            lineno_contexts_map[lineno].add(context)
         return {lineno: list(contexts) for lineno, contexts in lineno_contexts_map.items()}

     @classmethod
-    def sys_info(cls):
+    def sys_info(cls) -> List[Tuple[str, Any]]:
         """Our information for `Coverage.sys_info`.

         Returns a list of (key, value) pairs.
         """
         with SqliteDb(":memory:", debug=NoDebugging()) as db:
-            temp_store = [row[0] for row in db.execute("pragma temp_store")]
-            copts = [row[0] for row in db.execute("pragma compile_options")]
+            with db.execute("pragma temp_store") as cur:
+                temp_store = [row[0] for row in cur]
+            with db.execute("pragma compile_options") as cur:
+                copts = [row[0] for row in cur]
             copts = textwrap.wrap(", ".join(copts), width=75)

         return [
-            ("sqlite3_version", sqlite3.version),
             ("sqlite3_sqlite_version", sqlite3.sqlite_version),
             ("sqlite3_temp_store", temp_store),
             ("sqlite3_compile_options", copts),
         ]


-def filename_suffix(suffix):
+def filename_suffix(suffix: Union[str, bool, None]) -> Union[str, None]:
     """Compute a filename suffix for a data file.

     If `suffix` is a string or None, simply return it. If `suffix` is True,
@@ -1027,10 +1061,12 @@ def filename_suffix(suffix):
         # if the process forks.
         dice = random.Random(os.urandom(8)).randint(0, 999999)
         suffix = "%s.%s.%06d" % (socket.gethostname(), os.getpid(), dice)
+    elif suffix is False:
+        suffix = None
     return suffix


-class SqliteDb(SimpleReprMixin):
+class SqliteDb(AutoReprMixin):
     """A simple abstraction over a SQLite database.

     Use as a context manager, then you can use it like a
@@ -1040,13 +1076,13 @@ class SqliteDb(SimpleReprMixin):
         db.execute("insert into schema (version) values (?)", (SCHEMA_VERSION,))

     """
-    def __init__(self, filename, debug):
+    def __init__(self, filename: str, debug: TDebugCtl) -> None:
         self.debug = debug
         self.filename = filename
         self.nest = 0
-        self.con = None
+        self.con: Optional[sqlite3.Connection] = None

-    def _connect(self):
+    def _connect(self) -> None:
         """Connect to the db and do universal initialization."""
         if self.con is not None:
             return
@@ -1068,27 +1104,29 @@ def _connect(self):
         # This pragma makes writing faster. It disables rollbacks, but we never need them.
         # PyPy needs the .close() calls here, or sqlite gets twisted up:
         # https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on
-        self.execute("pragma journal_mode=off").close()
+        self.execute_void("pragma journal_mode=off")
         # This pragma makes writing faster.
-        self.execute("pragma synchronous=off").close()
+        self.execute_void("pragma synchronous=off")

-    def close(self):
+    def close(self) -> None:
         """If needed, close the connection."""
         if self.con is not None and self.filename != ":memory:":
             self.con.close()
             self.con = None

-    def __enter__(self):
+    def __enter__(self) -> SqliteDb:
         if self.nest == 0:
             self._connect()
+            assert self.con is not None
             self.con.__enter__()
         self.nest += 1
         return self

-    def __exit__(self, exc_type, exc_value, traceback):
+    def __exit__(self, exc_type, exc_value, traceback) -> None:  # type: ignore[no-untyped-def]
         self.nest -= 1
         if self.nest == 0:
             try:
+                assert self.con is not None
                 self.con.__exit__(exc_type, exc_value, traceback)
                 self.close()
             except Exception as exc:
@@ -1096,19 +1134,20 @@ def __exit__(self, exc_type, exc_value, traceback):
                     self.debug.write(f"EXCEPTION from __exit__: {exc}")
                 raise DataError(f"Couldn't end data file {self.filename!r}: {exc}") from exc

-    def execute(self, sql, parameters=()):
+    def _execute(self, sql: str, parameters: Iterable[Any]) -> sqlite3.Cursor:
         """Same as :meth:`python:sqlite3.Connection.execute`."""
         if self.debug.should("sql"):
             tail = f" with {parameters!r}" if parameters else ""
             self.debug.write(f"Executing {sql!r}{tail}")
         try:
+            assert self.con is not None
             try:
-                return self.con.execute(sql, parameters)
+                return self.con.execute(sql, parameters)    # type: ignore[arg-type]
             except Exception:
                 # In some cases, an error might happen that isn't really an
                 # error.  Try again immediately.
                 # https://github.com/nedbat/coveragepy/issues/1010
-                return self.con.execute(sql, parameters)
+                return self.con.execute(sql, parameters)    # type: ignore[arg-type]
         except sqlite3.Error as exc:
             msg = str(exc)
             try:
@@ -1127,15 +1166,36 @@ def execute(self, sql, parameters=()):
                 self.debug.write(f"EXCEPTION from execute: {msg}")
             raise DataError(f"Couldn't use data file {self.filename!r}: {msg}") from exc

-    def execute_for_rowid(self, sql, parameters=()):
+    @contextlib.contextmanager
+    def execute(
+        self,
+        sql: str,
+        parameters: Iterable[Any]=(),
+    ) -> Generator[sqlite3.Cursor, None, None]:
+        """Context managed :meth:`python:sqlite3.Connection.execute`.
+
+        Use with a ``with`` statement to auto-close the returned cursor.
+        """
+        cur = self._execute(sql, parameters)
+        try:
+            yield cur
+        finally:
+            cur.close()
+
+    def execute_void(self, sql: str, parameters: Iterable[Any]=()) -> None:
+        """Same as :meth:`python:sqlite3.Connection.execute` when you don't need the cursor."""
+        self._execute(sql, parameters).close()
+
+    def execute_for_rowid(self, sql: str, parameters: Iterable[Any]=()) -> int:
         """Like execute, but returns the lastrowid."""
-        con = self.execute(sql, parameters)
-        rowid = con.lastrowid
+        with self.execute(sql, parameters) as cur:
+            assert cur.lastrowid is not None
+            rowid: int = cur.lastrowid
         if self.debug.should("sqldata"):
             self.debug.write(f"Row id result: {rowid!r}")
         return rowid

-    def execute_one(self, sql, parameters=()):
+    def execute_one(self, sql: str, parameters: Iterable[Any]=()) -> Optional[Tuple[Any, ...]]:
         """Execute a statement and return the one row that results.

         This is like execute(sql, parameters).fetchone(), except it is
@@ -1144,15 +1204,16 @@ def execute_one(self, sql, parameters=()):
         Returns a row, or None if there were no rows.
         """
-        rows = list(self.execute(sql, parameters))
+        with self.execute(sql, parameters) as cur:
+            rows = list(cur)
         if len(rows) == 0:
             return None
         elif len(rows) == 1:
-            return rows[0]
+            return cast(Tuple[Any, ...], rows[0])
         else:
             raise AssertionError(f"SQL {sql!r} shouldn't return {len(rows)} rows")

-    def executemany(self, sql, data):
+    def _executemany(self, sql: str, data: Iterable[Any]) -> sqlite3.Cursor:
         """Same as :meth:`python:sqlite3.Connection.executemany`."""
         if self.debug.should("sql"):
             data = list(data)
@@ -1161,6 +1222,7 @@ def executemany(self, sql, data):
             if self.debug.should("sqldata"):
                 for i, row in enumerate(data):
                     self.debug.write(f"{i:4d}: {row!r}")
+        assert self.con is not None
         try:
             return self.con.executemany(sql, data)
         except Exception:   # pragma: cant happen
@@ -1169,14 +1231,20 @@ def executemany(self, sql, data):
             # https://github.com/nedbat/coveragepy/issues/1010
             return self.con.executemany(sql, data)

-    def executescript(self, script):
+    def executemany_void(self, sql: str, data: Iterable[Any]) -> None:
+        """Same as :meth:`python:sqlite3.Connection.executemany` when you don't need the cursor."""
+        self._executemany(sql, data).close()
+
+    def executescript(self, script: str) -> None:
         """Same as :meth:`python:sqlite3.Connection.executescript`."""
         if self.debug.should("sql"):
             self.debug.write("Executing script with {} chars: {}".format(
                 len(script), clipped_repr(script, 100),
             ))
-        self.con.executescript(script)
+        assert self.con is not None
+        self.con.executescript(script).close()

-    def dump(self):
+    def dump(self) -> str:
         """Return a multi-line string, the SQL dump of the database."""
+        assert self.con is not None
         return "\n".join(self.con.iterdump())
diff --git a/coverage/summary.py b/coverage/summary.py
index 861fbc536..3f3fd688f 100644
--- a/coverage/summary.py
+++ b/coverage/summary.py
@@ -19,22 +19,138 @@ def __init__(self, coverage):
         self.config = self.coverage.config
         self.branches = coverage.get_data().has_arcs()
         self.outfile = None
+        self.output_format = self.config.format or "text"
+        if self.output_format not in {"text", "markdown", "total"}:
+            raise ConfigError(f"Unknown report format choice: {self.output_format!r}")
         self.fr_analysis = []
         self.skipped_count = 0
         self.empty_count = 0
         self.total = Numbers(precision=self.config.precision)
-        self.fmt_err = "%s   %s: %s"

-    def writeout(self, line):
+    def write(self, line):
         """Write a line to the output, adding a newline."""
         self.outfile.write(line.rstrip())
         self.outfile.write("\n")

-    def report(self, morfs, outfile=None):
+    def write_items(self, items):
+        """Write a list of strings, joined together."""
+        self.write("".join(items))
+
+    def _report_text(self, header, lines_values, total_line, end_lines):
+        """Internal method that prints report data in text format.
+
+        `header` is a list with captions.
+        `lines_values` is list of lists of sortable values.
+        `total_line` is a list with values of the total line.
+        `end_lines` is a list of ending lines with information about skipped files.
+
+        """
+        # Prepare the formatting strings, header, and column sorting.
+        max_name = max([len(line[0]) for line in lines_values] + [5]) + 1
+        max_n = max(len(total_line[header.index("Cover")]) + 2, len(" Cover")) + 1
+        max_n = max([max_n] + [len(line[header.index("Cover")]) + 2 for line in lines_values])
+        formats = dict(
+            Name="{:{name_len}}",
+            Stmts="{:>7}",
+            Miss="{:>7}",
+            Branch="{:>7}",
+            BrPart="{:>7}",
+            Cover="{:>{n}}",
+            Missing="{:>10}",
+        )
+        header_items = [
+            formats[item].format(item, name_len=max_name, n=max_n)
+            for item in header
+        ]
+        header_str = "".join(header_items)
+        rule = "-" * len(header_str)
+
+        # Write the header
+        self.write(header_str)
+        self.write(rule)
+
+        formats.update(dict(Cover="{:>{n}}%"), Missing="   {:9}")
+        for values in lines_values:
+            # build string with line values
+            line_items = [
+                formats[item].format(str(value),
+                name_len=max_name, n=max_n-1) for item, value in zip(header, values)
+            ]
+            self.write_items(line_items)
+
+        # Write a TOTAL line
+        if lines_values:
+            self.write(rule)
+
+        line_items = [
+            formats[item].format(str(value),
+            name_len=max_name, n=max_n-1) for item, value in zip(header, total_line)
+        ]
+        self.write_items(line_items)
+
+        for end_line in end_lines:
+            self.write(end_line)
+
+    def _report_markdown(self, header, lines_values, total_line, end_lines):
+        """Internal method that prints report data in markdown format.
+
+        `header` is a list with captions.
+        `lines_values` is a sorted list of lists containing coverage information.
+        `total_line` is a list with values of the total line.
+        `end_lines` is a list of ending lines with information about skipped files.
+
+        """
+        # Prepare the formatting strings, header, and column sorting.
+        max_name = max((len(line[0].replace("_", "\\_")) for line in lines_values), default=0)
+        max_name = max(max_name, len("**TOTAL**")) + 1
+        formats = dict(
+            Name="| {:{name_len}}|",
+            Stmts="{:>9} |",
+            Miss="{:>9} |",
+            Branch="{:>9} |",
+            BrPart="{:>9} |",
+            Cover="{:>{n}} |",
+            Missing="{:>10} |",
+        )
+        max_n = max(len(total_line[header.index("Cover")]) + 6, len(" Cover "))
+        header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header]
+        header_str = "".join(header_items)
+        rule_str = "|" + " ".join(["- |".rjust(len(header_items[0])-1, '-')] +
+            ["-: |".rjust(len(item)-1, '-') for item in header_items[1:]]
+        )
+
+        # Write the header
+        self.write(header_str)
+        self.write(rule_str)
+
+        for values in lines_values:
+            # build string with line values
+            formats.update(dict(Cover="{:>{n}}% |"))
+            line_items = [
+                formats[item].format(str(value).replace("_", "\\_"), name_len=max_name, n=max_n-1)
+                for item, value in zip(header, values)
+            ]
+            self.write_items(line_items)
+
+        # Write the TOTAL line
+        formats.update(dict(Name="|{:>{name_len}} |", Cover="{:>{n}} |"))
+        total_line_items = []
+        for item, value in zip(header, total_line):
+            if value == "":
+                insert = value
+            elif item == "Cover":
+                insert = f" **{value}%**"
+            else:
+                insert = f" **{value}**"
+            total_line_items += formats[item].format(insert, name_len=max_name, n=max_n)
+        self.write_items(total_line_items)
+        for end_line in end_lines:
+            self.write(end_line)
+
+    def report(self, morfs, outfile=None) -> float:
         """Writes a report summarizing coverage statistics per module.

-        `outfile` is a file object to write the summary to. It must be opened
-        for native strings (bytes on Python 2, Unicode on Python 3).
+        `outfile` is a text-mode file object to write the summary to.

         """
         self.outfile = outfile or sys.stdout
@@ -43,53 +159,46 @@ def report(self, morfs, outfile=None):
         for fr, analysis in get_analysis_to_report(self.coverage, morfs):
             self.report_one_file(fr, analysis)

-        # Prepare the formatting strings, header, and column sorting.
-        max_name = max([len(fr.relative_filename()) for (fr, analysis) in self.fr_analysis] + [5])
-        fmt_name = "%%- %ds  " % max_name
-        fmt_skip_covered = "\n%s file%s skipped due to complete coverage."
-        fmt_skip_empty = "\n%s empty file%s skipped."
+        if not self.total.n_files and not self.skipped_count:
+            raise NoDataError("No data to report.")

-        header = (fmt_name % "Name") + " Stmts   Miss"
-        fmt_coverage = fmt_name + "%6d %6d"
+        if self.output_format == "total":
+            self.write(self.total.pc_covered_str)
+        else:
+            self.tabular_report()
+
+        return self.total.pc_covered
+
+    def tabular_report(self):
+        """Writes tabular report formats."""
+        # Prepare the header line and column sorting.
+        header = ["Name", "Stmts", "Miss"]
         if self.branches:
-            header += " Branch BrPart"
-            fmt_coverage += " %6d %6d"
-        width100 = Numbers(precision=self.config.precision).pc_str_width()
-        header += "%*s" % (width100+4, "Cover")
-        fmt_coverage += "%%%ds%%%%" % (width100+3,)
+            header += ["Branch", "BrPart"]
+        header += ["Cover"]
         if self.config.show_missing:
-            header += "   Missing"
-            fmt_coverage += "   %s"
-        rule = "-" * len(header)
+            header += ["Missing"]

         column_order = dict(name=0, stmts=1, miss=2, cover=-1)
         if self.branches:
             column_order.update(dict(branch=3, brpart=4))

-        # Write the header
-        self.writeout(header)
-        self.writeout(rule)
-
-        # `lines` is a list of pairs, (line text, line values).  The line text
-        # is a string that will be printed, and line values is a tuple of
-        # sortable values.
-        lines = []
+        # `lines_values` is list of lists of sortable values.
+        lines_values = []

         for (fr, analysis) in self.fr_analysis:
             nums = analysis.numbers

-            args = (fr.relative_filename(), nums.n_statements, nums.n_missing)
+            args = [fr.relative_filename(), nums.n_statements, nums.n_missing]
             if self.branches:
-                args += (nums.n_branches, nums.n_partial_branches)
-            args += (nums.pc_covered_str,)
+                args += [nums.n_branches, nums.n_partial_branches]
+            args += [nums.pc_covered_str]
             if self.config.show_missing:
-                args += (analysis.missing_formatted(branches=True),)
-            text = fmt_coverage % args
-            # Add numeric percent coverage so that sorting makes sense.
-            args += (nums.pc_covered,)
-            lines.append((text, args))
+                args += [analysis.missing_formatted(branches=True)]
+            args += [nums.pc_covered]
+            lines_values.append(args)

-        # Sort the lines and write them out.
+        # Line sorting.
         sort_option = (self.config.sort or "name").lower()
         reverse = False
         if sort_option[0] == '-':
@@ -97,43 +206,38 @@ def report(self, morfs, outfile=None):
             sort_option = sort_option[1:]
         elif sort_option[0] == '+':
             sort_option = sort_option[1:]
-
+        sort_idx = column_order.get(sort_option)
+        if sort_idx is None:
+            raise ConfigError(f"Invalid sorting option: {self.config.sort!r}")
         if sort_option == "name":
-            lines = human_sorted_items(lines, reverse=reverse)
+            lines_values = human_sorted_items(lines_values, reverse=reverse)
         else:
-            position = column_order.get(sort_option)
-            if position is None:
-                raise ConfigError(f"Invalid sorting option: {self.config.sort!r}")
-            lines.sort(key=lambda l: (l[1][position], l[0]), reverse=reverse)
-
-        for line in lines:
-            self.writeout(line[0])
-
-        # Write a TOTAL line if we had at least one file.
-        if self.total.n_files > 0:
-            self.writeout(rule)
-            args = ("TOTAL", self.total.n_statements, self.total.n_missing)
-            if self.branches:
-                args += (self.total.n_branches, self.total.n_partial_branches)
-            args += (self.total.pc_covered_str,)
-            if self.config.show_missing:
-                args += ("",)
-            self.writeout(fmt_coverage % args)
+            lines_values.sort(key=lambda line: (line[sort_idx], line[0]), reverse=reverse)

-        # Write other final lines.
-        if not self.total.n_files and not self.skipped_count:
-            raise NoDataError("No data to report.")
+        # Calculate total if we had at least one file.
+        total_line = ["TOTAL", self.total.n_statements, self.total.n_missing]
+        if self.branches:
+            total_line += [self.total.n_branches, self.total.n_partial_branches]
+        total_line += [self.total.pc_covered_str]
+        if self.config.show_missing:
+            total_line += [""]

+        # Create other final lines.
+        end_lines = []
         if self.config.skip_covered and self.skipped_count:
-            self.writeout(
-                fmt_skip_covered % (self.skipped_count, 's' if self.skipped_count > 1 else '')
+            file_suffix = 's' if self.skipped_count > 1 else ''
+            end_lines.append(
+                f"\n{self.skipped_count} file{file_suffix} skipped due to complete coverage."
             )
         if self.config.skip_empty and self.empty_count:
-            self.writeout(
-                fmt_skip_empty % (self.empty_count, 's' if self.empty_count > 1 else '')
-            )
+            file_suffix = 's' if self.empty_count > 1 else ''
+            end_lines.append(f"\n{self.empty_count} empty file{file_suffix} skipped.")

-        return self.total.n_statements and self.total.pc_covered
+        if self.output_format == "markdown":
+            formatter = self._report_markdown
+        else:
+            formatter = self._report_text
+        formatter(header, lines_values, total_line, end_lines)

     def report_one_file(self, fr, analysis):
         """Report on just one file, the callback from report()."""
diff --git a/coverage/templite.py b/coverage/templite.py
index ab3cf1cf4..29596d770 100644
--- a/coverage/templite.py
+++ b/coverage/templite.py
@@ -92,7 +92,7 @@ class Templite:
     and joined.  Be careful, this could join words together!

     Any of these constructs can have a hyphen at the end (`-}}`, `-%}`, `-#}`),
-    which will collapse the whitespace following the tag.
+    which will collapse the white space following the tag.

     Construct a Templite with the template text, then use `render` against a
     dictionary context to create a finished string::
diff --git a/coverage/tomlconfig.py b/coverage/tomlconfig.py
index 148c34f89..737c728cd 100644
--- a/coverage/tomlconfig.py
+++ b/coverage/tomlconfig.py
@@ -3,25 +3,23 @@

 """TOML configuration support for coverage.py"""

-import configparser
 import os
 import re

+from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, TypeVar
+
 from coverage import env
 from coverage.exceptions import ConfigError
 from coverage.misc import import_third_party, substitute_variables
+from coverage.types import TConfigSection, TConfigValue

 if env.PYVERSION >= (3, 11, 0, "alpha", 7):
     import tomllib  # pylint: disable=import-error
+    has_tomllib = True
 else:
     # TOML support on Python 3.10 and below is an install-time extra option.
-    # (Import typing is here because import_third_party will unload any module
-    # that wasn't already imported. tomli imports typing, and if we unload it,
-    # later it's imported again, and on Python 3.6, this causes infinite
-    # recursion.)
-    import typing  # pylint: disable=unused-import
-    tomllib = import_third_party("tomli")
+    tomllib, has_tomllib = import_third_party("tomli")


 class TomlDecodeError(Exception):
@@ -29,6 +27,8 @@ class TomlDecodeError(Exception):
     pass


+TWant = TypeVar("TWant")
+
 class TomlConfigParser:
     """TOML file reading with the interface of HandyConfigParser."""

@@ -36,11 +36,11 @@ class TomlConfigParser:
     # need for docstrings.
     # pylint: disable=missing-function-docstring

-    def __init__(self, our_file):
+    def __init__(self, our_file: bool) -> None:
         self.our_file = our_file
-        self.data = None
+        self.data: Dict[str, Any] = {}

-    def read(self, filenames):
+    def read(self, filenames: Iterable[str]) -> List[str]:
         # RawConfigParser takes a filename or list of filenames, but we only
         # ever call this with a single filename.
         assert isinstance(filenames, (bytes, str, os.PathLike))
@@ -51,22 +51,21 @@ def read(self, filenames):
             toml_text = fp.read()
         except OSError:
             return []
-        if tomllib is not None:
-            toml_text = substitute_variables(toml_text, os.environ)
+        if has_tomllib:
             try:
                 self.data = tomllib.loads(toml_text)
             except tomllib.TOMLDecodeError as err:
                 raise TomlDecodeError(str(err)) from err
             return [filename]
         else:
-            has_toml = re.search(r"^\[tool\.coverage\.", toml_text, flags=re.MULTILINE)
+            has_toml = re.search(r"^\[tool\.coverage(\.|])", toml_text, flags=re.MULTILINE)
             if self.our_file or has_toml:
                 # Looks like they meant to read TOML, but we can't read it.
                 msg = "Can't read {!r} without TOML support. Install with [toml] extra"
                 raise ConfigError(msg.format(filename))
             return []

-    def _get_section(self, section):
+    def _get_section(self, section: str) -> Tuple[Optional[str], Optional[TConfigSection]]:
         """Get a section from the data.

         Arguments:
@@ -79,8 +78,6 @@ def _get_section(self, section):

         """
         prefixes = ["tool.coverage."]
-        if self.our_file:
-            prefixes.append("")
         for prefix in prefixes:
             real_section = prefix + section
             parts = real_section.split(".")
@@ -95,60 +92,101 @@ def _get_section(self, section):
                 return None, None
         return real_section, data

-    def _get(self, section, option):
+    def _get(self, section: str, option: str) -> Tuple[str, TConfigValue]:
         """Like .get, but returns the real section name and the value."""
         name, data = self._get_section(section)
         if data is None:
-            raise configparser.NoSectionError(section)
+            raise ConfigError(f"No section: {section!r}")
+        assert name is not None
         try:
-            return name, data[option]
-        except KeyError as exc:
-            raise configparser.NoOptionError(option, name) from exc
+            value = data[option]
+        except KeyError:
+            raise ConfigError(f"No option {option!r} in section: {name!r}") from None
+        return name, value
+
+    def _get_single(self, section: str, option: str) -> Tuple[str, Any]:
+        """Get a single-valued option.
+
+        Performs environment substitution if the value is a string. Other types
+        will be converted later as needed.
+        """
+        name, value = self._get(section, option)
+        if isinstance(value, str):
+            value = substitute_variables(value, os.environ)
+        return name, value

-    def has_option(self, section, option):
+    def has_option(self, section: str, option: str) -> bool:
         _, data = self._get_section(section)
         if data is None:
             return False
         return option in data

-    def has_section(self, section):
+    def real_section(self, section: str) -> Optional[str]:
         name, _ = self._get_section(section)
         return name

-    def options(self, section):
+    def has_section(self, section: str) -> bool:
+        name, _ = self._get_section(section)
+        return bool(name)
+
+    def options(self, section: str) -> List[str]:
         _, data = self._get_section(section)
         if data is None:
-            raise configparser.NoSectionError(section)
+            raise ConfigError(f"No section: {section!r}")
         return list(data.keys())

-    def get_section(self, section):
+    def get_section(self, section: str) -> TConfigSection:
         _, data = self._get_section(section)
-        return data
-
-    def get(self, section, option):
-        _, value = self._get(section, option)
-        return value
+        return data or {}

-    def _check_type(self, section, option, value, type_, type_desc):
-        if not isinstance(value, type_):
-            raise ValueError(
-                'Option {!r} in section {!r} is not {}: {!r}'
-                .format(option, section, type_desc, value)
-            )
-
-    def getboolean(self, section, option):
-        name, value = self._get(section, option)
-        self._check_type(name, option, value, bool, "a boolean")
+    def get(self, section: str, option: str) -> Any:
+        _, value = self._get_single(section, option)
         return value

-    def getlist(self, section, option):
+    def _check_type(
+        self,
+        section: str,
+        option: str,
+        value: Any,
+        type_: Type[TWant],
+        converter: Optional[Callable[[Any], TWant]],
+        type_desc: str,
+    ) -> TWant:
+        """Check that `value` has the type we want, converting if needed.
+
+        Returns the resulting value of the desired type.
+        """
+        if isinstance(value, type_):
+            return value
+        if isinstance(value, str) and converter is not None:
+            try:
+                return converter(value)
+            except Exception as e:
+                raise ValueError(
+                    f"Option [{section}]{option} couldn't convert to {type_desc}: {value!r}"
+                ) from e
+        raise ValueError(
+            f"Option [{section}]{option} is not {type_desc}: {value!r}"
+        )
+
+    def getboolean(self, section: str, option: str) -> bool:
+        name, value = self._get_single(section, option)
+        bool_strings = {"true": True, "false": False}
+        return self._check_type(name, option, value, bool, bool_strings.__getitem__, "a boolean")
+
+    def _get_list(self, section: str, option: str) -> Tuple[str, List[str]]:
+        """Get a list of strings, substituting environment variables in the elements."""
         name, values = self._get(section, option)
-        self._check_type(name, option, values, list, "a list")
+        values = self._check_type(name, option, values, list, None, "a list")
+        values = [substitute_variables(value, os.environ) for value in values]
+        return name, values
+
+    def getlist(self, section: str, option: str) -> List[str]:
+        _, values = self._get_list(section, option)
         return values

-    def getregexlist(self, section, option):
-        name, values = self._get(section, option)
-        self._check_type(name, option, values, list, "a list")
+    def getregexlist(self, section: str, option: str) -> List[str]:
+        name, values = self._get_list(section, option)
         for value in values:
             value = value.strip()
             try:
@@ -157,14 +195,12 @@ def getregexlist(self, section, option):
                 raise ConfigError(f"Invalid [{name}].{option} value {value!r}: {e}") from e
         return values

-    def getint(self, section, option):
-        name, value = self._get(section, option)
-        self._check_type(name, option, value, int, "an integer")
-        return value
+    def getint(self, section: str, option: str) -> int:
+        name, value = self._get_single(section, option)
+        return self._check_type(name, option, value, int, int, "an integer")

-    def getfloat(self, section, option):
-        name, value = self._get(section, option)
+    def getfloat(self, section: str, option: str) -> float:
+        name, value = self._get_single(section, option)
         if isinstance(value, int):
             value = float(value)
-        self._check_type(name, option, value, float, "a float")
-        return value
+        return self._check_type(name, option, value, float, float, "a float")
diff --git a/coverage/tracer.pyi b/coverage/tracer.pyi
new file mode 100644
index 000000000..d1281767b
--- /dev/null
+++ b/coverage/tracer.pyi
@@ -0,0 +1,35 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+from typing import Any, Dict
+
+from coverage.types import TFileDisposition, TTraceData, TTraceFn, TTracer
+
+class CFileDisposition(TFileDisposition):
+    canonical_filename: Any
+    file_tracer: Any
+    has_dynamic_filename: Any
+    original_filename: Any
+    reason: Any
+    source_filename: Any
+    trace: Any
+    def __init__(self) -> None: ...
+
+class CTracer(TTracer):
+    check_include: Any
+    concur_id_func: Any
+    data: TTraceData
+    disable_plugin: Any
+    file_tracers: Any
+    should_start_context: Any
+    should_trace: Any
+    should_trace_cache: Any
+    switch_context: Any
+    trace_arcs: Any
+    warn: Any
+    def __init__(self) -> None: ...
+    def activity(self) -> bool: ...
+    def get_stats(self) -> Dict[str, int]: ...
+    def reset_activity(self) -> Any: ...
+    def start(self) -> TTraceFn: ...
+    def stop(self) -> None: ...
diff --git a/coverage/types.py b/coverage/types.py
new file mode 100644
index 000000000..54c1dfba5
--- /dev/null
+++ b/coverage/types.py
@@ -0,0 +1,168 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""
+Types for use throughout coverage.py.
+"""
+
+from __future__ import annotations
+
+from types import FrameType, ModuleType
+from typing import (
+    Any, Callable, Dict, Iterable, List, Mapping, Optional, Set, Tuple, Union,
+    TYPE_CHECKING,
+)
+
+if TYPE_CHECKING:
+    # Protocol is new in 3.8.  PYVERSIONS
+    from typing import Protocol
+
+    from coverage.plugin import FileTracer
+
+else:
+    class Protocol:             # pylint: disable=missing-class-docstring
+        pass
+
+## Python tracing
+
+class TTraceFn(Protocol):
+    """A Python trace function."""
+    def __call__(
+        self,
+        frame: FrameType,
+        event: str,
+        arg: Any,
+        lineno: Optional[int]=None  # Our own twist, see collector.py
+    ) -> TTraceFn:
+        ...
+
+## Coverage.py tracing
+
+# Line numbers are pervasive enough that they deserve their own type.
+TLineNo = int
+
+TArc = Tuple[TLineNo, TLineNo]
+
+class TFileDisposition(Protocol):
+    """A simple value type for recording what to do with a file."""
+
+    original_filename: str
+    canonical_filename: str
+    source_filename: Optional[str]
+    trace: bool
+    reason: str
+    file_tracer: Optional[FileTracer]
+    has_dynamic_filename: bool
+
+
+# When collecting data, we use a dictionary with a few possible shapes. The
+# keys are always file names.
+# - If measuring line coverage, the values are sets of line numbers.
+# - If measuring arcs in the Python tracer, the values are sets of arcs (pairs
+#   of line numbers).
+# - If measuring arcs in the C tracer, the values are sets of packed arcs (two
+#   line numbers combined into one integer).
+
+TTraceData = Union[
+    Dict[str, Set[TLineNo]],
+    Dict[str, Set[TArc]],
+    Dict[str, Set[int]],
+]
+
+class TTracer(Protocol):
+    """Either CTracer or PyTracer."""
+
+    data: TTraceData
+    trace_arcs: bool
+    should_trace: Callable[[str, FrameType], TFileDisposition]
+    should_trace_cache: Mapping[str, Optional[TFileDisposition]]
+    should_start_context: Optional[Callable[[FrameType], Optional[str]]]
+    switch_context: Optional[Callable[[Optional[str]], None]]
+    warn: TWarnFn
+
+    def __init__(self) -> None:
+        ...
+
+    def start(self) -> TTraceFn:
+        """Start this tracer, returning a trace function."""
+
+    def stop(self) -> None:
+        """Stop this tracer."""
+
+    def activity(self) -> bool:
+        """Has there been any activity?"""
+
+    def reset_activity(self) -> None:
+        """Reset the activity() flag."""
+
+    def get_stats(self) -> Optional[Dict[str, int]]:
+        """Return a dictionary of statistics, or None."""
+
+## Coverage
+
+# Many places use kwargs as Coverage kwargs.
+TCovKwargs = Any
+
+
+## Configuration
+
+# One value read from a config file.
+TConfigValue = Optional[Union[bool, int, float, str, List[str]]]
+# An entire config section, mapping option names to values.
+TConfigSection = Mapping[str, TConfigValue]
+
+class TConfigurable(Protocol):
+    """Something that can proxy to the coverage configuration settings."""
+
+    def get_option(self, option_name: str) -> Optional[TConfigValue]:
+        """Get an option from the configuration.
+
+        `option_name` is a colon-separated string indicating the section and
+        option name.  For example, the ``branch`` option in the ``[run]``
+        section of the config file would be indicated with `"run:branch"`.
+
+        Returns the value of the option.
+
+        """
+
+    def set_option(self, option_name: str, value: Union[TConfigValue, TConfigSection]) -> None:
+        """Set an option in the configuration.
+
+        `option_name` is a colon-separated string indicating the section and
+        option name.  For example, the ``branch`` option in the ``[run]``
+        section of the config file would be indicated with `"run:branch"`.
+
+        `value` is the new value for the option.
+
+        """
+
+## Parsing
+
+TMorf = Union[ModuleType, str]
+
+TSourceTokenLines = Iterable[List[Tuple[str, str]]]
+
+## Plugins
+
+class TPlugin(Protocol):
+    """What all plugins have in common."""
+    _coverage_plugin_name: str
+    _coverage_enabled: bool
+
+
+## Debugging
+
+class TWarnFn(Protocol):
+    """A callable warn() function."""
+    def __call__(self, msg: str, slug: Optional[str]=None, once: bool=False,) -> None:
+        ...
+
+
+class TDebugCtl(Protocol):
+    """A DebugControl object, or something like it."""
+
+    def should(self, option: str) -> bool:
+        """Decide whether to output debug information in category `option`."""
+
+    def write(self, msg: str) -> None:
+        """Write a line of debug output."""
diff --git a/coverage/version.py b/coverage/version.py
index 418407db0..0abece768 100644
--- a/coverage/version.py
+++ b/coverage/version.py
@@ -4,28 +4,46 @@
 """The version and URL for coverage.py"""
 # This file is exec'ed in setup.py, don't import anything!

-# Same semantics as sys.version_info.
-version_info = (6, 5, 0, "final", 0)
-
-
-def _make_version(major, minor, micro, releaselevel, serial):
+# version_info: same semantics as sys.version_info.
+# _dev: the .devN suffix if any.
+version_info = (7, 0, 3, "final", 0)
+_dev = 0
+
+
+def _make_version(
+    major: int,
+    minor: int,
+    micro: int,
+    releaselevel: str="final",
+    serial: int=0,
+    dev: int=0,
+) -> str:
     """Create a readable version string from version_info tuple components."""
     assert releaselevel in ['alpha', 'beta', 'candidate', 'final']
     version = "%d.%d.%d" % (major, minor, micro)
     if releaselevel != 'final':
         short = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}[releaselevel]
         version += f"{short}{serial}"
+    if dev != 0:
+        version += f".dev{dev}"
     return version


-def _make_url(major, minor, micro, releaselevel, serial):
+def _make_url(
+    major: int,
+    minor: int,
+    micro: int,
+    releaselevel: str,
+    serial: int=0,
+    dev: int=0,
+) -> str:
     """Make the URL people should start at for this version of coverage.py."""
     url = "https://coverage.readthedocs.io"
-    if releaselevel != 'final':
+    if releaselevel != "final" or dev != 0:
         # For pre-releases, use a version-specific URL.
-        url += "/en/" + _make_version(major, minor, micro, releaselevel, serial)
+        url += "/en/" + _make_version(major, minor, micro, releaselevel, serial, dev)
     return url


-__version__ = _make_version(*version_info)
-__url__ = _make_url(*version_info)
+__version__ = _make_version(*version_info, _dev)
+__url__ = _make_url(*version_info, _dev)
diff --git a/coverage/xmlreport.py b/coverage/xmlreport.py
index 2c34cb546..19a8dba51 100644
--- a/coverage/xmlreport.py
+++ b/coverage/xmlreport.py
@@ -3,15 +3,26 @@

 """XML reporting for coverage.py"""

+from __future__ import annotations
+
 import os
 import os.path
 import sys
 import time
 import xml.dom.minidom

+from dataclasses import dataclass
+from typing import Dict, IO, Iterable, Optional, TYPE_CHECKING, cast
+
 from coverage import __url__, __version__, files
 from coverage.misc import isolate_module, human_sorted, human_sorted_items
+from coverage.plugin import FileReporter
 from coverage.report import get_analysis_to_report
+from coverage.results import Analysis
+from coverage.types import TMorf
+
+if TYPE_CHECKING:
+    from coverage import Coverage

 os = isolate_module(os)

@@ -19,12 +30,22 @@
 DTD_URL = 'https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd'


-def rate(hit, num):
+def rate(hit: int, num: int) -> str:
     """Return the fraction of `hit`/`num`, as a string."""
     if num == 0:
         return "1"
     else:
-        return "%.4g" % (float(hit) / num)
+        return "%.4g" % (hit / num)
+
+
+@dataclass
+class PackageData:
+    """Data we keep about each "package" (in Java terms)."""
+    elements: Dict[str, xml.dom.minidom.Element]
+    hits: int
+    lines: int
+    br_hits: int
+    branches: int


 class XmlReporter:
@@ -32,7 +53,7 @@ class XmlReporter:

     report_type = "XML report"

-    def __init__(self, coverage):
+    def __init__(self, coverage: Coverage) -> None:
         self.coverage = coverage
         self.config = self.coverage.config

@@ -43,10 +64,10 @@ def __init__(self, coverage):
             if not self.config.relative_files:
                 src = files.canonical_filename(src)
             self.source_paths.add(src)
-        self.packages = {}
-        self.xml_out = None
+        self.packages: Dict[str, PackageData] = {}
+        self.xml_out: xml.dom.minidom.Document

-    def report(self, morfs, outfile=None):
+    def report(self, morfs: Optional[Iterable[TMorf]], outfile: Optional[IO[str]]=None) -> float:
         """Generate a Cobertura-compatible XML report for `morfs`.

         `morfs` is a list of modules or file names.
@@ -60,6 +81,7 @@ def report(self, morfs, outfile=None):

         # Create the DOM that will store the data.
         impl = xml.dom.minidom.getDOMImplementation()
+        assert impl is not None
         self.xml_out = impl.createDocument(None, "coverage", None)

         # Write header stuff.
@@ -93,26 +115,25 @@ def report(self, morfs, outfile=None):

         # Populate the XML DOM with the package info.
         for pkg_name, pkg_data in human_sorted_items(self.packages.items()):
-            class_elts, lhits, lnum, bhits, bnum = pkg_data
             xpackage = self.xml_out.createElement("package")
             xpackages.appendChild(xpackage)
             xclasses = self.xml_out.createElement("classes")
             xpackage.appendChild(xclasses)
-            for _, class_elt in human_sorted_items(class_elts.items()):
+            for _, class_elt in human_sorted_items(pkg_data.elements.items()):
                 xclasses.appendChild(class_elt)
             xpackage.setAttribute("name", pkg_name.replace(os.sep, '.'))
-            xpackage.setAttribute("line-rate", rate(lhits, lnum))
+            xpackage.setAttribute("line-rate", rate(pkg_data.hits, pkg_data.lines))
             if has_arcs:
-                branch_rate = rate(bhits, bnum)
+                branch_rate = rate(pkg_data.br_hits, pkg_data.branches)
             else:
                 branch_rate = "0"
             xpackage.setAttribute("branch-rate", branch_rate)
             xpackage.setAttribute("complexity", "0")

-            lnum_tot += lnum
-            lhits_tot += lhits
-            bnum_tot += bnum
-            bhits_tot += bhits
+            lhits_tot += pkg_data.hits
+            lnum_tot += pkg_data.lines
+            bhits_tot += pkg_data.br_hits
+            bnum_tot += pkg_data.branches

         xcoverage.setAttribute("lines-valid", str(lnum_tot))
         xcoverage.setAttribute("lines-covered", str(lhits_tot))
@@ -138,7 +159,7 @@ def report(self, morfs, outfile=None):
             pct = 100.0 * (lhits_tot + bhits_tot) / denom
         return pct

-    def xml_file(self, fr, analysis, has_arcs):
+    def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None:
         """Add to the XML report for a single file."""

         if self.config.skip_empty:
@@ -149,7 +170,8 @@ def xml_file(self, fr, analysis, has_arcs):
         # are populated later.  Note that a package == a directory.
         filename = fr.filename.replace("\\", "/")
         for source_path in self.source_paths:
-            source_path = files.canonical_filename(source_path)
+            if not self.config.relative_files:
+                source_path = files.canonical_filename(source_path)
             if filename.startswith(source_path.replace("\\", "/") + "/"):
                 rel_name = filename[len(source_path)+1:]
                 break
@@ -161,9 +183,9 @@ def xml_file(self, fr, analysis, has_arcs):
             dirname = "/".join(dirname.split("/")[:self.config.xml_package_depth])
             package_name = dirname.replace("/", ".")

-        package = self.packages.setdefault(package_name, [{}, 0, 0, 0, 0])
+        package = self.packages.setdefault(package_name, PackageData({}, 0, 0, 0, 0))

-        xclass = self.xml_out.createElement("class")
+        xclass: xml.dom.minidom.Element = self.xml_out.createElement("class")

         xclass.appendChild(self.xml_out.createElement("methods"))

@@ -207,8 +229,8 @@ def xml_file(self, fr, analysis, has_arcs):
             missing_branches = sum(t - k for t, k in branch_stats.values())
             class_br_hits = class_branches - missing_branches
         else:
-            class_branches = 0.0
-            class_br_hits = 0.0
+            class_branches = 0
+            class_br_hits = 0

         # Finalize the statistics that are collected in the XML DOM.
         xclass.setAttribute("line-rate", rate(class_hits, class_lines))
@@ -218,13 +240,13 @@ def xml_file(self, fr, analysis, has_arcs):
             branch_rate = "0"
         xclass.setAttribute("branch-rate", branch_rate)

-        package[0][rel_name] = xclass
-        package[1] += class_hits
-        package[2] += class_lines
-        package[3] += class_br_hits
-        package[4] += class_branches
+        package.elements[rel_name] = xclass
+        package.hits += class_hits
+        package.lines += class_lines
+        package.br_hits += class_br_hits
+        package.branches += class_branches


-def serialize_xml(dom):
+def serialize_xml(dom: xml.dom.minidom.Document) -> str:
     """Serialize a minidom node to XML."""
-    return dom.toprettyxml()
+    return cast(str, dom.toprettyxml())
diff --git a/doc/changes.rst b/doc/changes.rst
index 42af57c74..da0f45aef 100644
--- a/doc/changes.rst
+++ b/doc/changes.rst
@@ -383,7 +383,7 @@ Version 5.0a6 — 2019-07-16
   argument, `no_disk` (default: False).  Setting it to True prevents writing
   any data to the disk.  This is useful for transient data objects.

-- Added the classmethod :meth:`.Coverage.current` to get the latest started
+- Added the class method :meth:`.Coverage.current` to get the latest started
  Coverage instance.

- Multiprocessing support in Python 3.8 was broken, but is now fixed.  Closes
@@ -556,7 +556,7 @@ Version 5.0a2 — 2018-09-03

 - Development moved from `Bitbucket`_ to `GitHub`_.

-- HTML files no longer have trailing and extra whitespace.
+- HTML files no longer have trailing and extra white space.

 - The sort order in the HTML report is stored in local storage rather than
   cookies, closing `issue 611`_.  Thanks, Federico Bond.
@@ -794,7 +794,7 @@ Version 4.4b1 — 2017-04-04
   also continue measurement. Both `issue 79`_ and `issue 448`_ described this
   problem, and have been fixed.

-- Plugins can now find unexecuted files if they choose, by implementing the
+- Plugins can now find un-executed files if they choose, by implementing the
   `find_executable_files` method.  Thanks, Emil Madsen.

 - Minimal IronPython support. You should be able to run IronPython programs
@@ -1202,7 +1202,7 @@ Version 4.1b2 — 2016-01-23
 - The XML report now produces correct package names for modules found in
   directories specified with ``source=``.  Fixes `issue 465`_.

-- ``coverage report`` won't produce trailing whitespace.
+- ``coverage report`` won't produce trailing white space.

 .. _issue 465: https://github.com/nedbat/coveragepy/issues/465
 .. _issue 466: https://github.com/nedbat/coveragepy/issues/466
@@ -1532,7 +1532,7 @@ Version 4.0a6 — 2015-06-21
 - Files with incorrect encoding declaration comments are no longer ignored by
   the reporting commands, fixing `issue 351`_.

-- HTML reports now include a timestamp in the footer, closing `issue 299`_.
+- HTML reports now include a time stamp in the footer, closing `issue 299`_.
   Thanks, Conrad Ho.

 - HTML reports now begrudgingly use double-quotes rather than single quotes,
@@ -1685,7 +1685,7 @@ Version 4.0a2 — 2015-01-14
   `issue 328`_.  Thanks, Buck Evan.

 - The regex for matching exclusion pragmas has been fixed to allow more kinds
-  of whitespace, fixing `issue 334`_.
+  of white space, fixing `issue 334`_.

 - Made some PyPy-specific tweaks to improve speed under PyPy.  Thanks, Alex
   Gaynor.
@@ -1739,7 +1739,7 @@ Version 4.0a1 — 2014-09-27
   `issue 285`_.  Thanks, Chris Rose.

 - HTML reports no longer raise UnicodeDecodeError if a Python file has
-  undecodable characters, fixing `issue 303`_ and `issue 331`_.
+  un-decodable characters, fixing `issue 303`_ and `issue 331`_.

 - The annotate command will now annotate all files, not just ones relative to
   the current directory, fixing `issue 57`_.
@@ -1791,7 +1791,7 @@ Version 3.7 — 2013-10-06
 - Coverage.py properly supports .pyw files, fixing `issue 261`_.

 - Omitting files within a tree specified with the ``source`` option would
-  cause them to be incorrectly marked as unexecuted, as described in
+  cause them to be incorrectly marked as un-executed, as described in
   `issue 218`_.  This is now fixed.

 - When specifying paths to alias together during data combining, you can now
@@ -1802,7 +1802,7 @@ Version 3.7 — 2013-10-06
   (``build/$BUILDNUM/src``).

 - Trying to create an XML report with no files to report on, would cause a
-  ZeroDivideError, but no longer does, fixing `issue 250`_.
+  ZeroDivisionError, but no longer does, fixing `issue 250`_.

 - When running a threaded program under the Python tracer, coverage.py no
   longer issues a spurious warning about the trace function changing: "Trace
@@ -1905,7 +1905,7 @@ Version 3.6b1 — 2012-11-28
   Thanks, Marcus Cobden.

 - Coverage percentage metrics are now computed slightly differently under
-  branch coverage.  This means that completely unexecuted files will now
+  branch coverage.  This means that completely un-executed files will now
   correctly have 0% coverage, fixing `issue 156`_.  This also means that your
   total coverage numbers will generally now be lower if you are measuring
   branch coverage.
@@ -2068,7 +2068,7 @@ Version 3.5.2b1 — 2012-04-29
 - Now the exit status of your product code is properly used as the process
   status when running ``python -m coverage run ...``.  Thanks, JT Olds.

-- When installing into pypy, we no longer attempt (and fail) to compile
+- When installing into PyPy, we no longer attempt (and fail) to compile
   the C tracer function, closing `issue 166`_.

 .. _issue 142: https://github.com/nedbat/coveragepy/issues/142
@@ -2234,7 +2234,7 @@ Version 3.4 — 2010-09-19
 Version 3.4b2 — 2010-09-06
 --------------------------

-- Completely unexecuted files can now be included in coverage results, reported
+- Completely un-executed files can now be included in coverage results, reported
   as 0% covered.  This only happens if the --source option is specified, since
   coverage.py needs guidance about where to look for source files.

@@ -2374,7 +2374,7 @@ Version 3.3 — 2010-02-24
   `config_file=False`.

 - Fixed a problem with nested loops having their branch possibilities
-  mischaracterized: `issue 39`_.
+  mis-characterized: `issue 39`_.

 - Added coverage.process_start to enable coverage measurement when Python
   starts.
diff --git a/doc/cmd.rst b/doc/cmd.rst
index c05b7bce9..c1f52ee74 100644
--- a/doc/cmd.rst
+++ b/doc/cmd.rst
@@ -6,6 +6,12 @@
 Running "make prebuild" will bring it up to date.

 .. [[[cog
+    # optparse wraps help to the COLUMNS value. Set it here to be sure it's
+    # consistent regardless of the environment. Has to be set before we
+    # import cmdline.py, which creates the optparse objects.
+    import os
+    os.environ["COLUMNS"] = "80"
+
     import contextlib
     import io
     import re
@@ -342,7 +348,7 @@ single directory, and use the **combine** command to combine them into one

     $ coverage combine

-You can also name directories or files on the command line::
+You can also name directories or files to be combined on the command line::

     $ coverage combine data1.dat windows_data_files/

@@ -364,19 +370,6 @@ An existing combined data file is ignored and re-written. If you want to use
 runs, use the ``--append`` switch on the **combine** command.  This behavior
 was the default before version 4.2.

-To combine data for a source file, coverage has to find its data in each of the
-data files.  Different test runs may run the same source file from different
-locations. For example, different operating systems will use different paths
-for the same file, or perhaps each Python version is run from a different
-subdirectory.  Coverage needs to know that different file paths are actually
-the same source file for reporting purposes.
-
-You can tell coverage.py how different source locations relate with a
-``[paths]`` section in your configuration file (see :ref:`config_paths`).
-It might be more convenient to use the ``[run] relative_files``
-setting to store relative file paths (see :ref:`relative_files
-<config_run_relative_files>`).
-
 If any of the data files can't be read, coverage.py will print a warning
 indicating the file and the problem.

@@ -389,11 +382,10 @@ want to keep those files, use the ``--keep`` command-line option.
     $ coverage combine --help
     Usage: coverage combine [options] ...

-    Combine data from multiple coverage files collected with 'run -p'. The
-    combined results are written to a single file representing the union of the
-    data. The positional arguments are data files or directories containing data
-    files. If no paths are provided, data files in the default data file's
-    directory are combined.
+    Combine data from multiple coverage files. The combined results are written to
+    a single file representing the union of the data. The positional arguments are
+    data files or directories containing data files. If no paths are provided,
+    data files in the default data file's directory are combined.

     Options:
       -a, --append          Append coverage data to .coverage, otherwise it starts
@@ -409,7 +401,29 @@ want to keep those files, use the ``--keep`` command-line option.
       --rcfile=RCFILE       Specify configuration file. By default '.coveragerc',
                             'setup.cfg', 'tox.ini', and 'pyproject.toml' are tried.
                             [env: COVERAGE_RCFILE]
-.. [[[end]]] (checksum: 0ac91b0781d7146b87953f09090dab92)
+.. [[[end]]] (checksum: 0bdd83f647ee76363c955bedd9ddf749)
+
+
+.. _cmd_combine_remapping:
+
+Re-mapping paths
+................
+
+To combine data for a source file, coverage has to find its data in each of the
+data files.  Different test runs may run the same source file from different
+locations. For example, different operating systems will use different paths
+for the same file, or perhaps each Python version is run from a different
+subdirectory.  Coverage needs to know that different file paths are actually
+the same source file for reporting purposes.
+
+You can tell coverage.py how different source locations relate with a
+``[paths]`` section in your configuration file (see :ref:`config_paths`).
+It might be more convenient to use the ``[run] relative_files``
+setting to store relative file paths (see :ref:`relative_files
+<config_run_relative_files>`).
+
+If data isn't combining properly, you can see details about the inner workings
+with ``--debug=pathmap``.


 .. _cmd_erase:
@@ -510,6 +524,8 @@ as a percentage.
                             file. Defaults to '.coverage'. [env: COVERAGE_FILE]
       --fail-under=MIN      Exit with a status of 2 if the total coverage is less
                             than MIN.
+      --format=FORMAT       Output format, either text (default), markdown, or
+                            total.
       -i, --ignore-errors   Ignore errors while reading source files.
       --include=PAT1,PAT2,...
                             Include only files whose paths match one of these
@@ -532,7 +548,7 @@ as a percentage.
       --rcfile=RCFILE       Specify configuration file. By default '.coveragerc',
                             'setup.cfg', 'tox.ini', and 'pyproject.toml' are tried.
                             [env: COVERAGE_RCFILE]
-.. [[[end]]] (checksum: 2f8dde61bab2f44fbfe837aeae87dfd2)
+.. [[[end]]] (checksum: 167272a29d9e7eb017a592a0e0747a06)

 The ``-m`` flag also shows the line numbers of missing statements::

@@ -583,6 +599,12 @@ decimal point in coverage percentages, defaulting to none.

 The ``--sort`` option is the name of a column to sort the report by.

+The ``--format`` option controls the style of the report. ``--format=text``
+creates plain text tables as shown above.  ``--format=markdown`` creates
+Markdown tables.  ``--format=total`` writes out a single number, the total
+coverage percentage as shown at the end of the tables, but without a percent
+sign.
+
 Other common reporting options are described above in :ref:`cmd_reporting`.
 These options can also be set in your .coveragerc file. See
 :ref:`Configuration: [report] <config_report>`.
@@ -1001,7 +1023,7 @@ of operation to log:
 * ``multiproc``: log the start and stop of multiprocessing processes.

 * ``pathmap``: log the remapping of paths that happens during ``coverage
-  combine`` due to the ``[paths]`` setting.  See :ref:`config_paths`.
+  combine``.  See :ref:`config_paths`.

 * ``pid``: annotate all warnings and debug output with the process and thread
   ids.
@@ -1009,7 +1031,8 @@ of operation to log:
 * ``plugin``: print information about plugin operations.

 * ``process``: show process creation information, and changes in the current
-  directory.
+  directory.  This also writes a time stamp and command arguments into the data
+  file.

 * ``pybehave``: show the values of `internal flags <env.py_>`_ describing the
   behavior of the current version of Python.
diff --git a/doc/conf.py b/doc/conf.py
index c9599e0ee..a646070e7 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -57,18 +57,20 @@

 # General information about the project.
 project = 'Coverage.py'
-copyright = '2009\N{EN DASH}2022, Ned Batchelder'  # CHANGEME # pylint: disable=redefined-builtin

 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
-#
-# The short X.Y.Z version.  # CHANGEME
-version = "6.5.0"
-# The full version, including alpha/beta/rc tags.  # CHANGEME
-release = "6.5.0"
-# The date of release, in "monthname day, year" format.  # CHANGEME
-release_date = "September 29, 2022"
+
+# @@@ editable
+copyright = "2009–2023, Ned Batchelder"  # pylint: disable=redefined-builtin
+# The short X.Y.Z version.
+version = "7.0.3"
+# The full version, including alpha/beta/rc tags.
+release = "7.0.3"
+# The date of release, in "monthname day, year" format.
+release_date = "January 3, 2023"
+# @@@ end

 rst_epilog = """
 .. |release_date| replace:: {release_date}
@@ -121,6 +123,15 @@
     'python': ('https://docs.python.org/3', None),
 }

+nitpick_ignore = [
+    ("py:class", "frame"),
+    ("py:class", "module"),
+]
+
+nitpick_ignore_regex = [
+    (r"py:class", r"coverage\..*\..*"),
+]
+
 # -- Options for HTML output ---------------------------------------------------

 # The theme to use for HTML and HTML Help pages.  Major themes that come with
diff --git a/doc/config.rst b/doc/config.rst
index 66b02eacd..90949506a 100644
--- a/doc/config.rst
+++ b/doc/config.rst
@@ -31,10 +31,14 @@ Coverage.py will read settings from other usual configuration files if no other
 configuration file is used.  It will automatically read from "setup.cfg" or
 "tox.ini" if they exist.  In this case, the section names have "coverage:"
 prefixed, so the ``[run]`` options described below will be found in the
-``[coverage:run]`` section of the file.
If coverage.py is installed with the -``toml`` extra (``pip install coverage[toml]``), it will automatically read -from "pyproject.toml". Configuration must be within the ``[tool.coverage]`` -section, for example, ``[tool.coverage.run]``. +``[coverage:run]`` section of the file. + +Coverage.py will read from "pyproject.toml" if TOML support is available, +either because you are running on Python 3.11 or later, or because you +installed with the ``toml`` extra (``pip install coverage[toml]``). +Configuration must be within the ``[tool.coverage]`` section, for example, +``[tool.coverage.run]``. Environment variable expansion in values is +available, but only within quoted strings, even for non-string values. Syntax @@ -218,14 +222,6 @@ measurement or reporting. Ignored if ``source`` is set. See :ref:`source` for details. -.. _config_run_note: - -[run] note -.......... - -(string) This is now obsolete. - - .. _config_run_omit: [run] omit @@ -259,9 +255,9 @@ information. [run] relative_files .................... -(*experimental*, boolean, default False) store relative file paths in the data -file. This makes it easier to measure code in one (or multiple) environments, -and then report in another. See :ref:`cmd_combine` for details. +(boolean, default False) store relative file paths in the data file. This +makes it easier to measure code in one (or multiple) environments, and then +report in another. See :ref:`cmd_combine` for details. Note that setting ``source`` has to be done in the configuration file rather than the command line for this option to work, since the reporting commands @@ -348,12 +344,18 @@ combined with data for "c:\\myproj\\src\\module.py", and will be reported against the source file found at "src/module.py". If you specify more than one list of paths, they will be considered in order. -The first list that has a match will be used. +A file path will only be remapped if the result exists. If a path matches a +list, but the result doesn't exist, the next list will be tried. The first +list that has an existing result will be used. + +Remapping will also be done during reporting, but only within the single data +file being reported. Combining multiple files requires the ``combine`` +command. The ``--debug=pathmap`` option can be used to log details of the re-mapping of paths. See :ref:`the --debug option `. -See :ref:`cmd_combine` for more information. +See :ref:`cmd_combine_remapping` and :ref:`source_glob` for more information. .. _config_report: @@ -414,6 +416,20 @@ warning instead of an exception. See :ref:`source` for details. +.. _config_include_namespace_packages: + +[report] include_namespace_packages +................................... + +(boolean, default False) When searching for completely un-executed files, +include directories without ``__init__.py`` files. These are `implicit +namespace packages`_, and are usually skipped. + +.. _implicit namespace packages: https://peps.python.org/pep-0420/ + +.. versionadded:: 7.0 + + .. _config_report_omit: [report] omit @@ -603,7 +619,7 @@ section also apply to JSON output, where appropriate. [json] pretty_print ................... -(boolean, default false) Controls if the JSON is outputted with whitespace +(boolean, default false) Controls if the JSON is outputted with white space formatted for human consumption (True) or for minimum file size (False). 
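
To make the two subtler rules above concrete, here is a minimal
``pyproject.toml`` sketch (the values are hypothetical, reusing the
section's own "c:\\myproj\\src" and "src/module.py" examples)::

    [tool.coverage.paths]
    source = [
        "src/",
        "c:\\myproj\\src",
        ]

    [tool.coverage.report]
    fail_under = "$FAIL_UNDER"

The quoted string for ``fail_under`` is what allows the environment variable
to be expanded, even though the setting itself is numeric. And with the
``[paths]`` lists, data recorded for "c:\\myproj\\src\\module.py" is reported
against "src/module.py", but only if that remapped file actually exists;
otherwise the next list, if any, is tried.
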
diff --git a/doc/dbschema.rst b/doc/dbschema.rst index 34e0a55da..b576acaaf 100644 --- a/doc/dbschema.rst +++ b/doc/dbschema.rst @@ -66,7 +66,7 @@ This is the database schema: key text, value text, unique (key) - -- Keys: + -- Possible keys: -- 'has_arcs' boolean -- Is this data recording branches? -- 'sys_argv' text -- The coverage command line that recorded the data. -- 'version' text -- The version of coverage.py that made the file. @@ -116,7 +116,7 @@ This is the database schema: foreign key (file_id) references file (id) ); -.. [[[end]]] (checksum: cfce1df016afbb43a5ff94306db56657) +.. [[[end]]] (checksum: 6a04d14b07f08f86cccf43056328dcb7) .. _numbits: diff --git a/doc/dict.txt b/doc/dict.txt index 2c713fe70..63544dcde 100644 --- a/doc/dict.txt +++ b/doc/dict.txt @@ -221,7 +221,6 @@ templite templating testability Tidelift -timestamp todo TODO tokenization @@ -241,7 +240,6 @@ txt ubuntu undecodable unexecutable -unexecuted unicode uninstall unittest @@ -256,7 +254,6 @@ utf vendored versionadded virtualenv -whitespace wikipedia wildcard wildcards diff --git a/doc/faq.rst b/doc/faq.rst index e2fc2f282..b8c2758c5 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -23,11 +23,24 @@ If old data is persisting, you can use an explicit ``coverage erase`` command to clean out the old data. +Q: Why are my function definitions marked as run when I haven't tested them? +............................................................................ + +The ``def`` and ``class`` lines in your Python file are executed when the file +is imported. Those are the lines that define your functions and classes. They +run even if you never call the functions. It's the body of the functions that +will be marked as not executed if you don't test them, not the ``def`` lines. + +This can mean that your code has a moderate coverage total even if no tests +have been written or run. This might seem surprising, but it is accurate: the +``def`` lines have actually been run. + + Q: Why do the bodies of functions show as executed, but the def lines do not? ............................................................................. -This happens because coverage.py is started after the functions are defined. -The definition lines are executed without coverage measurement, then +If this happens, it's because coverage.py has started after the functions are +defined. The definition lines are executed without coverage measurement, then coverage.py is started, then the function is called. This means the body is measured, but the definition of the function itself is not. diff --git a/doc/index.rst b/doc/index.rst index da1562a99..47fe4f1f0 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -18,14 +18,13 @@ supported on: .. PYVERSIONS -* Python versions 3.7 through 3.11.0 rc2. - -* PyPy3 7.3.8. +* Python versions 3.7 through 3.12.0a3. +* PyPy3 7.3.11. .. ifconfig:: prerelease **This is a pre-release build. The usual warnings about possible bugs - apply.** The latest stable version is coverage.py 6.4, `described here`_. + apply.** The latest stable version is coverage.py 6.5.0, `described here`_. .. _described here: http://coverage.readthedocs.io/ diff --git a/doc/python-coverage.1.txt b/doc/python-coverage.1.txt index 47d447304..9d38f4f73 100644 --- a/doc/python-coverage.1.txt +++ b/doc/python-coverage.1.txt @@ -8,7 +8,7 @@ Measure Python code coverage :Author: Ned Batchelder :Author: |author| -:Date: 2022-01-25 +:Date: 2022-12-03 :Copyright: Apache 2.0 license, attribution and disclaimer required. 
:Manual section: 1 :Manual group: Coverage.py @@ -299,6 +299,9 @@ COMMAND REFERENCE \--fail-under `MIN` Exit with a status of 2 if the total coverage is less than `MIN`. + \--format `FORMAT` + Output format, either text (default), markdown, or total. + \-i, --ignore-errors Ignore errors while reading source files. diff --git a/doc/requirements.pip b/doc/requirements.pip index cd8b722cf..c8a729e2f 100644 --- a/doc/requirements.pip +++ b/doc/requirements.pip @@ -1,6 +1,6 @@ # -# This file is autogenerated by pip-compile with python 3.7 -# To update, run: +# This file is autogenerated by pip-compile with Python 3.7 +# by the following command: # # make upgrade # @@ -8,16 +8,14 @@ alabaster==0.7.12 \ --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \ --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02 # via sphinx -babel==2.10.3 \ - --hash=sha256:7614553711ee97490f732126dc077f8d0ae084ebc6a96e23db1482afabdb2c51 \ - --hash=sha256:ff56f4892c1c4bf0d814575ea23471c230d544203c7748e8c68f0089478d48eb +babel==2.11.0 \ + --hash=sha256:1ad3eca1c885218f6dce2ab67291178944f810a10a9b5f3cb8382a5a232b64fe \ + --hash=sha256:5ef4b3226b0180dedded4229651c8b0e1a3a6a2837d45a073272f313e4cf97f6 # via sphinx -certifi==2022.5.18.1 \ - --hash=sha256:9c5705e395cd70084351dd8ad5c41e65655e08ce46f2ec9cf6c2c08390f71eb7 \ - --hash=sha256:f1d53542ee8cbedbe2118b5686372fb33c297fcd6379b050cca0ef13a597382a - # via - # -c doc/../requirements/pins.pip - # requests +certifi==2022.12.7 \ + --hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \ + --hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18 + # via requests charset-normalizer==2.1.1 \ --hash=sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845 \ --hash=sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f @@ -26,9 +24,9 @@ cogapp==3.3.0 \ --hash=sha256:1be95183f70282422d594fa42426be6923070a4bd8335621f6347f3aeee81db0 \ --hash=sha256:8b5b5f6063d8ee231961c05da010cb27c30876b2279e23ad0eae5f8f09460d50 # via -r doc/requirements.in -colorama==0.4.5 \ - --hash=sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da \ - --hash=sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4 +colorama==0.4.6 \ + --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ + --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 # via sphinx-autobuild docutils==0.17.1 \ --hash=sha256:686577d2e4c32380bb50cbb22f575ed742d58168cee37e99117a854bcd88f125 \ @@ -44,9 +42,9 @@ imagesize==1.4.1 \ --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \ --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a # via sphinx -importlib-metadata==4.12.0 \ - --hash=sha256:637245b8bab2b6502fcbc752cc4b7a6f6243bb02b31c5c26156ad103d3d45670 \ - --hash=sha256:7401a975809ea1fdc658c3aa4f78cc2195a0e019c5cbc4c06122884e9ae80c23 +importlib-metadata==5.2.0 \ + --hash=sha256:0eafa39ba42bf225fc00e67f701d71f85aead9f878569caf13c3724f704b970f \ + --hash=sha256:404d48d62bba0b7a77ff9d405efd91501bef2e67ff4ace0bed40a0cf28c3c7cd # via # sphinx # sphinxcontrib-spelling @@ -55,7 +53,8 @@ jinja2==3.1.2 \ --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 # via sphinx livereload==2.6.3 \ - --hash=sha256:776f2f865e59fde56490a56bcc6773b6917366bce0c267c60ee8aaf1a0959869 + --hash=sha256:776f2f865e59fde56490a56bcc6773b6917366bce0c267c60ee8aaf1a0959869 \ + 
--hash=sha256:ad4ac6f53b2d62bb6ce1a5e6e96f1f00976a32348afedcb4b6d68df2a1d346e4 # via sphinx-autobuild markupsafe==2.1.1 \ --hash=sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003 \ @@ -99,9 +98,9 @@ markupsafe==2.1.1 \ --hash=sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a \ --hash=sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7 # via jinja2 -packaging==21.3 \ - --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \ - --hash=sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522 +packaging==22.0 \ + --hash=sha256:2198ec20bd4c017b8f9717e00f0c8714076fc2fd93816750ab48e2c41de2cfd3 \ + --hash=sha256:957e2148ba0e1a3b282772e791ef1d8083648bc131c8ab0c1feba110ce1146c3 # via sphinx pyenchant==3.2.2 \ --hash=sha256:1cf830c6614362a78aab78d50eaf7c6c93831369c52e1bb64ffae1df0341e637 \ @@ -115,13 +114,9 @@ pygments==2.13.0 \ --hash=sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1 \ --hash=sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42 # via sphinx -pyparsing==3.0.9 \ - --hash=sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb \ - --hash=sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc - # via packaging -pytz==2022.2.1 \ - --hash=sha256:220f481bdafa09c3955dfbdddb7b57780e9a94f5127e35456a48589b9e0c0197 \ - --hash=sha256:cea221417204f2d1a2aa03ddae3e867921971d0d76f14d87abb4414415bbdcf5 +pytz==2022.7 \ + --hash=sha256:7ccfae7b4b2c067464a6733c6261673fdb8fd1be905460396b97a073e9fa683a \ + --hash=sha256:93007def75ae22f7cd991c84e02d434876818661f8df9ad5df9e950ff4e52cfd # via babel requests==2.28.1 \ --hash=sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983 \ @@ -135,9 +130,9 @@ snowballstemmer==2.2.0 \ --hash=sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1 \ --hash=sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a # via sphinx -sphinx==5.2.1 \ - --hash=sha256:3dcf00fcf82cf91118db9b7177edea4fc01998976f893928d0ab0c58c54be2ca \ - --hash=sha256:c009bb2e9ac5db487bcf53f015504005a330ff7c631bb6ab2604e0d65bae8b54 +sphinx==5.3.0 \ + --hash=sha256:060ca5c9f7ba57a08a1219e547b269fadf125ae25b06b9fa7f66768efb652d6d \ + --hash=sha256:51026de0a9ff9fc13c05d74913ad66047e104f56a129ff73e174eb5c3ee794b5 # via # -r doc/requirements.in # sphinx-autobuild @@ -148,9 +143,9 @@ sphinx-autobuild==2021.3.14 \ --hash=sha256:8fe8cbfdb75db04475232f05187c776f46f6e9e04cacf1e49ce81bdac649ccac \ --hash=sha256:de1ca3b66e271d2b5b5140c35034c89e47f263f2cd5db302c9217065f7443f05 # via -r doc/requirements.in -sphinx-rtd-theme==1.0.0 \ - --hash=sha256:4d35a56f4508cfee4c4fb604373ede6feae2a306731d533f409ef5c3496fdbd8 \ - --hash=sha256:eec6d497e4c2195fa0e8b2016b337532b8a699a68bcb22a512870e16925c6a5c +sphinx-rtd-theme==1.1.1 \ + --hash=sha256:31faa07d3e97c8955637fc3f1423a5ab2c44b74b8cc558a51498c202ce5cbda7 \ + --hash=sha256:6146c845f1e1947b3c3dd4432c28998a1693ccc742b4f9ad7c63129f0757c103 # via -r doc/requirements.in sphinxcontrib-applehelp==1.0.2 \ --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \ @@ -180,9 +175,9 @@ sphinxcontrib-serializinghtml==1.1.5 \ --hash=sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd \ --hash=sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952 # via sphinx -sphinxcontrib-spelling==7.6.0 \ - --hash=sha256:292cd7e1f73a763451693b4d48c9bded151084f6a91e5337733e9fa8715d20ec \ - 
--hash=sha256:6c1313618412511109f7b76029fbd60df5aa4acf67a2dc9cd1b1016d15e882ff +sphinxcontrib-spelling==7.7.0 \ + --hash=sha256:56561c3f6a155b0946914e4de988729859315729dc181b5e4dc8a68fe78de35a \ + --hash=sha256:95a0defef8ffec6526f9e83b20cc24b08c9179298729d87976891840e3aa3064 # via -r doc/requirements.in tornado==6.2 \ --hash=sha256:1d54d13ab8414ed44de07efecb97d4ef7c39f7438cf5e976ccd356bebb1b5fca \ @@ -197,15 +192,15 @@ tornado==6.2 \ --hash=sha256:d3a2f5999215a3a06a4fc218026cd84c61b8b2b40ac5296a6db1f1451ef04c1e \ --hash=sha256:e5f923aa6a47e133d1cf87d60700889d7eae68988704e20c75fb2d65677a8e4b # via livereload -typing-extensions==4.3.0 \ - --hash=sha256:25642c956049920a5aa49edcdd6ab1e06d7e5d467fc00e0506c44ac86fbfca02 \ - --hash=sha256:e6d2677a32f47fc7eb2795db1dd15c1f34eff616bcaf2cfb5e997f854fa1c4a6 +typing-extensions==4.4.0 \ + --hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \ + --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e # via importlib-metadata -urllib3==1.26.12 \ - --hash=sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e \ - --hash=sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997 +urllib3==1.26.13 \ + --hash=sha256:47cc05d99aaa09c9e72ed5809b60e7ba354e64b59c9c173ac3018642d8bb41fc \ + --hash=sha256:c083dd0dce68dbfbe1129d5271cb90f9447dea7d52097c6e0126120c521ddea8 # via requests -zipp==3.8.1 \ - --hash=sha256:05b45f1ee8f807d0cc928485ca40a07cb491cf092ff587c0df9cb1fd154848d2 \ - --hash=sha256:47c40d7fe183a6f21403a199b3e4192cca5774656965b0a4988ad2f8feb5f009 +zipp==3.11.0 \ + --hash=sha256:83a28fcb75844b5c0cdaf5aa4003c2d728c77e05f5aeabe8e95e56727005fbaa \ + --hash=sha256:a7a22e05929290a67401440b39690ae6563279bced5f314609d9d03798f56766 # via importlib-metadata diff --git a/doc/sample_html/d_7b071bdc2a35fa80___init___py.html b/doc/sample_html/d_7b071bdc2a35fa80___init___py.html index 43bcb6674..ec5faeac1 100644 --- a/doc/sample_html/d_7b071bdc2a35fa80___init___py.html +++ b/doc/sample_html/d_7b071bdc2a35fa80___init___py.html @@ -66,8 +66,8 @@
             ^ index   » next
-            coverage.py v6.5.0,
-            created at 2022-09-29 12:29 -0400
+            coverage.py v7.0.2,
+            created at 2023-01-02 13:05 -0500
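
Finally, the ``--format=total`` mode added to ``coverage report`` in the
``doc/cmd.rst`` hunks above is meant for scripting. A short sketch (the
sample total, threshold, and variable name are all hypothetical)::

    $ coverage report --format=total
    87
    $ total=$(coverage report --format=total)
    $ test "$total" -ge 80 || echo "total coverage dropped below 80%"

Because the number is printed without a percent sign, it can be compared or
interpolated directly; ``--fail-under`` remains the built-in way to turn a
coverage threshold into an exit status.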