From a5da277a8653ecaf5fdc905f67b6503732e7dba7 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sat, 2 Oct 2021 23:13:14 -0400 Subject: [PATCH 01/41] build: bump version --- CHANGES.rst | 6 ++++++ coverage/version.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index d9fcc2b8c..909ccde25 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -21,6 +21,12 @@ want to know what's different in 5.0 since 4.5.x, see :ref:`whatsnew5x`. .. Version 9.8.1 --- 2027-07-27 .. ---------------------------- +Unreleased +---------- + +Nothing yet. + + .. _changes_60: Version 6.0 --- 2021-10-03 diff --git a/coverage/version.py b/coverage/version.py index 98bbda8b5..cad69ce79 100644 --- a/coverage/version.py +++ b/coverage/version.py @@ -5,7 +5,7 @@ # This file is exec'ed in setup.py, don't import anything! # Same semantics as sys.version_info. -version_info = (6, 0, 0, "final", 0) +version_info = (6, 0, 1, "alpha", 0) def _make_version(major, minor, micro, releaselevel, serial): From fb21f8e8f33e9737b2e7f237d203773cda1c6567 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Mon, 4 Oct 2021 08:55:06 -0400 Subject: [PATCH 02/41] docs: the 5.0 change summary shouldn't be so prominent anymore --- CHANGES.rst | 7 ++++--- doc/index.rst | 1 - 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 909ccde25..50a81aaf3 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -9,9 +9,7 @@ These changes are listed in decreasing version number order. Note this can be different from a strict chronological order when there are two branches in development at the same time, such as 4.5.x and 5.0. -This list is detailed and covers changes in each pre-release version. If you -want to know what's different in 5.0 since 4.5.x, see :ref:`whatsnew5x`. - +This list is detailed and covers changes in each pre-release version. .. When updating the "Unreleased" header to a specific version, use this .. format. Don't forget the jump target: @@ -468,6 +466,9 @@ Version 5.0 --- 2019-12-14 Nothing new beyond 5.0b2. +A summary of major changes in 5.0 since 4.5.x is in see :ref:`whatsnew5x`. + + .. _changes_50b2: diff --git a/doc/index.rst b/doc/index.rst index 2b4a6a45c..927248e47 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -222,6 +222,5 @@ More information contributing trouble faq - whatsnew5x changes sleepy From 62116801c3ae2f7bfc6302836e46bdfac681c1a5 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Tue, 5 Oct 2021 06:43:24 -0400 Subject: [PATCH 03/41] build: 3.10.0 is out --- .github/workflows/coverage.yml | 2 +- .github/workflows/kit.yml | 1 + .github/workflows/testsuite.yml | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 5686493e2..c305477bc 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -37,7 +37,7 @@ jobs: - "3.7" - "3.8" - "3.9" - - "3.10.0-rc.2" + - "3.10" - "pypy3" exclude: # Windows PyPy doesn't seem to work? diff --git a/.github/workflows/kit.yml b/.github/workflows/kit.yml index 726cefaca..c6b9e43da 100644 --- a/.github/workflows/kit.yml +++ b/.github/workflows/kit.yml @@ -122,6 +122,7 @@ jobs: prerel: name: "Build ${{ matrix.python-version }} wheels on ${{ matrix.os }}" + if: ${{ false }} # disable for now, since there are no pre-rel Python versions. 
runs-on: ${{ matrix.os }} strategy: matrix: diff --git a/.github/workflows/testsuite.yml b/.github/workflows/testsuite.yml index 49df01e6e..6b0de1b3d 100644 --- a/.github/workflows/testsuite.yml +++ b/.github/workflows/testsuite.yml @@ -35,7 +35,7 @@ jobs: - "3.7" - "3.8" - "3.9" - - "3.10.0-rc.2" + - "3.10" - "pypy3" exclude: # Windows PyPy doesn't seem to work? From 613446ca9da592c6925329b869b9ef785d83f76e Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Tue, 5 Oct 2021 19:43:00 -0400 Subject: [PATCH 04/41] fix: pretend we didn't import third-party packages we use. #1228 tomli couldn't use coverage themselves because we imported it early. Cleaning sys.modules means their own imports will actually execute after coverage has started, so their files will be properly measured. --- CHANGES.rst | 5 ++++- coverage/misc.py | 27 +++++++++++++++++++++++++++ coverage/tomlconfig.py | 7 ++----- tests/test_misc.py | 20 +++++++++++++++++++- 4 files changed, 52 insertions(+), 7 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 50a81aaf3..937d2d0fa 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -22,7 +22,10 @@ This list is detailed and covers changes in each pre-release version. Unreleased ---------- -Nothing yet. +- Changed an internal detail of how tomli is imported, so that tomli can use + coverage.py for their own test suite (`issue 1228`_). + +.. _issue 1228: https://github.com/nedbat/coveragepy/issues/1228 .. _changes_60: diff --git a/coverage/misc.py b/coverage/misc.py index 11dad23e0..cd4a77401 100644 --- a/coverage/misc.py +++ b/coverage/misc.py @@ -5,6 +5,7 @@ import errno import hashlib +import importlib import importlib.util import inspect import locale @@ -43,6 +44,32 @@ def isolate_module(mod): os = isolate_module(os) +def import_third_party(modname): + """Import a third-party module we need, but might not be installed. + + This also cleans out the module after the import, so that coverage won't + appear to have imported it. This lets the third party use coverage for + their own tests. + + Arguments: + modname (str): the name of the module to import. + + Returns: + The imported module, or None if the module couldn't be imported. + + """ + try: + mod = importlib.import_module(modname) + except ImportError: + mod = None + + imported = [m for m in sys.modules if m.startswith(modname)] + for name in imported: + del sys.modules[name] + + return mod + + def dummy_decorator_with_args(*args_unused, **kwargs_unused): """Dummy no-op implementation of a decorator with arguments.""" def _decorator(func): diff --git a/coverage/tomlconfig.py b/coverage/tomlconfig.py index 203192c93..3301acc8e 100644 --- a/coverage/tomlconfig.py +++ b/coverage/tomlconfig.py @@ -8,13 +8,10 @@ import re from coverage.exceptions import CoverageException -from coverage.misc import substitute_variables +from coverage.misc import import_third_party, substitute_variables # TOML support is an install-time extra option. 
-try: - import tomli -except ImportError: # pragma: not covered - tomli = None +tomli = import_third_party("tomli") class TomlDecodeError(Exception): diff --git a/tests/test_misc.py b/tests/test_misc.py index 3858c4f8b..077c24344 100644 --- a/tests/test_misc.py +++ b/tests/test_misc.py @@ -3,11 +3,13 @@ """Tests of miscellaneous stuff.""" +import sys + import pytest from coverage.exceptions import CoverageException from coverage.misc import contract, dummy_decorator_with_args, file_be_gone -from coverage.misc import Hasher, one_of, substitute_variables +from coverage.misc import Hasher, one_of, substitute_variables, import_third_party from coverage.misc import USE_CONTRACTS from tests.coveragetest import CoverageTest @@ -155,3 +157,19 @@ def test_substitute_variables_errors(text): substitute_variables(text, VARS) assert text in str(exc_info.value) assert "Variable NOTHING is undefined" in str(exc_info.value) + + +class ImportThirdPartyTest(CoverageTest): + """Test import_third_party.""" + + run_in_temp_dir = False + + def test_success(self): + mod = import_third_party("pytest") + assert mod.__name__ == "pytest" + assert "pytest" not in sys.modules + + def test_failure(self): + mod = import_third_party("xyzzy") + assert mod is None + assert "xyzzy" not in sys.modules From 19545b7d78fb91a82088517681e20cf4ffcd8c63 Mon Sep 17 00:00:00 2001 From: glacials Date: Fri, 6 Aug 2021 14:17:26 -0700 Subject: [PATCH 05/41] Fix an incompatibility with pyarmor --- coverage/context.py | 2 +- tests/test_context.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/coverage/context.py b/coverage/context.py index 45e86a5c1..43d2b1cc7 100644 --- a/coverage/context.py +++ b/coverage/context.py @@ -48,7 +48,7 @@ def qualname_from_frame(frame): fname = co.co_name method = None if co.co_argcount and co.co_varnames[0] == "self": - self = frame.f_locals["self"] + self = frame.f_locals.get("self", None) method = getattr(self, fname, None) if method is None: diff --git a/tests/test_context.py b/tests/test_context.py index 3f80803bd..de972819f 100644 --- a/tests/test_context.py +++ b/tests/test_context.py @@ -5,6 +5,7 @@ import inspect import os.path +from unittest import mock import coverage from coverage.context import qualname_from_frame @@ -275,3 +276,8 @@ def test_bug_829(self): # A class with a name like a function shouldn't confuse qualname_from_frame. class test_something: # pylint: disable=unused-variable assert get_qualname() is None + + def test_bug_1210(self): + co = mock.Mock(co_name="a_co_name", co_argcount=1, co_varnames=["self"]) + frame = mock.Mock(f_code = co, f_locals={}) + assert qualname_from_frame(frame) == "unittest.mock.a_co_name" From f33b733e92a2422d64cb7f4ba2a64898e1e4f336 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Wed, 6 Oct 2021 16:55:18 -0400 Subject: [PATCH 06/41] docs: note #1210 in the changelog --- CHANGES.rst | 5 +++++ CONTRIBUTORS.txt | 1 + tests/test_context.py | 4 +++- 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 937d2d0fa..784fa5c29 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -25,7 +25,12 @@ Unreleased - Changed an internal detail of how tomli is imported, so that tomli can use coverage.py for their own test suite (`issue 1228`_). +- Defend against an obscure possibility under code obfuscation, where a + function can have an argument called "self", but no local named "self" + (`pull request 1210`_). Thanks, Ben Carlsson. + .. _issue 1228: https://github.com/nedbat/coveragepy/issues/1228 +.. 
_pull request 1210: https://github.com/nedbat/coveragepy/pull/1210 .. _changes_60: diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt index 2642e6b1a..1c1fe0e9c 100644 --- a/CONTRIBUTORS.txt +++ b/CONTRIBUTORS.txt @@ -20,6 +20,7 @@ Arcadiy Ivanov Aron Griffis Artem Dayneko Arthur Deygin +Ben Carlsson Ben Finney Bernát Gábor Bill Hart diff --git a/tests/test_context.py b/tests/test_context.py index de972819f..36eff2f0d 100644 --- a/tests/test_context.py +++ b/tests/test_context.py @@ -278,6 +278,8 @@ class test_something: # pylint: disable=unused-variable assert get_qualname() is None def test_bug_1210(self): + # Under pyarmor (an obfuscator), a function can have a "self" argument, + # but then not have a "self" local. co = mock.Mock(co_name="a_co_name", co_argcount=1, co_varnames=["self"]) - frame = mock.Mock(f_code = co, f_locals={}) + frame = mock.Mock(f_code=co, f_locals={}) assert qualname_from_frame(frame) == "unittest.mock.a_co_name" From a309f08287e3bc3f50e2c97feaa44e6b2523d355 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Wed, 6 Oct 2021 20:31:19 -0400 Subject: [PATCH 07/41] fix: make exceptions importable from coverage.misc again. #1226 --- CHANGES.rst | 9 ++++++++- coverage/misc.py | 5 +++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 784fa5c29..3dea0df17 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -22,6 +22,12 @@ This list is detailed and covers changes in each pre-release version. Unreleased ---------- +- In 6.0, the coverage.py exceptions moved from coverage.misc to + coverage.exceptions. These exceptions are not part of the public supported + API, CoverageException is. But a number of other third-party packages were + importing the exceptions from coverage.misc, so they are now available from + there again (`issue 1226`_). + - Changed an internal detail of how tomli is imported, so that tomli can use coverage.py for their own test suite (`issue 1228`_). @@ -29,8 +35,9 @@ Unreleased function can have an argument called "self", but no local named "self" (`pull request 1210`_). Thanks, Ben Carlsson. -.. _issue 1228: https://github.com/nedbat/coveragepy/issues/1228 .. _pull request 1210: https://github.com/nedbat/coveragepy/pull/1210 +.. _issue 1226: https://github.com/nedbat/coveragepy/issues/1226 +.. _issue 1228: https://github.com/nedbat/coveragepy/issues/1228 .. _changes_60: diff --git a/coverage/misc.py b/coverage/misc.py index cd4a77401..0f985be0e 100644 --- a/coverage/misc.py +++ b/coverage/misc.py @@ -20,6 +20,11 @@ from coverage import env from coverage.exceptions import CoverageException +# In 6.0, the exceptions moved from misc.py to exceptions.py. But a number of +# other packages were importing the exceptions from misc, so import them here. +# pylint: disable=unused-wildcard-import +from coverage.exceptions import * # pylint: disable=wildcard-import + ISOLATED_MODULES = {} From 72200e21e58bf358884a28e77f6169a04b206f0f Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Wed, 6 Oct 2021 20:41:02 -0400 Subject: [PATCH 08/41] docs: this document isn't in a toc, and that's ok --- doc/whatsnew5x.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/whatsnew5x.rst b/doc/whatsnew5x.rst index bf0fe6cae..f49739ef0 100644 --- a/doc/whatsnew5x.rst +++ b/doc/whatsnew5x.rst @@ -1,6 +1,8 @@ .. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt +:orphan: + .. 
_whatsnew5x: ==================== From 78a9c68a969465cd7197c40405bf6b90d4767c34 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Wed, 6 Oct 2021 20:42:39 -0400 Subject: [PATCH 09/41] build: prep for 6.0.1 --- CHANGES.rst | 6 ++++-- coverage/version.py | 2 +- doc/conf.py | 4 ++-- doc/index.rst | 2 +- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 3dea0df17..9d810dbde 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -19,8 +19,10 @@ This list is detailed and covers changes in each pre-release version. .. Version 9.8.1 --- 2027-07-27 .. ---------------------------- -Unreleased ----------- +.. _changes_601: + +Version 6.0.1 --- 2021-10-06 +---------------------------- - In 6.0, the coverage.py exceptions moved from coverage.misc to coverage.exceptions. These exceptions are not part of the public supported diff --git a/coverage/version.py b/coverage/version.py index cad69ce79..c9b537e37 100644 --- a/coverage/version.py +++ b/coverage/version.py @@ -5,7 +5,7 @@ # This file is exec'ed in setup.py, don't import anything! # Same semantics as sys.version_info. -version_info = (6, 0, 1, "alpha", 0) +version_info = (6, 0, 1, "final", 0) def _make_version(major, minor, micro, releaselevel, serial): diff --git a/doc/conf.py b/doc/conf.py index 5874b6376..5109ff726 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -67,9 +67,9 @@ # The short X.Y version. version = "6.0" # CHANGEME # The full version, including alpha/beta/rc tags. -release = "6.0" # CHANGEME +release = "6.0.1" # CHANGEME # The date of release, in "monthname day, year" format. -release_date = "October 3, 2021" # CHANGEME +release_date = "October 6, 2021" # CHANGEME rst_epilog = """ .. |release_date| replace:: {release_date} diff --git a/doc/index.rst b/doc/index.rst index 927248e47..dc34c3f79 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -23,7 +23,7 @@ supported on: .. ifconfig:: prerelease **This is a pre-release build. The usual warnings about possible bugs - apply.** The latest stable version is coverage.py 6.0, `described here`_. + apply.** The latest stable version is coverage.py 6.0.1, `described here`_. .. _described here: http://coverage.readthedocs.io/ From 6a47234fa274a02c9e958a2e53216671635f0849 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Wed, 6 Oct 2021 21:17:00 -0400 Subject: [PATCH 10/41] build: bump version --- CHANGES.rst | 6 ++++++ coverage/version.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 9d810dbde..b84ee0fad 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -19,6 +19,12 @@ This list is detailed and covers changes in each pre-release version. .. Version 9.8.1 --- 2027-07-27 .. ---------------------------- +Unreleased +---------- + +Nothing yet. + + .. _changes_601: Version 6.0.1 --- 2021-10-06 diff --git a/coverage/version.py b/coverage/version.py index c9b537e37..3ab3d50ab 100644 --- a/coverage/version.py +++ b/coverage/version.py @@ -5,7 +5,7 @@ # This file is exec'ed in setup.py, don't import anything! # Same semantics as sys.version_info. 
-version_info = (6, 0, 1, "final", 0) +version_info = (6, 0, 2, "alpha", 0) def _make_version(major, minor, micro, releaselevel, serial): From 56f850956513578b2a73ac1e1be5e3c9483432e6 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Fri, 8 Oct 2021 07:38:19 -0400 Subject: [PATCH 11/41] build: make "clean" targets be quiet --- Makefile | 41 ++++++++++++++++++++-------------------- tests/gold/html/Makefile | 2 +- 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/Makefile b/Makefile index d65fc3f50..f9f076c47 100644 --- a/Makefile +++ b/Makefile @@ -8,28 +8,29 @@ help: ## Show this help. @grep '^[a-zA-Z]' $(MAKEFILE_LIST) | sort | awk -F ':.*?## ' 'NF==2 {printf " %-26s%s\n", $$1, $$2}' clean_platform: ## Remove files that clash across platforms. - rm -f *.so */*.so - rm -rf __pycache__ */__pycache__ */*/__pycache__ */*/*/__pycache__ */*/*/*/__pycache__ */*/*/*/*/__pycache__ - rm -f *.pyc */*.pyc */*/*.pyc */*/*/*.pyc */*/*/*/*.pyc */*/*/*/*/*.pyc - rm -f *.pyo */*.pyo */*/*.pyo */*/*/*.pyo */*/*/*/*.pyo */*/*/*/*/*.pyo + @rm -f *.so */*.so + @rm -rf __pycache__ */__pycache__ */*/__pycache__ */*/*/__pycache__ */*/*/*/__pycache__ */*/*/*/*/__pycache__ + @rm -f *.pyc */*.pyc */*/*.pyc */*/*/*.pyc */*/*/*/*.pyc */*/*/*/*/*.pyc + @rm -f *.pyo */*.pyo */*/*.pyo */*/*/*.pyo */*/*/*/*.pyo */*/*/*/*/*.pyo clean: clean_platform ## Remove artifacts of test execution, installation, etc. - -pip uninstall -y coverage - rm -f *.pyd */*.pyd - rm -rf build coverage.egg-info dist htmlcov - rm -f *.bak */*.bak */*/*.bak */*/*/*.bak */*/*/*/*.bak */*/*/*/*/*.bak - rm -f *$$py.class */*$$py.class */*/*$$py.class */*/*/*$$py.class */*/*/*/*$$py.class */*/*/*/*/*$$py.class - rm -f coverage/*,cover - rm -f MANIFEST - rm -f .coverage .coverage.* coverage.xml .metacov* - rm -f .tox/*/lib/*/site-packages/zzz_metacov.pth - rm -f */.coverage */*/.coverage */*/*/.coverage */*/*/*/.coverage */*/*/*/*/.coverage */*/*/*/*/*/.coverage - rm -f tests/covmain.zip tests/zipmods.zip tests/zip1.zip - rm -rf doc/_build doc/_spell doc/sample_html_beta - rm -rf tmp - rm -rf .cache .pytest_cache .hypothesis - rm -rf tests/actual - -make -C tests/gold/html clean + @echo "Cleaning..." + @-pip uninstall -yq coverage + @rm -f *.pyd */*.pyd + @rm -rf build coverage.egg-info dist htmlcov + @rm -f *.bak */*.bak */*/*.bak */*/*/*.bak */*/*/*/*.bak */*/*/*/*/*.bak + @rm -f *$$py.class */*$$py.class */*/*$$py.class */*/*/*$$py.class */*/*/*/*$$py.class */*/*/*/*/*$$py.class + @rm -f coverage/*,cover + @rm -f MANIFEST + @rm -f .coverage .coverage.* coverage.xml .metacov* + @rm -f .tox/*/lib/*/site-packages/zzz_metacov.pth + @rm -f */.coverage */*/.coverage */*/*/.coverage */*/*/*/.coverage */*/*/*/*/.coverage */*/*/*/*/*/.coverage + @rm -f tests/covmain.zip tests/zipmods.zip tests/zip1.zip + @rm -rf doc/_build doc/_spell doc/sample_html_beta + @rm -rf tmp + @rm -rf .cache .pytest_cache .hypothesis + @rm -rf tests/actual + @-make -C tests/gold/html clean sterile: clean ## Remove all non-controlled content, even if expensive. rm -rf .tox diff --git a/tests/gold/html/Makefile b/tests/gold/html/Makefile index c10ede3f6..fc3487cdc 100644 --- a/tests/gold/html/Makefile +++ b/tests/gold/html/Makefile @@ -15,7 +15,7 @@ complete: ## Copy support files into directories so the HTML can be viewed prop true # because the for loop exits with 1 for some reason. clean: ## Remove the effects of this Makefile. - git clean -fq . + @git clean -fq . update-gold: ## Copy output files from latest tests to gold files. 
echo Note: this doesn't work now, it has to be updated for tests/actual From 6217af70939d939b6abfc4abfe32a03bc47da967 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Fri, 8 Oct 2021 07:38:38 -0400 Subject: [PATCH 12/41] test: canonicalize this bit of debug output --- coverage/files.py | 2 +- tests/test_files.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/coverage/files.py b/coverage/files.py index 252e42ec5..c4fb33b3f 100644 --- a/coverage/files.py +++ b/coverage/files.py @@ -199,7 +199,7 @@ class TreeMatcher: """ def __init__(self, paths, name): - self.original_paths = list(paths) + self.original_paths = sorted(paths) self.paths = list(map(os.path.normcase, paths)) self.name = name diff --git a/tests/test_files.py b/tests/test_files.py index 39a51d8c2..d6005d364 100644 --- a/tests/test_files.py +++ b/tests/test_files.py @@ -172,7 +172,7 @@ def test_tree_matcher(self): files.canonical_filename("SUB5/file6.py"), ] tm = TreeMatcher(trees, "test") - assert tm.info() == trees + assert tm.info() == sorted(trees) for filepath, matches in matches_to_try: self.assertMatches(tm, filepath, matches) From c163cdc44980bb39ec8b9031a38f58ed98766419 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Fri, 8 Oct 2021 09:24:52 -0400 Subject: [PATCH 13/41] test: make metacov work with xdist I was using pytest.__file__ as a directory, but in 5.3.2, pytest changed to be a package, so that wasn't the site-packages directory anymore, and our .pth file was written someplace useless. Now we don't rely on the structure of pytest (why did we ever?), and it works again. --- .github/workflows/coverage.yml | 4 +--- igor.py | 3 ++- tests/test_process.py | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index c305477bc..e7dd828fb 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -73,9 +73,7 @@ jobs: COVERAGE_CONTEXT: "${{ matrix.python-version }}.${{ matrix.os }}" run: | set -xe - # Something about pytest 6.x with xdist keeps data from collecting. - # Use -n0 for now. - python -m tox -- -n 0 + python -m tox - name: "Upload coverage data" uses: actions/upload-artifact@v2 diff --git a/igor.py b/igor.py index 0ea93e403..58774036f 100644 --- a/igor.py +++ b/igor.py @@ -15,6 +15,7 @@ import os import platform import sys +import sysconfig import textwrap import warnings import zipfile @@ -143,7 +144,7 @@ def run_tests_with_coverage(tracer, *runner_args): # The .pth file seems to have to be alphabetically after easy-install.pth # or the sys.path entries aren't created right? # There's an entry in "make clean" to get rid of this file. - pth_dir = os.path.dirname(pytest.__file__) + pth_dir = sysconfig.get_path("purelib") pth_path = os.path.join(pth_dir, "zzz_metacov.pth") with open(pth_path, "w") as pth_file: pth_file.write("import coverage; coverage.process_startup()\n") diff --git a/tests/test_process.py b/tests/test_process.py index 5510efe56..af2d3e784 100644 --- a/tests/test_process.py +++ b/tests/test_process.py @@ -1411,7 +1411,7 @@ def possible_pth_dirs(): # If we're still looking, then try the Python library directory. 
# https://github.com/nedbat/coveragepy/issues/339 - yield sysconfig.get_python_lib() # pragma: cant happen + yield sysconfig.get_path("purelib") # pragma: cant happen def find_writable_pth_directory(): From ee5d7ba0ad86068f9413a54659a2f8a58cd97f31 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Fri, 8 Oct 2021 11:26:14 -0400 Subject: [PATCH 14/41] test: this xdist defensiveness now seems unneeded --- tests/conftest.py | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 81c13dd7c..75adf3f24 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -7,7 +7,6 @@ This module is run automatically by pytest, to define and enable fixtures. """ -import os import sys import warnings @@ -66,31 +65,6 @@ def reset_sys_path(): sys.path[:] = sys_path -@pytest.fixture(autouse=True) -def fix_xdist_sys_path(): - """Prevent xdist from polluting the Python path. - - We run tests that care a lot about the contents of sys.path. Pytest-xdist - changes sys.path, so running with xdist, vs without xdist, sets sys.path - differently. With xdist, sys.path[1] is an empty string, without xdist, - it's the virtualenv bin directory. We don't want the empty string, so - clobber that entry. - - See: https://github.com/pytest-dev/pytest-xdist/issues/376 - - """ - if os.environ.get('PYTEST_XDIST_WORKER', ''): # pragma: part covered - # We are running in an xdist worker. - if sys.path[1] == '': - # xdist has set sys.path[1] to ''. Clobber it. - del sys.path[1] - # Also, don't let it sneak stuff in via PYTHONPATH. - try: - del os.environ['PYTHONPATH'] - except KeyError: - pass - - @pytest.hookimpl(hookwrapper=True) def pytest_runtest_call(item): """Convert StopEverything into skipped tests.""" From 35b28c01047f644cd4f1f3e10881ce14e5df87fd Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Fri, 8 Oct 2021 11:43:00 -0400 Subject: [PATCH 15/41] test: mark some uncovered things --- coverage/cmdline.py | 3 +++ coverage/misc.py | 4 +++- coverage/parser.py | 8 ++++++-- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/coverage/cmdline.py b/coverage/cmdline.py index 1fa52a976..eae22f2f2 100644 --- a/coverage/cmdline.py +++ b/coverage/cmdline.py @@ -646,6 +646,9 @@ def command_line(self, argv): show_contexts=options.show_contexts, **report_args ) + else: + # There are no other possible actions. + raise AssertionError if total is not None: # Apply the command line fail-under options, and then use the config diff --git a/coverage/misc.py b/coverage/misc.py index 0f985be0e..30b757448 100644 --- a/coverage/misc.py +++ b/coverage/misc.py @@ -329,10 +329,12 @@ def substitute_variables(text, variables): ) """ + dollar_groups = ('dollar', 'word1', 'word2') + def dollar_replace(match): """Called for each $replacement.""" # Only one of the groups will have matched, just get its text. - word = next(g for g in match.group('dollar', 'word1', 'word2') if g) + word = next(g for g in match.group(*dollar_groups) if g) # pragma: always breaks if word == "$": return "$" elif word in variables: diff --git a/coverage/parser.py b/coverage/parser.py index 8792d0ac0..5c467a7ea 100644 --- a/coverage/parser.py +++ b/coverage/parser.py @@ -437,11 +437,15 @@ class BlockBase: # pylint: disable=unused-argument def process_break_exits(self, exits, add_arc): """Process break exits.""" - return False + # Because break can only appear in loops, and most subclasses + # implement process_break_exits, this function is never reached. 
+ raise AssertionError def process_continue_exits(self, exits, add_arc): """Process continue exits.""" - return False + # Because continue can only appear in loops, and most subclasses + # implement process_continue_exits, this function is never reached. + raise AssertionError def process_raise_exits(self, exits, add_arc): """Process raise exits.""" From 444e26d7aaf1de8d130987cd8794a664a5d95b09 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Fri, 8 Oct 2021 11:43:08 -0400 Subject: [PATCH 16/41] refactor: this import was only needed for Python 2 --- coverage/debug.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/coverage/debug.py b/coverage/debug.py index da4093ffa..74b59d0e5 100644 --- a/coverage/debug.py +++ b/coverage/debug.py @@ -12,10 +12,7 @@ import pprint import reprlib import sys -try: - import _thread -except ImportError: - import thread as _thread +import _thread from coverage.misc import isolate_module From 61a09e755487b0db476a544f04280b8e22013d5b Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sat, 9 Oct 2021 13:16:55 -0400 Subject: [PATCH 17/41] test: set_query_contexts takes regex, make the examples look like regex --- tests/test_data.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/test_data.py b/tests/test_data.py index 9b5d3d053..25f0f57e5 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -180,9 +180,9 @@ def test_set_query_contexts(self): covdata = CoverageData() covdata.set_context('test_a') covdata.add_lines(LINES_1) - covdata.set_query_contexts(['test_*']) + covdata.set_query_contexts(['te.*a']) assert covdata.lines('a.py') == [1, 2] - covdata.set_query_contexts(['other*']) + covdata.set_query_contexts(['other']) assert covdata.lines('a.py') == [] def test_no_lines_vs_unmeasured_file(self): @@ -197,9 +197,9 @@ def test_lines_with_contexts(self): covdata.set_context('test_a') covdata.add_lines(LINES_1) assert covdata.lines('a.py') == [1, 2] - covdata.set_query_contexts(['test*']) + covdata.set_query_contexts(['test']) assert covdata.lines('a.py') == [1, 2] - covdata.set_query_contexts(['other*']) + covdata.set_query_contexts(['other']) assert covdata.lines('a.py') == [] def test_contexts_by_lineno_with_lines(self): @@ -240,9 +240,9 @@ def test_arcs_with_contexts(self): covdata.set_context('test_x') covdata.add_arcs(ARCS_3) assert covdata.arcs('x.py') == [(-1, 1), (1, 2), (2, 3), (3, -1)] - covdata.set_query_contexts(['test*']) + covdata.set_query_contexts(['test_.$']) assert covdata.arcs('x.py') == [(-1, 1), (1, 2), (2, 3), (3, -1)] - covdata.set_query_contexts(['other*']) + covdata.set_query_contexts(['other']) assert covdata.arcs('x.py') == [] def test_contexts_by_lineno_with_arcs(self): From 57879ab073195c34a24b3e1a8129f5bf095f214d Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sat, 9 Oct 2021 14:35:40 -0400 Subject: [PATCH 18/41] style: make string quotes uniform --- coverage/sqldata.py | 145 ++++++++++++++++++++++---------------------- 1 file changed, 71 insertions(+), 74 deletions(-) diff --git a/coverage/sqldata.py b/coverage/sqldata.py index 3fe5317e2..412a9eb7e 100644 --- a/coverage/sqldata.py +++ b/coverage/sqldata.py @@ -259,7 +259,7 @@ def _create_db(self): Initializes the schema and certain metadata. 
""" - if self._debug.should('dataio'): + if self._debug.should("dataio"): self._debug.write(f"Creating data file {self._filename!r}") self._dbs[threading.get_ident()] = db = SqliteDb(self._filename, self._debug) with db: @@ -268,15 +268,15 @@ def _create_db(self): db.executemany( "insert into meta (key, value) values (?, ?)", [ - ('sys_argv', str(getattr(sys, 'argv', None))), - ('version', __version__), - ('when', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')), + ("sys_argv", str(getattr(sys, "argv", None))), + ("version", __version__), + ("when", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")), ] ) def _open_db(self): """Open an existing db file, and read its metadata.""" - if self._debug.should('dataio'): + if self._debug.should("dataio"): self._debug.write(f"Opening data file {self._filename!r}") self._dbs[threading.get_ident()] = SqliteDb(self._filename, self._debug) self._read_db() @@ -328,7 +328,7 @@ def __nonzero__(self): __bool__ = __nonzero__ - @contract(returns='bytes') + @contract(returns="bytes") def dumps(self): """Serialize the current data to a byte string. @@ -346,12 +346,12 @@ def dumps(self): .. versionadded:: 5.0 """ - if self._debug.should('dataio'): + if self._debug.should("dataio"): self._debug.write(f"Dumping data from data file {self._filename!r}") with self._connect() as con: - return b'z' + zlib.compress(con.dump().encode("utf8")) + return b"z" + zlib.compress(con.dump().encode("utf8")) - @contract(data='bytes') + @contract(data="bytes") def loads(self, data): """Deserialize data from :meth:`dumps`. @@ -367,9 +367,9 @@ def loads(self, data): .. versionadded:: 5.0 """ - if self._debug.should('dataio'): + if self._debug.should("dataio"): self._debug.write(f"Loading data into data file {self._filename!r}") - if data[:1] != b'z': + if data[:1] != b"z": raise CoverageException( f"Unrecognized serialization: {data[:40]!r} (head of {len(data)} bytes)" ) @@ -414,7 +414,7 @@ def set_context(self, context): .. versionadded:: 5.0 """ - if self._debug.should('dataop'): + if self._debug.should("dataop"): self._debug.write(f"Setting context: {context!r}") self._current_context = context self._current_context_id = None @@ -455,7 +455,7 @@ def add_lines(self, line_data): { filename: { line1, line2, ... }, ...} """ - if self._debug.should('dataop'): + if self._debug.should("dataop"): self._debug.write("Adding lines: %d files, %d lines total" % ( len(line_data), sum(len(lines) for lines in line_data.values()) )) @@ -474,7 +474,7 @@ def add_lines(self, line_data): linemap = numbits_union(linemap, existing[0][0]) con.execute( - "insert or replace into line_bits " + "insert or replace into line_bits " + " (file_id, context_id, numbits) values (?, ?, ?)", (file_id, self._current_context_id, linemap), ) @@ -489,7 +489,7 @@ def add_arcs(self, arc_data): { filename: { (l1,l2), (l1,l2), ... 
}, ...} """ - if self._debug.should('dataop'): + if self._debug.should("dataop"): self._debug.write("Adding arcs: %d files, %d arcs total" % ( len(arc_data), sum(len(arcs) for arcs in arc_data.values()) )) @@ -503,7 +503,7 @@ def add_arcs(self, arc_data): file_id = self._file_id(filename, add=True) data = [(file_id, self._current_context_id, fromno, tono) for fromno, tono in arcs] con.executemany( - "insert or ignore into arc " + "insert or ignore into arc " + "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)", data, ) @@ -522,7 +522,7 @@ def _choose_lines_or_arcs(self, lines=False, arcs=False): with self._connect() as con: con.execute( "insert into meta (key, value) values (?, ?)", - ('has_arcs', str(int(arcs))) + ("has_arcs", str(int(arcs))) ) @_locked @@ -532,7 +532,7 @@ def add_file_tracers(self, file_tracers): `file_tracers` is { filename: plugin_name, ... } """ - if self._debug.should('dataop'): + if self._debug.should("dataop"): self._debug.write("Adding file tracers: %d files" % (len(file_tracers),)) if not file_tracers: return @@ -573,7 +573,7 @@ def touch_files(self, filenames, plugin_name=""): `plugin_name` is the name of the plugin responsible for these files. It is used to associate the right filereporter, etc. """ - if self._debug.should('dataop'): + if self._debug.should("dataop"): self._debug.write(f"Touching {filenames!r}") self._start_using() with self._connect(): # Use this to get one transaction. @@ -592,9 +592,9 @@ def update(self, other_data, aliases=None): If `aliases` is provided, it's a `PathAliases` object that is used to re-map paths to match the local machine's. """ - if self._debug.should('dataop'): + if self._debug.should("dataop"): self._debug.write("Updating with data from {!r}".format( - getattr(other_data, '_filename', '???'), + getattr(other_data, "_filename", "???"), )) if self._has_lines and other_data._has_arcs: raise CoverageException("Can't combine arc data with line data") @@ -611,79 +611,76 @@ def update(self, other_data, aliases=None): other_data.read() with other_data._connect() as conn: # Get files data. - cur = conn.execute('select path from file') + cur = conn.execute("select path from file") files = {path: aliases.map(path) for (path,) in cur} cur.close() # Get contexts data. - cur = conn.execute('select context from context') + cur = conn.execute("select context from context") contexts = [context for (context,) in cur] cur.close() # Get arc data. cur = conn.execute( - 'select file.path, context.context, arc.fromno, arc.tono ' - 'from arc ' - 'inner join file on file.id = arc.file_id ' - 'inner join context on context.id = arc.context_id' + "select file.path, context.context, arc.fromno, arc.tono " + + "from arc " + + "inner join file on file.id = arc.file_id " + + "inner join context on context.id = arc.context_id" ) arcs = [(files[path], context, fromno, tono) for (path, context, fromno, tono) in cur] cur.close() # Get line data. cur = conn.execute( - 'select file.path, context.context, line_bits.numbits ' - 'from line_bits ' - 'inner join file on file.id = line_bits.file_id ' - 'inner join context on context.id = line_bits.context_id' + "select file.path, context.context, line_bits.numbits " + + "from line_bits " + + "inner join file on file.id = line_bits.file_id " + + "inner join context on context.id = line_bits.context_id" ) - lines = { - (files[path], context): numbits - for (path, context, numbits) in cur - } + lines = {(files[path], context): numbits for (path, context, numbits) in cur} cur.close() # Get tracer data. 
cur = conn.execute( - 'select file.path, tracer ' - 'from tracer ' - 'inner join file on file.id = tracer.file_id' + "select file.path, tracer " + + "from tracer " + + "inner join file on file.id = tracer.file_id" ) tracers = {files[path]: tracer for (path, tracer) in cur} cur.close() with self._connect() as conn: - conn.con.isolation_level = 'IMMEDIATE' + conn.con.isolation_level = "IMMEDIATE" # Get all tracers in the DB. Files not in the tracers are assumed # to have an empty string tracer. Since Sqlite does not support # full outer joins, we have to make two queries to fill the # dictionary. - this_tracers = {path: '' for path, in conn.execute('select path from file')} + this_tracers = {path: "" for path, in conn.execute("select path from file")} this_tracers.update({ aliases.map(path): tracer for path, tracer in conn.execute( - 'select file.path, tracer from tracer ' - 'inner join file on file.id = tracer.file_id' + "select file.path, tracer from tracer " + + "inner join file on file.id = tracer.file_id" ) }) # Create all file and context rows in the DB. conn.executemany( - 'insert or ignore into file (path) values (?)', + "insert or ignore into file (path) values (?)", ((file,) for file in files.values()) ) file_ids = { path: id - for id, path in conn.execute('select id, path from file') + for id, path in conn.execute("select id, path from file") } conn.executemany( - 'insert or ignore into context (context) values (?)', + "insert or ignore into context (context) values (?)", ((context,) for context in contexts) ) context_ids = { context: id - for id, context in conn.execute('select id, context from context') + for id, context in conn.execute("select id, context from context") } # Prepare tracers and fail, if a conflict is found. @@ -692,7 +689,7 @@ def update(self, other_data, aliases=None): tracer_map = {} for path in files.values(): this_tracer = this_tracers.get(path) - other_tracer = tracers.get(path, '') + other_tracer = tracers.get(path, "") # If there is no tracer, there is always the None tracer. if this_tracer is not None and this_tracer != other_tracer: raise CoverageException( @@ -712,10 +709,10 @@ def update(self, other_data, aliases=None): # Get line data. cur = conn.execute( - 'select file.path, context.context, line_bits.numbits ' - 'from line_bits ' - 'inner join file on file.id = line_bits.file_id ' - 'inner join context on context.id = line_bits.context_id' + "select file.path, context.context, line_bits.numbits " + + "from line_bits " + + "inner join file on file.id = line_bits.file_id " + + "inner join context on context.id = line_bits.context_id" ) for path, context, numbits in cur: key = (aliases.map(path), context) @@ -729,8 +726,8 @@ def update(self, other_data, aliases=None): # Write the combined data. 
conn.executemany( - 'insert or ignore into arc ' - '(file_id, context_id, fromno, tono) values (?, ?, ?, ?)', + "insert or ignore into arc " + + "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)", arc_rows ) @@ -738,7 +735,7 @@ def update(self, other_data, aliases=None): self._choose_lines_or_arcs(lines=True) conn.execute("delete from line_bits") conn.executemany( - "insert into line_bits " + "insert into line_bits " + "(file_id, context_id, numbits) values (?, ?, ?)", [ (file_ids[file], context_ids[context], numbits) @@ -746,7 +743,7 @@ def update(self, other_data, aliases=None): ] ) conn.executemany( - 'insert or ignore into tracer (file_id, tracer) values (?, ?)', + "insert or ignore into tracer (file_id, tracer) values (?, ?)", ((file_ids[filename], tracer) for filename, tracer in tracer_map.items()) ) @@ -764,15 +761,15 @@ def erase(self, parallel=False): self._reset() if self._no_disk: return - if self._debug.should('dataio'): + if self._debug.should("dataio"): self._debug.write(f"Erasing data file {self._filename!r}") file_be_gone(self._filename) if parallel: data_dir, local = os.path.split(self._filename) - localdot = local + '.*' + localdot = local + ".*" pattern = os.path.join(os.path.abspath(data_dir), localdot) for filename in glob.glob(pattern): - if self._debug.should('dataio'): + if self._debug.should("dataio"): self._debug.write(f"Erasing parallel data file {filename!r}") file_be_gone(filename) @@ -864,7 +861,7 @@ def set_query_contexts(self, contexts): self._start_using() if contexts: with self._connect() as con: - context_clause = ' or '.join(['context regexp ?'] * len(contexts)) + context_clause = " or ".join(["context regexp ?"] * len(contexts)) cur = con.execute("select id from context where " + context_clause, contexts) self._query_context_ids = [row[0] for row in cur.fetchall()] else: @@ -895,7 +892,7 @@ def lines(self, filename): query = "select numbits from line_bits where file_id = ?" data = [file_id] if self._query_context_ids is not None: - ids_array = ', '.join('?' * len(self._query_context_ids)) + ids_array = ", ".join("?" * len(self._query_context_ids)) query += " and context_id in (" + ids_array + ")" data += self._query_context_ids bitmaps = list(con.execute(query, data)) @@ -930,7 +927,7 @@ def arcs(self, filename): query = "select distinct fromno, tono from arc where file_id = ?" data = [file_id] if self._query_context_ids is not None: - ids_array = ', '.join('?' * len(self._query_context_ids)) + ids_array = ", ".join("?" * len(self._query_context_ids)) query += " and context_id in (" + ids_array + ")" data += self._query_context_ids arcs = con.execute(query, data) @@ -953,13 +950,13 @@ def contexts_by_lineno(self, filename): return lineno_contexts_map if self.has_arcs(): query = ( - "select arc.fromno, arc.tono, context.context " - "from arc, context " + "select arc.fromno, arc.tono, context.context " + + "from arc, context " + "where arc.file_id = ? and arc.context_id = context.id" ) data = [file_id] if self._query_context_ids is not None: - ids_array = ', '.join('?' * len(self._query_context_ids)) + ids_array = ", ".join("?" 
* len(self._query_context_ids)) query += " and arc.context_id in (" + ids_array + ")" data += self._query_context_ids for fromno, tono, context in con.execute(query, data): @@ -969,13 +966,13 @@ def contexts_by_lineno(self, filename): lineno_contexts_map[tono].append(context) else: query = ( - "select l.numbits, c.context from line_bits l, context c " - "where l.context_id = c.id " + "select l.numbits, c.context from line_bits l, context c " + + "where l.context_id = c.id " + "and file_id = ?" ) data = [file_id] if self._query_context_ids is not None: - ids_array = ', '.join('?' * len(self._query_context_ids)) + ids_array = ", ".join("?" * len(self._query_context_ids)) query += " and l.context_id in (" + ids_array + ")" data += self._query_context_ids for numbits, context in con.execute(query, data): @@ -998,10 +995,10 @@ def sys_info(cls): copts = ["; ".join(copts[i:i + 3]) for i in range(0, len(copts), 3)] return [ - ('sqlite3_version', sqlite3.version), - ('sqlite3_sqlite_version', sqlite3.sqlite_version), - ('sqlite3_temp_store', temp_store), - ('sqlite3_compile_options', copts), + ("sqlite3_version", sqlite3.version), + ("sqlite3_sqlite_version", sqlite3.sqlite_version), + ("sqlite3_temp_store", temp_store), + ("sqlite3_compile_options", copts), ] @@ -1016,7 +1013,7 @@ class SqliteDb(SimpleReprMixin): """ def __init__(self, filename, debug): - self.debug = debug if debug.should('sql') else None + self.debug = debug if debug.should("sql") else None self.filename = filename self.nest = 0 self.con = None @@ -1034,7 +1031,7 @@ def _connect(self): if self.debug: self.debug.write(f"Connecting to {self.filename!r}") self.con = sqlite3.connect(self.filename, check_same_thread=False) - self.con.create_function('REGEXP', 2, _regexp) + self.con.create_function("REGEXP", 2, _regexp) # This pragma makes writing faster. It disables rollbacks, but we never need them. # PyPy needs the .close() calls here, or sqlite gets twisted up: @@ -1089,7 +1086,7 @@ def execute(self, sql, parameters=()): cov4_sig = b"!coverage.py: This is a private format" if bad_file.read(len(cov4_sig)) == cov4_sig: msg = ( - "Looks like a coverage 4.x data file. " + "Looks like a coverage 4.x data file. " + "Are you mixing versions of coverage?" 
) except Exception: From 8463816dea83b0cc84367b5246744944b81715b3 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sat, 9 Oct 2021 15:46:57 -0400 Subject: [PATCH 19/41] refactor: don't need this print --- tests/test_debug.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/test_debug.py b/tests/test_debug.py index e93ae0b66..740bd6177 100644 --- a/tests/test_debug.py +++ b/tests/test_debug.py @@ -150,7 +150,6 @@ def test_debug_trace_pid(self): def test_debug_callers(self): out_lines = self.f1_debug_output(["pid", "dataop", "dataio", "callers"]) - print(out_lines) # For every real message, there should be a stack trace with a line like # "f1_debug_output : /Users/ned/coverage/tests/test_debug.py @71" real_messages = re_lines(out_lines, r":\d+", match=False).splitlines() From 4d55ada1bdca638b0fd12e887fb0faaa574dee8d Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sat, 9 Oct 2021 16:33:42 -0400 Subject: [PATCH 20/41] test: add a test of touching files in an empty CoverageData --- tests/test_data.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/test_data.py b/tests/test_data.py index 25f0f57e5..2d3f164c3 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -473,6 +473,12 @@ def test_empty_arcs_are_still_arcs(self): covdata.touch_file("abc.py") assert covdata.has_arcs() + def test_cant_touch_in_empty_data(self): + covdata = CoverageData() + msg = "Can't touch files in an empty CoverageData" + with pytest.raises(CoverageException, match=msg): + covdata.touch_file("abc.py") + def test_read_and_write_are_opposites(self): covdata1 = CoverageData() covdata1.add_arcs(ARCS_3) From 498b1484e466588a22cef520095f1fd0ed8b8ff8 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sat, 9 Oct 2021 16:41:59 -0400 Subject: [PATCH 21/41] fix: contexts_by_lineno now returns a true dict --- CHANGES.rst | 4 +++- coverage/html.py | 2 +- coverage/jsonreport.py | 6 ++---- coverage/sqldata.py | 18 ++++++++++-------- tests/test_data.py | 27 ++++++++++++++++++++++++--- 5 files changed, 40 insertions(+), 17 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index b84ee0fad..7e5f77825 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -22,7 +22,9 @@ This list is detailed and covers changes in each pre-release version. Unreleased ---------- -Nothing yet. +- The :meth:`.CoverageData.contexts_by_lineno` method was documented to return + a dict, but was returning a defaultdict. Now it returns a plain dict. It + also no longer returns negative numbered keys. .. 
_changes_601: diff --git a/coverage/html.py b/coverage/html.py index 208554c8e..b095343ed 100644 --- a/coverage/html.py +++ b/coverage/html.py @@ -123,7 +123,7 @@ def data_for_file(self, fr, analysis): contexts = contexts_label = None context_list = None if category and self.config.show_contexts: - contexts = sorted(c or self.EMPTY for c in contexts_by_lineno[lineno]) + contexts = sorted(c or self.EMPTY for c in contexts_by_lineno.get(lineno, ())) if contexts == [self.EMPTY]: contexts_label = self.EMPTY else: diff --git a/coverage/jsonreport.py b/coverage/jsonreport.py index b22ab10b9..daebca115 100644 --- a/coverage/jsonreport.py +++ b/coverage/jsonreport.py @@ -88,12 +88,10 @@ def report_one_file(self, coverage_data, analysis): 'executed_lines': sorted(analysis.executed), 'summary': summary, 'missing_lines': sorted(analysis.missing), - 'excluded_lines': sorted(analysis.excluded) + 'excluded_lines': sorted(analysis.excluded), } if self.config.json_show_contexts: - reported_file['contexts'] = analysis.data.contexts_by_lineno( - analysis.filename, - ) + reported_file['contexts'] = analysis.data.contexts_by_lineno(analysis.filename) if coverage_data.has_arcs(): reported_file['summary'].update({ 'num_branches': nums.n_branches, diff --git a/coverage/sqldata.py b/coverage/sqldata.py index 412a9eb7e..108a25ef0 100644 --- a/coverage/sqldata.py +++ b/coverage/sqldata.py @@ -942,12 +942,13 @@ def contexts_by_lineno(self, filename): .. versionadded:: 5.0 """ - lineno_contexts_map = collections.defaultdict(list) self._start_using() with self._connect() as con: file_id = self._file_id(filename) if file_id is None: - return lineno_contexts_map + return {} + + lineno_contexts_map = collections.defaultdict(set) if self.has_arcs(): query = ( "select arc.fromno, arc.tono, context.context " + @@ -960,10 +961,10 @@ def contexts_by_lineno(self, filename): query += " and arc.context_id in (" + ids_array + ")" data += self._query_context_ids for fromno, tono, context in con.execute(query, data): - if context not in lineno_contexts_map[fromno]: - lineno_contexts_map[fromno].append(context) - if context not in lineno_contexts_map[tono]: - lineno_contexts_map[tono].append(context) + if fromno > 0: + lineno_contexts_map[fromno].add(context) + if tono > 0: + lineno_contexts_map[tono].add(context) else: query = ( "select l.numbits, c.context from line_bits l, context c " + @@ -977,8 +978,9 @@ def contexts_by_lineno(self, filename): data += self._query_context_ids for numbits, context in con.execute(query, data): for lineno in numbits_to_nums(numbits): - lineno_contexts_map[lineno].append(context) - return lineno_contexts_map + lineno_contexts_map[lineno].add(context) + + return {lineno: list(contexts) for lineno, contexts in lineno_contexts_map.items()} @classmethod def sys_info(cls): diff --git a/tests/test_data.py b/tests/test_data.py index 2d3f164c3..80cd9bc21 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -206,7 +206,8 @@ def test_contexts_by_lineno_with_lines(self): covdata = CoverageData() covdata.set_context('test_a') covdata.add_lines(LINES_1) - assert covdata.contexts_by_lineno('a.py') == {1: ['test_a'], 2: ['test_a']} + expected = {1: ['test_a'], 2: ['test_a']} + assert covdata.contexts_by_lineno('a.py') == expected @pytest.mark.parametrize("lines", [LINES_1, dicts_from_sets(LINES_1)]) def test_no_duplicate_lines(self, lines): @@ -249,13 +250,33 @@ def test_contexts_by_lineno_with_arcs(self): covdata = CoverageData() covdata.set_context('test_x') covdata.add_arcs(ARCS_3) - expected = {-1: 
['test_x'], 1: ['test_x'], 2: ['test_x'], 3: ['test_x']} - assert expected == covdata.contexts_by_lineno('x.py') + expected = {1: ['test_x'], 2: ['test_x'], 3: ['test_x']} + assert covdata.contexts_by_lineno('x.py') == expected def test_contexts_by_lineno_with_unknown_file(self): covdata = CoverageData() + covdata.set_context('test_x') + covdata.add_arcs(ARCS_3) assert covdata.contexts_by_lineno('xyz.py') == {} + def test_context_by_lineno_with_query_contexts_with_lines(self): + covdata = CoverageData() + covdata.set_context("test_1") + covdata.add_lines(LINES_1) + covdata.set_context("test_2") + covdata.add_lines(LINES_2) + covdata.set_query_context("test_1") + assert covdata.contexts_by_lineno("a.py") == dict.fromkeys([1,2], ["test_1"]) + + def test_context_by_lineno_with_query_contexts_with_arcs(self): + covdata = CoverageData() + covdata.set_context("test_1") + covdata.add_arcs(ARCS_3) + covdata.set_context("test_2") + covdata.add_arcs(ARCS_4) + covdata.set_query_context("test_1") + assert covdata.contexts_by_lineno("x.py") == dict.fromkeys([1,2,3], ["test_1"]) + def test_file_tracer_name(self): covdata = CoverageData() covdata.add_lines({ From 267622b11b730ec69bf34202fc6258a2614394c5 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sun, 10 Oct 2021 07:24:35 -0400 Subject: [PATCH 22/41] style: use the official designation for utf-8 Yes, this is completely unimportant. Don't ask me why I bothered, I'm not really sure. --- coverage/annotate.py | 2 +- coverage/inorout.py | 2 +- coverage/misc.py | 6 +++--- coverage/phystokens.py | 4 ++-- coverage/plugin.py | 4 ++-- coverage/sqldata.py | 4 ++-- igor.py | 2 +- tests/goldtest.py | 2 +- tests/helpers.py | 2 +- tests/test_process.py | 2 +- tests/test_python.py | 2 +- 11 files changed, 16 insertions(+), 16 deletions(-) diff --git a/coverage/annotate.py b/coverage/annotate.py index a6ee4636c..9ca1b80ac 100644 --- a/coverage/annotate.py +++ b/coverage/annotate.py @@ -73,7 +73,7 @@ def annotate_file(self, fr, analysis): else: dest_file = fr.filename + ",cover" - with open(dest_file, 'w', encoding='utf8') as dest: + with open(dest_file, 'w', encoding='utf-8') as dest: i = 0 j = 0 covered = True diff --git a/coverage/inorout.py b/coverage/inorout.py index 75b0a9cc7..496ced356 100644 --- a/coverage/inorout.py +++ b/coverage/inorout.py @@ -432,7 +432,7 @@ def check_include_omit_etc(self, filename, frame): # No point tracing a file we can't later write to SQLite. 
try: - filename.encode("utf8") + filename.encode("utf-8") except UnicodeEncodeError: return "non-encodable filename" diff --git a/coverage/misc.py b/coverage/misc.py index 30b757448..9c414d88c 100644 --- a/coverage/misc.py +++ b/coverage/misc.py @@ -236,15 +236,15 @@ def __init__(self): def update(self, v): """Add `v` to the hash, recursively if needed.""" - self.hash.update(str(type(v)).encode("utf8")) + self.hash.update(str(type(v)).encode("utf-8")) if isinstance(v, str): - self.hash.update(v.encode('utf8')) + self.hash.update(v.encode("utf-8")) elif isinstance(v, bytes): self.hash.update(v) elif v is None: pass elif isinstance(v, (int, float)): - self.hash.update(str(v).encode("utf8")) + self.hash.update(str(v).encode("utf-8")) elif isinstance(v, (tuple, list)): for e in v: self.update(e) diff --git a/coverage/phystokens.py b/coverage/phystokens.py index f06c0c277..b6b08d002 100644 --- a/coverage/phystokens.py +++ b/coverage/phystokens.py @@ -201,8 +201,8 @@ def compile_unicode(source, filename, mode): Python 2's compile() builtin has a stupid restriction: if the source string is Unicode, then it may not have a encoding declaration in it. Why not? - Who knows! It also decodes to utf8, and then tries to interpret those utf8 - bytes according to the encoding declaration. Why? Who knows! + Who knows! It also decodes to utf-8, and then tries to interpret those + utf-8 bytes according to the encoding declaration. Why? Who knows! This function neuters the coding declaration, and compiles it. diff --git a/coverage/plugin.py b/coverage/plugin.py index 5b38e3361..8d149af97 100644 --- a/coverage/plugin.py +++ b/coverage/plugin.py @@ -359,12 +359,12 @@ def source(self): Returns a Unicode string. The base implementation simply reads the `self.filename` file and - decodes it as UTF8. Override this method if your file isn't readable + decodes it as UTF-8. Override this method if your file isn't readable as a text file, or if you need other encoding support. """ with open(self.filename, "rb") as f: - return f.read().decode("utf8") + return f.read().decode("utf-8") def lines(self): """Get the executable lines in this file. 
diff --git a/coverage/sqldata.py b/coverage/sqldata.py index 108a25ef0..c4e950d34 100644 --- a/coverage/sqldata.py +++ b/coverage/sqldata.py @@ -349,7 +349,7 @@ def dumps(self): if self._debug.should("dataio"): self._debug.write(f"Dumping data from data file {self._filename!r}") with self._connect() as con: - return b"z" + zlib.compress(con.dump().encode("utf8")) + return b"z" + zlib.compress(con.dump().encode("utf-8")) @contract(data="bytes") def loads(self, data): @@ -373,7 +373,7 @@ def loads(self, data): raise CoverageException( f"Unrecognized serialization: {data[:40]!r} (head of {len(data)} bytes)" ) - script = zlib.decompress(data[1:]).decode("utf8") + script = zlib.decompress(data[1:]).decode("utf-8") self._dbs[threading.get_ident()] = db = SqliteDb(self._filename, self._debug) with db: db.executescript(script) diff --git a/igor.py b/igor.py index 58774036f..2e1f7aa59 100644 --- a/igor.py +++ b/igor.py @@ -234,7 +234,7 @@ def do_zip_mods(): """) # These encodings should match the list in tests/test_python.py details = [ - ('utf8', 'ⓗⓔⓛⓛⓞ, ⓦⓞⓡⓛⓓ'), + ('utf-8', 'ⓗⓔⓛⓛⓞ, ⓦⓞⓡⓛⓓ'), ('gb2312', '你好,世界'), ('hebrew', 'שלום, עולם'), ('shift_jis', 'こんにちは世界'), diff --git a/tests/goldtest.py b/tests/goldtest.py index cd946efbe..f2b7fe19f 100644 --- a/tests/goldtest.py +++ b/tests/goldtest.py @@ -122,7 +122,7 @@ def canonicalize_xml(xtext): for node in root.iter(): node.attrib = dict(sorted(node.items())) xtext = xml.etree.ElementTree.tostring(root) - return xtext.decode('utf8') + return xtext.decode("utf-8") def contains(filename, *strlist): diff --git a/tests/helpers.py b/tests/helpers.py index 28adf78c7..c85a36cb4 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -69,7 +69,7 @@ def make_file(filename, text="", bytes=b"", newline=None): text = textwrap.dedent(text) if newline: text = text.replace("\n", newline) - data = text.encode('utf8') + data = text.encode("utf-8") # Make sure the directories are available. dirs, _ = os.path.split(filename) diff --git a/tests/test_process.py b/tests/test_process.py index af2d3e784..c41c57b71 100644 --- a/tests/test_process.py +++ b/tests/test_process.py @@ -759,7 +759,7 @@ def test_fullcoverage(self): # Pypy passes locally, but fails in CI? Perhaps the version of macOS is # significant? https://foss.heptapod.net/pypy/pypy/-/issues/3074 @pytest.mark.skipif(env.PYPY, reason="PyPy is unreliable with this test") - # Jython as of 2.7.1rc3 won't compile a filename that isn't utf8. + # Jython as of 2.7.1rc3 won't compile a filename that isn't utf-8. @pytest.mark.skipif(env.JYTHON, reason="Jython can't handle this test") def test_lang_c(self): # LANG=C forces getfilesystemencoding on Linux to 'ascii', which causes diff --git a/tests/test_python.py b/tests/test_python.py index dc9609c97..5965ca064 100644 --- a/tests/test_python.py +++ b/tests/test_python.py @@ -21,7 +21,7 @@ class GetZipBytesTest(CoverageTest): @pytest.mark.parametrize( "encoding", - ["utf8", "gb2312", "hebrew", "shift_jis", "cp1252"], + ["utf-8", "gb2312", "hebrew", "shift_jis", "cp1252"], ) def test_get_encoded_zip_files(self, encoding): # See igor.py, do_zipmods, for the text of these files. 
From b5ddfd9ad9978211006588934bc25e93d2ba8023 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sun, 10 Oct 2021 07:46:35 -0400 Subject: [PATCH 23/41] refactor: remove a Python 2 thing --- tests/test_execfile.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/tests/test_execfile.py b/tests/test_execfile.py index c0ed2d482..5c01f8929 100644 --- a/tests/test_execfile.py +++ b/tests/test_execfile.py @@ -165,12 +165,7 @@ def test_running_py_from_binary(self): path = python_reported_file('binary') msg = ( re.escape(f"Couldn't run '{path}' as Python code: ") + - r"(TypeError|ValueError): " - r"(" - r"compile\(\) expected string without null bytes" # for py2 - r"|" - r"source code string cannot contain null bytes" # for py3 - r")" + r"(TypeError|ValueError): source code string cannot contain null bytes" ) with pytest.raises(Exception, match=msg): run_python_file([bf]) From b97aaf2bc2703ac55a3fc1e048729bea8434c18c Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sun, 10 Oct 2021 08:01:25 -0400 Subject: [PATCH 24/41] style: prefer explicit string concatenation --- coverage/cmdline.py | 60 ++++++++++++++++++------------------ coverage/execfile.py | 5 ++- coverage/parser.py | 2 +- tests/test_arcs.py | 44 +++++++++++++-------------- tests/test_concurrency.py | 6 ++-- tests/test_config.py | 16 +++++----- tests/test_data.py | 4 +-- tests/test_files.py | 4 +-- tests/test_numbits.py | 12 ++++---- tests/test_oddball.py | 6 ++-- tests/test_parser.py | 2 +- tests/test_process.py | 31 +++++++++---------- tests/test_templite.py | 64 +++++++++++++++++++-------------------- tests/test_testing.py | 17 +++++------ 14 files changed, 133 insertions(+), 140 deletions(-) diff --git a/coverage/cmdline.py b/coverage/cmdline.py index eae22f2f2..1be155b80 100644 --- a/coverage/cmdline.py +++ b/coverage/cmdline.py @@ -45,7 +45,7 @@ class Opts: '', '--concurrency', action='store', metavar="LIB", choices=CONCURRENCY_CHOICES, help=( - "Properly measure code using a concurrency library. " + "Properly measure code using a concurrency library. " + "Valid values are: {}." ).format(", ".join(CONCURRENCY_CHOICES)), ) @@ -77,20 +77,20 @@ class Opts: '', '--include', action='store', metavar="PAT1,PAT2,...", help=( - "Include only files whose paths match one of these patterns. " + "Include only files whose paths match one of these patterns. " + "Accepts shell-style wildcards, which must be quoted." ), ) pylib = optparse.make_option( '-L', '--pylib', action='store_true', help=( - "Measure coverage even inside the Python installed library, " + "Measure coverage even inside the Python installed library, " + "which isn't done by default." ), ) sort = optparse.make_option( '--sort', action='store', metavar='COLUMN', - help="Sort the report by the named column: name, stmts, miss, branch, brpart, or cover. " + help="Sort the report by the named column: name, stmts, miss, branch, brpart, or cover. " + "Default is name." ) show_missing = optparse.make_option( @@ -117,7 +117,7 @@ class Opts: '', '--omit', action='store', metavar="PAT1,PAT2,...", help=( - "Omit files whose paths match one of these patterns. " + "Omit files whose paths match one of these patterns. " + "Accepts shell-style wildcards, which must be quoted." ), ) @@ -125,7 +125,7 @@ class Opts: '', '--contexts', action='store', metavar="REGEX1,REGEX2,...", help=( - "Only display data from lines covered in the given contexts. " + "Only display data from lines covered in the given contexts. " + "Accepts Python regexes, which must be quoted." 
), ) @@ -146,30 +146,30 @@ class Opts: parallel_mode = optparse.make_option( '-p', '--parallel-mode', action='store_true', help=( - "Append the machine name, process id and random number to the " - ".coverage data file name to simplify collecting data from " + "Append the machine name, process id and random number to the " + + ".coverage data file name to simplify collecting data from " + "many processes." ), ) module = optparse.make_option( '-m', '--module', action='store_true', help=( - " is an importable Python module, not a script path, " + " is an importable Python module, not a script path, " + "to be run as 'python -m' would run it." ), ) precision = optparse.make_option( '', '--precision', action='store', metavar='N', type=int, help=( - "Number of digits after the decimal point to display for " + "Number of digits after the decimal point to display for " + "reported coverage percentages." ), ) rcfile = optparse.make_option( '', '--rcfile', action='store', help=( - "Specify configuration file. " - "By default '.coveragerc', 'setup.cfg', 'tox.ini', and " + "Specify configuration file. " + + "By default '.coveragerc', 'setup.cfg', 'tox.ini', and " + "'pyproject.toml' are tried. [env: COVERAGE_RCFILE]" ), ) @@ -180,7 +180,7 @@ class Opts: timid = optparse.make_option( '', '--timid', action='store_true', help=( - "Use a simpler but slower trace method. Try this if you get " + "Use a simpler but slower trace method. Try this if you get " + "seemingly impossible results!" ), ) @@ -328,7 +328,7 @@ def get_prog_name(self): ] + GLOBAL_ARGS, usage="[options] [modules]", description=( - "Make annotated copies of the given files, marking statements that are executed " + "Make annotated copies of the given files, marking statements that are executed " + "with > and statements that are missed with !." ), ), @@ -341,11 +341,11 @@ def get_prog_name(self): ] + GLOBAL_ARGS, usage="[options] ... ", description=( - "Combine data from multiple coverage files collected " - "with 'run -p'. The combined results are written to a single " - "file representing the union of the data. The positional " - "arguments are data files or directories containing data files. " - "If no paths are provided, data files in the default data file's " + "Combine data from multiple coverage files collected " + + "with 'run -p'. The combined results are written to a single " + + "file representing the union of the data. The positional " + + "arguments are data files or directories containing data files. " + + "If no paths are provided, data files in the default data file's " + "directory are combined." ), ), @@ -354,12 +354,12 @@ def get_prog_name(self): "debug", GLOBAL_ARGS, usage="", description=( - "Display information about the internals of coverage.py, " - "for diagnosing problems. " - "Topics are: " - "'data' to show a summary of the collected data; " - "'sys' to show installation information; " - "'config' to show the configuration; " + "Display information about the internals of coverage.py, " + + "for diagnosing problems. " + + "Topics are: " + + "'data' to show a summary of the collected data; " + + "'sys' to show installation information; " + + "'config' to show the configuration; " + "'premain' to show what is calling coverage." ), ), @@ -393,8 +393,8 @@ def get_prog_name(self): ] + GLOBAL_ARGS, usage="[options] [modules]", description=( - "Create an HTML report of the coverage of the files. " - "Each file gets its own page, with the source decorated to show " + "Create an HTML report of the coverage of the files. 
" + + "Each file gets its own page, with the source decorated to show " + "executed, excluded, and missed lines." ), ), @@ -732,9 +732,9 @@ def do_run(self, options, args): # they will be None if they have not been specified. if getattr(options, opt_name) is not None: show_help( - "Options affecting multiprocessing must only be specified " - "in a configuration file.\n" - "Remove --{} from the command line.".format(opt_name) + "Options affecting multiprocessing must only be specified " + + "in a configuration file.\n" + + f"Remove --{opt_name} from the command line." ) return ERR diff --git a/coverage/execfile.py b/coverage/execfile.py index f46955bce..539e368d7 100644 --- a/coverage/execfile.py +++ b/coverage/execfile.py @@ -52,9 +52,8 @@ def find_module(modulename): spec = importlib.util.find_spec(mod_main) if not spec: raise NoSource( - "No module named %s; " - "%r is a package and cannot be directly executed" - % (mod_main, modulename) + f"No module named {mod_main}; " + + f"{modulename!r} is a package and cannot be directly executed" ) pathname = spec.origin packagename = spec.name diff --git a/coverage/parser.py b/coverage/parser.py index 5c467a7ea..3be822d55 100644 --- a/coverage/parser.py +++ b/coverage/parser.py @@ -367,7 +367,7 @@ def __init__(self, text, code=None, filename=None): for attr in ['co_lnotab', 'co_firstlineno']: if not hasattr(self.code, attr): raise StopEverything( # pragma: only jython - "This implementation of Python doesn't support code analysis.\n" + "This implementation of Python doesn't support code analysis.\n" + "Run coverage.py under another Python for this command." ) diff --git a/tests/test_arcs.py b/tests/test_arcs.py index 5b7965aa2..6cdc908e1 100644 --- a/tests/test_arcs.py +++ b/tests/test_arcs.py @@ -517,8 +517,8 @@ def branches_3(l): branches_3([0,1]) """, arcz= - ".1 18 8G GH H. " - ".2 23 34 43 26 3. 6. " + ".1 18 8G GH H. " + + ".2 23 34 43 26 3. 6. " + "-89 9A 9-8 AB BC CB B9 AE E9", arcz_missing="26 6." ) @@ -1077,18 +1077,18 @@ def check_token(data): def test_except_jump_finally(self): if env.PYBEHAVIOR.finally_jumps_back: arcz = ( - ".1 1Q QR RS ST TU U. " - ".2 23 34 45 56 4O 6L " - "78 89 9A AL LA AO 8B BC CD DL LD D4 BE EF FG GL LG G. EH HI IJ JL HL " - "L4 LM " + ".1 1Q QR RS ST TU U. " + + ".2 23 34 45 56 4O 6L " + + "78 89 9A AL LA AO 8B BC CD DL LD D4 BE EF FG GL LG G. EH HI IJ JL HL " + + "L4 LM " + "MN NO O." ) else: arcz = ( - ".1 1Q QR RS ST TU U. " - ".2 23 34 45 56 4O 6L " - "78 89 9A AL 8B BC CD DL BE EF FG GL EH HI IJ JL HL " - "LO L4 L. LM " + ".1 1Q QR RS ST TU U. " + + ".2 23 34 45 56 4O 6L " + + "78 89 9A AL 8B BC CD DL BE EF FG GL EH HI IJ JL HL " + + "LO L4 L. LM " + "MN NO O." ) self.check_coverage("""\ @@ -1131,18 +1131,18 @@ def func(x): def test_else_jump_finally(self): if env.PYBEHAVIOR.finally_jumps_back: arcz = ( - ".1 1S ST TU UV VW W. " - ".2 23 34 45 56 6A 78 8N 4Q " - "AB BC CN NC CQ AD DE EF FN NF F4 DG GH HI IN NI I. GJ JK KL LN JN " - "N4 NO " + ".1 1S ST TU UV VW W. " + + ".2 23 34 45 56 6A 78 8N 4Q " + + "AB BC CN NC CQ AD DE EF FN NF F4 DG GH HI IN NI I. GJ JK KL LN JN " + + "N4 NO " + "OP PQ Q." ) else: arcz = ( - ".1 1S ST TU UV VW W. " - ".2 23 34 45 56 6A 78 8N 4Q " - "AB BC CN AD DE EF FN DG GH HI IN GJ JK KL LN JN " - "N4 NQ N. NO " + ".1 1S ST TU UV VW W. " + + ".2 23 34 45 56 6A 78 8N 4Q " + + "AB BC CN AD DE EF FN DG GH HI IN GJ JK KL LN JN " + + "N4 NQ N. NO " + "OP PQ Q." 
) self.check_coverage("""\ @@ -1280,9 +1280,7 @@ def double_inputs(): next(gen) print(gen.send(6)) """, - arcz= - ".1 17 78 89 9A AB B. " - ".2 23 34 45 52 2.", + arcz=".1 17 78 89 9A AB B. .2 23 34 45 52 2.", arcz_missing="2.", ) assert self.stdout() == "20\n12\n" @@ -1850,8 +1848,8 @@ async def print_sum(x, y): # 8 loop.close() # G """, arcz= - ".1 13 38 8E EF FG G. " - "-34 45 56 6-3 " + ".1 13 38 8E EF FG G. " + + "-34 45 56 6-3 " + "-89 9C C-8", arcz_unpredicted="5-3 9-8", ) diff --git a/tests/test_concurrency.py b/tests/test_concurrency.py index 0b8d6a8b3..696b12eb5 100644 --- a/tests/test_concurrency.py +++ b/tests/test_concurrency.py @@ -187,15 +187,13 @@ def cant_trace_msg(concurrency, the_module): # We don't even have the underlying module installed, we expect # coverage to alert us to this fact. expected_out = ( - "Couldn't trace with concurrency=%s, " - "the module isn't installed.\n" % concurrency + f"Couldn't trace with concurrency={concurrency}, the module isn't installed.\n" ) elif env.C_TRACER or concurrency == "thread" or concurrency == "": expected_out = None else: expected_out = ( - "Can't support concurrency=%s with PyTracer, " - "only threads are supported\n" % concurrency + f"Can't support concurrency={concurrency} with PyTracer, only threads are supported\n" ) return expected_out diff --git a/tests/test_config.py b/tests/test_config.py index 9e1268276..aec18bf2e 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -169,14 +169,14 @@ def test_parse_errors(self): ("timid = 1\n", r"no section headers"), ("[run\n", r"\[run"), ("[report]\nexclude_lines = foo(\n", - r"Invalid \[report\].exclude_lines value 'foo\(': " + r"Invalid \[report\].exclude_lines value 'foo\(': " + r"(unbalanced parenthesis|missing \))"), ("[report]\npartial_branches = foo[\n", - r"Invalid \[report\].partial_branches value 'foo\[': " + r"Invalid \[report\].partial_branches value 'foo\[': " + r"(unexpected end of regular expression|unterminated character set)"), ("[report]\npartial_branches_always = foo***\n", - r"Invalid \[report\].partial_branches_always value " - r"'foo\*\*\*': " + r"Invalid \[report\].partial_branches_always value " + + r"'foo\*\*\*': " + r"multiple repeat"), ] @@ -190,14 +190,14 @@ def test_parse_errors(self): ("[tool.coverage.run]\ntimid = \"maybe?\"\n", r"maybe[?]"), ("[tool.coverage.run\n", None), ('[tool.coverage.report]\nexclude_lines = ["foo("]\n', - r"Invalid \[tool.coverage.report\].exclude_lines value u?'foo\(': " + r"Invalid \[tool.coverage.report\].exclude_lines value u?'foo\(': " + r"(unbalanced parenthesis|missing \))"), ('[tool.coverage.report]\npartial_branches = ["foo["]\n', - r"Invalid \[tool.coverage.report\].partial_branches value u?'foo\[': " + r"Invalid \[tool.coverage.report\].partial_branches value u?'foo\[': " + r"(unexpected end of regular expression|unterminated character set)"), ('[tool.coverage.report]\npartial_branches_always = ["foo***"]\n', - r"Invalid \[tool.coverage.report\].partial_branches_always value " - r"u?'foo\*\*\*': " + r"Invalid \[tool.coverage.report\].partial_branches_always value " + + r"u?'foo\*\*\*': " + r"multiple repeat"), ('[tool.coverage.run]\nconcurrency="foo"', "not a list"), ("[tool.coverage.report]\nprecision=1.23", "not an integer"), diff --git a/tests/test_data.py b/tests/test_data.py index 80cd9bc21..134eb55aa 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -616,8 +616,8 @@ def test_debug_output_with_debug_option(self): self.assert_line_counts(covdata2, SUMMARY_1) assert re.search( - r"^Erasing 
data file '.*\.coverage'\n" - r"Creating data file '.*\.coverage'\n" + r"^Erasing data file '.*\.coverage'\n" + + r"Creating data file '.*\.coverage'\n" + r"Opening data file '.*\.coverage'\n$", debug.get_output() ) diff --git a/tests/test_files.py b/tests/test_files.py index d6005d364..e5dd83bd8 100644 --- a/tests/test_files.py +++ b/tests/test_files.py @@ -93,8 +93,8 @@ def test_flat_rootname(original, flat): @pytest.mark.parametrize( - "patterns, case_insensitive, partial," - "matches," + "patterns, case_insensitive, partial," + + "matches," + "nomatches", [ ( diff --git a/tests/test_numbits.py b/tests/test_numbits.py index 3f69b4de2..f7032de78 100644 --- a/tests/test_numbits.py +++ b/tests/test_numbits.py @@ -115,9 +115,9 @@ def setup_test(self): def test_numbits_union(self): res = self.cursor.execute( - "select numbits_union(" - "(select numbits from data where id = 7)," - "(select numbits from data where id = 9)" + "select numbits_union(" + + "(select numbits from data where id = 7)," + + "(select numbits from data where id = 9)" + ")" ) expected = [ @@ -129,9 +129,9 @@ def test_numbits_union(self): def test_numbits_intersection(self): res = self.cursor.execute( - "select numbits_intersection(" - "(select numbits from data where id = 7)," - "(select numbits from data where id = 9)" + "select numbits_intersection(" + + "(select numbits from data where id = 7)," + + "(select numbits from data where id = 9)" + ")" ) answer = numbits_to_nums(list(res)[0][0]) diff --git a/tests/test_oddball.py b/tests/test_oddball.py index a97fc1905..c3082abbc 100644 --- a/tests/test_oddball.py +++ b/tests/test_oddball.py @@ -484,9 +484,9 @@ def test_unsets_trace(): out = self.stdout().replace(self.last_module_name, "coverage_test") expected = ( - "call: coverage_test.py @ 12\n" - "line: coverage_test.py @ 13\n" - "line: coverage_test.py @ 14\n" + "call: coverage_test.py @ 12\n" + + "line: coverage_test.py @ 13\n" + + "line: coverage_test.py @ 14\n" + "return: coverage_test.py @ 14\n" ) assert expected == out diff --git a/tests/test_parser.py b/tests/test_parser.py index 1b4e8aca7..82bf7616a 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -120,7 +120,7 @@ def foo(): def test_indentation_error(self): msg = ( - "Couldn't parse '' as Python source: " + "Couldn't parse '' as Python source: " + "'unindent does not match any outer indentation level' at line 3" ) with pytest.raises(NotPython, match=msg): diff --git a/tests/test_process.py b/tests/test_process.py index c41c57b71..1adb6cff9 100644 --- a/tests/test_process.py +++ b/tests/test_process.py @@ -130,7 +130,7 @@ def test_combine_parallel_data_with_a_corrupt_file(self): self.assert_exists(".coverage") self.assert_exists(".coverage.bad") warning_regex = ( - r"CoverageWarning: Couldn't use data file '.*\.coverage\.bad': " + r"CoverageWarning: Couldn't use data file '.*\.coverage\.bad': " + r"file (is encrypted or )?is not a database" ) assert re.search(warning_regex, out) @@ -163,9 +163,8 @@ def test_combine_no_usable_files(self): for n in "12": self.assert_exists(f".coverage.bad{n}") warning_regex = ( - r"CoverageWarning: Couldn't use data file '.*\.coverage.bad{}': " + fr"CoverageWarning: Couldn't use data file '.*\.coverage.bad{n}': " + r"file (is encrypted or )?is not a database" - .format(n) ) assert re.search(warning_regex, out) assert re.search(r"No usable data files", out) @@ -725,9 +724,9 @@ def f(): assert "Goodbye!" 
in out msg = ( - "CoverageWarning: " - "Already imported a file that will be measured: {} " - "(already-imported)").format(goodbye_path) + f"CoverageWarning: Already imported a file that will be measured: {goodbye_path} " + + "(already-imported)" + ) assert msg in out @pytest.mark.expensive @@ -1313,10 +1312,10 @@ def test_accented_dot_py(self): assert ' name="h\xe2t.py"'.encode() in xml report_expected = ( - "Name Stmts Miss Cover\n" - "----------------------------\n" - "h\xe2t.py 1 0 100%\n" - "----------------------------\n" + "Name Stmts Miss Cover\n" + + "----------------------------\n" + + "h\xe2t.py 1 0 100%\n" + + "----------------------------\n" + "TOTAL 1 0 100%\n" ) @@ -1357,12 +1356,12 @@ def test_accented_directory(self): } report_expected = ( - "Name Stmts Miss Cover\n" - "-----------------------------------\n" - "\xe2%saccented.py 1 0 100%%\n" - "-----------------------------------\n" - "TOTAL 1 0 100%%\n" - ) % os.sep + "Name Stmts Miss Cover\n" + + "-----------------------------------\n" + + f"\xe2{os.sep}accented.py 1 0 100%\n" + + "-----------------------------------\n" + + "TOTAL 1 0 100%\n" + ) out = self.run_command("coverage report") assert out == report_expected diff --git a/tests/test_templite.py b/tests/test_templite.py index e4d836478..0f86690be 100644 --- a/tests/test_templite.py +++ b/tests/test_templite.py @@ -148,7 +148,7 @@ def test_multiline_loops(self): def test_multiple_loops(self): self.try_render( - "{% for n in nums %}{{n}}{% endfor %} and " + "{% for n in nums %}{{n}}{% endfor %} and " + "{% for n in nums %}{{n}}{% endfor %}", {'nums': [1,2,3]}, "123 and 123" @@ -201,10 +201,10 @@ def getit(self): return self.it obj = Complex(it={'x':"Hello", 'y': 0}) self.try_render( - "@" - "{% if obj.getit.x %}X{% endif %}" - "{% if obj.getit.y %}Y{% endif %}" - "{% if obj.getit.y|str %}S{% endif %}" + "@" + + "{% if obj.getit.x %}X{% endif %}" + + "{% if obj.getit.y %}Y{% endif %}" + + "{% if obj.getit.y|str %}S{% endif %}" + "!", { 'obj': obj, 'str': str }, "@XS!" @@ -229,10 +229,10 @@ def test_loop_if(self): def test_nested_loops(self): self.try_render( - "@" - "{% for n in nums %}" - "{% for a in abc %}{{a}}{{n}}{% endfor %}" - "{% endfor %}" + "@" + + "{% for n in nums %}" + + "{% for a in abc %}{{a}}{{n}}{% endfor %}" + + "{% endfor %}" + "!", {'nums': [0,1,2], 'abc': ['a', 'b', 'c']}, "@a0b0c0a1b1c1a2b2c2!" 
@@ -240,28 +240,28 @@ def test_nested_loops(self): def test_whitespace_handling(self): self.try_render( - "@{% for n in nums %}\n" - " {% for a in abc %}{{a}}{{n}}{% endfor %}\n" + "@{% for n in nums %}\n" + + " {% for a in abc %}{{a}}{{n}}{% endfor %}\n" + "{% endfor %}!\n", {'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']}, "@\n a0b0c0\n\n a1b1c1\n\n a2b2c2\n!\n" ) self.try_render( - "@{% for n in nums -%}\n" - " {% for a in abc -%}\n" - " {# this disappears completely -#}\n" - " {{a-}}\n" - " {{n -}}\n" - " {{n -}}\n" - " {% endfor %}\n" + "@{% for n in nums -%}\n" + + " {% for a in abc -%}\n" + + " {# this disappears completely -#}\n" + + " {{a-}}\n" + + " {{n -}}\n" + + " {{n -}}\n" + + " {% endfor %}\n" + "{% endfor %}!\n", {'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']}, "@a00b00c00\na11b11c11\na22b22c22\n!\n" ) self.try_render( - "@{% for n in nums -%}\n" - " {{n -}}\n" - " x\n" + "@{% for n in nums -%}\n" + + " {{n -}}\n" + + " x\n" + "{% endfor %}!\n", {'nums': [0, 1, 2]}, "@0x\n1x\n2x\n!\n" @@ -270,17 +270,17 @@ def test_whitespace_handling(self): def test_eat_whitespace(self): self.try_render( - "Hey!\n" - "{% joined %}\n" - "@{% for n in nums %}\n" - " {% for a in abc %}\n" - " {# this disappears completely #}\n" - " X\n" - " Y\n" - " {{a}}\n" - " {{n }}\n" - " {% endfor %}\n" - "{% endfor %}!\n" + "Hey!\n" + + "{% joined %}\n" + + "@{% for n in nums %}\n" + + " {% for a in abc %}\n" + + " {# this disappears completely #}\n" + + " X\n" + + " Y\n" + + " {{a}}\n" + + " {{n }}\n" + + " {% endfor %}\n" + + "{% endfor %}!\n" + "{% endjoined %}\n", {'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']}, "Hey!\n@XYa0XYb0XYc0XYa1XYb1XYc1XYa2XYb2XYc2!\n" diff --git a/tests/test_testing.py b/tests/test_testing.py index 4699799ec..eae18890a 100644 --- a/tests/test_testing.py +++ b/tests/test_testing.py @@ -61,20 +61,19 @@ def test_file_count(self): self.assert_file_count("afile.*", 1) self.assert_file_count("*.q", 0) msg = re.escape( - "There should be 13 files matching 'a*.txt', but there are these: " + "There should be 13 files matching 'a*.txt', but there are these: " + "['abcde.txt', 'afile.txt', 'axczz.txt']" ) with pytest.raises(AssertionError, match=msg): self.assert_file_count("a*.txt", 13) msg = re.escape( - "There should be 12 files matching '*c*.txt', but there are these: " + "There should be 12 files matching '*c*.txt', but there are these: " + "['abcde.txt', 'axczz.txt']" ) with pytest.raises(AssertionError, match=msg): self.assert_file_count("*c*.txt", 12) msg = re.escape( - "There should be 11 files matching 'afile.*', but there are these: " - "['afile.txt']" + "There should be 11 files matching 'afile.*', but there are these: ['afile.txt']" ) with pytest.raises(AssertionError, match=msg): self.assert_file_count("afile.*", 11) @@ -374,11 +373,11 @@ def test_arcz_to_arcs(self, arcz, arcs): ([(-1, 1), (1, 2), (2, -5)], "(-1, 1) # .1\n(1, 2) # 12\n(2, -5) # 2-5\n"), ([(-26, 10), (12, 11), (18, 29), (35, -10), (1, 33), (100, 7)], ( - "(-26, 10) # -QA\n" - "(12, 11) # CB\n" - "(18, 29) # IT\n" - "(35, -10) # Z-A\n" - "(1, 33) # 1X\n" + "(-26, 10) # -QA\n" + + "(12, 11) # CB\n" + + "(18, 29) # IT\n" + + "(35, -10) # Z-A\n" + + "(1, 33) # 1X\n" + "(100, 7) # ?7\n" ) ), From 2b795b9792ef0a6434761ebd97bd3b72b51e8431 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sun, 10 Oct 2021 08:12:26 -0400 Subject: [PATCH 25/41] refactor: remove a mixin class from tests --- tests/test_data.py | 131 ++++++++++++++++++++++----------------------- 1 file changed, 64 insertions(+), 67 deletions(-) diff --git 
a/tests/test_data.py b/tests/test_data.py index 134eb55aa..78c7c280b 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -58,33 +58,30 @@ MEASURED_FILES_3_4 = ['x.py', 'y.py', 'z.py'] -class DataTestHelpers(CoverageTest): - """Test helpers for data tests.""" - - def assert_line_counts(self, covdata, counts, fullpath=False): - """Check that the line_counts of `covdata` is `counts`.""" - assert line_counts(covdata, fullpath) == counts - - def assert_measured_files(self, covdata, measured): - """Check that `covdata`'s measured files are `measured`.""" - assert_count_equal(covdata.measured_files(), measured) - - def assert_lines1_data(self, covdata): - """Check that `covdata` has the data from LINES1.""" - self.assert_line_counts(covdata, SUMMARY_1) - self.assert_measured_files(covdata, MEASURED_FILES_1) - assert_count_equal(covdata.lines("a.py"), A_PY_LINES_1) - assert not covdata.has_arcs() - - def assert_arcs3_data(self, covdata): - """Check that `covdata` has the data from ARCS3.""" - self.assert_line_counts(covdata, SUMMARY_3) - self.assert_measured_files(covdata, MEASURED_FILES_3) - assert_count_equal(covdata.lines("x.py"), X_PY_LINES_3) - assert_count_equal(covdata.arcs("x.py"), X_PY_ARCS_3) - assert_count_equal(covdata.lines("y.py"), Y_PY_LINES_3) - assert_count_equal(covdata.arcs("y.py"), Y_PY_ARCS_3) - assert covdata.has_arcs() +def assert_line_counts(covdata, counts, fullpath=False): + """Check that the line_counts of `covdata` is `counts`.""" + assert line_counts(covdata, fullpath) == counts + +def assert_measured_files(covdata, measured): + """Check that `covdata`'s measured files are `measured`.""" + assert_count_equal(covdata.measured_files(), measured) + +def assert_lines1_data(covdata): + """Check that `covdata` has the data from LINES1.""" + assert_line_counts(covdata, SUMMARY_1) + assert_measured_files(covdata, MEASURED_FILES_1) + assert_count_equal(covdata.lines("a.py"), A_PY_LINES_1) + assert not covdata.has_arcs() + +def assert_arcs3_data(covdata): + """Check that `covdata` has the data from ARCS3.""" + assert_line_counts(covdata, SUMMARY_3) + assert_measured_files(covdata, MEASURED_FILES_3) + assert_count_equal(covdata.lines("x.py"), X_PY_LINES_3) + assert_count_equal(covdata.arcs("x.py"), X_PY_ARCS_3) + assert_count_equal(covdata.lines("y.py"), Y_PY_LINES_3) + assert_count_equal(covdata.arcs("y.py"), Y_PY_ARCS_3) + assert covdata.has_arcs() def dicts_from_sets(file_data): @@ -97,7 +94,7 @@ def dicts_from_sets(file_data): return {k: dict.fromkeys(v) for k, v in file_data.items()} -class CoverageDataTest(DataTestHelpers, CoverageTest): +class CoverageDataTest(CoverageTest): """Test cases for CoverageData.""" def test_empty_data_is_false(self): @@ -128,27 +125,27 @@ def test_empty_arc_data_is_false(self): def test_adding_lines(self, lines): covdata = CoverageData() covdata.add_lines(lines) - self.assert_lines1_data(covdata) + assert_lines1_data(covdata) @pytest.mark.parametrize("arcs", [ARCS_3, dicts_from_sets(ARCS_3)]) def test_adding_arcs(self, arcs): covdata = CoverageData() covdata.add_arcs(arcs) - self.assert_arcs3_data(covdata) + assert_arcs3_data(covdata) def test_ok_to_add_lines_twice(self): covdata = CoverageData() covdata.add_lines(LINES_1) covdata.add_lines(LINES_2) - self.assert_line_counts(covdata, SUMMARY_1_2) - self.assert_measured_files(covdata, MEASURED_FILES_1_2) + assert_line_counts(covdata, SUMMARY_1_2) + assert_measured_files(covdata, MEASURED_FILES_1_2) def test_ok_to_add_arcs_twice(self): covdata = CoverageData() covdata.add_arcs(ARCS_3) 
covdata.add_arcs(ARCS_4) - self.assert_line_counts(covdata, SUMMARY_3_4) - self.assert_measured_files(covdata, MEASURED_FILES_3_4) + assert_line_counts(covdata, SUMMARY_3_4) + assert_measured_files(covdata, MEASURED_FILES_3_4) def test_cant_add_arcs_with_lines(self): covdata = CoverageData() @@ -168,13 +165,13 @@ def test_touch_file_with_lines(self): covdata = CoverageData() covdata.add_lines(LINES_1) covdata.touch_file('zzz.py') - self.assert_measured_files(covdata, MEASURED_FILES_1 + ['zzz.py']) + assert_measured_files(covdata, MEASURED_FILES_1 + ['zzz.py']) def test_touch_file_with_arcs(self): covdata = CoverageData() covdata.add_arcs(ARCS_3) covdata.touch_file('zzz.py') - self.assert_measured_files(covdata, MEASURED_FILES_3 + ['zzz.py']) + assert_measured_files(covdata, MEASURED_FILES_3 + ['zzz.py']) def test_set_query_contexts(self): covdata = CoverageData() @@ -319,8 +316,8 @@ def test_update_lines(self): covdata3.update(covdata1) covdata3.update(covdata2) - self.assert_line_counts(covdata3, SUMMARY_1_2) - self.assert_measured_files(covdata3, MEASURED_FILES_1_2) + assert_line_counts(covdata3, SUMMARY_1_2) + assert_measured_files(covdata3, MEASURED_FILES_1_2) def test_update_arcs(self): covdata1 = CoverageData(suffix='1') @@ -333,8 +330,8 @@ def test_update_arcs(self): covdata3.update(covdata1) covdata3.update(covdata2) - self.assert_line_counts(covdata3, SUMMARY_3_4) - self.assert_measured_files(covdata3, MEASURED_FILES_3_4) + assert_line_counts(covdata3, SUMMARY_3_4) + assert_measured_files(covdata3, MEASURED_FILES_3_4) def test_update_cant_mix_lines_and_arcs(self): covdata1 = CoverageData(suffix='1') @@ -421,7 +418,7 @@ def test_update_lines_empty(self): covdata2 = CoverageData(suffix='2') covdata1.update(covdata2) - self.assert_line_counts(covdata1, SUMMARY_1) + assert_line_counts(covdata1, SUMMARY_1) def test_update_arcs_empty(self): covdata1 = CoverageData(suffix='1') @@ -429,14 +426,14 @@ def test_update_arcs_empty(self): covdata2 = CoverageData(suffix='2') covdata1.update(covdata2) - self.assert_line_counts(covdata1, SUMMARY_3) + assert_line_counts(covdata1, SUMMARY_3) def test_asking_isnt_measuring(self): # Asking about an unmeasured file shouldn't make it seem measured. 
covdata = CoverageData() - self.assert_measured_files(covdata, []) + assert_measured_files(covdata, []) assert covdata.arcs("missing.py") is None - self.assert_measured_files(covdata, []) + assert_measured_files(covdata, []) def test_add_to_hash_with_lines(self): covdata = CoverageData() @@ -507,7 +504,7 @@ def test_read_and_write_are_opposites(self): covdata2 = CoverageData() covdata2.read() - self.assert_arcs3_data(covdata2) + assert_arcs3_data(covdata2) def test_thread_stress(self): covdata = CoverageData() @@ -526,11 +523,11 @@ def thread_main(): for t in threads: t.join() - self.assert_lines1_data(covdata) + assert_lines1_data(covdata) assert exceptions == [] -class CoverageDataInTempDirTest(DataTestHelpers, CoverageTest): +class CoverageDataInTempDirTest(CoverageTest): """Tests of CoverageData that need a temporary directory to make files.""" def test_read_write_lines(self): @@ -540,7 +537,7 @@ def test_read_write_lines(self): covdata2 = CoverageData("lines.dat") covdata2.read() - self.assert_lines1_data(covdata2) + assert_lines1_data(covdata2) def test_read_write_arcs(self): covdata1 = CoverageData("arcs.dat") @@ -549,7 +546,7 @@ def test_read_write_arcs(self): covdata2 = CoverageData("arcs.dat") covdata2.read() - self.assert_arcs3_data(covdata2) + assert_arcs3_data(covdata2) def test_read_errors(self): msg = r"Couldn't .* '.*[/\\]{0}': \S+" @@ -585,14 +582,14 @@ def test_read_sql_errors(self): assert not covdata -class CoverageDataFilesTest(DataTestHelpers, CoverageTest): +class CoverageDataFilesTest(CoverageTest): """Tests of CoverageData file handling.""" def test_reading_missing(self): self.assert_doesnt_exist(".coverage") covdata = CoverageData() covdata.read() - self.assert_line_counts(covdata, {}) + assert_line_counts(covdata, {}) def test_writing_and_reading(self): covdata1 = CoverageData() @@ -601,7 +598,7 @@ def test_writing_and_reading(self): covdata2 = CoverageData() covdata2.read() - self.assert_line_counts(covdata2, SUMMARY_1) + assert_line_counts(covdata2, SUMMARY_1) def test_debug_output_with_debug_option(self): # With debug option dataio, we get debug output about reading and @@ -613,7 +610,7 @@ def test_debug_output_with_debug_option(self): covdata2 = CoverageData(debug=debug) covdata2.read() - self.assert_line_counts(covdata2, SUMMARY_1) + assert_line_counts(covdata2, SUMMARY_1) assert re.search( r"^Erasing data file '.*\.coverage'\n" + @@ -632,7 +629,7 @@ def test_debug_output_without_debug_option(self): covdata2 = CoverageData(debug=debug) covdata2.read() - self.assert_line_counts(covdata2, SUMMARY_1) + assert_line_counts(covdata2, SUMMARY_1) assert debug.get_output() == "" @@ -683,8 +680,8 @@ def test_combining(self): covdata3 = CoverageData() combine_parallel_data(covdata3) - self.assert_line_counts(covdata3, SUMMARY_1_2) - self.assert_measured_files(covdata3, MEASURED_FILES_1_2) + assert_line_counts(covdata3, SUMMARY_1_2) + assert_measured_files(covdata3, MEASURED_FILES_1_2) self.assert_file_count(".coverage.*", 0) def test_erasing(self): @@ -693,11 +690,11 @@ def test_erasing(self): covdata1.write() covdata1.erase() - self.assert_line_counts(covdata1, {}) + assert_line_counts(covdata1, {}) covdata2 = CoverageData() covdata2.read() - self.assert_line_counts(covdata2, {}) + assert_line_counts(covdata2, {}) def test_erasing_parallel(self): self.make_file("datafile.1") @@ -742,8 +739,8 @@ def test_combining_with_aliases(self): sub_bpy = canonical_filename('./sub/b.py') template_html = canonical_filename('./template.html') - self.assert_line_counts(covdata3, 
{apy: 4, sub_bpy: 2, template_html: 1}, fullpath=True) - self.assert_measured_files(covdata3, [apy, sub_bpy, template_html]) + assert_line_counts(covdata3, {apy: 4, sub_bpy: 2, template_html: 1}, fullpath=True) + assert_measured_files(covdata3, [apy, sub_bpy, template_html]) assert covdata3.file_tracer(template_html) == 'html.plugin' def test_combining_from_different_directories(self): @@ -765,8 +762,8 @@ def test_combining_from_different_directories(self): covdata3 = CoverageData() combine_parallel_data(covdata3, data_paths=['cov1', 'cov2']) - self.assert_line_counts(covdata3, SUMMARY_1_2) - self.assert_measured_files(covdata3, MEASURED_FILES_1_2) + assert_line_counts(covdata3, SUMMARY_1_2) + assert_measured_files(covdata3, MEASURED_FILES_1_2) self.assert_doesnt_exist("cov1/.coverage.1") self.assert_doesnt_exist("cov2/.coverage.2") self.assert_exists(".coverage.xxx") @@ -794,8 +791,8 @@ def test_combining_from_files(self): covdata3 = CoverageData() combine_parallel_data(covdata3, data_paths=['cov1', 'cov2/.coverage.2']) - self.assert_line_counts(covdata3, SUMMARY_1_2) - self.assert_measured_files(covdata3, MEASURED_FILES_1_2) + assert_line_counts(covdata3, SUMMARY_1_2) + assert_measured_files(covdata3, MEASURED_FILES_1_2) self.assert_doesnt_exist("cov1/.coverage.1") self.assert_doesnt_exist("cov2/.coverage.2") self.assert_exists(".coverage.xxx") @@ -820,7 +817,7 @@ def test_interleaved_erasing_bug716(self): covdata2.add_lines(LINES_1) -class DumpsLoadsTest(DataTestHelpers, CoverageTest): +class DumpsLoadsTest(CoverageTest): """Tests of CoverageData.dumps and loads.""" run_in_temp_dir = False @@ -833,8 +830,8 @@ def test_serialization(self): covdata2 = CoverageData(no_disk=True) covdata2.loads(serial) - self.assert_line_counts(covdata2, SUMMARY_1_2) - self.assert_measured_files(covdata2, MEASURED_FILES_1_2) + assert_line_counts(covdata2, SUMMARY_1_2) + assert_measured_files(covdata2, MEASURED_FILES_1_2) def test_misfed_serialization(self): covdata = CoverageData(no_disk=True) From 036baa78a006c061862ed2e16db51a2f8be7b29e Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sun, 10 Oct 2021 08:46:00 -0400 Subject: [PATCH 26/41] test: run test_data.py with debugging on --- tests/test_data.py | 206 ++++++++++++++++++++++++--------------------- 1 file changed, 110 insertions(+), 96 deletions(-) diff --git a/tests/test_data.py b/tests/test_data.py index 78c7c280b..86b3870e6 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -58,6 +58,19 @@ MEASURED_FILES_3_4 = ['x.py', 'y.py', 'z.py'] +def DebugCoverageData(*args, **kwargs): + """Factory for CovergeData instances with debugging turned on. + + This lets us exercise the debugging lines in sqldata.py. We don't make + any assertions about the debug output, but at least we can know that they + execute successfully, and they won't be marked as distracting missing + lines in our coverage reports. 
+ """ + assert "debug" not in kwargs + debug = DebugControlString(options=["dataio", "dataop", "sql"]) + return CoverageData(*args, debug=debug, **kwargs) + + def assert_line_counts(covdata, counts, fullpath=False): """Check that the line_counts of `covdata` is `counts`.""" assert line_counts(covdata, fullpath) == counts @@ -98,83 +111,83 @@ class CoverageDataTest(CoverageTest): """Test cases for CoverageData.""" def test_empty_data_is_false(self): - covdata = CoverageData() + covdata = DebugCoverageData() assert not covdata def test_line_data_is_true(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_lines(LINES_1) assert covdata def test_arc_data_is_true(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_arcs(ARCS_3) assert covdata def test_empty_line_data_is_false(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_lines({}) assert not covdata def test_empty_arc_data_is_false(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_arcs({}) assert not covdata @pytest.mark.parametrize("lines", [LINES_1, dicts_from_sets(LINES_1)]) def test_adding_lines(self, lines): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_lines(lines) assert_lines1_data(covdata) @pytest.mark.parametrize("arcs", [ARCS_3, dicts_from_sets(ARCS_3)]) def test_adding_arcs(self, arcs): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_arcs(arcs) assert_arcs3_data(covdata) def test_ok_to_add_lines_twice(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_lines(LINES_1) covdata.add_lines(LINES_2) assert_line_counts(covdata, SUMMARY_1_2) assert_measured_files(covdata, MEASURED_FILES_1_2) def test_ok_to_add_arcs_twice(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_arcs(ARCS_3) covdata.add_arcs(ARCS_4) assert_line_counts(covdata, SUMMARY_3_4) assert_measured_files(covdata, MEASURED_FILES_3_4) def test_cant_add_arcs_with_lines(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_lines(LINES_1) msg = "Can't add branch measurements to existing line data" with pytest.raises(CoverageException, match=msg): covdata.add_arcs(ARCS_3) def test_cant_add_lines_with_arcs(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_arcs(ARCS_3) msg = "Can't add line measurements to existing branch data" with pytest.raises(CoverageException, match=msg): covdata.add_lines(LINES_1) def test_touch_file_with_lines(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_lines(LINES_1) covdata.touch_file('zzz.py') assert_measured_files(covdata, MEASURED_FILES_1 + ['zzz.py']) def test_touch_file_with_arcs(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_arcs(ARCS_3) covdata.touch_file('zzz.py') assert_measured_files(covdata, MEASURED_FILES_3 + ['zzz.py']) def test_set_query_contexts(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.set_context('test_a') covdata.add_lines(LINES_1) covdata.set_query_contexts(['te.*a']) @@ -183,14 +196,14 @@ def test_set_query_contexts(self): assert covdata.lines('a.py') == [] def test_no_lines_vs_unmeasured_file(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_lines(LINES_1) covdata.touch_file('zzz.py') assert covdata.lines('zzz.py') == [] assert covdata.lines('no_such_file.py') is None def test_lines_with_contexts(self): - covdata = CoverageData() + covdata = 
DebugCoverageData() covdata.set_context('test_a') covdata.add_lines(LINES_1) assert covdata.lines('a.py') == [1, 2] @@ -200,7 +213,7 @@ def test_lines_with_contexts(self): assert covdata.lines('a.py') == [] def test_contexts_by_lineno_with_lines(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.set_context('test_a') covdata.add_lines(LINES_1) expected = {1: ['test_a'], 2: ['test_a']} @@ -208,7 +221,7 @@ def test_contexts_by_lineno_with_lines(self): @pytest.mark.parametrize("lines", [LINES_1, dicts_from_sets(LINES_1)]) def test_no_duplicate_lines(self, lines): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.set_context("context1") covdata.add_lines(lines) covdata.set_context("context2") @@ -217,7 +230,7 @@ def test_no_duplicate_lines(self, lines): @pytest.mark.parametrize("arcs", [ARCS_3, dicts_from_sets(ARCS_3)]) def test_no_duplicate_arcs(self, arcs): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.set_context("context1") covdata.add_arcs(arcs) covdata.set_context("context2") @@ -225,7 +238,7 @@ def test_no_duplicate_arcs(self, arcs): assert covdata.arcs('x.py') == X_PY_ARCS_3 def test_no_arcs_vs_unmeasured_file(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_arcs(ARCS_3) covdata.touch_file('zzz.py') assert covdata.lines('zzz.py') == [] @@ -234,7 +247,7 @@ def test_no_arcs_vs_unmeasured_file(self): assert covdata.arcs('no_such_file.py') is None def test_arcs_with_contexts(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.set_context('test_x') covdata.add_arcs(ARCS_3) assert covdata.arcs('x.py') == [(-1, 1), (1, 2), (2, 3), (3, -1)] @@ -244,20 +257,20 @@ def test_arcs_with_contexts(self): assert covdata.arcs('x.py') == [] def test_contexts_by_lineno_with_arcs(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.set_context('test_x') covdata.add_arcs(ARCS_3) expected = {1: ['test_x'], 2: ['test_x'], 3: ['test_x']} assert covdata.contexts_by_lineno('x.py') == expected def test_contexts_by_lineno_with_unknown_file(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.set_context('test_x') covdata.add_arcs(ARCS_3) assert covdata.contexts_by_lineno('xyz.py') == {} def test_context_by_lineno_with_query_contexts_with_lines(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.set_context("test_1") covdata.add_lines(LINES_1) covdata.set_context("test_2") @@ -266,7 +279,7 @@ def test_context_by_lineno_with_query_contexts_with_lines(self): assert covdata.contexts_by_lineno("a.py") == dict.fromkeys([1,2], ["test_1"]) def test_context_by_lineno_with_query_contexts_with_arcs(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.set_context("test_1") covdata.add_arcs(ARCS_3) covdata.set_context("test_2") @@ -275,7 +288,7 @@ def test_context_by_lineno_with_query_contexts_with_arcs(self): assert covdata.contexts_by_lineno("x.py") == dict.fromkeys([1,2,3], ["test_1"]) def test_file_tracer_name(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_lines({ "p1.foo": dict.fromkeys([1, 2, 3]), "p2.html": dict.fromkeys([10, 11, 12]), @@ -287,7 +300,7 @@ def test_file_tracer_name(self): assert covdata.file_tracer("p3.not_here") is None def test_cant_file_tracer_unmeasured_files(self): - covdata = CoverageData() + covdata = DebugCoverageData() msg = "Can't add file tracer data for unmeasured file 'p1.foo'" with pytest.raises(CoverageException, match=msg): covdata.add_file_tracers({"p1.foo": 
"p1.plugin"}) @@ -297,7 +310,7 @@ def test_cant_file_tracer_unmeasured_files(self): covdata.add_file_tracers({"p1.foo": "p1.plugin"}) def test_cant_change_file_tracer_name(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_lines({"p1.foo": dict.fromkeys([1, 2, 3])}) covdata.add_file_tracers({"p1.foo": "p1.plugin"}) @@ -306,13 +319,13 @@ def test_cant_change_file_tracer_name(self): covdata.add_file_tracers({"p1.foo": "p1.plugin.foo"}) def test_update_lines(self): - covdata1 = CoverageData(suffix='1') + covdata1 = DebugCoverageData(suffix='1') covdata1.add_lines(LINES_1) - covdata2 = CoverageData(suffix='2') + covdata2 = DebugCoverageData(suffix='2') covdata2.add_lines(LINES_2) - covdata3 = CoverageData(suffix='3') + covdata3 = DebugCoverageData(suffix='3') covdata3.update(covdata1) covdata3.update(covdata2) @@ -320,13 +333,13 @@ def test_update_lines(self): assert_measured_files(covdata3, MEASURED_FILES_1_2) def test_update_arcs(self): - covdata1 = CoverageData(suffix='1') + covdata1 = DebugCoverageData(suffix='1') covdata1.add_arcs(ARCS_3) - covdata2 = CoverageData(suffix='2') + covdata2 = DebugCoverageData(suffix='2') covdata2.add_arcs(ARCS_4) - covdata3 = CoverageData(suffix='3') + covdata3 = DebugCoverageData(suffix='3') covdata3.update(covdata1) covdata3.update(covdata2) @@ -334,10 +347,10 @@ def test_update_arcs(self): assert_measured_files(covdata3, MEASURED_FILES_3_4) def test_update_cant_mix_lines_and_arcs(self): - covdata1 = CoverageData(suffix='1') + covdata1 = DebugCoverageData(suffix='1') covdata1.add_lines(LINES_1) - covdata2 = CoverageData(suffix='2') + covdata2 = DebugCoverageData(suffix='2') covdata2.add_arcs(ARCS_3) with pytest.raises(CoverageException, match="Can't combine arc data with line data"): @@ -347,7 +360,7 @@ def test_update_cant_mix_lines_and_arcs(self): covdata2.update(covdata1) def test_update_file_tracers(self): - covdata1 = CoverageData(suffix='1') + covdata1 = DebugCoverageData(suffix='1') covdata1.add_lines({ "p1.html": dict.fromkeys([1, 2, 3, 4]), "p2.html": dict.fromkeys([5, 6, 7]), @@ -358,7 +371,7 @@ def test_update_file_tracers(self): "p2.html": "html.plugin2", }) - covdata2 = CoverageData(suffix='2') + covdata2 = DebugCoverageData(suffix='2') covdata2.add_lines({ "p1.html": dict.fromkeys([3, 4, 5, 6]), "p2.html": dict.fromkeys([7, 8, 9]), @@ -371,7 +384,7 @@ def test_update_file_tracers(self): "p3.foo": "foo_plugin", }) - covdata3 = CoverageData(suffix='3') + covdata3 = DebugCoverageData(suffix='3') covdata3.update(covdata1) covdata3.update(covdata2) assert covdata3.file_tracer("p1.html") == "html.plugin" @@ -380,11 +393,11 @@ def test_update_file_tracers(self): assert covdata3.file_tracer("main.py") == "" def test_update_conflicting_file_tracers(self): - covdata1 = CoverageData(suffix='1') + covdata1 = DebugCoverageData(suffix='1') covdata1.add_lines({"p1.html": dict.fromkeys([1, 2, 3])}) covdata1.add_file_tracers({"p1.html": "html.plugin"}) - covdata2 = CoverageData(suffix='2') + covdata2 = DebugCoverageData(suffix='2') covdata2.add_lines({"p1.html": dict.fromkeys([1, 2, 3])}) covdata2.add_file_tracers({"p1.html": "html.other_plugin"}) @@ -397,11 +410,11 @@ def test_update_conflicting_file_tracers(self): covdata2.update(covdata1) def test_update_file_tracer_vs_no_file_tracer(self): - covdata1 = CoverageData(suffix="1") + covdata1 = DebugCoverageData(suffix="1") covdata1.add_lines({"p1.html": dict.fromkeys([1, 2, 3])}) covdata1.add_file_tracers({"p1.html": "html.plugin"}) - covdata2 = CoverageData(suffix="2") + covdata2 = 
DebugCoverageData(suffix="2") covdata2.add_lines({"p1.html": dict.fromkeys([1, 2, 3])}) msg = "Conflicting file tracer name for 'p1.html': u?'html.plugin' vs u?''" @@ -413,30 +426,30 @@ def test_update_file_tracer_vs_no_file_tracer(self): covdata2.update(covdata1) def test_update_lines_empty(self): - covdata1 = CoverageData(suffix='1') + covdata1 = DebugCoverageData(suffix='1') covdata1.add_lines(LINES_1) - covdata2 = CoverageData(suffix='2') + covdata2 = DebugCoverageData(suffix='2') covdata1.update(covdata2) assert_line_counts(covdata1, SUMMARY_1) def test_update_arcs_empty(self): - covdata1 = CoverageData(suffix='1') + covdata1 = DebugCoverageData(suffix='1') covdata1.add_arcs(ARCS_3) - covdata2 = CoverageData(suffix='2') + covdata2 = DebugCoverageData(suffix='2') covdata1.update(covdata2) assert_line_counts(covdata1, SUMMARY_3) def test_asking_isnt_measuring(self): # Asking about an unmeasured file shouldn't make it seem measured. - covdata = CoverageData() + covdata = DebugCoverageData() assert_measured_files(covdata, []) assert covdata.arcs("missing.py") is None assert_measured_files(covdata, []) def test_add_to_hash_with_lines(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_lines(LINES_1) hasher = mock.Mock() add_data_to_hash(covdata, "a.py", hasher) @@ -446,7 +459,7 @@ def test_add_to_hash_with_lines(self): ] def test_add_to_hash_with_arcs(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_arcs(ARCS_3) covdata.add_file_tracers({"y.py": "hologram_plugin"}) hasher = mock.Mock() @@ -458,7 +471,7 @@ def test_add_to_hash_with_arcs(self): def test_add_to_lines_hash_with_missing_file(self): # https://github.com/nedbat/coveragepy/issues/403 - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_lines(LINES_1) hasher = mock.Mock() add_data_to_hash(covdata, "missing.py", hasher) @@ -469,7 +482,7 @@ def test_add_to_lines_hash_with_missing_file(self): def test_add_to_arcs_hash_with_missing_file(self): # https://github.com/nedbat/coveragepy/issues/403 - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_arcs(ARCS_3) covdata.add_file_tracers({"y.py": "hologram_plugin"}) hasher = mock.Mock() @@ -480,34 +493,34 @@ def test_add_to_arcs_hash_with_missing_file(self): ] def test_empty_lines_are_still_lines(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_lines({}) covdata.touch_file("abc.py") assert not covdata.has_arcs() def test_empty_arcs_are_still_arcs(self): - covdata = CoverageData() + covdata = DebugCoverageData() covdata.add_arcs({}) covdata.touch_file("abc.py") assert covdata.has_arcs() def test_cant_touch_in_empty_data(self): - covdata = CoverageData() + covdata = DebugCoverageData() msg = "Can't touch files in an empty CoverageData" with pytest.raises(CoverageException, match=msg): covdata.touch_file("abc.py") def test_read_and_write_are_opposites(self): - covdata1 = CoverageData() + covdata1 = DebugCoverageData() covdata1.add_arcs(ARCS_3) covdata1.write() - covdata2 = CoverageData() + covdata2 = DebugCoverageData() covdata2.read() assert_arcs3_data(covdata2) def test_thread_stress(self): - covdata = CoverageData() + covdata = DebugCoverageData() exceptions = [] def thread_main(): @@ -531,20 +544,20 @@ class CoverageDataInTempDirTest(CoverageTest): """Tests of CoverageData that need a temporary directory to make files.""" def test_read_write_lines(self): - covdata1 = CoverageData("lines.dat") + covdata1 = DebugCoverageData("lines.dat") covdata1.add_lines(LINES_1) 
covdata1.write() - covdata2 = CoverageData("lines.dat") + covdata2 = DebugCoverageData("lines.dat") covdata2.read() assert_lines1_data(covdata2) def test_read_write_arcs(self): - covdata1 = CoverageData("arcs.dat") + covdata1 = DebugCoverageData("arcs.dat") covdata1.add_arcs(ARCS_3) covdata1.write() - covdata2 = CoverageData("arcs.dat") + covdata2 = DebugCoverageData("arcs.dat") covdata2.read() assert_arcs3_data(covdata2) @@ -553,13 +566,13 @@ def test_read_errors(self): self.make_file("xyzzy.dat", "xyzzy") with pytest.raises(CoverageException, match=msg.format("xyzzy.dat")): - covdata = CoverageData("xyzzy.dat") + covdata = DebugCoverageData("xyzzy.dat") covdata.read() assert not covdata self.make_file("empty.dat", "") with pytest.raises(CoverageException, match=msg.format("empty.dat")): - covdata = CoverageData("empty.dat") + covdata = DebugCoverageData("empty.dat") covdata.read() assert not covdata @@ -569,7 +582,7 @@ def test_read_sql_errors(self): con.execute("insert into coverage_schema (version) values (99)") msg = r"Couldn't .* '.*[/\\]{}': wrong schema: 99 instead of \d+".format("wrong_schema.db") with pytest.raises(CoverageException, match=msg): - covdata = CoverageData("wrong_schema.db") + covdata = DebugCoverageData("wrong_schema.db") covdata.read() assert not covdata @@ -577,7 +590,7 @@ def test_read_sql_errors(self): con.execute("create table foobar (baz text)") msg = r"Couldn't .* '.*[/\\]{}': \S+".format("no_schema.db") with pytest.raises(CoverageException, match=msg): - covdata = CoverageData("no_schema.db") + covdata = DebugCoverageData("no_schema.db") covdata.read() assert not covdata @@ -587,16 +600,16 @@ class CoverageDataFilesTest(CoverageTest): def test_reading_missing(self): self.assert_doesnt_exist(".coverage") - covdata = CoverageData() + covdata = DebugCoverageData() covdata.read() assert_line_counts(covdata, {}) def test_writing_and_reading(self): - covdata1 = CoverageData() + covdata1 = DebugCoverageData() covdata1.add_lines(LINES_1) covdata1.write() - covdata2 = CoverageData() + covdata2 = DebugCoverageData() covdata2.read() assert_line_counts(covdata2, SUMMARY_1) @@ -635,7 +648,7 @@ def test_debug_output_without_debug_option(self): def test_explicit_suffix(self): self.assert_doesnt_exist(".coverage.SUFFIX") - covdata = CoverageData(suffix='SUFFIX') + covdata = DebugCoverageData(suffix='SUFFIX') covdata.add_lines(LINES_1) covdata.write() self.assert_exists(".coverage.SUFFIX") @@ -645,7 +658,7 @@ def test_true_suffix(self): self.assert_file_count(".coverage.*", 0) # suffix=True will make a randomly named data file. - covdata1 = CoverageData(suffix=True) + covdata1 = DebugCoverageData(suffix=True) covdata1.add_lines(LINES_1) covdata1.write() self.assert_doesnt_exist(".coverage") @@ -653,7 +666,7 @@ def test_true_suffix(self): assert len(data_files1) == 1 # Another suffix=True will choose a different name. 
- covdata2 = CoverageData(suffix=True) + covdata2 = DebugCoverageData(suffix=True) covdata2.add_lines(LINES_1) covdata2.write() self.assert_doesnt_exist(".coverage") @@ -666,33 +679,33 @@ def test_true_suffix(self): def test_combining(self): self.assert_file_count(".coverage.*", 0) - covdata1 = CoverageData(suffix='1') + covdata1 = DebugCoverageData(suffix='1') covdata1.add_lines(LINES_1) covdata1.write() self.assert_exists(".coverage.1") self.assert_file_count(".coverage.*", 1) - covdata2 = CoverageData(suffix='2') + covdata2 = DebugCoverageData(suffix='2') covdata2.add_lines(LINES_2) covdata2.write() self.assert_exists(".coverage.2") self.assert_file_count(".coverage.*", 2) - covdata3 = CoverageData() + covdata3 = DebugCoverageData() combine_parallel_data(covdata3) assert_line_counts(covdata3, SUMMARY_1_2) assert_measured_files(covdata3, MEASURED_FILES_1_2) self.assert_file_count(".coverage.*", 0) def test_erasing(self): - covdata1 = CoverageData() + covdata1 = DebugCoverageData() covdata1.add_lines(LINES_1) covdata1.write() covdata1.erase() assert_line_counts(covdata1, {}) - covdata2 = CoverageData() + covdata2 = DebugCoverageData() covdata2.read() assert_line_counts(covdata2, {}) @@ -700,13 +713,13 @@ def test_erasing_parallel(self): self.make_file("datafile.1") self.make_file("datafile.2") self.make_file(".coverage") - data = CoverageData("datafile") + data = DebugCoverageData("datafile") data.erase(parallel=True) self.assert_file_count("datafile.*", 0) self.assert_exists(".coverage") def test_combining_with_aliases(self): - covdata1 = CoverageData(suffix='1') + covdata1 = DebugCoverageData(suffix='1') covdata1.add_lines({ '/home/ned/proj/src/a.py': {1: None, 2: None}, '/home/ned/proj/src/sub/b.py': {3: None}, @@ -717,7 +730,7 @@ def test_combining_with_aliases(self): }) covdata1.write() - covdata2 = CoverageData(suffix='2') + covdata2 = DebugCoverageData(suffix='2') covdata2.add_lines({ r'c:\ned\test\a.py': {4: None, 5: None}, r'c:\ned\test\sub\b.py': {3: None, 6: None}, @@ -726,7 +739,7 @@ def test_combining_with_aliases(self): self.assert_file_count(".coverage.*", 2) - covdata3 = CoverageData() + covdata3 = DebugCoverageData() aliases = PathAliases() aliases.add("/home/ned/proj/src/", "./") aliases.add(r"c:\ned\test", "./") @@ -745,21 +758,21 @@ def test_combining_with_aliases(self): def test_combining_from_different_directories(self): os.makedirs('cov1') - covdata1 = CoverageData('cov1/.coverage.1') + covdata1 = DebugCoverageData('cov1/.coverage.1') covdata1.add_lines(LINES_1) covdata1.write() os.makedirs('cov2') - covdata2 = CoverageData('cov2/.coverage.2') + covdata2 = DebugCoverageData('cov2/.coverage.2') covdata2.add_lines(LINES_2) covdata2.write() # This data won't be included. - covdata_xxx = CoverageData('.coverage.xxx') + covdata_xxx = DebugCoverageData('.coverage.xxx') covdata_xxx.add_arcs(ARCS_3) covdata_xxx.write() - covdata3 = CoverageData() + covdata3 = DebugCoverageData() combine_parallel_data(covdata3, data_paths=['cov1', 'cov2']) assert_line_counts(covdata3, SUMMARY_1_2) @@ -770,25 +783,25 @@ def test_combining_from_different_directories(self): def test_combining_from_files(self): os.makedirs('cov1') - covdata1 = CoverageData('cov1/.coverage.1') + covdata1 = DebugCoverageData('cov1/.coverage.1') covdata1.add_lines(LINES_1) covdata1.write() os.makedirs('cov2') - covdata2 = CoverageData('cov2/.coverage.2') + covdata2 = DebugCoverageData('cov2/.coverage.2') covdata2.add_lines(LINES_2) covdata2.write() # This data won't be included. 
- covdata_xxx = CoverageData('.coverage.xxx') + covdata_xxx = DebugCoverageData('.coverage.xxx') covdata_xxx.add_arcs(ARCS_3) covdata_xxx.write() - covdata_2xxx = CoverageData('cov2/.coverage.xxx') + covdata_2xxx = DebugCoverageData('cov2/.coverage.xxx') covdata_2xxx.add_arcs(ARCS_3) covdata_2xxx.write() - covdata3 = CoverageData() + covdata3 = DebugCoverageData() combine_parallel_data(covdata3, data_paths=['cov1', 'cov2/.coverage.2']) assert_line_counts(covdata3, SUMMARY_1_2) @@ -799,15 +812,15 @@ def test_combining_from_files(self): self.assert_exists("cov2/.coverage.xxx") def test_combining_from_nonexistent_directories(self): - covdata = CoverageData() + covdata = DebugCoverageData() msg = "Couldn't combine from non-existent path 'xyzzy'" with pytest.raises(CoverageException, match=msg): combine_parallel_data(covdata, data_paths=['xyzzy']) def test_interleaved_erasing_bug716(self): # pytest-cov could produce this scenario. #716 - covdata1 = CoverageData() - covdata2 = CoverageData() + covdata1 = DebugCoverageData() + covdata2 = DebugCoverageData() # this used to create the .coverage database file.. covdata2.set_context("") # then this would erase it all.. @@ -822,13 +835,14 @@ class DumpsLoadsTest(CoverageTest): run_in_temp_dir = False - def test_serialization(self): - covdata1 = CoverageData(no_disk=True) + @pytest.mark.parametrize("klass", [CoverageData, DebugCoverageData]) + def test_serialization(self, klass): + covdata1 = klass(no_disk=True) covdata1.add_lines(LINES_1) covdata1.add_lines(LINES_2) serial = covdata1.dumps() - covdata2 = CoverageData(no_disk=True) + covdata2 = klass(no_disk=True) covdata2.loads(serial) assert_line_counts(covdata2, SUMMARY_1_2) assert_measured_files(covdata2, MEASURED_FILES_1_2) From e54b7576a35b3dd4642788cff557a2ccebf7582b Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sun, 10 Oct 2021 11:58:26 -0400 Subject: [PATCH 27/41] refactor: no need for maybe-u prefixes in test regexes That was for Python 2, which we don't support anymore. 
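
On Python 3 the repr() of a str never carries a "u" prefix, so the optional
u? in these regexes can no longer match anything and is just noise. A quick
illustration, assuming Python 3:

    # repr() of a str on Python 3 has no "u" prefix, so regexes that
    # match repr output don't need an optional u? any more.
    assert repr("html.plugin") == "'html.plugin'"
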
--- tests/test_config.py | 6 +++--- tests/test_data.py | 10 +++++----- tests/test_html.py | 2 +- tests/test_process.py | 4 ++-- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/test_config.py b/tests/test_config.py index aec18bf2e..c2874188c 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -190,14 +190,14 @@ def test_parse_errors(self): ("[tool.coverage.run]\ntimid = \"maybe?\"\n", r"maybe[?]"), ("[tool.coverage.run\n", None), ('[tool.coverage.report]\nexclude_lines = ["foo("]\n', - r"Invalid \[tool.coverage.report\].exclude_lines value u?'foo\(': " + + r"Invalid \[tool.coverage.report\].exclude_lines value 'foo\(': " + r"(unbalanced parenthesis|missing \))"), ('[tool.coverage.report]\npartial_branches = ["foo["]\n', - r"Invalid \[tool.coverage.report\].partial_branches value u?'foo\[': " + + r"Invalid \[tool.coverage.report\].partial_branches value 'foo\[': " + r"(unexpected end of regular expression|unterminated character set)"), ('[tool.coverage.report]\npartial_branches_always = ["foo***"]\n', r"Invalid \[tool.coverage.report\].partial_branches_always value " + - r"u?'foo\*\*\*': " + + r"'foo\*\*\*': " + r"multiple repeat"), ('[tool.coverage.run]\nconcurrency="foo"', "not a list"), ("[tool.coverage.report]\nprecision=1.23", "not an integer"), diff --git a/tests/test_data.py b/tests/test_data.py index 86b3870e6..a1689ad62 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -314,7 +314,7 @@ def test_cant_change_file_tracer_name(self): covdata.add_lines({"p1.foo": dict.fromkeys([1, 2, 3])}) covdata.add_file_tracers({"p1.foo": "p1.plugin"}) - msg = "Conflicting file tracer name for 'p1.foo': u?'p1.plugin' vs u?'p1.plugin.foo'" + msg = "Conflicting file tracer name for 'p1.foo': 'p1.plugin' vs 'p1.plugin.foo'" with pytest.raises(CoverageException, match=msg): covdata.add_file_tracers({"p1.foo": "p1.plugin.foo"}) @@ -401,11 +401,11 @@ def test_update_conflicting_file_tracers(self): covdata2.add_lines({"p1.html": dict.fromkeys([1, 2, 3])}) covdata2.add_file_tracers({"p1.html": "html.other_plugin"}) - msg = "Conflicting file tracer name for 'p1.html': u?'html.plugin' vs u?'html.other_plugin'" + msg = "Conflicting file tracer name for 'p1.html': 'html.plugin' vs 'html.other_plugin'" with pytest.raises(CoverageException, match=msg): covdata1.update(covdata2) - msg = "Conflicting file tracer name for 'p1.html': u?'html.other_plugin' vs u?'html.plugin'" + msg = "Conflicting file tracer name for 'p1.html': 'html.other_plugin' vs 'html.plugin'" with pytest.raises(CoverageException, match=msg): covdata2.update(covdata1) @@ -417,11 +417,11 @@ def test_update_file_tracer_vs_no_file_tracer(self): covdata2 = DebugCoverageData(suffix="2") covdata2.add_lines({"p1.html": dict.fromkeys([1, 2, 3])}) - msg = "Conflicting file tracer name for 'p1.html': u?'html.plugin' vs u?''" + msg = "Conflicting file tracer name for 'p1.html': 'html.plugin' vs ''" with pytest.raises(CoverageException, match=msg): covdata1.update(covdata2) - msg = "Conflicting file tracer name for 'p1.html': u?'' vs u?'html.plugin'" + msg = "Conflicting file tracer name for 'p1.html': '' vs 'html.plugin'" with pytest.raises(CoverageException, match=msg): covdata2.update(covdata1) diff --git a/tests/test_html.py b/tests/test_html.py index f5908d7b8..7368cdb1a 100644 --- a/tests/test_html.py +++ b/tests/test_html.py @@ -607,7 +607,7 @@ def test_cant_find_static_files(self): self.make_file("main.py", "print(17)") cov = coverage.Coverage() self.start_import_stop(cov, "main") - msg = "Couldn't find 
static file u?'.*'" + msg = "Couldn't find static file '.*'" with pytest.raises(CoverageException, match=msg): cov.html_report() diff --git a/tests/test_process.py b/tests/test_process.py index 1adb6cff9..6c5dd76bf 100644 --- a/tests/test_process.py +++ b/tests/test_process.py @@ -1788,11 +1788,11 @@ def test_us_in_venv_isnt_measured(self, coverage_command): print(re_lines(debug_out, "myproduct")) assert re_lines( debug_out, - r"^Not tracing .*\bmyproduct.py': module u?'myproduct' falls outside the --source spec" + r"^Not tracing .*\bmyproduct.py': module 'myproduct' falls outside the --source spec" ) assert re_lines( debug_out, - r"^Not tracing .*\bcolorsys.py': module u?'colorsys' falls outside the --source spec" + r"^Not tracing .*\bcolorsys.py': module 'colorsys' falls outside the --source spec" ) out = run_in_venv("python -m coverage report") From 6c5ef77a267e244f4e50a161bd6bcf09831de943 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sun, 10 Oct 2021 12:06:19 -0400 Subject: [PATCH 28/41] test: test two add_file_tracer possibilities --- tests/test_data.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/tests/test_data.py b/tests/test_data.py index a1689ad62..3adba5f09 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -296,9 +296,31 @@ def test_file_tracer_name(self): }) covdata.add_file_tracers({"p1.foo": "p1.plugin", "p2.html": "p2.plugin"}) assert covdata.file_tracer("p1.foo") == "p1.plugin" + assert covdata.file_tracer("p2.html") == "p2.plugin" assert covdata.file_tracer("main.py") == "" assert covdata.file_tracer("p3.not_here") is None + def test_ok_to_repeat_file_tracer(self): + covdata = DebugCoverageData() + covdata.add_lines({ + "p1.foo": dict.fromkeys([1, 2, 3]), + "p2.html": dict.fromkeys([10, 11, 12]), + }) + covdata.add_file_tracers({"p1.foo": "p1.plugin", "p2.html": "p2.plugin"}) + covdata.add_file_tracers({"p1.foo": "p1.plugin"}) + assert covdata.file_tracer("p1.foo") == "p1.plugin" + + def test_ok_to_set_empty_file_tracer(self): + covdata = DebugCoverageData() + covdata.add_lines({ + "p1.foo": dict.fromkeys([1, 2, 3]), + "p2.html": dict.fromkeys([10, 11, 12]), + "main.py": dict.fromkeys([20]), + }) + covdata.add_file_tracers({"p1.foo": "p1.plugin", "main.py": ""}) + assert covdata.file_tracer("p1.foo") == "p1.plugin" + assert covdata.file_tracer("main.py") == "" + def test_cant_file_tracer_unmeasured_files(self): covdata = DebugCoverageData() msg = "Can't add file tracer data for unmeasured file 'p1.foo'" From 4144f9595595f0aff458c3da5c70ff6fe8de570e Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sun, 10 Oct 2021 12:16:02 -0400 Subject: [PATCH 29/41] refactor: simplify some strange string formatting --- tests/test_data.py | 6 +++--- tests/test_process.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/test_data.py b/tests/test_data.py index 3adba5f09..0660591f2 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -584,7 +584,7 @@ def test_read_write_arcs(self): assert_arcs3_data(covdata2) def test_read_errors(self): - msg = r"Couldn't .* '.*[/\\]{0}': \S+" + msg = r"Couldn't .* '.*[/\\]{}': \S+" self.make_file("xyzzy.dat", "xyzzy") with pytest.raises(CoverageException, match=msg.format("xyzzy.dat")): @@ -602,7 +602,7 @@ def test_read_sql_errors(self): with sqlite3.connect("wrong_schema.db") as con: con.execute("create table coverage_schema (version integer)") con.execute("insert into coverage_schema (version) values (99)") - msg = r"Couldn't .* '.*[/\\]{}': wrong schema: 99 instead of 
\d+".format("wrong_schema.db") + msg = r"Couldn't .* '.*[/\\]wrong_schema.db': wrong schema: 99 instead of \d+" with pytest.raises(CoverageException, match=msg): covdata = DebugCoverageData("wrong_schema.db") covdata.read() @@ -610,7 +610,7 @@ def test_read_sql_errors(self): with sqlite3.connect("no_schema.db") as con: con.execute("create table foobar (baz text)") - msg = r"Couldn't .* '.*[/\\]{}': \S+".format("no_schema.db") + msg = r"Couldn't .* '.*[/\\]no_schema.db': \S+" with pytest.raises(CoverageException, match=msg): covdata = DebugCoverageData("no_schema.db") covdata.read() diff --git a/tests/test_process.py b/tests/test_process.py index 6c5dd76bf..d5c322fc2 100644 --- a/tests/test_process.py +++ b/tests/test_process.py @@ -1654,9 +1654,9 @@ def run_in_venv(cmd): """ words = cmd.split() if env.WINDOWS: - words[0] = r"{}\Scripts\{}.exe".format("venv", words[0]) + words[0] = fr"venv\Scripts\{words[0]}.exe" else: - words[0] = "{}/bin/{}".format("venv", words[0]) + words[0] = fr"venv/bin/{words[0]}" status, output = run_command(" ".join(words)) assert status == 0 return output From 982eca893a895db262ae56650bd05d9f7ab6a076 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sun, 10 Oct 2021 12:52:10 -0400 Subject: [PATCH 30/41] fix: raise CoverageException for SQLite connection errors --- coverage/sqldata.py | 6 +++++- tests/test_data.py | 8 ++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/coverage/sqldata.py b/coverage/sqldata.py index c4e950d34..2411fbca5 100644 --- a/coverage/sqldata.py +++ b/coverage/sqldata.py @@ -1032,7 +1032,11 @@ def _connect(self): # is not a problem. if self.debug: self.debug.write(f"Connecting to {self.filename!r}") - self.con = sqlite3.connect(self.filename, check_same_thread=False) + try: + self.con = sqlite3.connect(self.filename, check_same_thread=False) + except sqlite3.Error as exc: + raise CoverageException(f"Couldn't use data file {self.filename!r}: {exc}") from exc + self.con.create_function("REGEXP", 2, _regexp) # This pragma makes writing faster. It disables rollbacks, but we never need them. 
diff --git a/tests/test_data.py b/tests/test_data.py index 0660591f2..735a12b38 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -598,6 +598,14 @@ def test_read_errors(self): covdata.read() assert not covdata + def test_hard_read_error(self): + self.make_file("noperms.dat", "go away") + os.chmod("noperms.dat", 0) + msg = r"Couldn't .* '.*[/\\]{}': \S+" + with pytest.raises(CoverageException, match=msg.format("noperms.dat")): + covdata = DebugCoverageData("noperms.dat") + covdata.read() + def test_read_sql_errors(self): with sqlite3.connect("wrong_schema.db") as con: con.execute("create table coverage_schema (version integer)") From 6cc672fe011b8df3521abe3d48f52605574deacc Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sun, 10 Oct 2021 13:49:18 -0400 Subject: [PATCH 31/41] test: cover the last edge cases in sqldata.py --- coverage/sqldata.py | 8 ++++---- tests/test_data.py | 14 ++++++++++++-- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/coverage/sqldata.py b/coverage/sqldata.py index 2411fbca5..a7df987fb 100644 --- a/coverage/sqldata.py +++ b/coverage/sqldata.py @@ -1068,7 +1068,7 @@ def __exit__(self, exc_type, exc_value, traceback): except Exception as exc: if self.debug: self.debug.write(f"EXCEPTION from __exit__: {exc}") - raise + raise CoverageException(f"Couldn't end data file {self.filename!r}: {exc}") from exc def execute(self, sql, parameters=()): """Same as :meth:`python:sqlite3.Connection.execute`.""" @@ -1095,7 +1095,7 @@ def execute(self, sql, parameters=()): "Looks like a coverage 4.x data file. " + "Are you mixing versions of coverage?" ) - except Exception: + except Exception: # pragma: cant happen pass if self.debug: self.debug.write(f"EXCEPTION from execute: {msg}") @@ -1116,7 +1116,7 @@ def execute_one(self, sql, parameters=()): elif len(rows) == 1: return rows[0] else: - raise CoverageException(f"Sql {sql!r} shouldn't return {len(rows)} rows") + raise AssertionError(f"SQL {sql!r} shouldn't return {len(rows)} rows") def executemany(self, sql, data): """Same as :meth:`python:sqlite3.Connection.executemany`.""" @@ -1125,7 +1125,7 @@ def executemany(self, sql, data): self.debug.write(f"Executing many {sql!r} with {len(data)} rows") try: return self.con.executemany(sql, data) - except Exception: + except Exception: # pragma: cant happen # In some cases, an error might happen that isn't really an # error. Try again immediately. # https://github.com/nedbat/coveragepy/issues/1010 diff --git a/tests/test_data.py b/tests/test_data.py index 735a12b38..a2737a9bf 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -601,11 +601,21 @@ def test_read_errors(self): def test_hard_read_error(self): self.make_file("noperms.dat", "go away") os.chmod("noperms.dat", 0) - msg = r"Couldn't .* '.*[/\\]{}': \S+" - with pytest.raises(CoverageException, match=msg.format("noperms.dat")): + with pytest.raises(CoverageException, match=r"Couldn't .* '.*[/\\]noperms.dat': "): covdata = DebugCoverageData("noperms.dat") covdata.read() + @pytest.mark.parametrize("klass", [CoverageData, DebugCoverageData]) + def test_error_when_closing(self, klass): + msg = r"Couldn't .* '.*[/\\]flaked.dat': \S+" + with pytest.raises(CoverageException, match=msg): + covdata = klass("flaked.dat") + covdata.add_lines(LINES_1) + # I don't know how to make a real error, so let's fake one. 
+ sqldb = list(covdata._dbs.values())[0] + sqldb.close = lambda: 1/0 + covdata.add_lines(LINES_1) + def test_read_sql_errors(self): with sqlite3.connect("wrong_schema.db") as con: con.execute("create table coverage_schema (version integer)") From a05710e863e79bed5f98f01c829b20facce198f9 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sun, 10 Oct 2021 16:54:35 -0400 Subject: [PATCH 32/41] refactor: remove some left over test prints --- tests/test_process.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/test_process.py b/tests/test_process.py index d5c322fc2..72b47d4ef 100644 --- a/tests/test_process.py +++ b/tests/test_process.py @@ -817,7 +817,6 @@ def foo(): # Remove the file location and source line from the warning. out = re.sub(r"(?m)^[\\/\w.:~_-]+:\d+: CoverageWarning: ", "f:d: CoverageWarning: ", out) out = re.sub(r"(?m)^\s+self.warn.*$\n", "", out) - print("out:", repr(out)) expected = ( "Run 1\n" + "Run 2\n" + @@ -1615,7 +1614,6 @@ def path(basename): data = coverage.CoverageData() data.read() summary = line_counts(data) - print(summary) assert summary[source + '.py'] == 3 assert len(summary) == 1 @@ -1785,7 +1783,6 @@ def test_us_in_venv_isnt_measured(self, coverage_command): r"^Not tracing .*\bexecfile.py': " + "module 'coverage.execfile' falls outside the --source spec" ) - print(re_lines(debug_out, "myproduct")) assert re_lines( debug_out, r"^Not tracing .*\bmyproduct.py': module 'myproduct' falls outside the --source spec" From 27db7b4e9eb4a7f8115af207a21374fdd2e6d8c7 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sun, 10 Oct 2021 18:43:32 -0400 Subject: [PATCH 33/41] style: the name of the matchers don't need quotes in the reprs --- coverage/files.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/coverage/files.py b/coverage/files.py index c4fb33b3f..686717447 100644 --- a/coverage/files.py +++ b/coverage/files.py @@ -204,7 +204,7 @@ def __init__(self, paths, name): self.name = name def __repr__(self): - return f"" + return f"" def info(self): """A list of strings for displaying when dumping state.""" @@ -231,7 +231,7 @@ def __init__(self, module_names, name): self.name = name def __repr__(self): - return f"" + return f"" def info(self): """A list of strings for displaying when dumping state.""" @@ -261,7 +261,7 @@ def __init__(self, pats, name): self.name = name def __repr__(self): - return f"" + return f"" def info(self): """A list of strings for displaying when dumping state.""" From 9b54389d91c68b27913ded2898f3a03df7e8e90d Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Sun, 10 Oct 2021 20:21:19 -0400 Subject: [PATCH 34/41] fix: make third-party detection work with namespace packages. #1231 --- CHANGES.rst | 7 ++++ coverage/inorout.py | 41 ++++++++++++++++++----- tests/test_process.py | 77 +++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 113 insertions(+), 12 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 7e5f77825..696dd4b0b 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -22,10 +22,17 @@ This list is detailed and covers changes in each pre-release version. Unreleased ---------- +- Namespace packages being measured weren't properly handled by the new code + that ignores third-party packages. If the namespace package was installed, it + was ignored as a third-party package. That problem (`issue 1231`_) is now + fixed. + - The :meth:`.CoverageData.contexts_by_lineno` method was documented to return a dict, but was returning a defaultdict. Now it returns a plain dict. 
It also no longer returns negative numbered keys. +.. _issue 1231: https://github.com/nedbat/coveragepy/issues/1231 + .. _changes_601: diff --git a/coverage/inorout.py b/coverage/inorout.py index 496ced356..c90e3d594 100644 --- a/coverage/inorout.py +++ b/coverage/inorout.py @@ -107,17 +107,26 @@ def module_has_file(mod): return os.path.exists(mod__file__) -def file_for_module(modulename): - """Find the file for `modulename`, or return None.""" +def file_and_path_for_module(modulename): + """Find the file and search path for `modulename`. + + Returns: + filename: The filename of the module, or None. + path: A list (possibly empty) of directories to find submodules in. + + """ filename = None + path = [] try: spec = importlib.util.find_spec(modulename) except ImportError: pass else: if spec is not None: - filename = spec.origin - return filename + if spec.origin != "namespace": + filename = spec.origin + path = list(spec.submodule_search_locations or ()) + return filename, path def add_stdlib_paths(paths): @@ -263,15 +272,29 @@ def debug(msg): # third-party package. for pkg in self.source_pkgs: try: - modfile = file_for_module(pkg) - debug(f"Imported {pkg} as {modfile}") + modfile, path = file_and_path_for_module(pkg) + debug(f"Imported source package {pkg!r} as {modfile!r}") except CoverageException as exc: - debug(f"Couldn't import {pkg}: {exc}") + debug(f"Couldn't import source package {pkg!r}: {exc}") continue - if modfile and self.third_match.match(modfile): - self.source_in_third = True + if modfile: + if self.third_match.match(modfile): + debug( + f"Source is in third-party because of source_pkg {pkg!r} at {modfile!r}" + ) + self.source_in_third = True + else: + for pathdir in path: + if self.third_match.match(pathdir): + debug( + f"Source is in third-party because of {pkg!r} path directory " + + f"at {pathdir!r}" + ) + self.source_in_third = True + for src in self.source: if self.third_match.match(src): + debug(f"Source is in third-party because of source directory {src!r}") self.source_in_third = True def should_trace(self, filename, frame=None): diff --git a/tests/test_process.py b/tests/test_process.py index 72b47d4ef..781a0170b 100644 --- a/tests/test_process.py +++ b/tests/test_process.py @@ -1691,13 +1691,37 @@ def render(filename, linenum): def fourth(x): return 4 * x """) + # Some namespace packages. + make_file("third_pkg/nspkg/fifth/__init__.py", """\ + def fifth(x): + return 5 * x + """) + # The setup.py to install everything. make_file("third_pkg/setup.py", """\ import setuptools - setuptools.setup(name="third", packages=["third", "fourth"]) + setuptools.setup( + name="third", + packages=["third", "fourth", "nspkg.fifth"], + ) + """) + + # Some namespace packages. + make_file("another_pkg/nspkg/sixth/__init__.py", """\ + def sixth(x): + return 6 * x + """) + # The setup.py to install everything. + make_file("another_pkg/setup.py", """\ + import setuptools + setuptools.setup( + name="another", + packages=["nspkg.sixth"], + ) """) # Install the third-party packages. run_in_venv("python -m pip install --no-index ./third_pkg") + run_in_venv("python -m pip install --no-index -e ./another_pkg") shutil.rmtree("third_pkg") # Install coverage. 
@@ -1719,6 +1743,8 @@ def coverage_command_fixture(request): class VirtualenvTest(CoverageTest): """Tests of virtualenv considerations.""" + expected_stdout = "33\n110\n198\n1.5\n" + @pytest.fixture(autouse=True) def in_venv_world_fixture(self, venv_world): """For running tests inside venv_world, and cleaning up made files.""" @@ -1726,10 +1752,13 @@ def in_venv_world_fixture(self, venv_world): self.make_file("myproduct.py", """\ import colorsys import third + import nspkg.fifth + import nspkg.sixth print(third.third(11)) + print(nspkg.fifth.fifth(22)) + print(nspkg.sixth.sixth(33)) print(sum(colorsys.rgb_to_hls(1, 0, 0))) """) - self.expected_stdout = "33\n1.5\n" # pylint: disable=attribute-defined-outside-init self.del_environ("COVERAGE_TESTING") # To avoid needing contracts installed. self.set_environ("COVERAGE_DEBUG_FILE", "debug_out.txt") @@ -1738,7 +1767,7 @@ def in_venv_world_fixture(self, venv_world): yield for fname in os.listdir("."): - if fname != "venv": + if fname not in {"venv", "another_pkg"}: os.remove(fname) def get_trace_output(self): @@ -1829,3 +1858,45 @@ def test_venv_with_dynamic_plugin(self, coverage_command): # The output should not have this warning: # Already imported a file that will be measured: ...third/render.py (already-imported) assert out == "HTML: hello.html@1723\n" + + def test_installed_namespace_packages(self, coverage_command): + # https://github.com/nedbat/coveragepy/issues/1231 + # When namespace packages were installed, they were considered + # third-party packages. Test that isn't still happening. + out = run_in_venv(coverage_command + " run --source=nspkg myproduct.py") + # In particular, this warning doesn't appear: + # Already imported a file that will be measured: .../coverage/__main__.py + assert out == self.expected_stdout + + # Check that our tracing was accurate. Files are mentioned because + # --source refers to a file. + debug_out = self.get_trace_output() + assert re_lines( + debug_out, + r"^Not tracing .*\bexecfile.py': " + + "module 'coverage.execfile' falls outside the --source spec" + ) + assert re_lines( + debug_out, + r"^Not tracing .*\bmyproduct.py': module 'myproduct' falls outside the --source spec" + ) + assert re_lines( + debug_out, + r"^Not tracing .*\bcolorsys.py': module 'colorsys' falls outside the --source spec" + ) + + out = run_in_venv("python -m coverage report") + + # Name Stmts Miss Cover + # ------------------------------------------------------------------------------ + # another_pkg/nspkg/sixth/__init__.py 2 0 100% + # venv/lib/python3.9/site-packages/nspkg/fifth/__init__.py 2 0 100% + # ------------------------------------------------------------------------------ + # TOTAL 4 0 100% + + assert "myproduct.py" not in out + assert "third" not in out + assert "coverage" not in out + assert "colorsys" not in out + assert "fifth" in out + assert "sixth" in out From bcff84fb55d03643bf0a182d79a5ac8e809ec457 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Mon, 11 Oct 2021 06:17:02 -0400 Subject: [PATCH 35/41] refactor: remove qualname code that was only for Python 2 --- coverage/context.py | 36 +++++------------------------------- 1 file changed, 5 insertions(+), 31 deletions(-) diff --git a/coverage/context.py b/coverage/context.py index 43d2b1cc7..6bb1f1ee1 100644 --- a/coverage/context.py +++ b/coverage/context.py @@ -55,37 +55,11 @@ def qualname_from_frame(frame): func = frame.f_globals.get(fname) if func is None: return None - return func.__module__ + '.' + fname + return func.__module__ + "." 
+ fname - func = getattr(method, '__func__', None) + func = getattr(method, "__func__", None) if func is None: cls = self.__class__ - return cls.__module__ + '.' + cls.__name__ + "." + fname - - if hasattr(func, '__qualname__'): - qname = func.__module__ + '.' + func.__qualname__ - else: - for cls in getattr(self.__class__, '__mro__', ()): - f = cls.__dict__.get(fname, None) - if f is None: - continue - if f is func: - qname = cls.__module__ + '.' + cls.__name__ + "." + fname - break - else: - # Support for old-style classes. - def mro(bases): - for base in bases: - f = base.__dict__.get(fname, None) - if f is func: - return base.__module__ + '.' + base.__name__ + "." + fname - for base in bases: - qname = mro(base.__bases__) # pylint: disable=cell-var-from-loop - if qname is not None: - return qname - return None - qname = mro([self.__class__]) - if qname is None: - qname = func.__module__ + '.' + fname - - return qname + return cls.__module__ + "." + cls.__name__ + "." + fname + + return func.__module__ + "." + func.__qualname__ From d3f46d2cc5d9fe6c30b31ffe7b268fb7a3addcda Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Mon, 11 Oct 2021 06:39:25 -0400 Subject: [PATCH 36/41] test: add a test of hash-based pyc files --- tests/test_execfile.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tests/test_execfile.py b/tests/test_execfile.py index 5c01f8929..b1306233b 100644 --- a/tests/test_execfile.py +++ b/tests/test_execfile.py @@ -8,6 +8,7 @@ import os import os.path import pathlib +import py_compile import re import pytest @@ -105,7 +106,7 @@ def test_directory_without_main(self): class RunPycFileTest(CoverageTest): """Test cases for `run_python_file`.""" - def make_pyc(self): + def make_pyc(self, **kwargs): """Create a .pyc file, and return the path to it.""" if env.JYTHON: pytest.skip("Can't make .pyc files on Jython") @@ -116,7 +117,7 @@ def doit(): doit() """) - compileall.compile_dir(".", quiet=True) + compileall.compile_dir(".", quiet=True, **kwargs) os.remove("compiled.py") # Find the .pyc file! @@ -149,6 +150,12 @@ def test_running_pyc_from_wrong_python(self): # In some environments, the pycfile persists and pollutes another test. os.remove(pycfile) + @pytest.mark.skipif(not env.PYBEHAVIOR.hashed_pyc_pep552, reason="No hashed .pyc here") + def test_running_hashed_pyc(self): + pycfile = self.make_pyc(invalidation_mode=py_compile.PycInvalidationMode.CHECKED_HASH) + run_python_file([pycfile]) + assert self.stdout() == "I am here!\n" + def test_no_such_pyc_file(self): path = python_reported_file('xyzzy.pyc') msg = re.escape(f"No file to run: '{path}'") From cedd319b6bc76843e570e7e53c4cb98ce359136e Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Mon, 11 Oct 2021 06:43:16 -0400 Subject: [PATCH 37/41] build: clean up the Makefile a bit --- Makefile | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index f9f076c47..4a5927531 100644 --- a/Makefile +++ b/Makefile @@ -49,22 +49,19 @@ LINTABLE = coverage tests igor.py setup.py __main__.py lint: ## Run linters and checkers. tox -q -e lint -todo: - -grep -R --include=*.py TODO $(LINTABLE) - -pep8: - pycodestyle --filename=*.py --repeat $(LINTABLE) - test: - tox -q -e py35 $(ARGS) + tox -q -e py39 $(ARGS) PYTEST_SMOKE_ARGS = -n 6 -m "not expensive" --maxfail=3 $(ARGS) smoke: ## Run tests quickly with the C tracer in the lowest supported Python versions. 
- COVERAGE_NO_PYTRACER=1 tox -q -e py35 -- $(PYTEST_SMOKE_ARGS) + COVERAGE_NO_PYTRACER=1 tox -q -e py39 -- $(PYTEST_SMOKE_ARGS) pysmoke: ## Run tests quickly with the Python tracer in the lowest supported Python versions. - COVERAGE_NO_CTRACER=1 tox -q -e py35 -- $(PYTEST_SMOKE_ARGS) + COVERAGE_NO_CTRACER=1 tox -q -e py39 -- $(PYTEST_SMOKE_ARGS) + +metasmoke: + COVERAGE_NO_PYTRACER=1 ARGS="-e py39" make clean metacov metahtml # Coverage measurement of coverage.py itself (meta-coverage). See metacov.ini # for details. From fdaa8224ccfa16233fda0c84860ef95ca073ee95 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Mon, 11 Oct 2021 07:55:05 -0400 Subject: [PATCH 38/41] test: add more tests of run_python_file The tests in test_process run the exception handling in execfile.py, but only under coverage, so metacov can't see it. These smaller tests exercise the code without coverage on top. --- coverage/execfile.py | 6 +-- tests/test_execfile.py | 87 +++++++++++++++++++++++++++++++++++++++++- tests/test_process.py | 11 ++++-- 3 files changed, 95 insertions(+), 9 deletions(-) diff --git a/coverage/execfile.py b/coverage/execfile.py index 539e368d7..3da3ba1f5 100644 --- a/coverage/execfile.py +++ b/coverage/execfile.py @@ -220,8 +220,7 @@ def run(self): # Call the excepthook. try: - if hasattr(err, "__traceback__"): - err.__traceback__ = err.__traceback__.tb_next + err.__traceback__ = err.__traceback__.tb_next sys.excepthook(typ, err, tb.tb_next) except SystemExit: # pylint: disable=try-except-raise raise @@ -231,8 +230,7 @@ def run(self): sys.stderr.write("Error in sys.excepthook:\n") typ2, err2, tb2 = sys.exc_info() err2.__suppress_context__ = True - if hasattr(err2, "__traceback__"): - err2.__traceback__ = err2.__traceback__.tb_next + err2.__traceback__ = err2.__traceback__.tb_next sys.__excepthook__(typ2, err2, tb2.tb_next) sys.stderr.write("\nOriginal exception was:\n") raise ExceptionDuringRun(typ, err, tb.tb_next) from exc diff --git a/tests/test_execfile.py b/tests/test_execfile.py index b1306233b..21cbb7270 100644 --- a/tests/test_execfile.py +++ b/tests/test_execfile.py @@ -14,7 +14,7 @@ import pytest from coverage import env -from coverage.exceptions import NoCode, NoSource +from coverage.exceptions import NoCode, NoSource, ExceptionDuringRun from coverage.execfile import run_python_file, run_python_module from coverage.files import python_reported_file @@ -102,6 +102,91 @@ def test_directory_without_main(self): with pytest.raises(NoSource, match="Can't find '__main__' module in 'without_main'"): run_python_file(["without_main"]) + def test_code_throws(self): + self.make_file("throw.py", """\ + class MyException(Exception): + pass + + def f1(): + print("about to raise..") + raise MyException("hey!") + + def f2(): + f1() + + f2() + """) + + with pytest.raises(SystemExit) as exc_info: + run_python_file(["throw.py"]) + assert exc_info.value.args == (1,) + assert self.stdout() == "about to raise..\n" + assert self.stderr() == "" + + def test_code_exits(self): + self.make_file("exit.py", """\ + import sys + def f1(): + print("about to exit..") + sys.exit(17) + + def f2(): + f1() + + f2() + """) + + with pytest.raises(SystemExit) as exc_info: + run_python_file(["exit.py"]) + assert exc_info.value.args == (17,) + assert self.stdout() == "about to exit..\n" + assert self.stderr() == "" + + @pytest.mark.skipif(not env.CPYTHON, + reason="non-CPython handles excepthook exits differently, punt for now." 
+ ) + def test_excepthook_exit(self): + self.make_file("excepthook_exit.py", """\ + import sys + + def excepthook(*args): + print('in excepthook') + sys.exit(0) + + sys.excepthook = excepthook + + raise RuntimeError('Error Outside') + """) + with pytest.raises(SystemExit): + run_python_file(["excepthook_exit.py"]) + cov_out = self.stdout() + assert cov_out == "in excepthook\n" + + @pytest.mark.skipif(env.PYPY, reason="PyPy handles excepthook throws differently.") + def test_excepthook_throw(self): + self.make_file("excepthook_throw.py", """\ + import sys + + def excepthook(*args): + # Write this message to stderr so that we don't have to deal + # with interleaved stdout/stderr comparisons in the assertions + # in the test. + sys.stderr.write('in excepthook\\n') + raise RuntimeError('Error Inside') + + sys.excepthook = excepthook + + raise RuntimeError('Error Outside') + """) + with pytest.raises(ExceptionDuringRun) as exc_info: + run_python_file(["excepthook_throw.py"]) + # The ExceptionDuringRun exception has the RuntimeError as its argument. + assert exc_info.value.args[1].args[0] == "Error Outside" + stderr = self.stderr() + assert "in excepthook\n" in stderr + assert "Error in sys.excepthook:\n" in stderr + assert "RuntimeError: Error Inside" in stderr + class RunPycFileTest(CoverageTest): """Test cases for `run_python_file`.""" diff --git a/tests/test_process.py b/tests/test_process.py index 781a0170b..63dd1d5b6 100644 --- a/tests/test_process.py +++ b/tests/test_process.py @@ -468,8 +468,11 @@ def test_running_missing_file(self): def test_code_throws(self): self.make_file("throw.py", """\ + class MyException(Exception): + pass + def f1(): - raise Exception("hey!") + raise MyException("hey!") def f2(): f1() @@ -488,9 +491,9 @@ def f2(): # But also make sure that the output is what we expect. path = python_reported_file('throw.py') - msg = f'File "{re.escape(path)}", line 5,? in f2' + msg = f'File "{re.escape(path)}", line 8, in f2' assert re.search(msg, out) - assert 'raise Exception("hey!")' in out + assert 'raise MyException("hey!")' in out assert status == 1 def test_code_exits(self): @@ -1121,7 +1124,7 @@ def excepthook(*args): assert cov_st == py_st assert cov_st == 0 - assert "in excepthook" in py_out + assert py_out == "in excepthook\n" assert cov_out == py_out @pytest.mark.skipif(env.PYPY, reason="PyPy handles excepthook throws differently.") From 260359756694728cd13f8c8715dddf7c6e2f371d Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Mon, 11 Oct 2021 15:22:18 -0400 Subject: [PATCH 39/41] fix: source modules need to be re-imported. #1232 --- CHANGES.rst | 9 +++++++++ coverage/inorout.py | 38 ++++++++++++++++++++------------------ coverage/misc.py | 38 ++++++++++++++++++++++++++++---------- coverage/tomlconfig.py | 6 +++++- doc/source.rst | 5 +++++ tests/mixins.py | 17 ++++------------- tests/test_misc.py | 7 +++++++ 7 files changed, 78 insertions(+), 42 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 696dd4b0b..2b7add47d 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -27,11 +27,20 @@ Unreleased was ignored as a third-party package. That problem (`issue 1231`_) is now fixed. +- Packages named as "source packages" (with ``source``, or ``source_pkgs``, or + pytest-cov's ``--cov``) might have been only partially measured. Their + top-level statements could be marked as unexecuted, because they were + imported by coverage.py before measurement began (`issue 1232`_). 
This is + now fixed, but the package will be imported twice, once by coverage.py, then + again by your test suite. This could cause problems if importing the package + has side effects. + - The :meth:`.CoverageData.contexts_by_lineno` method was documented to return a dict, but was returning a defaultdict. Now it returns a plain dict. It also no longer returns negative numbered keys. .. _issue 1231: https://github.com/nedbat/coveragepy/issues/1231 +.. _issue 1232: https://github.com/nedbat/coveragepy/issues/1232 .. _changes_601: diff --git a/coverage/inorout.py b/coverage/inorout.py index c90e3d594..2c216ea9d 100644 --- a/coverage/inorout.py +++ b/coverage/inorout.py @@ -18,6 +18,7 @@ from coverage.exceptions import CoverageException from coverage.files import TreeMatcher, FnmatchMatcher, ModuleMatcher from coverage.files import prep_patterns, find_python_files, canonical_filename +from coverage.misc import sys_modules_saved from coverage.python import source_for_file, source_for_morf @@ -270,27 +271,28 @@ def debug(msg): # Check if the source we want to measure has been installed as a # third-party package. - for pkg in self.source_pkgs: - try: - modfile, path = file_and_path_for_module(pkg) - debug(f"Imported source package {pkg!r} as {modfile!r}") - except CoverageException as exc: - debug(f"Couldn't import source package {pkg!r}: {exc}") - continue - if modfile: - if self.third_match.match(modfile): - debug( - f"Source is in third-party because of source_pkg {pkg!r} at {modfile!r}" - ) - self.source_in_third = True - else: - for pathdir in path: - if self.third_match.match(pathdir): + with sys_modules_saved(): + for pkg in self.source_pkgs: + try: + modfile, path = file_and_path_for_module(pkg) + debug(f"Imported source package {pkg!r} as {modfile!r}") + except CoverageException as exc: + debug(f"Couldn't import source package {pkg!r}: {exc}") + continue + if modfile: + if self.third_match.match(modfile): debug( - f"Source is in third-party because of {pkg!r} path directory " + - f"at {pathdir!r}" + f"Source is in third-party because of source_pkg {pkg!r} at {modfile!r}" ) self.source_in_third = True + else: + for pathdir in path: + if self.third_match.match(pathdir): + debug( + f"Source is in third-party because of {pkg!r} path directory " + + f"at {pathdir!r}" + ) + self.source_in_third = True for src in self.source: if self.third_match.match(src): diff --git a/coverage/misc.py b/coverage/misc.py index 9c414d88c..29397537c 100644 --- a/coverage/misc.py +++ b/coverage/misc.py @@ -3,6 +3,7 @@ """Miscellaneous stuff for coverage.py.""" +import contextlib import errno import hashlib import importlib @@ -49,6 +50,28 @@ def isolate_module(mod): os = isolate_module(os) +class SysModuleSaver: + """Saves the contents of sys.modules, and removes new modules later.""" + def __init__(self): + self.old_modules = set(sys.modules) + + def restore(self): + """Remove any modules imported since this object started.""" + new_modules = set(sys.modules) - self.old_modules + for m in new_modules: + del sys.modules[m] + + +@contextlib.contextmanager +def sys_modules_saved(): + """A context manager to remove any modules imported during a block.""" + saver = SysModuleSaver() + try: + yield + finally: + saver.restore() + + def import_third_party(modname): """Import a third-party module we need, but might not be installed. @@ -63,16 +86,11 @@ def import_third_party(modname): The imported module, or None if the module couldn't be imported. 
""" - try: - mod = importlib.import_module(modname) - except ImportError: - mod = None - - imported = [m for m in sys.modules if m.startswith(modname)] - for name in imported: - del sys.modules[name] - - return mod + with sys_modules_saved(): + try: + return importlib.import_module(modname) + except ImportError: + return None def dummy_decorator_with_args(*args_unused, **kwargs_unused): diff --git a/coverage/tomlconfig.py b/coverage/tomlconfig.py index 3301acc8e..4a1e322c5 100644 --- a/coverage/tomlconfig.py +++ b/coverage/tomlconfig.py @@ -10,7 +10,11 @@ from coverage.exceptions import CoverageException from coverage.misc import import_third_party, substitute_variables -# TOML support is an install-time extra option. +# TOML support is an install-time extra option. (Import typing is here because +# import_third_party will unload any module that wasn't already imported. +# tomli imports typing, and if we unload it, later it's imported again, and on +# Python 3.6, this causes infinite recursion.) +import typing # pylint: disable=unused-import, wrong-import-order tomli = import_third_party("tomli") diff --git a/doc/source.rst b/doc/source.rst index 8debd575f..bab57a723 100644 --- a/doc/source.rst +++ b/doc/source.rst @@ -39,6 +39,11 @@ in their names will be skipped (they are assumed to be scratch files written by text editors). Files that do not end with ``.py``, ``.pyw``, ``.pyo``, or ``.pyc`` will also be skipped. +.. note:: Modules named as sources may be imported twice, once by coverage.py + to find their location, then again by your own code or test suite. Usually + this isn't a problem, but could cause trouble if a module has side-effects + at import time. + You can further fine-tune coverage.py's attention with the ``--include`` and ``--omit`` switches (or ``[run] include`` and ``[run] omit`` configuration values). ``--include`` is a list of file name patterns. If specified, only diff --git a/tests/mixins.py b/tests/mixins.py index 0638f3366..95b2145a3 100644 --- a/tests/mixins.py +++ b/tests/mixins.py @@ -15,6 +15,7 @@ import pytest +from coverage.misc import SysModuleSaver from tests.helpers import change_dir, make_file, remove_files @@ -96,21 +97,11 @@ def _save_sys_path(self): @pytest.fixture(autouse=True) def _module_saving(self): """Remove modules we imported during the test.""" - self._old_modules = list(sys.modules) + self._sys_module_saver = SysModuleSaver() try: yield finally: - self._cleanup_modules() - - def _cleanup_modules(self): - """Remove any new modules imported since our construction. - - This lets us import the same source files for more than one test, or - if called explicitly, within one test. - - """ - for m in [m for m in sys.modules if m not in self._old_modules]: - del sys.modules[m] + self._sys_module_saver.restore() def clean_local_file_imports(self): """Clean up the results of calls to `import_local_file`. @@ -120,7 +111,7 @@ def clean_local_file_imports(self): """ # So that we can re-import files, clean them out first. - self._cleanup_modules() + self._sys_module_saver.restore() # Also have to clean out the .pyc file, since the timestamp # resolution is only one second, a changed file might not be diff --git a/tests/test_misc.py b/tests/test_misc.py index 077c24344..740022322 100644 --- a/tests/test_misc.py +++ b/tests/test_misc.py @@ -165,8 +165,15 @@ class ImportThirdPartyTest(CoverageTest): run_in_temp_dir = False def test_success(self): + # Make sure we don't have pytest in sys.modules before we start. 
+ del sys.modules["pytest"] + # Import pytest mod = import_third_party("pytest") + # Yes, it's really pytest: assert mod.__name__ == "pytest" + print(dir(mod)) + assert all(hasattr(mod, name) for name in ["skip", "mark", "raises", "warns"]) + # But it's not in sys.modules: assert "pytest" not in sys.modules def test_failure(self): From 19bb1f80a361aaf8e77c5eaa45934aaf58178216 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Mon, 11 Oct 2021 16:58:34 -0400 Subject: [PATCH 40/41] docs: sample HTML from 6.0.2 --- doc/sample_html/d_7b071bdc2a35fa80___init___py.html | 4 ++-- doc/sample_html/d_7b071bdc2a35fa80___main___py.html | 4 ++-- doc/sample_html/d_7b071bdc2a35fa80_backward_py.html | 4 ++-- doc/sample_html/d_7b071bdc2a35fa80_cogapp_py.html | 4 ++-- doc/sample_html/d_7b071bdc2a35fa80_makefiles_py.html | 4 ++-- doc/sample_html/d_7b071bdc2a35fa80_test_cogapp_py.html | 4 ++-- doc/sample_html/d_7b071bdc2a35fa80_test_makefiles_py.html | 4 ++-- doc/sample_html/d_7b071bdc2a35fa80_test_whiteutils_py.html | 4 ++-- doc/sample_html/d_7b071bdc2a35fa80_whiteutils_py.html | 4 ++-- doc/sample_html/index.html | 4 ++-- doc/sample_html/status.json | 2 +- 11 files changed, 21 insertions(+), 21 deletions(-) diff --git a/doc/sample_html/d_7b071bdc2a35fa80___init___py.html b/doc/sample_html/d_7b071bdc2a35fa80___init___py.html index 842693669..617fdbefa 100644 --- a/doc/sample_html/d_7b071bdc2a35fa80___init___py.html +++ b/doc/sample_html/d_7b071bdc2a35fa80___init___py.html @@ -66,8 +66,8 @@
diff --git a/doc/sample_html/d_7b071bdc2a35fa80___main___py.html b/doc/sample_html/d_7b071bdc2a35fa80___main___py.html index 3b7d3f548..464019fb4 100644 --- a/doc/sample_html/d_7b071bdc2a35fa80___main___py.html +++ b/doc/sample_html/d_7b071bdc2a35fa80___main___py.html @@ -62,8 +62,8 @@
diff --git a/doc/sample_html/d_7b071bdc2a35fa80_backward_py.html b/doc/sample_html/d_7b071bdc2a35fa80_backward_py.html index 98c83c3ab..f997eec86 100644 --- a/doc/sample_html/d_7b071bdc2a35fa80_backward_py.html +++ b/doc/sample_html/d_7b071bdc2a35fa80_backward_py.html @@ -99,8 +99,8 @@
diff --git a/doc/sample_html/d_7b071bdc2a35fa80_cogapp_py.html b/doc/sample_html/d_7b071bdc2a35fa80_cogapp_py.html index 283008095..6fe42303e 100644 --- a/doc/sample_html/d_7b071bdc2a35fa80_cogapp_py.html +++ b/doc/sample_html/d_7b071bdc2a35fa80_cogapp_py.html @@ -867,8 +867,8 @@
diff --git a/doc/sample_html/d_7b071bdc2a35fa80_makefiles_py.html b/doc/sample_html/d_7b071bdc2a35fa80_makefiles_py.html index 98df9cfe9..c591fba9a 100644 --- a/doc/sample_html/d_7b071bdc2a35fa80_makefiles_py.html +++ b/doc/sample_html/d_7b071bdc2a35fa80_makefiles_py.html @@ -103,8 +103,8 @@
diff --git a/doc/sample_html/d_7b071bdc2a35fa80_test_cogapp_py.html b/doc/sample_html/d_7b071bdc2a35fa80_test_cogapp_py.html index 0a5081b6c..07e6fa9c7 100644 --- a/doc/sample_html/d_7b071bdc2a35fa80_test_cogapp_py.html +++ b/doc/sample_html/d_7b071bdc2a35fa80_test_cogapp_py.html @@ -2535,8 +2535,8 @@
diff --git a/doc/sample_html/d_7b071bdc2a35fa80_test_makefiles_py.html b/doc/sample_html/d_7b071bdc2a35fa80_test_makefiles_py.html index 6430b7570..3d9602b6d 100644 --- a/doc/sample_html/d_7b071bdc2a35fa80_test_makefiles_py.html +++ b/doc/sample_html/d_7b071bdc2a35fa80_test_makefiles_py.html @@ -179,8 +179,8 @@
diff --git a/doc/sample_html/d_7b071bdc2a35fa80_test_whiteutils_py.html b/doc/sample_html/d_7b071bdc2a35fa80_test_whiteutils_py.html index f66e45a34..c311ee37e 100644 --- a/doc/sample_html/d_7b071bdc2a35fa80_test_whiteutils_py.html +++ b/doc/sample_html/d_7b071bdc2a35fa80_test_whiteutils_py.html @@ -158,8 +158,8 @@
diff --git a/doc/sample_html/d_7b071bdc2a35fa80_whiteutils_py.html b/doc/sample_html/d_7b071bdc2a35fa80_whiteutils_py.html index aee1367ae..8be3ebd69 100644 --- a/doc/sample_html/d_7b071bdc2a35fa80_whiteutils_py.html +++ b/doc/sample_html/d_7b071bdc2a35fa80_whiteutils_py.html @@ -130,8 +130,8 @@
diff --git a/doc/sample_html/index.html b/doc/sample_html/index.html index 1266f3574..7a7a63f11 100644 --- a/doc/sample_html/index.html +++ b/doc/sample_html/index.html @@ -156,8 +156,8 @@
Coverage report: diff --git a/doc/sample_html/status.json b/doc/sample_html/status.json index faaeeb31a..9be42a992 100644 --- a/doc/sample_html/status.json +++ b/doc/sample_html/status.json @@ -1 +1 @@ -{"format":2,"version":"6.0","globals":"6a4256e35f9798f9bb080b10ccdeeac8","files":{"d_7b071bdc2a35fa80___init___py":{"hash":"1fc4f9498f460d6c93280e8962ee0c0f","index":{"nums":[2,1,2,0,0,0,0,0],"html_filename":"d_7b071bdc2a35fa80___init___py.html","relative_filename":"cogapp/__init__.py"}},"d_7b071bdc2a35fa80___main___py":{"hash":"ffe6befa655d4d0b0b31eb0c73811311","index":{"nums":[2,1,3,0,3,0,0,0],"html_filename":"d_7b071bdc2a35fa80___main___py.html","relative_filename":"cogapp/__main__.py"}},"d_7b071bdc2a35fa80_backward_py":{"hash":"32afb71b2a11dd4b8ca4ed66b6815d42","index":{"nums":[2,1,22,0,6,4,2,2],"html_filename":"d_7b071bdc2a35fa80_backward_py.html","relative_filename":"cogapp/backward.py"}},"d_7b071bdc2a35fa80_cogapp_py":{"hash":"f3523a775c5c1d12a213eead8df82291","index":{"nums":[2,1,486,1,215,200,28,132],"html_filename":"d_7b071bdc2a35fa80_cogapp_py.html","relative_filename":"cogapp/cogapp.py"}},"d_7b071bdc2a35fa80_makefiles_py":{"hash":"85ec1064ff86d94238a8d7b76a2178a5","index":{"nums":[2,1,27,0,20,14,0,14],"html_filename":"d_7b071bdc2a35fa80_makefiles_py.html","relative_filename":"cogapp/makefiles.py"}},"d_7b071bdc2a35fa80_test_cogapp_py":{"hash":"6985a05ab5a9b347b3665136686f6fb1","index":{"nums":[2,1,788,8,547,20,0,18],"html_filename":"d_7b071bdc2a35fa80_test_cogapp_py.html","relative_filename":"cogapp/test_cogapp.py"}},"d_7b071bdc2a35fa80_test_makefiles_py":{"hash":"647f7dc911c97a6e646a91c3300a25ff","index":{"nums":[2,1,71,0,53,6,0,6],"html_filename":"d_7b071bdc2a35fa80_test_makefiles_py.html","relative_filename":"cogapp/test_makefiles.py"}},"d_7b071bdc2a35fa80_test_whiteutils_py":{"hash":"9476b26e42e6169b1857cfe7f29bf954","index":{"nums":[2,1,69,0,50,0,0,0],"html_filename":"d_7b071bdc2a35fa80_test_whiteutils_py.html","relative_filename":"cogapp/test_whiteutils.py"}},"d_7b071bdc2a35fa80_whiteutils_py":{"hash":"cf00c3e6149e4b80a2d01b6919c066a4","index":{"nums":[2,1,45,0,5,34,4,4],"html_filename":"d_7b071bdc2a35fa80_whiteutils_py.html","relative_filename":"cogapp/whiteutils.py"}}}} \ No newline at end of file 
+{"format":2,"version":"6.0.2","globals":"6a4256e35f9798f9bb080b10ccdeeac8","files":{"d_7b071bdc2a35fa80___init___py":{"hash":"1fc4f9498f460d6c93280e8962ee0c0f","index":{"nums":[2,1,2,0,0,0,0,0],"html_filename":"d_7b071bdc2a35fa80___init___py.html","relative_filename":"cogapp/__init__.py"}},"d_7b071bdc2a35fa80___main___py":{"hash":"ffe6befa655d4d0b0b31eb0c73811311","index":{"nums":[2,1,3,0,3,0,0,0],"html_filename":"d_7b071bdc2a35fa80___main___py.html","relative_filename":"cogapp/__main__.py"}},"d_7b071bdc2a35fa80_backward_py":{"hash":"32afb71b2a11dd4b8ca4ed66b6815d42","index":{"nums":[2,1,22,0,6,4,2,2],"html_filename":"d_7b071bdc2a35fa80_backward_py.html","relative_filename":"cogapp/backward.py"}},"d_7b071bdc2a35fa80_cogapp_py":{"hash":"f3523a775c5c1d12a213eead8df82291","index":{"nums":[2,1,486,1,215,200,28,132],"html_filename":"d_7b071bdc2a35fa80_cogapp_py.html","relative_filename":"cogapp/cogapp.py"}},"d_7b071bdc2a35fa80_makefiles_py":{"hash":"85ec1064ff86d94238a8d7b76a2178a5","index":{"nums":[2,1,27,0,20,14,0,14],"html_filename":"d_7b071bdc2a35fa80_makefiles_py.html","relative_filename":"cogapp/makefiles.py"}},"d_7b071bdc2a35fa80_test_cogapp_py":{"hash":"6985a05ab5a9b347b3665136686f6fb1","index":{"nums":[2,1,788,8,547,20,0,18],"html_filename":"d_7b071bdc2a35fa80_test_cogapp_py.html","relative_filename":"cogapp/test_cogapp.py"}},"d_7b071bdc2a35fa80_test_makefiles_py":{"hash":"647f7dc911c97a6e646a91c3300a25ff","index":{"nums":[2,1,71,0,53,6,0,6],"html_filename":"d_7b071bdc2a35fa80_test_makefiles_py.html","relative_filename":"cogapp/test_makefiles.py"}},"d_7b071bdc2a35fa80_test_whiteutils_py":{"hash":"9476b26e42e6169b1857cfe7f29bf954","index":{"nums":[2,1,69,0,50,0,0,0],"html_filename":"d_7b071bdc2a35fa80_test_whiteutils_py.html","relative_filename":"cogapp/test_whiteutils.py"}},"d_7b071bdc2a35fa80_whiteutils_py":{"hash":"cf00c3e6149e4b80a2d01b6919c066a4","index":{"nums":[2,1,45,0,5,34,4,4],"html_filename":"d_7b071bdc2a35fa80_whiteutils_py.html","relative_filename":"cogapp/whiteutils.py"}}}} \ No newline at end of file From a3921d2c9025f570a4c0d59d503bfd056d26e723 Mon Sep 17 00:00:00 2001 From: Ned Batchelder Date: Mon, 11 Oct 2021 16:58:58 -0400 Subject: [PATCH 41/41] build: prep for 6.0.2 --- CHANGES.rst | 7 +++++-- coverage/version.py | 2 +- doc/conf.py | 4 ++-- doc/index.rst | 3 ++- howto.txt | 4 ++-- 5 files changed, 12 insertions(+), 8 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 2b7add47d..12f15d81a 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -19,8 +19,11 @@ This list is detailed and covers changes in each pre-release version. .. Version 9.8.1 --- 2027-07-27 .. ---------------------------- -Unreleased ----------- + +.. _changes_602: + +Version 6.0.2 --- 2021-10-11 +---------------------------- - Namespace packages being measured weren't properly handled by the new code that ignores third-party packages. If the namespace package was installed, it diff --git a/coverage/version.py b/coverage/version.py index 3ab3d50ab..8bc98ac60 100644 --- a/coverage/version.py +++ b/coverage/version.py @@ -5,7 +5,7 @@ # This file is exec'ed in setup.py, don't import anything! # Same semantics as sys.version_info. -version_info = (6, 0, 2, "alpha", 0) +version_info = (6, 0, 2, "final", 0) def _make_version(major, minor, micro, releaselevel, serial): diff --git a/doc/conf.py b/doc/conf.py index 5109ff726..cfa9b445b 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -67,9 +67,9 @@ # The short X.Y version. version = "6.0" # CHANGEME # The full version, including alpha/beta/rc tags. 
-release = "6.0.1" # CHANGEME +release = "6.0.2" # CHANGEME # The date of release, in "monthname day, year" format. -release_date = "October 6, 2021" # CHANGEME +release_date = "October 11, 2021" # CHANGEME rst_epilog = """ .. |release_date| replace:: {release_date} diff --git a/doc/index.rst b/doc/index.rst index dc34c3f79..b2ceed73a 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -23,7 +23,8 @@ supported on: .. ifconfig:: prerelease **This is a pre-release build. The usual warnings about possible bugs - apply.** The latest stable version is coverage.py 6.0.1, `described here`_. + apply.** The latest stable version is coverage.py 6.0.2, `described here`_. + .. _described here: http://coverage.readthedocs.io/ diff --git a/howto.txt b/howto.txt index 8121b9dbe..e89b42eb9 100644 --- a/howto.txt +++ b/howto.txt @@ -57,6 +57,8 @@ - Tag the tree $ git tag -a 3.0.1 $ git push --follow-tags +- Update GitHub releases: + $ make github_releases - Bump version: - coverage/version.py - increment version number @@ -65,8 +67,6 @@ - CHANGES.rst - add an "Unreleased" section to the top. $ git push -- Update GitHub releases: - $ make github_releases - Update readthedocs - @ https://readthedocs.org/projects/coverage/versions/ - find the latest tag in the inactive list, edit it, make it active.