From 391a6082c297a2f7186c6c3445c62c5075f2c9de Mon Sep 17 00:00:00 2001 From: Conor MacBride Date: Wed, 15 Jun 2022 22:21:49 +0100 Subject: [PATCH 1/9] Rename default branch to `main` I've removed the `workflow_dispatch` trigger from `update-changelog.yaml` as the `workflow_dispatch` event does not have the release details in its payload so I doubt this would run successfully. --- .github/workflows/update-changelog.yaml | 7 +++---- RELEASING.md | 4 ++-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.github/workflows/update-changelog.yaml b/.github/workflows/update-changelog.yaml index 4f08d5af..1432b5e0 100644 --- a/.github/workflows/update-changelog.yaml +++ b/.github/workflows/update-changelog.yaml @@ -1,11 +1,10 @@ # This workflow takes the GitHub release notes an updates the changelog on the -# master branch with the body of the release notes, thereby keeping a log in +# main branch with the body of the release notes, thereby keeping a log in # the git repo of the changes. name: "Update Changelog" on: - workflow_dispatch: release: types: [released] @@ -17,7 +16,7 @@ jobs: - name: Checkout code uses: actions/checkout@v2 with: - ref: master + ref: main - name: Update Changelog uses: stefanzweifel/changelog-updater-action@v1 @@ -29,6 +28,6 @@ jobs: - name: Commit updated CHANGELOG uses: stefanzweifel/git-auto-commit-action@v4 with: - branch: master + branch: main commit_message: Update CHANGELOG file_pattern: CHANGES.md diff --git a/RELEASING.md b/RELEASING.md index d91e4bce..635cf077 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -2,10 +2,10 @@ To make a new release of pytest-mpl follow the following steps: -* Ensure the sdist and wheel GitHub Actions jobs succeeded on master after the last merge. +* Ensure the sdist and wheel GitHub Actions jobs succeeded on main after the last merge. * Also ensure that the tarball built has an autogenerated version number from setuptools_scm. 
* Write the release notes in the GitHub releases UI, use the autogenerated notes and tidy up a little. * Publish the new release, using the format `vX.Y.X`. -* Watch as GitHub actions builds the sdist and universal wheel and pushes them to PyPI for you, and updates CHANGES.md on the master branch. +* Watch as GitHub actions builds the sdist and universal wheel and pushes them to PyPI for you, and updates CHANGES.md on the main branch. * Enjoy the beverage of your choosing 🍻. From c050129019f70269995522774e918345997d2b66 Mon Sep 17 00:00:00 2001 From: Conor MacBride Date: Wed, 15 Jun 2022 22:27:32 +0100 Subject: [PATCH 2/9] Add changelog for v0.16.0 Needed to be manually added as the automated workflow failed --- CHANGES.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 9dbe976f..d8eb9188 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,25 @@ +## v0.16.0 - 2022-06-14 + +### Fixes + +- Make summary log message about test results in general instead of failures by @neutrinoceros in https://github.com/matplotlib/pytest-mpl/pull/148 +- Add support for classes with pytest 7 by @ConorMacBride in https://github.com/matplotlib/pytest-mpl/pull/164 + Note that this change necessitated a minor breaking change for figure tests within classes only, and the following will need to be done: + - Hash library test names will need to be regenerated/updated to include the class name. + - If the undocumented `mpl-use-full-test-name` ini option is enabled, the baseline images will need to be regenerated, or have their filename updated to include the class name. 
+ +### Other Changes + +- Improve parametrized test names in HTML summaries by @ConorMacBride in https://github.com/matplotlib/pytest-mpl/pull/165 + +### Infrastructure Changes + +- Pin tox environment `mpl35` to matplotlib 3.5.1 by @ConorMacBride in https://github.com/matplotlib/pytest-mpl/pull/162 +- [pre-commit.ci] pre-commit autoupdate by @pre-commit-ci in https://github.com/matplotlib/pytest-mpl/pull/167 +- Improve `tests/subtests` by @ConorMacBride in https://github.com/matplotlib/pytest-mpl/pull/163 + +**Full Changelog**: https://github.com/matplotlib/pytest-mpl/compare/v0.15.1...v0.16.0 + ## v0.15.1 - 2022-04-22 ### Fixes From 4e5981a286ae7c0a2fceab702092822690330e4b Mon Sep 17 00:00:00 2001 From: Conor MacBride Date: Sat, 25 Jun 2022 15:24:16 +0100 Subject: [PATCH 3/9] Refactor `generate_test_name` from method to function --- pytest_mpl/plugin.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/pytest_mpl/plugin.py b/pytest_mpl/plugin.py index 4613979c..30b47e37 100644 --- a/pytest_mpl/plugin.py +++ b/pytest_mpl/plugin.py @@ -89,6 +89,17 @@ def _pytest_pyfunc_call(obj, pyfuncitem): return True +def generate_test_name(item): + """ + Generate a unique name for the hash for this test. + """ + if item.cls is not None: + name = f"{item.module.__name__}.{item.cls.__name__}.{item.name}" + else: + name = f"{item.module.__name__}.{item.name}" + return name + + def pytest_report_header(config, startdir): import matplotlib import matplotlib.ft2font @@ -287,7 +298,7 @@ def generate_filename(self, item): Given a pytest item, generate the figure filename. 
""" if self.config.getini('mpl-use-full-test-name'): - filename = self.generate_test_name(item) + '.png' + filename = generate_test_name(item) + '.png' else: compare = get_compare(item) # Find test name to use as plot name @@ -298,21 +309,11 @@ def generate_filename(self, item): filename = str(pathify(filename)) return filename - def generate_test_name(self, item): - """ - Generate a unique name for the hash for this test. - """ - if item.cls is not None: - name = f"{item.module.__name__}.{item.cls.__name__}.{item.name}" - else: - name = f"{item.module.__name__}.{item.name}" - return name - def make_test_results_dir(self, item): """ Generate the directory to put the results in. """ - test_name = pathify(self.generate_test_name(item)) + test_name = pathify(generate_test_name(item)) results_dir = self.results_dir / test_name results_dir.mkdir(exist_ok=True, parents=True) return results_dir @@ -526,7 +527,7 @@ def compare_image_to_hash_library(self, item, fig, result_dir, summary=None): pytest.fail(f"Can't find hash library at path {hash_library_filename}") hash_library = self.load_hash_library(hash_library_filename) - hash_name = self.generate_test_name(item) + hash_name = generate_test_name(item) baseline_hash = hash_library.get(hash_name, None) summary['baseline_hash'] = baseline_hash @@ -613,7 +614,7 @@ def pytest_runtest_call(self, item): # noqa if remove_text: remove_ticks_and_titles(fig) - test_name = self.generate_test_name(item) + test_name = generate_test_name(item) result_dir = self.make_test_results_dir(item) summary = { From af96b020eba11ba1281250aea1d65bced3af7b78 Mon Sep 17 00:00:00 2001 From: Conor MacBride Date: Sat, 25 Jun 2022 15:44:31 +0100 Subject: [PATCH 4/9] Use a wrapper to intercept and store the figure Rather than overriding the low level `pytest_pyfunc_call`, we can wrap the test function in a wrapper that stores its return value to the plugin object. 
--- pytest_mpl/plugin.py | 46 ++++++++++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/pytest_mpl/plugin.py b/pytest_mpl/plugin.py index 30b47e37..dfbae95b 100644 --- a/pytest_mpl/plugin.py +++ b/pytest_mpl/plugin.py @@ -81,14 +81,6 @@ def pathify(path): return Path(path + ext) -def _pytest_pyfunc_call(obj, pyfuncitem): - testfunction = pyfuncitem.obj - funcargs = pyfuncitem.funcargs - testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames} - obj.result = testfunction(**testargs) - return True - - def generate_test_name(item): """ Generate a unique name for the hash for this test. @@ -100,6 +92,24 @@ def generate_test_name(item): return name +def wrap_figure_interceptor(plugin, item): + """ + Intercept and store figures returned by test functions. + """ + # Only intercept figures on marked figure tests + if get_compare(item) is not None: + + # Use the full test name as a key to ensure correct figure is being retrieved + test_name = generate_test_name(item) + + def figure_interceptor(store, obj): + def wrapper(*args, **kwargs): + store.return_value[test_name] = obj(*args, **kwargs) + return wrapper + + item.obj = figure_interceptor(plugin, item.obj) + + def pytest_report_header(config, startdir): import matplotlib import matplotlib.ft2font @@ -286,6 +296,7 @@ def __init__(self, self._generated_hash_library = {} self._test_results = {} self._test_stats = None + self.return_value = {} # https://stackoverflow.com/questions/51737378/how-should-i-log-in-my-pytest-plugin # turn debug prints on only if "-vv" or more passed @@ -608,13 +619,14 @@ def pytest_runtest_call(self, item): # noqa with plt.style.context(style, after_reset=True), switch_backend(backend): # Run test and get figure object + wrap_figure_interceptor(self, item) yield - fig = self.result + test_name = generate_test_name(item) + fig = self.return_value[test_name] if remove_text: remove_ticks_and_titles(fig) - test_name = 
generate_test_name(item) result_dir = self.make_test_results_dir(item) summary = { @@ -678,10 +690,6 @@ def pytest_runtest_call(self, item): # noqa if summary['status'] == 'skipped': pytest.skip(summary['status_msg']) - @pytest.hookimpl(tryfirst=True) - def pytest_pyfunc_call(self, pyfuncitem): - return _pytest_pyfunc_call(self, pyfuncitem) - def generate_summary_json(self): json_file = self.results_dir / 'results.json' with open(json_file, 'w') as f: @@ -733,13 +741,13 @@ class FigureCloser: def __init__(self, config): self.config = config + self.return_value = {} @pytest.hookimpl(hookwrapper=True) def pytest_runtest_call(self, item): + wrap_figure_interceptor(self, item) yield if get_compare(item) is not None: - close_mpl_figure(self.result) - - @pytest.hookimpl(tryfirst=True) - def pytest_pyfunc_call(self, pyfuncitem): - return _pytest_pyfunc_call(self, pyfuncitem) + test_name = generate_test_name(item) + fig = self.return_value[test_name] + close_mpl_figure(fig) From 3a22789649854efda54d6d30137c445820fcbc4e Mon Sep 17 00:00:00 2001 From: Conor MacBride Date: Sat, 25 Jun 2022 16:10:55 +0100 Subject: [PATCH 5/9] Add passing and failing tests for `unittest.TestCase` --- tests/test_pytest_mpl.py | 39 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/tests/test_pytest_mpl.py b/tests/test_pytest_mpl.py index fec94cb0..55c825b5 100644 --- a/tests/test_pytest_mpl.py +++ b/tests/test_pytest_mpl.py @@ -3,6 +3,7 @@ import json import subprocess from pathlib import Path +from unittest import TestCase import matplotlib import matplotlib.ft2font @@ -259,6 +260,23 @@ def test_succeeds(self): return fig +class TestClassWithTestCase(TestCase): + + # Regression test for a bug that occurred when using unittest.TestCase + + def setUp(self): + self.x = [1, 2, 3] + + @pytest.mark.mpl_image_compare(baseline_dir=baseline_dir_local, + filename='test_succeeds.png', + tolerance=DEFAULT_TOLERANCE) + def test_succeeds(self): + fig = 
plt.figure() + ax = fig.add_subplot(1, 1, 1) + ax.plot(self.x) + return fig + + # hashlib @pytest.mark.skipif(not hash_library.exists(), reason="No hash library for this mpl version") @@ -514,8 +532,27 @@ def test_fails(self): return fig """ +TEST_FAILING_UNITTEST_TESTCASE = """ +from unittest import TestCase +import pytest +import matplotlib.pyplot as plt +class TestClassWithTestCase(TestCase): + def setUp(self): + self.x = [1, 2, 3] + @pytest.mark.mpl_image_compare + def test_fails(self): + fig = plt.figure() + ax = fig.add_subplot(1, 1, 1) + ax.plot(self.x) + return fig +""" + -@pytest.mark.parametrize("code", [TEST_FAILING_CLASS, TEST_FAILING_CLASS_SETUP_METHOD]) +@pytest.mark.parametrize("code", [ + TEST_FAILING_CLASS, + TEST_FAILING_CLASS_SETUP_METHOD, + TEST_FAILING_UNITTEST_TESTCASE, +]) def test_class_fail(code, tmpdir): test_file = tmpdir.join('test.py').strpath From 76a560d3dcb66e965e68867f89527dabaf130f82 Mon Sep 17 00:00:00 2001 From: Conor MacBride Date: Sat, 25 Jun 2022 16:43:58 +0100 Subject: [PATCH 6/9] Raise any warnings during pytest --- setup.cfg | 5 +++++ tox.ini | 5 ----- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/setup.cfg b/setup.cfg index b9eb7133..c404d060 100644 --- a/setup.cfg +++ b/setup.cfg @@ -42,6 +42,11 @@ test = [tool:pytest] testpaths = "tests" +markers = + image: run test during image comparison only mode. + hash: run test during hash comparison only mode. +filterwarnings = + error [flake8] max-line-length = 100 diff --git a/tox.ini b/tox.ini index bec14d22..c4bdf468 100644 --- a/tox.ini +++ b/tox.ini @@ -51,8 +51,3 @@ description = check code style, e.g. with flake8 deps = pre-commit commands = pre-commit run --all-files - -[pytest] -markers = - image: run test during image comparison only mode. - hash: run test during hash comparison only mode. 
From d9d380dcb9fb073014a1f417b528814920194a23 Mon Sep 17 00:00:00 2001 From: Conor MacBride Date: Sat, 25 Jun 2022 16:55:48 +0100 Subject: [PATCH 7/9] Add ignored warnings --- setup.cfg | 2 ++ 1 file changed, 2 insertions(+) diff --git a/setup.cfg b/setup.cfg index c404d060..d2fb5e0a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -47,6 +47,8 @@ markers = hash: run test during hash comparison only mode. filterwarnings = error + ignore:distutils Version classes are deprecated + ignore:the imp module is deprecated in favour of importlib [flake8] max-line-length = 100 From 32b9a127fc505e1ca7d3686a231993788d08d064 Mon Sep 17 00:00:00 2001 From: Conor MacBride Date: Wed, 29 Jun 2022 14:39:32 +0100 Subject: [PATCH 8/9] Add tests to check that plugin works when the user's function fails --- tests/conftest.py | 9 ++++ tests/test_pytest_mpl.py | 104 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 113 insertions(+) create mode 100644 tests/conftest.py diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..9c3572d9 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,9 @@ +import pytest +from packaging.version import Version + +pytest_plugins = ["pytester"] + +if Version(pytest.__version__) < Version("6.2.0"): + @pytest.fixture + def pytester(testdir): + return testdir diff --git a/tests/test_pytest_mpl.py b/tests/test_pytest_mpl.py index 55c825b5..a9d2cba1 100644 --- a/tests/test_pytest_mpl.py +++ b/tests/test_pytest_mpl.py @@ -566,3 +566,107 @@ def test_class_fail(code, tmpdir): # If we don't use --mpl option, the test should succeed code = call_pytest([test_file]) assert code == 0 + + +@pytest.mark.parametrize("runpytest_args", [(), ("--mpl",)]) +def test_user_fail(pytester, runpytest_args): + pytester.makepyfile( + """ + import pytest + @pytest.mark.mpl_image_compare + def test_fail(): + pytest.fail("Manually failed by user.") + """ + ) + result = pytester.runpytest(*runpytest_args) + result.assert_outcomes(failed=1) + 
result.stdout.fnmatch_lines("FAILED*Manually failed by user.*") + + +@pytest.mark.parametrize("runpytest_args", [(), ("--mpl",)]) +def test_user_skip(pytester, runpytest_args): + pytester.makepyfile( + """ + import pytest + @pytest.mark.mpl_image_compare + def test_skip(): + pytest.skip("Manually skipped by user.") + """ + ) + result = pytester.runpytest(*runpytest_args) + result.assert_outcomes(skipped=1) + + +@pytest.mark.parametrize("runpytest_args", [(), ("--mpl",)]) +def test_user_importorskip(pytester, runpytest_args): + pytester.makepyfile( + """ + import pytest + @pytest.mark.mpl_image_compare + def test_importorskip(): + pytest.importorskip("nonexistantmodule") + """ + ) + result = pytester.runpytest(*runpytest_args) + result.assert_outcomes(skipped=1) + + +@pytest.mark.parametrize("runpytest_args", [(), ("--mpl",)]) +def test_user_xfail(pytester, runpytest_args): + pytester.makepyfile( + """ + import pytest + @pytest.mark.mpl_image_compare + def test_xfail(): + pytest.xfail() + """ + ) + result = pytester.runpytest(*runpytest_args) + result.assert_outcomes(xfailed=1) + + +@pytest.mark.parametrize("runpytest_args", [(), ("--mpl",)]) +def test_user_exit_success(pytester, runpytest_args): + pytester.makepyfile( + """ + import pytest + @pytest.mark.mpl_image_compare + def test_exit_success(): + pytest.exit("Manually exited by user.", returncode=0) + """ + ) + result = pytester.runpytest(*runpytest_args) + result.assert_outcomes() + assert result.ret == 0 + result.stdout.fnmatch_lines("*Exit*Manually exited by user.*") + + +@pytest.mark.parametrize("runpytest_args", [(), ("--mpl",)]) +def test_user_exit_failure(pytester, runpytest_args): + pytester.makepyfile( + """ + import pytest + @pytest.mark.mpl_image_compare + def test_exit_fail(): + pytest.exit("Manually exited by user.", returncode=1) + """ + ) + result = pytester.runpytest(*runpytest_args) + result.assert_outcomes() + assert result.ret == 1 + result.stdout.fnmatch_lines("*Exit*Manually exited by 
user.*") + + +@pytest.mark.parametrize("runpytest_args", [(), ("--mpl",)]) +def test_user_function_raises(pytester, runpytest_args): + pytester.makepyfile( + """ + import pytest + @pytest.mark.mpl_image_compare + def test_raises(): + raise ValueError("User code raised an exception.") + """ + ) + result = pytester.runpytest(*runpytest_args) + result.assert_outcomes(failed=1) + result.stdout.fnmatch_lines("FAILED*ValueError*User code*") From d84892b268d6ed3e6253b9a23623884697607371 Mon Sep 17 00:00:00 2001 From: Conor MacBride Date: Wed, 29 Jun 2022 14:49:31 +0100 Subject: [PATCH 9/9] Handle tests which do not complete If the return value hasn't been added to the dictionary, it means that the user's test didn't reach the end. This means that it raised an exception or was otherwise stopped early by pytest (skipped, failed, etc). In any case, we should not proceed with figure testing and use the result that pytest has already determined. Fixes #172 --- pytest_mpl/plugin.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pytest_mpl/plugin.py b/pytest_mpl/plugin.py index dfbae95b..3ddc24b4 100644 --- a/pytest_mpl/plugin.py +++ b/pytest_mpl/plugin.py @@ -622,6 +622,9 @@ def pytest_runtest_call(self, item): # noqa wrap_figure_interceptor(self, item) yield test_name = generate_test_name(item) + if test_name not in self.return_value: + # Test function did not complete successfully + return fig = self.return_value[test_name] if remove_text: @@ -749,5 +752,8 @@ def pytest_runtest_call(self, item): yield if get_compare(item) is not None: test_name = generate_test_name(item) + if test_name not in self.return_value: + # Test function did not complete successfully + return fig = self.return_value[test_name] close_mpl_figure(fig)