diff --git a/.coveragerc b/.coveragerc
index 57747ec0d8..f65ab1441f 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,9 +1,18 @@
 [run]
 branch = True
-source = nibabel, nisext
-include = */nibabel/*, */nisext/*
+source = nibabel
 omit =
     */externals/*
     */benchmarks/*
-    */tests/*
     nibabel/_version.py
+
+[report]
+exclude_also =
+    def __repr__
+    if (ty\.|typing\.)?TYPE_CHECKING:
+    class .*\((ty\.|typing\.)Protocol\):
+    @(ty\.|typing\.)overload
+    if 0:
+    if __name__ == .__main__.:
+    @(abc\.)?abstractmethod
+    raise NotImplementedError
diff --git a/.flake8 b/.flake8
deleted file mode 100644
index 9fe631ac81..0000000000
--- a/.flake8
+++ /dev/null
@@ -1,9 +0,0 @@
-[flake8]
-max-line-length = 100
-extend-ignore = E203,E266,E402,E731
-exclude =
-    *test*
-    *sphinx*
-    nibabel/externals/*
-per-file-ignores =
-    */__init__.py: F401
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000000..6c9e83fcbf
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,10 @@
+version: 2
+updates:
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "monthly"
+    groups:
+      actions-infrastructure:
+        patterns:
+          - "actions/*"
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index a6eb39734f..a741a40714 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -112,30 +112,51 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: ['ubuntu-latest', 'windows-latest', 'macos-latest']
-        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
-        architecture: ['x64', 'x86']
+        os: ['ubuntu-latest', 'windows-latest', 'macos-13', 'macos-latest']
+        python-version: ["3.9", "3.10", "3.11", "3.12"]
+        architecture: ['x64', 'x86', 'arm64']
         dependencies: ['full', 'pre']
         include:
           # Basic dependencies only
           - os: ubuntu-latest
-            python-version: 3.8
+            python-version: 3.9
             dependencies: 'none'
           # Absolute minimum dependencies
           - os: ubuntu-latest
-            python-version: 3.8
+            python-version: 3.9
             dependencies: 'min'
-          # NumPy 2.0
+          # NoGIL
           - os: ubuntu-latest
-            python-version: '3.12'
+            python-version: '3.13-dev'
             dependencies: 'dev'
         exclude:
+          # x86 for Windows + Python<3.12
           - os: ubuntu-latest
            architecture: x86
+          - os: macos-13
+            architecture: x86
          - os: macos-latest
            architecture: x86
          - python-version: '3.12'
            architecture: x86
+          # arm64 is available for macos-14+
+          - os: ubuntu-latest
+            architecture: arm64
+          - os: windows-latest
+            architecture: arm64
+          - os: macos-13
+            architecture: arm64
+          # x64 is not available for macos-14+
+          - os: macos-latest
+            architecture: x64
+          # Drop pre tests for macos-13
+          - os: macos-13
+            dependencies: pre
+          # Drop pre tests for SPEC-0-unsupported Python versions
+          - python-version: '3.9'
+            dependencies: pre
+          - python-version: '3.10'
+            dependencies: pre
 
     env:
       DEPENDS: ${{ matrix.dependencies }}
@@ -147,11 +168,18 @@ jobs:
           submodules: recursive
           fetch-depth: 0
       - name: Set up Python ${{ matrix.python-version }}
+        if: "!endsWith(matrix.python-version, '-dev')"
         uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
           architecture: ${{ matrix.architecture }}
           allow-prereleases: true
+      - name: Set up Python ${{ matrix.python-version }}
+        if: endsWith(matrix.python-version, '-dev')
+        uses: deadsnakes/action@v3.2.0
+        with:
+          python-version: ${{ matrix.python-version }}
+          nogil: true
      - name: Display Python version
        run: python -c "import sys; print(sys.version)"
      - name: Install tox
@@ -161,7 +189,7 @@ jobs:
       - name: Show tox config
         run: tox c
       - name: Run tox
-        run: tox -v --exit-and-dump-after 1200
+        run: tox -vv --exit-and-dump-after 1200
       - uses: codecov/codecov-action@v4
         if: ${{ always() }}
         with:
diff --git a/.mailmap b/.mailmap
index 7b5dfa0d43..43932c865b 100644
--- a/.mailmap
+++ b/.mailmap
@@ -75,6 +75,7 @@ Oliver P. Hinds
 Or Duek
 Oscar Esteban
 Paul McCarthy
+Paul McCarthy
 Reinder Vos de Wael
 Roberto Guidotti
 Roberto Guidotti
diff --git a/.pep8speaks.yml b/.pep8speaks.yml
deleted file mode 100644
index 0a0d8c619f..0000000000
--- a/.pep8speaks.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-scanner:
-    diff_only: True  # Only show errors caused by the patch
-    linter: flake8
-
-message:  # Customize the comment made by the bot
-    opened:  # Messages when a new PR is submitted
-        header: "Hello @{name}, thank you for submitting the Pull Request!"
-        footer: "To test for issues locally, `pip install flake8` and then run `flake8 nibabel`."
-    updated:  # Messages when new commits are added to the PR
-        header: "Hello @{name}, Thank you for updating!"
-        footer: "To test for issues locally, `pip install flake8` and then run `flake8 nibabel`."
-    no_errors: "Cheers! There are no style issues detected in this Pull Request. :beers: "
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2b620a6de3..4f49318eb0 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,7 +1,7 @@
 exclude: ".*/data/.*"
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.4.0
+    rev: v4.6.0
     hooks:
       - id: trailing-whitespace
       - id: end-of-file-fixer
@@ -12,21 +12,19 @@ repos:
       - id: check-case-conflict
       - id: check-merge-conflict
       - id: check-vcs-permalinks
-  - repo: https://github.com/grantjenks/blue
-    rev: v0.9.1
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.6.4
     hooks:
-      - id: blue
-  - repo: https://github.com/pycqa/isort
-    rev: 5.12.0
-    hooks:
-      - id: isort
-  - repo: https://github.com/pycqa/flake8
-    rev: 6.1.0
-    hooks:
-      - id: flake8
-        exclude: "^(doc|nisext|tools)/"
+      - id: ruff
+        args: [ --fix ]
+        exclude: ^(doc|tools)/
+      - id: ruff-format
+        exclude: ^(doc|tools)/
+      - id: ruff
+        args: [ --select, ISC001, --fix ]
+        exclude: ^(doc|tools)/
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.5.1
+    rev: v1.11.2
     hooks:
       - id: mypy
         # Sync with project.optional-dependencies.typing
@@ -41,7 +39,7 @@ repos:
         args: ["nibabel"]
         pass_filenames: false
   - repo: https://github.com/codespell-project/codespell
-    rev: v2.2.6
+    rev: v2.3.0
     hooks:
       - id: codespell
         additional_dependencies:
diff --git a/.zenodo.json b/.zenodo.json
index 6cadd84a7a..250611d54d 100644
--- a/.zenodo.json
+++ b/.zenodo.json
@@ -25,6 +25,11 @@
       "name": "Cipollini, Ben",
       "orcid": "0000-0002-7782-0790"
     },
+    {
+      "affiliation": "CEA",
+      "name": "Papadopoulos Orfanos, Dimitri",
+      "orcid": "0000-0002-1242-8990"
+    },
     {
       "name": "McCarthy, Paul"
     },
@@ -78,13 +83,11 @@
       "orcid": "0000-0001-7159-1387"
     },
     {
-      "name": "Wang, Hao-Ting",
-      "orcid": "0000-0003-4078-2038"
+      "name": "Moloney, Brendan"
     },
     {
-      "affiliation": "CEA",
-      "name": "Papadopoulos Orfanos, Dimitri",
-      "orcid": "0000-0002-1242-8990"
+      "name": "Wang, Hao-Ting",
+      "orcid": "0000-0003-4078-2038"
     },
     {
       "affiliation": "Harvard University - Psychology",
@@ -123,9 +126,6 @@
     {
       "name": "S\u00f3lon, Anibal"
     },
-    {
-      "name": "Moloney, Brendan"
-    },
     {
       "name": "Morency, F\u00e9lix C."
     },
@@ -177,6 +177,11 @@
     {
       "name": "Van, Andrew"
     },
+    {
+      "affiliation": "Brigham and Women's Hospital, Mass General Brigham/Harvard Medical School",
+      "name": "Legarreta, Jon Haitz",
+      "orcid": "0000-0002-9661-1396"
+    },
     {
       "affiliation": "Google",
       "name": "Gorgolewski, Krzysztof J.",
@@ -203,6 +208,9 @@
     {
       "name": "Baker, Eric M."
     },
+    {
+      "name": "Koudoro, Serge"
+    },
     {
       "name": "Hayashi, Soichi"
     },
@@ -220,14 +228,14 @@
       "name": "Esteban, Oscar",
       "orcid": "0000-0001-8435-6191"
     },
-    {
-      "name": "Koudoro, Serge"
-    },
     {
       "affiliation": "University College London",
       "name": "P\u00e9rez-Garc\u00eda, Fernando",
       "orcid": "0000-0001-9090-3024"
     },
+    {
+      "name": "Becq, Guillaume"
+    },
     {
       "name": "Dock\u00e8s, J\u00e9r\u00f4me"
     },
@@ -270,9 +278,9 @@
       "orcid": "0000-0003-1076-5122"
     },
     {
-      "affiliation": "Universit\u00e9 de Sherbrooke",
-      "name": "Legarreta, Jon Haitz",
-      "orcid": "0000-0002-9661-1396"
+      "affiliation": "Polytechnique Montr\u00e9al, Montr\u00e9al, CA",
+      "name": "Newton, Joshua",
+      "orcid": "0009-0005-6963-3812"
     },
     {
       "name": "Hahn, Kevin S."
     },
@@ -285,6 +293,9 @@
     {
       "name": "Hinds, Oliver P."
     },
+    {
+      "name": "Sandro"
+    },
     {
       "name": "Fauber, Bennet"
     },
diff --git a/Changelog b/Changelog
index 6892951256..f72a6a8874 100644
--- a/Changelog
+++ b/Changelog
@@ -25,6 +25,74 @@ Eric Larson (EL), Demian Wassermann, Stephan Gerhard and Ross Markello (RM).
 
 References like "pr/298" refer to github pull request numbers.
 
+5.3.0 (Tuesday 8 October 2024)
+==============================
+
+This release primarily adds support for Python 3.13 and Numpy 2.0.
+
+NiBabel 6.0 will drop support for Numpy 1.x.
+
+New features
+------------
+* Update NIfTI extension protocol to include ``.content : bytes``, ``.text : str`` and
+  ``.json : dict`` properties for accessing extension contents. Exceptions will be raised on
+  ``.text`` and ``.json`` if conversion fails. (pr/1336) (CM)
+
+Enhancements
+------------
+* Ability to read data from many multiframe DICOM files that previously generated errors (pr/1340)
+  (Brendan Moloney, reviewed by CM)
+* ``nib-nifti-dx`` now supports NIfTI-2 files with a ``--nifti2`` flag (pr/1323) (CM)
+* Update :mod:`nibabel.streamlines.tractogram` to support ragged arrays. (pr/1291)
+  (Serge Koudoro, reviewed by CM)
+* Filter numpy ``UserWarning`` on ``np.finfo(np.longdouble)``. This can occur on
+  Windows systems, but it's done in the context of checking for the problem that
+  is being warned against, so there's no need to be noisy. (pr/1310)
+  (Joshua Newton, reviewed by CM)
+* Improve error message for dicomwrapper errors in shape calculation (pr/1302)
+  (YOH, reviewed by CM)
+* Support "flat" ASCII-encoded GIFTI DataArrays (pr/1298) (PM, reviewed by CM)
+
+Bug fixes
+---------
+* Fix location initialization/update in OrthoSlicer3D for permuted axes (pr/1319, pr/1350)
+  (Guillaume Becq, reviewed by CM)
+* Fix DICOM scaling, making frame filtering explicit (pr/1342) (Brendan Moloney, reviewed by CM)
+* Fixed multiframe DICOM issue where data could be flipped along slice dimension relative to the
+  affine (pr/1340) (Brendan Moloney, reviewed by CM)
+* Fixed multiframe DICOM issue where ``image_position`` and the translation component in the
+  ``affine`` could be incorrect (pr/1340) (Brendan Moloney, reviewed by CM)
+
+Maintenance
+-----------
+* Numpy 2.0 compatibility and addressing deprecations in numpy API
+  (pr/1304, pr/1330, pr/1331, pr/1334, pr/1337) (Jon Haitz Legarreta Gorroño, CM)
+* Python 3.13 compatibility (pr/1315) (Sandro from the Fedora Project, reviewed by CM)
+* Testing on Python 3.13 with free-threading (pr/1339) (CM)
+* Testing on ARM64 Mac OS runners (pr/1320) (CM)
+* Proactively address deprecations in coming Python versions (pr/1329, pr/1332, pr/1333)
+  (Jon Haitz Legarreta Gorroño, reviewed by CM)
+* Replace nose-era ``setup()`` and ``teardown()`` functions with pytest equivalents
+  (pr/1325) (Sandro from the Fedora Project, reviewed by Étienne Mollier and CM)
+* Transitioned from blue/isort/flake8 to `ruff <https://docs.astral.sh/ruff/>`__. (pr/1289)
+  (Dimitri Papadopoulos, reviewed by CM)
+* Vetted and added various rules to the ruff configuration for auto-formatting and style
+  guide enforcement. (pr/1321, pr/1351, pr/1352, pr/1353, pr/1354, pr/1355, pr/1357, pr/1358,
+  pr/1359, pr/1360, pr/1361, pr/1362, pr/1363, pr/1364, pr/1368, pr/1369)
+  (Dimitri Papadopoulos, reviewed by CM)
+* Fixing typos when found. (pr/1313, pr/1370) (MB, Dimitri Papadopoulos)
+* Applied Repo-Review suggestions (Dimitri Papadopoulos, reviewed by CM)
+
+API changes and deprecations
+----------------------------
+* Raise :class:`~nibabel.spatialimages.HeaderDataError` from
+  :func:`~nibabel.nifti1.Nifti1Header.set_qform` if the affine fails to decompose.
+  This would previously result in :class:`numpy.linalg.LinAlgError`. (pr/1227) (CM)
+* The :func:`nibabel.onetime.auto_attr` decorator can be replaced by
+  :func:`functools.cached_property` in all supported versions of Python. This alias may be
+  removed in future versions. (pr/1341) (CM)
+* Removed the deprecated ``nisext`` (setuptools extensions) package. (pr/1290) (CM, reviewed by MB)
+
+
 5.2.1 (Monday 26 February 2024)
 ===============================
diff --git a/Makefile b/Makefile
index 7d4c6666ae..689ad6a75f 100644
--- a/Makefile
+++ b/Makefile
@@ -233,25 +233,6 @@ bdist_rpm:
 bdist_mpkg:
	$(PYTHON) tools/mpkg_wrapper.py setup.py install
 
-# Check for files not installed
-check-files:
-	$(PYTHON) -c 'from nisext.testers import check_files; check_files("nibabel")'
-
-# Print out info for possible install methods
-check-version-info:
-	$(PYTHON) -c 'from nisext.testers import info_from_here; info_from_here("nibabel")'
-
-# Run tests from installed code
-installed-tests:
-	$(PYTHON) -c 'from nisext.testers import tests_installed; tests_installed("nibabel")'
-
-# Run tests from packaged distributions
-sdist-tests:
-	$(PYTHON) -c 'from nisext.testers import sdist_tests; sdist_tests("nibabel", doctests=False)'
-
-bdist-egg-tests:
-	$(PYTHON) -c 'from nisext.testers import bdist_egg_tests; bdist_egg_tests("nibabel", doctests=False, label="not script_test")'
-
 sdist-venv: clean
	rm -rf dist venv
	unset PYTHONPATH && $(PYTHON) setup.py sdist --formats=zip
@@ -260,7 +241,7 @@ sdist-venv: clean
	mkdir venv/tmp
	cd venv/tmp && unzip ../../dist/*.zip
	. venv/bin/activate && cd venv/tmp/nibabel* && python setup.py install
-	unset PYTHONPATH && . venv/bin/activate && cd venv && nosetests --with-doctest nibabel nisext
+	unset PYTHONPATH && . venv/bin/activate && cd venv && pytest --doctest-modules --doctest-plus --pyargs nibabel
 
 source-release: distclean
	$(PYTHON) -m compileall .
diff --git a/doc/README.rst b/doc/README.rst
index a19a3c1261..d5fd9765e6 100644
--- a/doc/README.rst
+++ b/doc/README.rst
@@ -3,7 +3,7 @@ Nibabel documentation
 #####################
 
 To build the documentation, change to the root directory (containing
-``setup.py``) and run::
+``pyproject.toml``) and run::
 
     pip install -r doc-requirements.txt
-    make html
+    make -C doc html
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 175c6340bd..9811651223 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
 # vi: set ft=python sts=4 ts=4 sw=4 et:
 ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
@@ -29,6 +28,10 @@
     import tomli as tomllib
 
 # Check for external Sphinx extensions we depend on
+try:
+    import numpy as np
+except ImportError:
+    raise RuntimeError('Need to install "numpy" package for doc build')
 try:
     import numpydoc
 except ImportError:
@@ -46,6 +49,11 @@
         'Need nibabel on Python PATH; consider "make htmldoc" from nibabel root directory'
     )
 
+from packaging.version import Version
+
+if Version(np.__version__) >= Version('1.22'):
+    np.set_printoptions(legacy='1.21')
+
 # -- General configuration ----------------------------------------------------
 
 # We load the nibabel release info into a dict by explicit execution
@@ -94,7 +102,7 @@
 
 # General information about the project.
 project = 'NiBabel'
-copyright = f"2006-2023, {authors['name']} <{authors['email']}>"
+copyright = f"2006, {authors['name']} <{authors['email']}>"
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 72c731d25f..677e81b331 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -130,6 +130,9 @@ contributed code and discussion (in rough order of appearance):
 * Reinder Vos de Wael
 * Peter Suter
 * Blake Dewey
+* Guillaume Becq
+* Joshua Newton
+* Sandro from the Fedora Project
 
 License reprise
 ===============
diff --git a/doc/source/installation.rst b/doc/source/installation.rst
index 4f747e7feb..983968c50f 100644
--- a/doc/source/installation.rst
+++ b/doc/source/installation.rst
@@ -81,16 +81,16 @@ is for you.
 Requirements
 ------------
 
-.. check these against pyproject.toml
-
-* Python_ 3.8 or greater
-* NumPy_ 1.20 or greater
-* Packaging_ 17.0 or greater
-* importlib-resources_ 1.3 or greater (or Python 3.9+)
-* SciPy_ (optional, for full SPM-ANALYZE support)
-* h5py_ (optional, for MINC2 support)
-* PyDICOM_ 1.0.0 or greater (optional, for DICOM support)
-* `Python Imaging Library`_ (optional, for PNG conversion in DICOMFS)
+.. check these against pyproject.toml / tox.ini
+
+* Python_ 3.9 or greater
+* NumPy_ 1.22 or greater
+* Packaging_ 20.0 or greater
+* importlib-resources_ 5.12 or greater (or Python 3.12+)
+* SciPy_ 1.8 or greater (optional, for full SPM-ANALYZE support)
+* h5py_ 3.5 or greater (optional, for MINC2 support)
+* PyDICOM_ 2.3.0 or greater (optional, for DICOM support)
+* `Python Imaging Library`_ 8.4 or greater (optional, for PNG conversion in DICOMFS)
 * pytest_ (optional, to run the tests)
 * sphinx_ (optional, to build the documentation)
diff --git a/doc/tools/apigen.py b/doc/tools/apigen.py
index 3167362643..336c81d8d8 100644
--- a/doc/tools/apigen.py
+++ b/doc/tools/apigen.py
@@ -405,10 +405,7 @@ def discover_modules(self):
 
     def write_modules_api(self, modules, outdir):
         # upper-level modules
-        main_module = modules[0].split('.')[0]
-        ulms = [
-            '.'.join(m.split('.')[:2]) if m.count('.') >= 1 else m.split('.')[0] for m in modules
-        ]
+        ulms = ['.'.join(m.split('.')[:2]) for m in modules]
 
         from collections import OrderedDict
diff --git a/doc/tools/build_modref_templates.py b/doc/tools/build_modref_templates.py
index 11eae99741..76cf9cdf39 100755
--- a/doc/tools/build_modref_templates.py
+++ b/doc/tools/build_modref_templates.py
@@ -9,7 +9,7 @@
 import sys
 
 # version comparison
-from distutils.version import LooseVersion as V
+from packaging.version import Version as V
 from os.path import join as pjoin
 
 # local imports
@@ -38,7 +38,7 @@ def abort(error):
 
     try:
         __import__(package)
-    except ImportError as e:
+    except ImportError:
         abort('Can not import ' + package)
 
     module = sys.modules[package]
@@ -73,6 +73,8 @@ def abort(error):
             if re.match('^_version_(major|minor|micro|extra)', v)
         ]
     )
+
+    source_version = V(source_version)
     print('***', source_version)
 
     if source_version != installed_version:
diff --git a/nibabel/__init__.py b/nibabel/__init__.py
index db427435ae..c389c603fc 100644
--- a/nibabel/__init__.py
+++ b/nibabel/__init__.py
@@ -39,12 +39,10 @@
 
 # module imports
 from . import analyze as ana
-from . import ecat, imagestats, mriutils
+from . import ecat, imagestats, mriutils, orientations, streamlines, viewers
 from . import nifti1 as ni1
-from . import orientations
 from . import spm2analyze as spm2
 from . import spm99analyze as spm99
-from . import streamlines, viewers
 
 # isort: split
@@ -172,10 +170,7 @@ def bench(label=None, verbose=1, extra_argv=None):
     code : ExitCode
         Returns the result of running the tests as a ``pytest.ExitCode`` enum
     """
-    try:
-        from importlib.resources import as_file, files
-    except ImportError:
-        from importlib_resources import as_file, files
+    from importlib.resources import as_file, files
 
     args = []
     if extra_argv is not None:
diff --git a/nibabel/_compression.py b/nibabel/_compression.py
index bf13895c80..871be2629f 100644
--- a/nibabel/_compression.py
+++ b/nibabel/_compression.py
@@ -7,17 +7,19 @@
 # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
 """Constants and types for dealing transparently with compression"""
+
 from __future__ import annotations
 
 import bz2
 import gzip
-import io
 import typing as ty
 
 from .optpkg import optional_package
 
-if ty.TYPE_CHECKING:  # pragma: no cover
-    import indexed_gzip  # type: ignore
+if ty.TYPE_CHECKING:
+    import io
+
+    import indexed_gzip  # type: ignore[import]
     import pyzstd
 
     HAVE_INDEXED_GZIP = True
@@ -40,7 +42,7 @@
 if HAVE_INDEXED_GZIP:
     COMPRESSED_FILE_LIKES += (indexed_gzip.IndexedGzipFile,)
     COMPRESSION_ERRORS += (indexed_gzip.ZranError,)
-    from indexed_gzip import IndexedGzipFile  # type: ignore
+    from indexed_gzip import IndexedGzipFile  # type: ignore[import-not-found]
 else:
     IndexedGzipFile = gzip.GzipFile
diff --git a/nibabel/affines.py b/nibabel/affines.py
index 05fdd7bb58..4b6001dec0 100644
--- a/nibabel/affines.py
+++ b/nibabel/affines.py
@@ -1,6 +1,7 @@
 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
 # vi: set ft=python sts=4 ts=4 sw=4 et:
 """Utility routines for working with points and affine transforms"""
+
 from functools import reduce
 
 import numpy as np
@@ -365,7 +366,7 @@ def rescale_affine(affine, shape, zooms, new_shape=None):
         A new affine transform with the specified voxel sizes
 
     """
-    shape = np.array(shape, copy=False)
+    shape = np.asarray(shape)
     new_shape = np.array(new_shape if new_shape is not None else shape)
 
     s = voxel_sizes(affine)
diff --git a/nibabel/analyze.py b/nibabel/analyze.py
index 20fdac055a..d02363c792 100644
--- a/nibabel/analyze.py
+++ b/nibabel/analyze.py
@@ -81,6 +81,7 @@
 can be loaded with and without a default flip, so the saved zoom will not
 constrain the affine.
""" + from __future__ import annotations import numpy as np @@ -514,7 +515,9 @@ def data_to_fileobj(self, data, fileobj, rescale=True): data = np.asanyarray(data) shape = self.get_data_shape() if data.shape != shape: - raise HeaderDataError('Data should be shape (%s)' % ', '.join(str(s) for s in shape)) + raise HeaderDataError( + 'Data should be shape ({})'.format(', '.join(str(s) for s in shape)) + ) out_dtype = self.get_data_dtype() if rescale: try: @@ -696,7 +699,7 @@ def set_zooms(self, zooms): ndim = dims[0] zooms = np.asarray(zooms) if len(zooms) != ndim: - raise HeaderDataError('Expecting %d zoom values for ndim %d' % (ndim, ndim)) + raise HeaderDataError(f'Expecting {ndim} zoom values for ndim {ndim}') if np.any(zooms < 0): raise HeaderDataError('zooms must be positive') pixdims = hdr['pixdim'] @@ -815,11 +818,11 @@ def _chk_datatype(klass, hdr, fix=False): dtype = klass._data_type_codes.dtype[code] except KeyError: rep.problem_level = 40 - rep.problem_msg = 'data code %d not recognized' % code + rep.problem_msg = f'data code {code} not recognized' else: if dtype.itemsize == 0: rep.problem_level = 40 - rep.problem_msg = 'data code %d not supported' % code + rep.problem_msg = f'data code {code} not supported' else: return hdr, rep if fix: @@ -929,7 +932,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): Parameters ---------- file_map : dict - Mapping with (kay, value) pairs of (``file_type``, FileHolder + Mapping with (key, value) pairs of (``file_type``, FileHolder instance giving file-likes for each file needed for this image type. mmap : {True, False, 'c', 'r'}, optional, keyword only diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index 57d8aa0f8b..ed2310519e 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -25,6 +25,7 @@ See :mod:`nibabel.tests.test_proxy_api` for proxy API conformance checks. """ + from __future__ import annotations import typing as ty @@ -56,7 +57,7 @@ KEEP_FILE_OPEN_DEFAULT = False -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: import numpy.typing as npt from typing_extensions import Self # PY310 @@ -74,21 +75,17 @@ class ArrayLike(ty.Protocol): shape: tuple[int, ...] @property - def ndim(self) -> int: - ... # pragma: no cover + def ndim(self) -> int: ... # If no dtype is passed, any dtype might be returned, depending on the array-like @ty.overload - def __array__(self, dtype: None = ..., /) -> np.ndarray[ty.Any, np.dtype[ty.Any]]: - ... # pragma: no cover + def __array__(self, dtype: None = ..., /) -> np.ndarray[ty.Any, np.dtype[ty.Any]]: ... # Any dtype might be passed, and *that* dtype must be returned @ty.overload - def __array__(self, dtype: _DType, /) -> np.ndarray[ty.Any, _DType]: - ... # pragma: no cover + def __array__(self, dtype: _DType, /) -> np.ndarray[ty.Any, _DType]: ... - def __getitem__(self, key, /) -> npt.NDArray: - ... # pragma: no cover + def __getitem__(self, key, /) -> npt.NDArray: ... class ArrayProxy(ArrayLike): diff --git a/nibabel/arraywriters.py b/nibabel/arraywriters.py index 751eb6ad1f..1f55263fc3 100644 --- a/nibabel/arraywriters.py +++ b/nibabel/arraywriters.py @@ -28,6 +28,7 @@ def __init__(self, array, out_dtype=None) something else to make sense of conversions between float and int, or between larger ints and smaller. 
""" + import numpy as np from .casting import best_float, floor_exact, int_abs, shared_range, type_info diff --git a/nibabel/batteryrunners.py b/nibabel/batteryrunners.py index 30727f3962..860b9b993c 100644 --- a/nibabel/batteryrunners.py +++ b/nibabel/batteryrunners.py @@ -252,7 +252,7 @@ def __str__(self): def message(self): """formatted message string, including fix message if present""" if self.fix_msg: - return '; '.join((self.problem_msg, self.fix_msg)) + return f'{self.problem_msg}; {self.fix_msg}' return self.problem_msg def log_raise(self, logger, error_level=40): diff --git a/nibabel/benchmarks/bench_array_to_file.py b/nibabel/benchmarks/bench_array_to_file.py index c2bab7e95e..2af8b5677f 100644 --- a/nibabel/benchmarks/bench_array_to_file.py +++ b/nibabel/benchmarks/bench_array_to_file.py @@ -11,12 +11,12 @@ """ import sys -from io import BytesIO # NOQA +from io import BytesIO # noqa: F401 import numpy as np from numpy.testing import measure -from nibabel.volumeutils import array_to_file # NOQA +from nibabel.volumeutils import array_to_file # noqa: F401 from .butils import print_git_title diff --git a/nibabel/benchmarks/bench_arrayproxy_slicing.py b/nibabel/benchmarks/bench_arrayproxy_slicing.py index 958923d7ea..3444cb8d8f 100644 --- a/nibabel/benchmarks/bench_arrayproxy_slicing.py +++ b/nibabel/benchmarks/bench_arrayproxy_slicing.py @@ -26,7 +26,7 @@ # if memory_profiler is installed, we get memory usage results try: - from memory_profiler import memory_usage # type: ignore + from memory_profiler import memory_usage # type: ignore[import] except ImportError: memory_usage = None @@ -56,7 +56,6 @@ def bench_arrayproxy_slicing(): - print_git_title('\nArrayProxy gzip slicing') # each test is a tuple containing @@ -100,7 +99,6 @@ def fmt_sliceobj(sliceobj): return f"[{', '.join(slcstr)}]" with InTemporaryDirectory(): - print(f'Generating test data... ({int(round(np.prod(SHAPE) * 4 / 1048576.0))} MB)') data = np.array(np.random.random(SHAPE), dtype=np.float32) @@ -128,7 +126,6 @@ def fmt_sliceobj(sliceobj): seeds = [np.random.randint(0, 2**32) for s in SLICEOBJS] for ti, test in enumerate(tests): - label = get_test_label(test) have_igzip, keep_open, sliceobj = test seed = seeds[SLICEOBJS.index(sliceobj)] diff --git a/nibabel/benchmarks/bench_finite_range.py b/nibabel/benchmarks/bench_finite_range.py index edd839ce61..957446884c 100644 --- a/nibabel/benchmarks/bench_finite_range.py +++ b/nibabel/benchmarks/bench_finite_range.py @@ -15,7 +15,7 @@ import numpy as np from numpy.testing import measure -from nibabel.volumeutils import finite_range # NOQA +from nibabel.volumeutils import finite_range # noqa: F401 from .butils import print_git_title diff --git a/nibabel/benchmarks/butils.py b/nibabel/benchmarks/butils.py index 01d6931eba..13c255d1c1 100644 --- a/nibabel/benchmarks/butils.py +++ b/nibabel/benchmarks/butils.py @@ -1,5 +1,4 @@ -"""Benchmarking utilities -""" +"""Benchmarking utilities""" from .. import get_info diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 6694ff08a5..d187a6b34b 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -26,6 +26,7 @@ am aware) always be >= 1. This permits sub-brick indexing common in AFNI programs (e.g., example4d+orig'[0]'). 
""" + import os import re from copy import deepcopy @@ -197,7 +198,7 @@ def parse_AFNI_header(fobj): return parse_AFNI_header(src) # unpack variables in HEAD file head = fobj.read().split('\n\n') - return {key: value for key, value in map(_unpack_var, head)} + return dict(map(_unpack_var, head)) class AFNIArrayProxy(ArrayProxy): @@ -390,7 +391,7 @@ def get_affine(self): # AFNI default is RAI- == LPS+ == DICOM order. We need to flip RA sign # to align with nibabel RAS+ system affine = np.asarray(self.info['IJK_TO_DICOM_REAL']).reshape(3, 4) - affine = np.row_stack((affine * [[-1], [-1], [1]], [0, 0, 0, 1])) + affine = np.vstack((affine * [[-1], [-1], [1]], [0, 0, 0, 1])) return affine def get_data_scaling(self): diff --git a/nibabel/casting.py b/nibabel/casting.py index f3e04f30f4..b279325477 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -3,6 +3,7 @@ Most routines work round some numpy oddities in floating point precision and casting. Others work round numpy casting to and from python ints """ + from __future__ import annotations import warnings @@ -50,11 +51,11 @@ class CastingError(Exception): getattr(np, dtype) for dtype in ( 'int8', 'byte', 'int16', 'short', 'int32', 'intc', 'int_', 'int64', 'longlong', - 'uint8', 'ubyte', 'uint16', 'ushort', 'uint32', 'uintc', 'uint', 'uint64', 'ulonglong', # noqa: E501 - 'float16', 'half', 'float32', 'single', 'float64', 'double', 'float96', 'float128', 'longdouble', # noqa: E501 - 'complex64', 'csingle', 'complex128', 'cdouble', 'complex192', 'complex256', 'clongdouble', # noqa: E501 + 'uint8', 'ubyte', 'uint16', 'ushort', 'uint32', 'uintc', 'uint', 'uint64', 'ulonglong', + 'float16', 'half', 'float32', 'single', 'float64', 'double', 'float96', 'float128', 'longdouble', + 'complex64', 'csingle', 'complex128', 'cdouble', 'complex192', 'complex256', 'clongdouble', # other names of the built-in scalar types - 'int_', 'float_', 'complex_', 'bytes_', 'str_', 'bool_', 'datetime64', 'timedelta64', # noqa: E501 + 'int_', 'float_', 'complex_', 'bytes_', 'str_', 'bool_', 'datetime64', 'timedelta64', # other 'object_', 'void', ) @@ -274,7 +275,13 @@ def type_info(np_type): nexp=None, width=width, ) - info = np.finfo(dt) + # Mitigate warning from WSL1 when checking `np.longdouble` (#1309) + with warnings.catch_warnings(): + warnings.filterwarnings( + action='ignore', category=UserWarning, message='Signature.*numpy.longdouble' + ) + info = np.finfo(dt) + # Trust the standard IEEE types nmant, nexp = info.nmant, info.nexp ret = dict( @@ -611,7 +618,7 @@ def int_abs(arr): >>> int_abs(np.array([-128, 127], dtype=np.float32)) array([128., 127.], dtype=float32) """ - arr = np.array(arr, copy=False) + arr = np.asarray(arr) dt = arr.dtype if dt.kind == 'u': return arr @@ -757,7 +764,7 @@ def able_int_type(values): >>> able_int_type([-1, 1]) == np.int8 True """ - if any([v % 1 for v in values]): + if any(v % 1 for v in values): return None mn = min(values) mx = max(values) diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index 452bceb7ea..b2b67978b7 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -16,6 +16,7 @@ http://www.nitrc.org/projects/cifti """ + import re from collections import OrderedDict from collections.abc import Iterable, MutableMapping, MutableSequence @@ -1569,7 +1570,7 @@ def to_file_map(self, file_map=None, dtype=None): self.update_headers() header = self._nifti_header - extension = Cifti2Extension(content=self.header.to_xml()) + extension = Cifti2Extension.from_bytes(self.header.to_xml()) 
         header.extensions = Nifti1Extensions(
             ext for ext in header.extensions if not isinstance(ext, Cifti2Extension)
         )
diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py
index 6443a34fb5..32914be1b6 100644
--- a/nibabel/cifti2/cifti2_axes.py
+++ b/nibabel/cifti2/cifti2_axes.py
@@ -118,6 +118,7 @@
     ...                                                          bm_cortex)))
 """
+
 import abc
 from operator import xor
@@ -372,7 +373,7 @@ def from_mask(cls, mask, name='other', affine=None):
         else:
             raise ValueError(
                 'Mask should be either 1-dimensional (for surfaces) or '
-                '3-dimensional (for volumes), not %i-dimensional' % mask.ndim
+                f'3-dimensional (for volumes), not {mask.ndim}-dimensional'
             )
 
     @classmethod
@@ -1518,7 +1519,6 @@ def get_element(self, index):
             index = self.size + index
         if index >= self.size or index < 0:
             raise IndexError(
-                'index %i is out of range for SeriesAxis with size %i'
-                % (original_index, self.size)
+                f'index {original_index} is out of range for SeriesAxis with size {self.size}'
             )
         return self.start + self.step * index
diff --git a/nibabel/cifti2/parse_cifti2.py b/nibabel/cifti2/parse_cifti2.py
index 48c2e06537..764e3ae203 100644
--- a/nibabel/cifti2/parse_cifti2.py
+++ b/nibabel/cifti2/parse_cifti2.py
@@ -40,19 +40,15 @@
 )
 
 
-class Cifti2Extension(Nifti1Extension):
+class Cifti2Extension(Nifti1Extension[Cifti2Header]):
     code = 32
 
-    def __init__(self, code=None, content=None):
-        Nifti1Extension.__init__(self, code=code or self.code, content=content)
-
-    def _unmangle(self, value):
+    def _unmangle(self, value: bytes) -> Cifti2Header:
         parser = Cifti2Parser()
         parser.parse(string=value)
-        self._content = parser.header
-        return self._content
+        return parser.header
 
-    def _mangle(self, value):
+    def _mangle(self, value: Cifti2Header) -> bytes:
         if not isinstance(value, Cifti2Header):
             raise ValueError('Can only mangle a Cifti2Header.')
         return value.to_xml()
diff --git a/nibabel/cifti2/tests/test_cifti2.py b/nibabel/cifti2/tests/test_cifti2.py
index bf287b8e03..6382dab9d6 100644
--- a/nibabel/cifti2/tests/test_cifti2.py
+++ b/nibabel/cifti2/tests/test_cifti2.py
@@ -1,5 +1,5 @@
-"""Testing CIFTI-2 objects
-"""
+"""Testing CIFTI-2 objects"""
+
 import collections
 from xml.etree import ElementTree
 
@@ -7,7 +7,7 @@
 import pytest
 
 from nibabel import cifti2 as ci
-from nibabel.cifti2.cifti2 import Cifti2HeaderError, _float_01, _value_if_klass
+from nibabel.cifti2.cifti2 import _float_01, _value_if_klass
 from nibabel.nifti2 import Nifti2Header
 from nibabel.tests.test_dataobj_images import TestDataobjAPI as _TDA
 from nibabel.tests.test_image_api import DtypeOverrideMixin, SerializeMixin
@@ -37,7 +37,7 @@ def test_cifti2_metadata():
     assert len(md) == 1
     assert list(iter(md)) == ['a']
     assert md['a'] == 'aval'
-    assert md.data == dict([('a', 'aval')])
+    assert md.data == {'a': 'aval'}
 
     with pytest.warns(FutureWarning):
         md = ci.Cifti2MetaData(metadata={'a': 'aval'})
@@ -57,7 +57,7 @@ def test_cifti2_metadata():
     md['a'] = 'aval'
     assert md['a'] == 'aval'
     assert len(md) == 1
-    assert md.data == dict([('a', 'aval')])
+    assert md.data == {'a': 'aval'}
 
     del md['a']
     assert len(md) == 0
@@ -392,7 +392,7 @@ def test_matrix():
     m[0] = mim_1
     assert list(m.mapped_indices) == [1]
     m.insert(0, mim_0)
-    assert list(sorted(m.mapped_indices)) == [0, 1]
+    assert sorted(m.mapped_indices) == [0, 1]
     assert h.number_of_mapped_indices == 2
     assert h.get_index_map(0) == mim_0
     assert h.get_index_map(1) == mim_1
diff --git a/nibabel/cifti2/tests/test_cifti2io_header.py b/nibabel/cifti2/tests/test_cifti2io_header.py
index 8d393686dd..ecdf0c69a7 100644
--- a/nibabel/cifti2/tests/test_cifti2io_header.py
+++ b/nibabel/cifti2/tests/test_cifti2io_header.py
@@ -7,7 +7,6 @@
 # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
 
-import io
 from os.path import dirname
 from os.path import join as pjoin
 
@@ -38,7 +37,7 @@
 
 def test_space_separated_affine():
-    img = ci.Cifti2Image.from_filename(pjoin(NIBABEL_TEST_DATA, 'row_major.dconn.nii'))
+    ci.Cifti2Image.from_filename(pjoin(NIBABEL_TEST_DATA, 'row_major.dconn.nii'))
 
 
 def test_read_nifti2():
@@ -73,7 +72,7 @@ def test_read_and_proxies():
 
 @needs_nibabel_data('nitest-cifti2')
 def test_version():
-    for i, dat in enumerate(datafiles):
+    for dat in datafiles:
         img = nib.load(dat)
         assert Version(img.header.version) == Version('2')
diff --git a/nibabel/cifti2/tests/test_new_cifti2.py b/nibabel/cifti2/tests/test_new_cifti2.py
index 0f90b822da..4cf5502ad7 100644
--- a/nibabel/cifti2/tests/test_new_cifti2.py
+++ b/nibabel/cifti2/tests/test_new_cifti2.py
@@ -6,6 +6,7 @@
 These functions are used in the tests to generate most CIFTI file types from
 scratch.
 """
+
 import numpy as np
 import pytest
diff --git a/nibabel/cmdline/__init__.py b/nibabel/cmdline/__init__.py
index 6478e5f261..f0744521bc 100644
--- a/nibabel/cmdline/__init__.py
+++ b/nibabel/cmdline/__init__.py
@@ -6,5 +6,4 @@
 #   copyright and license terms.
 #
 # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
-"""Functionality to be exposed in the command line
-"""
+"""Functionality to be exposed in the command line"""
diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py
index 85d7d8dcad..07aa51e2d3 100644
--- a/nibabel/cmdline/dicomfs.py
+++ b/nibabel/cmdline/dicomfs.py
@@ -25,7 +25,7 @@ class dummy_fuse:
 
 try:
-    import fuse  # type: ignore
+    import fuse  # type: ignore[import]
 
     uid = os.getuid()
     gid = os.getgid()
@@ -37,7 +37,7 @@ class dummy_fuse:
 import nibabel as nib
 import nibabel.dft as dft
 
-encoding = locale.getdefaultlocale()[1]
+encoding = locale.getlocale()[1]
 
 fuse.fuse_python_api = (0, 2)
 
@@ -51,7 +51,7 @@ def __init__(self, fno):
         self.direct_io = False
 
     def __str__(self):
-        return 'FileHandle(%d)' % self.fno
+        return f'FileHandle({self.fno})'
 
 
 class DICOMFS(fuse.Fuse):
@@ -85,11 +85,11 @@ def get_paths(self):
             series_info += f'UID: {series.uid}\n'
             series_info += f'number: {series.number}\n'
             series_info += f'description: {series.description}\n'
-            series_info += 'rows: %d\n' % series.rows
-            series_info += 'columns: %d\n' % series.columns
-            series_info += 'bits allocated: %d\n' % series.bits_allocated
-            series_info += 'bits stored: %d\n' % series.bits_stored
-            series_info += 'storage instances: %d\n' % len(series.storage_instances)
+            series_info += f'rows: {series.rows}\n'
+            series_info += f'columns: {series.columns}\n'
+            series_info += f'bits allocated: {series.bits_allocated}\n'
+            series_info += f'bits stored: {series.bits_stored}\n'
+            series_info += f'storage instances: {len(series.storage_instances)}\n'
             d[series.number] = {
                 'INFO': series_info.encode('ascii', 'replace'),
                 f'{series.number}.nii': (series.nifti_size, series.as_nifti),
@@ -193,9 +193,7 @@ def release(self, path, flags, fh):
 def get_opt_parser():
     # use module docstring for help output
     p = OptionParser(
-        usage='{} [OPTIONS] '.format(
-            os.path.basename(sys.argv[0])
-        ),
+        usage=f'{os.path.basename(sys.argv[0])} [OPTIONS] ',
         version='%prog ' + nib.__version__,
     )
diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py
index b409c7205d..55f827e973 100755
--- a/nibabel/cmdline/diff.py
+++ b/nibabel/cmdline/diff.py
@@ -231,7 +231,6 @@ def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64):
         diffs1 = [None] * (i + 1)
 
         for j, d2 in enumerate(data[i + 1 :], i + 1):
-
             if d1.shape == d2.shape:
                 abs_diff = np.abs(d1 - d2)
                 mean_abs = (np.abs(d1) + np.abs(d2)) * 0.5
@@ -247,15 +246,14 @@ def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64):
                         sub_thr = rel_diff <= max_rel
                         # Since we operated on sub-selected values already, we need
                         # to plug them back in
-                        candidates[
-                            tuple(indexes[sub_thr] for indexes in np.where(candidates))
-                        ] = False
+                        candidates[tuple(indexes[sub_thr] for indexes in np.where(candidates))] = (
+                            False
+                        )
                         max_rel_diff = np.max(rel_diff)
                     else:
                         max_rel_diff = 0
 
                     if np.any(candidates):
-
                         diff_rec = OrderedDict()  # so that abs goes before relative
 
                         diff_rec['abs'] = max_abs_diff.astype(dtype)
@@ -268,8 +266,7 @@ def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64):
                 diffs1.append({'CMP': 'incompat'})
 
         if any(diffs1):
-
-            diffs['DATA(diff %d:)' % (i + 1)] = diffs1
+            diffs[f'DATA(diff {i + 1}:)'] = diffs1
 
     return diffs
 
@@ -296,7 +293,7 @@ def display_diff(files, diff):
     output += field_width.format('Field/File')
 
     for i, f in enumerate(files, 1):
-        output += '%d:%s' % (i, filename_width.format(os.path.basename(f)))
+        output += f'{i}:{filename_width.format(os.path.basename(f))}'
 
     output += '\n'
 
@@ -305,7 +302,7 @@ def display_diff(files, diff):
 
         for item in value:
             if isinstance(item, dict):
-                item_str = ', '.join('%s: %s' % i for i in item.items())
+                item_str = ', '.join('{}: {}'.format(*i) for i in item.items())
             elif item is None:
                 item_str = '-'
             else:
diff --git a/nibabel/cmdline/ls.py b/nibabel/cmdline/ls.py
index ff41afbd0a..72fb227687 100755
--- a/nibabel/cmdline/ls.py
+++ b/nibabel/cmdline/ls.py
@@ -73,7 +73,7 @@ def get_opt_parser():
             action='store_true',
             dest='all_counts',
             default=False,
-            help='Output all counts, even if number of unique values > %d' % MAX_UNIQUE,
+            help=f'Output all counts, even if number of unique values > {MAX_UNIQUE}',
         ),
         Option(
             '-z',
@@ -112,12 +112,12 @@ def proc_file(f, opts):
         and (h.has_data_slope or h.has_data_intercept)
         and not h.get_slope_inter() in ((1.0, 0.0), (None, None))
     ):
-        row += ['@l*%.3g+%.3g' % h.get_slope_inter()]
+        row += ['@l*{:.3g}+{:.3g}'.format(*h.get_slope_inter())]
     else:
         row += ['']
 
     if hasattr(h, 'extensions') and len(h.extensions):
-        row += ['@l#exts: %d' % len(h.extensions)]
+        row += [f'@l#exts: {len(h.extensions)}']
     else:
         row += ['']
 
@@ -166,16 +166,16 @@ def proc_file(f, opts):
                 d = d.reshape(-1)
             if opts.stats:
                 # just # of elements
-                row += ['@l[%d]' % np.prod(d.shape)]
+                row += [f'@l[{np.prod(d.shape)}]']
                 # stats
                 row += [f'@l[{np.min(d):.2g}, {np.max(d):.2g}]' if len(d) else '-']
             if opts.counts:
                 items, inv = np.unique(d, return_inverse=True)
                 if len(items) > 1000 and not opts.all_counts:
-                    counts = _err('%d uniques. Use --all-counts' % len(items))
+                    counts = _err(f'{len(items)} uniques. Use --all-counts')
                 else:
                     freq = np.bincount(inv)
-                    counts = ' '.join('%g:%d' % (i, f) for i, f in zip(items, freq))
+                    counts = ' '.join(f'{i:g}:{f}' for i, f in zip(items, freq))
                 row += ['@l' + counts]
         except OSError as e:
             verbose(2, f'Failed to obtain stats/counts -- {e}')
diff --git a/nibabel/cmdline/nifti_dx.py b/nibabel/cmdline/nifti_dx.py
index 103bbf2640..eb917a04b8 100644
--- a/nibabel/cmdline/nifti_dx.py
+++ b/nibabel/cmdline/nifti_dx.py
@@ -9,8 +9,7 @@
 ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
 """Print nifti diagnostics for header files"""
 
-import sys
-from optparse import OptionParser
+from argparse import ArgumentParser
 
 import nibabel as nib
 
@@ -21,15 +20,27 @@ def main(args=None):
     """Go go team"""
-    parser = OptionParser(
-        usage=f'{sys.argv[0]} [FILE ...]\n\n' + __doc__, version='%prog ' + nib.__version__
+    parser = ArgumentParser(description=__doc__)
+    parser.add_argument('--version', action='version', version=f'%(prog)s {nib.__version__}')
+    parser.add_argument(
+        '-1',
+        '--nifti1',
+        dest='header_class',
+        action='store_const',
+        const=nib.Nifti1Header,
+        default=nib.Nifti1Header,
     )
-    (opts, files) = parser.parse_args(args=args)
+    parser.add_argument(
+        '-2', '--nifti2', dest='header_class', action='store_const', const=nib.Nifti2Header
+    )
+    parser.add_argument('files', nargs='*', metavar='FILE', help='Nifti file names')
+
+    args = parser.parse_args(args=args)
 
-    for fname in files:
+    for fname in args.files:
         with nib.openers.ImageOpener(fname) as fobj:
-            hdr = fobj.read(nib.nifti1.header_dtype.itemsize)
-            result = nib.Nifti1Header.diagnose_binaryblock(hdr)
+            hdr = fobj.read(args.header_class.template_dtype.itemsize)
+            result = args.header_class.diagnose_binaryblock(hdr)
         if len(result):
             print(f'Picky header check output for "{fname}"\n')
             print(result + '\n')
diff --git a/nibabel/cmdline/parrec2nii.py b/nibabel/cmdline/parrec2nii.py
index 9340626395..0ae6b3fb40 100644
--- a/nibabel/cmdline/parrec2nii.py
+++ b/nibabel/cmdline/parrec2nii.py
@@ -1,5 +1,4 @@
-"""Code for PAR/REC to NIfTI converter command
-"""
+"""Code for PAR/REC to NIfTI converter command"""
 
 import csv
 import os
diff --git a/nibabel/cmdline/tck2trk.py b/nibabel/cmdline/tck2trk.py
index d5d29ba430..a73540c446 100644
--- a/nibabel/cmdline/tck2trk.py
+++ b/nibabel/cmdline/tck2trk.py
@@ -1,6 +1,7 @@
 """
 Convert tractograms (TCK -> TRK).
""" + import argparse import os diff --git a/nibabel/cmdline/tests/test_convert.py b/nibabel/cmdline/tests/test_convert.py index 4605bc810d..d500a717a3 100644 --- a/nibabel/cmdline/tests/test_convert.py +++ b/nibabel/cmdline/tests/test_convert.py @@ -71,7 +71,7 @@ def test_convert_dtype(tmp_path, data_dtype): @pytest.mark.parametrize( - 'ext,img_class', + ('ext', 'img_class'), [ ('mgh', nib.MGHImage), ('img', nib.Nifti1Pair), @@ -94,7 +94,7 @@ def test_convert_by_extension(tmp_path, ext, img_class): @pytest.mark.parametrize( - 'ext,img_class', + ('ext', 'img_class'), [ ('mgh', nib.MGHImage), ('img', nib.Nifti1Pair), @@ -119,7 +119,7 @@ def test_convert_imgtype(tmp_path, ext, img_class): def test_convert_nifti_int_fail(tmp_path): infile = get_test_data(fname='anatomical.nii') - outfile = tmp_path / f'output.nii' + outfile = tmp_path / 'output.nii' orig = nib.load(infile) assert not outfile.exists() @@ -141,7 +141,7 @@ def test_convert_nifti_int_fail(tmp_path): @pytest.mark.parametrize( - 'orig_dtype,alias,expected_dtype', + ('orig_dtype', 'alias', 'expected_dtype'), [ ('int64', 'mask', 'uint8'), ('int64', 'compat', 'int32'), diff --git a/nibabel/cmdline/tests/test_parrec2nii.py b/nibabel/cmdline/tests/test_parrec2nii.py index 017df9813a..ccedafb74b 100644 --- a/nibabel/cmdline/tests/test_parrec2nii.py +++ b/nibabel/cmdline/tests/test_parrec2nii.py @@ -1,5 +1,5 @@ -"""Tests for the parrec2nii exe code -""" +"""Tests for the parrec2nii exe code""" + from os.path import basename, isfile, join from unittest.mock import MagicMock, Mock, patch diff --git a/nibabel/cmdline/tests/test_roi.py b/nibabel/cmdline/tests/test_roi.py index ea3852b4da..4692bbb038 100644 --- a/nibabel/cmdline/tests/test_roi.py +++ b/nibabel/cmdline/tests/test_roi.py @@ -1,5 +1,4 @@ import os -import unittest from unittest import mock import numpy as np @@ -120,7 +119,7 @@ def test_nib_roi(tmp_path, inplace): @pytest.mark.parametrize( - 'args, errmsg', + ('args', 'errmsg'), ( (('-i', '1:1'), 'Cannot take zero-length slice'), (('-j', '1::2'), 'Downsampling is not supported'), @@ -139,12 +138,8 @@ def test_nib_roi_bad_slices(capsys, args, errmsg): def test_entrypoint(capsys): # Check that we handle missing args as expected with mock.patch('sys.argv', ['nib-roi', '--help']): - try: - retval = main() - except SystemExit: - pass - else: - assert False, 'argparse exits on --help. If changing to another parser, update test.' 
+        with pytest.raises(SystemExit):
+            main()
     captured = capsys.readouterr()
     assert captured.out.startswith('usage: nib-roi')
diff --git a/nibabel/cmdline/tests/test_stats.py b/nibabel/cmdline/tests/test_stats.py
index 576a408bce..905114e31b 100644
--- a/nibabel/cmdline/tests/test_stats.py
+++ b/nibabel/cmdline/tests/test_stats.py
@@ -8,9 +8,6 @@
 #   copyright and license terms.
 #
 # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
 
-import sys
-from io import StringIO
-
 import numpy as np
 
 from nibabel import Nifti1Image
diff --git a/nibabel/cmdline/tests/test_utils.py b/nibabel/cmdline/tests/test_utils.py
index 8143d648d9..0efb5ee0b9 100644
--- a/nibabel/cmdline/tests/test_utils.py
+++ b/nibabel/cmdline/tests/test_utils.py
@@ -12,8 +12,18 @@
 import pytest
 
 import nibabel as nib
-from nibabel.cmdline.diff import *
-from nibabel.cmdline.utils import *
+from nibabel.cmdline.diff import (
+    display_diff,
+    get_data_diff,
+    get_data_hash_diff,
+    get_headers_diff,
+    main,
+)
+from nibabel.cmdline.utils import (
+    ap,
+    safe_get,
+    table2string,
+)
 from nibabel.testing import data_path
diff --git a/nibabel/cmdline/utils.py b/nibabel/cmdline/utils.py
index 2149235704..d89cc5c964 100644
--- a/nibabel/cmdline/utils.py
+++ b/nibabel/cmdline/utils.py
@@ -10,7 +10,6 @@
 Helper utilities to be used in cmdline applications
 """
 
-
 # global verbosity switch
 import re
 from io import StringIO
diff --git a/nibabel/conftest.py b/nibabel/conftest.py
index 5eba256fa5..1d7389e867 100644
--- a/nibabel/conftest.py
+++ b/nibabel/conftest.py
@@ -5,15 +5,12 @@
 
 # Ignore warning requesting help with nicom
 with pytest.warns(UserWarning):
-    import nibabel.nicom
+    import nibabel.nicom  # noqa: F401
 
 
 @pytest.fixture(scope='session', autouse=True)
 def legacy_printoptions():
-    from packaging.version import Version
-
-    if Version(np.__version__) >= Version('1.22'):
-        np.set_printoptions(legacy='1.21')
+    np.set_printoptions(legacy='1.21')
 
 
 @pytest.fixture
@@ -24,7 +21,7 @@ def max_digits():
         orig_max_str_digits = sys.get_int_max_str_digits()
         yield sys.set_int_max_str_digits
         sys.set_int_max_str_digits(orig_max_str_digits)
-    except AttributeError:  # pragma: no cover
+    except AttributeError:  # PY310 # pragma: no cover
         # Nothing to do for versions of Python that lack these methods
         # They were added as DoS protection in Python 3.11 and backported to
         # some other versions.
diff --git a/nibabel/data.py b/nibabel/data.py
index 7e2fe2af70..8ea056d8e7 100644
--- a/nibabel/data.py
+++ b/nibabel/data.py
@@ -1,6 +1,7 @@
 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
 # vi: set ft=python sts=4 ts=4 sw=4 et:
 """Utilities to find files from NIPY data packages"""
+
 import configparser
 import glob
 import os
@@ -86,8 +87,7 @@ def list_files(self, relative=True):
         for base, dirs, files in os.walk(self.base_path):
             if relative:
                 base = base[len(self.base_path) + 1 :]
-            for filename in files:
-                out_list.append(pjoin(base, filename))
+            out_list.extend(pjoin(base, filename) for filename in files)
         return out_list
diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py
index eaf341271e..565a228794 100644
--- a/nibabel/dataobj_images.py
+++ b/nibabel/dataobj_images.py
@@ -7,20 +7,21 @@
 * returns an array from ``numpy.asanyarray(obj)``;
 * has an attribute or property ``shape``.
""" + from __future__ import annotations import typing as ty import numpy as np -from .arrayproxy import ArrayLike from .deprecated import deprecate_with_version from .filebasedimages import FileBasedHeader, FileBasedImage -from .fileholders import FileMap -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: import numpy.typing as npt + from .arrayproxy import ArrayLike + from .fileholders import FileMap from .filename_parser import FileSpec ArrayImgT = ty.TypeVar('ArrayImgT', bound='DataobjImage') @@ -437,7 +438,7 @@ def from_file_map( Parameters ---------- file_map : dict - Mapping with (kay, value) pairs of (``file_type``, FileHolder + Mapping with (key, value) pairs of (``file_type``, FileHolder instance giving file-likes for each file needed for this image type. mmap : {True, False, 'c', 'r'}, optional, keyword only diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py index 092370106e..15d3e53265 100644 --- a/nibabel/deprecated.py +++ b/nibabel/deprecated.py @@ -1,4 +1,5 @@ """Module to help with deprecating objects and classes""" + from __future__ import annotations import typing as ty @@ -7,7 +8,7 @@ from .deprecator import Deprecator from .pkg_info import cmp_pkg_version -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: P = ty.ParamSpec('P') diff --git a/nibabel/deprecator.py b/nibabel/deprecator.py index 779fdb462d..83118dd539 100644 --- a/nibabel/deprecator.py +++ b/nibabel/deprecator.py @@ -1,17 +1,29 @@ """Class for recording and reporting deprecations""" + from __future__ import annotations import functools import re +import sys import typing as ty import warnings +from textwrap import dedent -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: T = ty.TypeVar('T') P = ty.ParamSpec('P') _LEADING_WHITE = re.compile(r'^(\s*)') + +def _dedent_docstring(docstring): + """Compatibility with Python 3.13+. + + xref: https://github.com/python/cpython/issues/81283 + """ + return '\n'.join([dedent(line) for line in docstring.split('\n')]) + + TESTSETUP = """ .. testsetup:: @@ -32,6 +44,10 @@ """ +if sys.version_info >= (3, 13): + TESTSETUP = _dedent_docstring(TESTSETUP) + TESTCLEANUP = _dedent_docstring(TESTCLEANUP) + class ExpiredDeprecationError(RuntimeError): """Error for expired deprecation diff --git a/nibabel/dft.py b/nibabel/dft.py index ee34595b3f..23108895b2 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -9,7 +9,6 @@ # Copyright (C) 2011 Christian Haselgrove """DICOM filesystem tools""" - import contextlib import getpass import logging @@ -44,7 +43,6 @@ class VolumeError(DFTError): class InstanceStackError(DFTError): - """bad series of instance numbers""" def __init__(self, series, i, si): @@ -161,10 +159,10 @@ def as_nifti(self): data = numpy.ndarray( (len(self.storage_instances), self.rows, self.columns), dtype=numpy.int16 ) - for (i, si) in enumerate(self.storage_instances): + for i, si in enumerate(self.storage_instances): if i + 1 != si.instance_number: raise InstanceStackError(self, i, si) - logger.info('reading %d/%d' % (i + 1, len(self.storage_instances))) + logger.info(f'reading {i + 1}/{len(self.storage_instances)}') d = self.storage_instances[i].dicom() data[i, :, :] = d.pixel_array @@ -233,7 +231,7 @@ def __getattribute__(self, name): WHERE storage_instance = ? 
                        ORDER BY directory, name"""
             c.execute(query, (self.uid,))
-            val = ['%s/%s' % tuple(row) for row in c]
+            val = ['{}/{}'.format(*tuple(row)) for row in c]
             self.files = val
         return val
@@ -243,7 +241,7 @@ def dicom(self):
 
 def _get_subdirs(base_dir, files_dict=None, followlinks=False):
     dirs = []
-    for (dirpath, dirnames, filenames) in os.walk(base_dir, followlinks=followlinks):
+    for dirpath, dirnames, filenames in os.walk(base_dir, followlinks=followlinks):
         abs_dir = os.path.realpath(dirpath)
         if abs_dir in dirs:
             raise CachingError(f'link cycle detected under {base_dir}')
diff --git a/nibabel/ecat.py b/nibabel/ecat.py
index 1db902d10a..f634bcd8a6 100644
--- a/nibabel/ecat.py
+++ b/nibabel/ecat.py
@@ -42,6 +42,7 @@
 GPL and some of the header files are adapted from CTI files (called CTI code
 below).  It's not clear what the licenses are for these files.
 """
+
 import warnings
 from numbers import Integral
 
@@ -308,14 +309,14 @@ def get_patient_orient(self):
         """
         code = self._structarr['patient_orientation'].item()
         if code not in self._patient_orient_codes:
-            raise KeyError('Ecat Orientation CODE %d not recognized' % code)
+            raise KeyError(f'Ecat Orientation CODE {code} not recognized')
         return self._patient_orient_codes[code]
 
     def get_filetype(self):
         """Type of ECAT Matrix File from code stored in header"""
         code = self._structarr['file_type'].item()
         if code not in self._ft_codes:
-            raise KeyError('Ecat Filetype CODE %d not recognized' % code)
+            raise KeyError(f'Ecat Filetype CODE {code} not recognized')
         return self._ft_codes[code]
 
     @classmethod
@@ -389,7 +390,7 @@ def read_mlist(fileobj, endianness):
         mlist_index += n_rows
         if mlist_block_no <= 2:  # should block_no in (1, 2) be an error?
             break
-    return np.row_stack(mlists)
+    return np.vstack(mlists)
 
 
 def get_frame_order(mlist):
@@ -513,7 +514,6 @@ def read_subheaders(fileobj, mlist, endianness):
 
 
 class EcatSubHeader:
-
     _subhdrdtype = subhdr_dtype
     _data_type_codes = data_type_codes
 
@@ -957,7 +957,7 @@ def to_file_map(self, file_map=None):
         hdr.write_to(hdrf)
 
         # Write every frames
-        for index in range(0, self.header['num_frames']):
+        for index in range(self.header['num_frames']):
             # Move to subheader offset
             frame_offset = subheaders._get_frame_offset(index) - 512
             imgf.seek(frame_offset)
diff --git a/nibabel/environment.py b/nibabel/environment.py
index 09aaa6320f..a828ccb865 100644
--- a/nibabel/environment.py
+++ b/nibabel/environment.py
@@ -1,6 +1,7 @@
 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
 # vi: set ft=python sts=4 ts=4 sw=4 et:
 """Settings from the system environment relevant to NIPY"""
+
 import os
 from os.path import join as pjoin
diff --git a/nibabel/eulerangles.py b/nibabel/eulerangles.py
index 13dc059644..b1d187e8c1 100644
--- a/nibabel/eulerangles.py
+++ b/nibabel/eulerangles.py
@@ -82,6 +82,7 @@
 ``y``, followed by rotation around ``x``, is known (confusingly) as "xyz",
 pitch-roll-yaw, Cardan angles, or Tait-Bryan angles.
""" + import math from functools import reduce diff --git a/nibabel/externals/conftest.py b/nibabel/externals/conftest.py index 33f88eb323..472f2f0296 100644 --- a/nibabel/externals/conftest.py +++ b/nibabel/externals/conftest.py @@ -6,7 +6,7 @@ import os from contextlib import contextmanager - @contextmanager # type: ignore + @contextmanager # type: ignore[no-redef] def _chdir(path): cwd = os.getcwd() os.chdir(path) diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 42760cccdf..086e31f123 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Common interface for any image format--volume or surface, binary or xml""" + from __future__ import annotations import io @@ -19,10 +20,10 @@ from .filename_parser import TypesFilenamesError, _stringify_path, splitext_addext, types_filenames from .openers import ImageOpener -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: from .filename_parser import ExtensionSpec, FileSpec -FileSniff = ty.Tuple[bytes, str] +FileSniff = tuple[bytes, str] ImgT = ty.TypeVar('ImgT', bound='FileBasedImage') HdrT = ty.TypeVar('HdrT', bound='FileBasedHeader') @@ -53,13 +54,13 @@ def from_header(klass: type[HdrT], header: FileBasedHeader | ty.Mapping | None = @classmethod def from_fileobj(klass: type[HdrT], fileobj: io.IOBase) -> HdrT: - raise NotImplementedError # pragma: no cover + raise NotImplementedError def write_to(self, fileobj: io.IOBase) -> None: - raise NotImplementedError # pragma: no cover + raise NotImplementedError def __eq__(self, other: object) -> bool: - raise NotImplementedError # pragma: no cover + raise NotImplementedError def __ne__(self, other: object) -> bool: return not self == other @@ -250,7 +251,7 @@ def from_filename(klass: type[ImgT], filename: FileSpec) -> ImgT: @classmethod def from_file_map(klass: type[ImgT], file_map: FileMap) -> ImgT: - raise NotImplementedError # pragma: no cover + raise NotImplementedError @classmethod def filespec_to_file_map(klass, filespec: FileSpec) -> FileMap: @@ -307,7 +308,7 @@ def to_filename(self, filename: FileSpec, **kwargs) -> None: self.to_file_map(**kwargs) def to_file_map(self, file_map: FileMap | None = None, **kwargs) -> None: - raise NotImplementedError # pragma: no cover + raise NotImplementedError @classmethod def make_file_map(klass, mapping: ty.Mapping[str, str | io.IOBase] | None = None) -> FileMap: @@ -372,7 +373,7 @@ def from_image(klass: type[ImgT], img: FileBasedImage) -> ImgT: img : ``FileBasedImage`` instance Image, of our own class """ - raise NotImplementedError # pragma: no cover + raise NotImplementedError @classmethod def _sniff_meta_for( diff --git a/nibabel/fileholders.py b/nibabel/fileholders.py index a27715350d..df7c34af63 100644 --- a/nibabel/fileholders.py +++ b/nibabel/fileholders.py @@ -7,14 +7,17 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Fileholder class""" + from __future__ import annotations -import io import typing as ty from copy import copy from .openers import ImageOpener +if ty.TYPE_CHECKING: + import io + class FileHolderError(Exception): pass diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py index 92a2f4b1f5..d2c23ae6e4 100644 --- a/nibabel/filename_parser.py +++ b/nibabel/filename_parser.py @@ -7,13 +7,14 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Create filename pairs, triplets etc, with expected 
extensions""" + from __future__ import annotations import os import pathlib import typing as ty -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: FileSpec = str | os.PathLike[str] ExtensionSpec = tuple[str, str | None] diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py index fe7d6bba54..91ed1f70a1 100644 --- a/nibabel/fileslice.py +++ b/nibabel/fileslice.py @@ -1,4 +1,5 @@ """Utilities for getting array slices out of file-like objects""" + import operator from functools import reduce from mmap import mmap @@ -126,7 +127,7 @@ def canonical_slicers(sliceobj, shape, check_inds=True): if slicer < 0: slicer = dim_len + slicer elif check_inds and slicer >= dim_len: - raise ValueError('Integer index %d to large' % slicer) + raise ValueError(f'Integer index {slicer} too large') can_slicers.append(slicer) # Fill out any missing dimensions if n_real < n_dim: diff --git a/nibabel/freesurfer/__init__.py b/nibabel/freesurfer/__init__.py index 806d19a272..1ab3859756 100644 --- a/nibabel/freesurfer/__init__.py +++ b/nibabel/freesurfer/__init__.py @@ -1,5 +1,4 @@ -"""Reading functions for freesurfer files -""" +"""Reading functions for freesurfer files""" from .io import ( read_annot, diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index b4d6ef2a3a..5b3f6a3664 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -1,5 +1,4 @@ -"""Read / write FreeSurfer geometry, morphometry, label, annotation formats -""" +"""Read / write FreeSurfer geometry, morphometry, label, annotation formats""" import getpass import time @@ -428,7 +427,7 @@ def _read_annot_ctab_old_format(fobj, n_entries): for i in range(n_entries): # structure name length + string name_length = np.fromfile(fobj, dt, 1)[0] - name = np.fromfile(fobj, '|S%d' % name_length, 1)[0] + name = np.fromfile(fobj, f'|S{name_length}', 1)[0] names.append(name) # read RGBT for this entry ctab[i, :4] = np.fromfile(fobj, dt, 4) @@ -466,13 +465,13 @@ def _read_annot_ctab_new_format(fobj, ctab_version): dt = _ANNOT_DT # This code works with a file version == 2, nothing else if ctab_version != 2: - raise Exception('Unrecognised .annot file version (%i)', ctab_version) + raise Exception(f'Unrecognised .annot file version ({ctab_version})') # maximum LUT index present in the file max_index = np.fromfile(fobj, dt, 1)[0] ctab = np.zeros((max_index, 5), dt) # orig_tab string length + string length = np.fromfile(fobj, dt, 1)[0] - np.fromfile(fobj, '|S%d' % length, 1)[0] # Orig table path + np.fromfile(fobj, f'|S{length}', 1)[0] # Orig table path # number of LUT entries present in the file entries_to_read = np.fromfile(fobj, dt, 1)[0] names = list() @@ -481,7 +480,7 @@ def _read_annot_ctab_new_format(fobj, ctab_version): idx = np.fromfile(fobj, dt, 1)[0] # structure name length + string name_length = np.fromfile(fobj, dt, 1)[0] - name = np.fromfile(fobj, '|S%d' % name_length, 1)[0] + name = np.fromfile(fobj, f'|S{name_length}', 1)[0] names.append(name) # RGBT ctab[idx, :4] = np.fromfile(fobj, dt, 4) @@ -526,7 +525,7 @@ def write(num, dtype=dt): def write_string(s): s = (s if isinstance(s, bytes) else s.encode()) + b'\x00' write(len(s)) - write(s, dtype='|S%d' % len(s)) + write(s, dtype=f'|S{len(s)}') # Generate annotation values for each ctab entry if fill_ctab: diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 5dd2660342..0adcb88e2c 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -10,6 +10,7 @@ Author: Krish Subramaniam """ + from os.path import 
splitext import numpy as np @@ -280,7 +281,7 @@ def set_zooms(self, zooms): zooms = np.asarray(zooms) ndims = self._ndims() if len(zooms) > ndims: - raise HeaderDataError('Expecting %d zoom values' % ndims) + raise HeaderDataError(f'Expecting {ndims} zoom values') if np.any(zooms[:3] <= 0): raise HeaderDataError( f'Spatial (first three) zooms must be positive; got {tuple(zooms[:3])}' @@ -495,7 +496,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): Parameters ---------- file_map : dict - Mapping with (kay, value) pairs of (``file_type``, FileHolder + Mapping with (key, value) pairs of (``file_type``, FileHolder instance giving file-likes for each file needed for this image type. mmap : {True, False, 'c', 'r'}, optional, keyword only @@ -569,7 +570,9 @@ def _write_data(self, mghfile, data, header): """ shape = header.get_data_shape() if data.shape != shape: - raise HeaderDataError('Data should be shape (%s)' % ', '.join(str(s) for s in shape)) + raise HeaderDataError( + 'Data should be shape ({})'.format(', '.join(str(s) for s in shape)) + ) offset = header.get_data_offset() out_dtype = header.get_data_dtype() array_to_file(data, mghfile, out_dtype, offset) diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index 189f1a9dd7..d69587811b 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -460,6 +460,7 @@ def test_as_byteswapped(self): for endianness in (None,) + LITTLE_CODES: with pytest.raises(ValueError): hdr.as_byteswapped(endianness) + # Note that contents is not rechecked on swap / copy class DC(self.header_class): def check_fix(self, *args, **kwargs): diff --git a/nibabel/funcs.py b/nibabel/funcs.py index f83ed68709..cda4a5d2ed 100644 --- a/nibabel/funcs.py +++ b/nibabel/funcs.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Processor functions for images""" + import numpy as np from .loadsave import load diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 7aba877309..76fcc4a451 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -11,13 +11,14 @@ The Gifti specification was (at time of writing) available as a PDF download from http://www.nitrc.org/projects/gifti/ """ + from __future__ import annotations import base64 import sys import warnings from copy import copy -from typing import Type, cast +from typing import cast import numpy as np @@ -373,7 +374,7 @@ def _to_xml_element(self): def print_summary(self): print('Dataspace: ', xform_codes.niistring[self.dataspace]) print('XFormSpace: ', xform_codes.niistring[self.xformspace]) - print('Affine Transformation Matrix: \n', self.xform) + print('Affine Transformation Matrix:\n', self.xform) def _data_tag_element(dataarray, encoding, dtype, ordering): @@ -521,7 +522,7 @@ def _to_xml_element(self): }, ) for di, dn in enumerate(self.dims): - data_array.attrib['Dim%d' % di] = str(dn) + data_array.attrib[f'Dim{di}'] = str(dn) if self.meta is not None: data_array.append(self.meta._to_xml_element()) @@ -597,7 +598,7 @@ class GiftiImage(xml.XmlSerializable, SerializableImage): # The parser will in due course be a GiftiImageParser, but we can't set # that now, because it would result in a circular import. We set it after # the class has been defined, at the end of the class definition. 
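That deferred assignment is a general pattern for breaking definition cycles; as an isolated sketch (stand-in names, not the actual GIFTI classes):

class Image:
    parser: type  # annotated now, assigned once the parser class exists

class ImageParser:
    image_class = Image  # the reference that would otherwise be circular

Image.parser = ImageParser  # filled in after the class definition, as described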
- parser: Type[xml.XmlParser] + parser: type[xml.XmlParser] def __init__( self, diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index ccd608324a..5bcd8c8c32 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -284,8 +284,8 @@ def EndElementHandler(self, name): if name == 'GIFTI': if hasattr(self, 'expected_numDA') and self.expected_numDA != self.img.numDA: warnings.warn( - 'Actual # of data arrays does not match ' - '# expected: %d != %d.' % (self.expected_numDA, self.img.numDA) + 'Actual # of data arrays does not match # expected: ' + f'{self.expected_numDA} != {self.img.numDA}.' ) # remove last element of the list self.fsm_state.pop() diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index a2f8395cae..416faf3c84 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -1,20 +1,19 @@ -"""Testing gifti objects -""" +"""Testing gifti objects""" + import itertools import sys -import warnings from io import BytesIO import numpy as np import pytest -from numpy.testing import assert_array_almost_equal, assert_array_equal +from numpy.testing import assert_array_equal from nibabel.tmpdirs import InTemporaryDirectory from ... import load from ...fileholders import FileHolder from ...nifti1 import data_type_codes -from ...testing import get_test_data +from ...testing import deprecated_to, expires, get_test_data from .. import ( GiftiCoordSystem, GiftiDataArray, @@ -275,27 +274,29 @@ def test_labeltable(): assert len(img.labeltable.labels) == 2 +@expires('6.0.0') def test_metadata(): md = GiftiMetaData(key='value') # Old initialization methods - with pytest.warns(DeprecationWarning) as w: + with deprecated_to('6.0.0'): nvpair = GiftiNVPairs('key', 'value') with pytest.warns(FutureWarning) as w: md2 = GiftiMetaData(nvpair=nvpair) assert len(w) == 1 - with pytest.warns(DeprecationWarning) as w: + with deprecated_to('6.0.0'): md3 = GiftiMetaData.from_dict({'key': 'value'}) assert md == md2 == md3 == {'key': 'value'} # .data as a list of NVPairs is going away - with pytest.warns(DeprecationWarning) as w: + with deprecated_to('6.0.0'): assert md.data[0].name == 'key' + with deprecated_to('6.0.0'): assert md.data[0].value == 'value' - assert len(w) == 2 +@expires('6.0.0') def test_metadata_list_interface(): md = GiftiMetaData(key='value') - with pytest.warns(DeprecationWarning): + with deprecated_to('6.0.0'): mdlist = md.data assert len(mdlist) == 1 assert mdlist[0].name == 'key' @@ -312,7 +313,7 @@ def test_metadata_list_interface(): assert md['foo'] == 'bar' # Append new NVPair - with pytest.warns(DeprecationWarning) as w: + with deprecated_to('6.0.0'): nvpair = GiftiNVPairs('key', 'value') mdlist.append(nvpair) assert len(mdlist) == 2 @@ -327,7 +328,7 @@ def test_metadata_list_interface(): assert len(md) == 0 # Extension adds multiple keys - with pytest.warns(DeprecationWarning) as w: + with deprecated_to('6.0'): foobar = GiftiNVPairs('foo', 'bar') mdlist.extend([nvpair, foobar]) assert len(mdlist) == 2 @@ -335,7 +336,7 @@ def test_metadata_list_interface(): assert md == {'key': 'value', 'foo': 'bar'} # Insertion updates list order, though we don't attempt to preserve it in the dict - with pytest.warns(DeprecationWarning) as w: + with deprecated_to('6.0'): lastone = GiftiNVPairs('last', 'one') mdlist.insert(1, lastone) assert len(mdlist) == 3 @@ -358,14 +359,14 @@ def test_metadata_list_interface(): mypair.value = 'strings' assert 'completelynew' not in md assert md 
== {'foo': 'bar', 'last': 'one'} - # Check popping from the end (lastone inserted before foobar) - lastpair = mdlist.pop() + # Check popping from the end (last one inserted before foobar) + mdlist.pop() assert len(mdlist) == 1 assert len(md) == 1 assert md == {'last': 'one'} # And let's remove an old pair with a new object - with pytest.warns(DeprecationWarning) as w: + with deprecated_to('6.0'): lastoneagain = GiftiNVPairs('last', 'one') mdlist.remove(lastoneagain) assert len(mdlist) == 0 @@ -422,13 +423,14 @@ def test_gifti_coord(capsys): gcs.xform = None gcs.print_summary() captured = capsys.readouterr() - assert captured.out == '\n'.join( - [ - 'Dataspace: NIFTI_XFORM_UNKNOWN', - 'XFormSpace: NIFTI_XFORM_UNKNOWN', - 'Affine Transformation Matrix: ', - ' None\n', - ] + assert ( + captured.out + == """\ +Dataspace: NIFTI_XFORM_UNKNOWN +XFormSpace: NIFTI_XFORM_UNKNOWN +Affine Transformation Matrix: + None +""" ) gcs.to_xml() diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index f972425679..6ca54df038 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -241,7 +241,7 @@ def test_load_dataarray1(): me = img.darrays[0].meta assert 'AnatomicalStructurePrimary' in me assert 'AnatomicalStructureSecondary' in me - me['AnatomicalStructurePrimary'] == 'CortexLeft' + assert me['AnatomicalStructurePrimary'] == 'CortexLeft' assert_array_almost_equal(img.darrays[0].coordsys.xform, np.eye(4, 4)) assert xform_codes.niistring[img.darrays[0].coordsys.dataspace] == 'NIFTI_XFORM_TALAIRACH' assert xform_codes.niistring[img.darrays[0].coordsys.xformspace] == 'NIFTI_XFORM_TALAIRACH' @@ -279,7 +279,7 @@ def test_load_dataarray4(): def test_dataarray5(): img5 = load(DATA_FILE5) for da in img5.darrays: - gifti_endian_codes.byteorder[da.endian] == 'little' + assert gifti_endian_codes.byteorder[da.endian] == 'little' assert_array_almost_equal(img5.darrays[0].data, DATA_FILE5_darr1) assert_array_almost_equal(img5.darrays[1].data, DATA_FILE5_darr2) # Round trip tested below @@ -447,13 +447,13 @@ def test_external_file_failure_cases(): shutil.copy(DATA_FILE7, '.') filename = pjoin(tmpdir, basename(DATA_FILE7)) with pytest.raises(GiftiParseError): - img = load(filename) + load(filename) # load from in-memory xml string (parser requires it as bytes) with open(DATA_FILE7, 'rb') as f: xmldata = f.read() parser = GiftiImageParser() with pytest.raises(GiftiParseError): - img = parser.parse(xmldata) + parser.parse(xmldata) def test_load_compressed(): diff --git a/nibabel/gifti/util.py b/nibabel/gifti/util.py index 9393292013..791f133022 100644 --- a/nibabel/gifti/util.py +++ b/nibabel/gifti/util.py @@ -10,7 +10,7 @@ from ..volumeutils import Recoder # Translate dtype.kind char codes to XML text output strings -KIND2FMT = {'i': '%i', 'u': '%i', 'f': '%10.6f', 'c': '%10.6f', 'V': ''} +KIND2FMT = {'i': '%d', 'u': '%d', 'f': '%10.6f', 'c': '%10.6f', 'V': ''} array_index_order_codes = Recoder( ( diff --git a/nibabel/imageclasses.py b/nibabel/imageclasses.py index b36131ed94..66f984e268 100644 --- a/nibabel/imageclasses.py +++ b/nibabel/imageclasses.py @@ -7,13 +7,14 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Define supported image classes and names""" + from __future__ import annotations +from typing import TYPE_CHECKING + from .analyze import AnalyzeImage from .brikhead import AFNIImage from .cifti2 import Cifti2Image -from .dataobj_images import DataobjImage -from 
.filebasedimages import FileBasedImage from .freesurfer import MGHImage from .gifti import GiftiImage from .minc1 import Minc1Image @@ -24,6 +25,10 @@ from .spm2analyze import Spm2AnalyzeImage from .spm99analyze import Spm99AnalyzeImage +if TYPE_CHECKING: + from .dataobj_images import DataobjImage + from .filebasedimages import FileBasedImage + # Ordered by the load/save priority. all_image_classes: list[type[FileBasedImage]] = [ Nifti1Pair, diff --git a/nibabel/imageglobals.py b/nibabel/imageglobals.py index 551719a7ee..81a1742809 100644 --- a/nibabel/imageglobals.py +++ b/nibabel/imageglobals.py @@ -23,6 +23,7 @@ Use ``logger.level = 1`` to see all messages. """ + import logging error_level = 40 diff --git a/nibabel/imagestats.py b/nibabel/imagestats.py index 38dc9d3f16..36fbddee0e 100644 --- a/nibabel/imagestats.py +++ b/nibabel/imagestats.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Functions for computing image statistics""" + import numpy as np from nibabel.imageclasses import spatial_axes_first diff --git a/nibabel/info.py b/nibabel/info.py index a608932fa8..87727cab13 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -108,4 +108,4 @@ .. _Digital Object Identifier: https://en.wikipedia.org/wiki/Digital_object_identifier .. _zenodo: https://zenodo.org -""" # noqa: E501 +""" diff --git a/nibabel/loadsave.py b/nibabel/loadsave.py index 463a687975..e39aeceba3 100644 --- a/nibabel/loadsave.py +++ b/nibabel/loadsave.py @@ -8,6 +8,7 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # module imports """Utilities to load and save image objects""" + from __future__ import annotations import os @@ -25,7 +26,7 @@ _compressed_suffixes = ('.gz', '.bz2', '.zst') -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: from .filebasedimages import FileBasedImage from .filename_parser import FileSpec diff --git a/nibabel/minc1.py b/nibabel/minc1.py index 5f8422bc23..d0b9fd5375 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Read MINC1 format images""" + from __future__ import annotations from numbers import Integral diff --git a/nibabel/minc2.py b/nibabel/minc2.py index 3096ef9499..161be5c111 100644 --- a/nibabel/minc2.py +++ b/nibabel/minc2.py @@ -25,6 +25,7 @@ mincstats my_funny.mnc """ + import warnings import numpy as np @@ -163,7 +164,7 @@ class Minc2Image(Minc1Image): def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): # Import of h5py might take awhile for MPI-enabled builds # So we are importing it here "on demand" - import h5py # type: ignore + import h5py # type: ignore[import] holder = file_map['image'] if holder.filename is None: diff --git a/nibabel/nicom/__init__.py b/nibabel/nicom/__init__.py index 3a389db172..d15e0846ff 100644 --- a/nibabel/nicom/__init__.py +++ b/nibabel/nicom/__init__.py @@ -19,6 +19,7 @@ dwiparams structreader """ + import warnings warnings.warn( diff --git a/nibabel/nicom/ascconv.py b/nibabel/nicom/ascconv.py index be6da9786c..2eca5a1579 100644 --- a/nibabel/nicom/ascconv.py +++ b/nibabel/nicom/ascconv.py @@ -3,13 +3,14 @@ """ Parse the "ASCCONV" meta data format found in a variety of Siemens MR files. 
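As background for the parser changes below: since Python 3.8 every literal parses to an ``ast.Constant`` node, and the legacy ``ast.Num``/``ast.Str`` aliases were removed in Python 3.12, so the per-type branches can collapse into one. A quick sketch:

import ast

assign = ast.parse('dFlipAngle = 90.0').body[0]
assert isinstance(assign.value, ast.Constant)
assert assign.value.value == 90.0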
""" + import ast import re from collections import OrderedDict ASCCONV_RE = re.compile( r'### ASCCONV BEGIN((?:\s*[^=\s]+=[^=\s]+)*) ###\n(.*?)\n### ASCCONV END ###', - flags=re.M | re.S, + flags=re.MULTILINE | re.DOTALL, ) @@ -89,10 +90,7 @@ def assign2atoms(assign_ast, default_class=int): target = target.value prev_target_type = OrderedDict elif isinstance(target, ast.Subscript): - if isinstance(target.slice, ast.Constant): # PY39 - index = target.slice.n - else: # PY38 - index = target.slice.value.n + index = target.slice.value atoms.append(Atom(target, prev_target_type, index)) target = target.value prev_target_type = list @@ -173,12 +171,10 @@ def obj_from_atoms(atoms, namespace): def _get_value(assign): value = assign.value - if isinstance(value, ast.Num): - return value.n - if isinstance(value, ast.Str): - return value.s + if isinstance(value, ast.Constant): + return value.value if isinstance(value, ast.UnaryOp) and isinstance(value.op, ast.USub): - return -value.operand.n + return -value.operand.value raise AscconvParseError(f'Unexpected RHS of assignment: {value}') diff --git a/nibabel/nicom/csareader.py b/nibabel/nicom/csareader.py index 40f3f852d9..b98dae7403 100644 --- a/nibabel/nicom/csareader.py +++ b/nibabel/nicom/csareader.py @@ -1,5 +1,5 @@ -"""CSA header reader from SPM spec -""" +"""CSA header reader from SPM spec""" + import numpy as np from .structreader import Unpacker @@ -179,7 +179,7 @@ def get_vector(csa_dict, tag_name, n): if len(items) == 0: return None if len(items) != n: - raise ValueError('Expecting %d vector' % n) + raise ValueError(f'Expecting {n} vector') return np.array(items) diff --git a/nibabel/nicom/dicomreaders.py b/nibabel/nicom/dicomreaders.py index 5892bb8db2..07362ee47d 100644 --- a/nibabel/nicom/dicomreaders.py +++ b/nibabel/nicom/dicomreaders.py @@ -131,7 +131,7 @@ def slices_to_series(wrappers): break else: # no match in current volume lists volume_lists.append([dw]) - print('We appear to have %d Series' % len(volume_lists)) + print(f'We appear to have {len(volume_lists)} Series') # second pass out_vol_lists = [] for vol_list in volume_lists: @@ -143,7 +143,7 @@ def slices_to_series(wrappers): out_vol_lists += _third_pass(vol_list) continue out_vol_lists.append(vol_list) - print('We have %d volumes after second pass' % len(out_vol_lists)) + print(f'We have {len(out_vol_lists)} volumes after second pass') # final pass check for vol_list in out_vol_lists: zs = [s.slice_indicator for s in vol_list] diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 5ff4f33052..64b2b4a96d 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -13,16 +13,18 @@ """ import operator +import re import warnings +from functools import cached_property import numpy as np from nibabel.optpkg import optional_package -from ..onetime import auto_attr as one_time from ..openers import ImageOpener from . import csareader as csar from .dwiparams import B2q, nearest_pos_semi_def, q2bg +from .utils import Vendor, find_private_section, vendor_from_private pydicom = optional_package('pydicom')[0] @@ -59,7 +61,7 @@ def wrapper_from_file(file_like, *args, **kwargs): return wrapper_from_data(dcm_data) -def wrapper_from_data(dcm_data): +def wrapper_from_data(dcm_data, frame_filters=None): """Create DICOM wrapper from DICOM data object Parameters @@ -68,6 +70,9 @@ def wrapper_from_data(dcm_data): Object allowing attribute access, with DICOM attributes. Probably a dataset as read by ``pydicom``. 
+ frame_filters + Optionally override the `frame_filters` used to create a `MultiframeWrapper` + Returns ------- dcm_w : ``dicomwrappers.Wrapper`` or subclass @@ -76,9 +81,8 @@ sop_class = dcm_data.get('SOPClassUID') # try to detect what type of dicom object to wrap if sop_class == '1.2.840.10008.5.1.4.1.1.4.1': # Enhanced MR Image Storage - # currently only Philips is using Enhanced Multiframe DICOM - return MultiframeWrapper(dcm_data) - # Check for Siemens DICOM format types + return MultiframeWrapper(dcm_data, frame_filters) + # Check for non-enhanced (legacy) Siemens DICOM format types # Only Siemens will have data for the CSA header try: csa = csar.get_csa_header(dcm_data) @@ -103,6 +107,7 @@ class Wrapper: Methods: * get_data() + * get_unscaled_data() * get_pixel_array() * is_same_series(other) * __getitem__ : return attributes from `dcm_data` @@ -120,6 +125,8 @@ class Wrapper: * image_position : sequence length 3 * slice_indicator : float * series_signature : tuple + * scale_factors : (N, 2) array + * vendor : Vendor """ is_csa = False @@ -136,11 +143,35 @@ def __init__(self, dcm_data): dcm_data : object object should allow 'get' and '__getitem__' access. Usually this will be a ``dicom.dataset.Dataset`` object resulting from reading a - DICOM file, but a dictionary should also work. + DICOM file. """ self.dcm_data = dcm_data - @one_time + @cached_property + def vendor(self): + """The vendor of the instrument that produced the DICOM""" + # Look at manufacturer tag first + mfgr = self.get('Manufacturer') + if mfgr: + if re.search('Siemens', mfgr, re.IGNORECASE): + return Vendor.SIEMENS + if re.search('Philips', mfgr, re.IGNORECASE): + return Vendor.PHILIPS + if re.search('GE Medical', mfgr, re.IGNORECASE): + return Vendor.GE + # Next look at UID prefixes + for uid_src in ('StudyInstanceUID', 'SeriesInstanceUID', 'SOPInstanceUID'): + uid = str(self.get(uid_src)) + if uid.startswith(('1.3.12.2.1007.', '1.3.12.2.1107.')): + return Vendor.SIEMENS + if uid.startswith(('1.3.46', '1.3.12.2.1017')): + return Vendor.PHILIPS + if uid.startswith('1.2.840.113619'): + return Vendor.GE + # Finally look for vendor specific private blocks + return vendor_from_private(self.dcm_data) + + @cached_property def image_shape(self): """The array shape as it will be returned by ``get_data()``""" shape = (self.get('Rows'), self.get('Columns')) @@ -148,7 +179,7 @@ def image_shape(self): return None return shape - @one_time + @cached_property def image_orient_patient(self): """Note that this is _not_ LR flipped""" iop = self.get('ImageOrientationPatient') @@ -158,7 +189,7 @@ def image_orient_patient(self): iop = np.array(list(map(float, iop))) return np.array(iop).reshape(2, 3).T - @one_time + @cached_property def slice_normal(self): iop = self.image_orient_patient if iop is None: @@ -166,7 +197,7 @@ def slice_normal(self): # iop[:, 0] is column index cosine, iop[:, 1] is row index cosine return np.cross(iop[:, 1], iop[:, 0]) - @one_time + @cached_property def rotation_matrix(self): """Return rotation matrix between array indices and mm @@ -193,7 +224,7 @@ def rotation_matrix(self): raise WrapperPrecisionError('Rotation matrix not nearly orthogonal') return R - @one_time + @cached_property def voxel_sizes(self): """voxel sizes for array as returned by ``get_data()``""" # pix space gives (row_spacing, column_spacing).
That is, the @@ -212,7 +243,7 @@ def voxel_sizes(self): pix_space = list(map(float, pix_space)) return tuple(pix_space + [zs]) - @one_time + @cached_property def image_position(self): """Return position of first voxel in data block @@ -231,7 +262,7 @@ def image_position(self): # Values are python Decimals in pydicom 0.9.7 return np.array(list(map(float, ipp))) - @one_time + @cached_property def slice_indicator(self): """A number that is higher for higher slices in Z @@ -246,12 +277,12 @@ def slice_indicator(self): return None return np.inner(ipp, s_norm) - @one_time + @cached_property def instance_number(self): """Just because we use this a lot for sorting""" return self.get('InstanceNumber') - @one_time + @cached_property def series_signature(self): """Signature for matching slices into series @@ -315,14 +346,30 @@ def affine(self): return aff def get_pixel_array(self): - """Return unscaled pixel array from DICOM""" + """Return raw pixel array without reshaping or scaling + + Returns + ------- + data : array + array with raw pixel data from DICOM + """ data = self.dcm_data.get('pixel_array') if data is None: raise WrapperError('Cannot find data in DICOM') return data + def get_unscaled_data(self): + """Return pixel array that is potentially reshaped, but without any scaling + + Returns + ------- + data : array + array with raw pixel data from DICOM + """ + return self.get_pixel_array() + def get_data(self): - """Get scaled image data from DICOMs + """Get potentially scaled and reshaped image data from DICOMs We return the data as DICOM understands it, first dimension is rows, second dimension is columns @@ -333,7 +380,7 @@ def get_data(self): array with data as scaled from any scaling in the DICOM fields. """ - return self._scale_data(self.get_pixel_array()) + return self._scale_data(self.get_unscaled_data()) def is_same_series(self, other): """Return True if `other` appears to be in same series @@ -372,11 +419,86 @@ def is_same_series(self, other): return False return True + @cached_property + def scale_factors(self): + """Return (N, 2) array of slope/intercept pairs""" + scaling = self._get_best_scale_factor(self.dcm_data) + if scaling is None: + if self.vendor == Vendor.PHILIPS: + warnings.warn( + 'Unable to find Philips private scale factor, cross-series comparisons may be invalid' + ) + scaling = (1, 0) + return np.array((scaling,)) + + def _get_rwv_scale_factor(self, dcm_data): + """Return the first set of 'real world' scale factors with defined units""" + rw_seq = dcm_data.get('RealWorldValueMappingSequence') + if rw_seq: + for rw_map in rw_seq: + try: + units = rw_map.MeasurementUnitsCodeSequence[0].CodeMeaning + except (AttributeError, IndexError): + continue + if units not in ('', 'no units', 'UNDEFINED'): + return ( + rw_map.get('RealWorldValueSlope', 1), + rw_map.get('RealWorldValueIntercept', 0), + ) + + def _get_legacy_scale_factor(self, dcm_data): + """Return scale factors from older 'Modality LUT' macro + + For Philips data we require RescaleType is defined and not set to 'normalized' + """ + pix_trans_seq = dcm_data.get('PixelValueTransformationSequence') + if pix_trans_seq is not None: + pix_trans = pix_trans_seq[0] + if self.vendor != Vendor.PHILIPS or pix_trans.get('RescaleType', 'US') not in ( + '', + 'US', + 'normalized', + ): + return (pix_trans.get('RescaleSlope', 1), pix_trans.get('RescaleIntercept', 0)) + if ( + dcm_data.get('RescaleSlope') is not None + or dcm_data.get('RescaleIntercept') is not None + ): + if self.vendor != Vendor.PHILIPS or
dcm_data.get('RescaleType', 'US') not in ( + '', + 'US', + 'normalized', + ): + return (dcm_data.get('RescaleSlope', 1), dcm_data.get('RescaleIntercept', 0)) + + def _get_philips_scale_factor(self, dcm_data): + """Return scale factors from Philips private element + + If we don't have any other scale factors that are tied to real world units, then + this is the best scaling to use to enable cross-series comparisons + """ + offset = find_private_section(dcm_data, 0x2005, 'Philips MR Imaging DD 001') + priv_scale = None if offset is None else dcm_data.get((0x2005, offset + 0xE)) + if priv_scale is not None: + return (priv_scale.value, 0.0) + + def _get_best_scale_factor(self, dcm_data): + """Return the most appropriate scale factor found or None""" + scaling = self._get_rwv_scale_factor(dcm_data) + if scaling is not None: + return scaling + scaling = self._get_legacy_scale_factor(dcm_data) + if scaling is not None: + return scaling + if self.vendor == Vendor.PHILIPS: + scaling = self._get_philips_scale_factor(dcm_data) + if scaling is not None: + return scaling + def _scale_data(self, data): # depending on pydicom and dicom files, values might need casting from # Decimal to float - scale = float(self.get('RescaleSlope', 1)) - offset = float(self.get('RescaleIntercept', 0)) + scale, offset = self.scale_factors[0] return self._apply_scale_offset(data, scale, offset) def _apply_scale_offset(self, data, scale, offset): @@ -390,7 +512,7 @@ def _apply_scale_offset(self, data, scale, offset): return data + offset return data - @one_time + @cached_property def b_value(self): """Return b value for diffusion or None if not available""" q_vec = self.q_vector @@ -398,7 +520,7 @@ def b_value(self): return None return q2bg(q_vec)[0] - @one_time + @cached_property def b_vector(self): """Return b vector for diffusion or None if not available""" q_vec = self.q_vector @@ -407,6 +529,71 @@ def b_vector(self): return q2bg(q_vec)[1] +class FrameFilter: + """Base class for defining how to filter out (ignore) frames from a multiframe file + + It is guaranteed that the `applies` method will be called on a dataset before the `keep` method + is called on any of the frames inside.
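A small self-contained check of the scale-factor precedence implemented above (synthetic dataset; with no real-world value mapping and no Philips private block, the legacy Rescale attributes win, and vendor detection keys off the Manufacturer tag):

import pydicom
from nibabel.nicom.dicomwrappers import Wrapper
from nibabel.nicom.utils import Vendor

ds = pydicom.Dataset()
ds.Manufacturer = 'SIEMENS'
ds.RescaleSlope = 2.0
ds.RescaleIntercept = -1024.0
dw = Wrapper(ds)
assert dw.vendor is Vendor.SIEMENS  # detected from the Manufacturer tag
assert tuple(dw.scale_factors[0]) == (2.0, -1024.0)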
+ """ + + def applies(self, dcm_wrp) -> bool: + """Returns true if the filter should be applied to a dataset""" + return True + + def keep(self, frame_data) -> bool: + """Return true if the frame should be kept""" + raise NotImplementedError + + +class FilterMultiStack(FrameFilter): + """Filter out all but one `StackID`""" + + def __init__(self, keep_id=None): + self._keep_id = keep_id + + def applies(self, dcm_wrp) -> bool: + first_fcs = dcm_wrp.frames[0].get('FrameContentSequence', (None,))[0] + if first_fcs is None or not hasattr(first_fcs, 'StackID'): + return False + stack_ids = {frame.FrameContentSequence[0].StackID for frame in dcm_wrp.frames} + if self._keep_id is not None: + if self._keep_id not in stack_ids: + raise WrapperError('Explicitly requested StackID not found') + self._selected = self._keep_id + if len(stack_ids) > 1: + if self._keep_id is None: + warnings.warn( + 'A multi-stack file was passed without an explicit filter, just using lowest StackID' + ) + self._selected = min(stack_ids) + return True + return False + + def keep(self, frame) -> bool: + return frame.FrameContentSequence[0].StackID == self._selected + + +class FilterDwiIso(FrameFilter): + """Filter out derived ISOTROPIC frames from DWI series""" + + def applies(self, dcm_wrp) -> bool: + if not hasattr(dcm_wrp.frames[0], 'MRDiffusionSequence'): + return False + diff_dirs = { + f.MRDiffusionSequence[0].get('DiffusionDirectionality') for f in dcm_wrp.frames + } + if len(diff_dirs) > 1 and 'ISOTROPIC' in diff_dirs: + warnings.warn('Derived images found and removed') + return True + return False + + def keep(self, frame) -> bool: + return frame.MRDiffusionSequence[0].DiffusionDirectionality != 'ISOTROPIC' + + +DEFUALT_FRAME_FILTERS = (FilterMultiStack(), FilterDwiIso()) + + class MultiframeWrapper(Wrapper): """Wrapper for Enhanced MR Storage SOP Class @@ -436,17 +623,20 @@ class MultiframeWrapper(Wrapper): Methods ------- + vendor(self) + frame_order(self) image_shape(self) image_orient_patient(self) voxel_sizes(self) image_position(self) series_signature(self) + scale_factors(self) get_data(self) """ is_multiframe = True - def __init__(self, dcm_data): + def __init__(self, dcm_data, frame_filters=None): """Initializes MultiframeWrapper Parameters @@ -454,10 +644,13 @@ def __init__(self, dcm_data): dcm_data : object object should allow 'get' and '__getitem__' access. Usually this will be a ``dicom.dataset.Dataset`` object resulting from reading a - DICOM file, but a dictionary should also work. + DICOM file. + + frame_filters : Iterable of FrameFilter + defines which frames inside the dataset should be ignored. If None then + `dicomwrappers.DEFAULT_FRAME_FILTERS` will be used. 
""" Wrapper.__init__(self, dcm_data) - self.dcm_data = dcm_data self.frames = dcm_data.get('PerFrameFunctionalGroupsSequence') try: self.frames[0] @@ -467,9 +660,58 @@ def __init__(self, dcm_data): self.shared = dcm_data.get('SharedFunctionalGroupsSequence')[0] except TypeError: raise WrapperError('SharedFunctionalGroupsSequence is empty.') - self._shape = None - - @one_time + # Apply frame filters one at a time in the order provided + if frame_filters is None: + frame_filters = DEFUALT_FRAME_FILTERS + frame_filters = [filt for filt in frame_filters if filt.applies(self)] + for filt in frame_filters: + self.frames = [f for f in self.frames if filt.keep(f)] + # Make sure there is only one StackID remaining + first_fcs = self.frames[0].get('FrameContentSequence', (None,))[0] + if first_fcs is not None and hasattr(first_fcs, 'StackID'): + if len({frame.FrameContentSequence[0].StackID for frame in self.frames}) > 1: + raise WrapperError('More than one StackID remains after filtering') + # Try to determine slice order and minimal image position patient + self._frame_slc_ord = self._ipp = self._slice_spacing = None + try: + frame_ipps = [f.PlanePositionSequence[0].ImagePositionPatient for f in self.frames] + except AttributeError: + try: + frame_ipps = [self.shared.PlanePositionSequence[0].ImagePositionPatient] + except AttributeError: + frame_ipps = None + if frame_ipps is not None and all(ipp is not None for ipp in frame_ipps): + frame_ipps = [np.array(list(map(float, ipp))) for ipp in frame_ipps] + frame_slc_pos = [np.inner(ipp, self.slice_normal) for ipp in frame_ipps] + rnd_slc_pos = np.round(frame_slc_pos, 4) + uniq_slc_pos = np.unique(rnd_slc_pos) + pos_ord_map = dict(zip(uniq_slc_pos, np.argsort(uniq_slc_pos))) + self._frame_slc_ord = [pos_ord_map[pos] for pos in rnd_slc_pos] + if len(self._frame_slc_ord) > 1: + self._slice_spacing = ( + frame_slc_pos[self._frame_slc_ord[1]] - frame_slc_pos[self._frame_slc_ord[0]] + ) + self._ipp = frame_ipps[np.argmin(frame_slc_pos)] + self._frame_indices = None + + @cached_property + def vendor(self): + """The vendor of the instrument that produced the DICOM""" + vendor = super().vendor + if vendor is not None: + return vendor + vendor = vendor_from_private(self.shared) + if vendor is not None: + return vendor + return vendor_from_private(self.frames[0]) + + @cached_property + def frame_order(self): + if self._frame_indices is None: + _ = self.image_shape + return np.lexsort(self._frame_indices.T) + + @cached_property def image_shape(self): """The array shape as it will be returned by ``get_data()`` @@ -500,76 +742,96 @@ def image_shape(self): rows, cols = self.get('Rows'), self.get('Columns') if None in (rows, cols): raise WrapperError('Rows and/or Columns are empty.') - - # Check number of frames - first_frame = self.frames[0] - n_frames = self.get('NumberOfFrames') - # some Philips may have derived images appended - has_derived = False - if hasattr(first_frame, 'get') and first_frame.get([0x18, 0x9117]): - # DWI image may include derived isotropic, ADC or trace volume - try: - anisotropic = pydicom.Sequence( - frame - for frame in self.frames - if frame.MRDiffusionSequence[0].DiffusionDirectionality != 'ISOTROPIC' - ) - # Image contains DWI volumes followed by derived images; remove derived images - if len(anisotropic) != 0: - self.frames = anisotropic - except IndexError: - # Sequence tag is found but missing items! 
- raise WrapperError('Diffusion file missing information') - except AttributeError: - # DiffusionDirectionality tag is not required - pass - else: - if n_frames != len(self.frames): - warnings.warn('Derived images found and removed') - n_frames = len(self.frames) - has_derived = True - - assert len(self.frames) == n_frames - frame_indices = np.array( - [frame.FrameContentSequence[0].DimensionIndexValues for frame in self.frames] - ) - # Check that there is only one multiframe stack index - stack_ids = {frame.FrameContentSequence[0].StackID for frame in self.frames} - if len(stack_ids) > 1: - raise WrapperError( - 'File contains more than one StackID. Cannot handle multi-stack files' + # Check number of frames, initialize array of frame indices + n_frames = len(self.frames) + try: + frame_indices = np.array( + [frame.FrameContentSequence[0].DimensionIndexValues for frame in self.frames] ) - # Determine if one of the dimension indices refers to the stack id + except AttributeError: + raise WrapperError("Can't find frame 'DimensionIndexValues'") + # Determine the shape and which indices to use + shape = [rows, cols] + curr_parts = n_frames + frames_per_part = 1 + del_indices = {} dim_seq = [dim.DimensionIndexPointer for dim in self.get('DimensionIndexSequence')] - stackid_tag = pydicom.datadict.tag_for_keyword('StackID') - # remove the stack id axis if present - if stackid_tag in dim_seq: - stackid_dim_idx = dim_seq.index(stackid_tag) - frame_indices = np.delete(frame_indices, stackid_dim_idx, axis=1) - dim_seq.pop(stackid_dim_idx) - if has_derived: - # derived volume is included - derived_tag = pydicom.datadict.tag_for_keyword('DiffusionBValue') - if derived_tag not in dim_seq: - raise WrapperError('Missing information, cannot remove indices with confidence.') - derived_dim_idx = dim_seq.index(derived_tag) - frame_indices = np.delete(frame_indices, derived_dim_idx, axis=1) - # account for the 2 additional dimensions (row and column) not included - # in the indices - n_dim = frame_indices.shape[1] + 2 + stackpos_tag = pydicom.datadict.tag_for_keyword('InStackPositionNumber') + slice_dim_idx = dim_seq.index(stackpos_tag) + for row_idx, row in enumerate(frame_indices.T): + unique = np.unique(row) + count = len(unique) + if curr_parts == 1 or (count == 1 and row_idx != slice_dim_idx): + del_indices[row_idx] = count + continue + # Replace slice indices with order determined from slice positions along normal + if row_idx == slice_dim_idx: + if len(shape) > 2: + raise WrapperError('Non-singular index precedes the slice index') + row = self._frame_slc_ord + frame_indices.T[row_idx, :] = row + unique = np.unique(row) + if len(unique) != count: + raise WrapperError("Number of slice indices and positions don't match") + elif count == n_frames: + if shape[-1] == 'remaining': + raise WrapperError('At most one index may have ambiguous size') + shape.append('remaining') + continue + new_parts, leftover = divmod(curr_parts, count) + expected = new_parts * frames_per_part + if leftover != 0 or any(np.count_nonzero(row == val) != expected for val in unique): + if row_idx == slice_dim_idx: + raise WrapperError('Missing slices from multiframe') + del_indices[row_idx] = count + continue + if shape[-1] == 'remaining': + shape[-1] = new_parts + frames_per_part *= shape[-1] + new_parts = 1 + frames_per_part *= count + shape.append(count) + curr_parts = new_parts + if shape[-1] == 'remaining': + if curr_parts > 1: + shape[-1] = curr_parts + curr_parts = 1 + else: + del_indices[len(shape)] = 1 + shape = shape[:-1] + if
del_indices: + if curr_parts > 1: + ns_failed = [k for k, v in del_indices.items() if v != 1] + if len(ns_failed) > 1: + # If some indices weren't used yet but we still have unaccounted for + # partitions, try combining indices into single tuple and using that + tup_dtype = np.dtype(','.join(['I'] * len(ns_failed))) + row = [tuple(x for x in vals) for vals in frame_indices[:, ns_failed]] + row = np.array(row, dtype=tup_dtype) + frame_indices = np.delete(frame_indices, np.array(list(del_indices.keys())), axis=1) + if curr_parts > 1 and len(ns_failed) > 1: + unique = np.unique(row, axis=0) + count = len(unique) + new_parts, rem = divmod(curr_parts, count) + allowed_val_counts = [new_parts * frames_per_part, n_frames] + if rem == 0 and all( + np.count_nonzero(row == val) in allowed_val_counts for val in unique + ): + shape.append(count) + curr_parts = new_parts + ord_vals = np.argsort(unique) + order = {tuple(unique[i]): ord_vals[i] for i in range(count)} + ord_row = np.array([order[tuple(v)] for v in row]) + frame_indices = np.hstack( + [frame_indices, np.array(ord_row).reshape((n_frames, 1))] + ) + if curr_parts > 1: + raise WrapperError('Unable to determine sorting of final dimension(s)') # Store frame indices self._frame_indices = frame_indices - if n_dim < 4: # 3D volume - return rows, cols, n_frames - # More than 3 dimensions - ns_unique = [len(np.unique(row)) for row in self._frame_indices.T] - shape = (rows, cols) + tuple(ns_unique) - n_vols = np.prod(shape[3:]) - if n_frames != n_vols * shape[2]: - raise WrapperError('Calculated shape does not match number of frames.') return tuple(shape) - @one_time + @cached_property def image_orient_patient(self): """ Note that this is _not_ LR flipped @@ -586,7 +848,7 @@ def image_orient_patient(self): iop = np.array(list(map(float, iop))) return np.array(iop).reshape(2, 3).T - @one_time + @cached_property def voxel_sizes(self): """Get i, j, k voxel sizes""" try: @@ -597,29 +859,25 @@ def voxel_sizes(self): except AttributeError: raise WrapperError('Not enough data for pixel spacing') pix_space = pix_measures.PixelSpacing - try: - zs = pix_measures.SliceThickness - except AttributeError: - zs = self.get('SpacingBetweenSlices') - if zs is None: - raise WrapperError('Not enough data for slice thickness') + if self._slice_spacing is not None: + zs = self._slice_spacing + else: + try: + zs = pix_measures.SliceThickness + except AttributeError: + zs = self.get('SpacingBetweenSlices') + if zs is None: + raise WrapperError('Not enough data for slice thickness') # Ensure values are float rather than Decimal return tuple(map(float, list(pix_space) + [zs])) - @one_time + @property def image_position(self): - try: - ipp = self.shared.PlanePositionSequence[0].ImagePositionPatient - except AttributeError: - try: - ipp = self.frames[0].PlanePositionSequence[0].ImagePositionPatient - except AttributeError: - raise WrapperError('Cannot get image position from dicom') - if ipp is None: - return None - return np.array(list(map(float, ipp))) + if self._ipp is None: + raise WrapperError('Not enough information for image_position_patient') + return self._ipp - @one_time + @cached_property def series_signature(self): signature = {} eq = operator.eq @@ -630,26 +888,63 @@ def series_signature(self): signature['vox'] = (self.voxel_sizes, none_or_close) return signature - def get_data(self): + @cached_property + def scale_factors(self): + """Return `(N, 2)` array of slope/intercept pairs + + If there is a single global scale factor then `N` will be one, otherwise it will
+ be the number of frames + """ + # Look for shared / global RWV scale factor first + shared_scale = self._get_rwv_scale_factor(self.shared) + if shared_scale is not None: + return np.array([shared_scale]) + shared_scale = self._get_rwv_scale_factor(self.dcm_data) + if shared_scale is not None: + return np.array([shared_scale]) + # Try pulling out best scale factors from each individual frame + frame_scales = [self._get_best_scale_factor(f) for f in self.frames] + if any(s is not None for s in frame_scales): + if any(s is None for s in frame_scales): + if self.vendor == Vendor.PHILIPS: + warnings.warn( + 'Unable to find Philips private scale factor, cross-series comparisons may be invalid' + ) + frame_scales = [s if s is not None else (1, 0) for s in frame_scales] + if all(s == frame_scales[0] for s in frame_scales[1:]): + return np.array([frame_scales[0]]) + return np.array(frame_scales)[self.frame_order] + # Finally look for shared non-RWV scale factors + shared_scale = self._get_best_scale_factor(self.shared) + if shared_scale is not None: + return np.array([shared_scale]) + shared_scale = self._get_best_scale_factor(self.dcm_data) + if shared_scale is None: + if self.vendor == Vendor.PHILIPS: + warnings.warn( + 'Unable to find Philips private scale factor, cross-series comparisons may be invalid' + ) + shared_scale = (1, 0) + return np.array([shared_scale]) + + def get_unscaled_data(self): shape = self.image_shape if shape is None: raise WrapperError('No valid information for image shape') data = self.get_pixel_array() - # Roll frames axis to last - data = data.transpose((1, 2, 0)) - # Sort frames with first index changing fastest, last slowest - sorted_indices = np.lexsort(self._frame_indices.T) - data = data[..., sorted_indices] - data = data.reshape(shape, order='F') - return self._scale_data(data) + # Roll frames axis to last and reorder + if len(data.shape) > 2: + data = data.transpose((1, 2, 0))[..., self.frame_order] + return data.reshape(shape, order='F') def _scale_data(self, data): - pix_trans = getattr(self.frames[0], 'PixelValueTransformationSequence', None) - if pix_trans is None: - return super()._scale_data(data) - scale = float(pix_trans[0].RescaleSlope) - offset = float(pix_trans[0].RescaleIntercept) - return self._apply_scale_offset(data, scale, offset) + scale_factors = self.scale_factors + if scale_factors.shape[0] == 1: + scale, offset = scale_factors[0] + return self._apply_scale_offset(data, scale, offset) + orig_shape = data.shape + data = data.reshape(data.shape[:2] + (len(self.frames),)) + return (data * scale_factors[:, 0] + scale_factors[:, 1]).reshape(orig_shape) class SiemensWrapper(Wrapper): @@ -676,7 +971,7 @@ def __init__(self, dcm_data, csa_header=None): object should allow 'get' and '__getitem__' access. If `csa_header` is None, it should also be possible to extract a CSA header from `dcm_data`. Usually this will be a ``dicom.dataset.Dataset`` object - resulting from reading a DICOM file. A dict should also work. + resulting from reading a DICOM file. csa_header : None or mapping, optional mapping giving values for Siemens CSA image sub-header. If None, we try and read the CSA information from `dcm_data`. 
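Continuing the usage sketch above (same hypothetical `dw`), the net effect of this scaling path: `get_data()` is `get_unscaled_data()` with the slope/intercept rows applied, one row when the factor is global, otherwise one per frame ordered by `frame_order`:

import numpy as np

factors = dw.scale_factors  # shape (1, 2) when global, else (n_frames, 2)
if factors.shape[0] == 1:
    slope, intercept = factors[0]
    assert np.allclose(dw.get_data(), dw.get_unscaled_data() * slope + intercept)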
@@ -692,7 +987,12 @@ def __init__(self, dcm_data, csa_header=None): csa_header = {} self.csa_header = csa_header - @one_time + @cached_property + def vendor(self): + """The vendor of the instrument that produced the DICOM""" + return Vendor.SIEMENS + + @cached_property def slice_normal(self): # The std_slice_normal comes from the cross product of the directions # in the ImageOrientationPatient @@ -716,7 +1016,7 @@ def slice_normal(self): else: return std_slice_normal - @one_time + @cached_property def series_signature(self): """Add ICE dims from CSA header to signature""" signature = super().series_signature @@ -726,7 +1026,7 @@ def series_signature(self): signature['ICE_Dims'] = (ice, operator.eq) return signature - @one_time + @cached_property def b_matrix(self): """Get DWI B matrix referring to voxel space @@ -763,7 +1063,7 @@ def b_matrix(self): # semi-definite. return nearest_pos_semi_def(B_vox) - @one_time + @cached_property def q_vector(self): """Get DWI q vector referring to voxel space @@ -836,7 +1136,7 @@ def __init__(self, dcm_data, csa_header=None, n_mosaic=None): self.n_mosaic = n_mosaic self.mosaic_size = int(np.ceil(np.sqrt(n_mosaic))) - @one_time + @cached_property def image_shape(self): """Return image shape as returned by ``get_data()``""" # reshape pixel slice array back from mosaic @@ -846,7 +1146,7 @@ def image_shape(self): return None return (rows // self.mosaic_size, cols // self.mosaic_size, self.n_mosaic) - @one_time + @cached_property def image_position(self): """Return position of first voxel in data block @@ -883,7 +1183,7 @@ def image_position(self): Q = np.fliplr(iop) * pix_spacing return ipp + np.dot(Q, vox_trans_fixes[:, None]).ravel() - def get_data(self): + def get_unscaled_data(self): """Get scaled image data from DICOMs Resorts data block from mosaic to 3D @@ -926,8 +1226,7 @@ def get_data(self): # pool mosaic-generated dims v3 = v4.reshape((n_slice_rows, n_slice_cols, n_blocks)) # delete any padding slices - v3 = v3[..., :n_mosaic] - return self._scale_data(v3) + return v3[..., :n_mosaic] def none_or_close(val1, val2, rtol=1e-5, atol=1e-6): diff --git a/nibabel/nicom/dwiparams.py b/nibabel/nicom/dwiparams.py index cb0e501202..5930e96f91 100644 --- a/nibabel/nicom/dwiparams.py +++ b/nibabel/nicom/dwiparams.py @@ -18,6 +18,7 @@ B ~ (q_est . q_est.T) / norm(q_est) """ + import numpy as np import numpy.linalg as npl diff --git a/nibabel/nicom/tests/test_ascconv.py b/nibabel/nicom/tests/test_ascconv.py index cd27bc3192..afe5f05e13 100644 --- a/nibabel/nicom/tests/test_ascconv.py +++ b/nibabel/nicom/tests/test_ascconv.py @@ -1,11 +1,9 @@ -"""Testing Siemens "ASCCONV" parser -""" +"""Testing Siemens "ASCCONV" parser""" from collections import OrderedDict from os.path import dirname from os.path import join as pjoin -import numpy as np from numpy.testing import assert_array_almost_equal, assert_array_equal from .. 
import ascconv diff --git a/nibabel/nicom/tests/test_csareader.py b/nibabel/nicom/tests/test_csareader.py index 0fc559c7fc..f31f4a3935 100644 --- a/nibabel/nicom/tests/test_csareader.py +++ b/nibabel/nicom/tests/test_csareader.py @@ -1,7 +1,6 @@ -"""Testing Siemens CSA header reader -""" +"""Testing Siemens CSA header reader""" + import gzip -import sys from copy import deepcopy from os.path import join as pjoin diff --git a/nibabel/nicom/tests/test_dicomreaders.py b/nibabel/nicom/tests/test_dicomreaders.py index 17ea7430f2..d508343be1 100644 --- a/nibabel/nicom/tests/test_dicomreaders.py +++ b/nibabel/nicom/tests/test_dicomreaders.py @@ -1,5 +1,4 @@ -"""Testing reading DICOM files -""" +"""Testing reading DICOM files""" from os.path import join as pjoin diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index 5c29349362..aefb35e892 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -1,8 +1,7 @@ -"""Testing DICOM wrappers -""" +"""Testing DICOM wrappers""" import gzip -from copy import copy +from copy import deepcopy from decimal import Decimal from hashlib import sha1 from os.path import dirname @@ -64,8 +63,8 @@ def test_wrappers(): # test direct wrapper calls # first with empty or minimal data multi_minimal = { - 'PerFrameFunctionalGroupsSequence': [None], - 'SharedFunctionalGroupsSequence': [None], + 'PerFrameFunctionalGroupsSequence': [pydicom.Dataset()], + 'SharedFunctionalGroupsSequence': [pydicom.Dataset()], } for maker, args in ( (didw.Wrapper, ({},)), @@ -164,10 +163,10 @@ def test_wrapper_from_data(): fake_data['SOPClassUID'] = '1.2.840.10008.5.1.4.1.1.4.1' with pytest.raises(didw.WrapperError): didw.wrapper_from_data(fake_data) - fake_data['PerFrameFunctionalGroupsSequence'] = [None] + fake_data['PerFrameFunctionalGroupsSequence'] = [pydicom.Dataset()] with pytest.raises(didw.WrapperError): didw.wrapper_from_data(fake_data) - fake_data['SharedFunctionalGroupsSequence'] = [None] + fake_data['SharedFunctionalGroupsSequence'] = [pydicom.Dataset()] # minimal set should now be met dw = didw.wrapper_from_data(fake_data) assert dw.is_multiframe @@ -365,7 +364,7 @@ def test_decimal_rescale(): assert dw.get_data().dtype != np.dtype(object) -def fake_frames(seq_name, field_name, value_seq): +def fake_frames(seq_name, field_name, value_seq, frame_seq=None): """Make fake frames for multiframe testing Parameters @@ -376,6 +375,8 @@ name of field within sequence value_seq : length N sequence sequence of values + frame_seq : length N list + previous result from this function to update Returns ------- @@ -383,23 +384,33 @@ each element in list is obj.<seq_name>[0].<field_name>
= value_seq[n] for n in range(N) """ - - class Fake: - pass - - frames = [] - for value in value_seq: - fake_frame = Fake() - fake_element = Fake() + if frame_seq is None: + frame_seq = [pydicom.Dataset() for _ in range(len(value_seq))] + for value, fake_frame in zip(value_seq, frame_seq): + if value is None: + continue + if hasattr(fake_frame, seq_name): + fake_element = getattr(fake_frame, seq_name)[0] + else: + fake_element = pydicom.Dataset() + setattr(fake_frame, seq_name, [fake_element]) setattr(fake_element, field_name, value) - setattr(fake_frame, seq_name, [fake_element]) - frames.append(fake_frame) - return frames + return frame_seq -def fake_shape_dependents(div_seq, sid_seq=None, sid_dim=None): +def fake_shape_dependents( + div_seq, + sid_seq=None, + sid_dim=None, + ipp_seq=None, + slice_dim=None, + flip_ipp_idx_corr=False, +): """Make a fake dictionary of data that ``image_shape`` is dependent on. + If you are providing the ``ipp_seq`` argument, they should be generated using + a slice normal aligned with the z-axis (i.e. iop == (0, 1, 0, 1, 0, 0)). + Parameters ---------- div_seq : list of tuples @@ -408,39 +419,92 @@ def fake_shape_dependents(div_seq, sid_seq=None, sid_dim=None): list of values to use for the `StackID` of each frame. sid_dim : int the index of the column in 'div_seq' to use as 'sid_seq' + ipp_seq : list of tuples + list of values to use for `ImagePositionPatient` for each frame + slice_dim : int + the index of the column in 'div_seq' corresponding to slices + flip_ipp_idx_corr : bool + generate ipp values so slice location is negatively correlated with slice index """ - class DimIdxSeqElem: + class PrintBase: + def __repr__(self): + attr_strs = [ + f'{attr}={getattr(self, attr)}' for attr in dir(self) if attr[0].isupper() + ] + return f"{self.__class__.__name__}({', '.join(attr_strs)})" + + class DimIdxSeqElem(pydicom.Dataset): def __init__(self, dip=(0, 0), fgp=None): + super().__init__() self.DimensionIndexPointer = dip if fgp is not None: self.FunctionalGroupPointer = fgp - class FrmContSeqElem: + class FrmContSeqElem(pydicom.Dataset): def __init__(self, div, sid): + super().__init__() self.DimensionIndexValues = div self.StackID = sid - class PerFrmFuncGrpSeqElem: - def __init__(self, div, sid): + class PlnPosSeqElem(pydicom.Dataset): + def __init__(self, ipp): + super().__init__() + self.ImagePositionPatient = ipp + + class PlnOrientSeqElem(pydicom.Dataset): + def __init__(self, iop): + super().__init__() + self.ImageOrientationPatient = iop + + class PerFrmFuncGrpSeqElem(pydicom.Dataset): + def __init__(self, div, sid, ipp, iop): + super().__init__() self.FrameContentSequence = [FrmContSeqElem(div, sid)] + self.PlanePositionSequence = [PlnPosSeqElem(ipp)] + self.PlaneOrientationSequence = [PlnOrientSeqElem(iop)] # if no StackID values passed in then use the values at index 'sid_dim' in # the value for DimensionIndexValues for it + n_indices = len(div_seq[0]) if sid_seq is None: if sid_dim is None: sid_dim = 0 sid_seq = [div[sid_dim] for div in div_seq] - # create the DimensionIndexSequence + # Determine slice_dim and create per-slice ipp information + if slice_dim is None: + slice_dim = 1 if sid_dim == 0 else 0 num_of_frames = len(div_seq) - dim_idx_seq = [DimIdxSeqElem()] * num_of_frames + frame_slc_indices = np.array(div_seq)[:, slice_dim] + uniq_slc_indices = np.unique(frame_slc_indices) + n_slices = len(uniq_slc_indices) + iop_seq = [[0.0, 1.0, 0.0, 1.0, 0.0, 0.0] for _ in range(num_of_frames)] + if ipp_seq is None: + slc_locs = np.linspace(-1.0, 
1.0, n_slices)
+        if flip_ipp_idx_corr:
+            slc_locs = slc_locs[::-1]
+        slc_idx_loc = {
+            div_idx: slc_locs[arr_idx] for arr_idx, div_idx in enumerate(np.sort(uniq_slc_indices))
+        }
+        ipp_seq = [[-1.0, -1.0, slc_idx_loc[idx]] for idx in frame_slc_indices]
+    else:
+        assert flip_ipp_idx_corr is False  # caller can flip it themselves
+        assert len(ipp_seq) == num_of_frames
+    # create the DimensionIndexSequence
+    dim_idx_seq = [DimIdxSeqElem()] * n_indices
+    # Add entry for InStackPositionNumber to DimensionIndexSequence
+    fcs_tag = pydicom.datadict.tag_for_keyword('FrameContentSequence')
+    isp_tag = pydicom.datadict.tag_for_keyword('InStackPositionNumber')
+    dim_idx_seq[slice_dim] = DimIdxSeqElem(isp_tag, fcs_tag)
     # add an entry for StackID into the DimensionIndexSequence
     if sid_dim is not None:
         sid_tag = pydicom.datadict.tag_for_keyword('StackID')
-        fcs_tag = pydicom.datadict.tag_for_keyword('FrameContentSequence')
         dim_idx_seq[sid_dim] = DimIdxSeqElem(sid_tag, fcs_tag)
     # create the PerFrameFunctionalGroupsSequence
-    frames = [PerFrmFuncGrpSeqElem(div, sid) for div, sid in zip(div_seq, sid_seq)]
+    frames = [
+        PerFrmFuncGrpSeqElem(div, sid, ipp, iop)
+        for div, sid, ipp, iop in zip(div_seq, sid_seq, ipp_seq, iop_seq)
+    ]
     return {
         'NumberOfFrames': num_of_frames,
         'DimensionIndexSequence': dim_idx_seq,
@@ -448,48 +512,84 @@ def __init__(self, div, sid):
     }


+if have_dicom:
+
+    class FakeDataset(pydicom.Dataset):
+        pixel_array = None
+
+
 class TestMultiFrameWrapper(TestCase):
     # Test MultiframeWrapper
-    MINIMAL_MF = {
+
+    if have_dicom:
         # Minimal contents of dcm_data for this wrapper
-        'PerFrameFunctionalGroupsSequence': [None],
-        'SharedFunctionalGroupsSequence': [None],
-    }
-    WRAPCLASS = didw.MultiframeWrapper
+        MINIMAL_MF = FakeDataset()
+        MINIMAL_MF.PerFrameFunctionalGroupsSequence = [pydicom.Dataset()]
+        MINIMAL_MF.SharedFunctionalGroupsSequence = [pydicom.Dataset()]
+        WRAPCLASS = didw.MultiframeWrapper

     @dicom_test
     def test_shape(self):
         # Check the shape algorithm
-        fake_mf = copy(self.MINIMAL_MF)
+        fake_mf = deepcopy(self.MINIMAL_MF)
         MFW = self.WRAPCLASS
         dw = MFW(fake_mf)
         # No rows, cols, raise WrapperError
         with pytest.raises(didw.WrapperError):
             dw.image_shape
-        fake_mf['Rows'] = 64
+        fake_mf.Rows = 64
         with pytest.raises(didw.WrapperError):
             dw.image_shape
         fake_mf.pop('Rows')
-        fake_mf['Columns'] = 64
+        fake_mf.Columns = 64
         with pytest.raises(didw.WrapperError):
             dw.image_shape
-        fake_mf['Rows'] = 32
-        # Missing frame data, raise AssertionError
-        with pytest.raises(AssertionError):
-            dw.image_shape
-        fake_mf['NumberOfFrames'] = 4
-        # PerFrameFunctionalGroupsSequence does not match NumberOfFrames
-        with pytest.raises(AssertionError):
+        fake_mf.Rows = 32
+        # No frame data raises WrapperError
+        with pytest.raises(didw.WrapperError):
             dw.image_shape
-        # check 3D shape when StackID index is 0
+        # Check 2D shape when StackID index is 0
+        div_seq = ((1, 1),)
+        fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
+        assert MFW(fake_mf).image_shape == (32, 64)
+        # Check 2D shape with an extraneous extra index
+        div_seq = ((1, 1, 2),)
+        fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
+        assert MFW(fake_mf).image_shape == (32, 64)
+        # Check 2D plus time
+        div_seq = ((1, 1, 1), (1, 1, 2), (1, 1, 3))
+        fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
+        assert MFW(fake_mf).image_shape == (32, 64, 1, 3)
+        # Check 3D shape when StackID index is 0
         div_seq = ((1, 1), (1, 2), (1, 3), (1, 4))
         fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
         assert MFW(fake_mf).image_shape == (32, 64, 4)
-        # Check stack number matching when StackID index is 0
+        # Check for warning when implicitly dropping stacks
         div_seq = ((1, 1), (1, 2), (1, 3), (2, 4))
         fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
+        with pytest.warns(
+            UserWarning,
+            match='A multi-stack file was passed without an explicit filter, just using lowest StackID',
+        ):
+            assert MFW(fake_mf).image_shape == (32, 64, 3)
+        # No warning if we explicitly select that StackID to keep
+        assert MFW(fake_mf, frame_filters=(didw.FilterMultiStack(1),)).image_shape == (32, 64, 3)
+        assert MFW(fake_mf, frame_filters=(didw.FilterMultiStack(2),)).image_shape == (32, 64)
+        # Stack filtering is the same when StackID is not an index
+        div_seq = ((1,), (2,), (3,), (4,))
+        sid_seq = (1, 1, 1, 2)
+        fake_mf.update(fake_shape_dependents(div_seq, sid_seq=sid_seq))
+        with pytest.warns(
+            UserWarning,
+            match='A multi-stack file was passed without an explicit filter, just using lowest StackID',
+        ):
+            assert MFW(fake_mf).image_shape == (32, 64, 3)
+        # No warning if we explicitly select that StackID to keep
+        assert MFW(fake_mf, frame_filters=(didw.FilterMultiStack(1),)).image_shape == (32, 64, 3)
+        assert MFW(fake_mf, frame_filters=(didw.FilterMultiStack(2),)).image_shape == (32, 64)
+        # Check for error when explicitly requested StackID is missing
         with pytest.raises(didw.WrapperError):
-            MFW(fake_mf).image_shape
+            MFW(fake_mf, frame_filters=(didw.FilterMultiStack(3),))
         # Make some fake frame data for 4D when StackID index is 0
         div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), (1, 1, 3), (1, 2, 3))
         fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
@@ -497,8 +597,12 @@ def test_shape(self):
         # Check stack number matching for 4D when StackID index is 0
         div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), (1, 1, 3), (2, 2, 3))
         fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
-        with pytest.raises(didw.WrapperError):
-            MFW(fake_mf).image_shape
+        with pytest.warns(
+            UserWarning,
+            match='A multi-stack file was passed without an explicit filter, just using lowest StackID',
+        ):
+            with pytest.raises(didw.WrapperError):
+                MFW(fake_mf).image_shape
         # Check indices can be non-contiguous when StackID index is 0
         div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 3), (1, 2, 3))
         fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
@@ -507,17 +611,22 @@ def test_shape(self):
         div_seq = ((1, 1, 0), (1, 2, 0), (1, 1, 3), (1, 2, 3))
         fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
         assert MFW(fake_mf).image_shape == (32, 64, 2, 2)
+        # Check the number of IPP values matches the number of slices, or we raise
+        frames = fake_mf.PerFrameFunctionalGroupsSequence
+        for frame in frames[1:]:
+            frame.PlanePositionSequence = frames[0].PlanePositionSequence[:]
+        with pytest.raises(didw.WrapperError):
+            MFW(fake_mf).image_shape
+        # Check we raise on missing slices
+        div_seq = ((1, 1, 0), (1, 2, 0), (1, 1, 1))
+        fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
+        with pytest.raises(didw.WrapperError):
+            MFW(fake_mf).image_shape
         # check 3D shape when there is no StackID index
         div_seq = ((1,), (2,), (3,), (4,))
         sid_seq = (1, 1, 1, 1)
         fake_mf.update(fake_shape_dependents(div_seq, sid_seq=sid_seq))
         assert MFW(fake_mf).image_shape == (32, 64, 4)
-        # check 3D stack number matching when there is no StackID index
-        div_seq = ((1,), (2,), (3,), (4,))
-        sid_seq = (1, 1, 1, 2)
-        fake_mf.update(fake_shape_dependents(div_seq, sid_seq=sid_seq))
-        with pytest.raises(didw.WrapperError):
-            MFW(fake_mf).image_shape
         # check 4D shape when there is no StackID index
         div_seq =
((1, 1), (2, 1), (1, 2), (2, 2), (1, 3), (2, 3)) sid_seq = (1, 1, 1, 1, 1, 1) @@ -527,8 +636,12 @@ def test_shape(self): div_seq = ((1, 1), (2, 1), (1, 2), (2, 2), (1, 3), (2, 3)) sid_seq = (1, 1, 1, 1, 1, 2) fake_mf.update(fake_shape_dependents(div_seq, sid_seq=sid_seq)) - with pytest.raises(didw.WrapperError): - MFW(fake_mf).image_shape + with pytest.warns( + UserWarning, + match='A multi-stack file was passed without an explicit filter, just using lowest StackID', + ): + with pytest.raises(didw.WrapperError): + MFW(fake_mf).image_shape # check 3D shape when StackID index is 1 div_seq = ((1, 1), (2, 1), (3, 1), (4, 1)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=1)) @@ -536,16 +649,82 @@ def test_shape(self): # Check stack number matching when StackID index is 1 div_seq = ((1, 1), (2, 1), (3, 2), (4, 1)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=1)) - with pytest.raises(didw.WrapperError): - MFW(fake_mf).image_shape + with pytest.warns( + UserWarning, + match='A multi-stack file was passed without an explicit filter, just using lowest StackID', + ): + assert MFW(fake_mf).image_shape == (32, 64, 3) # Make some fake frame data for 4D when StackID index is 1 div_seq = ((1, 1, 1), (2, 1, 1), (1, 1, 2), (2, 1, 2), (1, 1, 3), (2, 1, 3)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=1)) assert MFW(fake_mf).image_shape == (32, 64, 2, 3) + # Check non-singular dimension preceding slice dim raises + div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), (1, 1, 3), (1, 2, 3)) + fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0, slice_dim=2)) + with pytest.raises(didw.WrapperError): + MFW(fake_mf).image_shape + # Test with combo indices, here with the last two needing to be combined into + # a single index corresponding to [(1, 1), (1, 1), (2, 1), (2, 1), (2, 2), (2, 2)] + div_seq = ( + (1, 1, 1, 1), + (1, 2, 1, 1), + (1, 1, 2, 1), + (1, 2, 2, 1), + (1, 1, 2, 2), + (1, 2, 2, 2), + ) + fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) + assert MFW(fake_mf).image_shape == (32, 64, 2, 3) + # Test invalid 4D indices + div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), (1, 1, 3), (1, 2, 4)) + fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) + with pytest.raises(didw.WrapperError): + MFW(fake_mf).image_shape + div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), (1, 1, 3), (1, 2, 2)) + fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) + with pytest.raises(didw.WrapperError): + MFW(fake_mf).image_shape + # Time index that is unique to each frame + div_seq = ((1, 1, 1), (1, 2, 2), (1, 1, 3), (1, 2, 4), (1, 1, 5), (1, 2, 6)) + fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) + assert MFW(fake_mf).image_shape == (32, 64, 2, 3) + div_seq = ( + (1, 1, 1, 1), + (1, 2, 2, 1), + (1, 1, 3, 1), + (1, 2, 4, 1), + (1, 1, 5, 1), + (1, 2, 6, 1), + (1, 1, 7, 2), + (1, 2, 8, 2), + (1, 1, 9, 2), + (1, 2, 10, 2), + (1, 1, 11, 2), + (1, 2, 12, 2), + ) + fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) + assert MFW(fake_mf).image_shape == (32, 64, 2, 3, 2) + # Check we only allow one extra spatial dimension with unique val per frame + div_seq = ( + (1, 1, 1, 6), + (1, 2, 2, 5), + (1, 1, 3, 4), + (1, 2, 4, 3), + (1, 1, 5, 2), + (1, 2, 6, 1), + ) + fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) + with pytest.raises(didw.WrapperError): + MFW(fake_mf).image_shape + # Check that having unique value per frame works with single volume + div_seq = ((1, 1, 1), (1, 2, 2), (1, 3, 3)) + fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) 
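These shape expectations all exercise the same invariant: once frames are filtered to a single StackID, the remaining DimensionIndexValues must enumerate a complete Cartesian grid, or image_shape raises WrapperError. A minimal sketch of that grid-completeness rule, as an illustrative helper rather than nibabel's actual implementation:

    import numpy as np

    def frames_fill_grid(div_seq):
        """True if per-frame index tuples enumerate a complete Cartesian grid."""
        divs = np.asarray(div_seq)
        # Count the distinct values along each index dimension
        counts = [len(np.unique(divs[:, i])) for i in range(divs.shape[1])]
        # A complete grid has exactly prod(counts) frames, none repeated
        return np.prod(counts) == len(divs) == len(set(map(tuple, div_seq)))

    assert frames_fill_grid([(1, 1), (2, 1), (1, 2), (2, 2), (1, 3), (2, 3)])  # 2x3 grid
    assert not frames_fill_grid([(1, 1), (1, 2), (1, 3), (1, 2)])  # repeated tuple

The six-frame sequence in the first assertion fills a 2 x 3 grid, matching the (32, 64, 2, 3) shapes asserted in this test; the single-volume variant of the same rule continues below.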
+ assert MFW(fake_mf).image_shape == (32, 64, 3) + @dicom_test def test_iop(self): # Test Image orient patient for multiframe - fake_mf = copy(self.MINIMAL_MF) + fake_mf = deepcopy(self.MINIMAL_MF) MFW = self.WRAPCLASS dw = MFW(fake_mf) with pytest.raises(didw.WrapperError): @@ -554,84 +733,94 @@ def test_iop(self): fake_frame = fake_frames( 'PlaneOrientationSequence', 'ImageOrientationPatient', [[0, 1, 0, 1, 0, 0]] )[0] - fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] + fake_mf.SharedFunctionalGroupsSequence = [fake_frame] assert_array_equal(MFW(fake_mf).image_orient_patient, [[0, 1], [1, 0], [0, 0]]) - fake_mf['SharedFunctionalGroupsSequence'] = [None] + fake_mf.SharedFunctionalGroupsSequence = [pydicom.Dataset()] with pytest.raises(didw.WrapperError): MFW(fake_mf).image_orient_patient - fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] + fake_mf.PerFrameFunctionalGroupsSequence = [fake_frame] assert_array_equal(MFW(fake_mf).image_orient_patient, [[0, 1], [1, 0], [0, 0]]) + @dicom_test def test_voxel_sizes(self): # Test voxel size calculation - fake_mf = copy(self.MINIMAL_MF) + fake_mf = deepcopy(self.MINIMAL_MF) MFW = self.WRAPCLASS dw = MFW(fake_mf) with pytest.raises(didw.WrapperError): dw.voxel_sizes # Make a fake frame fake_frame = fake_frames('PixelMeasuresSequence', 'PixelSpacing', [[2.1, 3.2]])[0] - fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] + fake_mf.SharedFunctionalGroupsSequence = [fake_frame] # Still not enough, we lack information for slice distances with pytest.raises(didw.WrapperError): MFW(fake_mf).voxel_sizes # This can come from SpacingBetweenSlices or frame SliceThickness - fake_mf['SpacingBetweenSlices'] = 4.3 + fake_mf.SpacingBetweenSlices = 4.3 assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 4.3]) # If both, prefer SliceThickness fake_frame.PixelMeasuresSequence[0].SliceThickness = 5.4 assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 5.4]) # Just SliceThickness is OK - del fake_mf['SpacingBetweenSlices'] + del fake_mf.SpacingBetweenSlices assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 5.4]) # Removing shared leads to error again - fake_mf['SharedFunctionalGroupsSequence'] = [None] + fake_mf.SharedFunctionalGroupsSequence = [pydicom.Dataset()] with pytest.raises(didw.WrapperError): MFW(fake_mf).voxel_sizes # Restoring to frames makes it work again - fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] + fake_mf.PerFrameFunctionalGroupsSequence = [fake_frame] assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 5.4]) # Decimals in any field are OK fake_frame = fake_frames( 'PixelMeasuresSequence', 'PixelSpacing', [[Decimal('2.1'), Decimal('3.2')]] )[0] - fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] - fake_mf['SpacingBetweenSlices'] = Decimal('4.3') + fake_mf.SharedFunctionalGroupsSequence = [fake_frame] + fake_mf.SpacingBetweenSlices = Decimal('4.3') assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 4.3]) fake_frame.PixelMeasuresSequence[0].SliceThickness = Decimal('5.4') assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 5.4]) + @dicom_test def test_image_position(self): # Test image_position property for multiframe - fake_mf = copy(self.MINIMAL_MF) + fake_mf = deepcopy(self.MINIMAL_MF) MFW = self.WRAPCLASS dw = MFW(fake_mf) with pytest.raises(didw.WrapperError): dw.image_position # Make a fake frame - fake_frame = fake_frames( - 'PlanePositionSequence', 'ImagePositionPatient', [[-2.0, 3.0, 7]] - )[0] - fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] + iop = [0, 1, 0, 1, 
0, 0] + frames = fake_frames('PlaneOrientationSequence', 'ImageOrientationPatient', [iop]) + frames = fake_frames( + 'PlanePositionSequence', 'ImagePositionPatient', [[-2.0, 3.0, 7]], frames + ) + fake_mf.SharedFunctionalGroupsSequence = frames assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 7]) - fake_mf['SharedFunctionalGroupsSequence'] = [None] + fake_mf.SharedFunctionalGroupsSequence = [pydicom.Dataset()] with pytest.raises(didw.WrapperError): MFW(fake_mf).image_position - fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] + fake_mf.PerFrameFunctionalGroupsSequence = frames assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 7]) # Check lists of Decimals work - fake_frame.PlanePositionSequence[0].ImagePositionPatient = [ + frames[0].PlanePositionSequence[0].ImagePositionPatient = [ Decimal(str(v)) for v in [-2, 3, 7] ] assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 7]) assert MFW(fake_mf).image_position.dtype == float + # We should get minimum along slice normal with multiple frames + frames = fake_frames('PlaneOrientationSequence', 'ImageOrientationPatient', [iop] * 2) + ipps = [[-2.0, 3.0, 7], [-2.0, 3.0, 6]] + frames = fake_frames('PlanePositionSequence', 'ImagePositionPatient', ipps, frames) + fake_mf.PerFrameFunctionalGroupsSequence = frames + assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 6]) @dicom_test @pytest.mark.xfail(reason='Not packaged in install', raises=FileNotFoundError) def test_affine(self): # Make sure we find orientation/position/spacing info dw = didw.wrapper_from_file(DATA_FILE_4D) - aff = dw.affine + dw.affine @dicom_test @pytest.mark.xfail(reason='Not packaged in install', raises=FileNotFoundError) @@ -645,7 +834,7 @@ def test_data_real(self): if endian_codes[data.dtype.byteorder] == '>': data = data.byteswap() dat_str = data.tobytes() - assert sha1(dat_str).hexdigest() == '149323269b0af92baa7508e19ca315240f77fa8c' + assert sha1(dat_str).hexdigest() == 'dc011bb49682fb78f3cebacf965cb65cc9daba7d' @dicom_test def test_slicethickness_fallback(self): @@ -657,16 +846,16 @@ def test_slicethickness_fallback(self): def test_data_derived_shape(self): # Test 4D diffusion data with an additional trace volume included # Excludes the trace volume and generates the correct shape - dw = didw.wrapper_from_file(DATA_FILE_4D_DERIVED) with pytest.warns(UserWarning, match='Derived images found and removed'): - assert dw.image_shape == (96, 96, 60, 33) + dw = didw.wrapper_from_file(DATA_FILE_4D_DERIVED) + assert dw.image_shape == (96, 96, 60, 33) @dicom_test @needs_nibabel_data('dcm_qa_xa30') def test_data_trace(self): # Test that a standalone trace volume is found and not dropped dw = didw.wrapper_from_file(DATA_FILE_SIEMENS_TRACE) - assert dw.image_shape == (72, 72, 39, 1) + assert dw.image_shape == (72, 72, 39) @dicom_test @needs_nibabel_data('nitest-dicom') @@ -679,7 +868,7 @@ def test_data_unreadable_private_headers(self): @dicom_test def test_data_fake(self): # Test algorithm for get_data - fake_mf = copy(self.MINIMAL_MF) + fake_mf = deepcopy(self.MINIMAL_MF) MFW = self.WRAPCLASS dw = MFW(fake_mf) # Fails - no shape @@ -691,8 +880,8 @@ def test_data_fake(self): with pytest.raises(didw.WrapperError): dw.get_data() # Make shape and indices - fake_mf['Rows'] = 2 - fake_mf['Columns'] = 3 + fake_mf.Rows = 2 + fake_mf.Columns = 3 dim_idxs = ((1, 1), (1, 2), (1, 3), (1, 4)) fake_mf.update(fake_shape_dependents(dim_idxs, sid_dim=0)) assert MFW(fake_mf).image_shape == (2, 3, 4) @@ -702,19 +891,24 @@ def test_data_fake(self): # Add data - 3D data = 
np.arange(24).reshape((2, 3, 4)) # Frames dim is first for some reason - fake_mf['pixel_array'] = np.rollaxis(data, 2) + object.__setattr__(fake_mf, 'pixel_array', np.rollaxis(data, 2)) # Now it should work dw = MFW(fake_mf) assert_array_equal(dw.get_data(), data) # Test scaling works - fake_mf['RescaleSlope'] = 2.0 - fake_mf['RescaleIntercept'] = -1 + fake_mf.RescaleSlope = 2.0 + fake_mf.RescaleIntercept = -1 assert_array_equal(MFW(fake_mf).get_data(), data * 2.0 - 1) # Check slice sorting dim_idxs = ((1, 4), (1, 2), (1, 3), (1, 1)) fake_mf.update(fake_shape_dependents(dim_idxs, sid_dim=0)) sorted_data = data[..., [3, 1, 2, 0]] - fake_mf['pixel_array'] = np.rollaxis(sorted_data, 2) + fake_mf.pixel_array = np.rollaxis(sorted_data, 2) + assert_array_equal(MFW(fake_mf).get_data(), data * 2.0 - 1) + # Check slice sorting with negative index / IPP correlation + fake_mf.update(fake_shape_dependents(dim_idxs, sid_dim=0, flip_ipp_idx_corr=True)) + sorted_data = data[..., [0, 2, 1, 3]] + fake_mf.pixel_array = np.rollaxis(sorted_data, 2) assert_array_equal(MFW(fake_mf).get_data(), data * 2.0 - 1) # 5D! dim_idxs = [ @@ -741,28 +935,173 @@ def test_data_fake(self): sorted_data = data.reshape(shape[:2] + (-1,), order='F') order = [11, 9, 10, 8, 3, 1, 2, 0, 15, 13, 14, 12, 7, 5, 6, 4] sorted_data = sorted_data[..., np.argsort(order)] - fake_mf['pixel_array'] = np.rollaxis(sorted_data, 2) + fake_mf.pixel_array = np.rollaxis(sorted_data, 2) assert_array_equal(MFW(fake_mf).get_data(), data * 2.0 - 1) - def test__scale_data(self): + @dicom_test + def test_scale_data(self): # Test data scaling - fake_mf = copy(self.MINIMAL_MF) + fake_mf = deepcopy(self.MINIMAL_MF) + fake_mf.Rows = 2 + fake_mf.Columns = 3 + fake_mf.PerFrameFunctionalGroupsSequence = [pydicom.Dataset() for _ in range(4)] MFW = self.WRAPCLASS - dw = MFW(fake_mf) - data = np.arange(24).reshape((2, 3, 4)) - assert_array_equal(data, dw._scale_data(data)) - fake_mf['RescaleSlope'] = 2.0 - fake_mf['RescaleIntercept'] = -1.0 - assert_array_equal(data * 2 - 1, dw._scale_data(data)) - fake_frame = fake_frames('PixelValueTransformationSequence', 'RescaleSlope', [3.0])[0] - fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] - # Lacking RescaleIntercept -> Error - dw = MFW(fake_mf) - with pytest.raises(AttributeError): - dw._scale_data(data) - fake_frame.PixelValueTransformationSequence[0].RescaleIntercept = -2 - assert_array_equal(data * 3 - 2, dw._scale_data(data)) + data = np.arange(24).reshape((2, 3, 4), order='F') + assert_array_equal(data, MFW(fake_mf)._scale_data(data)) + # Test legacy top-level slope/intercept + fake_mf.RescaleSlope = 2.0 + fake_mf.RescaleIntercept = -1.0 + assert_array_equal(data * 2 - 1, MFW(fake_mf)._scale_data(data)) + # RealWorldValueMapping takes precedence, but only with defined units + fake_mf.RealWorldValueMappingSequence = [pydicom.Dataset()] + fake_mf.RealWorldValueMappingSequence[0].RealWorldValueSlope = 10.0 + fake_mf.RealWorldValueMappingSequence[0].RealWorldValueIntercept = -5.0 + assert_array_equal(data * 2 - 1, MFW(fake_mf)._scale_data(data)) + fake_mf.RealWorldValueMappingSequence[0].MeasurementUnitsCodeSequence = [pydicom.Dataset()] + fake_mf.RealWorldValueMappingSequence[0].MeasurementUnitsCodeSequence[0].CodeMeaning = '%' + assert_array_equal(data * 10 - 5, MFW(fake_mf)._scale_data(data)) + fake_mf.RealWorldValueMappingSequence[0].MeasurementUnitsCodeSequence[ + 0 + ].CodeMeaning = 'no units' + assert_array_equal(data * 2 - 1, MFW(fake_mf)._scale_data(data)) + # Possible to have more than one 
RealWorldValueMapping; use the first one with defined units
+        fake_mf.RealWorldValueMappingSequence.append(pydicom.Dataset())
+        fake_mf.RealWorldValueMappingSequence[-1].RealWorldValueSlope = 15.0
+        fake_mf.RealWorldValueMappingSequence[-1].RealWorldValueIntercept = -3.0
+        fake_mf.RealWorldValueMappingSequence[-1].MeasurementUnitsCodeSequence = [
+            pydicom.Dataset()
+        ]
+        fake_mf.RealWorldValueMappingSequence[-1].MeasurementUnitsCodeSequence[0].CodeMeaning = '%'
+        assert_array_equal(data * 15 - 3, MFW(fake_mf)._scale_data(data))
+        # A global RWV scale takes precedence over per-frame PixelValueTransformation
+        div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2))
+        fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
+        frames = fake_frames(
+            'PixelValueTransformationSequence',
+            'RescaleSlope',
+            [3.0, 3.0, 3.0, 3.0],
+            fake_mf.PerFrameFunctionalGroupsSequence,
+        )
+        assert_array_equal(data * 15 - 3, MFW(fake_mf)._scale_data(data))
+        # The per-frame PixelValueTransformation takes precedence over plain top-level slope / intercept
+        delattr(fake_mf, 'RealWorldValueMappingSequence')
+        assert_array_equal(data * 3, MFW(fake_mf)._scale_data(data))
+        for frame in frames:
+            frame.PixelValueTransformationSequence[0].RescaleIntercept = -2
+        assert_array_equal(data * 3 - 2, MFW(fake_mf)._scale_data(data))
         # Decimals are OK
-        fake_frame.PixelValueTransformationSequence[0].RescaleSlope = Decimal('3')
-        fake_frame.PixelValueTransformationSequence[0].RescaleIntercept = Decimal('-2')
-        assert_array_equal(data * 3 - 2, dw._scale_data(data))
+        for frame in frames:
+            frame.PixelValueTransformationSequence[0].RescaleSlope = Decimal(3)
+            frame.PixelValueTransformationSequence[0].RescaleIntercept = Decimal(-2)
+        assert_array_equal(data * 3 - 2, MFW(fake_mf)._scale_data(data))
+        # A per-frame RWV scaling takes precedence over per-frame PixelValueTransformation
+        for frame in frames:
+            frame.RealWorldValueMappingSequence = [pydicom.Dataset()]
+            frame.RealWorldValueMappingSequence[0].RealWorldValueSlope = 10.0
+            frame.RealWorldValueMappingSequence[0].RealWorldValueIntercept = -5.0
+            frame.RealWorldValueMappingSequence[0].MeasurementUnitsCodeSequence = [
+                pydicom.Dataset()
+            ]
+            frame.RealWorldValueMappingSequence[0].MeasurementUnitsCodeSequence[
+                0
+            ].CodeMeaning = '%'
+        assert_array_equal(data * 10 - 5, MFW(fake_mf)._scale_data(data))
+        # Test varying per-frame scale factors
+        for frame_idx, frame in enumerate(frames):
+            frame.RealWorldValueMappingSequence[0].RealWorldValueSlope = 2 * (frame_idx + 1)
+            frame.RealWorldValueMappingSequence[0].RealWorldValueIntercept = -1 * (frame_idx + 1)
+        assert_array_equal(
+            data * np.array([2, 4, 6, 8]) + np.array([-1, -2, -3, -4]),
+            MFW(fake_mf)._scale_data(data),
+        )
+
+    @dicom_test
+    def test_philips_scale_data(self):
+        fake_mf = deepcopy(self.MINIMAL_MF)
+        fake_mf.Manufacturer = 'Philips'
+        fake_mf.Rows = 2
+        fake_mf.Columns = 3
+        fake_mf.PerFrameFunctionalGroupsSequence = [pydicom.Dataset() for _ in range(4)]
+        MFW = self.WRAPCLASS
+        data = np.arange(24).reshape((2, 3, 4), order='F')
+        # Unlike other manufacturers, public scale factors from Philips without defined
+        # units should not be used. In lieu of this, the private scale factor should be
+        # used, which should always be available (modulo deidentification). If we can't
+        # find any of these scale factors, a warning is issued.
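The private scale factor referenced in that comment lives at element offset 0x0E of the (0x2005, 'Philips MR Imaging DD 001') private block, which is exactly what the fixtures below create via private_block(..., create=True). A minimal sketch of the lookup using pydicom's private-block API (the helper name is illustrative, not part of nibabel):

    import pydicom

    def philips_private_scale(ds):
        """Return the Philips private scale factor from (2005,xx0E), or None."""
        try:
            block = ds.private_block(0x2005, 'Philips MR Imaging DD 001')
        except KeyError:
            return None  # private creator absent, e.g. after deidentification
        tag = block.get_tag(0x0E)
        return float(ds[tag].value) if tag in ds else None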
+        with pytest.warns(
+            UserWarning,
+            match='Unable to find Philips private scale factor, cross-series comparisons may be invalid',
+        ):
+            assert_array_equal(data, MFW(fake_mf)._scale_data(data))
+        fake_mf.RescaleSlope = 2.0
+        fake_mf.RescaleIntercept = -1.0
+        for rescale_type in (None, '', 'US', 'normalized'):
+            if rescale_type is not None:
+                fake_mf.RescaleType = rescale_type
+            with pytest.warns(
+                UserWarning,
+                match='Unable to find Philips private scale factor, cross-series comparisons may be invalid',
+            ):
+                assert_array_equal(data, MFW(fake_mf)._scale_data(data))
+        # Falling back to private scaling doesn't generate an error
+        priv_block = fake_mf.private_block(0x2005, 'Philips MR Imaging DD 001', create=True)
+        priv_block.add_new(0xE, 'FL', 3.0)
+        assert_array_equal(data * 3.0, MFW(fake_mf)._scale_data(data))
+        # If the units are defined, they take precedence over private scaling
+        fake_mf.RescaleType = 'mrad'
+        assert_array_equal(data * 2 - 1, MFW(fake_mf)._scale_data(data))
+        # A RWV scale factor with defined units takes precedence
+        shared = pydicom.Dataset()
+        fake_mf.SharedFunctionalGroupsSequence = [shared]
+        rwv_map = pydicom.Dataset()
+        rwv_map.RealWorldValueSlope = 10.0
+        rwv_map.RealWorldValueIntercept = -5.0
+        rwv_map.MeasurementUnitsCodeSequence = [pydicom.Dataset()]
+        rwv_map.MeasurementUnitsCodeSequence[0].CodeMeaning = '%'
+        shared.RealWorldValueMappingSequence = [rwv_map]
+        assert_array_equal(data * 10 - 5, MFW(fake_mf)._scale_data(data))
+        # Get rid of valid top-level scale factors, test per-frame scale factors
+        delattr(shared, 'RealWorldValueMappingSequence')
+        delattr(fake_mf, 'RescaleType')
+        del fake_mf[priv_block.get_tag(0xE)]
+        div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2))
+        fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0))
+        # Simplest case: all frames have the same (valid) scale factor
+        for frame in fake_mf.PerFrameFunctionalGroupsSequence:
+            pix_trans = pydicom.Dataset()
+            pix_trans.RescaleSlope = 2.5
+            pix_trans.RescaleIntercept = -4
+            pix_trans.RescaleType = 'mrad'
+            frame.PixelValueTransformationSequence = [pix_trans]
+        assert_array_equal(data * 2.5 - 4, MFW(fake_mf)._scale_data(data))
+        # If some frames are missing valid scale factors, we should get a warning
+        for frame in fake_mf.PerFrameFunctionalGroupsSequence[2:]:
+            delattr(frame.PixelValueTransformationSequence[0], 'RescaleType')
+        with pytest.warns(
+            UserWarning,
+            match='Unable to find Philips private scale factor, cross-series comparisons may be invalid',
+        ):
+            assert_array_equal(
+                data * np.array([2.5, 2.5, 1, 1]) + np.array([-4, -4, 0, 0]),
+                MFW(fake_mf)._scale_data(data),
+            )
+        # We can fall back to the private scale factor on a frame-by-frame basis
+        for frame in fake_mf.PerFrameFunctionalGroupsSequence:
+            priv_block = frame.private_block(0x2005, 'Philips MR Imaging DD 001', create=True)
+            priv_block.add_new(0xE, 'FL', 7.0)
+        assert_array_equal(
+            data * np.array([2.5, 2.5, 7, 7]) + np.array([-4, -4, 0, 0]),
+            MFW(fake_mf)._scale_data(data),
+        )
+        # Again RWV scale factors take precedence
+        for frame_idx, frame in enumerate(fake_mf.PerFrameFunctionalGroupsSequence):
+            rwv_map = pydicom.Dataset()
+            rwv_map.RealWorldValueSlope = 14.0 - frame_idx
+            rwv_map.RealWorldValueIntercept = 5.0
+            rwv_map.MeasurementUnitsCodeSequence = [pydicom.Dataset()]
+            rwv_map.MeasurementUnitsCodeSequence[0].CodeMeaning = '%'
+            frame.RealWorldValueMappingSequence = [rwv_map]
+        assert_array_equal(
+            data * np.array([14, 13, 12, 11]) + np.array([5, 5, 5, 5]),
+            MFW(fake_mf)._scale_data(data),
+        )
diff --git
a/nibabel/nicom/tests/test_dwiparams.py b/nibabel/nicom/tests/test_dwiparams.py
index 6e98b4af61..559c0a2143 100644
--- a/nibabel/nicom/tests/test_dwiparams.py
+++ b/nibabel/nicom/tests/test_dwiparams.py
@@ -1,5 +1,4 @@
-"""Testing diffusion parameter processing
-"""
+"""Testing diffusion parameter processing"""

 import numpy as np
 import pytest
diff --git a/nibabel/nicom/tests/test_structreader.py b/nibabel/nicom/tests/test_structreader.py
index 2d37bbc3ed..ccd2dd4f85 100644
--- a/nibabel/nicom/tests/test_structreader.py
+++ b/nibabel/nicom/tests/test_structreader.py
@@ -1,5 +1,5 @@
-"""Testing Siemens CSA header reader
-"""
+"""Testing Siemens CSA header reader"""
+
 import struct
 import sys
diff --git a/nibabel/nicom/tests/test_utils.py b/nibabel/nicom/tests/test_utils.py
index ea3b999fad..4f0d7e68d5 100644
--- a/nibabel/nicom/tests/test_utils.py
+++ b/nibabel/nicom/tests/test_utils.py
@@ -1,5 +1,5 @@
-"""Testing nicom.utils module
-"""
+"""Testing nicom.utils module"""
+
 import re

 from nibabel.optpkg import optional_package
diff --git a/nibabel/nicom/utils.py b/nibabel/nicom/utils.py
index 617ff2a28a..2c01c9d161 100644
--- a/nibabel/nicom/utils.py
+++ b/nibabel/nicom/utils.py
@@ -1,5 +1,6 @@
-"""Utilities for working with DICOM datasets
-"""
+"""Utilities for working with DICOM datasets"""
+
+from enum import Enum


 def find_private_section(dcm_data, group_no, creator):
@@ -46,3 +47,53 @@ def find_private_section(dcm_data, group_no, creator):
         if match_func(val):
             return elno * 0x100
     return None
+
+
+class Vendor(Enum):
+    SIEMENS = 1
+    GE = 2
+    PHILIPS = 3
+
+
+vendor_priv_sections = {
+    Vendor.SIEMENS: [
+        (0x9, 'SIEMENS SYNGO INDEX SERVICE'),
+        (0x19, 'SIEMENS MR HEADER'),
+        (0x21, 'SIEMENS MR SDR 01'),
+        (0x21, 'SIEMENS MR SDS 01'),
+        (0x21, 'SIEMENS MR SDI 02'),
+        (0x29, 'SIEMENS CSA HEADER'),
+        (0x29, 'SIEMENS MEDCOM HEADER2'),
+        (0x51, 'SIEMENS MR HEADER'),
+    ],
+    Vendor.PHILIPS: [
+        (0x2001, 'Philips Imaging DD 001'),
+        (0x2001, 'Philips Imaging DD 002'),
+        (0x2001, 'Philips Imaging DD 129'),
+        (0x2005, 'Philips MR Imaging DD 001'),
+        (0x2005, 'Philips MR Imaging DD 002'),
+        (0x2005, 'Philips MR Imaging DD 003'),
+        (0x2005, 'Philips MR Imaging DD 004'),
+        (0x2005, 'Philips MR Imaging DD 005'),
+        (0x2005, 'Philips MR Imaging DD 006'),
+        (0x2005, 'Philips MR Imaging DD 007'),
+    ],
+    Vendor.GE: [
+        (0x9, 'GEMS_IDEN_01'),
+        (0x19, 'GEMS_ACQU_01'),
+        (0x21, 'GEMS_RELA_01'),
+        (0x23, 'GEMS_STDY_01'),
+        (0x25, 'GEMS_SERS_01'),
+        (0x27, 'GEMS_IMAG_01'),
+        (0x29, 'GEMS_IMPS_01'),
+        (0x43, 'GEMS_PARM_01'),
+    ],
+}
+
+
+def vendor_from_private(dcm_data):
+    """Try to determine the vendor by looking for specific private tags"""
+    for vendor, priv_sections in vendor_priv_sections.items():
+        for priv_group, priv_creator in priv_sections:
+            if find_private_section(dcm_data, priv_group, priv_creator) is not None:
+                return vendor
diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py
index 4cf1e52748..f0bd91fc48 100644
--- a/nibabel/nifti1.py
+++ b/nibabel/nifti1.py
@@ -10,14 +10,23 @@
 NIfTI1 format defined at http://nifti.nimh.nih.gov/nifti-1/

 """
+
 from __future__ import annotations

+import json
+import sys
+import typing as ty
 import warnings
 from io import BytesIO

 import numpy as np
 import numpy.linalg as npl

+if sys.version_info < (3, 13):
+    from typing_extensions import Self, TypeVar  # PY312
+else:
+    from typing import Self, TypeVar
+
 from .
import analyze  # module import
 from .arrayproxy import get_obj_dtype
 from .batteryrunners import Report
@@ -30,7 +39,19 @@
 from .spm99analyze import SpmAnalyzeHeader
 from .volumeutils import Recoder, endian_codes, make_dt_codes

-pdcm, have_dicom, _ = optional_package('pydicom')
+if ty.TYPE_CHECKING:
+    import pydicom as pdcm
+
+    have_dicom = True
+    DicomDataset = pdcm.Dataset
+else:
+    pdcm, have_dicom, _ = optional_package('pydicom')
+    if have_dicom:
+        DicomDataset = pdcm.Dataset
+    else:
+        DicomDataset = ty.Any
+
+T = TypeVar('T', default=bytes)

 # nifti1 flat header definition for Analyze-like first 348 bytes
 # first number in comments indicates offset in file header in bytes
@@ -282,15 +303,38 @@
 )


-class Nifti1Extension:
-    """Baseclass for NIfTI1 header extensions.
+class NiftiExtension(ty.Generic[T]):
+    """Base class for NIfTI header extensions.

-    This class is sufficient to handle very simple text-based extensions, such
-    as `comment`. More sophisticated extensions should/will be supported by
-    dedicated subclasses.
+    This class provides access to the extension content in various forms.
+    For simple extensions that expose data as bytes, text or JSON, this class
+    is sufficient. More complex extensions should be implemented as subclasses
+    that provide custom serialization/deserialization methods.
+
+    Efficiency note:
+
+    This class assumes that the runtime representation of the extension content
+    is mutable. Once a runtime representation is set, it is cached and will be
+    serialized on any attempt to access the extension content as bytes, including
+    determining the size of the extension in the NIfTI file.
+
+    If the runtime representation is never accessed, the raw bytes will be used
+    without modification. This avoids unnecessary deserialization, but bytestrings
+    that would not produce a valid runtime representation will be written as-is
+    and may cause errors downstream.
     """

-    def __init__(self, code, content):
+    code: int
+    encoding: str | None = None
+    _content: bytes
+    _object: T | None = None
+
+    def __init__(
+        self,
+        code: int | str,
+        content: bytes = b'',
+        object: T | None = None,
+    ) -> None:
         """
         Parameters
         ----------
@@ -298,94 +342,129 @@ def __init__(self, code, content):
             Canonical extension code as defined in the NIfTI standard, given
             either as integer or corresponding label
             (see :data:`~nibabel.nifti1.extension_codes`)
-        content : str
-            Extension content as read from the NIfTI file header. This content is
-            converted into a runtime representation.
+        content : bytes, optional
+            Extension content as read from the NIfTI file header.
+        object : optional
+            Extension content in runtime form.
         """
         try:
-            self._code = extension_codes.code[code]
+            self.code = extension_codes.code[code]  # type: ignore[assignment]
         except KeyError:
-            # XXX or fail or at least complain?
-            self._code = code
-        self._content = self._unmangle(content)
-
-    def _unmangle(self, value):
-        """Convert the extension content into its runtime representation.
+            self.code = code  # type: ignore[assignment]
+        self._content = content
+        if object is not None:
+            self._object = object

-        The default implementation does nothing at all.
+    @classmethod
+    def from_bytes(cls, content: bytes) -> Self:
+        """Create an extension from raw bytes.

-        Parameters
-        ----------
-        value : str
-            Extension content as read from file.
+        This constructor may only be used in extension classes with a class
+        attribute `code` to indicate the extension type.
+ """ + if not hasattr(cls, 'code'): + raise NotImplementedError('from_bytes() requires a class attribute `code`') + return cls(cls.code, content=content) - Returns - ------- - The same object that was passed as `value`. + @classmethod + def from_object(cls, obj: T) -> Self: + """Create an extension from a runtime object. - Notes - ----- - Subclasses should reimplement this method to provide the desired - unmangling procedure and may return any type of object. + This constructor may only be used in extension classes with a class + attribute `code` to indicate the extension type. """ - return value + if not hasattr(cls, 'code'): + raise NotImplementedError('from_object() requires a class attribute `code`') + return cls(cls.code, object=obj) - def _mangle(self, value): - """Convert the extension content into NIfTI file header representation. + # Handle (de)serialization of extension content + # Subclasses may implement these methods to provide an alternative + # view of the extension content. If left unimplemented, the content + # must be bytes and is not modified. + def _mangle(self, obj: T) -> bytes: + raise NotImplementedError - The default implementation does nothing at all. - - Parameters - ---------- - value : str - Extension content in runtime form. + def _unmangle(self, content: bytes) -> T: + raise NotImplementedError - Returns - ------- - str + def _sync(self) -> None: + """Synchronize content with object. - Notes - ----- - Subclasses should reimplement this method to provide the desired - mangling procedure. + This permits the runtime representation to be modified in-place + and updates the bytes representation accordingly. """ - return value + if self._object is not None: + self._content = self._mangle(self._object) + + def __repr__(self) -> str: + try: + code = extension_codes.label[self.code] + except KeyError: + # deal with unknown codes + code = self.code + return f'{self.__class__.__name__}({code}, {self._content!r})' + + def __eq__(self, other: object) -> bool: + return ( + isinstance(other, self.__class__) + and self.code == other.code + and self.content == other.content + ) + + def __ne__(self, other): + return not self == other def get_code(self): """Return the canonical extension type code.""" - return self._code - - def get_content(self): - """Return the extension content in its runtime representation.""" + return self.code + + # Canonical access to extension content + # Follows the lead of httpx.Response .content, .text and .json() + # properties/methods + @property + def content(self) -> bytes: + """Return the extension content as raw bytes.""" + self._sync() return self._content - def get_sizeondisk(self): - """Return the size of the extension in the NIfTI file.""" - # need raw value size plus 8 bytes for esize and ecode - size = len(self._mangle(self._content)) - size += 8 - # extensions size has to be a multiple of 16 bytes - if size % 16 != 0: - size += 16 - (size % 16) - return size + @property + def text(self) -> str: + """Attempt to decode the extension content as text. - def __repr__(self): - try: - code = extension_codes.label[self._code] - except KeyError: - # deal with unknown codes - code = self._code + The encoding is determined by the `encoding` attribute, which may be + set by the user or subclass. If not set, the default encoding is 'utf-8'. + """ + return self.content.decode(self.encoding or 'utf-8') + + def json(self) -> ty.Any: + """Attempt to decode the extension content as JSON. 
- s = f"Nifti1Extension('{code}', '{self._content}')" - return s + If the content is not valid JSON, a JSONDecodeError or UnicodeDecodeError + will be raised. + """ + return json.loads(self.content) - def __eq__(self, other): - return (self._code, self._content) == (other._code, other._content) + def get_object(self) -> T: + """Return the extension content in its runtime representation. - def __ne__(self, other): - return not self == other + This method may return a different type for each extension type. + For simple use cases, consider using ``.content``, ``.text`` or ``.json()`` + instead. + """ + if self._object is None: + self._object = self._unmangle(self._content) + return self._object - def write_to(self, fileobj, byteswap): + # Backwards compatibility + get_content = get_object + + def get_sizeondisk(self) -> int: + """Return the size of the extension in the NIfTI file.""" + # need raw value size plus 8 bytes for esize and ecode, rounded up to next 16 bytes + # Rounding C+8 up to M is done by (C+8 + (M-1)) // M * M + return (len(self.content) + 23) // 16 * 16 + + def write_to(self, fileobj: ty.BinaryIO, byteswap: bool = False) -> None: """Write header extensions to fileobj Write starts at fileobj current file position. @@ -402,21 +481,75 @@ def write_to(self, fileobj, byteswap): None """ extstart = fileobj.tell() - rawsize = self.get_sizeondisk() + rawsize = self.get_sizeondisk() # Calls _sync() # write esize and ecode first - extinfo = np.array((rawsize, self._code), dtype=np.int32) + extinfo = np.array((rawsize, self.code), dtype=np.int32) if byteswap: extinfo = extinfo.byteswap() fileobj.write(extinfo.tobytes()) - # followed by the actual extension content - # XXX if mangling upon load is implemented, it should be reverted here - fileobj.write(self._mangle(self._content)) + # followed by the actual extension content, synced above + fileobj.write(self._content) # be nice and zero out remaining part of the extension till the # next 16 byte border - fileobj.write(b'\x00' * (extstart + rawsize - fileobj.tell())) + pad = extstart + rawsize - fileobj.tell() + if pad: + fileobj.write(bytes(pad)) -class Nifti1DicomExtension(Nifti1Extension): +class Nifti1Extension(NiftiExtension[T]): + """Baseclass for NIfTI1 header extensions. + + This class is sufficient to handle very simple text-based extensions, such + as `comment`. More sophisticated extensions should/will be supported by + dedicated subclasses. + """ + + code = 0 # Default to unknown extension + + def _unmangle(self, value: bytes) -> T: + """Convert the extension content into its runtime representation. + + The default implementation does nothing at all. + + Parameters + ---------- + value : str + Extension content as read from file. + + Returns + ------- + The same object that was passed as `value`. + + Notes + ----- + Subclasses should reimplement this method to provide the desired + unmangling procedure and may return any type of object. + """ + return value # type: ignore[return-value] + + def _mangle(self, value: T) -> bytes: + """Convert the extension content into NIfTI file header representation. + + The default implementation does nothing at all. + + Parameters + ---------- + value : str + Extension content in runtime form. + + Returns + ------- + str + + Notes + ----- + Subclasses should reimplement this method to provide the desired + mangling procedure. 
+ """ + return value # type: ignore[return-value] + + +class Nifti1DicomExtension(Nifti1Extension[DicomDataset]): """NIfTI1 DICOM header extension This class is a thin wrapper around pydicom to read a binary DICOM @@ -426,7 +559,16 @@ class Nifti1DicomExtension(Nifti1Extension): header. """ - def __init__(self, code, content, parent_hdr=None): + code = 2 + _is_implicit_VR: bool = False + _is_little_endian: bool = True + + def __init__( + self, + code: int | str, + content: bytes | DicomDataset | None = None, + parent_hdr: Nifti1Header | None = None, + ) -> None: """ Parameters ---------- @@ -451,30 +593,28 @@ def __init__(self, code, content, parent_hdr=None): code should always be 2 for DICOM. """ - self._code = code - if parent_hdr: + if code != 2: + raise ValueError(f'code must be 2 for DICOM. Got {code}.') + + if content is None: + content = pdcm.Dataset() + + if parent_hdr is not None: self._is_little_endian = parent_hdr.endianness == '<' - else: - self._is_little_endian = True + if isinstance(content, pdcm.dataset.Dataset): - self._is_implicit_VR = False - self._raw_content = self._mangle(content) - self._content = content + super().__init__(code, object=content) elif isinstance(content, bytes): # Got a byte string - unmangle it - self._raw_content = content - self._is_implicit_VR = self._guess_implicit_VR() - ds = self._unmangle(content, self._is_implicit_VR, self._is_little_endian) - self._content = ds - elif content is None: # initialize a new dicom dataset - self._is_implicit_VR = False - self._content = pdcm.dataset.Dataset() + self._is_implicit_VR = self._guess_implicit_VR(content) + super().__init__(code, content=content) else: raise TypeError( f'content must be either a bytestring or a pydicom Dataset. ' f'Got {content.__class__}' ) - def _guess_implicit_VR(self): + @staticmethod + def _guess_implicit_VR(content) -> bool: """Try to guess DICOM syntax by checking for valid VRs. 
Without a DICOM Transfer Syntax, it's difficult to tell if Value @@ -482,19 +622,17 @@ def _guess_implicit_VR(self): This reads where the first VR would be and checks it against a list of valid VRs """ - potential_vr = self._raw_content[4:6].decode() - if potential_vr in pdcm.values.converters.keys(): - implicit_VR = False - else: - implicit_VR = True - return implicit_VR - - def _unmangle(self, value, is_implicit_VR=False, is_little_endian=True): - bio = BytesIO(value) - ds = pdcm.filereader.read_dataset(bio, is_implicit_VR, is_little_endian) - return ds + potential_vr = content[4:6].decode() + return potential_vr not in pdcm.values.converters.keys() + + def _unmangle(self, obj: bytes) -> DicomDataset: + return pdcm.filereader.read_dataset( + BytesIO(obj), + self._is_implicit_VR, + self._is_little_endian, + ) - def _mangle(self, dataset): + def _mangle(self, dataset: DicomDataset) -> bytes: bio = BytesIO() dio = pdcm.filebase.DicomFileLike(bio) dio.is_implicit_VR = self._is_implicit_VR @@ -519,6 +657,21 @@ def _mangle(self, dataset): (12, 'workflow_fwds', Nifti1Extension), (14, 'freesurfer', Nifti1Extension), (16, 'pypickle', Nifti1Extension), + (18, 'mind_ident', NiftiExtension), + (20, 'b_value', NiftiExtension), + (22, 'spherical_direction', NiftiExtension), + (24, 'dt_component', NiftiExtension), + (26, 'shc_degreeorder', NiftiExtension), + (28, 'voxbo', NiftiExtension), + (30, 'caret', NiftiExtension), + ## Defined in nibabel.cifti2.parse_cifti2 + # (32, 'cifti', Cifti2Extension), + (34, 'variable_frame_timing', NiftiExtension), + (36, 'unassigned', NiftiExtension), + (38, 'eval', NiftiExtension), + (40, 'matlab', NiftiExtension), + (42, 'quantiphyse', NiftiExtension), + (44, 'mrs', NiftiExtension[dict[str, ty.Any]]), ), fields=('code', 'label', 'handler'), ) @@ -551,7 +704,7 @@ def get_sizeondisk(self): return np.sum([e.get_sizeondisk() for e in self]) def __repr__(self): - return 'Nifti1Extensions(%s)' % ', '.join(str(e) for e in self) + return 'Nifti1Extensions({})'.format(', '.join(str(e) for e in self)) def write_to(self, fileobj, byteswap): """Write header extensions to fileobj @@ -1096,7 +1249,10 @@ def set_qform(self, affine, code=None, strip_shears=True): # (a subtle requirement of the NIFTI format qform transform) # Transform below is polar decomposition, returning the closest # orthogonal matrix PR, to input R - P, S, Qs = npl.svd(R) + try: + P, S, Qs = npl.svd(R) + except np.linalg.LinAlgError as e: + raise HeaderDataError(f'Could not decompose affine:\n{affine}') from e PR = np.dot(P, Qs) if not strip_shears and not np.allclose(PR, R): raise HeaderDataError('Shears in affine and `strip_shears` is False') @@ -1403,7 +1559,7 @@ def get_intent(self, code_repr='label'): else: raise TypeError('repr can be "label" or "code"') n_params = len(recoder.parameters[code]) if known_intent else 0 - params = (float(hdr['intent_p%d' % (i + 1)]) for i in range(n_params)) + params = (float(hdr[f'intent_p{i}']) for i in range(1, n_params + 1)) name = hdr['intent_name'].item().decode('latin-1') return label, tuple(params), name @@ -1476,8 +1632,8 @@ def set_intent(self, code, params=(), name='', allow_unknown=False): hdr['intent_name'] = name all_params = [0] * 3 all_params[: len(params)] = params[:] - for i, param in enumerate(all_params): - hdr['intent_p%d' % (i + 1)] = param + for i, param in enumerate(all_params, start=1): + hdr[f'intent_p{i}'] = param def get_slice_duration(self): """Get slice duration @@ -1755,7 +1911,7 @@ def _chk_offset(hdr, fix=False): return hdr, rep if magic == 
hdr.single_magic and offset < hdr.single_vox_offset: rep.problem_level = 40 - rep.problem_msg = 'vox offset %d too low for single file nifti1' % offset + rep.problem_msg = f'vox offset {int(offset)} too low for single file nifti1' if fix: hdr['vox_offset'] = hdr.single_vox_offset rep.fix_msg = f'setting to minimum value of {hdr.single_vox_offset}' @@ -1787,7 +1943,7 @@ def _chk_xform_code(klass, code_type, hdr, fix): if code in recoder.value_set(): return hdr, rep rep.problem_level = 30 - rep.problem_msg = '%s %d not valid' % (code_type, code) + rep.problem_msg = f'{code_type} {code} not valid' if fix: hdr[code_type] = 0 rep.fix_msg = 'setting to 0' diff --git a/nibabel/nifti2.py b/nibabel/nifti2.py index 8d9b81e1f9..9c898b47ba 100644 --- a/nibabel/nifti2.py +++ b/nibabel/nifti2.py @@ -12,6 +12,7 @@ https://www.nitrc.org/forum/message.php?msg_id=3738 """ + import numpy as np from .analyze import AnalyzeHeader diff --git a/nibabel/onetime.py b/nibabel/onetime.py index e365e81f74..f6d3633af3 100644 --- a/nibabel/onetime.py +++ b/nibabel/onetime.py @@ -1,9 +1,12 @@ """Descriptor support for NIPY -Utilities to support special Python descriptors [1,2], in particular the use of -a useful pattern for properties we call 'one time properties'. These are -object attributes which are declared as properties, but become regular -attributes once they've been read the first time. They can thus be evaluated +Utilities to support special Python descriptors [1,2], in particular +:func:`~functools.cached_property`, which has been available in the Python +standard library since Python 3.8. We currently maintain aliases from +earlier names for this descriptor, specifically `OneTimeProperty` and `auto_attr`. + +:func:`~functools.cached_property` creates properties that are computed once +and then stored as regular attributes. They can thus be evaluated later in the object's life cycle, but once evaluated they become normal, static attributes with no function call overhead on access or any other constraints. @@ -18,12 +21,10 @@ [2] Python data model, https://docs.python.org/reference/datamodel.html """ -from __future__ import annotations -import typing as ty +from __future__ import annotations -InstanceT = ty.TypeVar('InstanceT') -T = ty.TypeVar('T') +from functools import cached_property from nibabel.deprecated import deprecate_with_version @@ -33,22 +34,22 @@ class ResetMixin: - """A Mixin class to add a .reset() method to users of OneTimeProperty. + """A Mixin class to add a .reset() method to users of cached_property. - By default, auto attributes once computed, become static. If they happen + By default, cached properties, once computed, become static. If they happen to depend on other parts of an object and those parts change, their values may now be invalid. This class offers a .reset() method that users can call *explicitly* when they know the state of their objects may have changed and they want to ensure that *all* their special attributes should be invalidated. Once - reset() is called, all their auto attributes are reset to their - OneTimeProperty descriptors, and their accessor functions will be triggered - again. + reset() is called, all their cached properties are reset to their + :func:`~functools.cached_property` descriptors, + and their accessor functions will be triggered again. .. 
warning:: - If a class has a set of attributes that are OneTimeProperty, but that + If a class has a set of attributes that are cached_property, but that can be initialized from any one of them, do NOT use this mixin! For instance, UniformTimeSeries can be initialized with only sampling_rate and t0, sampling_interval and time are auto-computed. But if you were @@ -67,15 +68,15 @@ class ResetMixin: ... def __init__(self,x=1.0): ... self.x = x ... - ... @auto_attr + ... @cached_property ... def y(self): ... print('*** y computation executed ***') ... return self.x / 2.0 - ... >>> a = A(10) About to access y twice, the second time no computation is done: + >>> a.y *** y computation executed *** 5.0 @@ -83,17 +84,21 @@ class ResetMixin: 5.0 Changing x + >>> a.x = 20 a.y doesn't change to 10, since it is a static attribute: + >>> a.y 5.0 We now reset a, and this will then force all auto attributes to recompute the next time we access them: + >>> a.reset() About to access y twice again after reset(): + >>> a.y *** y computation executed *** 10.0 @@ -102,90 +107,18 @@ class ResetMixin: """ def reset(self) -> None: - """Reset all OneTimeProperty attributes that may have fired already.""" + """Reset all cached_property attributes that may have fired already.""" # To reset them, we simply remove them from the instance dict. At that # point, it's as if they had never been computed. On the next access, # the accessor function from the parent class will be called, simply # because that's how the python descriptor protocol works. for mname, mval in self.__class__.__dict__.items(): - if mname in self.__dict__ and isinstance(mval, OneTimeProperty): + if mname in self.__dict__ and isinstance(mval, cached_property): delattr(self, mname) -class OneTimeProperty(ty.Generic[T]): - """A descriptor to make special properties that become normal attributes. - - This is meant to be used mostly by the auto_attr decorator in this module. - """ - - def __init__(self, func: ty.Callable[[InstanceT], T]) -> None: - """Create a OneTimeProperty instance. - - Parameters - ---------- - func : method - - The method that will be called the first time to compute a value. - Afterwards, the method's name will be a standard attribute holding - the value of this computation. - """ - self.getter = func - self.name = func.__name__ - self.__doc__ = func.__doc__ - - @ty.overload - def __get__( - self, obj: None, objtype: type[InstanceT] | None = None - ) -> ty.Callable[[InstanceT], T]: - ... # pragma: no cover - - @ty.overload - def __get__(self, obj: InstanceT, objtype: type[InstanceT] | None = None) -> T: - ... # pragma: no cover - - def __get__( - self, obj: InstanceT | None, objtype: type[InstanceT] | None = None - ) -> T | ty.Callable[[InstanceT], T]: - """This will be called on attribute access on the class or instance.""" - if obj is None: - # Being called on the class, return the original function. This - # way, introspection works on the class. - return self.getter - - # Errors in the following line are errors in setting a OneTimeProperty - val = self.getter(obj) - - obj.__dict__[self.name] = val - return val - - -def auto_attr(func: ty.Callable[[InstanceT], T]) -> OneTimeProperty[T]: - """Decorator to create OneTimeProperty attributes. - - Parameters - ---------- - func : method - The method that will be called the first time to compute a value. - Afterwards, the method's name will be a standard attribute holding the - value of this computation. - - Examples - -------- - >>> class MagicProp: - ... @auto_attr - ... 
def a(self): - ... return 99 - ... - >>> x = MagicProp() - >>> 'a' in x.__dict__ - False - >>> x.a - 99 - >>> 'a' in x.__dict__ - True - """ - return OneTimeProperty(func) - +OneTimeProperty = cached_property +auto_attr = cached_property # ----------------------------------------------------------------------------- # Deprecated API diff --git a/nibabel/openers.py b/nibabel/openers.py index 90c7774d12..35b10c20a4 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Context manager openers for various fileobject types""" + from __future__ import annotations import gzip @@ -17,10 +18,11 @@ from ._compression import HAVE_INDEXED_GZIP, IndexedGzipFile, pyzstd -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: from types import TracebackType from _typeshed import WriteableBuffer + from typing_extensions import Self ModeRT = ty.Literal['r', 'rt'] ModeRB = ty.Literal['rb'] @@ -35,11 +37,8 @@ @ty.runtime_checkable class Fileish(ty.Protocol): - def read(self, size: int = -1, /) -> bytes: - ... # pragma: no cover - - def write(self, b: bytes, /) -> int | None: - ... # pragma: no cover + def read(self, size: int = -1, /) -> bytes: ... + def write(self, b: bytes, /) -> int | None: ... class DeterministicGzipFile(gzip.GzipFile): @@ -70,7 +69,7 @@ def __init__( raise TypeError('Must define either fileobj or filename') # Cast because GzipFile.myfileobj has type io.FileIO while open returns ty.IO fileobj = self.myfileobj = ty.cast(io.FileIO, open(filename, modestr)) - return super().__init__( + super().__init__( filename='', mode=modestr, compresslevel=compresslevel, @@ -86,7 +85,6 @@ def _gzip_open( mtime: int = 0, keep_open: bool = False, ) -> gzip.GzipFile: - if not HAVE_INDEXED_GZIP or mode != 'rb': gzip_file = DeterministicGzipFile(filename, mode, compresslevel, mtime=mtime) @@ -129,6 +127,7 @@ class Opener: passed to opening method when `fileish` is str. 
Change of defaults as for \*args """ + gz_def = (_gzip_open, ('mode', 'compresslevel', 'mtime', 'keep_open')) bz2_def = (BZ2File, ('mode', 'buffering', 'compresslevel')) zstd_def = (_zstd_open, ('mode', 'level_or_option', 'zstd_dict')) @@ -248,7 +247,7 @@ def close_if_mine(self) -> None: if self.me_opened: self.close() - def __enter__(self) -> Opener: + def __enter__(self) -> Self: return self def __exit__( diff --git a/nibabel/optpkg.py b/nibabel/optpkg.py index b59a89bb35..90b8ded518 100644 --- a/nibabel/optpkg.py +++ b/nibabel/optpkg.py @@ -1,13 +1,16 @@ """Routines to support optional packages""" + from __future__ import annotations import typing as ty -from types import ModuleType from packaging.version import Version from .tripwire import TripWire +if ty.TYPE_CHECKING: + from types import ModuleType + def _check_pkg_version(min_version: str | Version) -> ty.Callable[[ModuleType], bool]: min_ver = Version(min_version) if isinstance(min_version, str) else min_version diff --git a/nibabel/orientations.py b/nibabel/orientations.py index 075cbd4ffd..f1cdd228be 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Utilities for calculating and applying affine orientations""" + import numpy as np import numpy.linalg as npl @@ -123,7 +124,7 @@ def ornt_transform(start_ornt, end_ornt): result[start_in_idx, :] = [end_in_idx, flip] break else: - raise ValueError('Unable to find out axis %d in start_ornt' % end_out_idx) + raise ValueError(f'Unable to find out axis {end_out_idx} in start_ornt') return result @@ -321,7 +322,7 @@ def axcodes2ornt(axcodes, labels=None): [ 2., 1.]]) """ labels = list(zip('LPI', 'RAS')) if labels is None else labels - allowed_labels = sum([list(L) for L in labels], []) + [None] + allowed_labels = sum(map(list, labels), [None]) if len(allowed_labels) != len(set(allowed_labels)): raise ValueError(f'Duplicate labels in {allowed_labels}') if not set(axcodes).issubset(allowed_labels): diff --git a/nibabel/parrec.py b/nibabel/parrec.py index ec3fdea711..0a2005835f 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -6,8 +6,6 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -# Disable line length checking for PAR fragments in module docstring -# flake8: noqa E501 """Read images in PAR/REC format This is yet another MRI image format generated by Philips scanners. It is an @@ -121,6 +119,7 @@ utility via the option "--strict-sort". The dimension info can be exported to a CSV file by adding the option "--volume-info". """ + import re import warnings from collections import OrderedDict @@ -1338,7 +1337,7 @@ def from_filename( strict_sort=strict_sort, ) - load = from_filename # type: ignore + load = from_filename # type: ignore[assignment] load = PARRECImage.from_filename diff --git a/nibabel/pointset.py b/nibabel/pointset.py index 58fca148a8..889a8c70cd 100644 --- a/nibabel/pointset.py +++ b/nibabel/pointset.py @@ -17,6 +17,7 @@ adjacent points to be identified. A *triangular mesh* in particular uses triplets of adjacent vertices to describe faces. 
""" + from __future__ import annotations import math @@ -29,7 +30,7 @@ from nibabel.fileslice import strided_scalar from nibabel.spatialimages import SpatialImage -if ty.TYPE_CHECKING: # pragma: no cover +if ty.TYPE_CHECKING: from typing_extensions import Self _DType = ty.TypeVar('_DType', bound=np.dtype[ty.Any]) @@ -40,12 +41,10 @@ class CoordinateArray(ty.Protocol): shape: tuple[int, int] @ty.overload - def __array__(self, dtype: None = ..., /) -> np.ndarray[ty.Any, np.dtype[ty.Any]]: - ... # pragma: no cover + def __array__(self, dtype: None = ..., /) -> np.ndarray[ty.Any, np.dtype[ty.Any]]: ... @ty.overload - def __array__(self, dtype: _DType, /) -> np.ndarray[ty.Any, _DType]: - ... # pragma: no cover + def __array__(self, dtype: _DType, /) -> np.ndarray[ty.Any, _DType]: ... @dataclass @@ -102,7 +101,11 @@ def dim(self) -> int: """The dimensionality of the space the coordinates are in""" return self.coordinates.shape[1] - self.homogeneous - def __rmatmul__(self, affine: np.ndarray) -> Self: + # Use __rmatmul__ to prefer to compose affines. Mypy does not like that + # this conflicts with ndarray.__matmul__. We will need some more feedback + # on how this plays out for type-checking or code suggestions before we + # can do better than ignore. + def __rmatmul__(self, affine: np.ndarray) -> Self: # type: ignore[misc] """Apply an affine transformation to the pointset This will return a new pointset with an updated affine matrix only. diff --git a/nibabel/processing.py b/nibabel/processing.py index d634ce7086..6027575d47 100644 --- a/nibabel/processing.py +++ b/nibabel/processing.py @@ -16,6 +16,7 @@ Smoothing and resampling routines need scipy. """ + import numpy as np import numpy.linalg as npl diff --git a/nibabel/pydicom_compat.py b/nibabel/pydicom_compat.py index fae24e691c..76423b40a8 100644 --- a/nibabel/pydicom_compat.py +++ b/nibabel/pydicom_compat.py @@ -19,6 +19,7 @@ A deprecated copy is available here for backward compatibility. """ + from __future__ import annotations import warnings @@ -42,7 +43,7 @@ if have_dicom: # Values not imported by default - import pydicom.values # type: ignore + import pydicom.values # type: ignore[import-not-found] from pydicom.dicomio import dcmread as read_file # noqa:F401 from pydicom.sequence import Sequence # noqa:F401 diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index d2fc3ac4ca..77cf8d2d3f 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -25,6 +25,7 @@ >>> vec = np.array([1, 2, 3]).reshape((3,1)) # column vector >>> tvec = np.dot(M, vec) """ + import math import numpy as np diff --git a/nibabel/rstutils.py b/nibabel/rstutils.py index 625a2af477..cb40633e54 100644 --- a/nibabel/rstutils.py +++ b/nibabel/rstutils.py @@ -2,6 +2,7 @@ * Make ReST table given array of values """ + import numpy as np diff --git a/nibabel/spaces.py b/nibabel/spaces.py index e5b87171df..d06a39b0ed 100644 --- a/nibabel/spaces.py +++ b/nibabel/spaces.py @@ -19,6 +19,7 @@ mapping), or * a length 2 sequence with the same information (shape, affine). 
""" + from itertools import product import numpy as np diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index bcc4336f73..a8e8993597 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -129,55 +129,46 @@ >>> np.all(img3.get_fdata(dtype=np.float32) == data) True """ + from __future__ import annotations -import io import typing as ty -from collections.abc import Sequence +from functools import cache from typing import Literal import numpy as np -from .arrayproxy import ArrayLike from .casting import sctypes_aliases from .dataobj_images import DataobjImage from .filebasedimages import FileBasedHeader, FileBasedImage -from .fileholders import FileMap from .fileslice import canonical_slicers from .orientations import apply_orientation, inv_ornt_aff from .viewers import OrthoSlicer3D from .volumeutils import shape_zoom_affine -try: - from functools import cache -except ImportError: # PY38 - from functools import lru_cache as cache +if ty.TYPE_CHECKING: + import io + from collections.abc import Sequence -if ty.TYPE_CHECKING: # pragma: no cover import numpy.typing as npt + from .arrayproxy import ArrayLike + from .fileholders import FileMap + SpatialImgT = ty.TypeVar('SpatialImgT', bound='SpatialImage') SpatialHdrT = ty.TypeVar('SpatialHdrT', bound='SpatialHeader') class HasDtype(ty.Protocol): - def get_data_dtype(self) -> np.dtype: - ... # pragma: no cover - - def set_data_dtype(self, dtype: npt.DTypeLike) -> None: - ... # pragma: no cover + def get_data_dtype(self) -> np.dtype: ... + def set_data_dtype(self, dtype: npt.DTypeLike) -> None: ... @ty.runtime_checkable class SpatialProtocol(ty.Protocol): - def get_data_dtype(self) -> np.dtype: - ... # pragma: no cover - - def get_data_shape(self) -> ty.Tuple[int, ...]: - ... # pragma: no cover - - def get_zooms(self) -> ty.Tuple[float, ...]: - ... # pragma: no cover + def get_data_dtype(self) -> np.dtype: ... + def get_data_shape(self) -> tuple[int, ...]: ... + def get_zooms(self) -> tuple[float, ...]: ... class HeaderDataError(Exception): @@ -272,7 +263,7 @@ def set_zooms(self, zooms: Sequence[float]) -> None: shape = self.get_data_shape() ndim = len(shape) if len(zooms) != ndim: - raise HeaderDataError('Expecting %d zoom values for ndim %d' % (ndim, ndim)) + raise HeaderDataError(f'Expecting {ndim} zoom values for ndim {ndim}') if any(z < 0 for z in zooms): raise HeaderDataError('zooms must be positive') self._zooms = zooms diff --git a/nibabel/spm2analyze.py b/nibabel/spm2analyze.py index f63785807c..9c4c544cf5 100644 --- a/nibabel/spm2analyze.py +++ b/nibabel/spm2analyze.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Read / write access to SPM2 version of analyze image format""" + import numpy as np from . import spm99analyze as spm99 # module import diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index 974f8609cf..cdedf223e0 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Read / write access to SPM99 version of analyze image format""" + import warnings from io import BytesIO @@ -240,7 +241,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): Parameters ---------- file_map : dict - Mapping with (kay, value) pairs of (``file_type``, FileHolder + Mapping with (key, value) pairs of (``file_type``, FileHolder instance giving file-likes for each file needed for this image type. 
mmap : {True, False, 'c', 'r'}, optional, keyword only @@ -275,7 +276,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): contents = matf.read() if len(contents) == 0: return ret - import scipy.io as sio # type: ignore + import scipy.io as sio # type: ignore[import] mats = sio.loadmat(BytesIO(contents)) if 'mat' in mats: # this overrides a 'M', and includes any flip diff --git a/nibabel/streamlines/__init__.py b/nibabel/streamlines/__init__.py index f99f80e4e4..46b403b424 100644 --- a/nibabel/streamlines/__init__.py +++ b/nibabel/streamlines/__init__.py @@ -1,5 +1,5 @@ -"""Multiformat-capable streamline format read / write interface -""" +"""Multiformat-capable streamline format read / write interface""" + import os import warnings @@ -131,7 +131,7 @@ def save(tractogram, filename, **kwargs): warnings.warn(msg, ExtensionWarning) if kwargs: - msg = "A 'TractogramFile' object was provided, no need for" ' keyword arguments.' + msg = "A 'TractogramFile' object was provided, no need for keyword arguments." raise ValueError(msg) tractogram_file.save(filename) diff --git a/nibabel/streamlines/header.py b/nibabel/streamlines/header.py index 2aed10c62c..a3b52b0747 100644 --- a/nibabel/streamlines/header.py +++ b/nibabel/streamlines/header.py @@ -1,5 +1,4 @@ -"""Field class defining common header fields in tractogram files -""" +"""Field class defining common header fields in tractogram files""" class Field: diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 43df2f87e0..358c579362 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -309,7 +309,6 @@ def _read_header(cls, fileobj): offset_data = 0 with Opener(fileobj) as f: - # Record start position start_position = f.tell() diff --git a/nibabel/streamlines/tests/test_array_sequence.py b/nibabel/streamlines/tests/test_array_sequence.py index 0c8557fe50..96e66b44c5 100644 --- a/nibabel/streamlines/tests/test_array_sequence.py +++ b/nibabel/streamlines/tests/test_array_sequence.py @@ -1,6 +1,5 @@ import itertools import os -import sys import tempfile import unittest @@ -80,7 +79,7 @@ def test_creating_arraysequence_from_list(self): # List of ndarrays. 
N = 5 for ndim in range(1, N + 1): - common_shape = tuple([SEQ_DATA['rng'].randint(1, 10) for _ in range(ndim - 1)]) + common_shape = tuple(SEQ_DATA['rng'].randint(1, 10) for _ in range(ndim - 1)) data = generate_data(nb_arrays=5, common_shape=common_shape, rng=SEQ_DATA['rng']) check_arr_seq(ArraySequence(data), data) @@ -220,7 +219,7 @@ def test_arraysequence_extend(self): seq.extend(data) # Extend after extracting some slice - working_slice = seq[:2] + _ = seq[:2] seq.extend(ArraySequence(new_data)) def test_arraysequence_getitem(self): diff --git a/nibabel/streamlines/tests/test_streamlines.py b/nibabel/streamlines/tests/test_streamlines.py index 300397b2b4..8811ddcfa0 100644 --- a/nibabel/streamlines/tests/test_streamlines.py +++ b/nibabel/streamlines/tests/test_streamlines.py @@ -1,5 +1,4 @@ import os -import tempfile import unittest import warnings from io import BytesIO @@ -7,7 +6,6 @@ import numpy as np import pytest -from numpy.compat.py3k import asbytes import nibabel as nib from nibabel.testing import clear_and_catch_warnings, data_path, error_warnings @@ -21,7 +19,7 @@ DATA = {} -def setup(): +def setup_module(): global DATA DATA['empty_filenames'] = [pjoin(data_path, 'empty' + ext) for ext in FORMATS.keys()] DATA['simple_filenames'] = [pjoin(data_path, 'simple' + ext) for ext in FORMATS.keys()] @@ -96,7 +94,7 @@ def test_is_supported_detect_format(tmp_path): # Valid file without extension for tfile_cls in FORMATS.values(): f = BytesIO() - f.write(asbytes(tfile_cls.MAGIC_NUMBER)) + f.write(tfile_cls.MAGIC_NUMBER) f.seek(0, os.SEEK_SET) assert nib.streamlines.is_supported(f) assert nib.streamlines.detect_format(f) is tfile_cls @@ -105,7 +103,7 @@ def test_is_supported_detect_format(tmp_path): for tfile_cls in FORMATS.values(): fpath = tmp_path / 'test.txt' with open(fpath, 'w+b') as f: - f.write(asbytes(tfile_cls.MAGIC_NUMBER)) + f.write(tfile_cls.MAGIC_NUMBER) f.seek(0, os.SEEK_SET) assert nib.streamlines.is_supported(f) assert nib.streamlines.detect_format(f) is tfile_cls @@ -193,13 +191,13 @@ def test_save_tractogram_file(self): trk_file = trk.TrkFile(tractogram) # No need for keyword arguments. - with self.assertRaises(ValueError): + with pytest.raises(ValueError): nib.streamlines.save(trk_file, 'dummy.trk', header={}) # Wrong extension. 
with pytest.warns(ExtensionWarning, match='extension'): trk_file = trk.TrkFile(tractogram) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): nib.streamlines.save(trk_file, 'dummy.tck', header={}) with InTemporaryDirectory(): @@ -209,7 +207,7 @@ def test_save_tractogram_file(self): def test_save_empty_file(self): tractogram = Tractogram(affine_to_rasmm=np.eye(4)) - for ext, cls in FORMATS.items(): + for ext in FORMATS: with InTemporaryDirectory(): filename = 'streamlines' + ext nib.streamlines.save(tractogram, filename) @@ -218,7 +216,7 @@ def test_save_empty_file(self): def test_save_simple_file(self): tractogram = Tractogram(DATA['streamlines'], affine_to_rasmm=np.eye(4)) - for ext, cls in FORMATS.items(): + for ext in FORMATS: with InTemporaryDirectory(): filename = 'streamlines' + ext nib.streamlines.save(tractogram, filename) @@ -264,7 +262,7 @@ def test_save_complex_file(self): def test_save_sliced_tractogram(self): tractogram = Tractogram(DATA['streamlines'], affine_to_rasmm=np.eye(4)) original_tractogram = tractogram.copy() - for ext, cls in FORMATS.items(): + for ext in FORMATS: with InTemporaryDirectory(): filename = 'streamlines' + ext nib.streamlines.save(tractogram[::2], filename) @@ -274,18 +272,18 @@ def test_save_sliced_tractogram(self): assert_tractogram_equal(tractogram, original_tractogram) def test_load_unknown_format(self): - with self.assertRaises(ValueError): + with pytest.raises(ValueError): nib.streamlines.load('') def test_save_unknown_format(self): - with self.assertRaises(ValueError): + with pytest.raises(ValueError): nib.streamlines.save(Tractogram(), '') def test_save_from_generator(self): tractogram = Tractogram(DATA['streamlines'], affine_to_rasmm=np.eye(4)) # Just to create a generator - for ext, _ in FORMATS.items(): + for ext in FORMATS: filtered = (s for s in tractogram.streamlines if True) lazy_tractogram = LazyTractogram(lambda: filtered, affine_to_rasmm=np.eye(4)) diff --git a/nibabel/streamlines/tests/test_tck.py b/nibabel/streamlines/tests/test_tck.py index 3df7dd4f2d..083ab8e6e9 100644 --- a/nibabel/streamlines/tests/test_tck.py +++ b/nibabel/streamlines/tests/test_tck.py @@ -8,7 +8,6 @@ from numpy.testing import assert_array_equal from ...testing import data_path, error_warnings -from .. import tck as tck_module from ..array_sequence import ArraySequence from ..tck import TckFile from ..tractogram import Tractogram @@ -138,7 +137,7 @@ def test_load_file_with_wrong_information(self): # Simulate a TCK file with no `file` field. new_tck_file = tck_file.replace(b'\nfile: . 67', b'') - with pytest.warns(HeaderWarning, match="Missing 'file'") as w: + with pytest.warns(HeaderWarning, match="Missing 'file'"): tck = TckFile.load(BytesIO(new_tck_file)) assert_array_equal(tck.header['file'], '. 
56') diff --git a/nibabel/streamlines/tests/test_tractogram.py b/nibabel/streamlines/tests/test_tractogram.py index 30294be438..72b84fac6e 100644 --- a/nibabel/streamlines/tests/test_tractogram.py +++ b/nibabel/streamlines/tests/test_tractogram.py @@ -1,6 +1,5 @@ import copy import operator -import sys import unittest import warnings from collections import defaultdict @@ -50,8 +49,8 @@ def make_fake_tractogram( ): """Make multiple streamlines according to provided requirements.""" all_streamlines = [] - all_data_per_point = defaultdict(lambda: []) - all_data_per_streamline = defaultdict(lambda: []) + all_data_per_point = defaultdict(list) + all_data_per_streamline = defaultdict(list) for nb_points in list_nb_points: data = make_fake_streamline( nb_points, data_per_point_shapes, data_for_streamline_shapes, rng @@ -80,6 +79,7 @@ def make_dummy_streamline(nb_points): 'mean_curvature': np.array([1.11], dtype='f4'), 'mean_torsion': np.array([1.22], dtype='f4'), 'mean_colors': np.array([1, 0, 0], dtype='f4'), + 'clusters_labels': np.array([0, 1], dtype='i4'), } elif nb_points == 2: @@ -92,6 +92,7 @@ def make_dummy_streamline(nb_points): 'mean_curvature': np.array([2.11], dtype='f4'), 'mean_torsion': np.array([2.22], dtype='f4'), 'mean_colors': np.array([0, 1, 0], dtype='f4'), + 'clusters_labels': np.array([2, 3, 4], dtype='i4'), } elif nb_points == 5: @@ -104,6 +105,7 @@ def make_dummy_streamline(nb_points): 'mean_curvature': np.array([3.11], dtype='f4'), 'mean_torsion': np.array([3.22], dtype='f4'), 'mean_colors': np.array([0, 0, 1], dtype='f4'), + 'clusters_labels': np.array([5, 6, 7, 8], dtype='i4'), } return streamline, data_per_point, data_for_streamline @@ -119,6 +121,7 @@ def setup_module(): DATA['mean_curvature'] = [] DATA['mean_torsion'] = [] DATA['mean_colors'] = [] + DATA['clusters_labels'] = [] for nb_points in [1, 2, 5]: data = make_dummy_streamline(nb_points) streamline, data_per_point, data_for_streamline = data @@ -128,12 +131,14 @@ def setup_module(): DATA['mean_curvature'].append(data_for_streamline['mean_curvature']) DATA['mean_torsion'].append(data_for_streamline['mean_torsion']) DATA['mean_colors'].append(data_for_streamline['mean_colors']) + DATA['clusters_labels'].append(data_for_streamline['clusters_labels']) DATA['data_per_point'] = {'colors': DATA['colors'], 'fa': DATA['fa']} DATA['data_per_streamline'] = { 'mean_curvature': DATA['mean_curvature'], 'mean_torsion': DATA['mean_torsion'], 'mean_colors': DATA['mean_colors'], + 'clusters_labels': DATA['clusters_labels'], } DATA['empty_tractogram'] = Tractogram(affine_to_rasmm=np.eye(4)) @@ -154,6 +159,7 @@ def setup_module(): 'mean_curvature': lambda: (e for e in DATA['mean_curvature']), 'mean_torsion': lambda: (e for e in DATA['mean_torsion']), 'mean_colors': lambda: (e for e in DATA['mean_colors']), + 'clusters_labels': lambda: (e for e in DATA['clusters_labels']), } DATA['lazy_tractogram'] = LazyTractogram( @@ -165,7 +171,6 @@ def setup_module(): def check_tractogram_item(tractogram_item, streamline, data_for_streamline={}, data_for_points={}): - assert_array_equal(tractogram_item.streamline, streamline) assert len(tractogram_item.data_for_streamline) == len(data_for_streamline) @@ -214,7 +219,10 @@ def test_per_array_dict_creation(self): data_dict = PerArrayDict(nb_streamlines, data_per_streamline) assert data_dict.keys() == data_per_streamline.keys() for k in data_dict.keys(): - assert_array_equal(data_dict[k], data_per_streamline[k]) + if isinstance(data_dict[k], np.ndarray) and np.all( + data_dict[k].shape[0] == 
data_dict[k].shape + ): + assert_array_equal(data_dict[k], data_per_streamline[k]) del data_dict['mean_curvature'] assert len(data_dict) == len(data_per_streamline) - 1 @@ -224,7 +232,10 @@ def test_per_array_dict_creation(self): data_dict = PerArrayDict(nb_streamlines, data_per_streamline) assert data_dict.keys() == data_per_streamline.keys() for k in data_dict.keys(): - assert_array_equal(data_dict[k], data_per_streamline[k]) + if isinstance(data_dict[k], np.ndarray) and np.all( + data_dict[k].shape[0] == data_dict[k].shape + ): + assert_array_equal(data_dict[k], data_per_streamline[k]) del data_dict['mean_curvature'] assert len(data_dict) == len(data_per_streamline) - 1 @@ -234,7 +245,10 @@ def test_per_array_dict_creation(self): data_dict = PerArrayDict(nb_streamlines, **data_per_streamline) assert data_dict.keys() == data_per_streamline.keys() for k in data_dict.keys(): - assert_array_equal(data_dict[k], data_per_streamline[k]) + if isinstance(data_dict[k], np.ndarray) and np.all( + data_dict[k].shape[0] == data_dict[k].shape + ): + assert_array_equal(data_dict[k], data_per_streamline[k]) del data_dict['mean_curvature'] assert len(data_dict) == len(data_per_streamline) - 1 @@ -261,6 +275,7 @@ def test_extend(self): 'mean_curvature': 2 * np.array(DATA['mean_curvature']), 'mean_torsion': 3 * np.array(DATA['mean_torsion']), 'mean_colors': 4 * np.array(DATA['mean_colors']), + 'clusters_labels': 5 * np.array(DATA['clusters_labels'], dtype=object), } sdict2 = PerArrayDict(len(DATA['tractogram']), new_data) @@ -284,7 +299,8 @@ def test_extend(self): 'mean_curvature': 2 * np.array(DATA['mean_curvature']), 'mean_torsion': 3 * np.array(DATA['mean_torsion']), 'mean_colors': 4 * np.array(DATA['mean_colors']), - 'other': 5 * np.array(DATA['mean_colors']), + 'clusters_labels': 5 * np.array(DATA['clusters_labels'], dtype=object), + 'other': 6 * np.array(DATA['mean_colors']), } sdict2 = PerArrayDict(len(DATA['tractogram']), new_data) @@ -305,6 +321,7 @@ def test_extend(self): 'mean_curvature': 2 * np.array(DATA['mean_curvature']), 'mean_torsion': 3 * np.array(DATA['mean_torsion']), 'mean_colors': 4 * np.array(DATA['mean_torsion']), + 'clusters_labels': 5 * np.array(DATA['clusters_labels'], dtype=object), } sdict2 = PerArrayDict(len(DATA['tractogram']), new_data) with pytest.raises(ValueError): @@ -441,7 +458,10 @@ def test_lazydict_creation(self): assert is_lazy_dict(data_dict) assert data_dict.keys() == expected_keys for k in data_dict.keys(): - assert_array_equal(list(data_dict[k]), list(DATA['data_per_streamline'][k])) + if isinstance(data_dict[k], np.ndarray) and np.all( + data_dict[k].shape[0] == data_dict[k].shape + ): + assert_array_equal(list(data_dict[k]), list(DATA['data_per_streamline'][k])) assert len(data_dict) == len(DATA['data_per_streamline_func']) @@ -578,6 +598,7 @@ def test_tractogram_add_new_data(self): t.data_per_streamline['mean_curvature'] = DATA['mean_curvature'] t.data_per_streamline['mean_torsion'] = DATA['mean_torsion'] t.data_per_streamline['mean_colors'] = DATA['mean_colors'] + t.data_per_streamline['clusters_labels'] = DATA['clusters_labels'] assert_tractogram_equal(t, DATA['tractogram']) # Retrieve tractogram by their index. 
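[Note: the ragged `clusters_labels` entries added above hold a different number of values per streamline, so they can no longer be packed into a regular 2-D float array; the `PerArrayDict.__setitem__` change to `nibabel/streamlines/tractogram.py` further down in this diff falls back to a 1-D object array in that case. A minimal sketch of that dtype-selection rule, for illustration only — the helper name is hypothetical and not part of the patch:

import numpy as np

def pick_rows_dtype(value):
    # Mirrors the new PerArrayDict.__setitem__ logic: keep an ndarray's
    # own dtype; use object only when the rows have unequal lengths.
    dtype = np.float64
    if isinstance(value, np.ndarray):
        dtype = value.dtype
    elif not all(len(v) == len(value[0]) for v in value[1:]):
        dtype = object  # ragged rows, e.g. clusters_labels
    return np.asarray(value, dtype=dtype)

pick_rows_dtype([[0, 1], [2, 3, 4], [5, 6, 7, 8]]).dtype  # object (ragged)
pick_rows_dtype([[1.11], [2.11], [3.11]]).dtype           # float64 (regular)

Regular inputs keep the old behavior (a 2-D float array, later reshaped to one column if needed), so existing data_per_streamline users are unaffected.]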
@@ -598,6 +619,7 @@ def test_tractogram_add_new_data(self): t.data_per_streamline['mean_curvature'] = DATA['mean_curvature'] t.data_per_streamline['mean_torsion'] = DATA['mean_torsion'] t.data_per_streamline['mean_colors'] = DATA['mean_colors'] + t.data_per_streamline['clusters_labels'] = DATA['clusters_labels'] assert_tractogram_equal(t, DATA['tractogram']) def test_tractogram_copy(self): @@ -647,14 +669,6 @@ def test_creating_invalid_tractogram(self): with pytest.raises(ValueError): Tractogram(streamlines=DATA['streamlines'], data_per_point={'scalars': scalars}) - # Inconsistent dimension for a data_per_streamline. - properties = [[1.11, 1.22], [2.11], [3.11, 3.22]] - - with pytest.raises(ValueError): - Tractogram( - streamlines=DATA['streamlines'], data_per_streamline={'properties': properties} - ) - # Too many dimension for a data_per_streamline. properties = [ np.array([[1.11], [1.22]], dtype='f4'), @@ -870,6 +884,7 @@ def test_lazy_tractogram_from_data_func(self): DATA['mean_curvature'], DATA['mean_torsion'], DATA['mean_colors'], + DATA['clusters_labels'], ] def _data_gen(): @@ -879,6 +894,7 @@ def _data_gen(): 'mean_curvature': d[3], 'mean_torsion': d[4], 'mean_colors': d[5], + 'clusters_labels': d[6], } yield TractogramItem(d[0], data_for_streamline, data_for_points) diff --git a/nibabel/streamlines/tests/test_tractogram_file.py b/nibabel/streamlines/tests/test_tractogram_file.py index 53a7fb662b..6f764009f1 100644 --- a/nibabel/streamlines/tests/test_tractogram_file.py +++ b/nibabel/streamlines/tests/test_tractogram_file.py @@ -1,5 +1,4 @@ -"""Test tractogramFile base class -""" +"""Test tractogramFile base class""" import pytest @@ -8,7 +7,6 @@ def test_subclassing_tractogram_file(): - # Missing 'save' method class DummyTractogramFile(TractogramFile): @classmethod diff --git a/nibabel/streamlines/tests/test_trk.py b/nibabel/streamlines/tests/test_trk.py index b8ff43620b..4cb6032c25 100644 --- a/nibabel/streamlines/tests/test_trk.py +++ b/nibabel/streamlines/tests/test_trk.py @@ -149,7 +149,7 @@ def test_load_file_with_wrong_information(self): # Simulate a TRK where `vox_to_ras` is invalid. 
trk_struct, trk_bytes = self.trk_with_bytes() trk_struct[Field.VOXEL_TO_RASMM] = np.diag([0, 0, 0, 1]) - with clear_and_catch_warnings(record=True, modules=[trk_module]) as w: + with clear_and_catch_warnings(modules=[trk_module]): with pytest.raises(HeaderError): TrkFile.load(BytesIO(trk_bytes)) diff --git a/nibabel/streamlines/tractogram.py b/nibabel/streamlines/tractogram.py index 9e7c0f9af2..5a39b415a6 100644 --- a/nibabel/streamlines/tractogram.py +++ b/nibabel/streamlines/tractogram.py @@ -1,6 +1,7 @@ import copy import numbers -from collections.abc import MutableMapping +import types +from collections.abc import Iterable, MutableMapping from warnings import warn import numpy as np @@ -101,15 +102,28 @@ def __init__(self, n_rows=0, *args, **kwargs): super().__init__(*args, **kwargs) def __setitem__(self, key, value): - value = np.asarray(list(value)) + dtype = np.float64 + + if isinstance(value, types.GeneratorType): + value = list(value) + + if isinstance(value, np.ndarray): + dtype = value.dtype + elif not all(len(v) == len(value[0]) for v in value[1:]): + dtype = object + + value = np.asarray(value, dtype=dtype) if value.ndim == 1 and value.dtype != object: # Reshape without copy value.shape = (len(value), 1) - if value.ndim != 2: + if value.ndim != 2 and value.dtype != object: raise ValueError('data_per_streamline must be a 2D array.') + if value.dtype == object and not all(isinstance(v, Iterable) for v in value): + raise ValueError('data_per_streamline must be a 2D array') + # We make sure there is the right amount of values if 0 < self.n_rows != len(value): msg = f'The number of values ({len(value)}) should match n_elements ({self.n_rows}).' diff --git a/nibabel/streamlines/tractogram_file.py b/nibabel/streamlines/tractogram_file.py index 2cec1ea9cb..65add3e2f2 100644 --- a/nibabel/streamlines/tractogram_file.py +++ b/nibabel/streamlines/tractogram_file.py @@ -1,5 +1,5 @@ -"""Define abstract interface for Tractogram file classes -""" +"""Define abstract interface for Tractogram file classes""" + from abc import ABC, abstractmethod from .header import Field @@ -74,7 +74,7 @@ def is_correct_format(cls, fileobj): Returns True if `fileobj` is in the right streamlines file format, otherwise returns False. """ - raise NotImplementedError() + raise NotImplementedError @classmethod def create_empty_header(cls): @@ -101,7 +101,7 @@ def load(cls, fileobj, lazy_load=True): Returns an object containing tractogram data and header information. """ - raise NotImplementedError() + raise NotImplementedError @abstractmethod def save(self, fileobj): @@ -113,4 +113,4 @@ def save(self, fileobj): If string, a filename; otherwise an open file-like object opened and ready to write. """ - raise NotImplementedError() + raise NotImplementedError diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py index 966b133d1f..0b11f5684e 100644 --- a/nibabel/streamlines/trk.py +++ b/nibabel/streamlines/trk.py @@ -366,7 +366,6 @@ def _read(): tractogram = LazyTractogram.from_data_func(_read) else: - # Speed up loading by guessing a suitable buffer size. 
with Opener(fileobj) as f: old_file_position = f.tell() @@ -773,6 +772,4 @@ def __str__(self): swap_yz: {swap_yz} swap_zx: {swap_zx} n_count: {NB_STREAMLINES} -hdr_size: {hdr_size}""".format( - **vars - ) +hdr_size: {hdr_size}""".format(**vars) diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index 21ecadf841..b42baf2955 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Utilities for testing""" + from __future__ import annotations import os @@ -16,6 +17,7 @@ import unittest import warnings from contextlib import nullcontext +from importlib.resources import as_file, files from itertools import zip_longest import numpy as np @@ -25,12 +27,8 @@ from .helpers import assert_data_similar, bytesio_filemap, bytesio_round_trip from .np_features import memmap_after_ufunc -try: - from importlib.abc import Traversable - from importlib.resources import as_file, files -except ImportError: # PY38 - from importlib_resources import as_file, files - from importlib_resources.abc import Traversable +if ty.TYPE_CHECKING: + from importlib.resources.abc import Traversable def get_test_data( @@ -149,9 +147,10 @@ class clear_and_catch_warnings(warnings.catch_warnings): Examples -------- >>> import warnings - >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]): + >>> with clear_and_catch_warnings(modules=[np.lib.scimath]): ... warnings.simplefilter('always') - ... # do something that raises a warning in np.core.fromnumeric + ... # do something that raises a warning in np.lib.scimath + ... _ = np.arccos(90) """ class_modules = () diff --git a/nibabel/testing/helpers.py b/nibabel/testing/helpers.py index 2f25a354d7..ad4bf258cd 100644 --- a/nibabel/testing/helpers.py +++ b/nibabel/testing/helpers.py @@ -1,5 +1,5 @@ -"""Helper functions for tests -""" +"""Helper functions for tests""" + from io import BytesIO import numpy as np @@ -14,7 +14,7 @@ def bytesio_filemap(klass): """Return bytes io filemap for this image class `klass`""" file_map = klass.make_file_map() - for name, fileholder in file_map.items(): + for fileholder in file_map.values(): fileholder.fileobj = BytesIO() fileholder.pos = 0 return file_map diff --git a/nibabel/testing/np_features.py b/nibabel/testing/np_features.py index eeb783900a..dd21aac2c0 100644 --- a/nibabel/testing/np_features.py +++ b/nibabel/testing/np_features.py @@ -1,11 +1,11 @@ -"""Look for changes in numpy behavior over versions -""" -from functools import lru_cache +"""Look for changes in numpy behavior over versions""" + +from functools import cache import numpy as np -@lru_cache(maxsize=None) +@cache def memmap_after_ufunc() -> bool: """Return True if ufuncs on memmap arrays always return memmap arrays diff --git a/nibabel/tests/conftest.py b/nibabel/tests/conftest.py index 3cf54a34c5..fb13708450 100644 --- a/nibabel/tests/conftest.py +++ b/nibabel/tests/conftest.py @@ -6,7 +6,7 @@ # Generate dynamic fixtures def pytest_generate_tests(metafunc): if 'supported_dtype' in metafunc.fixturenames: - if metafunc.cls is None or not getattr(metafunc.cls, 'image_class'): + if metafunc.cls is None or not metafunc.cls.image_class: raise pytest.UsageError( 'Attempting to use supported_dtype fixture outside an image test case' ) diff --git a/nibabel/tests/data/check_parrec_reslice.py b/nibabel/tests/data/check_parrec_reslice.py index 8ade7f539c..244b4c3a64 100644 --- a/nibabel/tests/data/check_parrec_reslice.py +++ 
b/nibabel/tests/data/check_parrec_reslice.py @@ -21,6 +21,7 @@ The *_cor_SENSE* image has a higher RMS because the back of the phantom is out of the field of view. """ + import glob import numpy as np diff --git a/nibabel/tests/data/gen_standard.py b/nibabel/tests/data/gen_standard.py index 598726fe74..7fd05d936e 100644 --- a/nibabel/tests/data/gen_standard.py +++ b/nibabel/tests/data/gen_standard.py @@ -5,6 +5,7 @@ * standard.trk """ + import numpy as np import nibabel as nib diff --git a/nibabel/tests/nibabel_data.py b/nibabel/tests/nibabel_data.py index 8d4652d79f..5919eba925 100644 --- a/nibabel/tests/nibabel_data.py +++ b/nibabel/tests/nibabel_data.py @@ -1,11 +1,9 @@ -"""Functions / decorators for finding / requiring nibabel-data directory -""" +"""Functions / decorators for finding / requiring nibabel-data directory""" import unittest from os import environ, listdir -from os.path import dirname, exists, isdir +from os.path import dirname, exists, isdir, realpath from os.path import join as pjoin -from os.path import realpath def get_nibabel_data(): diff --git a/nibabel/tests/scriptrunner.py b/nibabel/tests/scriptrunner.py index 1ec2fcb486..2f3de50791 100644 --- a/nibabel/tests/scriptrunner.py +++ b/nibabel/tests/scriptrunner.py @@ -12,11 +12,11 @@ assert_equal(code, 0) assert_equal(stdout, b'This script ran OK') """ + import os import sys -from os.path import dirname, isdir, isfile +from os.path import dirname, isdir, isfile, pathsep, realpath from os.path import join as pjoin -from os.path import pathsep, realpath from subprocess import PIPE, Popen MY_PACKAGE = __package__ diff --git a/nibabel/tests/test_affines.py b/nibabel/tests/test_affines.py index 28f405e566..d4ea11821b 100644 --- a/nibabel/tests/test_affines.py +++ b/nibabel/tests/test_affines.py @@ -225,7 +225,6 @@ def test_rescale_affine(): orig_shape = rng.randint(low=20, high=512, size=(3,)) orig_aff = np.eye(4) orig_aff[:3, :] = rng.normal(size=(3, 4)) - orig_zooms = voxel_sizes(orig_aff) orig_axcodes = aff2axcodes(orig_aff) orig_centroid = apply_affine(orig_aff, (orig_shape - 1) // 2) diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index cb7b8d686d..befc920f1e 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -728,8 +728,8 @@ def test_data_hdr_cache(self): IC = self.image_class # save an image to a file map fm = IC.make_file_map() - for key, value in fm.items(): - fm[key].fileobj = BytesIO() + for value in fm.values(): + value.fileobj = BytesIO() shape = (2, 3, 4) data = np.arange(24, dtype=np.int8).reshape(shape) affine = np.eye(4) @@ -831,7 +831,7 @@ def test_header_updating(self): hdr = img.header hdr.set_zooms((4, 5, 6)) # Save / reload using bytes IO objects - for key, value in img.file_map.items(): + for value in img.file_map.values(): value.fileobj = BytesIO() img.to_file_map() hdr_back = img.from_file_map(img.file_map).header @@ -842,7 +842,7 @@ def test_header_updating(self): assert_array_equal(hdr.get_zooms(), (2, 3, 4)) # Modify affine in-place? Update on save. 
img.affine[0, 0] = 9 - for key, value in img.file_map.items(): + for value in img.file_map.values(): value.fileobj = BytesIO() img.to_file_map() hdr_back = img.from_file_map(img.file_map).header @@ -864,7 +864,7 @@ def test_pickle(self): assert_array_equal(img.get_fdata(), img2.get_fdata()) assert img.header == img2.header # Save / reload using bytes IO objects - for key, value in img.file_map.items(): + for value in img.file_map.values(): value.fileobj = BytesIO() img.to_file_map() img_prox = img.from_file_map(img.file_map) diff --git a/nibabel/tests/test_api_validators.py b/nibabel/tests/test_api_validators.py index 1d21092eef..a4e787465a 100644 --- a/nibabel/tests/test_api_validators.py +++ b/nibabel/tests/test_api_validators.py @@ -1,5 +1,5 @@ -"""Metaclass and class for validating instance APIs -""" +"""Metaclass and class for validating instance APIs""" + import os import pytest diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index a207e4ed6d..65b9131905 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -6,8 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Tests for arrayproxy module -""" +"""Tests for arrayproxy module""" import contextlib import gzip @@ -483,9 +482,11 @@ def test_keep_file_open_true_false_invalid(): for test in tests: filetype, kfo, have_igzip, exp_persist, exp_kfo = test - with InTemporaryDirectory(), mock.patch( - 'nibabel.openers.ImageOpener', CountingImageOpener - ), patch_indexed_gzip(have_igzip): + with ( + InTemporaryDirectory(), + mock.patch('nibabel.openers.ImageOpener', CountingImageOpener), + patch_indexed_gzip(have_igzip), + ): fname = f'testdata.{filetype}' # create the test data file if filetype == 'gz': diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py index 89e7ac6755..4a853ecf5e 100644 --- a/nibabel/tests/test_arraywriters.py +++ b/nibabel/tests/test_arraywriters.py @@ -276,7 +276,7 @@ def test_slope_inter_castable(): for out_dtt in NUMERIC_TYPES: for klass in (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter): arr = np.zeros((5,), dtype=in_dtt) - aw = klass(arr, out_dtt) # no error + klass(arr, out_dtt) # no error # Test special case of none finite # This raises error for ArrayWriter, but not for the others arr = np.array([np.inf, np.nan, -np.inf]) @@ -285,8 +285,8 @@ def test_slope_inter_castable(): in_arr = arr.astype(in_dtt) with pytest.raises(WriterError): ArrayWriter(in_arr, out_dtt) - aw = SlopeArrayWriter(arr.astype(in_dtt), out_dtt) # no error - aw = SlopeInterArrayWriter(arr.astype(in_dtt), out_dtt) # no error + SlopeArrayWriter(arr.astype(in_dtt), out_dtt) # no error + SlopeInterArrayWriter(arr.astype(in_dtt), out_dtt) # no error for in_dtt, out_dtt, arr, slope_only, slope_inter, neither in ( (np.float32, np.float32, 1, True, True, True), (np.float64, np.float32, 1, True, True, True), diff --git a/nibabel/tests/test_batteryrunners.py b/nibabel/tests/test_batteryrunners.py index 84590452ea..5cae764c8b 100644 --- a/nibabel/tests/test_batteryrunners.py +++ b/nibabel/tests/test_batteryrunners.py @@ -6,8 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Tests for BatteryRunner and Report objects -""" +"""Tests for BatteryRunner and Report objects""" import logging from io import StringIO diff --git a/nibabel/tests/test_brikhead.py b/nibabel/tests/test_brikhead.py index 5bf6e79cb9..31e0d0d47c 100644 --- a/nibabel/tests/test_brikhead.py +++ b/nibabel/tests/test_brikhead.py @@ -13,7 +13,7 @@ import pytest from numpy.testing import assert_array_equal -from .. import Nifti1Image, brikhead, load +from .. import Nifti1Image, brikhead from ..testing import assert_data_similar, data_path from .test_fileslice import slicer_samples diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py index f345952aac..c6c1ddb661 100644 --- a/nibabel/tests/test_casting.py +++ b/nibabel/tests/test_casting.py @@ -1,5 +1,5 @@ -"""Test casting utilities -""" +"""Test casting utilities""" + import os from platform import machine @@ -161,7 +161,7 @@ def test_floor_log2(): def test_able_int_type(): - # The integer type cabable of containing values + # The integer type capable of containing values for vals, exp_out in ( ([0, 1], np.uint8), ([0, 255], np.uint8), diff --git a/nibabel/tests/test_data.py b/nibabel/tests/test_data.py index abcb3caaf2..511fa7f857 100644 --- a/nibabel/tests/test_data.py +++ b/nibabel/tests/test_data.py @@ -1,6 +1,7 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Tests for data module""" + import os import sys import tempfile @@ -22,11 +23,11 @@ get_data_path, make_datasource, ) -from .test_environment import DATA_KEY, USER_KEY, with_environment +from .test_environment import DATA_KEY, USER_KEY, with_environment # noqa: F401 @pytest.fixture -def with_nimd_env(request, with_environment): +def with_nimd_env(request, with_environment): # noqa: F811 DATA_FUNCS = {} DATA_FUNCS['home_dir_func'] = nibd.get_nipy_user_dir DATA_FUNCS['sys_dir_func'] = nibd.get_nipy_system_dir @@ -159,7 +160,7 @@ def test_data_path(with_nimd_env): tmpfile = pjoin(tmpdir, 'another_example.ini') with open(tmpfile, 'w') as fobj: fobj.write('[DATA]\n') - fobj.write('path = %s\n' % '/path/two') + fobj.write('path = {}\n'.format('/path/two')) assert get_data_path() == tst_list + ['/path/two'] + old_pth diff --git a/nibabel/tests/test_dataobj_images.py b/nibabel/tests/test_dataobj_images.py index a1d2dbc9f1..877e407812 100644 --- a/nibabel/tests/test_dataobj_images.py +++ b/nibabel/tests/test_dataobj_images.py @@ -1,5 +1,4 @@ -"""Testing dataobj_images module -""" +"""Testing dataobj_images module""" import numpy as np diff --git a/nibabel/tests/test_deprecated.py b/nibabel/tests/test_deprecated.py index 2576eca3d9..01636632e4 100644 --- a/nibabel/tests/test_deprecated.py +++ b/nibabel/tests/test_deprecated.py @@ -1,5 +1,4 @@ -"""Testing `deprecated` module -""" +"""Testing `deprecated` module""" import warnings @@ -15,12 +14,12 @@ from nibabel.tests.test_deprecator import TestDeprecatorFunc as _TestDF -def setup(): +def setup_module(): # Hack nibabel version string pkg_info.cmp_pkg_version.__defaults__ = ('2.0',) -def teardown(): +def teardown_module(): # Hack nibabel version string back again pkg_info.cmp_pkg_version.__defaults__ = (pkg_info.__version__,) diff --git a/nibabel/tests/test_deprecator.py b/nibabel/tests/test_deprecator.py index 833908af94..dfff78658f 100644 --- a/nibabel/tests/test_deprecator.py +++ b/nibabel/tests/test_deprecator.py @@ -1,5 +1,4 @@ -"""Testing deprecator module / Deprecator 
class -""" +"""Testing deprecator module / Deprecator class""" import sys import warnings @@ -14,6 +13,7 @@ Deprecator, ExpiredDeprecationError, _add_dep_doc, + _dedent_docstring, _ensure_cr, ) @@ -21,6 +21,14 @@ _OWN_MODULE = sys.modules[__name__] +func_docstring = ( + f'A docstring\n \n foo\n \n{indent(TESTSETUP, " ", lambda x: True)}' + f' Some text\n{indent(TESTCLEANUP, " ", lambda x: True)}' +) + +if sys.version_info >= (3, 13): + func_docstring = _dedent_docstring(func_docstring) + def test__ensure_cr(): # Make sure text ends with carriage return @@ -92,11 +100,7 @@ def test_dep_func(self): with pytest.deprecated_call() as w: assert func(1, 2) is None assert len(w) == 1 - assert ( - func.__doc__ - == f'A docstring\n \n foo\n \n{indent(TESTSETUP, " ", lambda x: True)}' - f' Some text\n{indent(TESTCLEANUP, " ", lambda x: True)}' - ) + assert func.__doc__ == func_docstring # Try some since and until versions func = dec('foo', '1.1')(func_no_doc) diff --git a/nibabel/tests/test_dft.py b/nibabel/tests/test_dft.py index f756600fd3..6155dda83c 100644 --- a/nibabel/tests/test_dft.py +++ b/nibabel/tests/test_dft.py @@ -1,5 +1,4 @@ -"""Testing dft -""" +"""Testing dft""" import os import sqlite3 @@ -27,7 +26,7 @@ data_dir = pjoin(dirname(__file__), 'data') -def setUpModule(): +def setup_module(): if os.name == 'nt': raise unittest.SkipTest('FUSE not available for windows, skipping dft tests') if not have_dicom: @@ -59,7 +58,7 @@ def db(monkeypatch): and not modify the host filesystem.""" database = dft._DB(fname=':memory:') monkeypatch.setattr(dft, 'DB', database) - yield database + return database def test_init(db): diff --git a/nibabel/tests/test_diff.py b/nibabel/tests/test_diff.py index fee71d628b..798a7f7b30 100644 --- a/nibabel/tests/test_diff.py +++ b/nibabel/tests/test_diff.py @@ -1,7 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -"""Test diff -""" +"""Test diff""" from os.path import abspath, dirname from os.path import join as pjoin diff --git a/nibabel/tests/test_ecat.py b/nibabel/tests/test_ecat.py index 6a076cbc38..702913e14d 100644 --- a/nibabel/tests/test_ecat.py +++ b/nibabel/tests/test_ecat.py @@ -8,7 +8,6 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## import os -import warnings from pathlib import Path from unittest import TestCase diff --git a/nibabel/tests/test_ecat_data.py b/nibabel/tests/test_ecat_data.py index b7dbe4750a..427645b92a 100644 --- a/nibabel/tests/test_ecat_data.py +++ b/nibabel/tests/test_ecat_data.py @@ -6,14 +6,13 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Test we can correctly import example ECAT files -""" +"""Test we can correctly import example ECAT files""" import os from os.path import join as pjoin import numpy as np -from numpy.testing import assert_almost_equal, assert_array_equal +from numpy.testing import assert_almost_equal from ..ecat import load from .nibabel_data import get_nibabel_data, needs_nibabel_data diff --git a/nibabel/tests/test_environment.py b/nibabel/tests/test_environment.py index afb6d36f84..aa58d9b8e0 100644 --- a/nibabel/tests/test_environment.py +++ b/nibabel/tests/test_environment.py @@ -1,5 +1,4 @@ -"""Testing environment settings -""" +"""Testing environment settings""" import os from os import environ as env diff --git a/nibabel/tests/test_euler.py b/nibabel/tests/test_euler.py index b0c965c399..4d251a16e3 100644 --- a/nibabel/tests/test_euler.py +++ b/nibabel/tests/test_euler.py @@ -21,12 +21,8 @@ FLOAT_EPS = np.finfo(np.float64).eps # Example rotations """ -eg_rots = [] params = np.arange(-pi * 2, pi * 2.5, pi / 2) -for x in params: - for y in params: - for z in params: - eg_rots.append((x, y, z)) +eg_rots = [(x, y, z) for x in params for y in params for z in params] def x_only(x): @@ -123,7 +119,7 @@ def test_euler_mat_1(): assert_array_equal(M, np.eye(3)) -@pytest.mark.parametrize('x, y, z', eg_rots) +@pytest.mark.parametrize(('x', 'y', 'z'), eg_rots) def test_euler_mat_2(x, y, z): M1 = nea.euler2mat(z, y, x) M2 = sympy_euler(z, y, x) @@ -176,7 +172,7 @@ def test_euler_instability(): assert not np.allclose(M_e, M_e_back) -@pytest.mark.parametrize('x, y, z', eg_rots) +@pytest.mark.parametrize(('x', 'y', 'z'), eg_rots) def test_quats(x, y, z): M1 = nea.euler2mat(z, y, x) quatM = nq.mat2quat(M1) diff --git a/nibabel/tests/test_filebasedimages.py b/nibabel/tests/test_filebasedimages.py index 3aa1ae78c5..7d162c0917 100644 --- a/nibabel/tests/test_filebasedimages.py +++ b/nibabel/tests/test_filebasedimages.py @@ -1,5 +1,4 @@ -"""Testing filebasedimages module -""" +"""Testing filebasedimages module""" import warnings from itertools import product diff --git a/nibabel/tests/test_filehandles.py b/nibabel/tests/test_filehandles.py index 506a623758..c985d35440 100644 --- a/nibabel/tests/test_filehandles.py +++ b/nibabel/tests/test_filehandles.py @@ -33,8 +33,7 @@ def test_multiload(): tmpdir = mkdtemp() fname = pjoin(tmpdir, 'test.img') save(img, fname) - for i in range(N): - imgs.append(load(fname)) + imgs.extend(load(fname) for _ in range(N)) finally: del img, imgs shutil.rmtree(tmpdir) diff --git a/nibabel/tests/test_fileholders.py b/nibabel/tests/test_fileholders.py index 33b3f76e6f..83fe75aecc 100644 --- a/nibabel/tests/test_fileholders.py +++ b/nibabel/tests/test_fileholders.py @@ -1,5 +1,4 @@ -"""Testing fileholders -""" +"""Testing fileholders""" from io import BytesIO diff --git a/nibabel/tests/test_filename_parser.py b/nibabel/tests/test_filename_parser.py index 5d352f72dd..4e53cb2e5d 100644 --- a/nibabel/tests/test_filename_parser.py +++ b/nibabel/tests/test_filename_parser.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Tests for filename container""" + import pathlib import pytest diff --git a/nibabel/tests/test_files_interface.py b/nibabel/tests/test_files_interface.py index 52557d353d..b3562b6083 100644 --- a/nibabel/tests/test_files_interface.py +++ b/nibabel/tests/test_files_interface.py @@ -6,8 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Testing filesets - a draft -""" +"""Testing filesets - a draft""" from io import BytesIO @@ -29,7 +28,7 @@ def test_files_spatialimages(): ] for klass in klasses: file_map = klass.make_file_map() - for key, value in file_map.items(): + for value in file_map.values(): assert value.filename is None assert value.fileobj is None assert value.pos == 0 @@ -42,7 +41,7 @@ def test_files_spatialimages(): img = klass(arr.astype(np.float32), aff) else: img = klass(arr, aff) - for key, value in img.file_map.items(): + for value in img.file_map.values(): assert value.filename is None assert value.fileobj is None assert value.pos == 0 diff --git a/nibabel/tests/test_fileslice.py b/nibabel/tests/test_fileslice.py index e9f65e45a2..355743b04e 100644 --- a/nibabel/tests/test_fileslice.py +++ b/nibabel/tests/test_fileslice.py @@ -1,6 +1,5 @@ """Test slicing of file-like objects""" - import time from functools import partial from io import BytesIO diff --git a/nibabel/tests/test_fileutils.py b/nibabel/tests/test_fileutils.py index 21c7676fce..bc202c6682 100644 --- a/nibabel/tests/test_fileutils.py +++ b/nibabel/tests/test_fileutils.py @@ -6,9 +6,7 @@ # copyright and license terms. # # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Testing fileutils module -""" - +"""Testing fileutils module""" import pytest diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index 3e6e7f426b..82c8e667a9 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -1,10 +1,8 @@ -"""Test floating point deconstructions and floor methods -""" +"""Test floating point deconstructions and floor methods""" + import sys -from contextlib import nullcontext import numpy as np -import pytest from packaging.version import Version from ..casting import ( @@ -13,7 +11,6 @@ _check_nmant, ceil_exact, floor_exact, - floor_log2, have_binary128, longdouble_precision_improved, ok_floats, diff --git a/nibabel/tests/test_funcs.py b/nibabel/tests/test_funcs.py index 10f6e90813..8666406168 100644 --- a/nibabel/tests/test_funcs.py +++ b/nibabel/tests/test_funcs.py @@ -23,7 +23,7 @@ def _as_fname(img): global _counter - fname = 'img%3d.nii' % _counter + fname = f'img{_counter:3d}.nii' _counter = _counter + 1 save(img, fname) return fname @@ -58,7 +58,6 @@ def test_concat(): # Loop over every possible axis, including None (explicit and implied) for axis in list(range(-(dim - 2), (dim - 1))) + [None, '__default__']: - # Allow testing default vs. passing explicit param if axis == '__default__': np_concat_kwargs = dict(axis=-1) diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index 86c04985f8..5898762322 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -50,7 +50,6 @@ clear_and_catch_warnings, deprecated_to, expires, - nullcontext, ) from .. 
import ( diff --git a/nibabel/tests/test_image_load_save.py b/nibabel/tests/test_image_load_save.py index 706a87f10f..0e5fd57d08 100644 --- a/nibabel/tests/test_image_load_save.py +++ b/nibabel/tests/test_image_load_save.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Tests for loader function""" + import logging import pathlib import shutil @@ -24,7 +25,6 @@ MGHImage, Minc1Image, Minc2Image, - Nifti1Header, Nifti1Image, Nifti1Pair, Nifti2Image, @@ -131,7 +131,7 @@ def test_save_load(): affine[:3, 3] = [3, 2, 1] img = ni1.Nifti1Image(data, affine) img.set_data_dtype(npt) - with InTemporaryDirectory() as pth: + with InTemporaryDirectory(): nifn = 'an_image.nii' sifn = 'another_image.img' ni1.save(img, nifn) diff --git a/nibabel/tests/test_image_types.py b/nibabel/tests/test_image_types.py index da2f93e21f..bc50c8417e 100644 --- a/nibabel/tests/test_image_types.py +++ b/nibabel/tests/test_image_types.py @@ -88,7 +88,6 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, msg): irrelevant=b'a' * (sizeof_hdr - 1), # A too-small sniff, query bad_sniff=b'a' * sizeof_hdr, # Bad sniff, should fail ).items(): - for klass in img_klasses: if klass == expected_img_klass: # Class will load unless you pass a bad sniff, diff --git a/nibabel/tests/test_imageclasses.py b/nibabel/tests/test_imageclasses.py index 74f05dc6e3..90ef966d2d 100644 --- a/nibabel/tests/test_imageclasses.py +++ b/nibabel/tests/test_imageclasses.py @@ -1,15 +1,11 @@ -"""Testing imageclasses module -""" +"""Testing imageclasses module""" -import warnings from os.path import dirname from os.path import join as pjoin import numpy as np -import pytest import nibabel as nib -from nibabel import imageclasses from nibabel.analyze import AnalyzeImage from nibabel.imageclasses import spatial_axes_first from nibabel.nifti1 import Nifti1Image diff --git a/nibabel/tests/test_imageglobals.py b/nibabel/tests/test_imageglobals.py index ac043d192b..9de72e87c6 100644 --- a/nibabel/tests/test_imageglobals.py +++ b/nibabel/tests/test_imageglobals.py @@ -6,8 +6,8 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Tests for imageglobals module -""" +"""Tests for imageglobals module""" + from .. 
import imageglobals as igs diff --git a/nibabel/tests/test_init.py b/nibabel/tests/test_init.py index 2317a6397e..d339c4e26b 100644 --- a/nibabel/tests/test_init.py +++ b/nibabel/tests/test_init.py @@ -1,18 +1,15 @@ import pathlib +import unittest +from importlib.resources import files from unittest import mock import pytest -try: - from importlib.resources import files -except ImportError: - from importlib_resources import files - import nibabel as nib @pytest.mark.parametrize( - 'verbose, v_args', [(-2, ['-qq']), (-1, ['-q']), (0, []), (1, ['-v']), (2, ['-vv'])] + ('verbose', 'v_args'), [(-2, ['-qq']), (-1, ['-q']), (0, []), (1, ['-v']), (2, ['-vv'])] ) @pytest.mark.parametrize('doctests', (True, False)) @pytest.mark.parametrize('coverage', (True, False)) diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index 401ed04535..d039263bd1 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -1,5 +1,4 @@ -"""Testing loadsave module -""" +"""Testing loadsave module""" import pathlib import shutil diff --git a/nibabel/tests/test_minc1.py b/nibabel/tests/test_minc1.py index be4f0deb07..8f88bf802d 100644 --- a/nibabel/tests/test_minc1.py +++ b/nibabel/tests/test_minc1.py @@ -9,8 +9,6 @@ import bz2 import gzip -import types -import warnings from io import BytesIO from os.path import join as pjoin @@ -19,12 +17,10 @@ from numpy.testing import assert_array_equal from .. import Nifti1Image, load, minc1 -from ..deprecated import ModuleProxy -from ..deprecator import ExpiredDeprecationError from ..externals.netcdf import netcdf_file from ..minc1 import Minc1File, Minc1Image, MincHeader from ..optpkg import optional_package -from ..testing import assert_data_similar, clear_and_catch_warnings, data_path +from ..testing import assert_data_similar, data_path from ..tmpdirs import InTemporaryDirectory from . import test_spatialimages as tsi from .test_fileslice import slicer_samples diff --git a/nibabel/tests/test_minc2.py b/nibabel/tests/test_minc2.py index e76cb05ce7..4c2973a728 100644 --- a/nibabel/tests/test_minc2.py +++ b/nibabel/tests/test_minc2.py @@ -129,5 +129,5 @@ def test_bad_diminfo(): # File has a bad spacing field 'xspace' when it should be # `irregular`, `regular__` or absent (default to regular__). # We interpret an invalid spacing as absent, but warn. - with pytest.warns(UserWarning) as w: + with pytest.warns(UserWarning): Minc2Image.from_filename(fname) diff --git a/nibabel/tests/test_minc2_data.py b/nibabel/tests/test_minc2_data.py index e96e716699..a5ea38a8a9 100644 --- a/nibabel/tests/test_minc2_data.py +++ b/nibabel/tests/test_minc2_data.py @@ -6,8 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Test we can correctly import example MINC2_PATH files -""" +"""Test we can correctly import example MINC2_PATH files""" import os from os.path import join as pjoin diff --git a/nibabel/tests/test_mriutils.py b/nibabel/tests/test_mriutils.py index 848579cee6..02b9da5482 100644 --- a/nibabel/tests/test_mriutils.py +++ b/nibabel/tests/test_mriutils.py @@ -6,9 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Testing mriutils module -""" - +"""Testing mriutils module""" import pytest from numpy.testing import assert_almost_equal diff --git a/nibabel/tests/test_nibabel_data.py b/nibabel/tests/test_nibabel_data.py index 1687589549..7e319ac3f5 100644 --- a/nibabel/tests/test_nibabel_data.py +++ b/nibabel/tests/test_nibabel_data.py @@ -1,10 +1,8 @@ -"""Tests for ``get_nibabel_data`` -""" +"""Tests for ``get_nibabel_data``""" import os -from os.path import dirname, isdir +from os.path import dirname, isdir, realpath from os.path import join as pjoin -from os.path import realpath from . import nibabel_data as nibd diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index c7c4d1d84b..f0029681b8 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Tests for nifti reading package""" + import os import struct import unittest @@ -537,7 +538,7 @@ def test_slice_times(self): hdr.set_slice_duration(0.1) # We need a function to print out the Nones and floating point # values in a predictable way, for the tests below. - _stringer = lambda val: val is not None and '%2.1f' % val or None + _stringer = lambda val: val is not None and f'{val:2.1f}' or None _print_me = lambda s: list(map(_stringer, s)) # The following examples are from the nifti1.h documentation. hdr['slice_code'] = slice_order_codes['sequential increasing'] @@ -577,12 +578,12 @@ def test_slice_times(self): with pytest.raises(HeaderDataError): # all None hdr.set_slice_times((None,) * len(times)) - n_mid_times = times[:] + n_mid_times = times.copy() n_mid_times[3] = None with pytest.raises(HeaderDataError): # None in middle hdr.set_slice_times(n_mid_times) - funny_times = times[:] + funny_times = times.copy() funny_times[3] = 0.05 with pytest.raises(HeaderDataError): # can't get single slice duration @@ -731,7 +732,6 @@ def unshear_44(affine): class TestNifti1SingleHeader(TestNifti1PairHeader): - header_class = Nifti1Header def test_empty(self): @@ -820,7 +820,7 @@ def _qform_rt(self, img): hdr['qform_code'] = 3 hdr['sform_code'] = 4 # Save / reload using bytes IO objects - for key, value in img.file_map.items(): + for value in img.file_map.values(): value.fileobj = BytesIO() img.to_file_map() return img.from_file_map(img.file_map) @@ -1224,6 +1224,32 @@ def test_ext_eq(): assert not ext == ext2 +def test_extension_content_access(): + ext = Nifti1Extension('comment', b'123') + # Unmangled content access + assert ext.get_content() == b'123' + + # Raw, text and JSON access + assert ext.content == b'123' + assert ext.text == '123' + assert ext.json() == 123 + + # Encoding can be set + ext.encoding = 'ascii' + assert ext.text == '123' + + # Test that encoding errors are caught + ascii_ext = Nifti1Extension('comment', 'hôpital'.encode()) + ascii_ext.encoding = 'ascii' + with pytest.raises(UnicodeDecodeError): + ascii_ext.text + + json_ext = Nifti1Extension('unknown', b'{"a": 1}') + assert json_ext.content == b'{"a": 1}' + assert json_ext.text == '{"a": 1}' + assert json_ext.json() == {'a': 1} + + def test_extension_codes(): for k in extension_codes.keys(): Nifti1Extension(k, 'somevalue') @@ -1339,7 +1365,7 @@ def test_nifti_dicom_extension(): dcmbytes_explicit_be = struct.pack('>2H2sH4s', 0x10, 0x20, b'LO', 4, b'NiPy') hdr_be = Nifti1Header(endianness='>') # Big Endian Nifti1Header dcmext = Nifti1DicomExtension(2, dcmbytes_explicit_be, parent_hdr=hdr_be) assert dcmext.__class__ == Nifti1DicomExtension -
assert dcmext._guess_implicit_VR() is False + assert dcmext._is_implicit_VR is False assert dcmext.get_code() == 2 assert dcmext.get_content().PatientID == 'NiPy' assert dcmext.get_content()[0x10, 0x20].value == 'NiPy' diff --git a/nibabel/tests/test_nifti2.py b/nibabel/tests/test_nifti2.py index 742ef148bf..01d44c1595 100644 --- a/nibabel/tests/test_nifti2.py +++ b/nibabel/tests/test_nifti2.py @@ -7,13 +7,14 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Tests for nifti2 reading package""" + import os import numpy as np from numpy.testing import assert_array_equal from .. import nifti2 -from ..nifti1 import Nifti1Extension, Nifti1Extensions, Nifti1Header, Nifti1PairHeader +from ..nifti1 import Nifti1Extension, Nifti1Header, Nifti1PairHeader from ..nifti2 import Nifti2Header, Nifti2Image, Nifti2Pair, Nifti2PairHeader from ..testing import data_path from . import test_nifti1 as tn1 diff --git a/nibabel/tests/test_onetime.py b/nibabel/tests/test_onetime.py index b22a4ef3ec..d6b4579534 100644 --- a/nibabel/tests/test_onetime.py +++ b/nibabel/tests/test_onetime.py @@ -1,9 +1,22 @@ -import pytest +from functools import cached_property -from nibabel.onetime import auto_attr, setattr_on_read +from nibabel.onetime import ResetMixin, setattr_on_read from nibabel.testing import deprecated_to, expires +class A(ResetMixin): + @cached_property + def y(self): + return self.x / 2.0 + + @cached_property + def z(self): + return self.x / 3.0 + + def __init__(self, x=1.0): + self.x = x + + @expires('5.0.0') def test_setattr_on_read(): with deprecated_to('5.0.0'): @@ -21,15 +34,14 @@ def a(self): assert x.a is obj -def test_auto_attr(): - class MagicProp: - @auto_attr - def a(self): - return object() - - x = MagicProp() - assert 'a' not in x.__dict__ - obj = x.a - assert 'a' in x.__dict__ - # Each call to object() produces a unique object. Verify we get the same one every time. 
- assert x.a is obj +def test_ResetMixin(): + a = A(10) + assert 'y' not in a.__dict__ + assert a.y == 5 + assert 'y' in a.__dict__ + a.x = 20 + assert a.y == 5 + # Call reset and no error should be raised even though z was never accessed + a.reset() + assert 'y' not in a.__dict__ + assert a.y == 10 diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index a228e66135..05d0e04cd0 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Test for openers module""" + import contextlib import hashlib import os @@ -19,7 +20,6 @@ import pytest from packaging.version import Version -from ..deprecator import ExpiredDeprecationError from ..openers import HAVE_INDEXED_GZIP, BZ2File, DeterministicGzipFile, ImageOpener, Opener from ..optpkg import optional_package from ..tmpdirs import InTemporaryDirectory @@ -121,8 +121,9 @@ def patch_indexed_gzip(state): values = (True, MockIndexedGzipFile) else: values = (False, GzipFile) - with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', values[0]), mock.patch( - 'nibabel.openers.IndexedGzipFile', values[1], create=True + with ( + mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', values[0]), + mock.patch('nibabel.openers.IndexedGzipFile', values[1], create=True), ): yield @@ -431,17 +432,17 @@ def test_DeterministicGzipFile_fileobj(): with open('test.gz', 'wb') as fobj: with DeterministicGzipFile(filename='', mode='wb', fileobj=fobj) as gzobj: gzobj.write(msg) - md5sum('test.gz') == ref_chksum + assert md5sum('test.gz') == ref_chksum with open('test.gz', 'wb') as fobj: with DeterministicGzipFile(fileobj=fobj, mode='wb') as gzobj: gzobj.write(msg) - md5sum('test.gz') == ref_chksum + assert md5sum('test.gz') == ref_chksum with open('test.gz', 'wb') as fobj: with DeterministicGzipFile(filename='test.gz', mode='wb', fileobj=fobj) as gzobj: gzobj.write(msg) - md5sum('test.gz') == ref_chksum + assert md5sum('test.gz') == ref_chksum def test_bitwise_determinism(): diff --git a/nibabel/tests/test_optpkg.py b/nibabel/tests/test_optpkg.py index 7ffaa2f851..c243633a07 100644 --- a/nibabel/tests/test_optpkg.py +++ b/nibabel/tests/test_optpkg.py @@ -1,5 +1,4 @@ -"""Testing optpkg module -""" +"""Testing optpkg module""" import builtins import sys diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index 0094711e79..e7c32d7867 100644 --- a/nibabel/tests/test_orientations.py +++ b/nibabel/tests/test_orientations.py @@ -8,8 +8,6 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Testing for orientations module""" -import warnings - import numpy as np import pytest from numpy.testing import assert_array_equal @@ -185,7 +183,6 @@ def test_apply(): apply_orientation(a[:, :, 1], ornt) with pytest.raises(OrientationError): apply_orientation(a, [[0, 1], [np.nan, np.nan], [2, 1]]) - shape = np.array(a.shape) for ornt in ALL_ORNTS: t_arr = apply_orientation(a, ornt) assert_array_equal(a.shape, np.array(t_arr.shape)[np.array(ornt)[:, 0]]) diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index 6035d47f8d..a312c558a8 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -1,5 +1,4 @@ -"""Testing parrec module -""" +"""Testing parrec module""" from glob import glob from os.path import basename, dirname @@ -285,8 +284,8 @@ def test_affine_regression(): # Test against checked affines from previous runs # Checked against Michael's data 
using some GUI tools # Data at http://psydata.ovgu.de/philips_achieva_testfiles/conversion2 - for basename, exp_affine in PREVIOUS_AFFINES.items(): - fname = pjoin(DATA_PATH, basename + '.PAR') + for basename_affine, exp_affine in PREVIOUS_AFFINES.items(): + fname = pjoin(DATA_PATH, basename_affine + '.PAR') with open(fname) as fobj: hdr = PARRECHeader.from_fileobj(fobj) assert_almost_equal(hdr.get_affine(), exp_affine) @@ -884,7 +883,6 @@ def test_dualTR(): def test_ADC_map(): # test reading an apparent diffusion coefficient map with open(ADC_PAR) as fobj: - # two truncation warnings expected because general_info indicates: # 1.) multiple directions # 2.) multiple b-values diff --git a/nibabel/tests/test_parrec_data.py b/nibabel/tests/test_parrec_data.py index a437fafeda..02a1d5733a 100644 --- a/nibabel/tests/test_parrec_data.py +++ b/nibabel/tests/test_parrec_data.py @@ -1,14 +1,11 @@ -"""Test we can correctly import example PARREC files -""" +"""Test we can correctly import example PARREC files""" import unittest from glob import glob -from os.path import basename, exists +from os.path import basename, exists, splitext from os.path import join as pjoin -from os.path import splitext import numpy as np -import pytest from numpy.testing import assert_almost_equal from .. import load as top_load diff --git a/nibabel/tests/test_pkg_info.py b/nibabel/tests/test_pkg_info.py index dfe18c975a..1a9a06dc93 100644 --- a/nibabel/tests/test_pkg_info.py +++ b/nibabel/tests/test_pkg_info.py @@ -1,5 +1,4 @@ -"""Testing package info -""" +"""Testing package info""" import pytest @@ -15,7 +14,7 @@ def test_pkg_info(): - nibabel.pkg_info.get_pkg_info - nibabel.pkg_info.pkg_commit_hash """ - info = nib.get_info() + nib.get_info() def test_version(): @@ -38,7 +37,7 @@ def test_cmp_pkg_version_0(): @pytest.mark.parametrize( - 'test_ver, pkg_ver, exp_out', + ('test_ver', 'pkg_ver', 'exp_out'), [ ('1.0', '1.0', 0), ('1.0.0', '1.0', 0), @@ -55,8 +54,6 @@ def test_cmp_pkg_version_0(): ('1.2.1rc1', '1.2.1', -1), ('1.2.1rc1', '1.2.1rc', 1), ('1.2.1rc', '1.2.1rc1', -1), - ('1.2.1rc1', '1.2.1rc', 1), - ('1.2.1rc', '1.2.1rc1', -1), ('1.2.1b', '1.2.1a', 1), ('1.2.1a', '1.2.1b', -1), ('1.2.0+1', '1.2', 1), diff --git a/nibabel/tests/test_pointset.py b/nibabel/tests/test_pointset.py index fb9a7c5c81..f4f0e4361b 100644 --- a/nibabel/tests/test_pointset.py +++ b/nibabel/tests/test_pointset.py @@ -1,15 +1,12 @@ from math import prod from pathlib import Path -from unittest import skipUnless import numpy as np import pytest from nibabel import pointset as ps from nibabel.affines import apply_affine -from nibabel.arrayproxy import ArrayProxy from nibabel.fileslice import strided_scalar -from nibabel.onetime import auto_attr from nibabel.optpkg import optional_package from nibabel.spatialimages import SpatialImage from nibabel.tests.nibabel_data import get_nibabel_data diff --git a/nibabel/tests/test_processing.py b/nibabel/tests/test_processing.py index 27da6639c0..f1a4f0a909 100644 --- a/nibabel/tests/test_processing.py +++ b/nibabel/tests/test_processing.py @@ -6,8 +6,7 @@ # copyright and license terms. 
 # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
-"""Testing processing module
-"""
+"""Testing processing module"""
 
 import logging
 from os.path import dirname
diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py
index 421bc5bf47..ba0f784d59 100644
--- a/nibabel/tests/test_proxy_api.py
+++ b/nibabel/tests/test_proxy_api.py
@@ -25,7 +25,7 @@
 * if you pass a header into the __init__, then modifying the original header
   will not affect the result of the array return.
 
-These last are to allow the proxy to be re-used with different images.
+These last are to allow the proxy to be reused with different images.
 """
 
 import unittest
diff --git a/nibabel/tests/test_quaternions.py b/nibabel/tests/test_quaternions.py
index fff7c5e040..a5ec89d948 100644
--- a/nibabel/tests/test_quaternions.py
+++ b/nibabel/tests/test_quaternions.py
@@ -112,7 +112,6 @@ def test_fillpositive_simulated_error(dtype):
     # Permit 1 epsilon per value (default, but make explicit here)
     w2_thresh = 3 * np.finfo(dtype).eps
 
-    pos_error = neg_error = False
     for _ in range(50):
         xyz = norm(gen_vec(dtype))
 
@@ -147,7 +146,7 @@ def test_inverse_0():
     assert iq.dtype.kind == 'f'
 
 
-@pytest.mark.parametrize('M, q', eg_pairs)
+@pytest.mark.parametrize(('M', 'q'), eg_pairs)
 def test_inverse_1(M, q):
     iq = nq.inverse(q)
     iqM = nq.quat2mat(iq)
@@ -170,15 +169,15 @@ def test_norm():
     assert not nq.isunit(qi)
 
 
-@pytest.mark.parametrize('M1, q1', eg_pairs[0::4])
-@pytest.mark.parametrize('M2, q2', eg_pairs[1::4])
+@pytest.mark.parametrize(('M1', 'q1'), eg_pairs[0::4])
+@pytest.mark.parametrize(('M2', 'q2'), eg_pairs[1::4])
 def test_mult(M1, q1, M2, q2):
     # Test that quaternion * same as matrix *
     q21 = nq.mult(q2, q1)
-    assert_array_almost_equal, M2 @ M1, nq.quat2mat(q21)
+    assert_array_almost_equal(M2 @ M1, nq.quat2mat(q21))
 
 
-@pytest.mark.parametrize('M, q', eg_pairs)
+@pytest.mark.parametrize(('M', 'q'), eg_pairs)
 def test_inverse(M, q):
     iq = nq.inverse(q)
     iqM = nq.quat2mat(iq)
@@ -186,14 +185,8 @@ def test_inverse(M, q):
     assert np.allclose(iM, iqM)
 
 
-def test_eye():
-    qi = nq.eye()
-    assert np.all([1, 0, 0, 0] == qi)
-    assert np.allclose(nq.quat2mat(qi), np.eye(3))
-
-
 @pytest.mark.parametrize('vec', np.eye(3))
-@pytest.mark.parametrize('M, q', eg_pairs)
+@pytest.mark.parametrize(('M', 'q'), eg_pairs)
 def test_qrotate(vec, M, q):
     vdash = nq.rotate_vector(vec, q)
     vM = M @ vec
diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py
index 772d395fd4..d2bc7da2fc 100644
--- a/nibabel/tests/test_removalschedule.py
+++ b/nibabel/tests/test_removalschedule.py
@@ -1,4 +1,3 @@
-import unittest
 from unittest import mock
 
 import pytest
@@ -126,7 +125,7 @@ def test_module_removal():
     for module in _filter(MODULE_SCHEDULE):
         with pytest.raises(ImportError):
             __import__(module)
-            assert False, f'Time to remove {module}'
+            raise AssertionError(f'Time to remove {module}')
 
 
 def test_object_removal():
diff --git a/nibabel/tests/test_round_trip.py b/nibabel/tests/test_round_trip.py
index 07783fe550..6daf960aa4 100644
--- a/nibabel/tests/test_round_trip.py
+++ b/nibabel/tests/test_round_trip.py
@@ -108,15 +108,15 @@ def test_round_trip():
     iuint_types = [t for t in iuint_types if t in nifti_supported]
     f_types = [np.float32, np.float64]
     # Expanding standard deviations
-    for i, sd_10 in enumerate(sd_10s):
+    for sd_10 in sd_10s:
         sd = 10.0**sd_10
         V_in = rng.normal(0, sd, size=(N, 1))
-        for j, in_type in enumerate(f_types):
-            for k, out_type in enumerate(iuint_types):
+        for in_type in f_types:
+            for out_type in iuint_types:
                 check_arr(sd_10, V_in,
in_type, out_type, scaling_type) # Spread integers across range - for i, sd in enumerate(np.linspace(0.05, 0.5, 5)): - for j, in_type in enumerate(iuint_types): + for sd in np.linspace(0.05, 0.5, 5): + for in_type in iuint_types: info = np.iinfo(in_type) mn, mx = info.min, info.max type_range = mx - mn @@ -124,7 +124,7 @@ def test_round_trip(): # float(sd) because type_range can be type 'long' width = type_range * float(sd) V_in = rng.normal(center, width, size=(N, 1)) - for k, out_type in enumerate(iuint_types): + for out_type in iuint_types: check_arr(sd, V_in, in_type, out_type, scaling_type) diff --git a/nibabel/tests/test_rstutils.py b/nibabel/tests/test_rstutils.py index 847b7a4eee..eab1969857 100644 --- a/nibabel/tests/test_rstutils.py +++ b/nibabel/tests/test_rstutils.py @@ -1,5 +1,4 @@ -"""Test printable table -""" +"""Test printable table""" import numpy as np import pytest diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py index f667b4164d..ccc379c256 100644 --- a/nibabel/tests/test_scaling.py +++ b/nibabel/tests/test_scaling.py @@ -25,7 +25,7 @@ @pytest.mark.parametrize( - 'in_arr, res', + ('in_arr', 'res'), [ ([[-1, 0, 1], [np.inf, np.nan, -np.inf]], (-1, 1)), (np.array([[-1, 0, 1], [np.inf, np.nan, -np.inf]]), (-1, 1)), @@ -36,7 +36,6 @@ ([[np.nan, -1, 2], [-2, np.nan, 1]], (-2, 2)), ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), ([[-np.inf, 2], [np.nan, 1]], (1, 2)), # good max case - ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), ([np.nan], (np.inf, -np.inf)), ([np.inf], (np.inf, -np.inf)), ([-np.inf], (np.inf, -np.inf)), @@ -134,7 +133,7 @@ def test_a2f_nan2zero(): @pytest.mark.parametrize( - 'in_type, out_type', + ('in_type', 'out_type'), [ (np.int16, np.int16), (np.int16, np.int8), @@ -163,7 +162,7 @@ def test_array_file_scales(in_type, out_type): @pytest.mark.parametrize( - 'category0, category1, overflow', + ('category0', 'category1', 'overflow'), [ # Confirm that, for all ints and uints as input, and all possible outputs, # for any simple way of doing the calculation, the result is near enough diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index cc4bb468ad..d97c99d051 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -11,9 +11,8 @@ import sys import unittest from glob import glob -from os.path import abspath, basename, dirname, exists +from os.path import abspath, basename, dirname, exists, splitext from os.path import join as pjoin -from os.path import splitext import numpy as np import pytest @@ -197,13 +196,13 @@ def test_help(): # needs special treatment since depends on fuse module which # might not be available. try: - import fuse + import fuse # noqa: F401 except Exception: continue # do not test this one code, stdout, stderr = run_command([cmd, '--help']) assert code == 0 assert_re_in(f'.*{cmd}', stdout) - assert_re_in('.*Usage', stdout) + assert_re_in('.*[uU]sage', stdout) # Some third party modules might like to announce some Deprecation # etc warnings, see e.g. 
https://travis-ci.org/nipy/nibabel/jobs/370353602 if 'warning' not in stderr.lower(): diff --git a/nibabel/tests/test_spaces.py b/nibabel/tests/test_spaces.py index dbfe533890..4722228a5b 100644 --- a/nibabel/tests/test_spaces.py +++ b/nibabel/tests/test_spaces.py @@ -1,5 +1,4 @@ -"""Tests for spaces module -""" +"""Tests for spaces module""" import numpy as np import numpy.linalg as npl @@ -126,7 +125,7 @@ def test_slice2volume(): @pytest.mark.parametrize( - 'index, axis', + ('index', 'axis'), [ [-1, 0], [0, -1], diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index 7157d5c459..baf470090b 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -6,8 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""Testing spatialimages -""" +"""Testing spatialimages""" from io import BytesIO @@ -399,8 +398,10 @@ def test_slicer(self): img_klass = self.image_class in_data_template = np.arange(240, dtype=np.int16) base_affine = np.eye(4) - t_axis = None - for dshape in ((4, 5, 6, 2), (8, 5, 6)): # Time series # Volume + for dshape in ( + (4, 5, 6, 2), # Time series + (8, 5, 6), # Volume + ): in_data = in_data_template.copy().reshape(dshape) img = img_klass(in_data, base_affine.copy()) diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py index ada92d3b05..26098d8ede 100644 --- a/nibabel/tests/test_spm99analyze.py +++ b/nibabel/tests/test_spm99analyze.py @@ -423,7 +423,7 @@ def test_mat_read(self): aff = np.diag([2, 3, 4, 1]) # no LR flip in affine img = img_klass(arr, aff) fm = img.file_map - for key, value in fm.items(): + for value in fm.values(): value.fileobj = BytesIO() # Test round trip img.to_file_map() @@ -475,7 +475,7 @@ def test_none_affine(self): img = img_klass(np.zeros((2, 3, 4)), None) aff = img.header.get_best_affine() # Save / reload using bytes IO objects - for key, value in img.file_map.items(): + for value in img.file_map.values(): value.fileobj = BytesIO() img.to_file_map() img_back = img.from_file_map(img.file_map) diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index dee3ea3554..ec147baa95 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -1,5 +1,4 @@ -"""Tests for warnings context managers -""" +"""Tests for warnings context managers""" import os import sys @@ -114,7 +113,7 @@ def test_warn_error(): with error_warnings(): with pytest.raises(UserWarning): warnings.warn('A test') - with error_warnings() as w: # w not used for anything + with error_warnings(): with pytest.raises(UserWarning): warnings.warn('A test') assert n_warns == len(warnings.filters) @@ -134,7 +133,7 @@ def test_warn_ignore(): with suppress_warnings(): warnings.warn('Here is a warning, you will not see it') warnings.warn('Nor this one', DeprecationWarning) - with suppress_warnings() as w: # w not used + with suppress_warnings(): warnings.warn('Here is a warning, you will not see it') warnings.warn('Nor this one', DeprecationWarning) assert n_warns == len(warnings.filters) @@ -149,7 +148,7 @@ def f(): @pytest.mark.parametrize( - 'regex, entries', + ('regex', 'entries'), [ ['.*', ''], ['.*', ['any']], diff --git a/nibabel/tests/test_tripwire.py b/nibabel/tests/test_tripwire.py index f172d5c579..4bf91923f2 100644 --- a/nibabel/tests/test_tripwire.py +++ b/nibabel/tests/test_tripwire.py @@ -1,5 +1,4 @@ -"""Testing tripwire module -""" +"""Testing tripwire module""" import pytest @@ 
-17,9 +16,5 @@ def test_tripwire():
     with pytest.raises(TripWireError):
         silly_module_name.do_silly_thing
     # Check AttributeError can be checked too
-    try:
+    with pytest.raises(AttributeError):
         silly_module_name.__wrapped__
-    except TripWireError as err:
-        assert isinstance(err, AttributeError)
-    else:
-        raise RuntimeError('No error raised, but expected')
diff --git a/nibabel/tests/test_viewers.py b/nibabel/tests/test_viewers.py
index 53f4a32bdc..fa22d9021a 100644
--- a/nibabel/tests/test_viewers.py
+++ b/nibabel/tests/test_viewers.py
@@ -102,3 +102,230 @@ def test_viewer():
     v2.link_to(v1)  # shouldn't do anything
     v1.close()
     v2.close()
+
+
+@needs_mpl
+def test_viewer_nonRAS():
+    data1 = np.random.rand(10, 20, 40)
+    data1[5, 10, :] = 0
+    data1[5, :, 30] = 0
+    data1[:, 10, 30] = 0
+    # RSA affine
+    aff1 = np.array([[1, 0, 0, -5], [0, 0, 1, -30], [0, 1, 0, -10], [0, 0, 0, 1]])
+    o1 = OrthoSlicer3D(data1, aff1)
+    sag = o1._ims[0].get_array()
+    cor = o1._ims[1].get_array()
+    axi = o1._ims[2].get_array()
+
+    # Sagittal view: [0, I->S, P->A], so data is transposed, matching plot array
+    assert_array_equal(sag, data1[5, :, :])
+    # Coronal view: [L->R, I->S, 0]. Data is not transposed; transpose to match plot array
+    assert_array_equal(cor, data1[:, :, 30].T)
+    # Axial view: [L->R, 0, P->A]. Data is not transposed; transpose to match plot array
+    assert_array_equal(axi, data1[:, 10, :].T)
+
+    o1.set_position(1, 2, 3)  # R, A, S coordinates
+
+    sag = o1._ims[0].get_array()
+    cor = o1._ims[1].get_array()
+    axi = o1._ims[2].get_array()
+
+    # Shift 1 right, 2 anterior, 3 superior
+    assert_array_equal(sag, data1[6, :, :])
+    assert_array_equal(cor, data1[:, :, 32].T)
+    assert_array_equal(axi, data1[:, 13, :].T)
+
+
+@needs_mpl
+def test_viewer_nonRAS_on_mouse():
+    """Test _on_mouse selection with a non-RAS affine"""
+    # This affine simulates an acquisition on a quadruped subject in a prone position.
+    # It corresponds to an acquisition with:
+    # - LR inverted on scanner x (i)
+    # - IS on scanner y (j)
+    # - PA on scanner z (k)
+    # This example also exercises the OrthoSlicer3D properties `_flips` and `_order`.
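+    # Illustrative sketch (commentary only, nothing here is executed): the affine
+    # constructed below is
+    #     aff1 = [[-1, 0, 0,   5],
+    #             [ 0, 0, 1, -10],
+    #             [ 0, 1, 0, -30],
+    #             [ 0, 0, 0,   1]]
+    # so voxel (i, j, k) maps to scanner (x, y, z) = (5 - i, k - 10, j - 30), and
+    # nibabel.aff2axcodes(aff1) would return ('L', 'S', 'A'): i runs right-to-left
+    # while j and k trade scanner roles, which is what drives `_flips` and `_order`.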
+
+    (I, J, K) = (10, 20, 40)
+    data1 = np.random.rand(I, J, K)
+    (i_target, j_target, k_target) = (2, 14, 12)
+    i1 = i_target - 2
+    i2 = i_target + 2
+    j1 = j_target - 3
+    j2 = j_target + 3
+    k1 = k_target - 4
+    k2 = k_target + 4
+    data1[i1 : i2 + 1, j1 : j2 + 1, k1 : k2 + 1] = 0
+    data1[i_target, j_target, k_target] = 1
+    valp1 = 1.5
+    valm1 = 0.5
+    data1[i_target - 1, j_target, k_target] = valp1  # x flipped
+    data1[i_target + 1, j_target, k_target] = valm1  # x flipped
+    data1[i_target, j_target - 1, k_target] = valm1
+    data1[i_target, j_target + 1, k_target] = valp1
+    data1[i_target, j_target, k_target - 1] = valm1
+    data1[i_target, j_target, k_target + 1] = valp1
+
+    aff1 = np.array([[-1, 0, 0, 5], [0, 0, 1, -10], [0, 1, 0, -30], [0, 0, 0, 1]])
+
+    o1 = OrthoSlicer3D(data1, aff1)
+
+    class Event:
+        def __init__(self):
+            self.name = 'simulated mouse event'
+            self.button = 1
+
+    event = Event()
+    event.xdata = k_target
+    event.ydata = j_target
+    event.inaxes = o1._ims[0].axes
+    o1._on_mouse(event)
+
+    event.inaxes = o1._ims[1].axes
+    event.xdata = (I - 1) - i_target  # x flipped
+    event.ydata = j_target
+    o1._on_mouse(event)
+
+    event.inaxes = o1._ims[2].axes
+    event.xdata = (I - 1) - i_target  # x flipped
+    event.ydata = k_target
+    o1._on_mouse(event)
+
+    sag = o1._ims[0].get_array()
+    cor = o1._ims[1].get_array()
+    axi = o1._ims[2].get_array()
+
+    assert_array_equal(sag, data1[i_target, :, :])
+    assert_array_equal(cor, data1[::-1, :, k_target].T)  # x flipped
+    assert_array_equal(axi, data1[::-1, j_target, :].T)  # x flipped
+    return None
+
+
+@needs_mpl
+def test_viewer_nonRAS_on_scroll():
+    """Test scrolling with a non-RAS affine"""
+    # This affine simulates an acquisition on a quadruped subject in a prone position.
+    # It corresponds to an acquisition with:
+    # - LR inverted on scanner x (i)
+    # - IS on scanner y (j)
+    # - PA on scanner z (k)
+    # This example also exercises the OrthoSlicer3D properties `_flips` and `_order`.
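+    # Illustrative check (commentary only, nothing here is executed): with the
+    # affine defined below, the scanner-space target follows directly from
+    #     aff1 @ [2, 14, 12, 1] -> [3, 2, -16, 1]
+    # i.e. voxel (i, j, k) = (2, 14, 12) sits at scanner (x, y, z) = (3, 2, -16),
+    # which is the position each scroll below starts from.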
+
+    (I, J, K) = (10, 20, 40)
+    data1 = np.random.rand(I, J, K)
+    (i_target, j_target, k_target) = (2, 14, 12)
+    i1 = i_target - 2
+    i2 = i_target + 2
+    j1 = j_target - 3
+    j2 = j_target + 3
+    k1 = k_target - 4
+    k2 = k_target + 4
+    data1[i1 : i2 + 1, j1 : j2 + 1, k1 : k2 + 1] = 0
+    data1[i_target, j_target, k_target] = 1
+    valp1 = 1.5
+    valm1 = 0.5
+    data1[i_target - 1, j_target, k_target] = valp1  # x flipped
+    data1[i_target + 1, j_target, k_target] = valm1  # x flipped
+    data1[i_target, j_target - 1, k_target] = valm1
+    data1[i_target, j_target + 1, k_target] = valp1
+    data1[i_target, j_target, k_target - 1] = valm1
+    data1[i_target, j_target, k_target + 1] = valp1
+
+    aff1 = np.array([[-1, 0, 0, 5], [0, 0, 1, -10], [0, 1, 0, -30], [0, 0, 0, 1]])
+
+    o1 = OrthoSlicer3D(data1, aff1)
+
+    class Event:
+        def __init__(self):
+            self.name = 'simulated mouse event'
+            self.button = None
+            self.key = None
+
+    [x_t, y_t, z_t] = list(aff1.dot(np.array([i_target, j_target, k_target, 1]))[:3])
+    # scanner position of the target voxel: x_t=3, y_t=2, z_t=-16
+
+    event = Event()
+
+    # Sagittal plane - one scroll up
+    # x coordinate is flipped, so the data index decreases by 1
+    o1.set_position(x_t, y_t, z_t)
+    event.inaxes = o1._ims[0].axes
+    event.button = 'up'
+    o1._on_scroll(event)
+    sag = o1._ims[0].get_array()
+    cor = o1._ims[1].get_array()
+    axi = o1._ims[2].get_array()
+    assert_array_equal(sag, data1[i_target - 1, :, :])
+    assert_array_equal(cor, data1[::-1, :, k_target].T)  # ::-1 because the array is flipped in x
+    assert_array_equal(axi, data1[::-1, j_target, :].T)  # ::-1 because the array is flipped in x
+
+    # Sagittal plane - one scroll down
+    o1.set_position(x_t, y_t, z_t)
+    event.button = 'down'
+    o1._on_scroll(event)
+    sag = o1._ims[0].get_array()
+    cor = o1._ims[1].get_array()
+    axi = o1._ims[2].get_array()
+    assert_array_equal(sag, data1[i_target + 1, :, :])
+    assert_array_equal(cor, data1[::-1, :, k_target].T)
+    assert_array_equal(axi, data1[::-1, j_target, :].T)
+
+    # Coronal plane - one scroll up
+    # y coordinate increases by 1
+    o1.set_position(x_t, y_t, z_t)
+    event.inaxes = o1._ims[1].axes
+    event.button = 'up'
+    o1._on_scroll(event)
+    sag = o1._ims[0].get_array()
+    cor = o1._ims[1].get_array()
+    axi = o1._ims[2].get_array()
+    assert_array_equal(sag, data1[i_target, :, :])
+    assert_array_equal(
+        cor, data1[::-1, :, k_target + 1].T
+    )  # ::-1 because the array is flipped in x
+    assert_array_equal(axi, data1[::-1, j_target, :].T)  # ::-1 because the array is flipped in x
+
+    # Coronal plane - one scroll down
+    o1.set_position(x_t, y_t, z_t)
+    event.button = 'down'
+    o1._on_scroll(event)
+    sag = o1._ims[0].get_array()
+    cor = o1._ims[1].get_array()
+    axi = o1._ims[2].get_array()
+    assert_array_equal(sag, data1[i_target, :, :])
+    assert_array_equal(cor, data1[::-1, :, k_target - 1].T)
+    assert_array_equal(axi, data1[::-1, j_target, :].T)
+
+    # Axial plane - one scroll up
+    # z coordinate increases by 1
+    o1.set_position(x_t, y_t, z_t)
+    event.inaxes = o1._ims[2].axes
+    event.button = 'up'
+    o1._on_scroll(event)
+    sag = o1._ims[0].get_array()
+    cor = o1._ims[1].get_array()
+    axi = o1._ims[2].get_array()
+    assert_array_equal(sag, data1[i_target, :, :])
+    assert_array_equal(cor, data1[::-1, :, k_target].T)  # ::-1 because the array is flipped in x
+    assert_array_equal(
+        axi, data1[::-1, j_target + 1, :].T
+    )  # ::-1 because the array is flipped in x
+
+    # Axial plane - one scroll down
+    o1.set_position(x_t, y_t, z_t)
+    event.button = 'down'
+    o1._on_scroll(event)
+    sag = o1._ims[0].get_array()
+ cor = o1._ims[1].get_array() + axi = o1._ims[2].get_array() + assert_array_equal(sag, data1[i_target, :, :]) + assert_array_equal(cor, data1[::-1, :, k_target].T) + assert_array_equal(axi, data1[::-1, j_target - 1, :].T) + return None diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 07ca9a6baa..1bd44cbd0a 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -607,7 +607,7 @@ def test_a2f_nanpos(): def test_a2f_bad_scaling(): # Test that pathological scalers raise an error - NUMERICAL_TYPES = sum([sctypes[key] for key in ['int', 'uint', 'float', 'complex']], []) + NUMERICAL_TYPES = sum((sctypes[key] for key in ['int', 'uint', 'float', 'complex']), []) for in_type, out_type, slope, inter in itertools.product( NUMERICAL_TYPES, NUMERICAL_TYPES, @@ -989,7 +989,7 @@ def test_seek_tell_logic(): class BabyBio(BytesIO): def seek(self, *args): - raise OSError() + raise OSError bio = BabyBio() # Fresh fileobj, position 0, can't seek - error diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py index 10b4b3f22c..0eb906fee7 100644 --- a/nibabel/tests/test_wrapstruct.py +++ b/nibabel/tests/test_wrapstruct.py @@ -23,6 +23,7 @@ _field_recoders -> field_recoders """ + import logging from io import BytesIO, StringIO @@ -435,15 +436,6 @@ def test_copy(self): self._set_something_into_hdr(hdr2) assert hdr == hdr2 - def test_copy(self): - hdr = self.header_class() - hdr2 = hdr.copy() - assert hdr == hdr2 - self._set_something_into_hdr(hdr) - assert hdr != hdr2 - self._set_something_into_hdr(hdr2) - assert hdr == hdr2 - def test_checks(self): # Test header checks hdr_t = self.header_class() diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index 7fe47e6510..2bcf9fdeba 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -7,6 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Contexts for *with* statement providing temporary directories""" + import os import tempfile from contextlib import contextmanager @@ -15,7 +16,7 @@ from contextlib import chdir as _chdir except ImportError: # PY310 - @contextmanager # type: ignore + @contextmanager # type: ignore[no-redef] def _chdir(path): cwd = os.getcwd() os.chdir(path) @@ -53,7 +54,7 @@ def __init__(self, suffix='', prefix=tempfile.template, dir=None): >>> os.path.exists(tmpdir) False """ - return super().__init__(suffix, prefix, dir) + super().__init__(suffix, prefix, dir) @contextmanager diff --git a/nibabel/tripwire.py b/nibabel/tripwire.py index fa45e73382..efe651fd93 100644 --- a/nibabel/tripwire.py +++ b/nibabel/tripwire.py @@ -1,4 +1,5 @@ """Class to raise error for missing modules or other misfortunes""" + from typing import Any diff --git a/nibabel/viewers.py b/nibabel/viewers.py index 60ebd3a256..185a3e1f32 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -3,6 +3,7 @@ Includes version of OrthoSlicer3D code originally written by our own Paul Ivanov. 
""" + import weakref import numpy as np @@ -102,7 +103,7 @@ def __init__(self, data, affine=None, axes=None, title=None): # | | | | # | | | | # +---------+ +---------+ - # A --> <-- R + # A --> R --> # ^ +---------+ +---------+ # | | | | | # | Axial | | Vol | @@ -110,7 +111,7 @@ def __init__(self, data, affine=None, axes=None, title=None): # | | | | # | | | | # +---------+ +---------+ - # <-- R <-- t --> + # R --> <-- t --> fig, axes = plt.subplots(2, 2) fig.set_size_inches((8, 8), forward=True) @@ -398,7 +399,8 @@ def _set_position(self, x, y, z, notify=True): # deal with slicing appropriately self._position[:3] = [x, y, z] idxs = np.dot(self._inv_affine, self._position)[:3] - for ii, (size, idx) in enumerate(zip(self._sizes, idxs)): + idxs_new_order = idxs[self._order] + for ii, (size, idx) in enumerate(zip(self._sizes, idxs_new_order)): self._data_idx[ii] = max(min(int(round(idx)), size - 1), 0) for ii in range(3): # sagittal: get to S/A @@ -417,7 +419,7 @@ def _set_position(self, x, y, z, notify=True): # deal with crosshairs loc = self._data_idx[ii] if self._flips[ii]: - loc = self._sizes[ii] - loc + loc = self._sizes[ii] - 1 - loc loc = [loc] * 2 if ii == 0: self._crosshairs[2]['vert'].set_xdata(loc) @@ -445,7 +447,7 @@ def _set_position(self, x, y, z, notify=True): # Matplotlib handlers #################################################### def _in_axis(self, event): """Return axis index if within one of our axes, else None""" - if getattr(event, 'inaxes') is None: + if event.inaxes is None: return None for ii, ax in enumerate(self._axes): if event.inaxes is ax: @@ -466,12 +468,17 @@ def _on_scroll(self, event): dv *= 1.0 if event.button == 'up' else -1.0 dv *= -1 if self._flips[ii] else 1 val = self._data_idx[ii] + dv + if ii == 3: self._set_volume_index(val) else: - coords = [self._data_idx[k] for k in range(3)] + [1.0] + coords = [self._data_idx[k] for k in range(3)] coords[ii] = val - self._set_position(*np.dot(self._affine, coords)[:3]) + coords_ordered = [0, 0, 0, 1] + for k in range(3): + coords_ordered[self._order[k]] = coords[k] + position = np.dot(self._affine, coords_ordered)[:3] + self._set_position(*position) self._draw() def _on_mouse(self, event): @@ -486,14 +493,18 @@ def _on_mouse(self, event): self._set_volume_index(event.xdata) else: # translate click xdata/ydata to physical position - xax, yax = [[1, 2], [0, 2], [0, 1]][ii] + xax, yax = [ + [self._order[1], self._order[2]], + [self._order[0], self._order[2]], + [self._order[0], self._order[1]], + ][ii] x, y = event.xdata, event.ydata - x = self._sizes[xax] - x if self._flips[xax] else x - y = self._sizes[yax] - y if self._flips[yax] else y - idxs = [None, None, None, 1.0] + x = self._sizes[xax] - x - 1 if self._flips[xax] else x + y = self._sizes[yax] - y - 1 if self._flips[yax] else y + idxs = np.ones(4) idxs[xax] = x idxs[yax] = y - idxs[ii] = self._data_idx[ii] + idxs[self._order[ii]] = self._data_idx[ii] self._set_position(*np.dot(self._affine, idxs)[:3]) self._draw() diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index 90e5e5ff35..d0ebb46a7b 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -7,9 +7,9 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Utility functions for analyze-like formats""" + from __future__ import annotations -import io import sys import typing as ty import warnings @@ -23,7 +23,9 @@ from .casting import OK_FLOATS, shared_range from .externals.oset import OrderedSet -if ty.TYPE_CHECKING: # pragma: no cover +if 
ty.TYPE_CHECKING: + import io + import numpy.typing as npt Scalar = np.number | float @@ -233,7 +235,7 @@ def value_set(self, name: str | None = None) -> OrderedSet: endian_codes = Recoder(_endian_codes) -class DtypeMapper(ty.Dict[ty.Hashable, ty.Hashable]): +class DtypeMapper(dict[ty.Hashable, ty.Hashable]): """Specialized mapper for numpy dtypes We pass this mapper into the Recoder class to deal with numpy dtype @@ -440,7 +442,7 @@ def array_from_file( True """ if mmap not in (True, False, 'c', 'r', 'r+'): - raise ValueError("mmap value should be one of True, False, 'c', " "'r', 'r+'") + raise ValueError("mmap value should be one of True, False, 'c', 'r', 'r+'") in_dtype = np.dtype(in_dtype) # Get file-like object from Opener instance infile = getattr(infile, 'fobj', infile) @@ -623,7 +625,7 @@ def array_to_file( # pre scale thresholds mn, mx = _dt_min_max(in_dtype, mn, mx) mn_out, mx_out = _dt_min_max(out_dtype) - pre_clips = max(mn, mn_out), min(mx, mx_out) + pre_clips = max(mn, mn_out), min(mx, mx_out) # type: ignore[type-var] return _write_data(data, fileobj, out_dtype, order, pre_clips=pre_clips) # In any case, we do not want to check for nans because we've already # disallowed scaling that generates nans @@ -1190,13 +1192,13 @@ def _ftype4scaled_finite( @ty.overload def finite_range( arr: npt.ArrayLike, check_nan: ty.Literal[False] = False -) -> tuple[Scalar, Scalar]: - ... # pragma: no cover +) -> tuple[Scalar, Scalar]: ... @ty.overload -def finite_range(arr: npt.ArrayLike, check_nan: ty.Literal[True]) -> tuple[Scalar, Scalar, bool]: - ... # pragma: no cover +def finite_range( + arr: npt.ArrayLike, check_nan: ty.Literal[True] +) -> tuple[Scalar, Scalar, bool]: ... def finite_range( diff --git a/nibabel/wrapstruct.py b/nibabel/wrapstruct.py index 6e236d7356..5ffe04bc78 100644 --- a/nibabel/wrapstruct.py +++ b/nibabel/wrapstruct.py @@ -109,6 +109,7 @@ nib.imageglobals.logger = logger """ + from __future__ import annotations import numpy as np diff --git a/nibabel/xmlutils.py b/nibabel/xmlutils.py index 4a5fb28979..12fd30f225 100644 --- a/nibabel/xmlutils.py +++ b/nibabel/xmlutils.py @@ -7,8 +7,9 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Thin layer around xml.etree.ElementTree, to abstract nibabel xml support""" + from io import BytesIO -from xml.etree.ElementTree import Element, SubElement, tostring # noqa +from xml.etree.ElementTree import Element, SubElement, tostring # noqa: F401 from xml.parsers.expat import ParserCreate from .filebasedimages import FileBasedHeader @@ -19,7 +20,7 @@ class XmlSerializable: def _to_xml_element(self) -> Element: """Output should be a xml.etree.ElementTree.Element""" - raise NotImplementedError # pragma: no cover + raise NotImplementedError def to_xml(self, enc='utf-8', **kwargs) -> bytes: r"""Generate an XML bytestring with a given encoding. @@ -32,7 +33,7 @@ def to_xml(self, enc='utf-8', **kwargs) -> bytes: Additional keyword arguments to :func:`xml.etree.ElementTree.tostring`. 
""" ele = self._to_xml_element() - return b'' if ele is None else tostring(ele, enc, **kwargs) + return tostring(ele, enc, **kwargs) class XmlBasedHeader(FileBasedHeader, XmlSerializable): @@ -108,10 +109,10 @@ def parse(self, string=None, fname=None, fptr=None): parser.ParseFile(fptr) def StartElementHandler(self, name, attrs): - raise NotImplementedError # pragma: no cover + raise NotImplementedError def EndElementHandler(self, name): - raise NotImplementedError # pragma: no cover + raise NotImplementedError def CharacterDataHandler(self, data): - raise NotImplementedError # pragma: no cover + raise NotImplementedError diff --git a/nisext/__init__.py b/nisext/__init__.py deleted file mode 100644 index 6b19d7eb8e..0000000000 --- a/nisext/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# init for sext package -"""Setuptools extensions - -nibabel uses these routines, and houses them, and installs them. nipy-proper -and dipy use them. -""" - -import warnings - -warnings.warn( - """The nisext package is deprecated as of NiBabel 5.0 and will be fully -removed in NiBabel 6.0""" -) diff --git a/nisext/py3builder.py b/nisext/py3builder.py deleted file mode 100644 index 24bd298364..0000000000 --- a/nisext/py3builder.py +++ /dev/null @@ -1,38 +0,0 @@ -"""distutils utilities for porting to python 3 within 2-compatible tree""" - - -try: - from distutils.command.build_py import build_py_2to3 -except ImportError: - # 2.x - no parsing of code - from distutils.command.build_py import build_py -else: # Python 3 - # Command to also apply 2to3 to doctests - from distutils import log - - class build_py(build_py_2to3): - def run_2to3(self, files): - # Add doctest parsing; this stuff copied from distutils.utils in - # python 3.2 source - if not files: - return - fixer_names, options, explicit = (self.fixer_names, self.options, self.explicit) - # Make this class local, to delay import of 2to3 - from lib2to3.refactor import RefactoringTool, get_fixers_from_package - - class DistutilsRefactoringTool(RefactoringTool): - def log_error(self, msg, *args, **kw): - log.error(msg, *args) - - def log_message(self, msg, *args): - log.info(msg, *args) - - def log_debug(self, msg, *args): - log.debug(msg, *args) - - if fixer_names is None: - fixer_names = get_fixers_from_package('lib2to3.fixes') - r = DistutilsRefactoringTool(fixer_names, options=options) - r.refactor(files, write=True) - # Then doctests - r.refactor(files, write=True, doctests_only=True) diff --git a/nisext/sexts.py b/nisext/sexts.py deleted file mode 100644 index b206588dec..0000000000 --- a/nisext/sexts.py +++ /dev/null @@ -1,285 +0,0 @@ -"""Distutils / setuptools helpers""" - -import os -from configparser import ConfigParser -from distutils import log -from distutils.command.build_py import build_py -from distutils.command.install_scripts import install_scripts -from distutils.version import LooseVersion -from os.path import join as pjoin -from os.path import split as psplit -from os.path import splitext - - -def get_comrec_build(pkg_dir, build_cmd=build_py): - """Return extended build command class for recording commit - - The extended command tries to run git to find the current commit, getting - the empty string if it fails. It then writes the commit hash into a file - in the `pkg_dir` path, named ``COMMIT_INFO.txt``. - - In due course this information can be used by the package after it is - installed, to tell you what commit it was installed from if known. - - To make use of this system, you need a package with a COMMIT_INFO.txt file - - e.g. 
``myproject/COMMIT_INFO.txt`` - that might well look like this:: - - # This is an ini file that may contain information about the code state - [commit hash] - # The line below may contain a valid hash if it has been substituted during 'git archive' - archive_subst_hash=$Format:%h$ - # This line may be modified by the install process - install_hash= - - The COMMIT_INFO file above is also designed to be used with git substitution - - so you probably also want a ``.gitattributes`` file in the root directory - of your working tree that contains something like this:: - - myproject/COMMIT_INFO.txt export-subst - - That will cause the ``COMMIT_INFO.txt`` file to get filled in by ``git - archive`` - useful in case someone makes such an archive - for example with - via the github 'download source' button. - - Although all the above will work as is, you might consider having something - like a ``get_info()`` function in your package to display the commit - information at the terminal. See the ``pkg_info.py`` module in the nipy - package for an example. - """ - - class MyBuildPy(build_cmd): - """Subclass to write commit data into installation tree""" - - def run(self): - build_cmd.run(self) - import subprocess - - proc = subprocess.Popen( - 'git rev-parse --short HEAD', - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True, - ) - repo_commit, _ = proc.communicate() - # Fix for python 3 - repo_commit = str(repo_commit) - # We write the installation commit even if it's empty - cfg_parser = ConfigParser() - cfg_parser.read(pjoin(pkg_dir, 'COMMIT_INFO.txt')) - cfg_parser.set('commit hash', 'install_hash', repo_commit) - out_pth = pjoin(self.build_lib, pkg_dir, 'COMMIT_INFO.txt') - cfg_parser.write(open(out_pth, 'wt')) - - return MyBuildPy - - -def _add_append_key(in_dict, key, value): - """Helper for appending dependencies to setuptools args""" - # If in_dict[key] does not exist, create it - # If in_dict[key] is a string, make it len 1 list of strings - # Append value to in_dict[key] list - if key not in in_dict: - in_dict[key] = [] - elif isinstance(in_dict[key], str): - in_dict[key] = [in_dict[key]] - in_dict[key].append(value) - - -# Dependency checks -def package_check( - pkg_name, - version=None, - optional=False, - checker=LooseVersion, - version_getter=None, - messages=None, - setuptools_args=None, -): - """Check if package `pkg_name` is present and has good enough version - - Has two modes of operation. If `setuptools_args` is None (the default), - raise an error for missing non-optional dependencies and log warnings for - missing optional dependencies. If `setuptools_args` is a dict, then fill - ``install_requires`` key value with any missing non-optional dependencies, - and the ``extras_requires`` key value with optional dependencies. - - This allows us to work with and without setuptools. It also means we can - check for packages that have not been installed with setuptools to avoid - installing them again. - - Parameters - ---------- - pkg_name : str - name of package as imported into python - version : {None, str}, optional - minimum version of the package that we require. If None, we don't - check the version. Default is None - optional : bool or str, optional - If ``bool(optional)`` is False, raise error for absent package or wrong - version; otherwise warn. If ``setuptools_args`` is not None, and - ``bool(optional)`` is not False, then `optional` should be a string - giving the feature name for the ``extras_require`` argument to setup. 
- checker : callable, optional - callable with which to return comparable thing from version - string. Default is ``distutils.version.LooseVersion`` - version_getter : {None, callable}: - Callable that takes `pkg_name` as argument, and returns the - package version string - as in:: - - ``version = version_getter(pkg_name)`` - - If None, equivalent to:: - - mod = __import__(pkg_name); version = mod.__version__`` - messages : None or dict, optional - dictionary giving output messages - setuptools_args : None or dict - If None, raise errors / warnings for missing non-optional / optional - dependencies. If dict fill key values ``install_requires`` and - ``extras_require`` for non-optional and optional dependencies. - """ - setuptools_mode = not setuptools_args is None - optional_tf = bool(optional) - if version_getter is None: - - def version_getter(pkg_name): - mod = __import__(pkg_name) - return mod.__version__ - - if messages is None: - messages = {} - msgs = { - 'missing': 'Cannot import package "%s" - is it installed?', - 'missing opt': 'Missing optional package "%s"', - 'opt suffix': '; you may get run-time errors', - 'version too old': 'You have version %s of package "%s" but we need version >= %s', - } - msgs.update(messages) - status, have_version = _package_status(pkg_name, version, version_getter, checker) - if status == 'satisfied': - return - if not setuptools_mode: - if status == 'missing': - if not optional_tf: - raise RuntimeError(msgs['missing'] % pkg_name) - log.warn(msgs['missing opt'] % pkg_name + msgs['opt suffix']) - return - elif status == 'no-version': - raise RuntimeError(f'Cannot find version for {pkg_name}') - assert status == 'low-version' - if not optional_tf: - raise RuntimeError(msgs['version too old'] % (have_version, pkg_name, version)) - log.warn(msgs['version too old'] % (have_version, pkg_name, version) + msgs['opt suffix']) - return - # setuptools mode - if optional_tf and not isinstance(optional, str): - raise RuntimeError('Not-False optional arg should be string') - dependency = pkg_name - if version: - dependency += '>=' + version - if optional_tf: - if not 'extras_require' in setuptools_args: - setuptools_args['extras_require'] = {} - _add_append_key(setuptools_args['extras_require'], optional, dependency) - else: - _add_append_key(setuptools_args, 'install_requires', dependency) - - -def _package_status(pkg_name, version, version_getter, checker): - try: - __import__(pkg_name) - except ImportError: - return 'missing', None - if not version: - return 'satisfied', None - try: - have_version = version_getter(pkg_name) - except AttributeError: - return 'no-version', None - if checker(have_version) < checker(version): - return 'low-version', have_version - return 'satisfied', have_version - - -BAT_TEMPLATE = r"""@echo off -REM wrapper to use shebang first line of {FNAME} -set mypath=%~dp0 -set pyscript="%mypath%{FNAME}" -set /p line1=<%pyscript% -if "%line1:~0,2%" == "#!" (goto :goodstart) -echo First line of %pyscript% does not start with "#!" -exit /b 1 -:goodstart -set py_exe=%line1:~2% -call "%py_exe%" %pyscript% %* -""" - - -class install_scripts_bat(install_scripts): - """Make scripts executable on Windows - - Scripts are bare file names without extension on Unix, fitting (for example) - Debian rules. They identify as python scripts with the usual ``#!`` first - line. Unix recognizes and uses this first "shebang" line, but Windows does - not. 
So, on Windows only we add a ``.bat`` wrapper of name - ``bare_script_name.bat`` to call ``bare_script_name`` using the python - interpreter from the #! first line of the script. - - Notes - ----- - See discussion at - https://matthew-brett.github.io/pydagogue/installing_scripts.html and - example at git://github.com/matthew-brett/myscripter.git for more - background. - """ - - def run(self): - install_scripts.run(self) - if not os.name == 'nt': - return - for filepath in self.get_outputs(): - # If we can find an executable name in the #! top line of the script - # file, make .bat wrapper for script. - with open(filepath, 'rt') as fobj: - first_line = fobj.readline() - if not (first_line.startswith('#!') and 'python' in first_line.lower()): - log.info('No #!python executable found, skipping .bat wrapper') - continue - pth, fname = psplit(filepath) - froot, ext = splitext(fname) - bat_file = pjoin(pth, froot + '.bat') - bat_contents = BAT_TEMPLATE.replace('{FNAME}', fname) - log.info(f'Making {bat_file} wrapper for {filepath}') - if self.dry_run: - continue - with open(bat_file, 'wt') as fobj: - fobj.write(bat_contents) - - -class Bunch: - def __init__(self, vars): - for key, name in vars.items(): - if key.startswith('__'): - continue - self.__dict__[key] = name - - -def read_vars_from(ver_file): - """Read variables from Python text file - - Parameters - ---------- - ver_file : str - Filename of file to read - - Returns - ------- - info_vars : Bunch instance - Bunch object where variables read from `ver_file` appear as - attributes - """ - # Use exec for compabibility with Python 3 - ns = {} - with open(ver_file, 'rt') as fobj: - exec(fobj.read(), ns) - return Bunch(ns) diff --git a/nisext/testers.py b/nisext/testers.py deleted file mode 100644 index 07f71af696..0000000000 --- a/nisext/testers.py +++ /dev/null @@ -1,523 +0,0 @@ -"""Test package information in various install settings - -The routines here install the package from source directories, zips or eggs, and -check these installations by running tests, checking version information, -looking for files that were not copied over. - -The typical use for this module is as a Makefile target. 
For example, here are -the Makefile targets from nibabel:: - - # Check for files not installed - check-files: - $(PYTHON) -c 'from nisext.testers import check_files; check_files("nibabel")' - - # Print out info for possible install methods - check-version-info: - $(PYTHON) -c 'from nisext.testers import info_from_here; info_from_here("nibabel")' - - # Run tests from installed code - installed-tests: - $(PYTHON) -c 'from nisext.testers import tests_installed; tests_installed("nibabel")' - - # Run tests from installed code - sdist-tests: - $(PYTHON) -c 'from nisext.testers import sdist_tests; sdist_tests("nibabel")' - - # Run tests from binary egg - bdist-egg-tests: - $(PYTHON) -c 'from nisext.testers import bdist_egg_tests; bdist_egg_tests("nibabel")' -""" - - -import os -import re -import shutil -import sys -import tempfile -import zipfile -from glob import glob -from os.path import abspath -from os.path import join as pjoin -from subprocess import PIPE, Popen - -NEEDS_SHELL = os.name != 'nt' -PYTHON = sys.executable -HAVE_PUTENV = hasattr(os, 'putenv') - -PY_LIB_SDIR = 'pylib' - - -def back_tick(cmd, ret_err=False, as_str=True): - """Run command `cmd`, return stdout, or stdout, stderr if `ret_err` - - Roughly equivalent to ``check_output`` in Python 2.7 - - Parameters - ---------- - cmd : str - command to execute - ret_err : bool, optional - If True, return stderr in addition to stdout. If False, just return - stdout - as_str : bool, optional - Whether to decode outputs to unicode string on exit. - - Returns - ------- - out : str or tuple - If `ret_err` is False, return stripped string containing stdout from - `cmd`. If `ret_err` is True, return tuple of (stdout, stderr) where - ``stdout`` is the stripped stdout, and ``stderr`` is the stripped - stderr. - - Raises - ------ - RuntimeError - if command returns non-zero exit code. - """ - proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=NEEDS_SHELL) - out, err = proc.communicate() - retcode = proc.returncode - if retcode is None: - proc.terminate() - raise RuntimeError(cmd + ' process did not terminate') - if retcode != 0: - raise RuntimeError(cmd + ' process returned code %d' % retcode) - out = out.strip() - if as_str: - out = out.decode('latin-1') - if not ret_err: - return out - err = err.strip() - if as_str: - err = err.decode('latin-1') - return out, err - - -def run_mod_cmd(mod_name, pkg_path, cmd, script_dir=None, print_location=True): - """Run command in own process in anonymous path - - Parameters - ---------- - mod_name : str - Name of module to import - e.g. 'nibabel' - pkg_path : str - directory containing `mod_name` package. Typically that will be the - directory containing the e.g. 'nibabel' directory. - cmd : str - Python command to execute - script_dir : None or str, optional - script directory to prepend to PATH - print_location : bool, optional - Whether to print the location of the imported `mod_name` - - Returns - ------- - stdout : str - stdout as str - stderr : str - stderr as str - """ - if script_dir is None: - paths_add = '' - else: - if not HAVE_PUTENV: - raise RuntimeError('We cannot set environment variables') - # Need to add the python path for the scripts to pick up our package in - # their environment, because the scripts will get called via the shell - # (via `cmd`). Consider that PYTHONPATH may not be set. Because the - # command might run scripts via the shell, prepend script_dir to the - # system path also. 
- paths_add = r""" -os.environ['PATH'] = r'"{script_dir}"' + os.path.pathsep + os.environ['PATH'] -PYTHONPATH = os.environ.get('PYTHONPATH') -if PYTHONPATH is None: - os.environ['PYTHONPATH'] = r'"{pkg_path}"' -else: - os.environ['PYTHONPATH'] = r'"{pkg_path}"' + os.path.pathsep + PYTHONPATH -""".format( - **locals() - ) - if print_location: - p_loc = f'print({mod_name}.__file__);' - else: - p_loc = '' - cwd = os.getcwd() - tmpdir = tempfile.mkdtemp() - try: - os.chdir(tmpdir) - with open('script.py', 'wt') as fobj: - fobj.write( - r""" -import os -import sys -sys.path.insert(0, r"{pkg_path}") -{paths_add} -import {mod_name} -{p_loc} -{cmd}""".format( - **locals() - ) - ) - res = back_tick(f'{PYTHON} script.py', ret_err=True) - finally: - os.chdir(cwd) - shutil.rmtree(tmpdir) - return res - - -def zip_extract_all(fname, path=None): - """Extract all members from zipfile - - Deals with situation where the directory is stored in the zipfile as a name, - as well as files that have to go into this directory. - """ - zf = zipfile.ZipFile(fname) - members = zf.namelist() - # Remove members that are just bare directories - members = [m for m in members if not m.endswith('/')] - for zipinfo in members: - zf.extract(zipinfo, path, None) - - -def install_from_to(from_dir, to_dir, py_lib_sdir=PY_LIB_SDIR, bin_sdir='bin'): - """Install package in `from_dir` to standard location in `to_dir` - - Parameters - ---------- - from_dir : str - path containing files to install with ``python setup.py ...`` - to_dir : str - prefix path to which files will be installed, as in ``python setup.py - install --prefix=to_dir`` - py_lib_sdir : str, optional - subdirectory within `to_dir` to which library code will be installed - bin_sdir : str, optional - subdirectory within `to_dir` to which scripts will be installed - """ - site_pkgs_path = os.path.join(to_dir, py_lib_sdir) - py_lib_locs = f' --install-purelib={site_pkgs_path} ' f'--install-platlib={site_pkgs_path}' - pwd = os.path.abspath(os.getcwd()) - cmd = f'{PYTHON} setup.py --quiet install --prefix={to_dir} {py_lib_locs}' - try: - os.chdir(from_dir) - back_tick(cmd) - finally: - os.chdir(pwd) - - -def install_from_zip( - zip_fname, install_path, pkg_finder=None, py_lib_sdir=PY_LIB_SDIR, script_sdir='bin' -): - """Install package from zip file `zip_fname` - - Parameters - ---------- - zip_fname : str - filename of zip file containing package code - install_path : str - output prefix at which to install package - pkg_finder : None or callable, optional - If None, assume zip contains ``setup.py`` at the top level. Otherwise, - find directory containing ``setup.py`` with ``pth = - pkg_finder(unzip_path)`` where ``unzip_path`` is the path to which we - have unzipped the zip file contents. - py_lib_sdir : str, optional - subdirectory to which to write the library code from the package. Thus - if package called ``nibabel``, the written code will be in - ``//nibabel - script_sdir : str, optional - subdirectory to which we write the installed scripts. 
Thus scripts will - be written to ``/ - """ - unzip_path = tempfile.mkdtemp() - try: - # Zip may unpack module into current directory - zip_extract_all(zip_fname, unzip_path) - if pkg_finder is None: - from_path = unzip_path - else: - from_path = pkg_finder(unzip_path) - install_from_to(from_path, install_path, py_lib_sdir, script_sdir) - finally: - shutil.rmtree(unzip_path) - - -def contexts_print_info(mod_name, repo_path, install_path): - """Print result of get_info from different installation routes - - Runs installation from: - - * git archive zip file - * with setup.py install from repository directory - * just running code from repository directory - - and prints out result of get_info in each case. There will be many files - written into `install_path` that you may want to clean up somehow. - - Parameters - ---------- - mod_name : str - package name that will be installed, and tested - repo_path : str - path to location of git repository - install_path : str - path into which to install temporary installations - """ - site_pkgs_path = os.path.join(install_path, PY_LIB_SDIR) - # first test archive - pwd = os.path.abspath(os.getcwd()) - out_fname = pjoin(install_path, 'test.zip') - try: - os.chdir(repo_path) - back_tick(f'git archive --format zip -o {out_fname} HEAD') - finally: - os.chdir(pwd) - install_from_zip(out_fname, install_path, None) - cmd_str = f'print({mod_name}.get_info())' - print(run_mod_cmd(mod_name, site_pkgs_path, cmd_str)[0]) - # now test install into a directory from the repository - install_from_to(repo_path, install_path, PY_LIB_SDIR) - print(run_mod_cmd(mod_name, site_pkgs_path, cmd_str)[0]) - # test from development tree - print(run_mod_cmd(mod_name, repo_path, cmd_str)[0]) - - -def info_from_here(mod_name): - """Run info context checks starting in working directory - - Runs checks from current working directory, installing temporary - installations into a new temporary directory - - Parameters - ---------- - mod_name : str - package name that will be installed, and tested - """ - repo_path = os.path.abspath(os.getcwd()) - install_path = tempfile.mkdtemp() - try: - contexts_print_info(mod_name, repo_path, install_path) - finally: - shutil.rmtree(install_path) - - -def tests_installed(mod_name, source_path=None): - """Install from `source_path` into temporary directory; run tests - - Parameters - ---------- - mod_name : str - name of module - e.g. 'nibabel' - source_path : None or str - Path from which to install. If None, defaults to working directory - """ - if source_path is None: - source_path = os.path.abspath(os.getcwd()) - install_path = tempfile.mkdtemp() - site_pkgs_path = pjoin(install_path, PY_LIB_SDIR) - scripts_path = pjoin(install_path, 'bin') - try: - install_from_to(source_path, install_path, PY_LIB_SDIR, 'bin') - stdout, stderr = run_mod_cmd(mod_name, site_pkgs_path, mod_name + '.test()', scripts_path) - finally: - shutil.rmtree(install_path) - print(stdout) - print(stderr) - - -# Tell nose this is not a test -tests_installed.__test__ = False - - -def check_installed_files(repo_mod_path, install_mod_path): - """Check files in `repo_mod_path` are installed at `install_mod_path` - - At the moment, all this does is check that all the ``*.py`` files in - `repo_mod_path` are installed at `install_mod_path`. - - Parameters - ---------- - repo_mod_path : str - repository path containing package files, e.g. /nibabel> - install_mod_path : str - path at which package has been installed. This is the path where the - root package ``__init__.py`` lives. 
- - Return - ------ - uninstalled : list - list of files that should have been installed, but have not been - installed - """ - return missing_from(repo_mod_path, install_mod_path, filter=r'\.py$') - - -def missing_from(path0, path1, filter=None): - """Return filenames present in `path0` but not in `path1` - - Parameters - ---------- - path0 : str - path which contains all files of interest - path1 : str - path which should contain all files of interest - filter : None or str or regexp, optional - A successful result from ``filter.search(fname)`` means the file is of - interest. None means all files are of interest - - Returns - ------- - path1_missing : list - list of all files missing from `path1` that are in `path0` at the same - relative path. - """ - if not filter is None: - filter = re.compile(filter) - uninstalled = [] - # Walk directory tree to get py files - for dirpath, dirnames, filenames in os.walk(path0): - out_dirpath = dirpath.replace(path0, path1) - for fname in filenames: - if not filter is None and filter.search(fname) is None: - continue - equiv_fname = os.path.join(out_dirpath, fname) - if not os.path.isfile(equiv_fname): - uninstalled.append(pjoin(dirpath, fname)) - return uninstalled - - -def check_files(mod_name, repo_path=None, scripts_sdir='bin'): - """Print library and script files not picked up during install""" - if repo_path is None: - repo_path = abspath(os.getcwd()) - install_path = tempfile.mkdtemp() - repo_mod_path = pjoin(repo_path, mod_name) - installed_mod_path = pjoin(install_path, PY_LIB_SDIR, mod_name) - repo_bin = pjoin(repo_path, 'bin') - installed_bin = pjoin(install_path, 'bin') - try: - zip_fname = make_dist(repo_path, install_path, 'sdist --formats=zip', '*.zip') - pf = get_sdist_finder(mod_name) - install_from_zip(zip_fname, install_path, pf, PY_LIB_SDIR, scripts_sdir) - lib_misses = missing_from(repo_mod_path, installed_mod_path, r'\.py$') - script_misses = missing_from(repo_bin, installed_bin) - finally: - shutil.rmtree(install_path) - if lib_misses: - print('Missed library files: ', ', '.join(lib_misses)) - else: - print('You got all the library files') - if script_misses: - print('Missed script files: ', ', '.join(script_misses)) - else: - print('You got all the script files') - return len(lib_misses) > 0 or len(script_misses) > 0 - - -def get_sdist_finder(mod_name): - """Return function finding sdist source directory for `mod_name`""" - - def pf(pth): - pkg_dirs = glob(pjoin(pth, mod_name + '-*')) - if len(pkg_dirs) != 1: - raise OSError('There must be one and only one package dir') - return pkg_dirs[0] - - return pf - - -def sdist_tests(mod_name, repo_path=None, label='fast', doctests=True): - """Make sdist zip, install from it, and run tests""" - if repo_path is None: - repo_path = abspath(os.getcwd()) - install_path = tempfile.mkdtemp() - try: - zip_fname = make_dist(repo_path, install_path, 'sdist --formats=zip', '*.zip') - pf = get_sdist_finder(mod_name) - install_from_zip(zip_fname, install_path, pf, PY_LIB_SDIR, 'bin') - site_pkgs_path = pjoin(install_path, PY_LIB_SDIR) - script_path = pjoin(install_path, 'bin') - cmd = f"{mod_name}.test(label='{label}', doctests={doctests})" - stdout, stderr = run_mod_cmd(mod_name, site_pkgs_path, cmd, script_path) - finally: - shutil.rmtree(install_path) - print(stdout) - print(stderr) - - -sdist_tests.__test__ = False - - -def bdist_egg_tests(mod_name, repo_path=None, label='fast', doctests=True): - """Make bdist_egg, unzip it, and run tests from result - - We've got a problem here, because the 
egg does not contain the scripts, and - so, if we are testing the scripts with ``mod.test()``, we won't pick up the - scripts from the repository we are testing. - - So, you might need to add a label to the script tests, and use the `label` - parameter to indicate these should be skipped. As in: - - bdist_egg_tests('nibabel', None, label='not script_test') - """ - if repo_path is None: - repo_path = abspath(os.getcwd()) - install_path = tempfile.mkdtemp() - scripts_path = pjoin(install_path, 'bin') - try: - zip_fname = make_dist(repo_path, install_path, 'bdist_egg', '*.egg') - zip_extract_all(zip_fname, install_path) - cmd = f"{mod_name}.test(label='{label}', doctests={doctests})" - stdout, stderr = run_mod_cmd(mod_name, install_path, cmd, scripts_path) - finally: - shutil.rmtree(install_path) - print(stdout) - print(stderr) - - -bdist_egg_tests.__test__ = False - - -def make_dist(repo_path, out_dir, setup_params, zipglob): - """Create distutils distribution file - - Parameters - ---------- - repo_path : str - path to repository containing code and ``setup.py`` - out_dir : str - path to which to write new distribution file - setup_params: str - parameters to pass to ``setup.py`` to create distribution. - zipglob : str - glob identifying expected output file. - - Returns - ------- - out_fname : str - filename of generated distribution file - - Examples - -------- - Make, return a zipped sdist:: - - make_dist('/path/to/repo', '/tmp/path', 'sdist --formats=zip', '*.zip') - - Make, return a binary egg:: - - make_dist('/path/to/repo', '/tmp/path', 'bdist_egg', '*.egg') - """ - pwd = os.path.abspath(os.getcwd()) - try: - os.chdir(repo_path) - back_tick(f'{PYTHON} setup.py {setup_params} --dist-dir={out_dir}') - zips = glob(pjoin(out_dir, zipglob)) - if len(zips) != 1: - raise OSError( - f'There must be one and only one {zipglob} ' - f"file, but I found \"{': '.join(zips)}\"" - ) - finally: - os.chdir(pwd) - return zips[0] diff --git a/nisext/tests/__init__.py b/nisext/tests/__init__.py deleted file mode 100644 index af7d1d1dd2..0000000000 --- a/nisext/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Tests for nisext package diff --git a/nisext/tests/test_sexts.py b/nisext/tests/test_sexts.py deleted file mode 100644 index f262ec5685..0000000000 --- a/nisext/tests/test_sexts.py +++ /dev/null @@ -1,106 +0,0 @@ -"""Tests for nisexts.sexts module -""" - -import sys -import types - -import pytest - -from ..sexts import package_check - -FAKE_NAME = 'nisext_improbable' -assert FAKE_NAME not in sys.modules -FAKE_MODULE = types.ModuleType('nisext_fake') - - -def test_package_check(): - # Try to use a required package - raise error - with pytest.raises(RuntimeError): - package_check(FAKE_NAME) - # Optional, log.warn - package_check(FAKE_NAME, optional=True) - # Can also pass a string - package_check(FAKE_NAME, optional='some-package') - try: - # Make a package - sys.modules[FAKE_NAME] = FAKE_MODULE - # Now it passes if we don't check the version - package_check(FAKE_NAME) - # A fake version - FAKE_MODULE.__version__ = '0.2' - package_check(FAKE_NAME, version='0.2') - # fails when version not good enough - with pytest.raises(RuntimeError): - package_check(FAKE_NAME, '0.3') - # Unless optional in which case log.warns - package_check(FAKE_NAME, version='0.3', optional=True) - # Might do custom version check - package_check(FAKE_NAME, version='0.2', version_getter=lambda x: '0.2') - finally: - del sys.modules[FAKE_NAME] - - -def test_package_check_setuptools(): - # If setuptools arg not None, missing package 
just adds it to arg - with pytest.raises(RuntimeError): - package_check(FAKE_NAME, setuptools_args=None) - - def pkg_chk_sta(*args, **kwargs): - st_args = {} - package_check(*args, setuptools_args=st_args, **kwargs) - return st_args - - assert pkg_chk_sta(FAKE_NAME) == {'install_requires': ['nisext_improbable']} - # Check that this gets appended to existing value - old_sta = {'install_requires': ['something']} - package_check(FAKE_NAME, setuptools_args=old_sta) - assert old_sta == {'install_requires': ['something', 'nisext_improbable']} - # That existing value as string gets converted to a list - old_sta = {'install_requires': 'something'} - package_check(FAKE_NAME, setuptools_args=old_sta) - assert old_sta == {'install_requires': ['something', 'nisext_improbable']} - # Optional, add to extras_require - assert pkg_chk_sta(FAKE_NAME, optional='something') == { - 'extras_require': {'something': ['nisext_improbable']} - } - # Check that this gets appended to existing value - old_sta = {'extras_require': {'something': ['amodule']}} - package_check(FAKE_NAME, optional='something', setuptools_args=old_sta) - assert old_sta == {'extras_require': {'something': ['amodule', 'nisext_improbable']}} - # That string gets converted to a list here too - old_sta = {'extras_require': {'something': 'amodule'}} - package_check(FAKE_NAME, optional='something', setuptools_args=old_sta) - assert old_sta == {'extras_require': {'something': ['amodule', 'nisext_improbable']}} - # But optional has to be a string if not empty and setuptools_args defined - with pytest.raises(RuntimeError): - package_check(FAKE_NAME, optional=True, setuptools_args={}) - try: - # Make a package - sys.modules[FAKE_NAME] = FAKE_MODULE - # No install_requires because we already have it - assert pkg_chk_sta(FAKE_NAME) == {} - # A fake version still works - FAKE_MODULE.__version__ = '0.2' - assert pkg_chk_sta(FAKE_NAME, version='0.2') == {} - # goes into install requires when version not good enough - exp_spec = [FAKE_NAME + '>=0.3'] - assert pkg_chk_sta(FAKE_NAME, version='0.3') == {'install_requires': exp_spec} - # Unless optional in which case goes into extras_require - package_check(FAKE_NAME, version='0.2', version_getter=lambda x: '0.2') - assert pkg_chk_sta(FAKE_NAME, version='0.3', optional='afeature') == { - 'extras_require': {'afeature': exp_spec} - } - # Might do custom version check - assert pkg_chk_sta(FAKE_NAME, version='0.2', version_getter=lambda x: '0.2') == {} - # If the version check fails, put into requires - bad_getter = lambda x: x.not_an_attribute - exp_spec = [FAKE_NAME + '>=0.2'] - assert pkg_chk_sta(FAKE_NAME, version='0.2', version_getter=bad_getter) == { - 'install_requires': exp_spec - } - # Likewise for optional dependency - assert pkg_chk_sta( - FAKE_NAME, version='0.2', optional='afeature', version_getter=bad_getter - ) == {'extras_require': {'afeature': [FAKE_NAME + '>=0.2']}} - finally: - del sys.modules[FAKE_NAME] diff --git a/nisext/tests/test_testers.py b/nisext/tests/test_testers.py deleted file mode 100644 index f81a40f1df..0000000000 --- a/nisext/tests/test_testers.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Tests for testers -""" - -import os -from os.path import dirname, pathsep - -import pytest - -from ..testers import PYTHON, back_tick, run_mod_cmd - - -def test_back_tick(): - cmd = f'{PYTHON} -c "print(\'Hello\')"' - assert back_tick(cmd) == 'Hello' - assert back_tick(cmd, ret_err=True) == ('Hello', '') - assert back_tick(cmd, True, False) == (b'Hello', b'') - cmd = f'{PYTHON} -c "raise ValueError()"' - 
with pytest.raises(RuntimeError): - back_tick(cmd) - - -def test_run_mod_cmd(): - mod = 'os' - mod_dir = dirname(os.__file__) - assert run_mod_cmd(mod, mod_dir, "print('Hello')", None, False) == ('Hello', '') - sout, serr = run_mod_cmd(mod, mod_dir, "print('Hello again')") - assert serr == '' - mod_file, out_str = [s.strip() for s in sout.split('\n')] - assert mod_file.startswith(mod_dir) - assert out_str == 'Hello again' - sout, serr = run_mod_cmd(mod, mod_dir, "print(os.environ['PATH'])", None, False) - assert serr == '' - sout2, serr = run_mod_cmd(mod, mod_dir, "print(os.environ['PATH'])", 'pth2', False) - assert serr == '' - assert sout2 == '"pth2"' + pathsep + sout diff --git a/pyproject.toml b/pyproject.toml index 9fec3975cc..b62c0048af 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,11 +9,12 @@ authors = [{ name = "NiBabel developers", email = "neuroimaging@python.org" }] maintainers = [{ name = "Christopher Markiewicz" }] readme = "README.rst" license = { text = "MIT License" } -requires-python = ">=3.8" +requires-python = ">=3.9" dependencies = [ - "numpy >=1.20", - "packaging >=17", - "importlib_resources >=1.3; python_version < '3.9'", + "numpy >=1.22", + "packaging >=20", + "importlib_resources >=5.12; python_version < '3.12'", + "typing_extensions >=4.6; python_version < '3.13'", ] classifiers = [ "Development Status :: 5 - Production/Stable", @@ -22,11 +23,11 @@ classifiers = [ "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "Topic :: Scientific/Engineering", ] # Version from setuptools_scm @@ -52,7 +53,7 @@ parrec2nii = "nibabel.cmdline.parrec2nii:main" [project.optional-dependencies] all = ["nibabel[dicomfs,minc2,spm,zstd]"] # Features -dicom = ["pydicom >=1.0.0"] +dicom = ["pydicom >=2.3"] dicomfs = ["nibabel[dicom]", "pillow"] minc2 = ["h5py"] spm = ["scipy"] @@ -61,7 +62,7 @@ zstd = ["pyzstd >= 0.14.3"] # tox should use these with extras instead of duplicating doc = [ "sphinx", - "matplotlib>=1.5.3", + "matplotlib>=3.5", "numpydoc", "texext", "tomli; python_version < '3.11'", @@ -72,6 +73,7 @@ test = [ "pytest-cov", "pytest-httpserver", "pytest-xdist", + "coverage>=7.2", ] # Remaining: Simpler to centralize in tox dev = ["tox"] @@ -87,7 +89,7 @@ exclude = [ ] [tool.hatch.build.targets.wheel] -packages = ["nibabel", "nisext"] +packages = ["nibabel"] exclude = [ # 56MB test file does not need to be installed everywhere "nibabel/nicom/tests/data/4d_multiframe_test.dcm", @@ -109,27 +111,91 @@ __version__ = version = {version!r} __version_tuple__ = version_tuple = {version_tuple!r} ''' -[tool.blue] -line_length = 99 -target-version = ["py37"] -force-exclude = """ -( - _version.py - | nibabel/externals/ - | versioneer.py -) -""" +[tool.ruff] +line-length = 99 +exclude = ["doc", "nibabel/externals", "tools", "version.py", "versioneer.py"] -[tool.isort] -profile = "black" -line_length = 99 -extend_skip = ["_version.py", "externals"] +[tool.ruff.lint] +select = [ + "B", + "C4", + "F", + "FLY", + "FURB", + "I", + "PERF", + "PGH", + "PIE", + "PLE", + "PT", + "PYI", + "Q", + "RSE", + "RUF", + "TCH", + "UP", +] +ignore = [ + "B006", # TODO: enable + "B008", # TODO: enable + "B007", + "B011", + "B017", # TODO: enable + "B018", + "B020", + "B023", # TODO: enable + 
"B028", + "B904", + "C401", + "C408", + "C416", + "PERF203", + "PIE790", + "PT004", # deprecated + "PT005", # deprecated + "PT007", + "PT011", + "PT012", + "PT017", + "PT018", + "PYI024", + "RUF005", + "RUF012", # TODO: enable + "RUF015", + "RUF017", # TODO: enable + "UP027", # deprecated + "UP038", # https://github.com/astral-sh/ruff/issues/7871 + # https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules + "W191", + "E111", + "E114", + "E117", + "D206", + "D300", + "Q000", + "Q001", + "Q002", + "Q003", + "COM812", + "COM819", + "ISC001", + "ISC002", +] + +[tool.ruff.lint.per-file-ignores] +"__init__.py" = ["F401"] +"doc/source/conf.py" = ["F401"] + +[tool.ruff.format] +quote-style = "single" [tool.mypy] python_version = "3.11" exclude = [ "/tests", ] +warn_unreachable = true +enable_error_code = ["ignore-without-code", "redundant-expr", "truthy-bool"] [tool.codespell] skip = "*/data/*,./nibabel-data" diff --git a/tools/make_tarball.py b/tools/make_tarball.py index 3cdad40d0b..b49a1f276a 100755 --- a/tools/make_tarball.py +++ b/tools/make_tarball.py @@ -5,7 +5,7 @@ import os import commands -from toollib import * +from toollib import c, cd tag = commands.getoutput('git describe') base_name = f'nibabel-{tag}' diff --git a/tools/markdown_release_notes.py b/tools/markdown_release_notes.py index 66e7876036..cdae474f51 100644 --- a/tools/markdown_release_notes.py +++ b/tools/markdown_release_notes.py @@ -1,14 +1,53 @@ #!/usr/bin/env python import re import sys +from collections import defaultdict +from functools import cache +from operator import call from pathlib import Path +from sphinx.ext.intersphinx import fetch_inventory + CHANGELOG = Path(__file__).parent.parent / 'Changelog' # Match release lines like "5.2.0 (Monday 11 December 2023)" RELEASE_REGEX = re.compile(r"""((?:\d+)\.(?:\d+)\.(?:\d+)) \(\w+ \d{1,2} \w+ \d{4}\)$""") +class MockConfig: + intersphinx_timeout: int | None = None + tls_verify = False + tls_cacerts: str | dict[str, str] | None = None + user_agent: str = '' + + +@call +class MockApp: + srcdir = '' + config = MockConfig() + + +fetch_inv = cache(fetch_inventory) + + +def get_intersphinx(obj): + module = obj.split('.', 1)[0] + + registry = defaultdict(lambda: 'https://docs.python.org/3') + registry.update( + numpy='https://numpy.org/doc/stable', + ) + + base_url = registry[module] + + inventory = fetch_inv(MockApp, '', f'{base_url}/objects.inv') + # Check py: first, then whatever + for objclass in sorted(inventory, key=lambda x: not x.startswith('py:')): + if obj in inventory[objclass]: + return f'{base_url}/{inventory[objclass][obj][2]}' + raise ValueError("Couldn't lookup {obj}") + + def main(): version = sys.argv[1] output = sys.argv[2] @@ -27,7 +66,7 @@ def main(): if in_release_notes: break in_release_notes = match.group(1) == version - next(f) # Skip the underline + next(f) # Skip the underline continue if in_release_notes: @@ -46,7 +85,7 @@ def main(): release_notes = re.sub(r'\n +', ' ', release_notes) # Replace pr/ with # for GitHub - release_notes = re.sub(r'\(pr/(\d+)\)', r'(#\1)', release_notes) + release_notes = re.sub(r'pr/(\d+)', r'#\1', release_notes) # Replace :mod:`package.X` with [package.X](...) 
    release_notes = re.sub(
@@ -76,6 +115,14 @@
         r'[\3](https://nipy.org/nibabel/reference/\1.html#\1.\2.\3)',
         release_notes,
     )
+    # Replace :role:`obj` cross-references with intersphinx lookups
+    for ref in re.findall(r'(:[^:]*:`~?\w[\w.]+\w`)', release_notes):
+        objclass, tilde, module, obj = re.match(r':([^:]*):`(~?)([\w.]+)\.(\w+)`', ref).groups()
+        url = get_intersphinx(f'{module}.{obj}')
+        mdlink = f'[{"" if tilde else module + "."}{obj}]({url})'
+        release_notes = release_notes.replace(ref, mdlink)
+    # Replace RST links with Markdown links
+    release_notes = re.sub(r'`([^<`]*) <([^>]*)>`_+', r'[\1](\2)', release_notes)
 
     def python_doc(match):
         module = match.group(1)
@@ -84,10 +131,9 @@
 
     release_notes = re.sub(r':meth:`~([\w.]+)\.(\w+)`', python_doc, release_notes)
 
-    output.write('## Release notes\n\n')
-    output.write(release_notes)
-
-    output.close()
+    with output:
+        output.write('## Release notes\n\n')
+        output.write(release_notes)
 
 
 if __name__ == '__main__':
diff --git a/tools/mpkg_wrapper.py b/tools/mpkg_wrapper.py
index 0a96156e4d..f5f059b28d 100644
--- a/tools/mpkg_wrapper.py
+++ b/tools/mpkg_wrapper.py
@@ -24,7 +24,7 @@ def main():
     g = dict(globals())
     g['__file__'] = sys.argv[0]
     g['__name__'] = '__main__'
-    execfile(sys.argv[0], g, g)
+    exec(open(sys.argv[0]).read(), g, g)
 
 
 if __name__ == '__main__':
diff --git a/tox.ini b/tox.ini
index cc2b263cb1..82c13debc6 100644
--- a/tox.ini
+++ b/tox.ini
@@ -7,16 +7,16 @@ requires =
   tox>=4
 envlist =
   # No preinstallations
-  py3{8,9,10,11,12}-none
+  py3{9,10,11,12,13}-none
   # Minimum Python
-  py38-{min,full}
+  py39-{min,full}
   # x86 support range
   py3{9,10,11}-{full,pre}-{x86,x64}
   py3{9,10,11}-pre-{x86,x64}
   # x64-only range
-  py312-{full,pre}-x64
+  py3{12,13}-{full,pre}-x64
   # Special environment for numpy 2.0-dev testing
-  py312-dev-x64
+  py313-dev-x64
   install
   doctest
   style
@@ -26,11 +26,11 @@ skip_missing_interpreters = true
 
 # Configuration that allows us to split tests across GitHub runners effectively
 [gh-actions]
 python =
-    3.8: py38
     3.9: py39
     3.10: py310
     3.11: py311
     3.12: py312
+    3.13: py313
 
 [gh-actions:env]
 DEPENDS =
@@ -43,14 +43,15 @@ DEPENDS =
 ARCH =
     x64: x64
     x86: x86
+    arm64: arm64
 
 [testenv]
 description = Pytest with coverage
 labels = test
 install_command =
     python -I -m pip install -v \
-        x64: --only-binary numpy,scipy,h5py,pillow \
-        x86: --only-binary numpy,scipy,h5py,pillow,matplotlib \
+        dev: --only-binary numpy,scipy,h5py \
+        !dev: --only-binary numpy,scipy,h5py,pillow,matplotlib \
         pre,dev: --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple \
         {opts} {packages}
 pip_pre =
@@ -69,43 +70,47 @@ pass_env =
     NO_COLOR
     CLICOLOR
     CLICOLOR_FORCE
+set_env =
+    py313: PYTHON_GIL=0
 extras = test
 deps =
     # General minimum dependencies: pin based on API usage
-    min: packaging ==17
-    min: importlib_resources ==1.3; python_version < '3.9'
+    # matplotlib 3.5 requires packaging 20
+    min: packaging ==20
+    min: importlib_resources ==5.12; python_version < '3.12'
+    min: typing_extensions ==4.6; python_version < '3.13'
     # NEP29/SPEC0 + 1yr: Test on minor release series within the last 3 years
     # We're extending this to all optional dependencies
     # This only affects the range that we test on; numpy is the only non-optional
     # dependency, and will be the only one to affect pip environment resolution.
- min: numpy ==1.20 - min: h5py ==2.10 - min: indexed_gzip ==1.4 - min: matplotlib ==3.4 - min: pillow ==8.1 - min: pydicom ==2.1 - min: pyzstd ==0.14.3 - min: scipy ==1.6 + min: numpy ==1.22 + min: h5py ==3.5 + min: indexed_gzip ==1.6 + min: matplotlib ==3.5 + min: pillow ==8.4 + min: pydicom ==2.3 + min: pyzstd ==0.15.2 + min: scipy ==1.8 # Numpy 2.0 is a major breaking release; we cannot put much effort into # supporting until it's at least RC stable - pre: numpy <2.0.dev0 - dev: numpy >=2.0.dev0 + dev: numpy >=2.1.dev0 # Scipy stopped producing win32 wheels at py310 - py3{8,9}-full-x86,x64: scipy >=1.6 + py39-full-x86,x64,arm64: scipy >=1.8 # Matplotlib depends on scipy, so cannot be built for py310 on x86 - py3{8,9}-full-x86,x64: matplotlib >=3.4 + py39-full-x86,x64,arm64: matplotlib >=3.5 # h5py stopped producing win32 wheels at py39 - py38-full-x86,x64: h5py >=2.10 - full,pre,dev: pillow >=8.1 - full,pre,dev: indexed_gzip >=1.4 - full,pre,dev: pyzstd >=0.14.3 - full,pre: pydicom >=2.1 + {full,pre}-{x64,arm64}: h5py >=3.5 + full,pre,dev: pillow >=8.4 + full,pre: indexed_gzip >=1.6 + full,pre,dev: pyzstd >=0.15.2 + full,pre: pydicom >=2.3 dev: pydicom @ git+https://github.com/pydicom/pydicom.git@main commands = pytest --doctest-modules --doctest-plus \ --cov nibabel --cov-report xml:cov.xml \ --junitxml test-results.xml \ + --durations=20 --durations-min=1.0 \ --pyargs nibabel {posargs:-n auto} [testenv:install] @@ -139,26 +144,21 @@ commands = description = Check our style guide labels = check deps = - flake8 - blue - # Broken extras, remove when fix is released - isort[colors]!=5.13.1 + ruff>=0.3.0 skip_install = true commands = - blue --check --diff --color nibabel - isort --check --diff --color nibabel - flake8 nibabel + ruff check --diff nibabel + ruff format --diff nibabel [testenv:style-fix] description = Auto-apply style guide to the extent possible labels = pre-release deps = - blue - isort + ruff skip_install = true commands = - blue nibabel - isort nibabel + ruff check --fix nibabel + ruff format nibabel [testenv:spellcheck] description = Check spelling @@ -181,6 +181,7 @@ deps = numpy pyzstd importlib_resources + typing_extensions skip_install = true commands = mypy nibabel
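A note on the `tools/markdown_release_notes.py` additions above: `operator.call` (new in Python 3.11) used as a class decorator calls the class immediately, so `MockApp` ends up bound to a single instance rather than a class — a compact singleton for the app-like object `fetch_inventory` expects — while `cache(fetch_inventory)` memoizes each inventory download per URL. A minimal sketch of the two idioms, with illustrative names that are not part of the patch:

```python
from functools import cache
from operator import call  # operator.call is new in Python 3.11


@call  # decorator form of call(Config): binds the name to an instance, not the class
class Config:
    timeout: int | None = None
    user_agent: str = 'release-notes-script'


@cache  # each distinct URL is fetched once; repeat calls return the cached result
def fetch(url: str) -> str:
    print(f'fetching {url}')
    return f'<inventory from {url}>'


print(type(Config).__name__)  # 'Config' -- the name already refers to an instance
fetch('https://docs.python.org/3/objects.inv')  # prints 'fetching ...'
fetch('https://docs.python.org/3/objects.inv')  # cache hit: no print, same value
```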
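The cross-reference rewriting in `main()` is easiest to see end to end on a tiny input. The sketch below reuses the patch's regexes on an invented sample line, substituting a stand-in URL for a live `get_intersphinx()` call; the sample text and `example.org` links are illustrative only:

```python
import re

# Invented sample line; not taken from the Changelog.
notes = 'Use :meth:`~numpy.ndarray.tobytes` over :meth:`numpy.ndarray.tostring` (pr/1234)'

# pr/NNNN -> #NNNN, as in the patch
notes = re.sub(r'pr/(\d+)', r'#\1', notes)

# Same capture groups as the patch: role, optional tilde, dotted module path, object name
for ref in re.findall(r'(:[^:]*:`~?\w[\w.]+\w`)', notes):
    objclass, tilde, module, obj = re.match(r':([^:]*):`(~?)([\w.]+)\.(\w+)`', ref).groups()
    url = f'https://example.org/{module}.{obj}'  # stand-in for get_intersphinx()
    # A leading ~ means "display only the last component of the dotted path"
    mdlink = f'[{"" if tilde else module + "."}{obj}]({url})'
    notes = notes.replace(ref, mdlink)

print(notes)
# Use [tobytes](https://example.org/numpy.ndarray.tobytes) over
# [numpy.ndarray.tostring](https://example.org/numpy.ndarray.tostring) (#1234)
```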
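Finally, on the `execfile` port in `tools/mpkg_wrapper.py`: `exec(open(...).read(), g, g)` is a faithful one-line translation, though it leaves the file handle open and reports tracebacks against `<string>`. If either ever matters, a drop-in helper along these lines (hypothetical, not part of the patch) addresses both:

```python
def execfile3(path, globals_, locals_=None):
    """Python 3 replacement for the removed execfile() builtin.

    compile() with the real path keeps tracebacks pointing at `path`
    rather than '<string>', and the context manager closes the file.
    """
    with open(path, 'rb') as f:
        code = compile(f.read(), path, 'exec')
    exec(code, globals_, globals_ if locals_ is None else locals_)
```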