diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..ba2becff2 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,21 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for all configuration options: +# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file + +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "monthly" + commit-message: + prefix: "chore(ci): " + groups: + github-actions: + patterns: + - "*" + - package-ecosystem: "pip" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "weekly" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3ad892f24..5e8d1ef03 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,11 +14,11 @@ jobs: lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v3 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 with: - python-version: "3.9" - - uses: pre-commit/action@v2.0.3 + python-version: "3.12" + - uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1 # Make sure commit messages follow the conventional commits convention: # https://www.conventionalcommits.org @@ -26,23 +26,23 @@ jobs: name: Lint Commit Messages runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: fetch-depth: 0 - - uses: wagoid/commitlint-github-action@v5 + - uses: wagoid/commitlint-github-action@b948419dd99f3fd78a6548d48f94e3df7f6bf3ed # v6 test: strategy: fail-fast: false matrix: python-version: - - "3.8" - 
"3.9" - "3.10" - "3.11" - "3.12" - - "pypy-3.8" + - "3.13" - "pypy-3.9" + - "pypy-3.10" os: - ubuntu-latest - macos-latest @@ -55,24 +55,25 @@ jobs: extension: use_cython - os: windows-latest extension: use_cython - - os: windows-latest - python-version: "pypy-3.8" - os: windows-latest python-version: "pypy-3.9" - - os: macos-latest - python-version: "pypy-3.8" + - os: windows-latest + python-version: "pypy-3.10" - os: macos-latest python-version: "pypy-3.9" + - os: macos-latest + python-version: "pypy-3.10" runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Install poetry run: pipx install poetry - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 with: python-version: ${{ matrix.python-version }} cache: "poetry" + allow-prereleases: true - name: Install Dependencies no cython if: ${{ matrix.extension == 'skip_cython' }} env: @@ -86,108 +87,214 @@ jobs: - name: Test with Pytest run: poetry run pytest --durations=20 --timeout=60 -v --cov=zeroconf --cov-branch --cov-report xml --cov-report html --cov-report term-missing tests - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@ad3126e916f78f00edff4ed0317cf185271ccc2d # v5 with: token: ${{ secrets.CODECOV_TOKEN }} - release: + benchmark: runs-on: ubuntu-latest - environment: release - if: github.ref == 'refs/heads/master' + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + - name: Setup Python 3.13 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 + with: + python-version: 3.13 + - uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 + - name: Install Dependencies + run: | + REQUIRE_CYTHON=1 poetry install --only=main,dev + shell: bash + - name: Run benchmarks + uses: 
CodSpeedHQ/action@0010eb0ca6e89b80c88e8edaaa07cfe5f3e6664d # v3
+        with:
+          token: ${{ secrets.CODSPEED_TOKEN }}
+          run: poetry run pytest --no-cov -vvvvv --codspeed tests/benchmarks
+
+  release:
     needs:
       - test
       - lint
       - commitlint
+    if: github.repository_owner == 'python-zeroconf'
+
+    runs-on: ubuntu-latest
+    environment: release
+    concurrency: release
+    permissions:
+      id-token: write
+      contents: write
+    outputs:
+      released: ${{ steps.release.outputs.released }}
+      newest_release_tag: ${{ steps.release.outputs.tag }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
         with:
           fetch-depth: 0
+          ref: ${{ github.head_ref || github.ref_name }}
+
+      # Do a dry run of PSR
+      - name: Test release
+        uses: python-semantic-release/python-semantic-release@26bb37cfab71a5a372e3db0f48a6eac57519a4a6 # v9.21.0
+        if: github.ref_name != 'master'
+        with:
+          root_options: --noop
+
-      # Run semantic release:
-      #   - Update CHANGELOG.md
-      #   - Update version in code
-      #   - Create git tag
-      #   - Create GitHub release
-      #   - Publish to PyPI
-      - name: Python Semantic Release
-        uses: relekang/python-semantic-release@v7.34.6
-        # env:
-        #   REPOSITORY_URL: https://test.pypi.org/legacy/
-        #   TWINE_REPOSITORY_URL: https://test.pypi.org/legacy/
+      # On main branch: actual PSR + upload to PyPI & GitHub
+      - name: Release
+        uses: python-semantic-release/python-semantic-release@26bb37cfab71a5a372e3db0f48a6eac57519a4a6 # v9.21.0
+        id: release
+        if: github.ref_name == 'master'
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Publish package distributions to PyPI
+        uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # release/v1
+        if: steps.release.outputs.released == 'true'
+
+      - name: Publish package distributions to GitHub Releases
+        uses: python-semantic-release/upload-to-gh-release@0a92b5d7ebfc15a84f9801ebd1bf706343d43711 # main
+        if: steps.release.outputs.released == 'true'
         with:
           github_token: ${{ secrets.GITHUB_TOKEN 
}} - pypi_token: ${{ secrets.PYPI_TOKEN }} build_wheels: needs: [release] + if: needs.release.outputs.released == 'true' - name: Build wheels on ${{ matrix.os }} + name: Wheels for ${{ matrix.os }} (${{ matrix.musl == 'musllinux' && 'musllinux' || 'manylinux' }}) ${{ matrix.qemu }} ${{ matrix.pyver }} runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-20.04, windows-2019, macOS-11] - + os: + [ + ubuntu-24.04-arm, + ubuntu-latest, + windows-2019, + macos-13, + macos-latest, + ] + qemu: [""] + musl: [""] + pyver: [""] + include: + - os: ubuntu-latest + musl: "musllinux" + - os: ubuntu-24.04-arm + musl: "musllinux" + # qemu is slow, make a single + # runner per Python version + - os: ubuntu-latest + qemu: armv7l + musl: "musllinux" + pyver: cp39 + - os: ubuntu-latest + qemu: armv7l + musl: "musllinux" + pyver: cp310 + - os: ubuntu-latest + qemu: armv7l + musl: "musllinux" + pyver: cp311 + - os: ubuntu-latest + qemu: armv7l + musl: "musllinux" + pyver: cp312 + - os: ubuntu-latest + qemu: armv7l + musl: "musllinux" + pyver: cp313 + # qemu is slow, make a single + # runner per Python version + - os: ubuntu-latest + qemu: armv7l + musl: "" + pyver: cp39 + - os: ubuntu-latest + qemu: armv7l + musl: "" + pyver: cp310 + - os: ubuntu-latest + qemu: armv7l + musl: "" + pyver: cp311 + - os: ubuntu-latest + qemu: armv7l + musl: "" + pyver: cp312 + - os: ubuntu-latest + qemu: armv7l + musl: "" + pyver: cp313 steps: - - uses: actions/checkout@v3 + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: fetch-depth: 0 ref: "master" - # Used to host cibuildwheel - name: Set up Python - uses: actions/setup-python@v4 - - - name: Install python-semantic-release - run: pipx install python-semantic-release==7.34.6 - - - name: Get Release Tag - id: release_tag - shell: bash + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 + with: + python-version: "3.12" + - name: Set up QEMU + if: ${{ matrix.qemu }} + uses: 
docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3 + with: + platforms: all + # This should be temporary + # xref https://github.com/docker/setup-qemu-action/issues/188 + # xref https://github.com/tonistiigi/binfmt/issues/215 + image: tonistiigi/binfmt:qemu-v8.1.5 + id: qemu + - name: Prepare emulation + if: ${{ matrix.qemu }} run: | - echo "::set-output name=newest_release_tag::$(semantic-release print-version --current)" + if [[ -n "${{ matrix.qemu }}" ]]; then + # Build emulated architectures only if QEMU is set, + # use default "auto" otherwise + echo "CIBW_ARCHS_LINUX=${{ matrix.qemu }}" >> $GITHUB_ENV + fi + - name: Limit to a specific Python version on slow QEMU + if: ${{ matrix.pyver }} + run: | + if [[ -n "${{ matrix.pyver }}" ]]; then + echo "CIBW_BUILD=${{ matrix.pyver }}*" >> $GITHUB_ENV + fi - - uses: actions/checkout@v3 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: - ref: "${{ steps.release_tag.outputs.newest_release_tag }}" + ref: ${{ needs.release.outputs.newest_release_tag }} fetch-depth: 0 - - name: Set up QEMU - if: runner.os == 'Linux' - uses: docker/setup-qemu-action@v3 - with: - platforms: arm64 - - - name: Build wheels - uses: pypa/cibuildwheel@v2.17.0 + - name: Build wheels ${{ matrix.musl }} (${{ matrix.qemu }}) + uses: pypa/cibuildwheel@faf86a6ed7efa889faf6996aa23820831055001a # v2.23.3 # to supply options, put them in 'env', like: env: - CIBW_SKIP: cp36-* cp37-* pp36-* pp37-* *p38-*_aarch64 *p39-*_aarch64 *p310-*_aarch64 pp*_aarch64 *musllinux*_aarch64 - CIBW_BEFORE_ALL_LINUX: apt-get install -y gcc || yum install -y gcc || apk add gcc - CIBW_ARCHS_LINUX: auto aarch64 - CIBW_BUILD_VERBOSITY: 3 + CIBW_SKIP: cp36-* cp37-* pp36-* pp37-* pp38-* cp38-* ${{ matrix.musl == 'musllinux' && '*manylinux*' || '*musllinux*' }} + CIBW_BEFORE_ALL_LINUX: apt install -y gcc || yum install -y gcc || apk add gcc REQUIRE_CYTHON: 1 - - uses: actions/upload-artifact@v3 + - uses: 
actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: path: ./wheelhouse/*.whl + name: wheels-${{ matrix.os }}-${{ matrix.musl }}-${{ matrix.qemu }}-${{ matrix.pyver }} upload_pypi: needs: [build_wheels] runs-on: ubuntu-latest environment: release + permissions: + id-token: write # IMPORTANT: this permission is mandatory for trusted publishing steps: - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 with: # unpacks default artifact into dist/ # if `name: artifact` is omitted, the action will create extra parent dir - name: artifact path: dist + pattern: wheels-* + merge-multiple: true - - uses: pypa/gh-action-pypi-publish@v1.5.0 - with: - user: __token__ - password: ${{ secrets.PYPI_TOKEN }} - - # To test: repository_url: https://test.pypi.org/legacy/ + - uses: + pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # v1.12.4 diff --git a/.gitignore b/.gitignore index 0af9ce1e1..430fbec9c 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,4 @@ docs/_build/ .vscode /dist/ /zeroconf.egg-info/ +/src/**/*.c diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b7ae9294c..50a1dd376 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ # See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks exclude: "CHANGELOG.md" -default_stages: [commit] +default_stages: [pre-commit] ci: autofix_commit_msg: "chore(pre-commit.ci): auto fixes" @@ -9,53 +9,57 @@ ci: repos: - repo: https://github.com/commitizen-tools/commitizen - rev: v2.32.4 + rev: v4.7.0 hooks: - id: commitizen stages: [commit-msg] - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.3.0 + rev: v5.0.0 hooks: - - id: debug-statements - id: check-builtin-literals - id: check-case-conflict - id: check-docstring-first - id: check-json + - id: check-shebang-scripts-are-executable - id: check-toml - id: check-xml - 
id: check-yaml + - id: debug-statements - id: detect-private-key - id: end-of-file-fixer - id: trailing-whitespace - - id: debug-statements - repo: https://github.com/pre-commit/mirrors-prettier - rev: v2.7.1 + rev: v4.0.0-alpha.8 hooks: - id: prettier args: ["--tab-width", "2"] + files: ".(css|html|js|json|md|toml|yaml)$" - repo: https://github.com/asottile/pyupgrade - rev: v2.37.3 + rev: v3.19.1 hooks: - id: pyupgrade - args: [--py37-plus] - - repo: https://github.com/PyCQA/isort - rev: 5.12.0 - hooks: - - id: isort - - repo: https://github.com/psf/black - rev: 22.8.0 - hooks: - - id: black - # - repo: https://github.com/codespell-project/codespell - # rev: v2.2.1 - # hooks: - # - id: codespell + args: [--py39-plus] + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.11.9 + hooks: + - id: ruff + args: [--fix, --exit-non-zero-on-fix] + - id: ruff-format + - repo: https://github.com/codespell-project/codespell + rev: v2.4.1 + hooks: + - id: codespell - repo: https://github.com/PyCQA/flake8 - rev: 5.0.4 + rev: 7.2.0 hooks: - id: flake8 - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.931 + rev: v1.15.0 hooks: - id: mypy - additional_dependencies: [] + additional_dependencies: [ifaddr] + - repo: https://github.com/MarcoGorelli/cython-lint + rev: v0.16.6 + hooks: + - id: cython-lint + - id: double-quote-cython-strings diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 000000000..aee2616a1 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,16 @@ +# Read the Docs configuration file for Sphinx projects +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details +version: 2 + +build: + os: ubuntu-24.04 + tools: + python: "3.12" + jobs: + post_install: + # https://docs.readthedocs.com/platform/stable/build-customization.html#install-dependencies-with-poetry + - pip install poetry + - SKIP_CYTHON=1 VIRTUAL_ENV=$READTHEDOCS_VIRTUALENV_PATH poetry install --with docs + +sphinx: + configuration: docs/conf.py 
diff --git a/CHANGELOG.md b/CHANGELOG.md index a2026cbaa..d8a3d4cfb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,1955 +1,1958 @@ -# Changelog +# CHANGELOG + + +## v0.147.0 (2025-05-03) + +### Features + +- Add cython 3.1 support ([#1580](https://github.com/python-zeroconf/python-zeroconf/pull/1580), + [`1d9c94a`](https://github.com/python-zeroconf/python-zeroconf/commit/1d9c94a82d8da16b8f5355131e6167b69293da6c)) + +- Cython 3.1 support ([#1578](https://github.com/python-zeroconf/python-zeroconf/pull/1578), + [`daaf8d6`](https://github.com/python-zeroconf/python-zeroconf/commit/daaf8d6981c778fe4ba0a63371d9368cf217891a)) + +- Cython 3.11 support ([#1579](https://github.com/python-zeroconf/python-zeroconf/pull/1579), + [`1569383`](https://github.com/python-zeroconf/python-zeroconf/commit/1569383c6cf8ce8977427cfdaf5c7104ce52ab08)) + + +## v0.146.5 (2025-04-14) + +### Bug Fixes + +- Address non-working socket configuration + ([#1563](https://github.com/python-zeroconf/python-zeroconf/pull/1563), + [`cc0f835`](https://github.com/python-zeroconf/python-zeroconf/commit/cc0f8350c30c82409b1a9bfecb19ff9b3368d6a7)) + +Co-authored-by: J. Nick Koston + + +## v0.146.4 (2025-04-14) + +### Bug Fixes + +- Avoid loading adapter list twice + ([#1564](https://github.com/python-zeroconf/python-zeroconf/pull/1564), + [`8359488`](https://github.com/python-zeroconf/python-zeroconf/commit/83594887521507cf77bfc0a397becabaaab287c2)) + + +## v0.146.3 (2025-04-02) + +### Bug Fixes + +- Correctly override question type flag for requests + ([#1558](https://github.com/python-zeroconf/python-zeroconf/pull/1558), + [`bd643a2`](https://github.com/python-zeroconf/python-zeroconf/commit/bd643a227bc4d6a949d558850ad1431bc2940d74)) + +* fix: correctly override question type flag for requests + +Currently even when setting the explicit question type flag, the implementation ignores it for + subsequent queries. This commit ensures that all queries respect the explicit question type flag. 
+ +* chore(tests): add test for explicit question type flag + +Add unit test to validate that the explicit question type flag is set correctly in outgoing + requests. + + +## v0.146.2 (2025-04-01) + +### Bug Fixes + +- Create listener socket with specific IP version + ([#1557](https://github.com/python-zeroconf/python-zeroconf/pull/1557), + [`b757ddf`](https://github.com/python-zeroconf/python-zeroconf/commit/b757ddf98d7d04c366281a4281a449c5c2cb897d)) + +* fix: create listener socket with specific IP version + +Create listener sockets when using unicast with specific IP version as well, just like in + `new_respond_socket()`. + +* chore(tests): add unit test for socket creation with unicast addressing + + +## v0.146.1 (2025-03-05) + +### Bug Fixes + +- Use trusted publishing for uploading wheels + ([#1541](https://github.com/python-zeroconf/python-zeroconf/pull/1541), + [`fa65cc8`](https://github.com/python-zeroconf/python-zeroconf/commit/fa65cc8791a6f4c53bc29088cb60b83f420b1ae6)) + + +## v0.146.0 (2025-03-05) + +### Features + +- Reduce size of wheels ([#1540](https://github.com/python-zeroconf/python-zeroconf/pull/1540), + [`dea233c`](https://github.com/python-zeroconf/python-zeroconf/commit/dea233c1e0e80584263090727ce07648755964af)) + +feat: reduce size of binaries + + +## v0.145.1 (2025-02-18) + +### Bug Fixes + +- Hold a strong reference to the AsyncEngine setup task + ([#1533](https://github.com/python-zeroconf/python-zeroconf/pull/1533), + [`d4e6f25`](https://github.com/python-zeroconf/python-zeroconf/commit/d4e6f25754c15417b8bd9839dc8636b2cff717c8)) + + +## v0.145.0 (2025-02-15) + +### Features + +- **docs**: Enable link to source code + ([#1529](https://github.com/python-zeroconf/python-zeroconf/pull/1529), + [`1c7f354`](https://github.com/python-zeroconf/python-zeroconf/commit/1c7f3548b6cbddf73dbb9d69cd8987c8ad32c705)) + + +## v0.144.3 (2025-02-14) + +### Bug Fixes + +- Non unique name during wheel upload + 
([#1527](https://github.com/python-zeroconf/python-zeroconf/pull/1527), + [`43136fa`](https://github.com/python-zeroconf/python-zeroconf/commit/43136fa418d4d7826415e1d0f7761b198347ced7)) + + +## v0.144.2 (2025-02-14) + +### Bug Fixes + +- Add a helpful hint for when EADDRINUSE happens during startup + ([#1526](https://github.com/python-zeroconf/python-zeroconf/pull/1526), + [`48dbb71`](https://github.com/python-zeroconf/python-zeroconf/commit/48dbb7190a4f5126e39dbcdb87e34380d4562cd0)) + + +## v0.144.1 (2025-02-12) + +### Bug Fixes + +- Wheel builds failing after adding armv7l builds + ([#1518](https://github.com/python-zeroconf/python-zeroconf/pull/1518), + [`e7adac9`](https://github.com/python-zeroconf/python-zeroconf/commit/e7adac9c59fc4d0c4822c6097a4daee3d68eb4de)) + + +## v0.144.0 (2025-02-12) + +### Features + +- Add armv7l wheel builds ([#1517](https://github.com/python-zeroconf/python-zeroconf/pull/1517), + [`39887b8`](https://github.com/python-zeroconf/python-zeroconf/commit/39887b80328d616e8e6f6ca9d08aecc06f7b0711)) + + +## v0.143.1 (2025-02-12) + +### Bug Fixes + +- Make no buffer space available when adding multicast memberships forgiving + ([#1516](https://github.com/python-zeroconf/python-zeroconf/pull/1516), + [`f377d5c`](https://github.com/python-zeroconf/python-zeroconf/commit/f377d5cd08d724282c8487785163b466f3971344)) + + +## v0.143.0 (2025-01-31) + +### Features + +- Eliminate async_timeout dep on python less than 3.11 + ([#1500](https://github.com/python-zeroconf/python-zeroconf/pull/1500), + [`44457be`](https://github.com/python-zeroconf/python-zeroconf/commit/44457be4571add2f851192db3b37a96d9d27b00e)) + + +## v0.142.0 (2025-01-30) + +### Features + +- Add simple address resolvers and examples + ([#1499](https://github.com/python-zeroconf/python-zeroconf/pull/1499), + [`ae3c352`](https://github.com/python-zeroconf/python-zeroconf/commit/ae3c3523e5f2896989d0b932d53ef1e24ef4aee8)) + + +## v0.141.0 (2025-01-22) + +### Features + +- Speed up adding 
and expiring records in the DNSCache + ([#1490](https://github.com/python-zeroconf/python-zeroconf/pull/1490), + [`628b136`](https://github.com/python-zeroconf/python-zeroconf/commit/628b13670d04327dd8d4908842f31b476598c7e8)) + + +## v0.140.1 (2025-01-17) + +### Bug Fixes + +- Wheel builds for aarch64 ([#1485](https://github.com/python-zeroconf/python-zeroconf/pull/1485), + [`9d228e2`](https://github.com/python-zeroconf/python-zeroconf/commit/9d228e28eead1561deda696e8837d59896cbc98d)) + + +## v0.140.0 (2025-01-17) + +### Bug Fixes + +- **docs**: Remove repetition of words + ([#1479](https://github.com/python-zeroconf/python-zeroconf/pull/1479), + [`dde26c6`](https://github.com/python-zeroconf/python-zeroconf/commit/dde26c655a49811c11071b0531e408a188687009)) + +Co-authored-by: J. Nick Koston + +### Features + +- Migrate to native types ([#1472](https://github.com/python-zeroconf/python-zeroconf/pull/1472), + [`22a0fb4`](https://github.com/python-zeroconf/python-zeroconf/commit/22a0fb487db27bc2c6448a9167742f3040e910ba)) + +Co-authored-by: J. 
Nick Koston + +Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> + +- Small performance improvement to writing outgoing packets + ([#1482](https://github.com/python-zeroconf/python-zeroconf/pull/1482), + [`d9be715`](https://github.com/python-zeroconf/python-zeroconf/commit/d9be7155a0ef1ac521e5bbedd3884ddeb9f0b99d)) + + +## v0.139.0 (2025-01-09) + +### Features + +- Implement heapq for tracking cache expire times + ([#1465](https://github.com/python-zeroconf/python-zeroconf/pull/1465), + [`09db184`](https://github.com/python-zeroconf/python-zeroconf/commit/09db1848957b34415f364b7338e4adce99b57abc)) + + +## v0.138.1 (2025-01-08) + +### Bug Fixes + +- Ensure cache does not return stale created and ttl values + ([#1469](https://github.com/python-zeroconf/python-zeroconf/pull/1469), + [`e05055c`](https://github.com/python-zeroconf/python-zeroconf/commit/e05055c584ca46080990437b2b385a187bc48458)) + + +## v0.138.0 (2025-01-08) + +### Features + +- Improve performance of processing incoming records + ([#1467](https://github.com/python-zeroconf/python-zeroconf/pull/1467), + [`ebbb2af`](https://github.com/python-zeroconf/python-zeroconf/commit/ebbb2afccabd3841a3cb0a39824b49773cc6258a)) + +Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> + + +## v0.137.2 (2025-01-06) + +### Bug Fixes + +- Split wheel builds to avoid timeout + ([#1461](https://github.com/python-zeroconf/python-zeroconf/pull/1461), + [`be05f0d`](https://github.com/python-zeroconf/python-zeroconf/commit/be05f0dc4f6b2431606031a7bb24585728d15f01)) + + +## v0.137.1 (2025-01-06) + +### Bug Fixes + +- Move wheel builds to macos-13 + ([#1459](https://github.com/python-zeroconf/python-zeroconf/pull/1459), + [`4ff48a0`](https://github.com/python-zeroconf/python-zeroconf/commit/4ff48a01bc76c82e5710aafaf6cf6e79c069cd85)) + + +## v0.137.0 (2025-01-06) + +### Features + +- Speed up parsing incoming records + 
([#1458](https://github.com/python-zeroconf/python-zeroconf/pull/1458), + [`783c1b3`](https://github.com/python-zeroconf/python-zeroconf/commit/783c1b37d1372c90dfce658c66d03aa753afbf49)) + + +## v0.136.2 (2024-11-21) + +### Bug Fixes + +- Retrigger release from failed github workflow + ([#1443](https://github.com/python-zeroconf/python-zeroconf/pull/1443), + [`2ea705d`](https://github.com/python-zeroconf/python-zeroconf/commit/2ea705d850c1cb096c87372d5ec855f684603d01)) + + +## v0.136.1 (2024-11-21) + +### Bug Fixes + +- **ci**: Run release workflow only on main repository + ([#1441](https://github.com/python-zeroconf/python-zeroconf/pull/1441), + [`f637c75`](https://github.com/python-zeroconf/python-zeroconf/commit/f637c75f638ba20c193e58ff63c073a4003430b9)) + +- **docs**: Update python to 3.8 + ([#1430](https://github.com/python-zeroconf/python-zeroconf/pull/1430), + [`483d067`](https://github.com/python-zeroconf/python-zeroconf/commit/483d0673d4ae3eec37840452723fc1839a6cc95c)) + + +## v0.136.0 (2024-10-26) + +### Bug Fixes + +- Add ignore for .c file for wheels + ([#1424](https://github.com/python-zeroconf/python-zeroconf/pull/1424), + [`6535963`](https://github.com/python-zeroconf/python-zeroconf/commit/6535963b5b789ce445e77bb728a5b7ee4263e582)) + +- Correct typos ([#1422](https://github.com/python-zeroconf/python-zeroconf/pull/1422), + [`3991b42`](https://github.com/python-zeroconf/python-zeroconf/commit/3991b4256b8de5b37db7a6144e5112f711b2efef)) + +- Update python-semantic-release to fix release process + ([#1426](https://github.com/python-zeroconf/python-zeroconf/pull/1426), + [`2f20155`](https://github.com/python-zeroconf/python-zeroconf/commit/2f201558d0ab089cdfebb18d2d7bb5785b2cce16)) + +### Features + +- Use SPDX license identifier + ([#1425](https://github.com/python-zeroconf/python-zeroconf/pull/1425), + [`1596145`](https://github.com/python-zeroconf/python-zeroconf/commit/1596145452721e0de4e2a724b055e8e290792d3e)) + + +## v0.135.0 (2024-09-24) + +### 
Features + +- Improve performance of DNSCache backend + ([#1415](https://github.com/python-zeroconf/python-zeroconf/pull/1415), + [`1df2e69`](https://github.com/python-zeroconf/python-zeroconf/commit/1df2e691ff11c9592e1cdad5599fb6601eb1aa3f)) + + +## v0.134.0 (2024-09-08) + +### Bug Fixes + +- Improve helpfulness of ServiceInfo.request assertions + ([#1408](https://github.com/python-zeroconf/python-zeroconf/pull/1408), + [`9262626`](https://github.com/python-zeroconf/python-zeroconf/commit/9262626895d354ed7376aa567043b793c37a985e)) + +### Features + +- Improve performance when IP addresses change frequently + ([#1407](https://github.com/python-zeroconf/python-zeroconf/pull/1407), + [`111c91a`](https://github.com/python-zeroconf/python-zeroconf/commit/111c91ab395a7520e477eb0e75d5924fba3c64c7)) + + +## v0.133.0 (2024-08-27) + +### Features + +- Add classifier for python 3.13 + ([#1393](https://github.com/python-zeroconf/python-zeroconf/pull/1393), + [`7fb2bb2`](https://github.com/python-zeroconf/python-zeroconf/commit/7fb2bb21421c70db0eb288fa7e73d955f58b0f5d)) + +- Enable building of arm64 macOS builds + ([#1384](https://github.com/python-zeroconf/python-zeroconf/pull/1384), + [`0df2ce0`](https://github.com/python-zeroconf/python-zeroconf/commit/0df2ce0e6f7313831da6a63d477019982d5df55c)) + +Co-authored-by: Alex Ciobanu + +Co-authored-by: J. 
Nick Koston + +- Improve performance of ip address caching + ([#1392](https://github.com/python-zeroconf/python-zeroconf/pull/1392), + [`f7c7708`](https://github.com/python-zeroconf/python-zeroconf/commit/f7c77081b2f8c70b1ed6a9b9751a86cf91f9aae2)) + +- Python 3.13 support ([#1390](https://github.com/python-zeroconf/python-zeroconf/pull/1390), + [`98cfa83`](https://github.com/python-zeroconf/python-zeroconf/commit/98cfa83710e43880698353821bae61108b08cb2f)) - ## v0.132.2 (2024-04-13) -### Fix +### Bug Fixes + +- Bump cibuildwheel to fix wheel builds + ([#1371](https://github.com/python-zeroconf/python-zeroconf/pull/1371), + [`83e4ce3`](https://github.com/python-zeroconf/python-zeroconf/commit/83e4ce3e31ddd4ae9aec2f8c9d84d7a93f8be210)) + +- Update references to minimum-supported python version of 3.8 + ([#1369](https://github.com/python-zeroconf/python-zeroconf/pull/1369), + [`599524a`](https://github.com/python-zeroconf/python-zeroconf/commit/599524a5ce1e4c1731519dd89377c2a852e59935)) -* Update references to minimum-supported python version of 3.8 ([#1369](https://github.com/python-zeroconf/python-zeroconf/issues/1369)) ([`599524a`](https://github.com/python-zeroconf/python-zeroconf/commit/599524a5ce1e4c1731519dd89377c2a852e59935)) -* Bump cibuildwheel to fix wheel builds ([#1371](https://github.com/python-zeroconf/python-zeroconf/issues/1371)) ([`83e4ce3`](https://github.com/python-zeroconf/python-zeroconf/commit/83e4ce3e31ddd4ae9aec2f8c9d84d7a93f8be210)) ## v0.132.1 (2024-04-12) -### Fix +### Bug Fixes + +- Set change during iteration when dispatching listeners + ([#1370](https://github.com/python-zeroconf/python-zeroconf/pull/1370), + [`e9f8aa5`](https://github.com/python-zeroconf/python-zeroconf/commit/e9f8aa5741ae2d490c33a562b459f0af1014dbb0)) -* Set change during iteration when dispatching listeners ([#1370](https://github.com/python-zeroconf/python-zeroconf/issues/1370)) 
([`e9f8aa5`](https://github.com/python-zeroconf/python-zeroconf/commit/e9f8aa5741ae2d490c33a562b459f0af1014dbb0)) ## v0.132.0 (2024-04-01) -### Feature +### Bug Fixes + +- Avoid including scope_id in IPv6Address object if its zero + ([#1367](https://github.com/python-zeroconf/python-zeroconf/pull/1367), + [`edc4a55`](https://github.com/python-zeroconf/python-zeroconf/commit/edc4a556819956c238a11332052000dcbcb07e3d)) -* Make async_get_service_info available on the Zeroconf object ([#1366](https://github.com/python-zeroconf/python-zeroconf/issues/1366)) ([`c4c2dee`](https://github.com/python-zeroconf/python-zeroconf/commit/c4c2deeb05279ddbb0eba1330c7ae58795fea001)) -* Drop python 3.7 support ([#1359](https://github.com/python-zeroconf/python-zeroconf/issues/1359)) ([`4877829`](https://github.com/python-zeroconf/python-zeroconf/commit/4877829e6442de5426db152d11827b1ba85dbf59)) +### Features -### Fix +- Drop python 3.7 support ([#1359](https://github.com/python-zeroconf/python-zeroconf/pull/1359), + [`4877829`](https://github.com/python-zeroconf/python-zeroconf/commit/4877829e6442de5426db152d11827b1ba85dbf59)) + +- Make async_get_service_info available on the Zeroconf object + ([#1366](https://github.com/python-zeroconf/python-zeroconf/pull/1366), + [`c4c2dee`](https://github.com/python-zeroconf/python-zeroconf/commit/c4c2deeb05279ddbb0eba1330c7ae58795fea001)) -* Avoid including scope_id in IPv6Address object if its zero ([#1367](https://github.com/python-zeroconf/python-zeroconf/issues/1367)) ([`edc4a55`](https://github.com/python-zeroconf/python-zeroconf/commit/edc4a556819956c238a11332052000dcbcb07e3d)) ## v0.131.0 (2023-12-19) -### Feature +### Features + +- Small speed up to constructing outgoing packets + ([#1354](https://github.com/python-zeroconf/python-zeroconf/pull/1354), + [`517d7d0`](https://github.com/python-zeroconf/python-zeroconf/commit/517d7d00ca7738c770077738125aec0e4824c000)) + +- Speed up processing incoming packets + 
([#1352](https://github.com/python-zeroconf/python-zeroconf/pull/1352), + [`6c15325`](https://github.com/python-zeroconf/python-zeroconf/commit/6c153258a995cf9459a6f23267b7e379b5e2550f)) + +- Speed up the query handler ([#1350](https://github.com/python-zeroconf/python-zeroconf/pull/1350), + [`9eac0a1`](https://github.com/python-zeroconf/python-zeroconf/commit/9eac0a122f28a7a4fa76cbfdda21d9a3571d7abb)) -* Small speed up to constructing outgoing packets ([#1354](https://github.com/python-zeroconf/python-zeroconf/issues/1354)) ([`517d7d0`](https://github.com/python-zeroconf/python-zeroconf/commit/517d7d00ca7738c770077738125aec0e4824c000)) -* Speed up processing incoming packets ([#1352](https://github.com/python-zeroconf/python-zeroconf/issues/1352)) ([`6c15325`](https://github.com/python-zeroconf/python-zeroconf/commit/6c153258a995cf9459a6f23267b7e379b5e2550f)) -* Speed up the query handler ([#1350](https://github.com/python-zeroconf/python-zeroconf/issues/1350)) ([`9eac0a1`](https://github.com/python-zeroconf/python-zeroconf/commit/9eac0a122f28a7a4fa76cbfdda21d9a3571d7abb)) ## v0.130.0 (2023-12-16) -### Feature +### Bug Fixes + +- Ensure IPv6 scoped address construction uses the string cache + ([#1336](https://github.com/python-zeroconf/python-zeroconf/pull/1336), + [`f78a196`](https://github.com/python-zeroconf/python-zeroconf/commit/f78a196db632c4fe017a34f1af8a58903c15a575)) + +- Ensure question history suppresses duplicates + ([#1338](https://github.com/python-zeroconf/python-zeroconf/pull/1338), + [`6f23656`](https://github.com/python-zeroconf/python-zeroconf/commit/6f23656576daa04e3de44e100f3ddd60ee4c560d)) + +- Microsecond precision loss in the query handler + ([#1339](https://github.com/python-zeroconf/python-zeroconf/pull/1339), + [`6560fad`](https://github.com/python-zeroconf/python-zeroconf/commit/6560fad584e0d392962c9a9248759f17c416620e)) + +- Scheduling race with the QueryScheduler + 
([#1347](https://github.com/python-zeroconf/python-zeroconf/pull/1347), + [`cf40470`](https://github.com/python-zeroconf/python-zeroconf/commit/cf40470b89f918d3c24d7889d3536f3ffa44846c)) + +### Features -* Make ServiceInfo aware of question history ([#1348](https://github.com/python-zeroconf/python-zeroconf/issues/1348)) ([`b9aae1d`](https://github.com/python-zeroconf/python-zeroconf/commit/b9aae1de07bf1491e873bc314f8a1d7996127ad3)) -* Small speed up to ServiceInfo construction ([#1346](https://github.com/python-zeroconf/python-zeroconf/issues/1346)) ([`b329d99`](https://github.com/python-zeroconf/python-zeroconf/commit/b329d99917bb731b4c70bf20c7c010eeb85ad9fd)) -* Significantly improve efficiency of the ServiceBrowser scheduler ([#1335](https://github.com/python-zeroconf/python-zeroconf/issues/1335)) ([`c65d869`](https://github.com/python-zeroconf/python-zeroconf/commit/c65d869aec731b803484871e9d242a984f9f5848)) -* Small speed up to processing incoming records ([#1345](https://github.com/python-zeroconf/python-zeroconf/issues/1345)) ([`7de655b`](https://github.com/python-zeroconf/python-zeroconf/commit/7de655b6f05012f20a3671e0bcdd44a1913d7b52)) -* Small performance improvement for converting time ([#1342](https://github.com/python-zeroconf/python-zeroconf/issues/1342)) ([`73d3ab9`](https://github.com/python-zeroconf/python-zeroconf/commit/73d3ab90dd3b59caab771235dd6dbedf05bfe0b3)) -* Small performance improvement for ServiceInfo asking questions ([#1341](https://github.com/python-zeroconf/python-zeroconf/issues/1341)) ([`810a309`](https://github.com/python-zeroconf/python-zeroconf/commit/810a3093c5a9411ee97740b468bd706bdf4a95de)) -* Small performance improvement constructing outgoing questions ([#1340](https://github.com/python-zeroconf/python-zeroconf/issues/1340)) ([`157185f`](https://github.com/python-zeroconf/python-zeroconf/commit/157185f28bf1e83e6811e2a5cd1fa9b38966f780)) +- Make ServiceInfo aware of question history + 
([#1348](https://github.com/python-zeroconf/python-zeroconf/pull/1348), + [`b9aae1d`](https://github.com/python-zeroconf/python-zeroconf/commit/b9aae1de07bf1491e873bc314f8a1d7996127ad3)) -### Fix +- Significantly improve efficiency of the ServiceBrowser scheduler + ([#1335](https://github.com/python-zeroconf/python-zeroconf/pull/1335), + [`c65d869`](https://github.com/python-zeroconf/python-zeroconf/commit/c65d869aec731b803484871e9d242a984f9f5848)) + +- Small performance improvement constructing outgoing questions + ([#1340](https://github.com/python-zeroconf/python-zeroconf/pull/1340), + [`157185f`](https://github.com/python-zeroconf/python-zeroconf/commit/157185f28bf1e83e6811e2a5cd1fa9b38966f780)) + +- Small performance improvement for converting time + ([#1342](https://github.com/python-zeroconf/python-zeroconf/pull/1342), + [`73d3ab9`](https://github.com/python-zeroconf/python-zeroconf/commit/73d3ab90dd3b59caab771235dd6dbedf05bfe0b3)) + +- Small performance improvement for ServiceInfo asking questions + ([#1341](https://github.com/python-zeroconf/python-zeroconf/pull/1341), + [`810a309`](https://github.com/python-zeroconf/python-zeroconf/commit/810a3093c5a9411ee97740b468bd706bdf4a95de)) + +- Small speed up to processing incoming records + ([#1345](https://github.com/python-zeroconf/python-zeroconf/pull/1345), + [`7de655b`](https://github.com/python-zeroconf/python-zeroconf/commit/7de655b6f05012f20a3671e0bcdd44a1913d7b52)) + +- Small speed up to ServiceInfo construction + ([#1346](https://github.com/python-zeroconf/python-zeroconf/pull/1346), + [`b329d99`](https://github.com/python-zeroconf/python-zeroconf/commit/b329d99917bb731b4c70bf20c7c010eeb85ad9fd)) -* Scheduling race with the QueryScheduler ([#1347](https://github.com/python-zeroconf/python-zeroconf/issues/1347)) ([`cf40470`](https://github.com/python-zeroconf/python-zeroconf/commit/cf40470b89f918d3c24d7889d3536f3ffa44846c)) -* Ensure question history suppresses duplicates 
([#1338](https://github.com/python-zeroconf/python-zeroconf/issues/1338)) ([`6f23656`](https://github.com/python-zeroconf/python-zeroconf/commit/6f23656576daa04e3de44e100f3ddd60ee4c560d)) -* Microsecond precision loss in the query handler ([#1339](https://github.com/python-zeroconf/python-zeroconf/issues/1339)) ([`6560fad`](https://github.com/python-zeroconf/python-zeroconf/commit/6560fad584e0d392962c9a9248759f17c416620e)) -* Ensure IPv6 scoped address construction uses the string cache ([#1336](https://github.com/python-zeroconf/python-zeroconf/issues/1336)) ([`f78a196`](https://github.com/python-zeroconf/python-zeroconf/commit/f78a196db632c4fe017a34f1af8a58903c15a575)) ## v0.129.0 (2023-12-13) -### Feature +### Features + +- Add decoded_properties method to ServiceInfo + ([#1332](https://github.com/python-zeroconf/python-zeroconf/pull/1332), + [`9b595a1`](https://github.com/python-zeroconf/python-zeroconf/commit/9b595a1dcacf109c699953219d70fe36296c7318)) -* Add decoded_properties method to ServiceInfo ([#1332](https://github.com/python-zeroconf/python-zeroconf/issues/1332)) ([`9b595a1`](https://github.com/python-zeroconf/python-zeroconf/commit/9b595a1dcacf109c699953219d70fe36296c7318)) -* Ensure ServiceInfo.properties always returns bytes ([#1333](https://github.com/python-zeroconf/python-zeroconf/issues/1333)) ([`d29553a`](https://github.com/python-zeroconf/python-zeroconf/commit/d29553ab7de6b7af70769ddb804fe2aaf492f320)) -* Cache is_unspecified for zeroconf ip address objects ([#1331](https://github.com/python-zeroconf/python-zeroconf/issues/1331)) ([`a1c84dc`](https://github.com/python-zeroconf/python-zeroconf/commit/a1c84dc6adeebd155faec1a647c0f70d70de2945)) +- Cache is_unspecified for zeroconf ip address objects + ([#1331](https://github.com/python-zeroconf/python-zeroconf/pull/1331), + [`a1c84dc`](https://github.com/python-zeroconf/python-zeroconf/commit/a1c84dc6adeebd155faec1a647c0f70d70de2945)) -### Technically breaking change +- Ensure 
ServiceInfo.properties always returns bytes + ([#1333](https://github.com/python-zeroconf/python-zeroconf/pull/1333), + [`d29553a`](https://github.com/python-zeroconf/python-zeroconf/commit/d29553ab7de6b7af70769ddb804fe2aaf492f320)) -* `ServiceInfo.properties` always returns a dictionary with type `dict[bytes, bytes | None]` instead of a mix `str` and `bytes`. It was only possible to get a mixed dictionary if it was manually passed in when `ServiceInfo` was constructed. ## v0.128.5 (2023-12-13) -### Fix +### Bug Fixes + +- Performance regression with ServiceInfo IPv6Addresses + ([#1330](https://github.com/python-zeroconf/python-zeroconf/pull/1330), + [`e2f9f81`](https://github.com/python-zeroconf/python-zeroconf/commit/e2f9f81dbc54c3dd527eeb3298897d63f99d33f4)) -* Performance regression with ServiceInfo IPv6Addresses ([#1330](https://github.com/python-zeroconf/python-zeroconf/issues/1330)) ([`e2f9f81`](https://github.com/python-zeroconf/python-zeroconf/commit/e2f9f81dbc54c3dd527eeb3298897d63f99d33f4)) ## v0.128.4 (2023-12-10) -### Fix +### Bug Fixes + +- Re-expose ServiceInfo._set_properties for backwards compat + ([#1327](https://github.com/python-zeroconf/python-zeroconf/pull/1327), + [`39c4005`](https://github.com/python-zeroconf/python-zeroconf/commit/39c40051d7a63bdc63a3e2dfa20bd944fee4e761)) -* Re-expose ServiceInfo._set_properties for backwards compat ([#1327](https://github.com/python-zeroconf/python-zeroconf/issues/1327)) ([`39c4005`](https://github.com/python-zeroconf/python-zeroconf/commit/39c40051d7a63bdc63a3e2dfa20bd944fee4e761)) ## v0.128.3 (2023-12-10) -### Fix +### Bug Fixes + +- Correct nsec record writing + ([#1326](https://github.com/python-zeroconf/python-zeroconf/pull/1326), + [`cd7a16a`](https://github.com/python-zeroconf/python-zeroconf/commit/cd7a16a32c37b2f7a2e90d3c749525a5393bad57)) -* Correct nsec record writing ([#1326](https://github.com/python-zeroconf/python-zeroconf/issues/1326)) 
([`cd7a16a`](https://github.com/python-zeroconf/python-zeroconf/commit/cd7a16a32c37b2f7a2e90d3c749525a5393bad57)) ## v0.128.2 (2023-12-10) -### Fix +### Bug Fixes + +- Match cython version for dev deps to build deps + ([#1325](https://github.com/python-zeroconf/python-zeroconf/pull/1325), + [`a0dac46`](https://github.com/python-zeroconf/python-zeroconf/commit/a0dac46c01202b3d5a0823ac1928fc1d75332522)) + +- Timestamps missing double precision + ([#1324](https://github.com/python-zeroconf/python-zeroconf/pull/1324), + [`ecea4e4`](https://github.com/python-zeroconf/python-zeroconf/commit/ecea4e4217892ca8cf763074ac3e5d1b898acd21)) -* Timestamps missing double precision ([#1324](https://github.com/python-zeroconf/python-zeroconf/issues/1324)) ([`ecea4e4`](https://github.com/python-zeroconf/python-zeroconf/commit/ecea4e4217892ca8cf763074ac3e5d1b898acd21)) -* Match cython version for dev deps to build deps ([#1325](https://github.com/python-zeroconf/python-zeroconf/issues/1325)) ([`a0dac46`](https://github.com/python-zeroconf/python-zeroconf/commit/a0dac46c01202b3d5a0823ac1928fc1d75332522)) ## v0.128.1 (2023-12-10) -### Fix +### Bug Fixes + +- Correct handling of IPv6 addresses with scope_id in ServiceInfo + ([#1322](https://github.com/python-zeroconf/python-zeroconf/pull/1322), + [`1682991`](https://github.com/python-zeroconf/python-zeroconf/commit/1682991b985b1f7b2bf0cff1a7eb7793070e7cb1)) -* Correct handling of IPv6 addresses with scope_id in ServiceInfo ([#1322](https://github.com/python-zeroconf/python-zeroconf/issues/1322)) ([`1682991`](https://github.com/python-zeroconf/python-zeroconf/commit/1682991b985b1f7b2bf0cff1a7eb7793070e7cb1)) ## v0.128.0 (2023-12-02) -### Feature +### Features + +- Speed up unpacking TXT record data in ServiceInfo + ([#1318](https://github.com/python-zeroconf/python-zeroconf/pull/1318), + [`a200842`](https://github.com/python-zeroconf/python-zeroconf/commit/a20084281e66bdb9c37183a5eb992435f5b866ac)) -* Speed up unpacking TXT record data in 
ServiceInfo ([#1318](https://github.com/python-zeroconf/python-zeroconf/issues/1318)) ([`a200842`](https://github.com/python-zeroconf/python-zeroconf/commit/a20084281e66bdb9c37183a5eb992435f5b866ac)) ## v0.127.0 (2023-11-15) -### Feature +### Features + +- Small speed up to processing incoming dns records + ([#1315](https://github.com/python-zeroconf/python-zeroconf/pull/1315), + [`bfe4c24`](https://github.com/python-zeroconf/python-zeroconf/commit/bfe4c24881a7259713425df5ab00ffe487518841)) + +- Small speed up to writing outgoing packets + ([#1316](https://github.com/python-zeroconf/python-zeroconf/pull/1316), + [`cd28476`](https://github.com/python-zeroconf/python-zeroconf/commit/cd28476f6b0a6c2c733273fb24ddaac6c7bbdf65)) + +- Speed up incoming packet reader + ([#1314](https://github.com/python-zeroconf/python-zeroconf/pull/1314), + [`0d60b61`](https://github.com/python-zeroconf/python-zeroconf/commit/0d60b61538a5d4b6f44b2369333b6e916a0a55b4)) -* Small speed up to writing outgoing packets ([#1316](https://github.com/python-zeroconf/python-zeroconf/issues/1316)) ([`cd28476`](https://github.com/python-zeroconf/python-zeroconf/commit/cd28476f6b0a6c2c733273fb24ddaac6c7bbdf65)) -* Speed up incoming packet reader ([#1314](https://github.com/python-zeroconf/python-zeroconf/issues/1314)) ([`0d60b61`](https://github.com/python-zeroconf/python-zeroconf/commit/0d60b61538a5d4b6f44b2369333b6e916a0a55b4)) -* Small speed up to processing incoming dns records ([#1315](https://github.com/python-zeroconf/python-zeroconf/issues/1315)) ([`bfe4c24`](https://github.com/python-zeroconf/python-zeroconf/commit/bfe4c24881a7259713425df5ab00ffe487518841)) ## v0.126.0 (2023-11-13) -### Feature +### Features + +- Speed up outgoing packet writer + ([#1313](https://github.com/python-zeroconf/python-zeroconf/pull/1313), + [`55cf4cc`](https://github.com/python-zeroconf/python-zeroconf/commit/55cf4ccdff886a136db4e2133d3e6cdd001a8bd6)) + +- Speed up writing name compression for outgoing packets + 
([#1312](https://github.com/python-zeroconf/python-zeroconf/pull/1312), + [`9caeabb`](https://github.com/python-zeroconf/python-zeroconf/commit/9caeabb6d4659a25ea1251c1ee7bb824e05f3d8b)) -* Speed up outgoing packet writer ([#1313](https://github.com/python-zeroconf/python-zeroconf/issues/1313)) ([`55cf4cc`](https://github.com/python-zeroconf/python-zeroconf/commit/55cf4ccdff886a136db4e2133d3e6cdd001a8bd6)) -* Speed up writing name compression for outgoing packets ([#1312](https://github.com/python-zeroconf/python-zeroconf/issues/1312)) ([`9caeabb`](https://github.com/python-zeroconf/python-zeroconf/commit/9caeabb6d4659a25ea1251c1ee7bb824e05f3d8b)) ## v0.125.0 (2023-11-12) -### Feature +### Features + +- Speed up service browser queries when browsing many types + ([#1311](https://github.com/python-zeroconf/python-zeroconf/pull/1311), + [`d192d33`](https://github.com/python-zeroconf/python-zeroconf/commit/d192d33b1f05aa95a89965e86210aec086673a17)) -* Speed up service browser queries when browsing many types ([#1311](https://github.com/python-zeroconf/python-zeroconf/issues/1311)) ([`d192d33`](https://github.com/python-zeroconf/python-zeroconf/commit/d192d33b1f05aa95a89965e86210aec086673a17)) ## v0.124.0 (2023-11-12) -### Feature +### Features + +- Avoid decoding known answers if we have no answers to give + ([#1308](https://github.com/python-zeroconf/python-zeroconf/pull/1308), + [`605dc9c`](https://github.com/python-zeroconf/python-zeroconf/commit/605dc9ccd843a535802031f051b3d93310186ad1)) + +- Small speed up to process incoming packets + ([#1309](https://github.com/python-zeroconf/python-zeroconf/pull/1309), + [`56ef908`](https://github.com/python-zeroconf/python-zeroconf/commit/56ef90865189c01d2207abcc5e2efe3a7a022fa1)) -* Avoid decoding known answers if we have no answers to give ([#1308](https://github.com/python-zeroconf/python-zeroconf/issues/1308)) 
([`605dc9c`](https://github.com/python-zeroconf/python-zeroconf/commit/605dc9ccd843a535802031f051b3d93310186ad1)) -* Small speed up to process incoming packets ([#1309](https://github.com/python-zeroconf/python-zeroconf/issues/1309)) ([`56ef908`](https://github.com/python-zeroconf/python-zeroconf/commit/56ef90865189c01d2207abcc5e2efe3a7a022fa1)) ## v0.123.0 (2023-11-12) -### Feature +### Features + +- Speed up instances only used to lookup answers + ([#1307](https://github.com/python-zeroconf/python-zeroconf/pull/1307), + [`0701b8a`](https://github.com/python-zeroconf/python-zeroconf/commit/0701b8ab6009891cbaddaa1d17116d31fd1b2f78)) -* Speed up instances only used to lookup answers ([#1307](https://github.com/python-zeroconf/python-zeroconf/issues/1307)) ([`0701b8a`](https://github.com/python-zeroconf/python-zeroconf/commit/0701b8ab6009891cbaddaa1d17116d31fd1b2f78)) ## v0.122.3 (2023-11-09) -### Fix +### Bug Fixes + +- Do not build musllinux aarch64 wheels to reduce release time + ([#1306](https://github.com/python-zeroconf/python-zeroconf/pull/1306), + [`79aafb0`](https://github.com/python-zeroconf/python-zeroconf/commit/79aafb0acf7ca6b17976be7ede748008deada27b)) -* Do not build musllinux aarch64 wheels to reduce release time ([#1306](https://github.com/python-zeroconf/python-zeroconf/issues/1306)) ([`79aafb0`](https://github.com/python-zeroconf/python-zeroconf/commit/79aafb0acf7ca6b17976be7ede748008deada27b)) ## v0.122.2 (2023-11-09) -### Fix +### Bug Fixes + +- Do not build aarch64 wheels for PyPy + ([#1305](https://github.com/python-zeroconf/python-zeroconf/pull/1305), + [`7e884db`](https://github.com/python-zeroconf/python-zeroconf/commit/7e884db4d958459e64257aba860dba2450db0687)) -* Do not build aarch64 wheels for PyPy ([#1305](https://github.com/python-zeroconf/python-zeroconf/issues/1305)) ([`7e884db`](https://github.com/python-zeroconf/python-zeroconf/commit/7e884db4d958459e64257aba860dba2450db0687)) ## v0.122.1 (2023-11-09) -### Fix +### Bug Fixes + +- 
Skip wheel builds for eol python and older python with aarch64 + ([#1304](https://github.com/python-zeroconf/python-zeroconf/pull/1304), + [`6c8f5a5`](https://github.com/python-zeroconf/python-zeroconf/commit/6c8f5a5dec2072aa6a8f889c5d8a4623ab392234)) -* Skip wheel builds for eol python and older python with aarch64 ([#1304](https://github.com/python-zeroconf/python-zeroconf/issues/1304)) ([`6c8f5a5`](https://github.com/python-zeroconf/python-zeroconf/commit/6c8f5a5dec2072aa6a8f889c5d8a4623ab392234)) ## v0.122.0 (2023-11-08) -### Feature +### Features + +- Build aarch64 wheels ([#1302](https://github.com/python-zeroconf/python-zeroconf/pull/1302), + [`4fe58e2`](https://github.com/python-zeroconf/python-zeroconf/commit/4fe58e2edc6da64a8ece0e2b16ec9ebfc5b3cd83)) -* Build aarch64 wheels ([#1302](https://github.com/python-zeroconf/python-zeroconf/issues/1302)) ([`4fe58e2`](https://github.com/python-zeroconf/python-zeroconf/commit/4fe58e2edc6da64a8ece0e2b16ec9ebfc5b3cd83)) ## v0.121.0 (2023-11-08) -### Feature +### Features + +- Speed up record updates ([#1301](https://github.com/python-zeroconf/python-zeroconf/pull/1301), + [`d2af6a0`](https://github.com/python-zeroconf/python-zeroconf/commit/d2af6a0978f5abe4f8bb70d3e29d9836d0fd77c4)) -* Speed up record updates ([#1301](https://github.com/python-zeroconf/python-zeroconf/issues/1301)) ([`d2af6a0`](https://github.com/python-zeroconf/python-zeroconf/commit/d2af6a0978f5abe4f8bb70d3e29d9836d0fd77c4)) ## v0.120.0 (2023-11-05) -### Feature +### Features + +- Speed up decoding labels from incoming data + ([#1291](https://github.com/python-zeroconf/python-zeroconf/pull/1291), + [`c37ead4`](https://github.com/python-zeroconf/python-zeroconf/commit/c37ead4d7000607e81706a97b4cdffd80cf8cf99)) + +- Speed up incoming packet processing with a memory view + ([#1290](https://github.com/python-zeroconf/python-zeroconf/pull/1290), + 
[`f1f0a25`](https://github.com/python-zeroconf/python-zeroconf/commit/f1f0a2504afd4d29bc6b7cf715cd3cb81b9049f7)) + +- Speed up ServiceBrowsers with a pxd for the signal interface + ([#1289](https://github.com/python-zeroconf/python-zeroconf/pull/1289), + [`8a17f20`](https://github.com/python-zeroconf/python-zeroconf/commit/8a17f2053a89db4beca9e8c1de4640faf27726b4)) -* Speed up incoming packet processing with a memory view ([#1290](https://github.com/python-zeroconf/python-zeroconf/issues/1290)) ([`f1f0a25`](https://github.com/python-zeroconf/python-zeroconf/commit/f1f0a2504afd4d29bc6b7cf715cd3cb81b9049f7)) -* Speed up decoding labels from incoming data ([#1291](https://github.com/python-zeroconf/python-zeroconf/issues/1291)) ([`c37ead4`](https://github.com/python-zeroconf/python-zeroconf/commit/c37ead4d7000607e81706a97b4cdffd80cf8cf99)) -* Speed up ServiceBrowsers with a pxd for the signal interface ([#1289](https://github.com/python-zeroconf/python-zeroconf/issues/1289)) ([`8a17f20`](https://github.com/python-zeroconf/python-zeroconf/commit/8a17f2053a89db4beca9e8c1de4640faf27726b4)) ## v0.119.0 (2023-10-18) -### Feature +### Features + +- Update cibuildwheel to build wheels on latest cython final release + ([#1285](https://github.com/python-zeroconf/python-zeroconf/pull/1285), + [`e8c9083`](https://github.com/python-zeroconf/python-zeroconf/commit/e8c9083bb118764a85b12fac9055152a2f62a212)) -* Update cibuildwheel to build wheels on latest cython final release ([#1285](https://github.com/python-zeroconf/python-zeroconf/issues/1285)) ([`e8c9083`](https://github.com/python-zeroconf/python-zeroconf/commit/e8c9083bb118764a85b12fac9055152a2f62a212)) ## v0.118.1 (2023-10-18) -### Fix +### Bug Fixes + +- Reduce size of wheels by excluding generated .c files + ([#1284](https://github.com/python-zeroconf/python-zeroconf/pull/1284), + [`b6afa4b`](https://github.com/python-zeroconf/python-zeroconf/commit/b6afa4b2775a1fdb090145eccdc5711c98e7147a)) -* Reduce size of wheels by 
excluding generated .c files ([#1284](https://github.com/python-zeroconf/python-zeroconf/issues/1284)) ([`b6afa4b`](https://github.com/python-zeroconf/python-zeroconf/commit/b6afa4b2775a1fdb090145eccdc5711c98e7147a)) ## v0.118.0 (2023-10-14) -### Feature +### Features + +- Small improvements to ServiceBrowser performance + ([#1283](https://github.com/python-zeroconf/python-zeroconf/pull/1283), + [`0fc031b`](https://github.com/python-zeroconf/python-zeroconf/commit/0fc031b1e7bf1766d5a1d39d70d300b86e36715e)) -* Small improvements to ServiceBrowser performance ([#1283](https://github.com/python-zeroconf/python-zeroconf/issues/1283)) ([`0fc031b`](https://github.com/python-zeroconf/python-zeroconf/commit/0fc031b1e7bf1766d5a1d39d70d300b86e36715e)) ## v0.117.0 (2023-10-14) -### Feature +### Features + +- Small cleanups to incoming data handlers + ([#1282](https://github.com/python-zeroconf/python-zeroconf/pull/1282), + [`4f4bd9f`](https://github.com/python-zeroconf/python-zeroconf/commit/4f4bd9ff7c1e575046e5ea213d9b8c91ac7a24a9)) -* Small cleanups to incoming data handlers ([#1282](https://github.com/python-zeroconf/python-zeroconf/issues/1282)) ([`4f4bd9f`](https://github.com/python-zeroconf/python-zeroconf/commit/4f4bd9ff7c1e575046e5ea213d9b8c91ac7a24a9)) ## v0.116.0 (2023-10-13) -### Feature +### Features + +- Reduce type checking overhead at run time + ([#1281](https://github.com/python-zeroconf/python-zeroconf/pull/1281), + [`8f30099`](https://github.com/python-zeroconf/python-zeroconf/commit/8f300996e5bd4316b2237f0502791dd0d6a855fe)) -* Reduce type checking overhead at run time ([#1281](https://github.com/python-zeroconf/python-zeroconf/issues/1281)) ([`8f30099`](https://github.com/python-zeroconf/python-zeroconf/commit/8f300996e5bd4316b2237f0502791dd0d6a855fe)) ## v0.115.2 (2023-10-05) -### Fix +### Bug Fixes + +- Ensure ServiceInfo cache is cleared when adding to the registry + ([#1279](https://github.com/python-zeroconf/python-zeroconf/pull/1279), + 
[`2060eb2`](https://github.com/python-zeroconf/python-zeroconf/commit/2060eb2cc43489c34bea08924c3f40b875d5a498)) + +* There were production use cases that mutated the service info and re-registered it that need to be + accounted for -* Ensure ServiceInfo cache is cleared when adding to the registry ([#1279](https://github.com/python-zeroconf/python-zeroconf/issues/1279)) ([`2060eb2`](https://github.com/python-zeroconf/python-zeroconf/commit/2060eb2cc43489c34bea08924c3f40b875d5a498)) ## v0.115.1 (2023-10-01) -### Fix +### Bug Fixes + +- Add missing python definition for addresses_by_version + ([#1278](https://github.com/python-zeroconf/python-zeroconf/pull/1278), + [`52ee02b`](https://github.com/python-zeroconf/python-zeroconf/commit/52ee02b16860e344c402124f4b2e2869536ec839)) -* Add missing python definition for addresses_by_version ([#1278](https://github.com/python-zeroconf/python-zeroconf/issues/1278)) ([`52ee02b`](https://github.com/python-zeroconf/python-zeroconf/commit/52ee02b16860e344c402124f4b2e2869536ec839)) ## v0.115.0 (2023-09-26) -### Feature +### Features + +- Speed up outgoing multicast queue + ([#1277](https://github.com/python-zeroconf/python-zeroconf/pull/1277), + [`a13fd49`](https://github.com/python-zeroconf/python-zeroconf/commit/a13fd49d77474fd5858de809e48cbab1ccf89173)) -* Speed up outgoing multicast queue ([#1277](https://github.com/python-zeroconf/python-zeroconf/issues/1277)) ([`a13fd49`](https://github.com/python-zeroconf/python-zeroconf/commit/a13fd49d77474fd5858de809e48cbab1ccf89173)) ## v0.114.0 (2023-09-25) -### Feature +### Features + +- Speed up responding to queries + ([#1275](https://github.com/python-zeroconf/python-zeroconf/pull/1275), + [`3c6b18c`](https://github.com/python-zeroconf/python-zeroconf/commit/3c6b18cdf4c94773ad6f4497df98feb337939ee9)) -* Speed up responding to queries ([#1275](https://github.com/python-zeroconf/python-zeroconf/issues/1275)) 
([`3c6b18c`](https://github.com/python-zeroconf/python-zeroconf/commit/3c6b18cdf4c94773ad6f4497df98feb337939ee9)) ## v0.113.0 (2023-09-24) -### Feature +### Features + +- Improve performance of loading records from cache in ServiceInfo + ([#1274](https://github.com/python-zeroconf/python-zeroconf/pull/1274), + [`6257d49`](https://github.com/python-zeroconf/python-zeroconf/commit/6257d49952e02107f800f4ad4894716508edfcda)) -* Improve performance of loading records from cache in ServiceInfo ([#1274](https://github.com/python-zeroconf/python-zeroconf/issues/1274)) ([`6257d49`](https://github.com/python-zeroconf/python-zeroconf/commit/6257d49952e02107f800f4ad4894716508edfcda)) ## v0.112.0 (2023-09-14) -### Feature +### Features + +- Improve AsyncServiceBrowser performance + ([#1273](https://github.com/python-zeroconf/python-zeroconf/pull/1273), + [`0c88ecf`](https://github.com/python-zeroconf/python-zeroconf/commit/0c88ecf5ef6b9b256f991e7a630048de640999a6)) -* Improve AsyncServiceBrowser performance ([#1273](https://github.com/python-zeroconf/python-zeroconf/issues/1273)) ([`0c88ecf`](https://github.com/python-zeroconf/python-zeroconf/commit/0c88ecf5ef6b9b256f991e7a630048de640999a6)) ## v0.111.0 (2023-09-14) -### Feature +### Features + +- Speed up question and answer internals + ([#1272](https://github.com/python-zeroconf/python-zeroconf/pull/1272), + [`d24722b`](https://github.com/python-zeroconf/python-zeroconf/commit/d24722bfa4201d48ab482d35b0ef004f070ada80)) -* Speed up question and answer internals ([#1272](https://github.com/python-zeroconf/python-zeroconf/issues/1272)) ([`d24722b`](https://github.com/python-zeroconf/python-zeroconf/commit/d24722bfa4201d48ab482d35b0ef004f070ada80)) ## v0.110.0 (2023-09-14) -### Feature +### Features + +- Small speed ups to ServiceBrowser + ([#1271](https://github.com/python-zeroconf/python-zeroconf/pull/1271), + [`22c433d`](https://github.com/python-zeroconf/python-zeroconf/commit/22c433ddaea3049ac49933325ba938fd87a529c0)) -* 
Small speed ups to ServiceBrowser ([#1271](https://github.com/python-zeroconf/python-zeroconf/issues/1271)) ([`22c433d`](https://github.com/python-zeroconf/python-zeroconf/commit/22c433ddaea3049ac49933325ba938fd87a529c0)) ## v0.109.0 (2023-09-14) -### Feature +### Features + +- Speed up ServiceBrowsers with a cython pxd + ([#1270](https://github.com/python-zeroconf/python-zeroconf/pull/1270), + [`4837876`](https://github.com/python-zeroconf/python-zeroconf/commit/48378769c3887b5746ca00de30067a4c0851765c)) -* Speed up ServiceBrowsers with a cython pxd ([#1270](https://github.com/python-zeroconf/python-zeroconf/issues/1270)) ([`4837876`](https://github.com/python-zeroconf/python-zeroconf/commit/48378769c3887b5746ca00de30067a4c0851765c)) ## v0.108.0 (2023-09-11) -### Feature +### Features + +- Improve performance of constructing outgoing queries + ([#1267](https://github.com/python-zeroconf/python-zeroconf/pull/1267), + [`00c439a`](https://github.com/python-zeroconf/python-zeroconf/commit/00c439a6400b7850ef9fdd75bc8d82d4e64b1da0)) -* Improve performance of constructing outgoing queries ([#1267](https://github.com/python-zeroconf/python-zeroconf/issues/1267)) ([`00c439a`](https://github.com/python-zeroconf/python-zeroconf/commit/00c439a6400b7850ef9fdd75bc8d82d4e64b1da0)) ## v0.107.0 (2023-09-11) -### Feature +### Features + +- Speed up responding to queries + ([#1266](https://github.com/python-zeroconf/python-zeroconf/pull/1266), + [`24a0a00`](https://github.com/python-zeroconf/python-zeroconf/commit/24a0a00b3e457979e279a2eeadc8fad2ab09e125)) -* Speed up responding to queries ([#1266](https://github.com/python-zeroconf/python-zeroconf/issues/1266)) ([`24a0a00`](https://github.com/python-zeroconf/python-zeroconf/commit/24a0a00b3e457979e279a2eeadc8fad2ab09e125)) ## v0.106.0 (2023-09-11) -### Feature +### Features + +- Speed up answering questions + ([#1265](https://github.com/python-zeroconf/python-zeroconf/pull/1265), + 
[`37bfaf2`](https://github.com/python-zeroconf/python-zeroconf/commit/37bfaf2f630358e8c68652f3b3120931a6f94910)) -* Speed up answering questions ([#1265](https://github.com/python-zeroconf/python-zeroconf/issues/1265)) ([`37bfaf2`](https://github.com/python-zeroconf/python-zeroconf/commit/37bfaf2f630358e8c68652f3b3120931a6f94910)) ## v0.105.0 (2023-09-10) -### Feature +### Features + +- Speed up ServiceInfo with a cython pxd + ([#1264](https://github.com/python-zeroconf/python-zeroconf/pull/1264), + [`7ca690a`](https://github.com/python-zeroconf/python-zeroconf/commit/7ca690ac3fa75e7474d3412944bbd5056cb313dd)) -* Speed up ServiceInfo with a cython pxd ([#1264](https://github.com/python-zeroconf/python-zeroconf/issues/1264)) ([`7ca690a`](https://github.com/python-zeroconf/python-zeroconf/commit/7ca690ac3fa75e7474d3412944bbd5056cb313dd)) ## v0.104.0 (2023-09-10) -### Feature +### Features + +- Speed up generating answers + ([#1262](https://github.com/python-zeroconf/python-zeroconf/pull/1262), + [`50a8f06`](https://github.com/python-zeroconf/python-zeroconf/commit/50a8f066b6ab90bc9e3300f81cf9332550b720df)) -* Speed up generating answers ([#1262](https://github.com/python-zeroconf/python-zeroconf/issues/1262)) ([`50a8f06`](https://github.com/python-zeroconf/python-zeroconf/commit/50a8f066b6ab90bc9e3300f81cf9332550b720df)) ## v0.103.0 (2023-09-09) -### Feature +### Features + +- Avoid calling get_running_loop when resolving ServiceInfo + ([#1261](https://github.com/python-zeroconf/python-zeroconf/pull/1261), + [`33a2714`](https://github.com/python-zeroconf/python-zeroconf/commit/33a2714cadff96edf016b869cc63b0661d16ef2c)) -* Avoid calling get_running_loop when resolving ServiceInfo ([#1261](https://github.com/python-zeroconf/python-zeroconf/issues/1261)) ([`33a2714`](https://github.com/python-zeroconf/python-zeroconf/commit/33a2714cadff96edf016b869cc63b0661d16ef2c)) ## v0.102.0 (2023-09-07) -### Feature +### Features + +- Significantly speed up writing outgoing dns 
records + ([#1260](https://github.com/python-zeroconf/python-zeroconf/pull/1260), + [`bf2f366`](https://github.com/python-zeroconf/python-zeroconf/commit/bf2f3660a1f341e50ab0ae586dfbacbc5ddcc077)) -* Significantly speed up writing outgoing dns records ([#1260](https://github.com/python-zeroconf/python-zeroconf/issues/1260)) ([`bf2f366`](https://github.com/python-zeroconf/python-zeroconf/commit/bf2f3660a1f341e50ab0ae586dfbacbc5ddcc077)) ## v0.101.0 (2023-09-07) -### Feature +### Features + +- Speed up writing outgoing dns records + ([#1259](https://github.com/python-zeroconf/python-zeroconf/pull/1259), + [`248655f`](https://github.com/python-zeroconf/python-zeroconf/commit/248655f0276223b089373c70ec13a0385dfaa4d6)) -* Speed up writing outgoing dns records ([#1259](https://github.com/python-zeroconf/python-zeroconf/issues/1259)) ([`248655f`](https://github.com/python-zeroconf/python-zeroconf/commit/248655f0276223b089373c70ec13a0385dfaa4d6)) ## v0.100.0 (2023-09-07) -### Feature +### Features + +- Small speed up to writing outgoing dns records + ([#1258](https://github.com/python-zeroconf/python-zeroconf/pull/1258), + [`1ed6bd2`](https://github.com/python-zeroconf/python-zeroconf/commit/1ed6bd2ec4db0612b71384f923ffff1efd3ce878)) -* Small speed up to writing outgoing dns records ([#1258](https://github.com/python-zeroconf/python-zeroconf/issues/1258)) ([`1ed6bd2`](https://github.com/python-zeroconf/python-zeroconf/commit/1ed6bd2ec4db0612b71384f923ffff1efd3ce878)) ## v0.99.0 (2023-09-06) -### Feature +### Features + +- Reduce IP Address parsing overhead in ServiceInfo + ([#1257](https://github.com/python-zeroconf/python-zeroconf/pull/1257), + [`83d0b7f`](https://github.com/python-zeroconf/python-zeroconf/commit/83d0b7fda2eb09c9c6e18b85f329d1ddc701e3fb)) -* Reduce IP Address parsing overhead in ServiceInfo ([#1257](https://github.com/python-zeroconf/python-zeroconf/issues/1257)) 
([`83d0b7f`](https://github.com/python-zeroconf/python-zeroconf/commit/83d0b7fda2eb09c9c6e18b85f329d1ddc701e3fb)) ## v0.98.0 (2023-09-06) -### Feature +### Features + +- Speed up decoding incoming packets + ([#1256](https://github.com/python-zeroconf/python-zeroconf/pull/1256), + [`ac081cf`](https://github.com/python-zeroconf/python-zeroconf/commit/ac081cf00addde1ceea2c076f73905fdb293de3a)) -* Speed up decoding incoming packets ([#1256](https://github.com/python-zeroconf/python-zeroconf/issues/1256)) ([`ac081cf`](https://github.com/python-zeroconf/python-zeroconf/commit/ac081cf00addde1ceea2c076f73905fdb293de3a)) ## v0.97.0 (2023-09-03) -### Feature +### Features + +- Speed up answering queries ([#1255](https://github.com/python-zeroconf/python-zeroconf/pull/1255), + [`2d3aed3`](https://github.com/python-zeroconf/python-zeroconf/commit/2d3aed36e24c73013fcf4acc90803fc1737d0917)) -* Speed up answering queries ([#1255](https://github.com/python-zeroconf/python-zeroconf/issues/1255)) ([`2d3aed3`](https://github.com/python-zeroconf/python-zeroconf/commit/2d3aed36e24c73013fcf4acc90803fc1737d0917)) ## v0.96.0 (2023-09-03) -### Feature +### Features + +- Optimize DNSCache.get_by_details + ([#1254](https://github.com/python-zeroconf/python-zeroconf/pull/1254), + [`ce59787`](https://github.com/python-zeroconf/python-zeroconf/commit/ce59787a170781ffdaa22425018d288b395ac081)) + +* feat: optimize DNSCache.get_by_details + +This is one of the most called functions since ServiceInfo.load_from_cache calls it + +* fix: make get_all_by_details thread-safe + +* fix: remove unneeded key checks -* Optimize DNSCache.get_by_details ([#1254](https://github.com/python-zeroconf/python-zeroconf/issues/1254)) ([`ce59787`](https://github.com/python-zeroconf/python-zeroconf/commit/ce59787a170781ffdaa22425018d288b395ac081)) ## v0.95.0 (2023-09-03) -### Feature +### Features + +- Speed up adding and removing RecordUpdateListeners + 
([#1253](https://github.com/python-zeroconf/python-zeroconf/pull/1253), + [`22e4a29`](https://github.com/python-zeroconf/python-zeroconf/commit/22e4a296d440b3038c0ff5ed6fc8878304ec4937)) -* Speed up adding and removing RecordUpdateListeners ([#1253](https://github.com/python-zeroconf/python-zeroconf/issues/1253)) ([`22e4a29`](https://github.com/python-zeroconf/python-zeroconf/commit/22e4a296d440b3038c0ff5ed6fc8878304ec4937)) ## v0.94.0 (2023-09-03) -### Feature +### Features + +- Optimize cache implementation + ([#1252](https://github.com/python-zeroconf/python-zeroconf/pull/1252), + [`8d3ec79`](https://github.com/python-zeroconf/python-zeroconf/commit/8d3ec792277aaf7ef790318b5b35ab00839ca3b3)) -* Optimize cache implementation ([#1252](https://github.com/python-zeroconf/python-zeroconf/issues/1252)) ([`8d3ec79`](https://github.com/python-zeroconf/python-zeroconf/commit/8d3ec792277aaf7ef790318b5b35ab00839ca3b3)) ## v0.93.1 (2023-09-03) -### Fix +### Bug Fixes + +- No change re-release due to unrecoverable failed CI run + ([#1251](https://github.com/python-zeroconf/python-zeroconf/pull/1251), + [`730921b`](https://github.com/python-zeroconf/python-zeroconf/commit/730921b155dfb9c62251c8c643b1302e807aff3b)) -* No change re-release due to unrecoverable failed CI run ([#1251](https://github.com/python-zeroconf/python-zeroconf/issues/1251)) ([`730921b`](https://github.com/python-zeroconf/python-zeroconf/commit/730921b155dfb9c62251c8c643b1302e807aff3b)) ## v0.93.0 (2023-09-02) -### Feature +### Features + +- Reduce overhead to answer questions + ([#1250](https://github.com/python-zeroconf/python-zeroconf/pull/1250), + [`7cb8da0`](https://github.com/python-zeroconf/python-zeroconf/commit/7cb8da0c6c5c944588009fe36012c1197c422668)) -* Reduce overhead to answer questions ([#1250](https://github.com/python-zeroconf/python-zeroconf/issues/1250)) ([`7cb8da0`](https://github.com/python-zeroconf/python-zeroconf/commit/7cb8da0c6c5c944588009fe36012c1197c422668)) ## v0.92.0 
(2023-09-02) -### Feature +### Features + +- Cache construction of records used to answer queries from the service registry + ([#1243](https://github.com/python-zeroconf/python-zeroconf/pull/1243), + [`0890f62`](https://github.com/python-zeroconf/python-zeroconf/commit/0890f628dbbd577fb77d3e6f2e267052b2b2b515)) -* Cache construction of records used to answer queries from the service registry ([#1243](https://github.com/python-zeroconf/python-zeroconf/issues/1243)) ([`0890f62`](https://github.com/python-zeroconf/python-zeroconf/commit/0890f628dbbd577fb77d3e6f2e267052b2b2b515)) ## v0.91.1 (2023-09-02) -### Fix +### Bug Fixes + +- Remove useless calls in ServiceInfo + ([#1248](https://github.com/python-zeroconf/python-zeroconf/pull/1248), + [`4e40fae`](https://github.com/python-zeroconf/python-zeroconf/commit/4e40fae20bf50b4608e28fad4a360c4ed48ac86b)) -* Remove useless calls in ServiceInfo ([#1248](https://github.com/python-zeroconf/python-zeroconf/issues/1248)) ([`4e40fae`](https://github.com/python-zeroconf/python-zeroconf/commit/4e40fae20bf50b4608e28fad4a360c4ed48ac86b)) ## v0.91.0 (2023-09-02) -### Feature +### Features + +- Reduce overhead to process incoming updates by avoiding the handle_response shim + ([#1247](https://github.com/python-zeroconf/python-zeroconf/pull/1247), + [`5e31f0a`](https://github.com/python-zeroconf/python-zeroconf/commit/5e31f0afe4c341fbdbbbe50348a829ea553cbda0)) -* Reduce overhead to process incoming updates by avoiding the handle_response shim ([#1247](https://github.com/python-zeroconf/python-zeroconf/issues/1247)) ([`5e31f0a`](https://github.com/python-zeroconf/python-zeroconf/commit/5e31f0afe4c341fbdbbbe50348a829ea553cbda0)) ## v0.90.0 (2023-09-02) -### Feature +### Features + +- Avoid python float conversion in listener hot path + ([#1245](https://github.com/python-zeroconf/python-zeroconf/pull/1245), + [`816ad4d`](https://github.com/python-zeroconf/python-zeroconf/commit/816ad4dceb3859bad4bb136bdb1d1ee2daa0bf5a)) + +### 
Refactoring + +- Reduce duplicate code in engine.py + ([#1246](https://github.com/python-zeroconf/python-zeroconf/pull/1246), + [`36ae505`](https://github.com/python-zeroconf/python-zeroconf/commit/36ae505dc9f95b59fdfb632960845a45ba8575b8)) -* Avoid python float conversion in listener hot path ([#1245](https://github.com/python-zeroconf/python-zeroconf/issues/1245)) ([`816ad4d`](https://github.com/python-zeroconf/python-zeroconf/commit/816ad4dceb3859bad4bb136bdb1d1ee2daa0bf5a)) ## v0.89.0 (2023-09-02) -### Feature +### Features + +- Reduce overhead to process incoming questions + ([#1244](https://github.com/python-zeroconf/python-zeroconf/pull/1244), + [`18b65d1`](https://github.com/python-zeroconf/python-zeroconf/commit/18b65d1c75622869b0c29258215d3db3ae520d6c)) -* Reduce overhead to process incoming questions ([#1244](https://github.com/python-zeroconf/python-zeroconf/issues/1244)) ([`18b65d1`](https://github.com/python-zeroconf/python-zeroconf/commit/18b65d1c75622869b0c29258215d3db3ae520d6c)) ## v0.88.0 (2023-08-29) -### Feature +### Features + +- Speed up RecordManager with additional cython defs + ([#1242](https://github.com/python-zeroconf/python-zeroconf/pull/1242), + [`5a76fc5`](https://github.com/python-zeroconf/python-zeroconf/commit/5a76fc5ff74f2941ffbf7570e45390f35e0b7e01)) -* Speed up RecordManager with additional cython defs ([#1242](https://github.com/python-zeroconf/python-zeroconf/issues/1242)) ([`5a76fc5`](https://github.com/python-zeroconf/python-zeroconf/commit/5a76fc5ff74f2941ffbf7570e45390f35e0b7e01)) ## v0.87.0 (2023-08-29) -### Feature +### Features + +- Improve performance by adding cython pxd for RecordManager + ([#1241](https://github.com/python-zeroconf/python-zeroconf/pull/1241), + [`a7dad3d`](https://github.com/python-zeroconf/python-zeroconf/commit/a7dad3d9743586f352e21eea1e129c6875f9a713)) -* Improve performance by adding cython pxd for RecordManager ([#1241](https://github.com/python-zeroconf/python-zeroconf/issues/1241)) 
([`a7dad3d`](https://github.com/python-zeroconf/python-zeroconf/commit/a7dad3d9743586f352e21eea1e129c6875f9a713)) ## v0.86.0 (2023-08-28) -### Feature +### Features + +- Build wheels for cpython 3.12 + ([#1239](https://github.com/python-zeroconf/python-zeroconf/pull/1239), + [`58bc154`](https://github.com/python-zeroconf/python-zeroconf/commit/58bc154f55b06b4ddfc4a141592488abe76f062a)) + +- Use server_key when processing DNSService records + ([#1238](https://github.com/python-zeroconf/python-zeroconf/pull/1238), + [`cc8feb1`](https://github.com/python-zeroconf/python-zeroconf/commit/cc8feb110fefc3fb714fd482a52f16e2b620e8c4)) -* Build wheels for cpython 3.12 ([#1239](https://github.com/python-zeroconf/python-zeroconf/issues/1239)) ([`58bc154`](https://github.com/python-zeroconf/python-zeroconf/commit/58bc154f55b06b4ddfc4a141592488abe76f062a)) -* Use server_key when processing DNSService records ([#1238](https://github.com/python-zeroconf/python-zeroconf/issues/1238)) ([`cc8feb1`](https://github.com/python-zeroconf/python-zeroconf/commit/cc8feb110fefc3fb714fd482a52f16e2b620e8c4)) ## v0.85.0 (2023-08-27) -### Feature +### Features + +- Simplify code to unpack properties + ([#1237](https://github.com/python-zeroconf/python-zeroconf/pull/1237), + [`68d9998`](https://github.com/python-zeroconf/python-zeroconf/commit/68d99985a0e9d2c72ff670b2e2af92271a6fe934)) -* Simplify code to unpack properties ([#1237](https://github.com/python-zeroconf/python-zeroconf/issues/1237)) ([`68d9998`](https://github.com/python-zeroconf/python-zeroconf/commit/68d99985a0e9d2c72ff670b2e2af92271a6fe934)) ## v0.84.0 (2023-08-27) -### Feature +### Features + +- Context managers in ServiceBrowser and AsyncServiceBrowser + ([#1233](https://github.com/python-zeroconf/python-zeroconf/pull/1233), + [`bd8d846`](https://github.com/python-zeroconf/python-zeroconf/commit/bd8d8467dec2a39a0b525043ea1051259100fded)) + +Co-authored-by: J. 
Nick Koston -* Context managers in ServiceBrowser and AsyncServiceBrowser ([#1233](https://github.com/python-zeroconf/python-zeroconf/issues/1233)) ([`bd8d846`](https://github.com/python-zeroconf/python-zeroconf/commit/bd8d8467dec2a39a0b525043ea1051259100fded)) ## v0.83.1 (2023-08-27) -### Fix +### Bug Fixes + +- Rebuild wheels with cython 3.0.2 + ([#1236](https://github.com/python-zeroconf/python-zeroconf/pull/1236), + [`dd637fb`](https://github.com/python-zeroconf/python-zeroconf/commit/dd637fb2e5a87ba283750e69d116e124bef54e7c)) -* Rebuild wheels with cython 3.0.2 ([#1236](https://github.com/python-zeroconf/python-zeroconf/issues/1236)) ([`dd637fb`](https://github.com/python-zeroconf/python-zeroconf/commit/dd637fb2e5a87ba283750e69d116e124bef54e7c)) ## v0.83.0 (2023-08-26) -### Feature +### Features + +- Speed up question and answer history with a cython pxd + ([#1234](https://github.com/python-zeroconf/python-zeroconf/pull/1234), + [`703ecb2`](https://github.com/python-zeroconf/python-zeroconf/commit/703ecb2901b2150fb72fac3deed61d7302561298)) -* Speed up question and answer history with a cython pxd ([#1234](https://github.com/python-zeroconf/python-zeroconf/issues/1234)) ([`703ecb2`](https://github.com/python-zeroconf/python-zeroconf/commit/703ecb2901b2150fb72fac3deed61d7302561298)) ## v0.82.1 (2023-08-22) -### Fix +### Bug Fixes + +- Build failures with older cython 0.29 series + ([#1232](https://github.com/python-zeroconf/python-zeroconf/pull/1232), + [`30c3ad9`](https://github.com/python-zeroconf/python-zeroconf/commit/30c3ad9d1bc6b589e1ca6675fea21907ebcd1ced)) -* Build failures with older cython 0.29 series ([#1232](https://github.com/python-zeroconf/python-zeroconf/issues/1232)) ([`30c3ad9`](https://github.com/python-zeroconf/python-zeroconf/commit/30c3ad9d1bc6b589e1ca6675fea21907ebcd1ced)) ## v0.82.0 (2023-08-22) -### Feature +### Features + +- Optimize processing of records in RecordUpdateListener subclasses + 
([#1231](https://github.com/python-zeroconf/python-zeroconf/pull/1231), + [`3e89294`](https://github.com/python-zeroconf/python-zeroconf/commit/3e89294ea0ecee1122e1c1ffdc78925add8ca40e)) -* Optimize processing of records in RecordUpdateListener subclasses ([#1231](https://github.com/python-zeroconf/python-zeroconf/issues/1231)) ([`3e89294`](https://github.com/python-zeroconf/python-zeroconf/commit/3e89294ea0ecee1122e1c1ffdc78925add8ca40e)) ## v0.81.0 (2023-08-22) -### Feature +### Features + +- Optimizing sending answers to questions + ([#1227](https://github.com/python-zeroconf/python-zeroconf/pull/1227), + [`cd7b56b`](https://github.com/python-zeroconf/python-zeroconf/commit/cd7b56b2aa0c8ee429da430e9a36abd515512011)) + +- Speed up the service registry with a cython pxd + ([#1226](https://github.com/python-zeroconf/python-zeroconf/pull/1226), + [`47d3c7a`](https://github.com/python-zeroconf/python-zeroconf/commit/47d3c7ad4bc5f2247631c3ad5e6b6156d45a0a4e)) -* Speed up the service registry with a cython pxd ([#1226](https://github.com/python-zeroconf/python-zeroconf/issues/1226)) ([`47d3c7a`](https://github.com/python-zeroconf/python-zeroconf/commit/47d3c7ad4bc5f2247631c3ad5e6b6156d45a0a4e)) -* Optimizing sending answers to questions ([#1227](https://github.com/python-zeroconf/python-zeroconf/issues/1227)) ([`cd7b56b`](https://github.com/python-zeroconf/python-zeroconf/commit/cd7b56b2aa0c8ee429da430e9a36abd515512011)) ## v0.80.0 (2023-08-15) -### Feature +### Features + +- Optimize unpacking properties in ServiceInfo + ([#1225](https://github.com/python-zeroconf/python-zeroconf/pull/1225), + [`1492e41`](https://github.com/python-zeroconf/python-zeroconf/commit/1492e41b3d5cba5598cc9dd6bd2bc7d238f13555)) -* Optimize unpacking properties in ServiceInfo ([#1225](https://github.com/python-zeroconf/python-zeroconf/issues/1225)) ([`1492e41`](https://github.com/python-zeroconf/python-zeroconf/commit/1492e41b3d5cba5598cc9dd6bd2bc7d238f13555)) ## v0.79.0 (2023-08-14) -### 
Feature +### Features + +- Refactor notify implementation to reduce overhead of adding and removing listeners + ([#1224](https://github.com/python-zeroconf/python-zeroconf/pull/1224), + [`ceb92cf`](https://github.com/python-zeroconf/python-zeroconf/commit/ceb92cfe42d885dbb38cee7aaeebf685d97627a9)) -* Refactor notify implementation to reduce overhead of adding and removing listeners ([#1224](https://github.com/python-zeroconf/python-zeroconf/issues/1224)) ([`ceb92cf`](https://github.com/python-zeroconf/python-zeroconf/commit/ceb92cfe42d885dbb38cee7aaeebf685d97627a9)) ## v0.78.0 (2023-08-14) -### Feature +### Features + +- Add cython pxd file for _listener.py to improve incoming message processing performance + ([#1221](https://github.com/python-zeroconf/python-zeroconf/pull/1221), + [`f459856`](https://github.com/python-zeroconf/python-zeroconf/commit/f459856a0a61b8afa8a541926d7e15d51f8e4aea)) -* Add cython pxd file for _listener.py to improve incoming message processing performance ([#1221](https://github.com/python-zeroconf/python-zeroconf/issues/1221)) ([`f459856`](https://github.com/python-zeroconf/python-zeroconf/commit/f459856a0a61b8afa8a541926d7e15d51f8e4aea)) ## v0.77.0 (2023-08-14) -### Feature +### Features + +- Cythonize _listener.py to improve incoming message processing performance + ([#1220](https://github.com/python-zeroconf/python-zeroconf/pull/1220), + [`9efde8c`](https://github.com/python-zeroconf/python-zeroconf/commit/9efde8c8c1ed14c5d3c162f185b49212fcfcb5c9)) -* Cythonize _listener.py to improve incoming message processing performance ([#1220](https://github.com/python-zeroconf/python-zeroconf/issues/1220)) ([`9efde8c`](https://github.com/python-zeroconf/python-zeroconf/commit/9efde8c8c1ed14c5d3c162f185b49212fcfcb5c9)) ## v0.76.0 (2023-08-14) -### Feature +### Features + +- Improve performance responding to queries + ([#1217](https://github.com/python-zeroconf/python-zeroconf/pull/1217), + 
[`69b33be`](https://github.com/python-zeroconf/python-zeroconf/commit/69b33be3b2f9d4a27ef5154cae94afca048efffa)) -* Improve performance responding to queries ([#1217](https://github.com/python-zeroconf/python-zeroconf/issues/1217)) ([`69b33be`](https://github.com/python-zeroconf/python-zeroconf/commit/69b33be3b2f9d4a27ef5154cae94afca048efffa)) ## v0.75.0 (2023-08-13) -### Feature +### Features + +- Expose flag to disable strict name checking in service registration + ([#1215](https://github.com/python-zeroconf/python-zeroconf/pull/1215), + [`5df8a57`](https://github.com/python-zeroconf/python-zeroconf/commit/5df8a57a14d59687a3c22ea8ee063e265031e278)) + +- Speed up processing incoming records + ([#1216](https://github.com/python-zeroconf/python-zeroconf/pull/1216), + [`aff625d`](https://github.com/python-zeroconf/python-zeroconf/commit/aff625dc6a5e816dad519644c4adac4f96980c04)) -* Expose flag to disable strict name checking in service registration ([#1215](https://github.com/python-zeroconf/python-zeroconf/issues/1215)) ([`5df8a57`](https://github.com/python-zeroconf/python-zeroconf/commit/5df8a57a14d59687a3c22ea8ee063e265031e278)) -* Speed up processing incoming records ([#1216](https://github.com/python-zeroconf/python-zeroconf/issues/1216)) ([`aff625d`](https://github.com/python-zeroconf/python-zeroconf/commit/aff625dc6a5e816dad519644c4adac4f96980c04)) ## v0.74.0 (2023-08-04) -### Feature +### Bug Fixes -* Speed up unpacking text records in ServiceInfo ([#1212](https://github.com/python-zeroconf/python-zeroconf/issues/1212)) ([`99a6f98`](https://github.com/python-zeroconf/python-zeroconf/commit/99a6f98e44a1287ba537eabb852b1b69923402f0)) +- Remove typing on reset_ttl for cython compat + ([#1213](https://github.com/python-zeroconf/python-zeroconf/pull/1213), + [`0094e26`](https://github.com/python-zeroconf/python-zeroconf/commit/0094e2684344c6b7edd7948924f093f1b4c19901)) -### Fix +### Features + +- Speed up unpacking text records in ServiceInfo + 
([#1212](https://github.com/python-zeroconf/python-zeroconf/pull/1212), + [`99a6f98`](https://github.com/python-zeroconf/python-zeroconf/commit/99a6f98e44a1287ba537eabb852b1b69923402f0)) -* Remove typing on reset_ttl for cython compat ([#1213](https://github.com/python-zeroconf/python-zeroconf/issues/1213)) ([`0094e26`](https://github.com/python-zeroconf/python-zeroconf/commit/0094e2684344c6b7edd7948924f093f1b4c19901)) ## v0.73.0 (2023-08-03) -### Feature +### Features + +- Add a cache to service_type_name + ([#1211](https://github.com/python-zeroconf/python-zeroconf/pull/1211), + [`53a694f`](https://github.com/python-zeroconf/python-zeroconf/commit/53a694f60e675ae0560e727be6b721b401c2b68f)) -* Add a cache to service_type_name ([#1211](https://github.com/python-zeroconf/python-zeroconf/issues/1211)) ([`53a694f`](https://github.com/python-zeroconf/python-zeroconf/commit/53a694f60e675ae0560e727be6b721b401c2b68f)) ## v0.72.3 (2023-08-03) -### Fix +### Bug Fixes + +- Revert adding typing to DNSRecord.suppressed_by + ([#1210](https://github.com/python-zeroconf/python-zeroconf/pull/1210), + [`3dba5ae`](https://github.com/python-zeroconf/python-zeroconf/commit/3dba5ae0c0e9473b7b20fd6fc79fa1a3b298dc5a)) -* Revert adding typing to DNSRecord.suppressed_by ([#1210](https://github.com/python-zeroconf/python-zeroconf/issues/1210)) ([`3dba5ae`](https://github.com/python-zeroconf/python-zeroconf/commit/3dba5ae0c0e9473b7b20fd6fc79fa1a3b298dc5a)) ## v0.72.2 (2023-08-03) -### Fix +### Bug Fixes + +- Revert DNSIncoming cimport in _dns.pxd + ([#1209](https://github.com/python-zeroconf/python-zeroconf/pull/1209), + [`5f14b6d`](https://github.com/python-zeroconf/python-zeroconf/commit/5f14b6dc687b3a0716d0ca7f61ccf1e93dfe5fa1)) -* Revert DNSIncoming cimport in _dns.pxd ([#1209](https://github.com/python-zeroconf/python-zeroconf/issues/1209)) ([`5f14b6d`](https://github.com/python-zeroconf/python-zeroconf/commit/5f14b6dc687b3a0716d0ca7f61ccf1e93dfe5fa1)) ## v0.72.1 (2023-08-03) -### Fix 
+### Bug Fixes + +- Race with InvalidStateError when async_request times out + ([#1208](https://github.com/python-zeroconf/python-zeroconf/pull/1208), + [`2233b6b`](https://github.com/python-zeroconf/python-zeroconf/commit/2233b6bc4ceeee5524d2ee88ecae8234173feb5f)) -* Race with InvalidStateError when async_request times out ([#1208](https://github.com/python-zeroconf/python-zeroconf/issues/1208)) ([`2233b6b`](https://github.com/python-zeroconf/python-zeroconf/commit/2233b6bc4ceeee5524d2ee88ecae8234173feb5f)) ## v0.72.0 (2023-08-02) -### Feature +### Features + +- Speed up processing incoming records + ([#1206](https://github.com/python-zeroconf/python-zeroconf/pull/1206), + [`126849c`](https://github.com/python-zeroconf/python-zeroconf/commit/126849c92be8cec9253fba9faa591029d992fcc3)) -* Speed up processing incoming records ([#1206](https://github.com/python-zeroconf/python-zeroconf/issues/1206)) ([`126849c`](https://github.com/python-zeroconf/python-zeroconf/commit/126849c92be8cec9253fba9faa591029d992fcc3)) ## v0.71.5 (2023-08-02) -### Fix +### Bug Fixes + +- Improve performance of ServiceInfo.async_request + ([#1205](https://github.com/python-zeroconf/python-zeroconf/pull/1205), + [`8019a73`](https://github.com/python-zeroconf/python-zeroconf/commit/8019a73c952f2fc4c88d849aab970fafedb316d8)) -* Improve performance of ServiceInfo.async_request ([#1205](https://github.com/python-zeroconf/python-zeroconf/issues/1205)) ([`8019a73`](https://github.com/python-zeroconf/python-zeroconf/commit/8019a73c952f2fc4c88d849aab970fafedb316d8)) ## v0.71.4 (2023-07-24) -### Fix +### Bug Fixes + +- Cleanup naming from previous refactoring in ServiceInfo + ([#1202](https://github.com/python-zeroconf/python-zeroconf/pull/1202), + [`b272d75`](https://github.com/python-zeroconf/python-zeroconf/commit/b272d75abd982f3be1f4b20f683cac38011cc6f4)) -* Cleanup naming from previous refactoring in ServiceInfo ([#1202](https://github.com/python-zeroconf/python-zeroconf/issues/1202)) 
([`b272d75`](https://github.com/python-zeroconf/python-zeroconf/commit/b272d75abd982f3be1f4b20f683cac38011cc6f4)) ## v0.71.3 (2023-07-23) -### Fix +### Bug Fixes + +- Pin python-semantic-release to fix release process + ([#1200](https://github.com/python-zeroconf/python-zeroconf/pull/1200), + [`c145a23`](https://github.com/python-zeroconf/python-zeroconf/commit/c145a238d768aa17c3aebe120c20a46bfbec6b99)) -* Pin python-semantic-release to fix release process ([#1200](https://github.com/python-zeroconf/python-zeroconf/issues/1200)) ([`c145a23`](https://github.com/python-zeroconf/python-zeroconf/commit/c145a238d768aa17c3aebe120c20a46bfbec6b99)) ## v0.71.2 (2023-07-23) -### Fix +### Bug Fixes + +- No change re-release to fix wheel builds + ([#1199](https://github.com/python-zeroconf/python-zeroconf/pull/1199), + [`8c3a4c8`](https://github.com/python-zeroconf/python-zeroconf/commit/8c3a4c80c221bea7401c12e1c6a525e75b7ffea2)) -* No change re-release to fix wheel builds ([#1199](https://github.com/python-zeroconf/python-zeroconf/issues/1199)) ([`8c3a4c8`](https://github.com/python-zeroconf/python-zeroconf/commit/8c3a4c80c221bea7401c12e1c6a525e75b7ffea2)) ## v0.71.1 (2023-07-23) -### Fix +### Bug Fixes + +- Add missing if TYPE_CHECKING guard to generate_service_query + ([#1198](https://github.com/python-zeroconf/python-zeroconf/pull/1198), + [`ac53adf`](https://github.com/python-zeroconf/python-zeroconf/commit/ac53adf7e71db14c1a0f9adbfd1d74033df36898)) -* Add missing if TYPE_CHECKING guard to generate_service_query ([#1198](https://github.com/python-zeroconf/python-zeroconf/issues/1198)) ([`ac53adf`](https://github.com/python-zeroconf/python-zeroconf/commit/ac53adf7e71db14c1a0f9adbfd1d74033df36898)) ## v0.71.0 (2023-07-08) -### Feature +### Features + +- Improve incoming data processing performance + ([#1194](https://github.com/python-zeroconf/python-zeroconf/pull/1194), + 
[`a56c776`](https://github.com/python-zeroconf/python-zeroconf/commit/a56c776008ef86f99db78f5997e45a57551be725)) -* Improve incoming data processing performance ([#1194](https://github.com/python-zeroconf/python-zeroconf/issues/1194)) ([`a56c776`](https://github.com/python-zeroconf/python-zeroconf/commit/a56c776008ef86f99db78f5997e45a57551be725)) ## v0.70.0 (2023-07-02) -### Feature +### Features + +- Add support for sending to a specific `addr` and `port` with `ServiceInfo.async_request` and + `ServiceInfo.request` ([#1192](https://github.com/python-zeroconf/python-zeroconf/pull/1192), + [`405f547`](https://github.com/python-zeroconf/python-zeroconf/commit/405f54762d3f61e97de9c1787e837e953de31412)) -* Add support for sending to a specific `addr` and `port` with `ServiceInfo.async_request` and `ServiceInfo.request` ([#1192](https://github.com/python-zeroconf/python-zeroconf/issues/1192)) ([`405f547`](https://github.com/python-zeroconf/python-zeroconf/commit/405f54762d3f61e97de9c1787e837e953de31412)) ## v0.69.0 (2023-06-18) -### Feature +### Features + +- Cython3 support ([#1190](https://github.com/python-zeroconf/python-zeroconf/pull/1190), + [`8ae8ba1`](https://github.com/python-zeroconf/python-zeroconf/commit/8ae8ba1af324b0c8c2da3bd12c264a5c0f3dcc3d)) + +- Reorder incoming data handler to reduce overhead + ([#1189](https://github.com/python-zeroconf/python-zeroconf/pull/1189), + [`32756ff`](https://github.com/python-zeroconf/python-zeroconf/commit/32756ff113f675b7a9cf16d3c0ab840ba733e5e4)) -* Cython3 support ([#1190](https://github.com/python-zeroconf/python-zeroconf/issues/1190)) ([`8ae8ba1`](https://github.com/python-zeroconf/python-zeroconf/commit/8ae8ba1af324b0c8c2da3bd12c264a5c0f3dcc3d)) -* Reorder incoming data handler to reduce overhead ([#1189](https://github.com/python-zeroconf/python-zeroconf/issues/1189)) ([`32756ff`](https://github.com/python-zeroconf/python-zeroconf/commit/32756ff113f675b7a9cf16d3c0ab840ba733e5e4)) ## v0.68.1 (2023-06-18) -### Fix 
+### Bug Fixes + +- Reduce debug logging overhead by adding missing checks to datagram_received + ([#1188](https://github.com/python-zeroconf/python-zeroconf/pull/1188), + [`ac5c50a`](https://github.com/python-zeroconf/python-zeroconf/commit/ac5c50afc70aaa33fcd20bf02222ff4f0c596fa3)) -* Reduce debug logging overhead by adding missing checks to datagram_received ([#1188](https://github.com/python-zeroconf/python-zeroconf/issues/1188)) ([`ac5c50a`](https://github.com/python-zeroconf/python-zeroconf/commit/ac5c50afc70aaa33fcd20bf02222ff4f0c596fa3)) ## v0.68.0 (2023-06-17) -### Feature +### Features + +- Reduce overhead to handle queries and responses + ([#1184](https://github.com/python-zeroconf/python-zeroconf/pull/1184), + [`81126b7`](https://github.com/python-zeroconf/python-zeroconf/commit/81126b7600f94848ef8c58b70bac0c6ab993c6ae)) + +- adds slots to handler classes + +- avoid any expression overhead and inline instead -* Reduce overhead to handle queries and responses ([#1184](https://github.com/python-zeroconf/python-zeroconf/issues/1184)) ([`81126b7`](https://github.com/python-zeroconf/python-zeroconf/commit/81126b7600f94848ef8c58b70bac0c6ab993c6ae)) ## v0.67.0 (2023-06-17) -### Feature +### Features + +- Speed up answering incoming questions + ([#1186](https://github.com/python-zeroconf/python-zeroconf/pull/1186), + [`8f37665`](https://github.com/python-zeroconf/python-zeroconf/commit/8f376658d2a3bef0353646e6fddfda15626b73a9)) -* Speed up answering incoming questions ([#1186](https://github.com/python-zeroconf/python-zeroconf/issues/1186)) ([`8f37665`](https://github.com/python-zeroconf/python-zeroconf/commit/8f376658d2a3bef0353646e6fddfda15626b73a9)) ## v0.66.0 (2023-06-13) -### Feature -* Optimize construction of outgoing dns records ([#1182](https://github.com/python-zeroconf/python-zeroconf/issues/1182)) ([`fc0341f`](https://github.com/python-zeroconf/python-zeroconf/commit/fc0341f281cdb71428c0f1cf90c12d34cbb4acae)) + +### Features + +- Optimize 
construction of outgoing dns records + ([#1182](https://github.com/python-zeroconf/python-zeroconf/pull/1182), + [`fc0341f`](https://github.com/python-zeroconf/python-zeroconf/commit/fc0341f281cdb71428c0f1cf90c12d34cbb4acae)) + ## v0.65.0 (2023-06-13) -### Feature -* Reduce overhead to enumerate ip addresses in ServiceInfo ([#1181](https://github.com/python-zeroconf/python-zeroconf/issues/1181)) ([`6a85cbf`](https://github.com/python-zeroconf/python-zeroconf/commit/6a85cbf2b872cb0abd184c2dd728d9ae3eb8115c)) + +### Features + +- Reduce overhead to enumerate ip addresses in ServiceInfo + ([#1181](https://github.com/python-zeroconf/python-zeroconf/pull/1181), + [`6a85cbf`](https://github.com/python-zeroconf/python-zeroconf/commit/6a85cbf2b872cb0abd184c2dd728d9ae3eb8115c)) + ## v0.64.1 (2023-06-05) -### Fix -* Small internal typing cleanups ([#1180](https://github.com/python-zeroconf/python-zeroconf/issues/1180)) ([`f03e511`](https://github.com/python-zeroconf/python-zeroconf/commit/f03e511f7aae72c5ccd4f7514d89e168847bd7a2)) + +### Bug Fixes + +- Small internal typing cleanups + ([#1180](https://github.com/python-zeroconf/python-zeroconf/pull/1180), + [`f03e511`](https://github.com/python-zeroconf/python-zeroconf/commit/f03e511f7aae72c5ccd4f7514d89e168847bd7a2)) + ## v0.64.0 (2023-06-05) -### Feature -* Speed up processing incoming records ([#1179](https://github.com/python-zeroconf/python-zeroconf/issues/1179)) ([`d919316`](https://github.com/python-zeroconf/python-zeroconf/commit/d9193160b05beeca3755e19fd377ba13fe37b071)) -### Fix -* Always answer QU questions when the exact same packet is received from different sources in sequence ([#1178](https://github.com/python-zeroconf/python-zeroconf/issues/1178)) ([`74d7ba1`](https://github.com/python-zeroconf/python-zeroconf/commit/74d7ba1aeeae56be087ee8142ee6ca1219744baa)) +### Bug Fixes + +- Always answer QU questions when the exact same packet is received from different sources in + sequence 
([#1178](https://github.com/python-zeroconf/python-zeroconf/pull/1178), + [`74d7ba1`](https://github.com/python-zeroconf/python-zeroconf/commit/74d7ba1aeeae56be087ee8142ee6ca1219744baa)) + +If the exact same packet with a QU question is asked from two different sources in a 1s window we + end up ignoring the second one as a duplicate. We should still respond in this case because the + client wants a unicast response and the question may not be answered by the previous packet since + the response may not be multicast. + +fix: include NSEC records in initial broadcast when registering a new service + +This also revealed that we do not send NSEC records in the initial broadcast. This needed to be + fixed in this PR as well for everything to work as expected since all the tests would fail with 2 + updates otherwise. + +### Features + +- Speed up processing incoming records + ([#1179](https://github.com/python-zeroconf/python-zeroconf/pull/1179), + [`d919316`](https://github.com/python-zeroconf/python-zeroconf/commit/d9193160b05beeca3755e19fd377ba13fe37b071)) + ## v0.63.0 (2023-05-25) -### Feature -* Small speed up to fetch dns addresses from ServiceInfo ([#1176](https://github.com/python-zeroconf/python-zeroconf/issues/1176)) ([`4deaa6e`](https://github.com/python-zeroconf/python-zeroconf/commit/4deaa6ed7c9161db55bf16ec068ab7260bbd4976)) -* Speed up the service registry ([#1174](https://github.com/python-zeroconf/python-zeroconf/issues/1174)) ([`360ceb2`](https://github.com/python-zeroconf/python-zeroconf/commit/360ceb2548c4c4974ff798aac43a6fff9803ea0e)) -* Improve dns cache performance ([#1172](https://github.com/python-zeroconf/python-zeroconf/issues/1172)) ([`bb496a1`](https://github.com/python-zeroconf/python-zeroconf/commit/bb496a1dd5fa3562c0412cb064d14639a542592e)) + +### Features + +- Improve dns cache performance + ([#1172](https://github.com/python-zeroconf/python-zeroconf/pull/1172), + 
[`bb496a1`](https://github.com/python-zeroconf/python-zeroconf/commit/bb496a1dd5fa3562c0412cb064d14639a542592e)) + +- Small speed up to fetch dns addresses from ServiceInfo + ([#1176](https://github.com/python-zeroconf/python-zeroconf/pull/1176), + [`4deaa6e`](https://github.com/python-zeroconf/python-zeroconf/commit/4deaa6ed7c9161db55bf16ec068ab7260bbd4976)) + +- Speed up the service registry + ([#1174](https://github.com/python-zeroconf/python-zeroconf/pull/1174), + [`360ceb2`](https://github.com/python-zeroconf/python-zeroconf/commit/360ceb2548c4c4974ff798aac43a6fff9803ea0e)) + ## v0.62.0 (2023-05-04) -### Feature -* Improve performance of ServiceBrowser outgoing query scheduler ([#1170](https://github.com/python-zeroconf/python-zeroconf/issues/1170)) ([`963d022`](https://github.com/python-zeroconf/python-zeroconf/commit/963d022ef82b615540fa7521d164a98a6c6f5209)) + +### Features + +- Improve performance of ServiceBrowser outgoing query scheduler + ([#1170](https://github.com/python-zeroconf/python-zeroconf/pull/1170), + [`963d022`](https://github.com/python-zeroconf/python-zeroconf/commit/963d022ef82b615540fa7521d164a98a6c6f5209)) + ## v0.61.0 (2023-05-03) -### Feature -* Speed up parsing NSEC records ([#1169](https://github.com/python-zeroconf/python-zeroconf/issues/1169)) ([`06fa94d`](https://github.com/python-zeroconf/python-zeroconf/commit/06fa94d87b4f0451cb475a921ce1d8e9562e0f26)) + +### Features + +- Speed up parsing NSEC records + ([#1169](https://github.com/python-zeroconf/python-zeroconf/pull/1169), + [`06fa94d`](https://github.com/python-zeroconf/python-zeroconf/commit/06fa94d87b4f0451cb475a921ce1d8e9562e0f26)) + ## v0.60.0 (2023-05-01) -### Feature -* Speed up processing incoming data ([#1167](https://github.com/python-zeroconf/python-zeroconf/issues/1167)) ([`fbaaf7b`](https://github.com/python-zeroconf/python-zeroconf/commit/fbaaf7bb6ff985bdabb85feb6cba144f12d4f1d6)) + +### Features + +- Speed up processing incoming data + 
([#1167](https://github.com/python-zeroconf/python-zeroconf/pull/1167), + [`fbaaf7b`](https://github.com/python-zeroconf/python-zeroconf/commit/fbaaf7bb6ff985bdabb85feb6cba144f12d4f1d6)) + ## v0.59.0 (2023-05-01) -### Feature -* Speed up decoding dns questions when processing incoming data ([#1168](https://github.com/python-zeroconf/python-zeroconf/issues/1168)) ([`f927190`](https://github.com/python-zeroconf/python-zeroconf/commit/f927190cb24f70fd7c825c6e12151fcc0daf3973)) + +### Features + +- Speed up decoding dns questions when processing incoming data + ([#1168](https://github.com/python-zeroconf/python-zeroconf/pull/1168), + [`f927190`](https://github.com/python-zeroconf/python-zeroconf/commit/f927190cb24f70fd7c825c6e12151fcc0daf3973)) + ## v0.58.2 (2023-04-26) -### Fix -* Re-release to rebuild failed wheels ([#1165](https://github.com/python-zeroconf/python-zeroconf/issues/1165)) ([`4986271`](https://github.com/python-zeroconf/python-zeroconf/commit/498627166a4976f1d9d8cd1f3654b0d50272d266)) + +### Bug Fixes + +- Re-release to rebuild failed wheels + ([#1165](https://github.com/python-zeroconf/python-zeroconf/pull/1165), + [`4986271`](https://github.com/python-zeroconf/python-zeroconf/commit/498627166a4976f1d9d8cd1f3654b0d50272d266)) + ## v0.58.1 (2023-04-26) -### Fix -* Reduce cast calls in service browser ([#1164](https://github.com/python-zeroconf/python-zeroconf/issues/1164)) ([`c0d65ae`](https://github.com/python-zeroconf/python-zeroconf/commit/c0d65aeae7037a18ed1149336f5e7bdb8b2dd8cf)) + +### Bug Fixes + +- Reduce cast calls in service browser + ([#1164](https://github.com/python-zeroconf/python-zeroconf/pull/1164), + [`c0d65ae`](https://github.com/python-zeroconf/python-zeroconf/commit/c0d65aeae7037a18ed1149336f5e7bdb8b2dd8cf)) + ## v0.58.0 (2023-04-23) -### Feature -* Speed up incoming parser ([#1163](https://github.com/python-zeroconf/python-zeroconf/issues/1163)) 
([`4626399`](https://github.com/python-zeroconf/python-zeroconf/commit/46263999c0c7ea5176885f1eadd2c8498834b70e)) + +### Features + +- Speed up incoming parser ([#1163](https://github.com/python-zeroconf/python-zeroconf/pull/1163), + [`4626399`](https://github.com/python-zeroconf/python-zeroconf/commit/46263999c0c7ea5176885f1eadd2c8498834b70e)) + ## v0.57.0 (2023-04-23) -### Feature -* Speed up incoming data parser ([#1161](https://github.com/python-zeroconf/python-zeroconf/issues/1161)) ([`cb4c3b2`](https://github.com/python-zeroconf/python-zeroconf/commit/cb4c3b2b80ca3b88b8de6e87062a45e03e8805a6)) + +### Features + +- Speed up incoming data parser + ([#1161](https://github.com/python-zeroconf/python-zeroconf/pull/1161), + [`cb4c3b2`](https://github.com/python-zeroconf/python-zeroconf/commit/cb4c3b2b80ca3b88b8de6e87062a45e03e8805a6)) + ## v0.56.0 (2023-04-07) -### Feature -* Reduce denial of service protection overhead ([#1157](https://github.com/python-zeroconf/python-zeroconf/issues/1157)) ([`2c2f26a`](https://github.com/python-zeroconf/python-zeroconf/commit/2c2f26a87d0aac81a77205b06bc9ba499caa2321)) + +### Features + +- Reduce denial of service protection overhead + ([#1157](https://github.com/python-zeroconf/python-zeroconf/pull/1157), + [`2c2f26a`](https://github.com/python-zeroconf/python-zeroconf/commit/2c2f26a87d0aac81a77205b06bc9ba499caa2321)) + ## v0.55.0 (2023-04-07) -### Feature -* Improve performance of processing incoming records ([#1155](https://github.com/python-zeroconf/python-zeroconf/issues/1155)) ([`b65e279`](https://github.com/python-zeroconf/python-zeroconf/commit/b65e2792751c44e0fafe9ad3a55dadc5d8ee9d46)) + +### Features + +- Improve performance of processing incoming records + ([#1155](https://github.com/python-zeroconf/python-zeroconf/pull/1155), + [`b65e279`](https://github.com/python-zeroconf/python-zeroconf/commit/b65e2792751c44e0fafe9ad3a55dadc5d8ee9d46)) + ## v0.54.0 (2023-04-03) -### Feature -* Avoid waking async_request when record 
updates are not relevant ([#1153](https://github.com/python-zeroconf/python-zeroconf/issues/1153)) ([`a3f970c`](https://github.com/python-zeroconf/python-zeroconf/commit/a3f970c7f66067cf2c302c49ed6ad8286f19b679)) + +### Features + +- Avoid waking async_request when record updates are not relevant + ([#1153](https://github.com/python-zeroconf/python-zeroconf/pull/1153), + [`a3f970c`](https://github.com/python-zeroconf/python-zeroconf/commit/a3f970c7f66067cf2c302c49ed6ad8286f19b679)) + ## v0.53.1 (2023-04-03) -### Fix -* Addresses incorrect after server name change ([#1154](https://github.com/python-zeroconf/python-zeroconf/issues/1154)) ([`41ea06a`](https://github.com/python-zeroconf/python-zeroconf/commit/41ea06a0192c0d186e678009285759eb37d880d5)) + +### Bug Fixes + +- Addresses incorrect after server name change + ([#1154](https://github.com/python-zeroconf/python-zeroconf/pull/1154), + [`41ea06a`](https://github.com/python-zeroconf/python-zeroconf/commit/41ea06a0192c0d186e678009285759eb37d880d5)) + ## v0.53.0 (2023-04-02) -### Feature -* Improve ServiceBrowser performance by removing OrderedDict ([#1148](https://github.com/python-zeroconf/python-zeroconf/issues/1148)) ([`9a16be5`](https://github.com/python-zeroconf/python-zeroconf/commit/9a16be56a9f69a5d0f7cde13dc1337b6d93c1433)) -### Fix -* Make parsed_scoped_addresses return addresses in the same order as all other methods ([#1150](https://github.com/python-zeroconf/python-zeroconf/issues/1150)) ([`9b6adcf`](https://github.com/python-zeroconf/python-zeroconf/commit/9b6adcf5c04a469632ee866c32f5898c5cbf810a)) +### Bug Fixes + +- Make parsed_scoped_addresses return addresses in the same order as all other methods + ([#1150](https://github.com/python-zeroconf/python-zeroconf/pull/1150), + [`9b6adcf`](https://github.com/python-zeroconf/python-zeroconf/commit/9b6adcf5c04a469632ee866c32f5898c5cbf810a)) + +### Features + +- Improve ServiceBrowser performance by removing OrderedDict + 
([#1148](https://github.com/python-zeroconf/python-zeroconf/pull/1148), + [`9a16be5`](https://github.com/python-zeroconf/python-zeroconf/commit/9a16be56a9f69a5d0f7cde13dc1337b6d93c1433)) -### Technically breaking change -* IP Addresses returned from `ServiceInfo.parsed_addresses` are now stringified using the python `ipaddress` library which may format them differently than `socket.inet_ntop` depending on the operating system. It is recommended to use `ServiceInfo.ip_addresses_by_version` instead going forward as it offers a stronger guarantee since it returns `ipaddress` objects. ## v0.52.0 (2023-04-02) -### Feature -* Small cleanups to cache cleanup interval ([#1146](https://github.com/python-zeroconf/python-zeroconf/issues/1146)) ([`b434b60`](https://github.com/python-zeroconf/python-zeroconf/commit/b434b60f14ebe8f114b7b19bb4f54081c8ae0173)) -* Add ip_addresses_by_version to ServiceInfo ([#1145](https://github.com/python-zeroconf/python-zeroconf/issues/1145)) ([`524494e`](https://github.com/python-zeroconf/python-zeroconf/commit/524494edd49bd049726b19ae8ac8f6eea69a3943)) -* Speed up processing records in the ServiceBrowser ([#1143](https://github.com/python-zeroconf/python-zeroconf/issues/1143)) ([`6a327d0`](https://github.com/python-zeroconf/python-zeroconf/commit/6a327d00ffb81de55b7c5b599893c789996680c1)) -* Speed up matching types in the ServiceBrowser ([#1144](https://github.com/python-zeroconf/python-zeroconf/issues/1144)) ([`68871c3`](https://github.com/python-zeroconf/python-zeroconf/commit/68871c3b5569e41740a66b7d3d7fa5cc41514ea5)) -* Include tests and docs in sdist archives ([#1142](https://github.com/python-zeroconf/python-zeroconf/issues/1142)) ([`da10a3b`](https://github.com/python-zeroconf/python-zeroconf/commit/da10a3b2827cee0719d3bb9152ae897f061c6e2e)) + +### Features + +- Add ip_addresses_by_version to ServiceInfo + ([#1145](https://github.com/python-zeroconf/python-zeroconf/pull/1145), + 
[`524494e`](https://github.com/python-zeroconf/python-zeroconf/commit/524494edd49bd049726b19ae8ac8f6eea69a3943)) + +- Include tests and docs in sdist archives + ([#1142](https://github.com/python-zeroconf/python-zeroconf/pull/1142), + [`da10a3b`](https://github.com/python-zeroconf/python-zeroconf/commit/da10a3b2827cee0719d3bb9152ae897f061c6e2e)) + +feat: Include tests and docs in sdist archives + +Include documentation and test files in source distributions, in order to make them more useful for + packagers (Linux distributions, Conda). Testing is an important part of packaging process, and at + least Gentoo users have requested offline documentation for Python packages. Furthermore, the + COPYING file was missing from sdist, even though it was referenced in README. + +- Small cleanups to cache cleanup interval + ([#1146](https://github.com/python-zeroconf/python-zeroconf/pull/1146), + [`b434b60`](https://github.com/python-zeroconf/python-zeroconf/commit/b434b60f14ebe8f114b7b19bb4f54081c8ae0173)) + +- Speed up matching types in the ServiceBrowser + ([#1144](https://github.com/python-zeroconf/python-zeroconf/pull/1144), + [`68871c3`](https://github.com/python-zeroconf/python-zeroconf/commit/68871c3b5569e41740a66b7d3d7fa5cc41514ea5)) + +- Speed up processing records in the ServiceBrowser + ([#1143](https://github.com/python-zeroconf/python-zeroconf/pull/1143), + [`6a327d0`](https://github.com/python-zeroconf/python-zeroconf/commit/6a327d00ffb81de55b7c5b599893c789996680c1)) + ## v0.51.0 (2023-04-01) -### Feature -* Improve performance of constructing ServiceInfo ([#1141](https://github.com/python-zeroconf/python-zeroconf/issues/1141)) ([`36d5b45`](https://github.com/python-zeroconf/python-zeroconf/commit/36d5b45a4ece1dca902e9c3c79b5a63b8d9ae41f)) + +### Features + +- Improve performance of constructing ServiceInfo + ([#1141](https://github.com/python-zeroconf/python-zeroconf/pull/1141), + 
[`36d5b45`](https://github.com/python-zeroconf/python-zeroconf/commit/36d5b45a4ece1dca902e9c3c79b5a63b8d9ae41f)) + ## v0.50.0 (2023-04-01) -### Feature -* Small speed up to handler dispatch ([#1140](https://github.com/python-zeroconf/python-zeroconf/issues/1140)) ([`5bd1b6e`](https://github.com/python-zeroconf/python-zeroconf/commit/5bd1b6e7b4dd796069461c737ded956305096307)) + +### Features + +- Small speed up to handler dispatch + ([#1140](https://github.com/python-zeroconf/python-zeroconf/pull/1140), + [`5bd1b6e`](https://github.com/python-zeroconf/python-zeroconf/commit/5bd1b6e7b4dd796069461c737ded956305096307)) + ## v0.49.0 (2023-04-01) -### Feature -* Speed up processing incoming records ([#1139](https://github.com/python-zeroconf/python-zeroconf/issues/1139)) ([`7246a34`](https://github.com/python-zeroconf/python-zeroconf/commit/7246a344b6c0543871b40715c95c9435db4c7f81)) + +### Features + +- Speed up processing incoming records + ([#1139](https://github.com/python-zeroconf/python-zeroconf/pull/1139), + [`7246a34`](https://github.com/python-zeroconf/python-zeroconf/commit/7246a344b6c0543871b40715c95c9435db4c7f81)) + ## v0.48.0 (2023-04-01) -### Feature -* Reduce overhead to send responses ([#1135](https://github.com/python-zeroconf/python-zeroconf/issues/1135)) ([`c4077dd`](https://github.com/python-zeroconf/python-zeroconf/commit/c4077dde6dfde9e2598eb63daa03c36063a3e7b0)) + +### Features + +- Reduce overhead to send responses + ([#1135](https://github.com/python-zeroconf/python-zeroconf/pull/1135), + [`c4077dd`](https://github.com/python-zeroconf/python-zeroconf/commit/c4077dde6dfde9e2598eb63daa03c36063a3e7b0)) + ## v0.47.4 (2023-03-20) -### Fix -* Correct duplicate record entries in windows wheels by updating poetry-core ([#1134](https://github.com/python-zeroconf/python-zeroconf/issues/1134)) ([`a43055d`](https://github.com/python-zeroconf/python-zeroconf/commit/a43055d3fa258cd762c3e9394b01f8bdcb24f97e)) + +### Bug Fixes + +- Correct duplicate record 
entries in windows wheels by updating poetry-core + ([#1134](https://github.com/python-zeroconf/python-zeroconf/pull/1134), + [`a43055d`](https://github.com/python-zeroconf/python-zeroconf/commit/a43055d3fa258cd762c3e9394b01f8bdcb24f97e)) + ## v0.47.3 (2023-02-14) -### Fix -* Hold a strong reference to the query sender start task ([#1128](https://github.com/python-zeroconf/python-zeroconf/issues/1128)) ([`808c3b2`](https://github.com/python-zeroconf/python-zeroconf/commit/808c3b2194a7f499a469a9893102d328ccee83db)) + +### Bug Fixes + +- Hold a strong reference to the query sender start task + ([#1128](https://github.com/python-zeroconf/python-zeroconf/pull/1128), + [`808c3b2`](https://github.com/python-zeroconf/python-zeroconf/commit/808c3b2194a7f499a469a9893102d328ccee83db)) + ## v0.47.2 (2023-02-14) -### Fix -* Missing c extensions with newer poetry ([#1129](https://github.com/python-zeroconf/python-zeroconf/issues/1129)) ([`44d7fc6`](https://github.com/python-zeroconf/python-zeroconf/commit/44d7fc6483485102f60c91d591d0d697872f8865)) + +### Bug Fixes + +- Missing c extensions with newer poetry + ([#1129](https://github.com/python-zeroconf/python-zeroconf/pull/1129), + [`44d7fc6`](https://github.com/python-zeroconf/python-zeroconf/commit/44d7fc6483485102f60c91d591d0d697872f8865)) + ## v0.47.1 (2022-12-24) -### Fix -* The equality checks for DNSPointer and DNSService should be case insensitive ([#1122](https://github.com/python-zeroconf/python-zeroconf/issues/1122)) ([`48ae77f`](https://github.com/python-zeroconf/python-zeroconf/commit/48ae77f026a96e2ca475b0ff80cb6d22207ce52f)) + +### Bug Fixes + +- The equality checks for DNSPointer and DNSService should be case insensitive + ([#1122](https://github.com/python-zeroconf/python-zeroconf/pull/1122), + [`48ae77f`](https://github.com/python-zeroconf/python-zeroconf/commit/48ae77f026a96e2ca475b0ff80cb6d22207ce52f)) + ## v0.47.0 (2022-12-22) -### Feature -* Optimize equality checks for DNS records 
([#1120](https://github.com/python-zeroconf/python-zeroconf/issues/1120)) ([`3a25ff7`](https://github.com/python-zeroconf/python-zeroconf/commit/3a25ff74bea83cd7d50888ce1ebfd7650d704bfa)) + +### Features + +- Optimize equality checks for DNS records + ([#1120](https://github.com/python-zeroconf/python-zeroconf/pull/1120), + [`3a25ff7`](https://github.com/python-zeroconf/python-zeroconf/commit/3a25ff74bea83cd7d50888ce1ebfd7650d704bfa)) + ## v0.46.0 (2022-12-21) -### Feature -* Optimize the dns cache ([#1119](https://github.com/python-zeroconf/python-zeroconf/issues/1119)) ([`e80fcef`](https://github.com/python-zeroconf/python-zeroconf/commit/e80fcef967024f8e846e44b464a82a25f5550edf)) + +### Features + +- Optimize the dns cache ([#1119](https://github.com/python-zeroconf/python-zeroconf/pull/1119), + [`e80fcef`](https://github.com/python-zeroconf/python-zeroconf/commit/e80fcef967024f8e846e44b464a82a25f5550edf)) + ## v0.45.0 (2022-12-20) -### Feature -* Optimize construction of outgoing packets ([#1118](https://github.com/python-zeroconf/python-zeroconf/issues/1118)) ([`81e186d`](https://github.com/python-zeroconf/python-zeroconf/commit/81e186d365c018381f9b486a4dbe4e2e4b8bacbf)) + +### Features + +- Optimize construction of outgoing packets + ([#1118](https://github.com/python-zeroconf/python-zeroconf/pull/1118), + [`81e186d`](https://github.com/python-zeroconf/python-zeroconf/commit/81e186d365c018381f9b486a4dbe4e2e4b8bacbf)) + ## v0.44.0 (2022-12-18) -### Feature -* Optimize dns objects by adding pxd files ([#1113](https://github.com/python-zeroconf/python-zeroconf/issues/1113)) ([`919d4d8`](https://github.com/python-zeroconf/python-zeroconf/commit/919d4d875747b4fa68e25bccd5aae7f304d8a36d)) + +### Features + +- Optimize dns objects by adding pxd files + ([#1113](https://github.com/python-zeroconf/python-zeroconf/pull/1113), + [`919d4d8`](https://github.com/python-zeroconf/python-zeroconf/commit/919d4d875747b4fa68e25bccd5aae7f304d8a36d)) + ## v0.43.0 (2022-12-18) -### 
Feature -* Optimize incoming parser by reducing call stack ([#1116](https://github.com/python-zeroconf/python-zeroconf/issues/1116)) ([`11f3f0e`](https://github.com/python-zeroconf/python-zeroconf/commit/11f3f0e699e00c1ee3d6d8ab5e30f62525510589)) + +### Features + +- Optimize incoming parser by reducing call stack + ([#1116](https://github.com/python-zeroconf/python-zeroconf/pull/1116), + [`11f3f0e`](https://github.com/python-zeroconf/python-zeroconf/commit/11f3f0e699e00c1ee3d6d8ab5e30f62525510589)) + ## v0.42.0 (2022-12-18) -### Feature -* Optimize incoming parser by using unpack_from ([#1115](https://github.com/python-zeroconf/python-zeroconf/issues/1115)) ([`a7d50ba`](https://github.com/python-zeroconf/python-zeroconf/commit/a7d50baab362eadd2d292df08a39de6836b41ea7)) -## v0.41.0 (2022-12-18) -### Feature -* Optimize incoming parser by adding pxd files ([#1111](https://github.com/python-zeroconf/python-zeroconf/issues/1111)) ([`26efeb0`](https://github.com/python-zeroconf/python-zeroconf/commit/26efeb09783050266242542228f34eb4dd83e30c)) +### Features -## v0.40.1 (2022-12-18) -### Fix -* Fix project name in pyproject.toml ([#1112](https://github.com/python-zeroconf/python-zeroconf/issues/1112)) ([`a330f62`](https://github.com/python-zeroconf/python-zeroconf/commit/a330f62040475257c4a983044e1675aeb95e030a)) +- Optimize incoming parser by using unpack_from + ([#1115](https://github.com/python-zeroconf/python-zeroconf/pull/1115), + [`a7d50ba`](https://github.com/python-zeroconf/python-zeroconf/commit/a7d50baab362eadd2d292df08a39de6836b41ea7)) -## v0.40.0 (2022-12-17) -### Feature -* Drop async_timeout requirement for python 3.11+ ([#1107](https://github.com/python-zeroconf/python-zeroconf/issues/1107)) ([`1f4224e`](https://github.com/python-zeroconf/python-zeroconf/commit/1f4224ef122299235013cb81b501f8ff9a30dea1)) -# 0.39.5 +## v0.41.0 (2022-12-18) - - This is a stub version to initialize python-semantic-release +### Features - This version will not be published +- 
Optimize incoming parser by adding pxd files + ([#1111](https://github.com/python-zeroconf/python-zeroconf/pull/1111), + [`26efeb0`](https://github.com/python-zeroconf/python-zeroconf/commit/26efeb09783050266242542228f34eb4dd83e30c)) -# 0.39.4 - - Fix IP changes being missed by ServiceInfo (\#1102) @bdraco +## v0.40.1 (2022-12-18) -# 0.39.3 +### Bug Fixes - - Fix port changes not being seen by ServiceInfo (\#1100) @bdraco +- Fix project name in pyproject.toml + ([#1112](https://github.com/python-zeroconf/python-zeroconf/pull/1112), + [`a330f62`](https://github.com/python-zeroconf/python-zeroconf/commit/a330f62040475257c4a983044e1675aeb95e030a)) -# 0.39.2 - - Performance improvements for parsing incoming packet data (\#1095) - (\#1097) @bdraco +## v0.40.0 (2022-12-17) -# 0.39.1 +### Features - - Performance improvements for constructing outgoing packet data - (\#1090) @bdraco +- Drop async_timeout requirement for python 3.11+ + ([#1107](https://github.com/python-zeroconf/python-zeroconf/pull/1107), + [`1f4224e`](https://github.com/python-zeroconf/python-zeroconf/commit/1f4224ef122299235013cb81b501f8ff9a30dea1)) -# 0.39.0 -Technically backwards incompatible: +## v0.39.5 (2022-12-17) - - Switch to using async\_timeout for timeouts (\#1081) @bdraco - - Significantly reduces the number of asyncio tasks that are created - when using ServiceInfo or - AsyncServiceInfo -# 0.38.7 +## v0.39.4 (2022-10-31) - - Performance improvements for parsing incoming packet data (\#1076) - @bdraco -# 0.38.6 +## v0.39.3 (2022-10-26) - - Performance improvements for fetching ServiceInfo (\#1068) @bdraco -# 0.38.5 +## v0.39.2 (2022-10-20) - - Fix ServiceBrowsers not getting ServiceStateChange.Removed callbacks - on PTR record expire (\#1064) @bdraco - - ServiceBrowsers were only getting a - ServiceStateChange.Removed callback - when the record was sent with a TTL of 0. ServiceBrowsers now - correctly get a - ServiceStateChange.Removed callback - when the record expires as well. 
- - Fix missing minimum version of python 3.7 (\#1060) @stevencrader +## v0.39.1 (2022-09-05) -# 0.38.4 - - Fix IP Address updates when hostname is uppercase (\#1057) @bdraco - - ServiceBrowsers would not callback updates when the ip address - changed if the hostname contained uppercase characters +## v0.39.0 (2022-08-05) -# 0.38.3 -Version bump only, no changes from 0.38.2 +## v0.38.7 (2022-06-14) -# 0.38.2 - - Make decode errors more helpful in finding the source of the bad - data (\#1052) @bdraco +## v0.38.6 (2022-05-06) -# 0.38.1 - - Improve performance of query scheduler (\#1043) @bdraco - - Avoid linear type searches in ServiceBrowsers (\#1044) @bdraco +## v0.38.5 (2022-05-01) -# 0.38.0 - - Handle Service types that end with another service type (\#1041) - @apworks1 +## v0.38.4 (2022-02-28) -Backwards incompatible: - - Dropped Python 3.6 support (\#1009) @bdraco +## v0.38.3 (2022-01-31) -# 0.37.0 -Technically backwards incompatible: +## v0.38.2 (2022-01-31) - - Adding a listener that does not inherit from RecordUpdateListener - now logs an error (\#1034) @bdraco - - The NotRunningException exception is now thrown when Zeroconf is not - running (\#1033) @bdraco - - Before this change the consumer would get a timeout or an - EventLoopBlocked exception when calling - ServiceInfo.\*request when the - instance had already been shutdown or had failed to startup. +## v0.38.1 (2021-12-23) - - The EventLoopBlocked exception is now thrown when a coroutine times - out (\#1032) @bdraco - - Previously - concurrent.futures.TimeoutError would - have been raised instead. This is never expected to happen during - normal operation. 
-# 0.36.13 +## v0.38.0 (2021-12-23) - - Unavailable interfaces are now skipped during socket bind (\#1028) - @bdraco - - Downgraded incoming corrupt packet logging to debug (\#1029) @bdraco - - Warning about network traffic we have no control over is confusing - to users as they think there is something wrong with zeroconf +## v0.37.0 (2021-11-18) -# 0.36.12 - - Prevented service lookups from deadlocking if time abruptly moves - backwards (\#1006) @bdraco - - The typical reason time moves backwards is via an ntp update +## v0.36.13 (2021-11-13) -# 0.36.11 -No functional changes from 0.36.10. This release corrects an error in -the README.rst file that prevented the build from uploading to PyPI +## v0.36.12 (2021-11-05) -# 0.36.10 - - scope\_id is now stripped from IPv6 addresses if given (\#1020) - @StevenLooman - - cpython 3.9 allows a suffix %scope\_id in IPv6Address. This caused - an error with the existing code if it was not stripped +## v0.36.11 (2021-10-30) - - Optimized decoding labels from incoming packets (\#1019) @bdraco -# 0.36.9 +## v0.36.10 (2021-10-30) - - Ensure ServiceInfo orders newest addresses first (\#1012) @bdraco - - This change effectively restored the behavior before 1s cache flush - expire behavior described in rfc6762 section 10.2 was added for - callers that rely on this. 
-# 0.36.8 +## v0.36.9 (2021-10-22) - - Fixed ServiceBrowser infinite loop when zeroconf is closed before it - is canceled (\#1008) @bdraco -# 0.36.7 +## v0.36.8 (2021-10-10) - - Improved performance of responding to queries (\#994) (\#996) - (\#997) @bdraco - - Improved log message when receiving an invalid or corrupt packet - (\#998) @bdraco -# 0.36.6 +## v0.36.7 (2021-09-22) - - Improved performance of sending outgoing packets (\#990) @bdraco -# 0.36.5 +## v0.36.6 (2021-09-19) - - Reduced memory usage for incoming and outgoing packets (\#987) - @bdraco -# 0.36.4 +## v0.36.5 (2021-09-18) - - Improved performance of constructing outgoing packets (\#978) - (\#979) @bdraco - - Deferred parsing of incoming packets when it can be avoided (\#983) - @bdraco -# 0.36.3 +## v0.36.4 (2021-09-16) - - Improved performance of parsing incoming packets (\#975) @bdraco -# 0.36.2 +## v0.36.3 (2021-09-14) - - Include NSEC records for non-existent types when responding with - addresses (\#972) (\#971) @bdraco Implements RFC6762 sec 6.2 - () -# 0.36.1 - - - Skip goodbye packets for addresses when there is another service - registered with the same name (\#968) @bdraco - - If a ServiceInfo that used the same server name as another - ServiceInfo was unregistered, goodbye packets would be sent for the - addresses and would cause the other service to be seen as offline. - - - Fixed equality and hash for dns records with the unique bit (\#969) - @bdraco - - These records should have the same hash and equality since the - unique bit (cache flush bit) is not considered when adding or - removing the records from the cache. 
- -# 0.36.0 - -Technically backwards incompatible: - - - Fill incomplete IPv6 tuples to avoid WinError on windows (\#965) - @lokesh2019 - - Fixed \#932 - -# 0.35.1 +## v0.36.2 (2021-08-30) - - Only reschedule types if the send next time changes (\#958) @bdraco - - When the PTR response was seen again, the timer was being canceled - and rescheduled even if the timer was for the same time. While this - did not cause any breakage, it is quite inefficient. - - Cache DNS record and question hashes (\#960) @bdraco - - The hash was being recalculated every time the object was being used - in a set or dict. Since the hashes are effectively immutable, we - only calculate them once now. +## v0.36.1 (2021-08-29) -# 0.35.0 - - Reduced chance of accidental synchronization of ServiceInfo requests - (\#955) @bdraco - - Sort aggregated responses to increase chance of name compression - (\#954) @bdraco - -Technically backwards incompatible: - - - Send unicast replies on the same socket the query was received - (\#952) @bdraco - - When replying to a QU question, we do not know if the sending host - is reachable from all of the sending sockets. We now avoid this - problem by replying via the receiving socket. This was the existing - behavior when InterfaceChoice.Default - is set. - - This change extends the unicast relay behavior to used with - InterfaceChoice.Default to apply when - InterfaceChoice.All or interfaces are - explicitly passed when instantiating a - Zeroconf instance. - - Fixes \#951 - -# 0.34.3 - - - Fix sending immediate multicast responses (\#949) @bdraco - -# 0.34.2 - - - Coalesce aggregated multicast answers (\#945) @bdraco - - When the random delay is shorter than the last scheduled response, - answers are now added to the same outgoing time group. 
- - This reduces traffic when we already know we will be sending a group - of answers inside the random delay window described in - datatracker.ietf.org/doc/html/rfc6762\#section-6.3 - - - Ensure ServiceInfo requests can be answered inside the default - timeout with network protection (\#946) @bdraco - - Adjust the time windows to ensure responses that have triggered the - protection against against excessive packet flooding due to software - bugs or malicious attack described in RFC6762 section 6 can respond - in under 1350ms to ensure ServiceInfo can ask two questions within - the default timeout of 3000ms - -# 0.34.1 - - - Ensure multicast aggregation sends responses within 620ms (\#942) - @bdraco - - Responses that trigger the protection against against excessive - packet flooding due to software bugs or malicious attack described - in RFC6762 section 6 could cause the multicast aggregation response - to be delayed longer than 620ms (The maximum random delay of 120ms - and 500ms additional for aggregation). - - Only responses that trigger the protection are delayed longer than - 620ms - -# 0.34.0 - - - Implemented Multicast Response Aggregation (\#940) @bdraco - - Responses are now aggregated when possible per rules in RFC6762 - section 6.4 - - Responses that trigger the protection against against excessive - packet flooding due to software bugs or malicious attack described - in RFC6762 section 6 are delayed instead of discarding as it was - causing responders that implement Passive Observation Of Failures - (POOF) to evict the records. - - Probe responses are now always sent immediately as there were cases - where they would fail to be answered in time to defend a name. 
- -# 0.33.4 - - - Ensure zeroconf can be loaded when the system disables IPv6 (\#933) - @che0 - -# 0.33.3 - - - Added support for forward dns compression pointers (\#934) @bdraco - - Provide sockname when logging a protocol error (\#935) @bdraco - -# 0.33.2 - - - Handle duplicate goodbye answers in the same packet (\#928) @bdraco - - Solves an exception being thrown when we tried to remove the known - answer from the cache when the second goodbye answer in the same - packet was processed - - Fixed \#926 - - - Skip ipv6 interfaces that return ENODEV (\#930) @bdraco - -# 0.33.1 - - - Version number change only with less restrictive directory - permissions - - Fixed \#923 - -# 0.33.0 - -This release eliminates all threading locks as all non-threadsafe -operations now happen in the event loop. - - - Let connection\_lost close the underlying socket (\#918) @bdraco - - The socket was closed during shutdown before asyncio's - connection\_lost handler had a chance to close it which resulted in - a traceback on windows. - - Fixed \#917 - -Technically backwards incompatible: - - - Removed duplicate unregister\_all\_services code (\#910) @bdraco - - Calling Zeroconf.close from same asyncio event loop zeroconf is - running in will now skip unregister\_all\_services and log a warning - as this a blocking operation and is not async safe and never has - been. - - Use AsyncZeroconf instead, or for legacy code call - async\_unregister\_all\_services before Zeroconf.close - -# 0.32.1 - - - Increased timeout in ServiceInfo.request to handle loaded systems - (\#895) @bdraco - - It can take a few seconds for a loaded system to run the - async\_request coroutine when the - event loop is busy, or the system is CPU bound (example being Home - Assistant startup). We now add an additional - \_LOADED\_SYSTEM\_TIMEOUT (10s) to - the run\_coroutine\_threadsafe calls - to ensure the coroutine has the total amount of time to run up to - its internal timeout (default of 3000ms). 
- - Ten seconds is a bit large of a timeout; however, it is only used in - cases where we wrap other timeouts. We now expect the only instance - the run\_coroutine\_threadsafe result - timeout will happen in a production circumstance is when someone is - running a ServiceInfo.request() in a - thread and another thread calls - Zeroconf.close() at just the right - moment that the future is never completed unless the system is so - loaded that it is nearly unresponsive. - - The timeout for - run\_coroutine\_threadsafe is the - maximum time a thread can cleanly shut down when zeroconf is closed - out in another thread, which should always be longer than the - underlying thread operation. - -# 0.32.0 - -This release offers 100% line and branch coverage. - - - Made ServiceInfo first question QU (\#852) @bdraco - - We want an immediate response when requesting with ServiceInfo by - asking a QU question; most responders will not delay the response - and respond right away to our question. This also improves - compatibility with split networks as we may not have been able to - see the response otherwise. If the responder has not multicast the - record recently, it may still choose to do so in addition to - responding via unicast - - Reduces traffic when there are multiple zeroconf instances running - on the network running ServiceBrowsers - - If we don't get an answer on the first try, we ask a QM question in - the event, we can't receive a unicast response for some reason - - This change puts ServiceInfo inline with ServiceBrowser which also - asks the first question as QU since ServiceInfo is commonly called - from ServiceBrowser callbacks - - - Limited duplicate packet suppression to 1s intervals (\#841) @bdraco - - Only suppress duplicate packets that happen within the same second. - Legitimate queriers will retry the question if they are suppressed. 
- The limit was reduced to one second to be in line with rfc6762 - - - Made multipacket known answer suppression per interface (\#836) - @bdraco - - The suppression was happening per instance of Zeroconf instead of - per interface. Since the same network can be seen on multiple - interfaces (usually and wifi and ethernet), this would confuse the - multi-packet known answer supression since it was not expecting to - get the same data more than once - - - New ServiceBrowsers now request QU in the first outgoing when - unspecified (\#812) @bdraco - - When we - start a ServiceBrowser and zeroconf has just started up, the known - answer list will be small. By asking a QU question first, it is - likely that we have a large known answer list by the time we ask the - QM question a second later (current default which is likely too low - but would be a breaking change to increase). This reduces the amount - of traffic on the network, and has the secondary advantage that most - responders will answer a QU question without the typical delay - answering QM questions. - - - IPv6 link-local addresses are now qualified with scope\_id (\#343) - @ibygrave - - When a service is advertised on an IPv6 address where the scope is - link local, i.e. fe80::/64 (see RFC 4007) the resolved IPv6 address - must be extended with the scope\_id that identifies through the "%" - symbol the local interface to be used when routing to that address. - A new API parsed\_scoped\_addresses() - is provided to return qualified addresses to avoid breaking - compatibility on the existing parsed\_addresses(). - - - Network adapters that are disconnected are now skipped (\#327) - @ZLJasonG - - - Fixed listeners missing initial packets if Engine starts too quickly - (\#387) @bdraco - - When manually creating a zeroconf.Engine object, it is no longer - started automatically. It must manually be started by calling - .start() on the created object. 
- - The Engine thread is now started after all the listeners have been - added to avoid a race condition where packets could be missed at - startup. - - - Fixed answering matching PTR queries with the ANY query (\#618) - @bdraco - - - Fixed lookup of uppercase names in the registry (\#597) @bdraco - - If the ServiceInfo was registered with an uppercase name and the - query was for a lowercase name, it would not be found and - vice-versa. - - - Fixed unicast responses from any source port (\#598) @bdraco - - Unicast responses were only being sent if the source port was 53, - this prevented responses when testing with dig: - - > dig -p 5353 @224.0.0.251 media-12.local - - The above query will now see a response - - - Fixed queries for AAAA records not being answered (\#616) @bdraco - - - Removed second level caching from ServiceBrowsers (\#737) @bdraco - - The ServiceBrowser had its own cache of the last time it saw a - service that was reimplementing the DNSCache and presenting a source - of truth problem that lead to unexpected queries when the two - disagreed. - - - Fixed server cache not being case-insensitive (\#731) @bdraco - - If the server name had uppercase chars and any of the matching - records were lowercase, and the server would not be found - - - Fixed cache handling of records with different TTLs (\#729) @bdraco - - There should only be one unique record in the cache at a time as - having multiple unique records will different TTLs in the cache can - result in unexpected behavior since some functions returned all - matching records and some fetched from the right side of the list to - return the newest record. Instead we now store the records in a dict - to ensure that the newest record always replaces the same unique - record, and we never have a source of truth problem determining the - TTL of a record from the cache. 
- - - Fixed ServiceInfo with multiple A records (\#725) @bdraco - - If there were multiple A records for the host, ServiceInfo would - always return the last one that was in the incoming packet, which - was usually not the one that was wanted. - - - Fixed stale unique records expiring too quickly (\#706) @bdraco - - Records now expire 1s in the future instead of instant removal. - - tools.ietf.org/html/rfc6762\#section-10.2 Queriers receiving a - Multicast DNS response with a TTL of zero SHOULD NOT immediately - delete the record from the cache, but instead record a TTL of 1 and - then delete the record one second later. In the case of multiple - Multicast DNS responders on the network described in Section 6.6 - above, if one of the responders shuts down and incorrectly sends - goodbye packets for its records, it gives the other cooperating - responders one second to send out their own response to "rescue" the - records before they expire and are deleted. - - - Fixed exception when unregistering a service multiple times (\#679) - @bdraco - - - Added an AsyncZeroconfServiceTypes to mirror ZeroconfServiceTypes to - zeroconf.asyncio (\#658) @bdraco - - - Fixed interface\_index\_to\_ip6\_address not skiping ipv4 adapters - (\#651) @bdraco - - - Added async\_unregister\_all\_services to AsyncZeroconf (\#649) - @bdraco - - - Fixed services not being removed from the registry when calling - unregister\_all\_services (\#644) @bdraco - - There was a race condition where a query could be answered for a - service in the registry, while goodbye packets which could result in - a fresh record being broadcast after the goodbye if a query came in - at just the right time. 
To avoid this, we now remove the services - from the registry right after we generate the goodbye packet - - - Fixed zeroconf exception on load when the system disables IPv6 - (\#624) @bdraco - - - Fixed the QU bit missing from for probe queries (\#609) @bdraco - - The bit should be set per - datatracker.ietf.org/doc/html/rfc6762\#section-8.1 - - - Fixed the TC bit missing for query packets where the known answers - span multiple packets (\#494) @bdraco - - - Fixed packets not being properly separated when exceeding maximum - size (\#498) @bdraco - - Ensure that questions that exceed the max packet size are moved to - the next packet. This fixes DNSQuestions being sent in multiple - packets in violation of: - datatracker.ietf.org/doc/html/rfc6762\#section-7.2 - - Ensure only one resource record is sent when a record exceeds - \_MAX\_MSG\_TYPICAL - datatracker.ietf.org/doc/html/rfc6762\#section-17 - - - Fixed PTR questions asked in uppercase not being answered (\#465) - @bdraco - - - Added Support for context managers in Zeroconf and AsyncZeroconf - (\#284) @shenek - - - Implemented an AsyncServiceBrowser to compliment the sync - ServiceBrowser (\#429) @bdraco - - - Added async\_get\_service\_info to AsyncZeroconf and async\_request - to AsyncServiceInfo (\#408) @bdraco - - - Implemented allowing passing in a sync Zeroconf instance to - AsyncZeroconf (\#406) @bdraco - - - Fixed IPv6 setup under MacOS when binding to "" (\#392) @bdraco - - - Fixed ZeroconfServiceTypes.find not always cancels the - ServiceBrowser (\#389) @bdraco - - There was a short window where the ServiceBrowser thread could be - left running after Zeroconf is closed because the .join() was never - waited for when a new Zeroconf object was created - - - Fixed duplicate packets triggering duplicate updates (\#376) @bdraco - - If TXT or SRV records update was already processed and then received - again, it was possible for a second update to be called back in the - ServiceBrowser - - - Fixed 
ServiceStateChange.Updated event happening for IPs that - already existed (\#375) @bdraco - - - Fixed RFC6762 Section 10.2 paragraph 2 compliance (\#374) @bdraco - - - Reduced length of ServiceBrowser thread name with many types (\#373) - @bdraco - - - Fixed empty answers being added in ServiceInfo.request (\#367) - @bdraco - - - Fixed ServiceInfo not populating all AAAA records (\#366) @bdraco - - Use get\_all\_by\_details to ensure all records are loaded into - addresses. - - Only load A/AAAA records from the cache once in load\_from\_cache if - there is a SRV record present - - Move duplicate code that checked if the ServiceInfo was complete - into its own function - - - Fixed a case where the cache list can change during iteration - (\#363) @bdraco - - - Return task objects created by AsyncZeroconf (\#360) @nocarryr - -Traffic Reduction: - - - Added support for handling QU questions (\#621) @bdraco - - Implements RFC 6762 sec 5.4: Questions Requesting Unicast Responses - datatracker.ietf.org/doc/html/rfc6762\#section-5.4 - - - Implemented protect the network against excessive packet flooding - (\#619) @bdraco - - - Additionals are now suppressed when they are already in the answers - section (\#617) @bdraco - - - Additionals are no longer included when the answer is suppressed by - known-answer suppression (\#614) @bdraco - - - Implemented multi-packet known answer supression (\#687) @bdraco - - Implements datatracker.ietf.org/doc/html/rfc6762\#section-7.2 - - - Implemented efficient bucketing of queries with known answers - (\#698) @bdraco - - - Implemented duplicate question suppression (\#770) @bdraco - - - -Technically backwards incompatible: - - - Update internal version check to match docs (3.6+) (\#491) @bdraco - - Python version earlier then 3.6 were likely broken with zeroconf - already, however, the version is now explicitly checked. 
- - - Update python compatibility as PyPy3 7.2 is required (\#523) @bdraco - -Backwards incompatible: - - - Drop oversize packets before processing them (\#826) @bdraco - - Oversized packets can quickly overwhelm the system and deny service - to legitimate queriers. In practice, this is usually due to broken - mDNS implementations rather than malicious actors. - - - Guard against excessive ServiceBrowser queries from PTR records - significantly lowerthan recommended (\#824) @bdraco - - We now enforce a minimum TTL for PTR records to avoid - ServiceBrowsers generating excessive queries refresh queries. Apple - uses a 15s minimum TTL, however, we do not have the same level of - rate limit and safeguards, so we use 1/4 of the recommended value. - - - RecordUpdateListener now uses async\_update\_records instead of - update\_record (\#419, \#726) @bdraco - - This allows the listener to receive all the records that have been - updated in a single transaction such as a packet or cache expiry. - - update\_record has been deprecated in favor of - async\_update\_records A compatibility shim exists to ensure classes - that use RecordUpdateListener as a base class continue to have - update\_record called, however, they should be updated as soon as - possible. - - A new method async\_update\_records\_complete is now called on each - listener when all listeners have completed processing updates and - the cache has been updated. This allows ServiceBrowsers to delay - calling handlers until they are sure the cache has been updated as - its a common pattern to call for ServiceInfo when a ServiceBrowser - handler fires. - - The async\_ prefix was chosen to make it clear that these functions - run in the eventloop and should never do blocking I/O. Before 0.32+ - these functions ran in a select() loop and should not have been - doing any blocking I/O, but it was not clear to implementors that - I/O would block the loop. 
- - - Pass both the new and old records to async\_update\_records (\#792) - @bdraco - - Pass the old\_record (cached) as the value and the new\_record - (wire) to async\_update\_records instead of forcing each consumer to - check the cache since we will always have the old\_record when - generating the async\_update\_records call. This avoids the overhead - of multiple cache lookups for each listener. - -# 0.31.0 - - - Separated cache loading from I/O in ServiceInfo and fixed cache - lookup (\#356), thanks to J. Nick Koston. - - The ServiceInfo class gained a load\_from\_cache() method to only - fetch information from Zeroconf cache (if it exists) with no IO - performed. Additionally this should reduce IO in cases where cache - lookups were previously incorrectly failing. - -# 0.30.0 - - - Some nice refactoring work including removal of the Reaper thread, - thanks to J. Nick Koston. - - Fixed a Windows-specific The requested address is not valid in its - context regression, thanks to Timothee ‘TTimo’ Besset and J. Nick - Koston. - - Provided an asyncio-compatible service registration layer (in the - zeroconf.asyncio module), thanks to J. Nick Koston. - -# 0.29.0 - - - A single socket is used for listening on responding when - InterfaceChoice.Default is chosen. - Thanks to J. Nick Koston. - -Backwards incompatible: - - - Dropped Python 3.5 support - -# 0.28.8 - - - Fixed the packet generation when multiple packets are necessary, - previously invalid packets were generated sometimes. Patch thanks to - J. Nick Koston. - -# 0.28.7 - - - Fixed the IPv6 address rendering in the browser example, thanks to - Alexey Vazhnov. - - Fixed a crash happening when a service is added or removed during - handle\_response and improved exception handling, thanks to J. Nick - Koston. - -# 0.28.6 - - - Loosened service name validation when receiving from the network - this lets us handle some real world devices previously causing - errors, thanks to J. Nick Koston. 
- -# 0.28.5 - - - Enabled ignoring duplicated messages which decreases CPU usage, - thanks to J. Nick Koston. - - Fixed spurious AttributeError: module 'unittest' has no attribute - 'mock' in tests. - -# 0.28.4 - - - Improved cache reaper performance significantly, thanks to J. Nick - Koston. - - Added ServiceListener to \_\_all\_\_ as it's part of the public API, - thanks to Justin Nesselrotte. +## v0.36.0 (2021-08-16) -# 0.28.3 - - Reduced a time an internal lock is held which should eliminate - deadlocks in high-traffic networks, thanks to J. Nick Koston. +## v0.35.1 (2021-08-15) -# 0.28.2 - - Stopped asking questions we already have answers for in cache, - thanks to Paul Daumlechner. - - Removed initial delay before querying for service info, thanks to - Erik Montnemery. +## v0.35.0 (2021-08-13) -# 0.28.1 - - Fixed a resource leak connected to using ServiceBrowser with - multiple types, thanks to - 10. Nick Koston. +## v0.34.3 (2021-08-09) -# 0.28.0 - - Improved Windows support when using socket errno checks, thanks to - Sandy Patterson. - - Added support for passing text addresses to ServiceInfo. - - Improved logging (includes fixing an incorrect logging call) - - Improved Windows compatibility by using Adapter.index from ifaddr, - thanks to PhilippSelenium. - - Improved Windows compatibility by stopping using - socket.if\_nameindex. - - Fixed an OS X edge case which should also eliminate a memory leak, - thanks to Emil Styrke. +## v0.34.2 (2021-08-09) -Technically backwards incompatible: - - `ifaddr` 0.1.7 or newer is required now. 
+## v0.34.1 (2021-08-08) -## 0.27.1 - - Improved the logging situation (includes fixing a false-positive - "packets() made no progress adding records", thanks to Greg Badros) +## v0.34.0 (2021-08-08) -## 0.27.0 - - Large multi-resource responses are now split into separate packets - which fixes a bad mdns-repeater/ChromeCast Audio interaction ending - with ChromeCast Audio crash (and possibly some others) and improves - RFC 6762 compliance, thanks to Greg Badros - - Added a warning presented when the listener passed to ServiceBrowser - lacks update\_service() callback - - Added support for finding all services available in the browser - example, thanks to Perry Kunder +## v0.33.4 (2021-08-06) -Backwards incompatible: - - Removed previously deprecated ServiceInfo address constructor - parameter and property +## v0.33.3 (2021-08-05) -## 0.26.3 - - Improved readability of logged incoming data, thanks to Erik - Montnemery - - Threads are given unique names now to aid debugging, thanks to Erik - Montnemery - - Fixed a regression where get\_service\_info() called within a - listener add\_service method would deadlock, timeout and incorrectly - return None, fix thanks to Erik Montnemery, but Matt Saxon and - Hmmbob were also involved in debugging it. +## v0.33.2 (2021-07-28) -## 0.26.2 - - Added support for multiple types to ServiceBrowser, thanks to J. - Nick Koston - - Fixed a race condition where a listener gets a message before the - lock is created, thanks to - 10. Nick Koston +## v0.33.1 (2021-07-18) -## 0.26.1 - - Fixed a performance regression introduced in 0.26.0, thanks to J. 
- Nick Koston (this is close in spirit to an optimization made in - 0.24.5 by the same author) +## v0.33.0 (2021-07-18) -## 0.26.0 - - Fixed a regression where service update listener wasn't called on IP - address change (it's called on SRV/A/AAAA record changes now), - thanks to Matt Saxon +## v0.32.1 (2021-07-05) -Technically backwards incompatible: - - Service update hook is no longer called on service addition (service - added hook is still called), this is related to the fix above +## v0.32.0 (2021-06-30) -## 0.25.1 - - Eliminated 5s hangup when calling Zeroconf.close(), thanks to Erik - Montnemery +## v0.29.0 (2021-03-25) -## 0.25.0 - - Reverted uniqueness assertions when browsing, they caused a - regression +## v0.28.8 (2021-01-04) -Backwards incompatible: - - Rationalized handling of TXT records. Non-bytes values are converted - to str and encoded to bytes using UTF-8 now, None values mean - value-less attributes. When receiving TXT records no decoding is - performed now, keys are always bytes and values are either bytes or - None in value-less attributes. +## v0.28.7 (2020-12-13) -## 0.24.5 - - Fixed issues with shared records being used where they shouldn't be - (TXT, SRV, A records are unique now), thanks to Matt Saxon - - Stopped unnecessarily excluding host-only interfaces from - InterfaceChoice.all as they don't forbid multicast, thanks to - Andreas Oberritter - - Fixed repr() of IPv6 DNSAddress, thanks to Aldo Hoeben - - Removed duplicate update messages sent to listeners, thanks to Matt - Saxon - - Added support for cooperating responders, thanks to Matt Saxon - - Optimized handle\_response cache check, thanks to J. Nick Koston - - Fixed memory leak in DNSCache, thanks to J. 
Nick Koston +## v0.28.6 (2020-10-13) -## 0.24.4 - - Fixed resetting TTL in DNSRecord.reset\_ttl(), thanks to Matt Saxon - - Improved various DNS class' string representations, thanks to Jay - Hogg +## v0.28.5 (2020-09-11) -## 0.24.3 - - Fixed import-time "TypeError: 'ellipsis' object is not iterable." on - CPython 3.5.2 +## v0.28.4 (2020-09-06) -## 0.24.2 - - Added support for AWDL interface on macOS (needed and used by the - opendrop project but should be useful in general), thanks to Milan - Stute - - Added missing type hints +## v0.28.3 (2020-08-31) -## 0.24.1 - - Applied some significant performance optimizations, thanks to Jaime - van Kessel for the patch and to Ghostkeeper for performance - measurements - - Fixed flushing outdated cache entries when incoming record is - unique, thanks to Michael Hu - - Fixed handling updates of TXT records (they'd not get recorded - previously), thanks to Michael Hu +## v0.28.2 (2020-08-27) -## 0.24.0 - - Added IPv6 support, thanks to Dmitry Tantsur - - Added additional recommended records to PTR responses, thanks to - Scott Mertz - - Added handling of ENOTCONN being raised during shutdown when using - Eventlet, thanks to Tamás Nepusz - - Included the py.typed marker in the package so that type checkers - know to use type hints from the source code, thanks to Dmitry - Tantsur +## v0.28.1 (2020-08-17) -## 0.23.0 - - Added support for MyListener call getting updates to service TXT - records, thanks to Matt Saxon - - Added support for multiple addresses when publishing a service, - getting/setting single address has become deprecated. 
Change thanks - to Dmitry Tantsur +## v0.28.0 (2020-07-07) -Backwards incompatible: - - Dropped Python 3.4 support +## v0.27.1 (2020-06-05) -## 0.22.0 - - A lot of maintenance work (tooling, typing coverage and - improvements, spelling) done, thanks to Ville Skyttä - - Provided saner defaults in ServiceInfo's constructor, thanks to - Jorge Miranda - - Fixed service removal packets not being sent on shutdown, thanks to - Andrew Bonney - - Added a way to define TTL-s through ServiceInfo contructor - parameters, thanks to Andrew Bonney +## v0.27.0 (2020-05-27) -Technically backwards incompatible: - - Adjusted query intervals to match RFC 6762, thanks to Andrew Bonney - - Made default TTL-s match RFC 6762, thanks to Andrew Bonney +## v0.26.3 (2020-05-26) -## 0.21.3 - - This time really allowed incoming service names to contain - underscores (patch released as part of 0.21.0 was defective) +## v0.26.1 (2020-05-06) -## 0.21.2 - - Fixed import-time typing-related TypeError when older typing version - is used +## v0.26.0 (2020-04-26) -## 0.21.1 - - Fixed installation on Python 3.4 (we use typing now but there was no - explicit dependency on it) +## v0.25.1 (2020-04-14) -## 0.21.0 - - Added an error message when importing the package using unsupported - Python version - - Fixed TTL handling for published service - - Implemented unicast support - - Fixed WSL (Windows Subsystem for Linux) compatibility - - Fixed occasional UnboundLocalError issue - - Fixed UTF-8 multibyte name compression - - Switched from netifaces to ifaddr (pure Python) - - Allowed incoming service names to contain underscores +## v0.25.0 (2020-04-03) -## 0.20.0 - - Dropped support for Python 2 (this includes PyPy) and 3.3 - - Fixed some class' equality operators - - ServiceBrowser entries are being refreshed when 'stale' now - - Cache returns new records first now instead of last +## v0.24.5 (2020-03-08) -## 0.19.1 - - Allowed installation with netifaces \>= 0.10.6 (a bug that was - concerning us got 
fixed) +## v0.24.4 (2019-12-30) -## 0.19.0 - - Technically backwards incompatible - restricted netifaces dependency - version to work around a bug, see - for details +## v0.24.3 (2019-12-23) -## 0.18.0 - - Dropped Python 2.6 support - - Improved error handling inside code executed when Zeroconf object is - being closed +## v0.24.2 (2019-12-17) -## 0.17.7 - - Better Handling of DNS Incoming Packets parsing exceptions - - Many exceptions will now log a warning the first time they are seen - - Catch and log sendto() errors - - Fix/Implement duplicate name change - - Fix overly strict name validation introduced in 0.17.6 - - Greatly improve handling of oversized packets including: - - Implement name compression per RFC1035 - - Limit size of generated packets to 9000 bytes as per RFC6762 - - Better handle over sized incoming packets - - Increased test coverage to 95% +## v0.24.1 (2019-12-16) -## 0.17.6 - - Many improvements to address race conditions and exceptions during - ZC() startup and shutdown, thanks to: morpav, veawor, justingiorgi, - herczy, stephenrauch - - Added more test coverage: strahlex, stephenrauch - - Stephen Rauch contributed: - - Speed up browser startup - - Add ZeroconfServiceTypes() query class to discover all - advertised service types - - Add full validation for service names, types and subtypes - - Fix for subtype browsing - - Fix DNSHInfo support +## v0.24.0 (2019-11-19) -## 0.17.5 - - Fixed OpenBSD compatibility, thanks to Alessio Sergi - - Fixed race condition on ServiceBrowser startup, thanks to gbiddison - - Fixed installation on some Python 3 systems, thanks to Per Sandström - - Fixed "size change during iteration" bug on Python 3, thanks to - gbiddison +## v0.23.0 (2019-06-04) -## 0.17.4 - - Fixed support for Linux kernel versions \< 3.9 (thanks to Giovanni - Harting and Luckydonald, GitHub pull request \#26) +## v0.22.0 (2019-04-27) -## 0.17.3 - - Fixed DNSText repr on Python 3 (it'd crash when the text was longer - than 10 bytes), 
thanks to Paulus Schoutsen for the patch, GitHub - pull request \#24 +## v0.21.3 (2018-09-21) -## 0.17.2 - - Fixed installation on Python 3.4.3+ (was failing because of enum34 - dependency which fails to install on 3.4.3+, changed to depend on - enum-compat instead; thanks to Michael Brennan for the original - patch, GitHub pull request \#22) +## v0.21.2 (2018-09-20) -## 0.17.1 - - Fixed EADDRNOTAVAIL when attempting to use dummy network interfaces - on Windows, thanks to daid +## v0.21.1 (2018-09-17) -## 0.17.0 - - Added some Python dependencies so it's not zero-dependencies anymore - - Improved exception handling (it'll be quieter now) - - Messages are listened to and sent using all available network - interfaces by default (configurable); thanks to Marcus Müller - - Started using logging more freely - - Fixed a bug with binary strings as property values being converted - to False (); - thanks to Dr. Seuss - - Added new `ServiceBrowser` event handler interface (see the - examples) - - PyPy3 now officially supported - - Fixed ServiceInfo repr on Python 3, thanks to Yordan Miladinov +## v0.21.0 (2018-09-16) -## 0.16.0 - - Set up Python logging and started using it - - Cleaned up code style (includes migrating from camel case to snake - case) +## v0.20.0 (2018-02-21) -## 0.15.1 - - Fixed handling closed socket (GitHub \#4) +## v0.19.1 (2017-06-13) -## 0.15 - - Forked by Jakub Stasiak - - Made Python 3 compatible - - Added setup script, made installable by pip and uploaded to PyPI - - Set up Travis build - - Reformatted the code and moved files around - - Stopped catching BaseException in several places, that could hide - errors - - Marked threads as daemonic, they won't keep application alive now +## v0.19.0 (2017-03-21) -## 0.14 - - Fix for SOL\_IP undefined on some systems - thanks Mike Erdely. - - Cleaned up examples. - - Lowercased module name. +## v0.18.0 (2017-02-03) -## 0.13 - - Various minor changes; see git for details. 
- - No longer compatible with Python 2.2. Only tested with 2.5-2.7. - - Fork by William McBrine. +## v0.17.7 (2017-02-01) -## 0.12 - - allow selection of binding interface - - typo fix - Thanks A. M. Kuchlingi - - removed all use of word 'Rendezvous' - this is an API change +## v0.17.6 (2016-07-08) -## 0.11 +### Testing - - correction to comments for addListener method - - support for new record types seen from OS X - - IPv6 address - - hostinfo - - ignore unknown DNS record types - - fixes to name decoding - - works alongside other processes using port 5353 (e.g. on Mac OS X) - - tested against Mac OS X 10.3.2's mDNSResponder - - corrections to removal of list entries for service browser +- Added test for DNS-SD subtype discovery + ([`914241b`](https://github.com/python-zeroconf/python-zeroconf/commit/914241b92c3097669e1e8c1a380f6c2f23a14cf8)) -## 0.10 - - Jonathon Paisley contributed these corrections: - - always multicast replies, even when query is unicast - - correct a pointer encoding problem - - can now write records in any order - - traceback shown on failure - - better TXT record parsing - - server is now separate from name - - can cancel a service browser - - modified some unit tests to accommodate these changes +## v0.17.5 (2016-03-14) -## 0.09 - - remove all records on service unregistration - - fix DOS security problem with readName +## v0.17.4 (2015-09-22) -## 0.08 - - changed licensing to LGPL +## v0.17.3 (2015-08-19) -## 0.07 - - faster shutdown on engine - - pointer encoding of outgoing names - - ServiceBrowser now works - - new unit tests +## v0.17.2 (2015-07-12) -## 0.06 - - small improvements with unit tests - - added defined exception types - - new style objects - - fixed hostname/interface problem - - fixed socket timeout problem - - fixed add\_service\_listener() typo bug - - using select() for socket reads - - tested on Debian unstable with Python 2.2.2 +## v0.17.1 (2015-04-10) -## 0.05 - - ensure case insensitivty on domain names - - 
support for unicast DNS queries +## v0.17.0 (2015-04-10) -## 0.04 - - added some unit tests - - added \_\_ne\_\_ adjuncts where required - - ensure names end in '.local.' - - timeout on receiving socket for clean shutdown +## v0.15.1 (2014-07-10) diff --git a/README.rst b/README.rst index eba4d7feb..c27833f80 100644 --- a/README.rst +++ b/README.rst @@ -10,6 +10,14 @@ python-zeroconf .. image:: https://codecov.io/gh/python-zeroconf/python-zeroconf/branch/master/graph/badge.svg :target: https://codecov.io/gh/python-zeroconf/python-zeroconf +.. image:: https://img.shields.io/endpoint?url=https://codspeed.io/badge.json + :target: https://codspeed.io/python-zeroconf/python-zeroconf + :alt: Codspeed.io status for python-zeroconf + +.. image:: https://readthedocs.org/projects/python-zeroconf/badge/?version=latest + :target: https://python-zeroconf.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + `Documentation `_. This is fork of pyzeroconf, Multicast DNS Service Discovery for Python, @@ -45,8 +53,8 @@ Compared to some other Zeroconf/Bonjour/Avahi Python packages, python-zeroconf: Python compatibility -------------------- -* CPython 3.8+ -* PyPy3.8 7.3+ +* CPython 3.9+ +* PyPy 3.9+ Versioning ---------- diff --git a/bench/create_destory.py b/bench/create_destory.py index f1941423c..6fde9ebe3 100644 --- a/bench/create_destory.py +++ b/bench/create_destory.py @@ -1,4 +1,5 @@ """Benchmark for AsyncZeroconf.""" + import asyncio import time diff --git a/bench/incoming.py b/bench/incoming.py index 233f19e94..eb35f8a92 100644 --- a/bench/incoming.py +++ b/bench/incoming.py @@ -1,7 +1,7 @@ """Benchmark for DNSIncoming.""" + import socket import timeit -from typing import List from zeroconf import ( DNSAddress, @@ -14,7 +14,7 @@ ) -def generate_packets() -> List[bytes]: +def generate_packets() -> list[bytes]: out = DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA) address = socket.inet_pton(socket.AF_INET, "192.168.208.5") @@ -121,8 +121,8 @@ def 
generate_packets() -> List[bytes]: const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, const._DNS_OTHER_TTL, - b'\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1' - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==', + b"\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1" + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", ), 0, ) @@ -177,7 +177,7 @@ def generate_packets() -> List[bytes]: def parse_incoming_message() -> None: for packet in packets: - DNSIncoming(packet).answers + DNSIncoming(packet).answers # noqa: B018 break diff --git a/bench/outgoing.py b/bench/outgoing.py index d832a05b4..8c8097cbf 100644 --- a/bench/outgoing.py +++ b/bench/outgoing.py @@ -1,4 +1,5 @@ """Benchmark for DNSOutgoing.""" + import socket import timeit @@ -113,8 +114,8 @@ def generate_packets() -> DNSOutgoing: const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, const._DNS_OTHER_TTL, - b'\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1' - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==', + b"\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1" + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", ), 0, ) diff --git a/bench/txt_properties.py b/bench/txt_properties.py index 792d5312d..f9adeccf1 100644 --- a/bench/txt_properties.py +++ b/bench/txt_properties.py @@ -14,7 +14,7 @@ def process_properties() -> None: info._properties = None - info.properties + info.properties # noqa: B018 count = 100000 diff --git a/build_ext.py b/build_ext.py index 0f02f53a4..ff088f830 100644 --- a/build_ext.py +++ b/build_ext.py @@ -1,54 +1,73 @@ """Build optional cython modules.""" +import logging import os from distutils.command.build_ext import build_ext from typing import Any +try: + from setuptools import Extension +except ImportError: + from distutils.core import Extension + +_LOGGER = logging.getLogger(__name__) + +TO_CYTHONIZE = [ + "src/zeroconf/_dns.py", + "src/zeroconf/_cache.py", + "src/zeroconf/_history.py", + 
"src/zeroconf/_record_update.py", + "src/zeroconf/_listener.py", + "src/zeroconf/_protocol/incoming.py", + "src/zeroconf/_protocol/outgoing.py", + "src/zeroconf/_handlers/answers.py", + "src/zeroconf/_handlers/record_manager.py", + "src/zeroconf/_handlers/multicast_outgoing_queue.py", + "src/zeroconf/_handlers/query_handler.py", + "src/zeroconf/_services/__init__.py", + "src/zeroconf/_services/browser.py", + "src/zeroconf/_services/info.py", + "src/zeroconf/_services/registry.py", + "src/zeroconf/_updates.py", + "src/zeroconf/_utils/ipaddress.py", + "src/zeroconf/_utils/time.py", +] + +EXTENSIONS = [ + Extension( + ext.removeprefix("src/").removesuffix(".py").replace("/", "."), + [ext], + language="c", + extra_compile_args=["-O3", "-g0"], + ) + for ext in TO_CYTHONIZE +] + class BuildExt(build_ext): def build_extensions(self) -> None: try: super().build_extensions() except Exception: - pass + _LOGGER.info("Failed to build cython extensions") def build(setup_kwargs: Any) -> None: - if os.environ.get("SKIP_CYTHON", False): + if os.environ.get("SKIP_CYTHON"): return try: from Cython.Build import cythonize setup_kwargs.update( - dict( - ext_modules=cythonize( - [ - "src/zeroconf/_dns.py", - "src/zeroconf/_cache.py", - "src/zeroconf/_history.py", - "src/zeroconf/_record_update.py", - "src/zeroconf/_listener.py", - "src/zeroconf/_protocol/incoming.py", - "src/zeroconf/_protocol/outgoing.py", - "src/zeroconf/_handlers/answers.py", - "src/zeroconf/_handlers/record_manager.py", - "src/zeroconf/_handlers/multicast_outgoing_queue.py", - "src/zeroconf/_handlers/query_handler.py", - "src/zeroconf/_services/__init__.py", - "src/zeroconf/_services/browser.py", - "src/zeroconf/_services/info.py", - "src/zeroconf/_services/registry.py", - "src/zeroconf/_updates.py", - "src/zeroconf/_utils/ipaddress.py", - "src/zeroconf/_utils/time.py", - ], + { + "ext_modules": cythonize( + EXTENSIONS, compiler_directives={"language_level": "3"}, # Python 3 ), - cmdclass=dict(build_ext=BuildExt), - 
) + "cmdclass": {"build_ext": BuildExt}, + } ) setup_kwargs["exclude_package_data"] = {pkg: ["*.c"] for pkg in setup_kwargs["packages"]} except Exception: if os.environ.get("REQUIRE_CYTHON"): raise - pass diff --git a/commitlint.config.mjs b/commitlint.config.mjs new file mode 100644 index 000000000..deb029abf --- /dev/null +++ b/commitlint.config.mjs @@ -0,0 +1,8 @@ +export default { + extends: ["@commitlint/config-conventional"], + rules: { + "header-max-length": [0, "always", Infinity], + "body-max-line-length": [0, "always", Infinity], + "footer-max-line-length": [0, "always", Infinity], + }, +}; diff --git a/docs/Makefile b/docs/Makefile index a8d581c27..d4bb2cbb9 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -1,177 +1,20 @@ -# Makefile for Sphinx documentation +# Minimal makefile for Sphinx documentation # -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . BUILDDIR = _build -# User-friendly check for sphinx-build -ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) -$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) -endif - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
- -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext - +# Put it first so that "make" without argument is like "make help". help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " xml to make Docutils-native XML files" - @echo " pseudoxml to make pseudoxml-XML files for display purposes" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. 
The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/zeroconf.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/zeroconf.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/zeroconf" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/zeroconf" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 
- -latexpdfja: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through platex and dvipdfmx..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -xml: - $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 
+.PHONY: help Makefile -pseudoxml: - $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml - @echo - @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/_ext/zeroconfautodocfix.py b/docs/_ext/zeroconfautodocfix.py new file mode 100644 index 000000000..8163a9c6c --- /dev/null +++ b/docs/_ext/zeroconfautodocfix.py @@ -0,0 +1,19 @@ +""" +Must be included after 'sphinx.ext.autodoc'. Fixes unwanted 'alias of' behavior. +""" + +# pylint: disable=import-error +from sphinx.application import Sphinx + + +def skip_member(app, what, name, obj, skip: bool, options) -> bool: # type: ignore[no-untyped-def] + return ( + skip + or getattr(obj, "__doc__", None) is None + or getattr(obj, "__private__", False) is True + or getattr(getattr(obj, "__func__", None), "__private__", False) is True + ) + + +def setup(app: Sphinx) -> None: + app.connect("autodoc-skip-member", skip_member) diff --git a/docs/conf.py b/docs/conf.py index afaa510e0..11a0f2d43 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,247 +1,76 @@ +# Configuration file for the Sphinx documentation builder. # -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -from typing import Any, Dict - -import zeroconf - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. 
-# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ----------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = 'python-zeroconf' -copyright = 'python-zeroconf authors' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = zeroconf.__version__ -# The full version, including alpha/beta/rc tags. -release = version - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['_build'] - -# The reST default role (used for this markup: `text`) to use for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. 
function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. 
-# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html +import sys +from collections.abc import Sequence +from pathlib import Path + +# If your extensions are in another directory, add it here. If the directory +# is relative to the documentation root, use Path.absolute to make it absolute. +sys.path.append(str(Path(__file__).parent / "_ext")) +sys.path.insert(0, str(Path(__file__).parent.parent)) + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +project = "python-zeroconf" +project_copyright = "python-zeroconf authors" +author = "python-zeroconf authors" + +try: + import zeroconf + + # The short X.Y version. + version = zeroconf.__version__ + # The full version, including alpha/beta/rc tags. + release = version +except ImportError: + version = "" + release = "" + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + +extensions = [ + "sphinx.ext.todo", # Allow todo comments. + "sphinx.ext.viewcode", # Link to source code. + "sphinx.ext.autodoc", + "zeroconfautodocfix", # Must be after "sphinx.ext.autodoc" + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", # Enable the overage report. + "sphinx.ext.duration", # Show build duration at the end. + "sphinx_rtd_theme", # Required for theme. 
+] + +templates_path = ["_templates"] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] + +# -- Options for HTML output ------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output + +html_theme = "sphinx_rtd_theme" +html_static_path = ["_static"] # Custom sidebar templates, maps document names to template names. -html_sidebars = { - 'index': ('sidebar.html', 'sourcelink.html', 'searchbox.html'), - '**': ('localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'), +html_sidebars: dict[str, Sequence[str]] = { + "index": ("sidebar.html", "sourcelink.html", "searchbox.html"), + "**": ("localtoc.html", "relations.html", "sourcelink.html", "searchbox.html"), } -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. 
-htmlhelp_basename = 'zeroconfdoc' - - -# -- Options for LaTeX output -------------------------------------------------- - -latex_elements: Dict[str, Any] = {} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -# latex_documents = [] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False +# -- Options for RTD theme --------------------------------------------------- +# https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output -------------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -# man_pages = [] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------------ - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -# texinfo_documents = [] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. 
-# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'http://docs.python.org/': None} +# html_theme_options = {} +# -- Options for HTML help output -------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-help-output -def setup(app): # type: ignore[no-untyped-def] - app.connect('autodoc-skip-member', skip_member) +htmlhelp_basename = "zeroconfdoc" +# -- Options for intersphinx extension --------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html#configuration -def skip_member(app, what, name, obj, skip, options): # type: ignore[no-untyped-def] - return ( - skip - or getattr(obj, '__doc__', None) is None - or getattr(obj, '__private__', False) is True - or getattr(getattr(obj, '__func__', None), '__private__', False) is True - ) +intersphinx_mapping = { + "python": ("https://docs.python.org/3", None), +} diff --git a/docs/index.rst b/docs/index.rst index 8929f417b..7899fad9e 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -16,7 +16,7 @@ PyPI (installable, stable distributions): https://pypi.org/project/zeroconf. You pip install zeroconf -python-zeroconf works with CPython 3.6+ and PyPy 3 implementing Python 3.6+. +python-zeroconf works with CPython 3.8+ and PyPy 3 implementing Python 3.8+. Contents -------- diff --git a/examples/async_apple_scanner.py b/examples/async_apple_scanner.py old mode 100644 new mode 100755 index ff558f82e..00744b5c5 --- a/examples/async_apple_scanner.py +++ b/examples/async_apple_scanner.py @@ -1,11 +1,13 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python -""" Scan for apple devices. 
""" +"""Scan for apple devices.""" + +from __future__ import annotations import argparse import asyncio import logging -from typing import Any, Optional, cast +from typing import Any, cast from zeroconf import DNSQuestionType, IPVersion, ServiceStateChange, Zeroconf from zeroconf.asyncio import AsyncServiceBrowser, AsyncServiceInfo, AsyncZeroconf @@ -32,6 +34,8 @@ log = logging.getLogger(__name__) +_PENDING_TASKS: set[asyncio.Task] = set() + def async_on_service_state_change( zeroconf: Zeroconf, service_type: str, name: str, state_change: ServiceStateChange @@ -41,20 +45,24 @@ def async_on_service_state_change( return base_name = name[: -len(service_type) - 1] device_name = f"{base_name}.{DEVICE_INFO_SERVICE}" - asyncio.ensure_future(_async_show_service_info(zeroconf, service_type, name)) + task = asyncio.ensure_future(_async_show_service_info(zeroconf, service_type, name)) + _PENDING_TASKS.add(task) + task.add_done_callback(_PENDING_TASKS.discard) # Also probe for device info - asyncio.ensure_future(_async_show_service_info(zeroconf, DEVICE_INFO_SERVICE, device_name)) + task = asyncio.ensure_future(_async_show_service_info(zeroconf, DEVICE_INFO_SERVICE, device_name)) + _PENDING_TASKS.add(task) + task.add_done_callback(_PENDING_TASKS.discard) async def _async_show_service_info(zeroconf: Zeroconf, service_type: str, name: str) -> None: info = AsyncServiceInfo(service_type, name) await info.async_request(zeroconf, 3000, question_type=DNSQuestionType.QU) - print("Info from zeroconf.get_service_info: %r" % (info)) + print(f"Info from zeroconf.get_service_info: {info!r}") if info: - addresses = ["%s:%d" % (addr, cast(int, info.port)) for addr in info.parsed_addresses()] - print(" Name: %s" % name) - print(" Addresses: %s" % ", ".join(addresses)) - print(" Weight: %d, priority: %d" % (info.weight, info.priority)) + addresses = [f"{addr}:{cast(int, info.port)}" for addr in info.parsed_addresses()] + print(f" Name: {name}") + print(f" Addresses: {', '.join(addresses)}") + 
print(f" Weight: {info.weight}, priority: {info.priority}") print(f" Server: {info.server}") if info.properties: print(" Properties are:") @@ -64,25 +72,31 @@ async def _async_show_service_info(zeroconf: Zeroconf, service_type: str, name: print(" No properties") else: print(" No info") - print('\n') + print("\n") class AsyncAppleScanner: def __init__(self, args: Any) -> None: self.args = args - self.aiobrowser: Optional[AsyncServiceBrowser] = None - self.aiozc: Optional[AsyncZeroconf] = None + self.aiobrowser: AsyncServiceBrowser | None = None + self.aiozc: AsyncZeroconf | None = None async def async_run(self) -> None: self.aiozc = AsyncZeroconf(ip_version=ip_version) await self.aiozc.zeroconf.async_wait_for_start() - print("\nBrowsing %s service(s), press Ctrl-C to exit...\n" % ALL_SERVICES) - kwargs = {'handlers': [async_on_service_state_change], 'question_type': DNSQuestionType.QU} + print(f"\nBrowsing {ALL_SERVICES} service(s), press Ctrl-C to exit...\n") + kwargs = { + "handlers": [async_on_service_state_change], + "question_type": DNSQuestionType.QU, + } if self.args.target: kwargs["addr"] = self.args.target - self.aiobrowser = AsyncServiceBrowser(self.aiozc.zeroconf, ALL_SERVICES, **kwargs) # type: ignore - while True: - await asyncio.sleep(1) + self.aiobrowser = AsyncServiceBrowser( + self.aiozc.zeroconf, + ALL_SERVICES, + **kwargs, # type: ignore[arg-type] + ) + await asyncio.Event().wait() async def async_close(self) -> None: assert self.aiozc is not None @@ -91,19 +105,19 @@ async def async_close(self) -> None: await self.aiozc.async_close() -if __name__ == '__main__': +if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG) parser = argparse.ArgumentParser() - parser.add_argument('--debug', action='store_true') + parser.add_argument("--debug", action="store_true") version_group = parser.add_mutually_exclusive_group() - version_group.add_argument('--target', help='Unicast target') - version_group.add_argument('--v6', action='store_true') - 
version_group.add_argument('--v6-only', action='store_true') + version_group.add_argument("--target", help="Unicast target") + version_group.add_argument("--v6", action="store_true") + version_group.add_argument("--v6-only", action="store_true") args = parser.parse_args() if args.debug: - logging.getLogger('zeroconf').setLevel(logging.DEBUG) + logging.getLogger("zeroconf").setLevel(logging.DEBUG) if args.v6: ip_version = IPVersion.All elif args.v6_only: diff --git a/examples/async_browser.py b/examples/async_browser.py old mode 100644 new mode 100755 index f7fb71514..58193f705 --- a/examples/async_browser.py +++ b/examples/async_browser.py @@ -1,14 +1,16 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python -""" Example of browsing for a service. +"""Example of browsing for a service. The default is HTTP and HAP; use --find to search for all available services in the network """ +from __future__ import annotations + import argparse import asyncio import logging -from typing import Any, Optional, cast +from typing import Any, cast from zeroconf import IPVersion, ServiceStateChange, Zeroconf from zeroconf.asyncio import ( @@ -18,6 +20,8 @@ AsyncZeroconfServiceTypes, ) +_PENDING_TASKS: set[asyncio.Task] = set() + def async_on_service_state_change( zeroconf: Zeroconf, service_type: str, name: str, state_change: ServiceStateChange @@ -25,18 +29,20 @@ def async_on_service_state_change( print(f"Service {name} of type {service_type} state changed: {state_change}") if state_change is not ServiceStateChange.Added: return - asyncio.ensure_future(async_display_service_info(zeroconf, service_type, name)) + task = asyncio.ensure_future(async_display_service_info(zeroconf, service_type, name)) + _PENDING_TASKS.add(task) + task.add_done_callback(_PENDING_TASKS.discard) async def async_display_service_info(zeroconf: Zeroconf, service_type: str, name: str) -> None: info = AsyncServiceInfo(service_type, name) await info.async_request(zeroconf, 3000) - print("Info from 
zeroconf.get_service_info: %r" % (info)) + print(f"Info from zeroconf.get_service_info: {info!r}") if info: - addresses = ["%s:%d" % (addr, cast(int, info.port)) for addr in info.parsed_scoped_addresses()] - print(" Name: %s" % name) - print(" Addresses: %s" % ", ".join(addresses)) - print(" Weight: %d, priority: %d" % (info.weight, info.priority)) + addresses = [f"{addr}:{cast(int, info.port)}" for addr in info.parsed_scoped_addresses()] + print(f" Name: {name}") + print(f" Addresses: {', '.join(addresses)}") + print(f" Weight: {info.weight}, priority: {info.priority}") print(f" Server: {info.server}") if info.properties: print(" Properties are:") @@ -46,14 +52,14 @@ async def async_display_service_info(zeroconf: Zeroconf, service_type: str, name print(" No properties") else: print(" No info") - print('\n') + print("\n") class AsyncRunner: def __init__(self, args: Any) -> None: self.args = args - self.aiobrowser: Optional[AsyncServiceBrowser] = None - self.aiozc: Optional[AsyncZeroconf] = None + self.aiobrowser: AsyncServiceBrowser | None = None + self.aiozc: AsyncZeroconf | None = None async def async_run(self) -> None: self.aiozc = AsyncZeroconf(ip_version=ip_version) @@ -64,12 +70,11 @@ async def async_run(self) -> None: await AsyncZeroconfServiceTypes.async_find(aiozc=self.aiozc, ip_version=ip_version) ) - print("\nBrowsing %s service(s), press Ctrl-C to exit...\n" % services) + print(f"\nBrowsing {services} service(s), press Ctrl-C to exit...\n") self.aiobrowser = AsyncServiceBrowser( self.aiozc.zeroconf, services, handlers=[async_on_service_state_change] ) - while True: - await asyncio.sleep(1) + await asyncio.Event().wait() async def async_close(self) -> None: assert self.aiozc is not None @@ -78,19 +83,19 @@ async def async_close(self) -> None: await self.aiozc.async_close() -if __name__ == '__main__': +if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG) parser = argparse.ArgumentParser() - parser.add_argument('--debug', 
action='store_true') - parser.add_argument('--find', action='store_true', help='Browse all available services') + parser.add_argument("--debug", action="store_true") + parser.add_argument("--find", action="store_true", help="Browse all available services") version_group = parser.add_mutually_exclusive_group() - version_group.add_argument('--v6', action='store_true') - version_group.add_argument('--v6-only', action='store_true') + version_group.add_argument("--v6", action="store_true") + version_group.add_argument("--v6-only", action="store_true") args = parser.parse_args() if args.debug: - logging.getLogger('zeroconf').setLevel(logging.DEBUG) + logging.getLogger("zeroconf").setLevel(logging.DEBUG) if args.v6: ip_version = IPVersion.All elif args.v6_only: diff --git a/examples/async_registration.py b/examples/async_registration.py old mode 100644 new mode 100755 index c3aab326a..5c774cadb --- a/examples/async_registration.py +++ b/examples/async_registration.py @@ -1,11 +1,13 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python + """Example of announcing 250 services (in this case, a fake HTTP server).""" +from __future__ import annotations + import argparse import asyncio import logging import socket -from typing import List, Optional from zeroconf import IPVersion from zeroconf.asyncio import AsyncServiceInfo, AsyncZeroconf @@ -14,18 +16,17 @@ class AsyncRunner: def __init__(self, ip_version: IPVersion) -> None: self.ip_version = ip_version - self.aiozc: Optional[AsyncZeroconf] = None + self.aiozc: AsyncZeroconf | None = None - async def register_services(self, infos: List[AsyncServiceInfo]) -> None: + async def register_services(self, infos: list[AsyncServiceInfo]) -> None: self.aiozc = AsyncZeroconf(ip_version=self.ip_version) tasks = [self.aiozc.async_register_service(info) for info in infos] background_tasks = await asyncio.gather(*tasks) await asyncio.gather(*background_tasks) print("Finished registration, press Ctrl-C to exit...") - while True: - await 
asyncio.sleep(1) + await asyncio.Event().wait() - async def unregister_services(self, infos: List[AsyncServiceInfo]) -> None: + async def unregister_services(self, infos: list[AsyncServiceInfo]) -> None: assert self.aiozc is not None tasks = [self.aiozc.async_unregister_service(info) for info in infos] background_tasks = await asyncio.gather(*tasks) @@ -33,18 +34,18 @@ async def unregister_services(self, infos: List[AsyncServiceInfo]) -> None: await self.aiozc.async_close() -if __name__ == '__main__': +if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG) parser = argparse.ArgumentParser() - parser.add_argument('--debug', action='store_true') + parser.add_argument("--debug", action="store_true") version_group = parser.add_mutually_exclusive_group() - version_group.add_argument('--v6', action='store_true') - version_group.add_argument('--v6-only', action='store_true') + version_group.add_argument("--v6", action="store_true") + version_group.add_argument("--v6-only", action="store_true") args = parser.parse_args() if args.debug: - logging.getLogger('zeroconf').setLevel(logging.DEBUG) + logging.getLogger("zeroconf").setLevel(logging.DEBUG) if args.v6: ip_version = IPVersion.All elif args.v6_only: @@ -60,7 +61,7 @@ async def unregister_services(self, infos: List[AsyncServiceInfo]) -> None: f"Paul's Test Web Site {i}._http._tcp.local.", addresses=[socket.inet_aton("127.0.0.1")], port=80, - properties={'path': '/~paulsm/'}, + properties={"path": "/~paulsm/"}, server=f"zcdemohost-{i}.local.", ) ) diff --git a/examples/async_service_info_request.py b/examples/async_service_info_request.py old mode 100644 new mode 100755 index 5bb247618..ca75fc522 --- a/examples/async_service_info_request.py +++ b/examples/async_service_info_request.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python + """Example of perodic dump of homekit services. 
This example is useful when a user wants an ondemand @@ -6,10 +7,12 @@ """ +from __future__ import annotations + import argparse import asyncio import logging -from typing import Any, List, Optional, cast +from typing import Any, cast from zeroconf import IPVersion, ServiceBrowser, ServiceStateChange, Zeroconf from zeroconf.asyncio import AsyncServiceInfo, AsyncZeroconf @@ -21,7 +24,7 @@ async def async_watch_services(aiozc: AsyncZeroconf) -> None: zeroconf = aiozc.zeroconf while True: await asyncio.sleep(5) - infos: List[AsyncServiceInfo] = [] + infos: list[AsyncServiceInfo] = [] for name in zeroconf.cache.names(): if not name.endswith(HAP_TYPE): continue @@ -29,11 +32,11 @@ async def async_watch_services(aiozc: AsyncZeroconf) -> None: tasks = [info.async_request(aiozc.zeroconf, 3000) for info in infos] await asyncio.gather(*tasks) for info in infos: - print("Info for %s" % (info.name)) + print(f"Info for {info.name}") if info: - addresses = ["%s:%d" % (addr, cast(int, info.port)) for addr in info.parsed_addresses()] - print(" Addresses: %s" % ", ".join(addresses)) - print(" Weight: %d, priority: %d" % (info.weight, info.priority)) + addresses = [f"{addr}:{cast(int, info.port)}" for addr in info.parsed_addresses()] + print(f" Addresses: {', '.join(addresses)}") + print(f" Weight: {info.weight}, priority: {info.priority}") print(f" Server: {info.server}") if info.properties: print(" Properties are:") @@ -43,21 +46,24 @@ async def async_watch_services(aiozc: AsyncZeroconf) -> None: print(" No properties") else: print(" No info") - print('\n') + print("\n") class AsyncRunner: def __init__(self, args: Any) -> None: self.args = args - self.threaded_browser: Optional[ServiceBrowser] = None - self.aiozc: Optional[AsyncZeroconf] = None + self.threaded_browser: ServiceBrowser | None = None + self.aiozc: AsyncZeroconf | None = None async def async_run(self) -> None: self.aiozc = AsyncZeroconf(ip_version=ip_version) assert self.aiozc is not None def on_service_state_change( 
- zeroconf: Zeroconf, service_type: str, state_change: ServiceStateChange, name: str + zeroconf: Zeroconf, + service_type: str, + state_change: ServiceStateChange, + name: str, ) -> None: """Dummy handler.""" @@ -73,18 +79,18 @@ async def async_close(self) -> None: await self.aiozc.async_close() -if __name__ == '__main__': +if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG) parser = argparse.ArgumentParser() - parser.add_argument('--debug', action='store_true') + parser.add_argument("--debug", action="store_true") version_group = parser.add_mutually_exclusive_group() - version_group.add_argument('--v6', action='store_true') - version_group.add_argument('--v6-only', action='store_true') + version_group.add_argument("--v6", action="store_true") + version_group.add_argument("--v6-only", action="store_true") args = parser.parse_args() if args.debug: - logging.getLogger('zeroconf').setLevel(logging.DEBUG) + logging.getLogger("zeroconf").setLevel(logging.DEBUG) if args.v6: ip_version = IPVersion.All elif args.v6_only: diff --git a/examples/browser.py b/examples/browser.py index 237de013f..92adc9491 100755 --- a/examples/browser.py +++ b/examples/browser.py @@ -1,10 +1,12 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python -""" Example of browsing for a service. +"""Example of browsing for a service. 
The default is HTTP and HAP; use --find to search for all available services in the network """ +from __future__ import annotations + import argparse import logging from time import sleep @@ -26,12 +28,12 @@ def on_service_state_change( if state_change is ServiceStateChange.Added: info = zeroconf.get_service_info(service_type, name) - print("Info from zeroconf.get_service_info: %r" % (info)) + print(f"Info from zeroconf.get_service_info: {info!r}") if info: - addresses = ["%s:%d" % (addr, cast(int, info.port)) for addr in info.parsed_scoped_addresses()] - print(" Addresses: %s" % ", ".join(addresses)) - print(" Weight: %d, priority: %d" % (info.weight, info.priority)) + addresses = [f"{addr}:{cast(int, info.port)}" for addr in info.parsed_scoped_addresses()] + print(f" Addresses: {', '.join(addresses)}") + print(f" Weight: {info.weight}, priority: {info.priority}") print(f" Server: {info.server}") if info.properties: print(" Properties are:") @@ -41,22 +43,22 @@ def on_service_state_change( print(" No properties") else: print(" No info") - print('\n') + print("\n") -if __name__ == '__main__': +if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG) parser = argparse.ArgumentParser() - parser.add_argument('--debug', action='store_true') - parser.add_argument('--find', action='store_true', help='Browse all available services') + parser.add_argument("--debug", action="store_true") + parser.add_argument("--find", action="store_true", help="Browse all available services") version_group = parser.add_mutually_exclusive_group() - version_group.add_argument('--v6-only', action='store_true') - version_group.add_argument('--v4-only', action='store_true') + version_group.add_argument("--v6-only", action="store_true") + version_group.add_argument("--v4-only", action="store_true") args = parser.parse_args() if args.debug: - logging.getLogger('zeroconf').setLevel(logging.DEBUG) + logging.getLogger("zeroconf").setLevel(logging.DEBUG) if args.v6_only: ip_version = 
IPVersion.V6Only elif args.v4_only: @@ -66,11 +68,16 @@ def on_service_state_change( zeroconf = Zeroconf(ip_version=ip_version) - services = ["_http._tcp.local.", "_hap._tcp.local.", "_esphomelib._tcp.local.", "_airplay._tcp.local."] + services = [ + "_http._tcp.local.", + "_hap._tcp.local.", + "_esphomelib._tcp.local.", + "_airplay._tcp.local.", + ] if args.find: services = list(ZeroconfServiceTypes.find(zc=zeroconf)) - print("\nBrowsing %d service(s), press Ctrl-C to exit...\n" % len(services)) + print(f"\nBrowsing {len(services)} service(s), press Ctrl-C to exit...\n") browser = ServiceBrowser(zeroconf, services, handlers=[on_service_state_change]) try: diff --git a/examples/registration.py b/examples/registration.py index 65c221996..1ba19b16a 100755 --- a/examples/registration.py +++ b/examples/registration.py @@ -1,6 +1,8 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python -""" Example of announcing a service (in this case, a fake HTTP server) """ +"""Example of announcing a service (in this case, a fake HTTP server)""" + +from __future__ import annotations import argparse import logging @@ -9,18 +11,18 @@ from zeroconf import IPVersion, ServiceInfo, Zeroconf -if __name__ == '__main__': +if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG) parser = argparse.ArgumentParser() - parser.add_argument('--debug', action='store_true') + parser.add_argument("--debug", action="store_true") version_group = parser.add_mutually_exclusive_group() - version_group.add_argument('--v6', action='store_true') - version_group.add_argument('--v6-only', action='store_true') + version_group.add_argument("--v6", action="store_true") + version_group.add_argument("--v6-only", action="store_true") args = parser.parse_args() if args.debug: - logging.getLogger('zeroconf').setLevel(logging.DEBUG) + logging.getLogger("zeroconf").setLevel(logging.DEBUG) if args.v6: ip_version = IPVersion.All elif args.v6_only: @@ -28,7 +30,7 @@ else: ip_version = IPVersion.V4Only - desc = 
{'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( "_http._tcp.local.", diff --git a/examples/resolve_address.py b/examples/resolve_address.py new file mode 100755 index 000000000..88ce825b7 --- /dev/null +++ b/examples/resolve_address.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python + +"""Example of resolving a name to an IP address.""" + +from __future__ import annotations + +import asyncio +import logging +import sys + +from zeroconf import AddressResolver, IPVersion +from zeroconf.asyncio import AsyncZeroconf + + +async def resolve_name(name: str) -> None: + aiozc = AsyncZeroconf() + await aiozc.zeroconf.async_wait_for_start() + resolver = AddressResolver(name) + if await resolver.async_request(aiozc.zeroconf, 3000): + print(f"{name} IP addresses:", resolver.ip_addresses_by_version(IPVersion.All)) + else: + print(f"Name {name} not resolved") + await aiozc.async_close() + + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + argv = sys.argv.copy() + if "--debug" in argv: + logging.getLogger("zeroconf").setLevel(logging.DEBUG) + argv.remove("--debug") + + if len(argv) < 2 or not argv[1]: + raise ValueError("Usage: resolve_address.py [--debug] ") + + name = argv[1] + if not name.endswith("."): + name += "." + + asyncio.run(resolve_name(name)) diff --git a/examples/resolver.py b/examples/resolver.py index 6a550fcb2..a52050f41 100755 --- a/examples/resolver.py +++ b/examples/resolver.py @@ -1,24 +1,26 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python -""" Example of resolving a service with a known name """ +"""Example of resolving a service with a known name""" + +from __future__ import annotations import logging import sys from zeroconf import Zeroconf -TYPE = '_test._tcp.local.' -NAME = 'My Service Name' +TYPE = "_test._tcp.local." 
+NAME = "My Service Name" -if __name__ == '__main__': +if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG) if len(sys.argv) > 1: - assert sys.argv[1:] == ['--debug'] - logging.getLogger('zeroconf').setLevel(logging.DEBUG) + assert sys.argv[1:] == ["--debug"] + logging.getLogger("zeroconf").setLevel(logging.DEBUG) zeroconf = Zeroconf() try: - print(zeroconf.get_service_info(TYPE, NAME + '.' + TYPE)) + print(zeroconf.get_service_info(TYPE, NAME + "." + TYPE)) finally: zeroconf.close() diff --git a/examples/self_test.py b/examples/self_test.py index 2178629b5..3d1fa050c 100755 --- a/examples/self_test.py +++ b/examples/self_test.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python +from __future__ import annotations import logging import socket @@ -6,23 +7,23 @@ from zeroconf import ServiceInfo, Zeroconf, __version__ -if __name__ == '__main__': +if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG) if len(sys.argv) > 1: - assert sys.argv[1:] == ['--debug'] - logging.getLogger('zeroconf').setLevel(logging.DEBUG) + assert sys.argv[1:] == ["--debug"] + logging.getLogger("zeroconf").setLevel(logging.DEBUG) # Test a few module features, including service registration, service # query (for Zoe), and service unregistration. print(f"Multicast DNS Service Discovery for Python, version {__version__}") r = Zeroconf() print("1. 
Testing registration of a service...") - desc = {'version': '0.10', 'a': 'test value', 'b': 'another value'} + desc = {"version": "0.10", "a": "test value", "b": "another value"} addresses = [socket.inet_aton("127.0.0.1")] - expected = {'127.0.0.1'} + expected = {"127.0.0.1"} if socket.has_ipv6: - addresses.append(socket.inet_pton(socket.AF_INET6, '::1')) - expected.add('::1') + addresses.append(socket.inet_pton(socket.AF_INET6, "::1")) + expected.add("::1") info = ServiceInfo( "_http._tcp.local.", "My Service Name._http._tcp.local.", @@ -34,7 +35,7 @@ r.register_service(info) print(" Registration done.") print("2. Testing query of service information...") - print(" Getting ZOE service: %s" % (r.get_service_info("_http._tcp.local.", "ZOE._http._tcp.local."))) + print(f" Getting ZOE service: {r.get_service_info('_http._tcp.local.', 'ZOE._http._tcp.local.')}") print(" Query done.") print("3. Testing query of own service...") queried_info = r.get_service_info("_http._tcp.local.", "My Service Name._http._tcp.local.") diff --git a/poetry.lock b/poetry.lock index a9a7c6c2b..c9e4642fa 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,14 +1,224 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. 
[[package]] -name = "async-timeout" -version = "4.0.3" -description = "Timeout context manager for asyncio programs" +name = "alabaster" +version = "0.7.16" +description = "A light, configurable Sphinx theme" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92"}, + {file = "alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65"}, +] + +[[package]] +name = "babel" +version = "2.17.0" +description = "Internationalization utilities" +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2"}, + {file = "babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d"}, +] + +[package.extras] +dev = ["backports.zoneinfo ; python_version < \"3.9\"", "freezegun (>=1.0,<2.0)", "jinja2 (>=3.0)", "pytest (>=6.0)", "pytest-cov", "pytz", "setuptools", "tzdata ; sys_platform == \"win32\""] + +[[package]] +name = "certifi" +version = "2025.1.31" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +groups = ["docs"] +files = [ + {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, + {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, +] + +[[package]] +name = "cffi" +version = "1.17.1" +description = "Foreign Function Interface for Python calling C code." 
+optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file 
= "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "charset-normalizer" +version = "3.4.1" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7" +groups = ["docs"] files = [ - {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, - {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"}, + {file = 
"charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"}, + {file = 
"charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"}, + {file = 
"charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"}, + {file = 
"charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = 
"sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash 
= "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"}, + {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"}, + {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, ] [[package]] @@ -17,6 +227,8 @@ version = "0.4.6" description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["dev", "docs"] +markers = "sys_platform == \"win32\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, @@ -24,209 +236,452 @@ files = [ [[package]] name = "coverage" -version = "7.4.1" +version = "7.6.12" description = "Code coverage measurement for Python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["dev"] files = [ - {file = "coverage-7.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:077d366e724f24fc02dbfe9d946534357fda71af9764ff99d73c3c596001bbd7"}, - {file = "coverage-7.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0193657651f5399d433c92f8ae264aff31fc1d066deee4b831549526433f3f61"}, - {file = "coverage-7.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d17bbc946f52ca67adf72a5ee783cd7cd3477f8f8796f59b4974a9b59cacc9ee"}, - {file = "coverage-7.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3277f5fa7483c927fe3a7b017b39351610265308f5267ac6d4c2b64cc1d8d25"}, - {file = 
"coverage-7.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dceb61d40cbfcf45f51e59933c784a50846dc03211054bd76b421a713dcdf19"}, - {file = "coverage-7.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6008adeca04a445ea6ef31b2cbaf1d01d02986047606f7da266629afee982630"}, - {file = "coverage-7.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c61f66d93d712f6e03369b6a7769233bfda880b12f417eefdd4f16d1deb2fc4c"}, - {file = "coverage-7.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b9bb62fac84d5f2ff523304e59e5c439955fb3b7f44e3d7b2085184db74d733b"}, - {file = "coverage-7.4.1-cp310-cp310-win32.whl", hash = "sha256:f86f368e1c7ce897bf2457b9eb61169a44e2ef797099fb5728482b8d69f3f016"}, - {file = "coverage-7.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:869b5046d41abfea3e381dd143407b0d29b8282a904a19cb908fa24d090cc018"}, - {file = "coverage-7.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b8ffb498a83d7e0305968289441914154fb0ef5d8b3157df02a90c6695978295"}, - {file = "coverage-7.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3cacfaefe6089d477264001f90f55b7881ba615953414999c46cc9713ff93c8c"}, - {file = "coverage-7.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d6850e6e36e332d5511a48a251790ddc545e16e8beaf046c03985c69ccb2676"}, - {file = "coverage-7.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18e961aa13b6d47f758cc5879383d27b5b3f3dcd9ce8cdbfdc2571fe86feb4dd"}, - {file = "coverage-7.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dfd1e1b9f0898817babf840b77ce9fe655ecbe8b1b327983df485b30df8cc011"}, - {file = "coverage-7.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6b00e21f86598b6330f0019b40fb397e705135040dbedc2ca9a93c7441178e74"}, - {file = "coverage-7.4.1-cp311-cp311-musllinux_1_1_i686.whl", 
hash = "sha256:536d609c6963c50055bab766d9951b6c394759190d03311f3e9fcf194ca909e1"}, - {file = "coverage-7.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7ac8f8eb153724f84885a1374999b7e45734bf93a87d8df1e7ce2146860edef6"}, - {file = "coverage-7.4.1-cp311-cp311-win32.whl", hash = "sha256:f3771b23bb3675a06f5d885c3630b1d01ea6cac9e84a01aaf5508706dba546c5"}, - {file = "coverage-7.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:9d2f9d4cc2a53b38cabc2d6d80f7f9b7e3da26b2f53d48f05876fef7956b6968"}, - {file = "coverage-7.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f68ef3660677e6624c8cace943e4765545f8191313a07288a53d3da188bd8581"}, - {file = "coverage-7.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:23b27b8a698e749b61809fb637eb98ebf0e505710ec46a8aa6f1be7dc0dc43a6"}, - {file = "coverage-7.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e3424c554391dc9ef4a92ad28665756566a28fecf47308f91841f6c49288e66"}, - {file = "coverage-7.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0860a348bf7004c812c8368d1fc7f77fe8e4c095d661a579196a9533778e156"}, - {file = "coverage-7.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe558371c1bdf3b8fa03e097c523fb9645b8730399c14fe7721ee9c9e2a545d3"}, - {file = "coverage-7.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3468cc8720402af37b6c6e7e2a9cdb9f6c16c728638a2ebc768ba1ef6f26c3a1"}, - {file = "coverage-7.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:02f2edb575d62172aa28fe00efe821ae31f25dc3d589055b3fb64d51e52e4ab1"}, - {file = "coverage-7.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ca6e61dc52f601d1d224526360cdeab0d0712ec104a2ce6cc5ccef6ed9a233bc"}, - {file = "coverage-7.4.1-cp312-cp312-win32.whl", hash = "sha256:ca7b26a5e456a843b9b6683eada193fc1f65c761b3a473941efe5a291f604c74"}, - {file = 
"coverage-7.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:85ccc5fa54c2ed64bd91ed3b4a627b9cce04646a659512a051fa82a92c04a448"}, - {file = "coverage-7.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8bdb0285a0202888d19ec6b6d23d5990410decb932b709f2b0dfe216d031d218"}, - {file = "coverage-7.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:918440dea04521f499721c039863ef95433314b1db00ff826a02580c1f503e45"}, - {file = "coverage-7.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:379d4c7abad5afbe9d88cc31ea8ca262296480a86af945b08214eb1a556a3e4d"}, - {file = "coverage-7.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b094116f0b6155e36a304ff912f89bbb5067157aff5f94060ff20bbabdc8da06"}, - {file = "coverage-7.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2f5968608b1fe2a1d00d01ad1017ee27efd99b3437e08b83ded9b7af3f6f766"}, - {file = "coverage-7.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:10e88e7f41e6197ea0429ae18f21ff521d4f4490aa33048f6c6f94c6045a6a75"}, - {file = "coverage-7.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a4a3907011d39dbc3e37bdc5df0a8c93853c369039b59efa33a7b6669de04c60"}, - {file = "coverage-7.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6d224f0c4c9c98290a6990259073f496fcec1b5cc613eecbd22786d398ded3ad"}, - {file = "coverage-7.4.1-cp38-cp38-win32.whl", hash = "sha256:23f5881362dcb0e1a92b84b3c2809bdc90db892332daab81ad8f642d8ed55042"}, - {file = "coverage-7.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:a07f61fc452c43cd5328b392e52555f7d1952400a1ad09086c4a8addccbd138d"}, - {file = "coverage-7.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8e738a492b6221f8dcf281b67129510835461132b03024830ac0e554311a5c54"}, - {file = "coverage-7.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46342fed0fff72efcda77040b14728049200cbba1279e0bf1188f1f2078c1d70"}, - {file = 
"coverage-7.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9641e21670c68c7e57d2053ddf6c443e4f0a6e18e547e86af3fad0795414a628"}, - {file = "coverage-7.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aeb2c2688ed93b027eb0d26aa188ada34acb22dceea256d76390eea135083950"}, - {file = "coverage-7.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d12c923757de24e4e2110cf8832d83a886a4cf215c6e61ed506006872b43a6d1"}, - {file = "coverage-7.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0491275c3b9971cdbd28a4595c2cb5838f08036bca31765bad5e17edf900b2c7"}, - {file = "coverage-7.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8dfc5e195bbef80aabd81596ef52a1277ee7143fe419efc3c4d8ba2754671756"}, - {file = "coverage-7.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1a78b656a4d12b0490ca72651fe4d9f5e07e3c6461063a9b6265ee45eb2bdd35"}, - {file = "coverage-7.4.1-cp39-cp39-win32.whl", hash = "sha256:f90515974b39f4dea2f27c0959688621b46d96d5a626cf9c53dbc653a895c05c"}, - {file = "coverage-7.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:64e723ca82a84053dd7bfcc986bdb34af8d9da83c521c19d6b472bc6880e191a"}, - {file = "coverage-7.4.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:32a8d985462e37cfdab611a6f95b09d7c091d07668fdc26e47a725ee575fe166"}, - {file = "coverage-7.4.1.tar.gz", hash = "sha256:1ed4b95480952b1a26d863e546fa5094564aa0065e1e5f0d4d0041f293251d04"}, + {file = "coverage-7.6.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:704c8c8c6ce6569286ae9622e534b4f5b9759b6f2cd643f1c1a61f666d534fe8"}, + {file = "coverage-7.6.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ad7525bf0241e5502168ae9c643a2f6c219fa0a283001cee4cf23a9b7da75879"}, + {file = "coverage-7.6.12-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06097c7abfa611c91edb9e6920264e5be1d6ceb374efb4986f38b09eed4cb2fe"}, + 
{file = "coverage-7.6.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:220fa6c0ad7d9caef57f2c8771918324563ef0d8272c94974717c3909664e674"}, + {file = "coverage-7.6.12-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3688b99604a24492bcfe1c106278c45586eb819bf66a654d8a9a1433022fb2eb"}, + {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d1a987778b9c71da2fc8948e6f2656da6ef68f59298b7e9786849634c35d2c3c"}, + {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:cec6b9ce3bd2b7853d4a4563801292bfee40b030c05a3d29555fd2a8ee9bd68c"}, + {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ace9048de91293e467b44bce0f0381345078389814ff6e18dbac8fdbf896360e"}, + {file = "coverage-7.6.12-cp310-cp310-win32.whl", hash = "sha256:ea31689f05043d520113e0552f039603c4dd71fa4c287b64cb3606140c66f425"}, + {file = "coverage-7.6.12-cp310-cp310-win_amd64.whl", hash = "sha256:676f92141e3c5492d2a1596d52287d0d963df21bf5e55c8b03075a60e1ddf8aa"}, + {file = "coverage-7.6.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e18aafdfb3e9ec0d261c942d35bd7c28d031c5855dadb491d2723ba54f4c3015"}, + {file = "coverage-7.6.12-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:66fe626fd7aa5982cdebad23e49e78ef7dbb3e3c2a5960a2b53632f1f703ea45"}, + {file = "coverage-7.6.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ef01d70198431719af0b1f5dcbefc557d44a190e749004042927b2a3fed0702"}, + {file = "coverage-7.6.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e92ae5a289a4bc4c0aae710c0948d3c7892e20fd3588224ebe242039573bf0"}, + {file = "coverage-7.6.12-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:e695df2c58ce526eeab11a2e915448d3eb76f75dffe338ea613c1201b33bab2f"}, + {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d74c08e9aaef995f8c4ef6d202dbd219c318450fe2a76da624f2ebb9c8ec5d9f"}, + {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e995b3b76ccedc27fe4f477b349b7d64597e53a43fc2961db9d3fbace085d69d"}, + {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b1f097878d74fe51e1ddd1be62d8e3682748875b461232cf4b52ddc6e6db0bba"}, + {file = "coverage-7.6.12-cp311-cp311-win32.whl", hash = "sha256:1f7ffa05da41754e20512202c866d0ebfc440bba3b0ed15133070e20bf5aeb5f"}, + {file = "coverage-7.6.12-cp311-cp311-win_amd64.whl", hash = "sha256:e216c5c45f89ef8971373fd1c5d8d1164b81f7f5f06bbf23c37e7908d19e8558"}, + {file = "coverage-7.6.12-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b172f8e030e8ef247b3104902cc671e20df80163b60a203653150d2fc204d1ad"}, + {file = "coverage-7.6.12-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:641dfe0ab73deb7069fb972d4d9725bf11c239c309ce694dd50b1473c0f641c3"}, + {file = "coverage-7.6.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e549f54ac5f301e8e04c569dfdb907f7be71b06b88b5063ce9d6953d2d58574"}, + {file = "coverage-7.6.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:959244a17184515f8c52dcb65fb662808767c0bd233c1d8a166e7cf74c9ea985"}, + {file = "coverage-7.6.12-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bda1c5f347550c359f841d6614fb8ca42ae5cb0b74d39f8a1e204815ebe25750"}, + {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1ceeb90c3eda1f2d8c4c578c14167dbd8c674ecd7d38e45647543f19839dd6ea"}, + {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f16f44025c06792e0fb09571ae454bcc7a3ec75eeb3c36b025eccf501b1a4c3"}, + {file = 
"coverage-7.6.12-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b076e625396e787448d27a411aefff867db2bffac8ed04e8f7056b07024eed5a"}, + {file = "coverage-7.6.12-cp312-cp312-win32.whl", hash = "sha256:00b2086892cf06c7c2d74983c9595dc511acca00665480b3ddff749ec4fb2a95"}, + {file = "coverage-7.6.12-cp312-cp312-win_amd64.whl", hash = "sha256:7ae6eabf519bc7871ce117fb18bf14e0e343eeb96c377667e3e5dd12095e0288"}, + {file = "coverage-7.6.12-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:488c27b3db0ebee97a830e6b5a3ea930c4a6e2c07f27a5e67e1b3532e76b9ef1"}, + {file = "coverage-7.6.12-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d1095bbee1851269f79fd8e0c9b5544e4c00c0c24965e66d8cba2eb5bb535fd"}, + {file = "coverage-7.6.12-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0533adc29adf6a69c1baa88c3d7dbcaadcffa21afbed3ca7a225a440e4744bf9"}, + {file = "coverage-7.6.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53c56358d470fa507a2b6e67a68fd002364d23c83741dbc4c2e0680d80ca227e"}, + {file = "coverage-7.6.12-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64cbb1a3027c79ca6310bf101014614f6e6e18c226474606cf725238cf5bc2d4"}, + {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:79cac3390bfa9836bb795be377395f28410811c9066bc4eefd8015258a7578c6"}, + {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9b148068e881faa26d878ff63e79650e208e95cf1c22bd3f77c3ca7b1d9821a3"}, + {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8bec2ac5da793c2685ce5319ca9bcf4eee683b8a1679051f8e6ec04c4f2fd7dc"}, + {file = "coverage-7.6.12-cp313-cp313-win32.whl", hash = "sha256:200e10beb6ddd7c3ded322a4186313d5ca9e63e33d8fab4faa67ef46d3460af3"}, + {file = "coverage-7.6.12-cp313-cp313-win_amd64.whl", hash = 
"sha256:2b996819ced9f7dbb812c701485d58f261bef08f9b85304d41219b1496b591ef"}, + {file = "coverage-7.6.12-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:299cf973a7abff87a30609879c10df0b3bfc33d021e1adabc29138a48888841e"}, + {file = "coverage-7.6.12-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4b467a8c56974bf06e543e69ad803c6865249d7a5ccf6980457ed2bc50312703"}, + {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2458f275944db8129f95d91aee32c828a408481ecde3b30af31d552c2ce284a0"}, + {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a9d8be07fb0832636a0f72b80d2a652fe665e80e720301fb22b191c3434d924"}, + {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14d47376a4f445e9743f6c83291e60adb1b127607a3618e3185bbc8091f0467b"}, + {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b95574d06aa9d2bd6e5cc35a5bbe35696342c96760b69dc4287dbd5abd4ad51d"}, + {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:ecea0c38c9079570163d663c0433a9af4094a60aafdca491c6a3d248c7432827"}, + {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2251fabcfee0a55a8578a9d29cecfee5f2de02f11530e7d5c5a05859aa85aee9"}, + {file = "coverage-7.6.12-cp313-cp313t-win32.whl", hash = "sha256:eb5507795caabd9b2ae3f1adc95f67b1104971c22c624bb354232d65c4fc90b3"}, + {file = "coverage-7.6.12-cp313-cp313t-win_amd64.whl", hash = "sha256:f60a297c3987c6c02ffb29effc70eadcbb412fe76947d394a1091a3615948e2f"}, + {file = "coverage-7.6.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e7575ab65ca8399c8c4f9a7d61bbd2d204c8b8e447aab9d355682205c9dd948d"}, + {file = "coverage-7.6.12-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8161d9fbc7e9fe2326de89cd0abb9f3599bccc1287db0aba285cb68d204ce929"}, + {file = 
"coverage-7.6.12-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a1e465f398c713f1b212400b4e79a09829cd42aebd360362cd89c5bdc44eb87"}, + {file = "coverage-7.6.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f25d8b92a4e31ff1bd873654ec367ae811b3a943583e05432ea29264782dc32c"}, + {file = "coverage-7.6.12-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a936309a65cc5ca80fa9f20a442ff9e2d06927ec9a4f54bcba9c14c066323f2"}, + {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:aa6f302a3a0b5f240ee201297fff0bbfe2fa0d415a94aeb257d8b461032389bd"}, + {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f973643ef532d4f9be71dd88cf7588936685fdb576d93a79fe9f65bc337d9d73"}, + {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:78f5243bb6b1060aed6213d5107744c19f9571ec76d54c99cc15938eb69e0e86"}, + {file = "coverage-7.6.12-cp39-cp39-win32.whl", hash = "sha256:69e62c5034291c845fc4df7f8155e8544178b6c774f97a99e2734b05eb5bed31"}, + {file = "coverage-7.6.12-cp39-cp39-win_amd64.whl", hash = "sha256:b01a840ecc25dce235ae4c1b6a0daefb2a203dba0e6e980637ee9c2f6ee0df57"}, + {file = "coverage-7.6.12-pp39.pp310-none-any.whl", hash = "sha256:7e39e845c4d764208e7b8f6a21c541ade741e2c41afabdfa1caa28687a3c98cf"}, + {file = "coverage-7.6.12-py3-none-any.whl", hash = "sha256:eb8668cfbc279a536c633137deeb9435d2962caec279c3f8cf8b91fff6ff8953"}, + {file = "coverage-7.6.12.tar.gz", hash = "sha256:48cfc4641d95d34766ad41d9573cc0f22a48aa88d22657a1fe01dca0dbae4de2"}, ] [package.dependencies] tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} [package.extras] -toml = ["tomli"] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] [[package]] name = "cython" -version = "3.0.8" +version = "3.0.12" description = "The Cython compiler for 
writing C extensions in the Python language." optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +groups = ["dev"] +files = [ + {file = "Cython-3.0.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ba67eee9413b66dd9fbacd33f0bc2e028a2a120991d77b5fd4b19d0b1e4039b9"}, + {file = "Cython-3.0.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bee2717e5b5f7d966d0c6e27d2efe3698c357aa4d61bb3201997c7a4f9fe485a"}, + {file = "Cython-3.0.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7cffc3464f641c8d0dda942c7c53015291beea11ec4d32421bed2f13b386b819"}, + {file = "Cython-3.0.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d3a8f81980ffbd74e52f9186d8f1654e347d0c44bfea6b5997028977f481a179"}, + {file = "Cython-3.0.12-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8d32856716c369d01f2385ad9177cdd1a11079ac89ea0932dc4882de1aa19174"}, + {file = "Cython-3.0.12-cp310-cp310-win32.whl", hash = "sha256:712c3f31adec140dc60d064a7f84741f50e2c25a8edd7ae746d5eb4d3ef7072a"}, + {file = "Cython-3.0.12-cp310-cp310-win_amd64.whl", hash = "sha256:d6945694c5b9170cfbd5f2c0d00ef7487a2de7aba83713a64ee4ebce7fad9e05"}, + {file = "Cython-3.0.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:feb86122a823937cc06e4c029d80ff69f082ebb0b959ab52a5af6cdd271c5dc3"}, + {file = "Cython-3.0.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfdbea486e702c328338314adb8e80f5f9741f06a0ae83aaec7463bc166d12e8"}, + {file = "Cython-3.0.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:563de1728c8e48869d2380a1b76bbc1b1b1d01aba948480d68c1d05e52d20c92"}, + {file = "Cython-3.0.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:398d4576c1e1f6316282aa0b4a55139254fbed965cba7813e6d9900d3092b128"}, + {file = 
"Cython-3.0.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1e5eadef80143026944ea8f9904715a008f5108d1d644a89f63094cc37351e73"}, + {file = "Cython-3.0.12-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5a93cbda00a5451175b97dea5a9440a3fcee9e54b4cba7a7dbcba9a764b22aec"}, + {file = "Cython-3.0.12-cp311-cp311-win32.whl", hash = "sha256:3109e1d44425a2639e9a677b66cd7711721a5b606b65867cb2d8ef7a97e2237b"}, + {file = "Cython-3.0.12-cp311-cp311-win_amd64.whl", hash = "sha256:d4b70fc339adba1e2111b074ee6119fe9fd6072c957d8597bce9a0dd1c3c6784"}, + {file = "Cython-3.0.12-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fe030d4a00afb2844f5f70896b7f2a1a0d7da09bf3aa3d884cbe5f73fff5d310"}, + {file = "Cython-3.0.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7fec4f052b8fe173fe70eae75091389955b9a23d5cec3d576d21c5913b49d47"}, + {file = "Cython-3.0.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0faa5e39e5c8cdf6f9c3b1c3f24972826e45911e7f5b99cf99453fca5432f45e"}, + {file = "Cython-3.0.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d53de996ed340e9ab0fc85a88aaa8932f2591a2746e1ab1c06e262bd4ec4be7"}, + {file = "Cython-3.0.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ea3a0e19ab77266c738aa110684a753a04da4e709472cadeff487133354d6ab8"}, + {file = "Cython-3.0.12-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c151082884be468f2f405645858a857298ac7f7592729e5b54788b5c572717ba"}, + {file = "Cython-3.0.12-cp312-cp312-win32.whl", hash = "sha256:3083465749911ac3b2ce001b6bf17f404ac9dd35d8b08469d19dc7e717f5877a"}, + {file = "Cython-3.0.12-cp312-cp312-win_amd64.whl", hash = "sha256:c0b91c7ebace030dd558ea28730de8c580680b50768e5af66db2904a3716c3e3"}, + {file = "Cython-3.0.12-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4ee6f1ea1bead8e6cbc4e64571505b5d8dbdb3b58e679d31f3a84160cebf1a1a"}, + {file = 
"Cython-3.0.12-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57aefa6d3341109e46ec1a13e3a763aaa2cbeb14e82af2485b318194be1d9170"}, + {file = "Cython-3.0.12-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:879ae9023958d63c0675015369384642d0afb9c9d1f3473df9186c42f7a9d265"}, + {file = "Cython-3.0.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:36fcd584dae547de6f095500a380f4a0cce72b7a7e409e9ff03cb9beed6ac7a1"}, + {file = "Cython-3.0.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:62b79dcc0de49efe9e84b9d0e2ae0a6fc9b14691a65565da727aa2e2e63c6a28"}, + {file = "Cython-3.0.12-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4aa255781b093a8401109d8f2104bbb2e52de7639d5896aefafddc85c30e0894"}, + {file = "Cython-3.0.12-cp313-cp313-win32.whl", hash = "sha256:77d48f2d4bab9fe1236eb753d18f03e8b2619af5b6f05d51df0532a92dfb38ab"}, + {file = "Cython-3.0.12-cp313-cp313-win_amd64.whl", hash = "sha256:86c304b20bd57c727c7357e90d5ba1a2b6f1c45492de2373814d7745ef2e63b4"}, + {file = "Cython-3.0.12-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:ff5c0b6a65b08117d0534941d404833d516dac422eee88c6b4fd55feb409a5ed"}, + {file = "Cython-3.0.12-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:680f1d6ed4436ae94805db264d6155ed076d2835d84f20dcb31a7a3ad7f8668c"}, + {file = "Cython-3.0.12-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebc24609613fa06d0d896309f7164ba168f7e8d71c1e490ed2a08d23351c3f41"}, + {file = "Cython-3.0.12-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c1879c073e2b34924ce9b7ca64c212705dcc416af4337c45f371242b2e5f6d32"}, + {file = "Cython-3.0.12-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:bfb75123dd4ff767baa37d7036da0de2dfb6781ff256eef69b11b88b9a0691d1"}, + {file = "Cython-3.0.12-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = 
"sha256:f39640f8df0400cde6882e23c734f15bb8196de0a008ae5dc6c8d1ec5957d7c8"}, + {file = "Cython-3.0.12-cp36-cp36m-win32.whl", hash = "sha256:8c9efe9a0895abee3cadfdad4130b30f7b5e57f6e6a51ef2a44f9fc66a913880"}, + {file = "Cython-3.0.12-cp36-cp36m-win_amd64.whl", hash = "sha256:63d840f2975e44d74512f8f34f1f7cb8121c9428e26a3f6116ff273deb5e60a2"}, + {file = "Cython-3.0.12-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:75c5acd40b97cff16fadcf6901a91586cbca5dcdba81f738efaf1f4c6bc8dccb"}, + {file = "Cython-3.0.12-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e62564457851db1c40399bd95a5346b9bb99e17a819bf583b362f418d8f3457a"}, + {file = "Cython-3.0.12-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ccd1228cc203b1f1b8a3d403f5a20ad1c40e5879b3fbf5851ce09d948982f2c"}, + {file = "Cython-3.0.12-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25529ee948f44d9a165ff960c49d4903267c20b5edf2df79b45924802e4cca6e"}, + {file = "Cython-3.0.12-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:90cf599372c5a22120609f7d3a963f17814799335d56dd0dcf8fe615980a8ae1"}, + {file = "Cython-3.0.12-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:9f8c48748a9c94ea5d59c26ab49ad0fad514d36f894985879cf3c3ca0e600bf4"}, + {file = "Cython-3.0.12-cp37-cp37m-win32.whl", hash = "sha256:3e4fa855d98bc7bd6a2049e0c7dc0dcf595e2e7f571a26e808f3efd84d2db374"}, + {file = "Cython-3.0.12-cp37-cp37m-win_amd64.whl", hash = "sha256:120681093772bf3600caddb296a65b352a0d3556e962b9b147efcfb8e8c9801b"}, + {file = "Cython-3.0.12-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:731d719423e041242c9303c80cae4327467299b90ffe62d4cc407e11e9ea3160"}, + {file = "Cython-3.0.12-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3238a29f37999e27494d120983eca90d14896b2887a0bd858a381204549137a"}, + {file = "Cython-3.0.12-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:b588c0a089a9f4dd316d2f9275230bad4a7271e5af04e1dc41d2707c816be44b"}, + {file = "Cython-3.0.12-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ab9f5198af74eb16502cc143cdde9ca1cbbf66ea2912e67440dd18a36e3b5fa"}, + {file = "Cython-3.0.12-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8ee841c0e114efa1e849c281ac9b8df8aa189af10b4a103b1c5fd71cbb799679"}, + {file = "Cython-3.0.12-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:43c48b5789398b228ea97499f5b864843ba9b1ab837562a9227c6f58d16ede8b"}, + {file = "Cython-3.0.12-cp38-cp38-win32.whl", hash = "sha256:5e5f17c48a4f41557fbcc7ee660ccfebe4536a34c557f553b6893c1b3c83df2d"}, + {file = "Cython-3.0.12-cp38-cp38-win_amd64.whl", hash = "sha256:309c081057930bb79dc9ea3061a1af5086c679c968206e9c9c2ec90ab7cb471a"}, + {file = "Cython-3.0.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54115fcc126840926ff3b53cfd2152eae17b3522ae7f74888f8a41413bd32f25"}, + {file = "Cython-3.0.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:629db614b9c364596d7c975fa3fb3978e8c5349524353dbe11429896a783fc1e"}, + {file = "Cython-3.0.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:af081838b0f9e12a83ec4c3809a00a64c817f489f7c512b0e3ecaf5f90a2a816"}, + {file = "Cython-3.0.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:34ce459808f7d8d5d4007bc5486fe50532529096b43957af6cbffcb4d9cc5c8d"}, + {file = "Cython-3.0.12-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d6c6cd6a75c8393e6805d17f7126b96a894f310a1a9ea91c47d141fb9341bfa8"}, + {file = "Cython-3.0.12-cp39-cp39-win32.whl", hash = "sha256:a4032e48d4734d2df68235d21920c715c451ac9de15fa14c71b378e8986b83be"}, + {file = "Cython-3.0.12-cp39-cp39-win_amd64.whl", hash = "sha256:dcdc3e5d4ce0e7a4af6903ed580833015641e968d18d528d8371e2435a34132c"}, + {file = "Cython-3.0.12-py2.py3-none-any.whl", hash = 
"sha256:0038c9bae46c459669390e53a1ec115f8096b2e4647ae007ff1bf4e6dee92806"}, + {file = "cython-3.0.12.tar.gz", hash = "sha256:b988bb297ce76c671e28c97d017b95411010f7c77fa6623dd0bb47eed1aee1bc"}, +] + +[[package]] +name = "docutils" +version = "0.21.2" +description = "Docutils -- Python Documentation Utilities" +optional = false +python-versions = ">=3.9" +groups = ["docs"] files = [ - {file = "Cython-3.0.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a846e0a38e2b24e9a5c5dc74b0e54c6e29420d88d1dafabc99e0fc0f3e338636"}, - {file = "Cython-3.0.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45523fdc2b78d79b32834cc1cc12dc2ca8967af87e22a3ee1bff20e77c7f5520"}, - {file = "Cython-3.0.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa0b7f3f841fe087410cab66778e2d3fb20ae2d2078a2be3dffe66c6574be39"}, - {file = "Cython-3.0.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e87294e33e40c289c77a135f491cd721bd089f193f956f7b8ed5aa2d0b8c558f"}, - {file = "Cython-3.0.8-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a1df7a129344b1215c20096d33c00193437df1a8fcca25b71f17c23b1a44f782"}, - {file = "Cython-3.0.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:13c2a5e57a0358da467d97667297bf820b62a1a87ae47c5f87938b9bb593acbd"}, - {file = "Cython-3.0.8-cp310-cp310-win32.whl", hash = "sha256:96b028f044f5880e3cb18ecdcfc6c8d3ce9d0af28418d5ab464509f26d8adf12"}, - {file = "Cython-3.0.8-cp310-cp310-win_amd64.whl", hash = "sha256:8140597a8b5cc4f119a1190f5a2228a84f5ca6d8d9ec386cfce24663f48b2539"}, - {file = "Cython-3.0.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aae26f9663e50caf9657148403d9874eea41770ecdd6caf381d177c2b1bb82ba"}, - {file = "Cython-3.0.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:547eb3cdb2f8c6f48e6865d5a741d9dd051c25b3ce076fbca571727977b28ac3"}, - {file = 
"Cython-3.0.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a567d4b9ba70b26db89d75b243529de9e649a2f56384287533cf91512705bee"}, - {file = "Cython-3.0.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:51d1426263b0e82fb22bda8ea60dc77a428581cc19e97741011b938445d383f1"}, - {file = "Cython-3.0.8-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c26daaeccda072459b48d211415fd1e5507c06bcd976fa0d5b8b9f1063467d7b"}, - {file = "Cython-3.0.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:289ce7838208211cd166e975865fd73b0649bf118170b6cebaedfbdaf4a37795"}, - {file = "Cython-3.0.8-cp311-cp311-win32.whl", hash = "sha256:c8aa05f5e17f8042a3be052c24f2edc013fb8af874b0bf76907d16c51b4e7871"}, - {file = "Cython-3.0.8-cp311-cp311-win_amd64.whl", hash = "sha256:000dc9e135d0eec6ecb2b40a5b02d0868a2f8d2e027a41b0fe16a908a9e6de02"}, - {file = "Cython-3.0.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:90d3fe31db55685d8cb97d43b0ec39ef614fcf660f83c77ed06aa670cb0e164f"}, - {file = "Cython-3.0.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e24791ddae2324e88e3c902a765595c738f19ae34ee66bfb1a6dac54b1833419"}, - {file = "Cython-3.0.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f020fa1c0552052e0660790b8153b79e3fc9a15dbd8f1d0b841fe5d204a6ae6"}, - {file = "Cython-3.0.8-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18bfa387d7a7f77d7b2526af69a65dbd0b731b8d941aaff5becff8e21f6d7717"}, - {file = "Cython-3.0.8-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:fe81b339cffd87c0069c6049b4d33e28bdd1874625ee515785bf42c9fdff3658"}, - {file = "Cython-3.0.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:80fd94c076e1e1b1ee40a309be03080b75f413e8997cddcf401a118879863388"}, - {file = "Cython-3.0.8-cp312-cp312-win32.whl", hash = 
"sha256:85077915a93e359a9b920280d214dc0cf8a62773e1f3d7d30fab8ea4daed670c"}, - {file = "Cython-3.0.8-cp312-cp312-win_amd64.whl", hash = "sha256:0cb2dcc565c7851f75d496f724a384a790fab12d1b82461b663e66605bec429a"}, - {file = "Cython-3.0.8-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:870d2a0a7e3cbd5efa65aecdb38d715ea337a904ea7bb22324036e78fb7068e7"}, - {file = "Cython-3.0.8-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e8f2454128974905258d86534f4fd4f91d2f1343605657ecab779d80c9d6d5e"}, - {file = "Cython-3.0.8-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1949d6aa7bc792554bee2b67a9fe41008acbfe22f4f8df7b6ec7b799613a4b3"}, - {file = "Cython-3.0.8-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9f2c6e1b8f3bcd6cb230bac1843f85114780bb8be8614855b1628b36bb510e0"}, - {file = "Cython-3.0.8-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:05d7eddc668ae7993643f32c7661f25544e791edb745758672ea5b1a82ecffa6"}, - {file = "Cython-3.0.8-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bfabe115deef4ada5d23c87bddb11289123336dcc14347011832c07db616dd93"}, - {file = "Cython-3.0.8-cp36-cp36m-win32.whl", hash = "sha256:0c38c9f0bcce2df0c3347285863621be904ac6b64c5792d871130569d893efd7"}, - {file = "Cython-3.0.8-cp36-cp36m-win_amd64.whl", hash = "sha256:6c46939c3983217d140999de7c238c3141f56b1ea349e47ca49cae899969aa2c"}, - {file = "Cython-3.0.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:115f0a50f752da6c99941b103b5cb090da63eb206abbc7c2ad33856ffc73f064"}, - {file = "Cython-3.0.8-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9c0f29246734561c90f36e70ed0506b61aa3d044e4cc4cba559065a2a741fae"}, - {file = "Cython-3.0.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ab75242869ff71e5665fe5c96f3378e79e792fa3c11762641b6c5afbbbbe026"}, - {file = 
"Cython-3.0.8-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6717c06e9cfc6c1df18543cd31a21f5d8e378a40f70c851fa2d34f0597037abc"}, - {file = "Cython-3.0.8-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:9d3f74388db378a3c6fd06e79a809ed98df3f56484d317b81ee762dbf3c263e0"}, - {file = "Cython-3.0.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ae7ac561fd8253a9ae96311e91d12af5f701383564edc11d6338a7b60b285a6f"}, - {file = "Cython-3.0.8-cp37-cp37m-win32.whl", hash = "sha256:97b2a45845b993304f1799664fa88da676ee19442b15fdcaa31f9da7e1acc434"}, - {file = "Cython-3.0.8-cp37-cp37m-win_amd64.whl", hash = "sha256:9e2be2b340fea46fb849d378f9b80d3c08ff2e81e2bfbcdb656e2e3cd8c6b2dc"}, - {file = "Cython-3.0.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2cde23c555470db3f149ede78b518e8274853745289c956a0e06ad8d982e4db9"}, - {file = "Cython-3.0.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7990ca127e1f1beedaf8fc8bf66541d066ef4723ad7d8d47a7cbf842e0f47580"}, - {file = "Cython-3.0.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b983c8e6803f016146c26854d9150ddad5662960c804ea7f0c752c9266752f0"}, - {file = "Cython-3.0.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a973268d7ca1a2bdf78575e459a94a78e1a0a9bb62a7db0c50041949a73b02ff"}, - {file = "Cython-3.0.8-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:61a237bc9dd23c7faef0fcfce88c11c65d0c9bb73c74ccfa408b3a012073c20e"}, - {file = "Cython-3.0.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3a3d67f079598af49e90ff9655bf85bd358f093d727eb21ca2708f467c489cae"}, - {file = "Cython-3.0.8-cp38-cp38-win32.whl", hash = "sha256:17a642bb01a693e34c914106566f59844b4461665066613913463a719e0dd15d"}, - {file = "Cython-3.0.8-cp38-cp38-win_amd64.whl", hash = "sha256:2cdfc32252f3b6dc7c94032ab744dcedb45286733443c294d8f909a4854e7f83"}, - {file = 
"Cython-3.0.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fa97893d99385386925d00074654aeae3a98867f298d1e12ceaf38a9054a9bae"}, - {file = "Cython-3.0.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f05c0bf9d085c031df8f583f0d506aa3be1692023de18c45d0aaf78685bbb944"}, - {file = "Cython-3.0.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de892422582f5758bd8de187e98ac829330ec1007bc42c661f687792999988a7"}, - {file = "Cython-3.0.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:314f2355a1f1d06e3c431eaad4708cf10037b5e91e4b231d89c913989d0bdafd"}, - {file = "Cython-3.0.8-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:78825a3774211e7d5089730f00cdf7f473042acc9ceb8b9eeebe13ed3a5541de"}, - {file = "Cython-3.0.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:df8093deabc55f37028190cf5e575c26aad23fc673f34b85d5f45076bc37ce39"}, - {file = "Cython-3.0.8-cp39-cp39-win32.whl", hash = "sha256:1aca1b97e0095b3a9a6c33eada3f661a4ed0d499067d121239b193e5ba3bb4f0"}, - {file = "Cython-3.0.8-cp39-cp39-win_amd64.whl", hash = "sha256:16873d78be63bd38ffb759da7ab82814b36f56c769ee02b1d5859560e4c3ac3c"}, - {file = "Cython-3.0.8-py2.py3-none-any.whl", hash = "sha256:171b27051253d3f9108e9759e504ba59ff06e7f7ba944457f94deaf9c21bf0b6"}, - {file = "Cython-3.0.8.tar.gz", hash = "sha256:8333423d8fd5765e7cceea3a9985dd1e0a5dfeb2734629e1a2ed2d6233d39de6"}, + {file = "docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2"}, + {file = "docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f"}, ] [[package]] name = "exceptiongroup" -version = "1.2.0" +version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" +groups = ["dev"] +markers = "python_version < \"3.11\"" files = [ - {file = "exceptiongroup-1.2.0-py3-none-any.whl", 
hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, - {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, ] [package.extras] test = ["pytest (>=6)"] +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +groups = ["docs"] +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "ifaddr" version = "0.2.0" description = "Cross-platform network interface and IP address enumeration library" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "ifaddr-0.2.0-py3-none-any.whl", hash = "sha256:085e0305cfe6f16ab12d72e2024030f5d52674afad6911bb1eee207177b8a748"}, {file = "ifaddr-0.2.0.tar.gz", hash = "sha256:cc0cbfcaabf765d44595825fb96a99bb12c79716b73b44330ea38ee2b0c4aed4"}, ] +[[package]] +name = "imagesize" +version = "1.4.1" +description = "Getting image size from png/jpeg/jpeg2000/gif file" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["docs"] +files = [ + {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"}, + {file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"}, +] + +[[package]] +name = 
"importlib-metadata" +version = "8.6.1" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.9" +groups = ["dev", "docs"] +markers = "python_version < \"3.10\"" +files = [ + {file = "importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e"}, + {file = "importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580"}, +] + +[package.dependencies] +zipp = ">=3.20" + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +perf = ["ipython"] +test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["pytest-mypy"] + [[package]] name = "iniconfig" version = "2.0.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "jinja2" +version = "3.1.6" +description = "A very fast and expressive template engine." 
+optional = false +python-versions = ">=3.7" +groups = ["docs"] +files = [ + {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, + {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + +[[package]] +name = "markupsafe" +version = "3.0.2" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = 
"MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = 
"MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = 
"sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", 
hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = 
"sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + [[package]] name = "packaging" -version = "23.2" +version = "24.2" description = "Core utilities for Python packages" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" +groups = ["dev", "docs"] files = [ - {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, - {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, + {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, ] [[package]] name = "pluggy" -version = "1.4.0" +version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ - {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, - {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, ] [package.extras] dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] +[[package]] +name = "pycparser" +version = "2.22" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = 
"pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, +] + +[[package]] +name = "pygments" +version = "2.19.1" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.8" +groups = ["dev", "docs"] +files = [ + {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"}, + {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"}, +] + +[package.extras] +windows-terminal = ["colorama (>=0.4.6)"] + [[package]] name = "pytest" -version = "7.4.4" +version = "8.3.5" description = "pytest: simple powerful testing with Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" +groups = ["dev"] files = [ - {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, - {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, + {file = "pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820"}, + {file = "pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845"}, ] [package.dependencies] @@ -234,90 +689,442 @@ colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" packaging = "*" -pluggy = ">=0.12,<2.0" -tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} +pluggy = ">=1.5,<2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] -testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", 
"pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] [[package]] name = "pytest-asyncio" -version = "0.20.3" +version = "0.26.0" description = "Pytest support for asyncio" optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" +groups = ["dev"] files = [ - {file = "pytest-asyncio-0.20.3.tar.gz", hash = "sha256:83cbf01169ce3e8eb71c6c278ccb0574d1a7a3bb8eaaf5e50e0ad342afb33b36"}, - {file = "pytest_asyncio-0.20.3-py3-none-any.whl", hash = "sha256:f129998b209d04fcc65c96fc85c11e5316738358909a8399e93be553d7656442"}, + {file = "pytest_asyncio-0.26.0-py3-none-any.whl", hash = "sha256:7b51ed894f4fbea1340262bdae5135797ebbe21d8638978e35d31c6d19f72fb0"}, + {file = "pytest_asyncio-0.26.0.tar.gz", hash = "sha256:c4df2a697648241ff39e7f0e4a73050b03f123f760673956cf0d72a4990e312f"}, ] [package.dependencies] -pytest = ">=6.1.0" +pytest = ">=8.2,<9" +typing-extensions = {version = ">=4.12", markers = "python_version < \"3.10\""} [package.extras] -docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] -testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + +[[package]] +name = "pytest-codspeed" +version = "3.2.0" +description = "Pytest plugin to create CodSpeed benchmarks" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pytest_codspeed-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c5165774424c7ab8db7e7acdb539763a0e5657996effefdf0664d7fd95158d34"}, + {file = "pytest_codspeed-3.2.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9bd55f92d772592c04a55209950c50880413ae46876e66bd349ef157075ca26c"}, + {file = "pytest_codspeed-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:4cf6f56067538f4892baa8d7ab5ef4e45bb59033be1ef18759a2c7fc55b32035"}, + {file = "pytest_codspeed-3.2.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:39a687b05c3d145642061b45ea78e47e12f13ce510104d1a2cda00eee0e36f58"}, + {file = "pytest_codspeed-3.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:46a1afaaa1ac4c2ca5b0700d31ac46d80a27612961d031067d73c6ccbd8d3c2b"}, + {file = "pytest_codspeed-3.2.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c48ce3af3dfa78413ed3d69d1924043aa1519048dbff46edccf8f35a25dab3c2"}, + {file = "pytest_codspeed-3.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:66692506d33453df48b36a84703448cb8b22953eea51f03fbb2eb758dc2bdc4f"}, + {file = "pytest_codspeed-3.2.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:479774f80d0bdfafa16112700df4dbd31bf2a6757fac74795fd79c0a7b3c389b"}, + {file = "pytest_codspeed-3.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:109f9f4dd1088019c3b3f887d003b7d65f98a7736ca1d457884f5aa293e8e81c"}, + {file = "pytest_codspeed-3.2.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e2f69a03b52c9bb041aec1b8ee54b7b6c37a6d0a948786effa4c71157765b6da"}, + {file = "pytest_codspeed-3.2.0-py3-none-any.whl", hash = "sha256:54b5c2e986d6a28e7b0af11d610ea57bd5531cec8326abe486f1b55b09d91c39"}, + {file = "pytest_codspeed-3.2.0.tar.gz", hash = "sha256:f9d1b1a3b2c69cdc0490a1e8b1ced44bffbd0e8e21d81a7160cfdd923f6e8155"}, +] + +[package.dependencies] +cffi = ">=1.17.1" +importlib-metadata = {version = ">=8.5.0", markers = "python_version < \"3.10\""} +pytest = ">=3.8" +rich = ">=13.8.1" + +[package.extras] +compat = ["pytest-benchmark (>=5.0.0,<5.1.0)", "pytest-xdist (>=3.6.1,<3.7.0)"] +lint = 
["mypy (>=1.11.2,<1.12.0)", "ruff (>=0.6.5,<0.7.0)"] +test = ["pytest (>=7.0,<8.0)", "pytest-cov (>=4.0.0,<4.1.0)"] [[package]] name = "pytest-cov" -version = "4.1.0" +version = "6.1.1" description = "Pytest plugin for measuring coverage." optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" +groups = ["dev"] files = [ - {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"}, - {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"}, + {file = "pytest_cov-6.1.1-py3-none-any.whl", hash = "sha256:bddf29ed2d0ab6f4df17b4c55b0a657287db8684af9c42ea546b21b1041b3dde"}, + {file = "pytest_cov-6.1.1.tar.gz", hash = "sha256:46935f7aaefba760e716c2ebfbe1c216240b9592966e7da99ea8292d4d3e2a0a"}, ] [package.dependencies] -coverage = {version = ">=5.2.1", extras = ["toml"]} +coverage = {version = ">=7.5", extras = ["toml"]} pytest = ">=4.6" [package.extras] -testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] [[package]] name = "pytest-timeout" -version = "2.2.0" +version = "2.4.0" description = "pytest plugin to abort hanging tests" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ - {file = "pytest-timeout-2.2.0.tar.gz", hash = "sha256:3b0b95dabf3cb50bac9ef5ca912fa0cfc286526af17afc806824df20c2f72c90"}, - {file = "pytest_timeout-2.2.0-py3-none-any.whl", hash = "sha256:bde531e096466f49398a59f2dde76fa78429a09a12411466f88a07213e220de2"}, + {file = "pytest_timeout-2.4.0-py3-none-any.whl", hash = "sha256:c42667e5cdadb151aeb5b26d114aff6bdf5a907f176a007a30b940d3d865b5c2"}, + {file = "pytest_timeout-2.4.0.tar.gz", hash = "sha256:7e68e90b01f9eff71332b25001f85c75495fc4e3a836701876183c4bcfd0540a"}, ] [package.dependencies] -pytest = ">=5.0.0" +pytest = ">=7.0.0" + +[[package]] +name = "requests" +version = "2.32.3" 
+description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rich" +version = "13.9.4" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.8.0" +groups = ["dev"] +files = [ + {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"}, + {file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.11\""} + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "setuptools" -version = "65.7.0" +version = "80.7.1" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "setuptools-80.7.1-py3-none-any.whl", hash = "sha256:ca5cc1069b85dc23070a6628e6bcecb3292acac802399c7f8edc0100619f9009"}, + {file = "setuptools-80.7.1.tar.gz", hash = "sha256:f6ffc5f0142b1bd8d0ca94ee91b30c0ca862ffd50826da1ea85258a06fd94552"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""] +core = 
["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"] + +[[package]] +name = "snowballstemmer" +version = "2.2.0" +description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." 
+optional = false +python-versions = "*" +groups = ["docs"] +files = [ + {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, + {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, +] + +[[package]] +name = "sphinx" +version = "7.4.7" +description = "Python documentation generator" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "sphinx-7.4.7-py3-none-any.whl", hash = "sha256:c2419e2135d11f1951cd994d6eb18a1835bd8fdd8429f9ca375dc1f3281bd239"}, + {file = "sphinx-7.4.7.tar.gz", hash = "sha256:242f92a7ea7e6c5b406fdc2615413890ba9f699114a9c09192d7dfead2ee9cfe"}, +] + +[package.dependencies] +alabaster = ">=0.7.14,<0.8.0" +babel = ">=2.13" +colorama = {version = ">=0.4.6", markers = "sys_platform == \"win32\""} +docutils = ">=0.20,<0.22" +imagesize = ">=1.3" +importlib-metadata = {version = ">=6.0", markers = "python_version < \"3.10\""} +Jinja2 = ">=3.1" +packaging = ">=23.0" +Pygments = ">=2.17" +requests = ">=2.30.0" +snowballstemmer = ">=2.2" +sphinxcontrib-applehelp = "*" +sphinxcontrib-devhelp = "*" +sphinxcontrib-htmlhelp = ">=2.0.0" +sphinxcontrib-jsmath = "*" +sphinxcontrib-qthelp = "*" +sphinxcontrib-serializinghtml = ">=1.1.9" +tomli = {version = ">=2", markers = "python_version < \"3.11\""} + +[package.extras] +docs = ["sphinxcontrib-websupport"] +lint = ["flake8 (>=6.0)", "importlib-metadata (>=6.0)", "mypy (==1.10.1)", "pytest (>=6.0)", "ruff (==0.5.2)", "sphinx-lint (>=0.9)", "tomli (>=2)", "types-docutils (==0.21.0.20240711)", "types-requests (>=2.30.0)"] +test = ["cython (>=3.0)", "defusedxml (>=0.7.1)", "pytest (>=8.0)", "setuptools (>=70.0)", "typing_extensions (>=4.9)"] + +[[package]] +name = "sphinx-rtd-theme" +version = "3.0.2" +description = "Read the Docs theme for Sphinx" +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = 
"sphinx_rtd_theme-3.0.2-py2.py3-none-any.whl", hash = "sha256:422ccc750c3a3a311de4ae327e82affdaf59eb695ba4936538552f3b00f4ee13"}, + {file = "sphinx_rtd_theme-3.0.2.tar.gz", hash = "sha256:b7457bc25dda723b20b086a670b9953c859eab60a2a03ee8eb2bb23e176e5f85"}, +] + +[package.dependencies] +docutils = ">0.18,<0.22" +sphinx = ">=6,<9" +sphinxcontrib-jquery = ">=4,<5" + +[package.extras] +dev = ["bump2version", "transifex-client", "twine", "wheel"] + +[[package]] +name = "sphinxcontrib-applehelp" +version = "2.0.0" +description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5"}, + {file = "sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["pytest"] + +[[package]] +name = "sphinxcontrib-devhelp" +version = "2.0.0" +description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp documents" +optional = false +python-versions = ">=3.9" +groups = ["docs"] files = [ - {file = "setuptools-65.7.0-py3-none-any.whl", hash = "sha256:8ab4f1dbf2b4a65f7eec5ad0c620e84c34111a68d3349833494b9088212214dd"}, - {file = "setuptools-65.7.0.tar.gz", hash = "sha256:4d3c92fac8f1118bb77a22181355e29c239cabfe2b9effdaa665c66b711136d7"}, + {file = "sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2"}, + {file = "sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx 
(>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["pytest"] + +[[package]] +name = "sphinxcontrib-htmlhelp" +version = "2.1.0" +description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8"}, + {file = "sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["html5lib", "pytest"] + +[[package]] +name = "sphinxcontrib-jquery" +version = "4.1" +description = "Extension to include jQuery on newer Sphinx releases" +optional = false +python-versions = ">=2.7" +groups = ["docs"] +files = [ + {file = "sphinxcontrib-jquery-4.1.tar.gz", hash = "sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a"}, + {file = "sphinxcontrib_jquery-4.1-py2.py3-none-any.whl", hash = 
"sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae"}, +] + +[package.dependencies] +Sphinx = ">=1.8" + +[[package]] +name = "sphinxcontrib-jsmath" +version = "1.0.1" +description = "A sphinx extension which renders display math in HTML via JavaScript" +optional = false +python-versions = ">=3.5" +groups = ["docs"] +files = [ + {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"}, + {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"}, +] + +[package.extras] +test = ["flake8", "mypy", "pytest"] + +[[package]] +name = "sphinxcontrib-qthelp" +version = "2.0.0" +description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp documents" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb"}, + {file = "sphinxcontrib_qthelp-2.0.0.tar.gz", hash = "sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["defusedxml (>=0.7.1)", "pytest"] + +[[package]] +name = "sphinxcontrib-serializinghtml" +version = "2.0.0" +description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331"}, + {file = "sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d"}, +] + +[package.extras] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx 
(>=5)"] +test = ["pytest"] [[package]] name = "tomli" -version = "2.0.1" +version = "2.2.1" description = "A lil' TOML parser" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" +groups = ["dev", "docs"] +files = [ + {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, + {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, + {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, + {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, + {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, + {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, + {file = 
"tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, + {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, + {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, + {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, + {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, +] +markers = {dev = "python_full_version <= \"3.11.0a6\"", docs = "python_version < \"3.11\""} + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.11\"" files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] +[[package]] +name = 
"urllib3" +version = "2.3.0" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"}, + {file = "urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "zipp" +version = "3.21.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.9" +groups = ["dev", "docs"] +markers = "python_version < \"3.10\"" +files = [ + {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, + {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] + [metadata] -lock-version = "2.0" -python-versions = "^3.8" -content-hash = "26c7f2ec91a34a0661a5511d2ade43511d80dd4f89e1aefbb59c9fafc2c92df2" +lock-version = "2.1" +python-versions = "^3.9" +content-hash = "972988da838067a7f2d12b8212ce54ba946cb38a4f63576a520dd1ed40ac3e9b" diff --git 
a/pyproject.toml b/pyproject.toml index 1be7d81a9..d47a19667 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,9 +1,9 @@ [tool.poetry] name = "zeroconf" -version = "0.132.2" +version = "0.147.0" description = "A pure python implementation of multicast DNS service discovery" authors = ["Paul Scott-Murphy", "William McBrine", "Jakub Stasiak", "J. Nick Koston"] -license = "LGPL" +license = "LGPL-2.1-or-later" readme = "README.rst" repository = "https://github.com/python-zeroconf/python-zeroconf" documentation = "https://python-zeroconf.readthedocs.io" @@ -11,16 +11,15 @@ classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', - 'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)', 'Operating System :: POSIX', 'Operating System :: POSIX :: Linux', 'Operating System :: MacOS :: MacOS X', 'Topic :: Software Development :: Libraries', - 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', + 'Programming Language :: Python :: 3.13', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', ] @@ -33,6 +32,8 @@ include = [ { path = "docs", format = "sdist" }, { path = "tests", format = "sdist" }, ] +# Make sure we don't package temporary C files generated by the build process +exclude = [ "**/*.c" ] [tool.poetry.urls] "Bug Tracker" = "https://github.com/python-zeroconf/python-zeroconf/issues" @@ -44,28 +45,138 @@ script = "build_ext.py" [tool.semantic_release] branch = "master" -version_toml = "pyproject.toml:tool.poetry.version" -version_variable = "src/zeroconf/__init__.py:__version__" +version_toml = ["pyproject.toml:tool.poetry.version"] +version_variables = [ + "src/zeroconf/__init__.py:__version__" +] build_command = "pip install poetry && 
poetry build" tag_format = "{version}" +[tool.semantic_release.changelog] +exclude_commit_patterns = [ + "chore*", + "ci*", +] + +[tool.semantic_release.changelog.environment] +keep_trailing_newline = true + +[tool.semantic_release.branches.master] +match = "master" + +[tool.semantic_release.branches.noop] +match = "(?!master$)" +prerelease = true + [tool.poetry.dependencies] -python = "^3.8" -async-timeout = {version = ">=3.0.0", python = "<3.11"} +python = "^3.9" ifaddr = ">=0.1.7" [tool.poetry.group.dev.dependencies] -pytest = "^7.2.0" -pytest-cov = "^4.0.0" -pytest-asyncio = "^0.20.3" +pytest = ">=7.2,<9.0" +pytest-cov = ">=4,<7" +pytest-asyncio = ">=0.20.3,<0.27.0" cython = "^3.0.5" -setuptools = "^65.6.3" +setuptools = ">=65.6.3,<81.0.0" pytest-timeout = "^2.1.0" +pytest-codspeed = "^3.1.0" -[tool.black] +[tool.poetry.group.docs.dependencies] +sphinx = "^7.4.7 || ^8.1.3" +sphinx-rtd-theme = "^3.0.2" + +[tool.ruff] +target-version = "py39" line-length = 110 -target_version = ['py37', 'py38', 'py39', 'py310', 'py311'] -skip_string_normalization = true + +[tool.ruff.lint] +ignore = [ + "S101", # use of assert + "S104", # S104 Possible binding to all interfaces + "PLR0912", # too many to fix right now + "TID252", # skip + "PLR0913", # too late to make changes here + "PLR0911", # would be breaking change + "TRY003", # too many to fix + "SLF001", # design choice + "PLR2004" , # too many to fix + "PGH004", # too many to fix + "PGH003", # too many to fix + "SIM110", # this is slower + "PYI034", # enable when we drop Py3.10 + "PYI032", # breaks Cython + "PYI041", # breaks Cython + "PERF401", # Cython: closures inside cpdef functions not yet supported +] +select = [ + "ASYNC", # async rules + "B", # flake8-bugbear + "C4", # flake8-comprehensions + "S", # flake8-bandit + "F", # pyflake + "E", # pycodestyle + "W", # pycodestyle + "UP", # pyupgrade + "I", # isort + "RUF", # ruff specific + "FLY", # flynt + "G", # flake8-logging-format , + "PERF", # Perflint + "PGH", # 
pygrep-hooks + "PIE", # flake8-pie + "PL", # pylint + "PT", # flake8-pytest-style + "PTH", # flake8-pathlib + "PYI", # flake8-pyi + "RET", # flake8-return + "RSE", # flake8-raise , + "SIM", # flake8-simplify + "SLF", # flake8-self + "SLOT", # flake8-slots + "T100", # Trace found: {name} used + "T20", # flake8-print + "TID", # Tidy imports + "TRY", # tryceratops +] + +[tool.ruff.lint.per-file-ignores] +"tests/**/*" = [ + "D100", + "D101", + "D102", + "D103", + "D104", + "S101", + "SLF001", + "PLR2004", # too many to fix right now + "PT011", # too many to fix right now + "PT006", # too many to fix right now + "PGH003", # too many to fix right now + "PT007", # too many to fix right now + "PT027", # too many to fix right now + "PLW0603" , # too many to fix right now + "PLR0915", # too many to fix right now + "FLY002", # too many to fix right now + "PT018", # too many to fix right now + "PLR0124", # too many to fix right now + "SIM202" , # too many to fix right now + "PT012" , # too many to fix right now + "TID252", # too many to fix right now + "PLR0913", # skip this one + "SIM102" , # too many to fix right now + "SIM108", # too many to fix right now + "T201", # too many to fix right now + "PT004", # nice to have +] +"bench/**/*" = [ + "T201", # intended +] +"examples/**/*" = [ + "T201", # intended +] +"setup.py" = ["D100"] +"conftest.py" = ["D100"] +"docs/conf.py" = ["D100"] [tool.pylint.BASIC] class-const-naming-style = "any" @@ -122,15 +233,21 @@ profile = "black" known_first_party = ["zeroconf", "tests"] [tool.mypy] +warn_unused_configs = true check_untyped_defs = true disallow_any_generics = false # turn this on when we drop 3.7/3.8 support disallow_incomplete_defs = true disallow_untyped_defs = true +warn_incomplete_stub = true mypy_path = "src/" -no_implicit_optional = true show_error_codes = true +warn_redundant_casts = false # Activate for cleanup. 
+warn_return_any = true warn_unreachable = true -warn_unused_ignores = true +warn_unused_ignores = false # Does not always work properly, activate for cleanup. +extra_checks = true +strict_equality = true +strict_bytes = true # Will be true by default with mypy v2 release. exclude = [ 'docs/*', 'bench/*', @@ -155,6 +272,8 @@ requires = ['setuptools>=65.4.1', 'wheel', 'Cython>=3.0.8', "poetry-core>=1.5.2" build-backend = "poetry.core.masonry.api" [tool.codespell] -skip = '*.po,*.ts,./tests,./bench' -count = '' -quiet-level = 3 +ignore-words-list = ["additionals", "HASS"] + +[tool.cython-lint] +max-line-length = 110 +ignore = ['E501'] # too many to fix right now diff --git a/requirements-dev.txt b/requirements-dev.txt deleted file mode 100644 index 1054014ed..000000000 --- a/requirements-dev.txt +++ /dev/null @@ -1,16 +0,0 @@ -async_timeout>=4.0.1 -autopep8 -black;implementation_name=="cpython" -bump2version -coverage -flake8 -flake8-import-order -ifaddr -mypy;implementation_name=="cpython" -pep8-naming>=0.12.0 -pylint -pytest -pytest-asyncio -pytest-cov -pytest-timeout -readme_renderer diff --git a/src/zeroconf/__init__.py b/src/zeroconf/__init__.py index 4e6fb1574..439ffceb6 100644 --- a/src/zeroconf/__init__.py +++ b/src/zeroconf/__init__.py @@ -1,26 +1,26 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - This module provides a framework for the use of DNS Service Discovery - using IP multicast. +This module provides a framework for the use of DNS Service Discovery +using IP multicast. - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. 
+This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ -import sys +from __future__ import annotations from ._cache import DNSCache # noqa # import needed for backwards compat from ._core import Zeroconf @@ -59,6 +59,9 @@ ) from ._services.browser import ServiceBrowser from ._services.info import ( # noqa # import needed for backwards compat + AddressResolver, + AddressResolverIPv4, + AddressResolverIPv6, ServiceInfo, instance_name_from_service_info, ) @@ -83,43 +86,34 @@ millis_to_seconds, ) -__author__ = 'Paul Scott-Murphy, William McBrine' -__maintainer__ = 'Jakub Stasiak ' -__version__ = '0.132.2' -__license__ = 'LGPL' +__author__ = "Paul Scott-Murphy, William McBrine" +__maintainer__ = "Jakub Stasiak " +__version__ = "0.147.0" +__license__ = "LGPL" __all__ = [ - "__version__", - "Zeroconf", - "ServiceInfo", - "ServiceBrowser", - "ServiceListener", + 
"AbstractMethodException", + "BadTypeInNameException", "DNSQuestionType", - "InterfaceChoice", - "ServiceStateChange", - "IPVersion", - "ZeroconfServiceTypes", - "RecordUpdate", - "RecordUpdateListener", - "current_time_millis", # Exceptions "Error", - "AbstractMethodException", - "BadTypeInNameException", "EventLoopBlocked", + "IPVersion", "IncomingDecodeError", + "InterfaceChoice", "NamePartTooLongException", "NonUniqueNameException", "NotRunningException", + "RecordUpdate", + "RecordUpdateListener", + "ServiceBrowser", + "ServiceInfo", + "ServiceListener", "ServiceNameAlreadyRegistered", + "ServiceStateChange", + "Zeroconf", + "ZeroconfServiceTypes", + "__version__", + "current_time_millis", ] - -if sys.version_info <= (3, 6): # pragma: no cover - raise ImportError( # pragma: no cover - ''' -Python version > 3.6 required for python-zeroconf. -If you need support for Python 2 or Python 3.3-3.4 please use version 19.1 -If you need support for Python 3.5 please use version 0.28.0 - ''' - ) diff --git a/src/zeroconf/_cache.pxd b/src/zeroconf/_cache.pxd index af27a1d51..05a40c0f3 100644 --- a/src/zeroconf/_cache.pxd +++ b/src/zeroconf/_cache.pxd @@ -11,11 +11,17 @@ from ._dns cimport ( DNSText, ) +cdef object heappop +cdef object heappush +cdef object heapify cdef object _UNIQUE_RECORD_TYPES -cdef object _TYPE_PTR +cdef unsigned int _TYPE_PTR cdef cython.uint _ONE_SECOND +cdef unsigned int _MIN_SCHEDULED_RECORD_EXPIRATION + +@cython.locals(record_cache=dict) cdef _remove_key(cython.dict cache, object key, DNSRecord record) @@ -23,51 +29,59 @@ cdef class DNSCache: cdef public cython.dict cache cdef public cython.dict service_cache + cdef public list _expire_heap + cdef public dict _expirations cpdef bint async_add_records(self, object entries) cpdef void async_remove_records(self, object entries) - @cython.locals( - store=cython.dict, - ) + @cython.locals(store=cython.dict) cpdef DNSRecord async_get_unique(self, DNSRecord entry) - @cython.locals( - record=DNSRecord, - 
) + @cython.locals(record=DNSRecord, when_record=tuple, when=double) cpdef list async_expire(self, double now) - @cython.locals( - records=cython.dict, - record=DNSRecord, - ) - cpdef list async_all_by_details(self, str name, object type_, object class_) + @cython.locals(records=cython.dict, record=DNSRecord) + cpdef list async_all_by_details(self, str name, unsigned int type_, unsigned int class_) - cpdef cython.dict async_entries_with_name(self, str name) + cpdef list async_entries_with_name(self, str name) - cpdef cython.dict async_entries_with_server(self, str name) + cpdef list async_entries_with_server(self, str name) - @cython.locals( - cached_entry=DNSRecord, - ) - cpdef DNSRecord get_by_details(self, str name, object type_, object class_) + @cython.locals(cached_entry=DNSRecord, records=dict) + cpdef DNSRecord get_by_details(self, str name, unsigned int type_, unsigned int class_) - @cython.locals( - records=cython.dict, - entry=DNSRecord, - ) - cpdef cython.list get_all_by_details(self, str name, object type_, object class_) + @cython.locals(records=cython.dict, entry=DNSRecord) + cpdef cython.list get_all_by_details(self, str name, unsigned int type_, unsigned int class_) @cython.locals( store=cython.dict, + service_store=cython.dict, + service_record=DNSService, + when=object, + new=bint ) cdef bint _async_add(self, DNSRecord record) + @cython.locals(service_record=DNSService) cdef void _async_remove(self, DNSRecord record) - @cython.locals( - record=DNSRecord, - created_double=double, - ) + @cython.locals(record=DNSRecord, created_double=double) cpdef void async_mark_unique_records_older_than_1s_to_expire(self, cython.set unique_types, object answers, double now) + + @cython.locals(entries=dict) + cpdef list entries_with_name(self, str name) + + @cython.locals(entries=dict) + cpdef list entries_with_server(self, str server) + + @cython.locals(record=DNSRecord, now=double) + cpdef current_entry_with_name_and_alias(self, str name, str alias) + + cpdef 
void _async_set_created_ttl( + self, + DNSRecord record, + double now, + unsigned int ttl + ) diff --git a/src/zeroconf/_cache.py b/src/zeroconf/_cache.py index 35a13cf64..c7ca8472b 100644 --- a/src/zeroconf/_cache.py +++ b/src/zeroconf/_cache.py @@ -1,26 +1,30 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. 
+ +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ -from typing import Dict, Iterable, List, Optional, Set, Tuple, Union, cast +from __future__ import annotations + +from collections.abc import Iterable +from heapq import heapify, heappop, heappush +from typing import Union, cast from ._dns import ( DNSAddress, @@ -37,20 +41,26 @@ _UNIQUE_RECORD_TYPES = (DNSAddress, DNSHinfo, DNSPointer, DNSText, DNSService) _UniqueRecordsType = Union[DNSAddress, DNSHinfo, DNSPointer, DNSText, DNSService] -_DNSRecordCacheType = Dict[str, Dict[DNSRecord, DNSRecord]] +_DNSRecordCacheType = dict[str, dict[DNSRecord, DNSRecord]] _DNSRecord = DNSRecord _str = str _float = float _int = int +# The minimum number of scheduled record expirations before we start cleaning up +# the expiration heap. This is a performance optimization to avoid cleaning up the +# heap too often when there are only a few scheduled expirations. +_MIN_SCHEDULED_RECORD_EXPIRATION = 100 + def _remove_key(cache: _DNSRecordCacheType, key: _str, record: _DNSRecord) -> None: """Remove a key from a DNSRecord cache This function must be run in from event loop. """ - del cache[key][record] - if not cache[key]: + record_cache = cache[key] + del record_cache[record] + if not record_cache: del cache[key] @@ -59,6 +69,8 @@ class DNSCache: def __init__(self) -> None: self.cache: _DNSRecordCacheType = {} + self._expire_heap: list[tuple[float, DNSRecord]] = [] + self._expirations: dict[DNSRecord, float] = {} self.service_cache: _DNSRecordCacheType = {} # Functions prefixed with async_ are NOT threadsafe and must @@ -77,11 +89,21 @@ def _async_add(self, record: _DNSRecord) -> bool: # replaces any existing records that are __eq__ to each other which # removes the risk that accessing the cache from the wrong # direction would return the old incorrect entry. 
- store = self.cache.setdefault(record.key, {}) + if (store := self.cache.get(record.key)) is None: + store = self.cache[record.key] = {} new = record not in store and not isinstance(record, DNSNsec) store[record] = record + when = record.created + (record.ttl * 1000) + if self._expirations.get(record) != when: + # Avoid adding duplicates to the heap + heappush(self._expire_heap, (when, record)) + self._expirations[record] = when + if isinstance(record, DNSService): - self.service_cache.setdefault(record.server_key, {})[record] = record + service_record = record + if (service_store := self.service_cache.get(service_record.server_key)) is None: + service_store = self.service_cache[service_record.server_key] = {} + service_store[service_record] = service_record return new def async_add_records(self, entries: Iterable[DNSRecord]) -> bool: @@ -103,8 +125,10 @@ def _async_remove(self, record: _DNSRecord) -> None: This function must be run in from event loop. """ if isinstance(record, DNSService): - _remove_key(self.service_cache, record.server_key, record) + service_record = record + _remove_key(self.service_cache, service_record.server_key, service_record) _remove_key(self.cache, record.key, record) + self._expirations.pop(record, None) def async_remove_records(self, entries: Iterable[DNSRecord]) -> None: """Remove multiple records. @@ -114,16 +138,54 @@ def async_remove_records(self, entries: Iterable[DNSRecord]) -> None: for entry in entries: self._async_remove(entry) - def async_expire(self, now: _float) -> List[DNSRecord]: + def async_expire(self, now: _float) -> list[DNSRecord]: """Purge expired entries from the cache. This function must be run in from event loop. + + :param now: The current time in milliseconds. 
""" - expired = [record for records in self.cache.values() for record in records if record.is_expired(now)] + if not (expire_heap_len := len(self._expire_heap)): + return [] + + expired: list[DNSRecord] = [] + # Find any expired records and add them to the to-delete list + while self._expire_heap: + when_record = self._expire_heap[0] + when = when_record[0] + if when > now: + break + heappop(self._expire_heap) + # Check if the record hasn't been re-added to the heap + # with a different expiration time as it will be removed + # later when it reaches the top of the heap and its + # expiration time is met. + record = when_record[1] + if self._expirations.get(record) == when: + expired.append(record) + + # If the expiration heap grows larger than the number expirations + # times two, we clean it up to avoid keeping expired entries in + # the heap and consuming memory. We guard this with a minimum + # threshold to avoid cleaning up the heap too often when there are + # only a few scheduled expirations. + if ( + expire_heap_len > _MIN_SCHEDULED_RECORD_EXPIRATION + and expire_heap_len > len(self._expirations) * 2 + ): + # Remove any expired entries from the expiration heap + # that do not match the expiration time in the expirations + # as it means the record has been re-added to the heap + # with a different expiration time. + self._expire_heap = [ + entry for entry in self._expire_heap if self._expirations.get(entry[1]) == entry[0] + ] + heapify(self._expire_heap) + self.async_remove_records(expired) return expired - def async_get_unique(self, entry: _UniqueRecordsType) -> Optional[DNSRecord]: + def async_get_unique(self, entry: _UniqueRecordsType) -> DNSRecord | None: """Gets a unique entry by key. Will return None if there is no matching entry. 
@@ -135,7 +197,7 @@ def async_get_unique(self, entry: _UniqueRecordsType) -> Optional[DNSRecord]: return None return store.get(entry) - def async_all_by_details(self, name: _str, type_: _int, class_: _int) -> List[DNSRecord]: + def async_all_by_details(self, name: _str, type_: _int, class_: _int) -> list[DNSRecord]: """Gets all matching entries by details. This function is not thread-safe and must be called from @@ -143,45 +205,45 @@ def async_all_by_details(self, name: _str, type_: _int, class_: _int) -> List[DN """ key = name.lower() records = self.cache.get(key) - matches: List[DNSRecord] = [] + matches: list[DNSRecord] = [] if records is None: return matches - for record in records: + for record in records.values(): if type_ == record.type and class_ == record.class_: matches.append(record) return matches - def async_entries_with_name(self, name: str) -> Dict[DNSRecord, DNSRecord]: + def async_entries_with_name(self, name: str) -> list[DNSRecord]: """Returns a dict of entries whose key matches the name. This function is not threadsafe and must be called from the event loop. """ - return self.cache.get(name.lower()) or {} + return self.entries_with_name(name) - def async_entries_with_server(self, name: str) -> Dict[DNSRecord, DNSRecord]: + def async_entries_with_server(self, name: str) -> list[DNSRecord]: """Returns a dict of entries whose key matches the server. This function is not threadsafe and must be called from the event loop. """ - return self.service_cache.get(name.lower()) or {} + return self.entries_with_server(name) # The below functions are threadsafe and do not need to be run in the # event loop, however they all make copies so they significantly - # inefficent + # inefficient. - def get(self, entry: DNSEntry) -> Optional[DNSRecord]: + def get(self, entry: DNSEntry) -> DNSRecord | None: """Gets an entry by key. 
Will return None if there is no matching entry.""" if isinstance(entry, _UNIQUE_RECORD_TYPES): return self.cache.get(entry.key, {}).get(entry) - for cached_entry in reversed(list(self.cache.get(entry.key, []))): + for cached_entry in reversed(list(self.cache.get(entry.key, {}).values())): if entry.__eq__(cached_entry): return cached_entry return None - def get_by_details(self, name: str, type_: _int, class_: _int) -> Optional[DNSRecord]: + def get_by_details(self, name: str, type_: _int, class_: _int) -> DNSRecord | None: """Gets the first matching entry by details. Returns None if no entries match. Calling this function is not recommended as it will only @@ -197,28 +259,32 @@ def get_by_details(self, name: str, type_: _int, class_: _int) -> Optional[DNSRe records = self.cache.get(key) if records is None: return None - for cached_entry in reversed(list(records)): + for cached_entry in reversed(list(records.values())): if type_ == cached_entry.type and class_ == cached_entry.class_: return cached_entry return None - def get_all_by_details(self, name: str, type_: _int, class_: _int) -> List[DNSRecord]: + def get_all_by_details(self, name: str, type_: _int, class_: _int) -> list[DNSRecord]: """Gets all matching entries by details.""" key = name.lower() records = self.cache.get(key) if records is None: return [] - return [entry for entry in list(records) if type_ == entry.type and class_ == entry.class_] + return [entry for entry in list(records.values()) if type_ == entry.type and class_ == entry.class_] - def entries_with_server(self, server: str) -> List[DNSRecord]: + def entries_with_server(self, server: str) -> list[DNSRecord]: """Returns a list of entries whose server matches the name.""" - return list(self.service_cache.get(server.lower(), [])) + if entries := self.service_cache.get(server.lower()): + return list(entries.values()) + return [] - def entries_with_name(self, name: str) -> List[DNSRecord]: + def entries_with_name(self, name: str) -> list[DNSRecord]: 
"""Returns a list of entries whose key matches the name.""" - return list(self.cache.get(name.lower(), [])) + if entries := self.cache.get(name.lower()): + return list(entries.values()) + return [] - def current_entry_with_name_and_alias(self, name: str, alias: str) -> Optional[DNSRecord]: + def current_entry_with_name_and_alias(self, name: str, alias: str) -> DNSRecord | None: now = current_time_millis() for record in reversed(self.entries_with_name(name)): if ( @@ -229,12 +295,15 @@ def current_entry_with_name_and_alias(self, name: str, alias: str) -> Optional[D return record return None - def names(self) -> List[str]: + def names(self) -> list[str]: """Return a copy of the list of current cache names.""" return list(self.cache) def async_mark_unique_records_older_than_1s_to_expire( - self, unique_types: Set[Tuple[_str, _int, _int]], answers: Iterable[DNSRecord], now: _float + self, + unique_types: set[tuple[_str, _int, _int]], + answers: Iterable[DNSRecord], + now: _float, ) -> None: # rfc6762#section-10.2 para 2 # Since unique is set, all old records with that name, rrtype, @@ -246,4 +315,11 @@ def async_mark_unique_records_older_than_1s_to_expire( created_double = record.created if (now - created_double > _ONE_SECOND) and record not in answers_rrset: # Expire in 1s - record.set_created_ttl(now, 1) + self._async_set_created_ttl(record, now, 1) + + def _async_set_created_ttl(self, record: DNSRecord, now: _float, ttl: _int) -> None: + """Set the created time and ttl of a record.""" + # It would be better if we made a copy instead of mutating the record + # in place, but records currently don't have a copy method. 
+ record._set_created_ttl(now, ttl) + self._async_add(record) diff --git a/src/zeroconf/_core.py b/src/zeroconf/_core.py index cb488b4e8..5e3a7f465 100644 --- a/src/zeroconf/_core.py +++ b/src/zeroconf/_core.py @@ -1,31 +1,33 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. 
+ +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + import asyncio import logging import sys import threading +from collections.abc import Awaitable from types import TracebackType -from typing import Awaitable, Dict, List, Optional, Set, Tuple, Type, Union from ._cache import DNSCache from ._dns import DNSQuestion, DNSQuestionType @@ -53,8 +55,8 @@ get_running_loop, run_coro_with_timeout, shutdown_loop, - wait_event_or_timeout, wait_for_future_set_or_timeout, + wait_future_or_timeout, ) from ._utils.name import service_type_name from ._utils.net import ( @@ -84,10 +86,10 @@ _UNREGISTER_TIME, ) -# The maximum amont of time to delay a multicast +# The maximum amount of time to delay a multicast # response in order to aggregate answers _AGGREGATION_DELAY = 500 # ms -# The maximum amont of time to delay a multicast +# The maximum amount of time to delay a multicast # response in order to aggregate answers after # it has already been delayed to protect the network # from excessive traffic. 
We use a shorter time @@ -108,9 +110,9 @@ def async_send_with_transport( packet: bytes, packet_num: int, out: DNSOutgoing, - addr: Optional[str], + addr: str | None, port: int, - v6_flow_scope: Union[Tuple[()], Tuple[int, int]] = (), + v6_flow_scope: tuple[()] | tuple[int, int] = (), ) -> None: ipv6_socket = transport.is_ipv6 if addr is None: @@ -121,7 +123,7 @@ def async_send_with_transport( return if log_debug: log.debug( - 'Sending to (%s, %d) via [socket %s (%s)] (%d bytes #%d) %r as %r...', + "Sending to (%s, %d) via [socket %s (%s)] (%d bytes #%d) %r as %r...", real_addr, port or _MDNS_PORT, transport.fileno, @@ -140,7 +142,6 @@ def async_send_with_transport( class Zeroconf(QuietLogger): - """Implementation of Zeroconf Multicast DNS Service Discovery Supports registration, unregistration, queries and browsing. @@ -150,7 +151,7 @@ def __init__( self, interfaces: InterfacesType = InterfaceChoice.All, unicast: bool = False, - ip_version: Optional[IPVersion] = None, + ip_version: IPVersion | None = None, apple_p2p: bool = False, ) -> None: """Creates an instance of the Zeroconf class, establishing @@ -173,16 +174,16 @@ def __init__( self.done = False - if apple_p2p and sys.platform != 'darwin': - raise RuntimeError('Option `apple_p2p` is not supported on non-Apple platforms.') + if apple_p2p and sys.platform != "darwin": + raise RuntimeError("Option `apple_p2p` is not supported on non-Apple platforms.") self.unicast = unicast listen_socket, respond_sockets = create_sockets(interfaces, unicast, ip_version, apple_p2p=apple_p2p) - log.debug('Listen socket %s, respond sockets %s', listen_socket, respond_sockets) + log.debug("Listen socket %s, respond sockets %s", listen_socket, respond_sockets) self.engine = AsyncEngine(self, listen_socket, respond_sockets) - self.browsers: Dict[ServiceListener, ServiceBrowser] = {} + self.browsers: dict[ServiceListener, ServiceBrowser] = {} self.registry = ServiceRegistry() self.cache = DNSCache() self.question_history = 
QuestionHistory() @@ -193,16 +194,24 @@ def __init__( self.query_handler = QueryHandler(self) self.record_manager = RecordManager(self) - self._notify_futures: Set[asyncio.Future] = set() - self.loop: Optional[asyncio.AbstractEventLoop] = None - self._loop_thread: Optional[threading.Thread] = None + self._notify_futures: set[asyncio.Future] = set() + self.loop: asyncio.AbstractEventLoop | None = None + self._loop_thread: threading.Thread | None = None self.start() @property def started(self) -> bool: """Check if the instance has started.""" - return bool(not self.done and self.engine.running_event and self.engine.running_event.is_set()) + running_future = self.engine.running_future + return bool( + not self.done + and running_future + and running_future.done() + and not running_future.cancelled() + and not running_future.exception() + and running_future.result() + ) def start(self) -> None: """Start Zeroconf.""" @@ -226,7 +235,7 @@ def _run_loop() -> None: self._loop_thread.start() loop_thread_ready.wait() - async def async_wait_for_start(self) -> None: + async def async_wait_for_start(self, timeout: float = _STARTUP_TIMEOUT) -> None: """Wait for start up for actions that require a running Zeroconf instance. 
Throws NotRunningException if the instance is not running or could @@ -234,13 +243,13 @@ async def async_wait_for_start(self) -> None: """ if self.done: # If the instance was shutdown from under us, raise immediately raise NotRunningException - assert self.engine.running_event is not None - await wait_event_or_timeout(self.engine.running_event, timeout=_STARTUP_TIMEOUT) - if not self.engine.running_event.is_set() or self.done: + assert self.engine.running_future is not None + await wait_future_or_timeout(self.engine.running_future, timeout=timeout) + if not self.started: raise NotRunningException @property - def listeners(self) -> Set[RecordUpdateListener]: + def listeners(self) -> set[RecordUpdateListener]: return self.record_manager.listeners async def async_wait(self, timeout: float) -> None: @@ -261,8 +270,12 @@ def async_notify_all(self) -> None: _resolve_all_futures_to_none(notify_futures) def get_service_info( - self, type_: str, name: str, timeout: int = 3000, question_type: Optional[DNSQuestionType] = None - ) -> Optional[ServiceInfo]: + self, + type_: str, + name: str, + timeout: int = 3000, + question_type: DNSQuestionType | None = None, + ) -> ServiceInfo | None: """Returns network's service information for a particular name and type, or None if no service matches by the timeout, which defaults to 3 seconds. 
@@ -298,7 +311,7 @@ def remove_all_service_listeners(self) -> None: def register_service( self, info: ServiceInfo, - ttl: Optional[int] = None, + ttl: int | None = None, allow_name_change: bool = False, cooperating_responders: bool = False, strict: bool = True, @@ -326,7 +339,7 @@ def register_service( async def async_register_service( self, info: ServiceInfo, - ttl: Optional[int] = None, + ttl: int | None = None, allow_name_change: bool = False, cooperating_responders: bool = False, strict: bool = True, @@ -360,7 +373,9 @@ def update_service(self, info: ServiceInfo) -> None: """ assert self.loop is not None run_coro_with_timeout( - await_awaitable(self.async_update_service(info)), self.loop, _REGISTER_TIME * _REGISTER_BROADCASTS + await_awaitable(self.async_update_service(info)), + self.loop, + _REGISTER_TIME * _REGISTER_BROADCASTS, ) async def async_update_service(self, info: ServiceInfo) -> Awaitable: @@ -371,8 +386,12 @@ async def async_update_service(self, info: ServiceInfo) -> Awaitable: return asyncio.ensure_future(self._async_broadcast_service(info, _REGISTER_TIME, None)) async def async_get_service_info( - self, type_: str, name: str, timeout: int = 3000, question_type: Optional[DNSQuestionType] = None - ) -> Optional[AsyncServiceInfo]: + self, + type_: str, + name: str, + timeout: int = 3000, + question_type: DNSQuestionType | None = None, + ) -> AsyncServiceInfo | None: """Returns network's service information for a particular name and type, or None if no service matches by the timeout, which defaults to 3 seconds. 
@@ -391,7 +410,7 @@ async def _async_broadcast_service( self, info: ServiceInfo, interval: int, - ttl: Optional[int], + ttl: int | None, broadcast_addresses: bool = True, ) -> None: """Send a broadcasts to announce a service at intervals.""" @@ -403,7 +422,7 @@ async def _async_broadcast_service( def generate_service_broadcast( self, info: ServiceInfo, - ttl: Optional[int], + ttl: int | None, broadcast_addresses: bool = True, ) -> DNSOutgoing: """Generate a broadcast to announce a service.""" @@ -430,7 +449,7 @@ def _add_broadcast_answer( # pylint: disable=no-self-use self, out: DNSOutgoing, info: ServiceInfo, - override_ttl: Optional[int], + override_ttl: int | None, broadcast_addresses: bool = True, ) -> None: """Add answers to broadcast a service.""" @@ -453,7 +472,9 @@ def unregister_service(self, info: ServiceInfo) -> None: """ assert self.loop is not None run_coro_with_timeout( - self.async_unregister_service(info), self.loop, _UNREGISTER_TIME * _REGISTER_BROADCASTS + self.async_unregister_service(info), + self.loop, + _UNREGISTER_TIME * _REGISTER_BROADCASTS, ) async def async_unregister_service(self, info: ServiceInfo) -> Awaitable: @@ -470,7 +491,7 @@ async def async_unregister_service(self, info: ServiceInfo) -> Awaitable: self._async_broadcast_service(info, _UNREGISTER_TIME, 0, broadcast_addresses) ) - def generate_unregister_all_services(self) -> Optional[DNSOutgoing]: + def generate_unregister_all_services(self) -> DNSOutgoing | None: """Generate a DNSOutgoing goodbye for all services and remove them from the registry.""" service_infos = self.registry.async_get_service_infos() if not service_infos: @@ -506,7 +527,9 @@ def unregister_all_services(self) -> None: """ assert self.loop is not None run_coro_with_timeout( - self.async_unregister_all_services(), self.loop, _UNREGISTER_TIME * _REGISTER_BROADCASTS + self.async_unregister_all_services(), + self.loop, + _UNREGISTER_TIME * _REGISTER_BROADCASTS, ) async def async_check_service( @@ -531,7 +554,7 @@ 
async def async_check_service( raise NonUniqueNameException # change the name and look for a conflict - info.name = f'{instance_name}-{next_instance_number}.{info.type}' + info.name = f"{instance_name}-{next_instance_number}.{info.type}" next_instance_number += 1 service_type_name(info.name, strict=strict) next_time = now @@ -547,7 +570,9 @@ async def async_check_service( next_time += _CHECK_TIME def add_listener( - self, listener: RecordUpdateListener, question: Optional[Union[DNSQuestion, List[DNSQuestion]]] + self, + listener: RecordUpdateListener, + question: DNSQuestion | list[DNSQuestion] | None, ) -> None: """Adds a listener for a given question. The listener will have its update_record method called when information is available to @@ -567,7 +592,9 @@ def remove_listener(self, listener: RecordUpdateListener) -> None: self.loop.call_soon_threadsafe(self.record_manager.async_remove_listener, listener) def async_add_listener( - self, listener: RecordUpdateListener, question: Optional[Union[DNSQuestion, List[DNSQuestion]]] + self, + listener: RecordUpdateListener, + question: DNSQuestion | list[DNSQuestion] | None, ) -> None: """Adds a listener for a given question. 
The listener will have its update_record method called when information is available to @@ -587,10 +614,10 @@ def async_remove_listener(self, listener: RecordUpdateListener) -> None: def send( self, out: DNSOutgoing, - addr: Optional[str] = None, + addr: str | None = None, port: int = _MDNS_PORT, - v6_flow_scope: Union[Tuple[()], Tuple[int, int]] = (), - transport: Optional[_WrappedTransport] = None, + v6_flow_scope: tuple[()] | tuple[int, int] = (), + transport: _WrappedTransport | None = None, ) -> None: """Sends an outgoing packet threadsafe.""" assert self.loop is not None @@ -599,10 +626,10 @@ def send( def async_send( self, out: DNSOutgoing, - addr: Optional[str] = None, + addr: str | None = None, port: int = _MDNS_PORT, - v6_flow_scope: Union[Tuple[()], Tuple[int, int]] = (), - transport: Optional[_WrappedTransport] = None, + v6_flow_scope: tuple[()] | tuple[int, int] = (), + transport: _WrappedTransport | None = None, ) -> None: """Sends an outgoing packet.""" if self.done: @@ -615,11 +642,23 @@ def async_send( for packet_num, packet in enumerate(out.packets()): if len(packet) > _MAX_MSG_ABSOLUTE: - self.log_warning_once("Dropping %r over-sized packet (%d bytes) %r", out, len(packet), packet) + self.log_warning_once( + "Dropping %r over-sized packet (%d bytes) %r", + out, + len(packet), + packet, + ) return for send_transport in transports: async_send_with_transport( - log_debug, send_transport, packet, packet_num, out, addr, port, v6_flow_scope + log_debug, + send_transport, + packet, + packet_num, + out, + addr, + port, + v6_flow_scope, ) def _close(self) -> None: @@ -672,14 +711,14 @@ async def _async_close(self) -> None: await self.engine._async_close() # pylint: disable=protected-access self._shutdown_threads() - def __enter__(self) -> 'Zeroconf': + def __enter__(self) -> Zeroconf: return self def __exit__( # pylint: disable=useless-return self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: 
Optional[TracebackType], - ) -> Optional[bool]: + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool | None: self.close() return None diff --git a/src/zeroconf/_dns.pxd b/src/zeroconf/_dns.pxd index d4116a66a..7ef1dbec9 100644 --- a/src/zeroconf/_dns.pxd +++ b/src/zeroconf/_dns.pxd @@ -30,7 +30,7 @@ cdef class DNSEntry: cdef public cython.uint class_ cdef public bint unique - cdef _set_class(self, cython.uint class_) + cdef _fast_init_entry(self, str name, cython.uint type_, cython.uint class_) cdef bint _dns_entry_matches(self, DNSEntry other) @@ -38,13 +38,17 @@ cdef class DNSQuestion(DNSEntry): cdef public cython.int _hash + cdef _fast_init(self, str name, cython.uint type_, cython.uint class_) + cpdef bint answered_by(self, DNSRecord rec) cdef class DNSRecord(DNSEntry): - cdef public cython.float ttl + cdef public unsigned int ttl cdef public double created + cdef _fast_init_record(self, str name, cython.uint type_, cython.uint class_, unsigned int ttl, double created) + cdef bint _suppressed_by_answer(self, DNSRecord answer) @cython.locals( @@ -62,16 +66,16 @@ cdef class DNSRecord(DNSEntry): cpdef bint is_recent(self, double now) - cpdef reset_ttl(self, DNSRecord other) - - cpdef set_created_ttl(self, double now, cython.float ttl) + cdef _set_created_ttl(self, double now, unsigned int ttl) cdef class DNSAddress(DNSRecord): cdef public cython.int _hash - cdef public object address + cdef public bytes address cdef public object scope_id + cdef _fast_init(self, str name, cython.uint type_, cython.uint class_, unsigned int ttl, bytes address, object scope_id, double created) + cdef bint _eq(self, DNSAddress other) cpdef write(self, DNSOutgoing out) @@ -83,6 +87,8 @@ cdef class DNSHinfo(DNSRecord): cdef public str cpu cdef public str os + cdef _fast_init(self, str name, cython.uint type_, cython.uint class_, unsigned int ttl, str cpu, str os, double created) + cdef bint _eq(self, DNSHinfo other) cpdef 
write(self, DNSOutgoing out) @@ -93,6 +99,8 @@ cdef class DNSPointer(DNSRecord): cdef public str alias cdef public str alias_key + cdef _fast_init(self, str name, cython.uint type_, cython.uint class_, unsigned int ttl, str alias, double created) + cdef bint _eq(self, DNSPointer other) cpdef write(self, DNSOutgoing out) @@ -102,6 +110,8 @@ cdef class DNSText(DNSRecord): cdef public cython.int _hash cdef public bytes text + cdef _fast_init(self, str name, cython.uint type_, cython.uint class_, unsigned int ttl, bytes text, double created) + cdef bint _eq(self, DNSText other) cpdef write(self, DNSOutgoing out) @@ -115,6 +125,8 @@ cdef class DNSService(DNSRecord): cdef public str server cdef public str server_key + cdef _fast_init(self, str name, cython.uint type_, cython.uint class_, unsigned int ttl, cython.uint priority, cython.uint weight, cython.uint port, str server, double created) + cdef bint _eq(self, DNSService other) cpdef write(self, DNSOutgoing out) @@ -122,9 +134,11 @@ cdef class DNSService(DNSRecord): cdef class DNSNsec(DNSRecord): cdef public cython.int _hash - cdef public object next_name + cdef public str next_name cdef public cython.list rdtypes + cdef _fast_init(self, str name, cython.uint type_, cython.uint class_, unsigned int ttl, str next_name, cython.list rdtypes, double created) + cdef bint _eq(self, DNSNsec other) cpdef write(self, DNSOutgoing out) diff --git a/src/zeroconf/_dns.py b/src/zeroconf/_dns.py index 66fb5b86d..60df14b11 100644 --- a/src/zeroconf/_dns.py +++ b/src/zeroconf/_dns.py @@ -1,28 +1,30 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. 
- - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. 
+ +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + import enum import socket -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Union, cast +from typing import TYPE_CHECKING, Any, cast from ._exceptions import AbstractMethodException from ._utils.net import _is_v6_address @@ -62,22 +64,22 @@ class DNSQuestionType(enum.Enum): class DNSEntry: - """A DNS entry""" - __slots__ = ('key', 'name', 'type', 'class_', 'unique') + __slots__ = ("class_", "key", "name", "type", "unique") def __init__(self, name: str, type_: int, class_: int) -> None: + self._fast_init_entry(name, type_, class_) + + def _fast_init_entry(self, name: str, type_: _int, class_: _int) -> None: + """Fast init for reuse.""" self.name = name self.key = name.lower() self.type = type_ - self._set_class(class_) - - def _set_class(self, class_: _int) -> None: self.class_ = class_ & _CLASS_MASK self.unique = (class_ & _CLASS_UNIQUE) != 0 - def _dns_entry_matches(self, other) -> bool: # type: ignore[no-untyped-def] + def _dns_entry_matches(self, other: DNSEntry) -> bool: return self.key == other.key and self.type == other.type and self.class_ == other.class_ def __eq__(self, other: Any) -> bool: @@ -94,7 +96,7 @@ def get_type(t: int) -> str: """Type accessor""" return _TYPES.get(t, f"?({t})") - def entry_to_string(self, hdr: str, other: Optional[Union[bytes, str]]) -> str: + def entry_to_string(self, hdr: str, other: bytes | str | None) -> str: """String representation with additional information""" return "{}[{},{}{},{}]{}".format( hdr, @@ -102,21 +104,24 @@ def entry_to_string(self, hdr: str, other: Optional[Union[bytes, str]]) -> str: self.get_class_(self.class_), "-unique" if self.unique else "", self.name, - "=%s" % cast(Any, other) if other is not None else "", + f"={cast(Any, other)}" if 
other is not None else "", ) class DNSQuestion(DNSEntry): - """A DNS question entry""" - __slots__ = ('_hash',) + __slots__ = ("_hash",) def __init__(self, name: str, type_: int, class_: int) -> None: - super().__init__(name, type_, class_) + self._fast_init(name, type_, class_) + + def _fast_init(self, name: str, type_: _int, class_: _int) -> None: + """Fast init for reuse.""" + self._fast_init_entry(name, type_, class_) self._hash = hash((self.key, type_, self.class_)) - def answered_by(self, rec: 'DNSRecord') -> bool: + def answered_by(self, rec: DNSRecord) -> bool: """Returns true if the question is answered by the record""" return self.class_ == rec.class_ and self.type in (rec.type, _TYPE_ANY) and self.name == rec.name @@ -130,7 +135,7 @@ def __eq__(self, other: Any) -> bool: @property def max_size(self) -> int: """Maximum size of the question in the packet.""" - return len(self.name.encode('utf-8')) + _LEN_BYTE + _LEN_SHORT + _LEN_SHORT # type # class + return len(self.name.encode("utf-8")) + _LEN_BYTE + _LEN_SHORT + _LEN_SHORT @property def unicast(self) -> bool: @@ -157,24 +162,34 @@ def __repr__(self) -> str: class DNSRecord(DNSEntry): - """A DNS record - like a DNS entry, but has a TTL""" - __slots__ = ('ttl', 'created') + __slots__ = ("created", "ttl") - # TODO: Switch to just int ttl def __init__( - self, name: str, type_: int, class_: int, ttl: Union[float, int], created: Optional[float] = None + self, + name: str, + type_: int, + class_: int, + ttl: _int, + created: float | None = None, ) -> None: - super().__init__(name, type_, class_) + self._fast_init_record(name, type_, class_, ttl, created or current_time_millis()) + + def _fast_init_record(self, name: str, type_: _int, class_: _int, ttl: _int, created: _float) -> None: + """Fast init for reuse.""" + self._fast_init_entry(name, type_, class_) self.ttl = ttl - self.created = created or current_time_millis() + self.created = created def __eq__(self, other: Any) -> bool: # pylint: 
disable=no-self-use """Abstract method""" raise AbstractMethodException - def suppressed_by(self, msg: 'DNSIncoming') -> bool: + def __lt__(self, other: DNSRecord) -> bool: + return self.ttl < other.ttl + + def suppressed_by(self, msg: DNSIncoming) -> bool: """Returns true if any answer in a message can suffice for the information held in this record.""" answers = msg.answers() @@ -183,7 +198,7 @@ def suppressed_by(self, msg: 'DNSIncoming') -> bool: return True return False - def _suppressed_by_answer(self, other) -> bool: # type: ignore[no-untyped-def] + def _suppressed_by_answer(self, other: DNSRecord) -> bool: """Returns true if another record has same name, type and class, and if its TTL is at least half of this record's.""" return self == other and other.ttl > (self.ttl / 2) @@ -194,7 +209,7 @@ def get_expiration_time(self, percent: _int) -> float: return self.created + (percent * self.ttl * 10) # TODO: Switch to just int here - def get_remaining_ttl(self, now: _float) -> Union[int, float]: + def get_remaining_ttl(self, now: _float) -> int | float: """Returns the remaining TTL in seconds.""" remain = (self.created + (_EXPIRE_FULL_TIME_MS * self.ttl) - now) / 1000.0 return 0 if remain < 0 else remain @@ -211,31 +226,27 @@ def is_recent(self, now: _float) -> bool: """Returns true if the record more than one quarter of its TTL remaining.""" return self.created + (_RECENT_TIME_MS * self.ttl) > now - def reset_ttl(self, other) -> None: # type: ignore[no-untyped-def] - """Sets this record's TTL and created time to that of - another record.""" - self.set_created_ttl(other.created, other.ttl) - - def set_created_ttl(self, created: _float, ttl: Union[float, int]) -> None: + def _set_created_ttl(self, created: _float, ttl: _int) -> None: """Set the created and ttl of a record.""" + # It would be better if we made a copy instead of mutating the record + # in place, but records currently don't have a copy method. 
self.created = created self.ttl = ttl - def write(self, out: 'DNSOutgoing') -> None: # pylint: disable=no-self-use + def write(self, out: DNSOutgoing) -> None: # pylint: disable=no-self-use """Abstract method""" raise AbstractMethodException - def to_string(self, other: Union[bytes, str]) -> str: + def to_string(self, other: bytes | str) -> str: """String representation with additional information""" arg = f"{self.ttl}/{int(self.get_remaining_ttl(current_time_millis()))},{cast(Any, other)}" return DNSEntry.entry_to_string(self, "record", arg) class DNSAddress(DNSRecord): - """A DNS address record""" - __slots__ = ('_hash', 'address', 'scope_id') + __slots__ = ("_hash", "address", "scope_id") def __init__( self, @@ -244,15 +255,28 @@ def __init__( class_: int, ttl: int, address: bytes, - scope_id: Optional[int] = None, - created: Optional[float] = None, + scope_id: int | None = None, + created: float | None = None, + ) -> None: + self._fast_init(name, type_, class_, ttl, address, scope_id, created or current_time_millis()) + + def _fast_init( + self, + name: str, + type_: _int, + class_: _int, + ttl: _int, + address: bytes, + scope_id: _int | None, + created: _float, ) -> None: - super().__init__(name, type_, class_, ttl, created) + """Fast init for reuse.""" + self._fast_init_record(name, type_, class_, ttl, created) self.address = address self.scope_id = scope_id self._hash = hash((self.key, type_, self.class_, address, scope_id)) - def write(self, out: 'DNSOutgoing') -> None: + def write(self, out: DNSOutgoing) -> None: """Used in constructing an outgoing packet""" out.write_string(self.address) @@ -260,7 +284,7 @@ def __eq__(self, other: Any) -> bool: """Tests equality on address""" return isinstance(other, DNSAddress) and self._eq(other) - def _eq(self, other) -> bool: # type: ignore[no-untyped-def] + def _eq(self, other: DNSAddress) -> bool: return ( self.address == other.address and self.scope_id == other.scope_id @@ -276,7 +300,8 @@ def __repr__(self) -> 
str: try: return self.to_string( socket.inet_ntop( - socket.AF_INET6 if _is_v6_address(self.address) else socket.AF_INET, self.address + socket.AF_INET6 if _is_v6_address(self.address) else socket.AF_INET, + self.address, ) ) except (ValueError, OSError): @@ -284,29 +309,41 @@ def __repr__(self) -> str: class DNSHinfo(DNSRecord): - """A DNS host information record""" - __slots__ = ('_hash', 'cpu', 'os') + __slots__ = ("_hash", "cpu", "os") def __init__( - self, name: str, type_: int, class_: int, ttl: int, cpu: str, os: str, created: Optional[float] = None + self, + name: str, + type_: int, + class_: int, + ttl: int, + cpu: str, + os: str, + created: float | None = None, ) -> None: - super().__init__(name, type_, class_, ttl, created) + self._fast_init(name, type_, class_, ttl, cpu, os, created or current_time_millis()) + + def _fast_init( + self, name: str, type_: _int, class_: _int, ttl: _int, cpu: str, os: str, created: _float + ) -> None: + """Fast init for reuse.""" + self._fast_init_record(name, type_, class_, ttl, created) self.cpu = cpu self.os = os self._hash = hash((self.key, type_, self.class_, cpu, os)) - def write(self, out: 'DNSOutgoing') -> None: + def write(self, out: DNSOutgoing) -> None: """Used in constructing an outgoing packet""" - out.write_character_string(self.cpu.encode('utf-8')) - out.write_character_string(self.os.encode('utf-8')) + out.write_character_string(self.cpu.encode("utf-8")) + out.write_character_string(self.os.encode("utf-8")) def __eq__(self, other: Any) -> bool: """Tests equality on cpu and os.""" return isinstance(other, DNSHinfo) and self._eq(other) - def _eq(self, other) -> bool: # type: ignore[no-untyped-def] + def _eq(self, other: DNSHinfo) -> bool: """Tests equality on cpu and os.""" return self.cpu == other.cpu and self.os == other.os and self._dns_entry_matches(other) @@ -320,15 +357,25 @@ def __repr__(self) -> str: class DNSPointer(DNSRecord): - """A DNS pointer record""" - __slots__ = ('_hash', 'alias', 'alias_key') 
+ __slots__ = ("_hash", "alias", "alias_key") def __init__( - self, name: str, type_: int, class_: int, ttl: int, alias: str, created: Optional[float] = None + self, + name: str, + type_: int, + class_: int, + ttl: int, + alias: str, + created: float | None = None, + ) -> None: + self._fast_init(name, type_, class_, ttl, alias, created or current_time_millis()) + + def _fast_init( + self, name: str, type_: _int, class_: _int, ttl: _int, alias: str, created: _float ) -> None: - super().__init__(name, type_, class_, ttl, created) + self._fast_init_record(name, type_, class_, ttl, created) self.alias = alias self.alias_key = alias.lower() self._hash = hash((self.key, type_, self.class_, self.alias_key)) @@ -343,7 +390,7 @@ def max_size_compressed(self) -> int: + _NAME_COMPRESSION_MIN_SIZE ) - def write(self, out: 'DNSOutgoing') -> None: + def write(self, out: DNSOutgoing) -> None: """Used in constructing an outgoing packet""" out.write_name(self.alias) @@ -351,7 +398,7 @@ def __eq__(self, other: Any) -> bool: """Tests equality on alias.""" return isinstance(other, DNSPointer) and self._eq(other) - def _eq(self, other) -> bool: # type: ignore[no-untyped-def] + def _eq(self, other: DNSPointer) -> bool: """Tests equality on alias.""" return self.alias_key == other.alias_key and self._dns_entry_matches(other) @@ -365,19 +412,29 @@ def __repr__(self) -> str: class DNSText(DNSRecord): - """A DNS text record""" - __slots__ = ('_hash', 'text') + __slots__ = ("_hash", "text") def __init__( - self, name: str, type_: int, class_: int, ttl: int, text: bytes, created: Optional[float] = None + self, + name: str, + type_: int, + class_: int, + ttl: int, + text: bytes, + created: float | None = None, + ) -> None: + self._fast_init(name, type_, class_, ttl, text, created or current_time_millis()) + + def _fast_init( + self, name: str, type_: _int, class_: _int, ttl: _int, text: bytes, created: _float ) -> None: - super().__init__(name, type_, class_, ttl, created) + 
self._fast_init_record(name, type_, class_, ttl, created) self.text = text self._hash = hash((self.key, type_, self.class_, text)) - def write(self, out: 'DNSOutgoing') -> None: + def write(self, out: DNSOutgoing) -> None: """Used in constructing an outgoing packet""" out.write_string(self.text) @@ -389,7 +446,7 @@ def __eq__(self, other: Any) -> bool: """Tests equality on text.""" return isinstance(other, DNSText) and self._eq(other) - def _eq(self, other) -> bool: # type: ignore[no-untyped-def] + def _eq(self, other: DNSText) -> bool: """Tests equality on text.""" return self.text == other.text and self._dns_entry_matches(other) @@ -401,24 +458,39 @@ def __repr__(self) -> str: class DNSService(DNSRecord): - """A DNS service record""" - __slots__ = ('_hash', 'priority', 'weight', 'port', 'server', 'server_key') + __slots__ = ("_hash", "port", "priority", "server", "server_key", "weight") def __init__( self, name: str, type_: int, class_: int, - ttl: Union[float, int], + ttl: int, priority: int, weight: int, port: int, server: str, - created: Optional[float] = None, + created: float | None = None, + ) -> None: + self._fast_init( + name, type_, class_, ttl, priority, weight, port, server, created or current_time_millis() + ) + + def _fast_init( + self, + name: str, + type_: _int, + class_: _int, + ttl: _int, + priority: _int, + weight: _int, + port: _int, + server: str, + created: _float, ) -> None: - super().__init__(name, type_, class_, ttl, created) + self._fast_init_record(name, type_, class_, ttl, created) self.priority = priority self.weight = weight self.port = port @@ -426,7 +498,7 @@ def __init__( self.server_key = server.lower() self._hash = hash((self.key, type_, self.class_, priority, weight, port, self.server_key)) - def write(self, out: 'DNSOutgoing') -> None: + def write(self, out: DNSOutgoing) -> None: """Used in constructing an outgoing packet""" out.write_short(self.priority) out.write_short(self.weight) @@ -437,7 +509,7 @@ def __eq__(self, other: 
Any) -> bool: """Tests equality on priority, weight, port and server""" return isinstance(other, DNSService) and self._eq(other) - def _eq(self, other) -> bool: # type: ignore[no-untyped-def] + def _eq(self, other: DNSService) -> bool: """Tests equality on priority, weight, port and server.""" return ( self.priority == other.priority @@ -457,29 +529,40 @@ def __repr__(self) -> str: class DNSNsec(DNSRecord): - """A DNS NSEC record""" - __slots__ = ('_hash', 'next_name', 'rdtypes') + __slots__ = ("_hash", "next_name", "rdtypes") def __init__( self, name: str, type_: int, class_: int, - ttl: int, + ttl: _int, + next_name: str, + rdtypes: list[int], + created: float | None = None, + ) -> None: + self._fast_init(name, type_, class_, ttl, next_name, rdtypes, created or current_time_millis()) + + def _fast_init( + self, + name: str, + type_: _int, + class_: _int, + ttl: _int, next_name: str, - rdtypes: List[int], - created: Optional[float] = None, + rdtypes: list[_int], + created: _float, ) -> None: - super().__init__(name, type_, class_, ttl, created) + self._fast_init_record(name, type_, class_, ttl, created) self.next_name = next_name self.rdtypes = sorted(rdtypes) self._hash = hash((self.key, type_, self.class_, next_name, *self.rdtypes)) - def write(self, out: 'DNSOutgoing') -> None: + def write(self, out: DNSOutgoing) -> None: """Used in constructing an outgoing packet.""" - bitmap = bytearray(b'\0' * 32) + bitmap = bytearray(b"\0" * 32) total_octets = 0 for rdtype in self.rdtypes: if rdtype > 255: # mDNS only supports window 0 @@ -501,7 +584,7 @@ def __eq__(self, other: Any) -> bool: """Tests equality on next_name and rdtypes.""" return isinstance(other, DNSNsec) and self._eq(other) - def _eq(self, other) -> bool: # type: ignore[no-untyped-def] + def _eq(self, other: DNSNsec) -> bool: """Tests equality on next_name and rdtypes.""" return ( self.next_name == other.next_name @@ -526,23 +609,23 @@ def __repr__(self) -> str: class DNSRRSet: """A set of dns records with 
a lookup to get the ttl.""" - __slots__ = ('_records', '_lookup') + __slots__ = ("_lookup", "_records") - def __init__(self, records: List[DNSRecord]) -> None: + def __init__(self, records: list[DNSRecord]) -> None: """Create an RRset from records sets.""" self._records = records - self._lookup: Optional[Dict[DNSRecord, DNSRecord]] = None + self._lookup: dict[DNSRecord, DNSRecord] | None = None @property - def lookup(self) -> Dict[DNSRecord, DNSRecord]: + def lookup(self) -> dict[DNSRecord, DNSRecord]: """Return the lookup table.""" return self._get_lookup() - def lookup_set(self) -> Set[DNSRecord]: + def lookup_set(self) -> set[DNSRecord]: """Return the lookup table as aset.""" return set(self._get_lookup()) - def _get_lookup(self) -> Dict[DNSRecord, DNSRecord]: + def _get_lookup(self) -> dict[DNSRecord, DNSRecord]: """Return the lookup table, building it if needed.""" if self._lookup is None: # Build the hash table so we can lookup the record ttl diff --git a/src/zeroconf/_engine.py b/src/zeroconf/_engine.py index 9e4550030..8a371e1e2 100644 --- a/src/zeroconf/_engine.py +++ b/src/zeroconf/_engine.py @@ -1,30 +1,32 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. 
- - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + import asyncio import itertools import socket import threading -from typing import TYPE_CHECKING, List, Optional, cast +from typing import TYPE_CHECKING, cast from ._record_update import RecordUpdate from ._utils.asyncio import get_running_loop, run_coro_with_timeout @@ -45,45 +47,52 @@ class AsyncEngine: """An engine wraps sockets in the event loop.""" __slots__ = ( - 'loop', - 'zc', - 'protocols', - 'readers', - 'senders', - 'running_event', - '_listen_socket', - '_respond_sockets', - '_cleanup_timer', + "_cleanup_timer", + "_listen_socket", + "_respond_sockets", + "_setup_task", + "loop", + "protocols", + "readers", + "running_future", + "senders", + "zc", ) def __init__( self, - zeroconf: 'Zeroconf', - listen_socket: Optional[socket.socket], - respond_sockets: List[socket.socket], + zeroconf: 
Zeroconf, + listen_socket: socket.socket | None, + respond_sockets: list[socket.socket], ) -> None: - self.loop: Optional[asyncio.AbstractEventLoop] = None + self.loop: asyncio.AbstractEventLoop | None = None self.zc = zeroconf - self.protocols: List[AsyncListener] = [] - self.readers: List[_WrappedTransport] = [] - self.senders: List[_WrappedTransport] = [] - self.running_event: Optional[asyncio.Event] = None + self.protocols: list[AsyncListener] = [] + self.readers: list[_WrappedTransport] = [] + self.senders: list[_WrappedTransport] = [] + self.running_future: asyncio.Future[bool | None] | None = None self._listen_socket = listen_socket self._respond_sockets = respond_sockets - self._cleanup_timer: Optional[asyncio.TimerHandle] = None + self._cleanup_timer: asyncio.TimerHandle | None = None + self._setup_task: asyncio.Task[None] | None = None - def setup(self, loop: asyncio.AbstractEventLoop, loop_thread_ready: Optional[threading.Event]) -> None: + def setup( + self, + loop: asyncio.AbstractEventLoop, + loop_thread_ready: threading.Event | None, + ) -> None: """Set up the instance.""" self.loop = loop - self.running_event = asyncio.Event() - self.loop.create_task(self._async_setup(loop_thread_ready)) + self.running_future = loop.create_future() + self._setup_task = self.loop.create_task(self._async_setup(loop_thread_ready)) - async def _async_setup(self, loop_thread_ready: Optional[threading.Event]) -> None: + async def _async_setup(self, loop_thread_ready: threading.Event | None) -> None: """Set up the instance.""" self._async_schedule_next_cache_cleanup() await self._async_create_endpoints() - assert self.running_event is not None - self.running_event.set() + assert self.running_future is not None + if not self.running_future.done(): + self.running_future.set_result(True) if loop_thread_ready: loop_thread_ready.set() @@ -101,8 +110,9 @@ async def _async_create_endpoints(self) -> None: sender_sockets.append(s) for s in reader_sockets: - transport, protocol = 
await loop.create_datagram_endpoint( - lambda: AsyncListener(self.zc), sock=s # type: ignore[arg-type, return-value] + transport, protocol = await loop.create_datagram_endpoint( # type: ignore[type-var] + lambda: AsyncListener(self.zc), # type: ignore[arg-type, return-value] + sock=s, ) self.protocols.append(cast(AsyncListener, protocol)) self.readers.append(make_wrapped_transport(cast(asyncio.DatagramTransport, transport))) @@ -114,7 +124,8 @@ def _async_cache_cleanup(self) -> None: now = current_time_millis() self.zc.question_history.async_expire(now) self.zc.record_manager.async_updates( - now, [RecordUpdate(record, record) for record in self.zc.cache.async_expire(now)] + now, + [RecordUpdate(record, record) for record in self.zc.cache.async_expire(now)], ) self.zc.record_manager.async_updates_complete(False) self._async_schedule_next_cache_cleanup() @@ -127,6 +138,8 @@ def _async_schedule_next_cache_cleanup(self) -> None: async def _async_close(self) -> None: """Cancel and wait for the cleanup task to finish.""" + assert self._setup_task is not None + await self._setup_task self._async_shutdown() await asyncio.sleep(0) # flush out any call soons assert self._cleanup_timer is not None @@ -134,8 +147,9 @@ async def _async_close(self) -> None: def _async_shutdown(self) -> None: """Shutdown transports and sockets.""" - assert self.running_event is not None - self.running_event.clear() + assert self.running_future is not None + assert self.loop is not None + self.running_future = self.loop.create_future() for wrapped_transport in itertools.chain(self.senders, self.readers): wrapped_transport.transport.close() diff --git a/src/zeroconf/_exceptions.py b/src/zeroconf/_exceptions.py index f4fcbd551..5fc812593 100644 --- a/src/zeroconf/_exceptions.py +++ b/src/zeroconf/_exceptions.py @@ -1,25 +1,27 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of 
DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. 
+ +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + class Error(Exception): """Base class for all zeroconf exceptions.""" diff --git a/src/zeroconf/_handlers/__init__.py b/src/zeroconf/_handlers/__init__.py index 2ef4b15b1..584a74eca 100644 --- a/src/zeroconf/_handlers/__init__.py +++ b/src/zeroconf/_handlers/__init__.py @@ -1,21 +1,23 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - This module provides a framework for the use of DNS Service Discovery - using IP multicast. +This module provides a framework for the use of DNS Service Discovery +using IP multicast. - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. 
- You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ + +from __future__ import annotations diff --git a/src/zeroconf/_handlers/answers.pxd b/src/zeroconf/_handlers/answers.pxd index 25b3c1a1e..759905f27 100644 --- a/src/zeroconf/_handlers/answers.pxd +++ b/src/zeroconf/_handlers/answers.pxd @@ -20,8 +20,6 @@ cdef class AnswerGroup: cdef public cython.dict answers - - cdef object _FLAGS_QR_RESPONSE_AA cdef object NAME_GETTER @@ -31,5 +29,6 @@ cpdef DNSOutgoing construct_outgoing_unicast_answers( cython.dict answers, bint ucast_source, cython.list questions, object id_ ) + @cython.locals(answer=DNSRecord, additionals=cython.set, additional=DNSRecord) cdef void _add_answers_additionals(DNSOutgoing out, cython.dict answers) diff --git a/src/zeroconf/_handlers/answers.py b/src/zeroconf/_handlers/answers.py index a2dbd66aa..07b0a65ab 100644 --- a/src/zeroconf/_handlers/answers.py +++ b/src/zeroconf/_handlers/answers.py @@ -1,40 +1,41 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + from operator import attrgetter -from typing import Dict, List, Set from .._dns import DNSQuestion, DNSRecord from .._protocol.outgoing import DNSOutgoing from ..const import _FLAGS_AA, _FLAGS_QR_RESPONSE -_AnswerWithAdditionalsType = Dict[DNSRecord, Set[DNSRecord]] +_AnswerWithAdditionalsType = dict[DNSRecord, set[DNSRecord]] int_ = int MULTICAST_DELAY_RANDOM_INTERVAL = (20, 120) -NAME_GETTER = attrgetter('name') +NAME_GETTER = attrgetter("name") _FLAGS_QR_RESPONSE_AA = _FLAGS_QR_RESPONSE | _FLAGS_AA @@ -44,7 +45,7 @@ class QuestionAnswers: """A group of answers to a question.""" - __slots__ = ('ucast', 'mcast_now', 'mcast_aggregate', 'mcast_aggregate_last_second') + __slots__ = ("mcast_aggregate", "mcast_aggregate_last_second", "mcast_now", 
"ucast") def __init__( self, @@ -62,24 +63,31 @@ def __init__( def __repr__(self) -> str: """Return a string representation of this QuestionAnswers.""" return ( - f'QuestionAnswers(ucast={self.ucast}, mcast_now={self.mcast_now}, ' - f'mcast_aggregate={self.mcast_aggregate}, ' - f'mcast_aggregate_last_second={self.mcast_aggregate_last_second})' + f"QuestionAnswers(ucast={self.ucast}, mcast_now={self.mcast_now}, " + f"mcast_aggregate={self.mcast_aggregate}, " + f"mcast_aggregate_last_second={self.mcast_aggregate_last_second})" ) class AnswerGroup: """A group of answers scheduled to be sent at the same time.""" - __slots__ = ('send_after', 'send_before', 'answers') + __slots__ = ("answers", "send_after", "send_before") - def __init__(self, send_after: float_, send_before: float_, answers: _AnswerWithAdditionalsType) -> None: + def __init__( + self, + send_after: float_, + send_before: float_, + answers: _AnswerWithAdditionalsType, + ) -> None: self.send_after = send_after # Must be sent after this time self.send_before = send_before # Must be sent before this time self.answers = answers -def construct_outgoing_multicast_answers(answers: _AnswerWithAdditionalsType) -> DNSOutgoing: +def construct_outgoing_multicast_answers( + answers: _AnswerWithAdditionalsType, +) -> DNSOutgoing: """Add answers and additionals to a DNSOutgoing.""" out = DNSOutgoing(_FLAGS_QR_RESPONSE_AA, True) _add_answers_additionals(out, answers) @@ -87,7 +95,10 @@ def construct_outgoing_multicast_answers(answers: _AnswerWithAdditionalsType) -> def construct_outgoing_unicast_answers( - answers: _AnswerWithAdditionalsType, ucast_source: bool, questions: List[DNSQuestion], id_: int_ + answers: _AnswerWithAdditionalsType, + ucast_source: bool, + questions: list[DNSQuestion], + id_: int_, ) -> DNSOutgoing: """Add answers and additionals to a DNSOutgoing.""" out = DNSOutgoing(_FLAGS_QR_RESPONSE_AA, False, id_) @@ -101,7 +112,7 @@ def construct_outgoing_unicast_answers( def _add_answers_additionals(out: 
DNSOutgoing, answers: _AnswerWithAdditionalsType) -> None: # Find additionals and suppress any additionals that are already in answers - sending: Set[DNSRecord] = set(answers) + sending: set[DNSRecord] = set(answers) # Answers are sorted to group names together to increase the chance # that similar names will end up in the same packet and can reduce the # overall size of the outgoing response via name compression diff --git a/src/zeroconf/_handlers/multicast_outgoing_queue.py b/src/zeroconf/_handlers/multicast_outgoing_queue.py index 23288d18d..73d5ee431 100644 --- a/src/zeroconf/_handlers/multicast_outgoing_queue.py +++ b/src/zeroconf/_handlers/multicast_outgoing_queue.py @@ -1,25 +1,27 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. 
+ +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + import random from collections import deque from typing import TYPE_CHECKING @@ -45,15 +47,15 @@ class MulticastOutgoingQueue: """An outgoing queue used to aggregate multicast responses.""" __slots__ = ( - "zc", - "queue", - "_multicast_delay_random_min", - "_multicast_delay_random_max", "_additional_delay", "_aggregation_delay", + "_multicast_delay_random_max", + "_multicast_delay_random_min", + "queue", + "zc", ) - def __init__(self, zeroconf: 'Zeroconf', additional_delay: _int, max_aggregation_delay: _int) -> None: + def __init__(self, zeroconf: Zeroconf, additional_delay: _int, max_aggregation_delay: _int) -> None: self.zc = zeroconf self.queue: deque[AnswerGroup] = deque() # Additional delay is used to implement @@ -103,7 +105,10 @@ def async_ready(self) -> None: if len(self.queue) > 1 and self.queue[0].send_before > now: # There is more than one answer in the queue, # delay until we have to send it (first answer group reaches send_before) - loop.call_at(loop.time() + millis_to_seconds(self.queue[0].send_before - now), self.async_ready) + loop.call_at( + loop.time() + millis_to_seconds(self.queue[0].send_before - now), + self.async_ready, + ) return answers: _AnswerWithAdditionalsType = {} @@ -114,7 +119,10 @@ def 
async_ready(self) -> None: if len(self.queue): # If there are still groups in the queue that are not ready to send # be sure we schedule them to go out later - loop.call_at(loop.time() + millis_to_seconds(self.queue[0].send_after - now), self.async_ready) + loop.call_at( + loop.time() + millis_to_seconds(self.queue[0].send_after - now), + self.async_ready, + ) if answers: # pragma: no branch # If we have the same answer scheduled to go out, remove them diff --git a/src/zeroconf/_handlers/query_handler.py b/src/zeroconf/_handlers/query_handler.py index ba9c9e31c..60209568a 100644 --- a/src/zeroconf/_handlers/query_handler.py +++ b/src/zeroconf/_handlers/query_handler.py @@ -1,26 +1,28 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. 
+ +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ -from typing import TYPE_CHECKING, List, Optional, Set, Tuple, Union, cast +from __future__ import annotations + +from typing import TYPE_CHECKING, cast from .._cache import DNSCache, _UniqueRecordsType from .._dns import DNSAddress, DNSPointer, DNSQuestion, DNSRecord, DNSRRSet @@ -52,8 +54,8 @@ _RESPOND_IMMEDIATE_TYPES = {_TYPE_NSEC, _TYPE_SRV, *_ADDRESS_RECORD_TYPES} -_EMPTY_SERVICES_LIST: List[ServiceInfo] = [] -_EMPTY_TYPES_LIST: List[str] = [] +_EMPTY_SERVICES_LIST: list[ServiceInfo] = [] +_EMPTY_TYPES_LIST: list[str] = [] _IPVersion_ALL = IPVersion.All @@ -71,15 +73,14 @@ class _AnswerStrategy: - - __slots__ = ("question", "strategy_type", "types", "services") + __slots__ = ("question", "services", "strategy_type", "types") def __init__( self, question: DNSQuestion, strategy_type: _int, - types: List[str], - services: List[ServiceInfo], + types: list[str], + services: list[ServiceInfo], ) -> None: """Create an answer strategy.""" self.question = question @@ -92,28 +93,28 @@ class _QueryResponse: """A pair for unicast and multicast DNSOutgoing responses.""" __slots__ = ( - "_is_probe", - "_questions", - "_now", - "_cache", "_additionals", - "_ucast", - "_mcast_now", + "_cache", + "_is_probe", "_mcast_aggregate", "_mcast_aggregate_last_second", + 
"_mcast_now", + "_now", + "_questions", + "_ucast", ) - def __init__(self, cache: DNSCache, questions: List[DNSQuestion], is_probe: bool, now: float) -> None: + def __init__(self, cache: DNSCache, questions: list[DNSQuestion], is_probe: bool, now: float) -> None: """Build a query response.""" self._is_probe = is_probe self._questions = questions self._now = now self._cache = cache self._additionals: _AnswerWithAdditionalsType = {} - self._ucast: Set[DNSRecord] = set() - self._mcast_now: Set[DNSRecord] = set() - self._mcast_aggregate: Set[DNSRecord] = set() - self._mcast_aggregate_last_second: Set[DNSRecord] = set() + self._ucast: set[DNSRecord] = set() + self._mcast_now: set[DNSRecord] = set() + self._mcast_aggregate: set[DNSRecord] = set() + self._mcast_aggregate_last_second: set[DNSRecord] = set() def add_qu_question_response(self, answers: _AnswerWithAdditionalsType) -> None: """Generate a response to a multicast QU query.""" @@ -191,9 +192,16 @@ def _has_mcast_record_in_last_second(self, record: DNSRecord) -> bool: class QueryHandler: """Query the ServiceRegistry.""" - __slots__ = ("zc", "registry", "cache", "question_history", "out_queue", "out_delay_queue") + __slots__ = ( + "cache", + "out_delay_queue", + "out_queue", + "question_history", + "registry", + "zc", + ) - def __init__(self, zc: 'Zeroconf') -> None: + def __init__(self, zc: Zeroconf) -> None: """Init the query handler.""" self.zc = zc self.registry = zc.registry @@ -203,7 +211,10 @@ def __init__(self, zc: 'Zeroconf') -> None: self.out_delay_queue = zc.out_delay_queue def _add_service_type_enumeration_query_answers( - self, types: List[str], answer_set: _AnswerWithAdditionalsType, known_answers: DNSRRSet + self, + types: list[str], + answer_set: _AnswerWithAdditionalsType, + known_answers: DNSRRSet, ) -> None: """Provide an answer to a service type enumeration query. 
@@ -211,13 +222,21 @@ def _add_service_type_enumeration_query_answers( """ for stype in types: dns_pointer = DNSPointer( - _SERVICE_TYPE_ENUMERATION_NAME, _TYPE_PTR, _CLASS_IN, _DNS_OTHER_TTL, stype, 0.0 + _SERVICE_TYPE_ENUMERATION_NAME, + _TYPE_PTR, + _CLASS_IN, + _DNS_OTHER_TTL, + stype, + 0.0, ) if not known_answers.suppresses(dns_pointer): answer_set[dns_pointer] = set() def _add_pointer_answers( - self, services: List[ServiceInfo], answer_set: _AnswerWithAdditionalsType, known_answers: DNSRRSet + self, + services: list[ServiceInfo], + answer_set: _AnswerWithAdditionalsType, + known_answers: DNSRRSet, ) -> None: """Answer PTR/ANY question.""" for service in services: @@ -234,23 +253,23 @@ def _add_pointer_answers( def _add_address_answers( self, - services: List[ServiceInfo], + services: list[ServiceInfo], answer_set: _AnswerWithAdditionalsType, known_answers: DNSRRSet, type_: _int, ) -> None: """Answer A/AAAA/ANY question.""" for service in services: - answers: List[DNSAddress] = [] - additionals: Set[DNSRecord] = set() - seen_types: Set[int] = set() + answers: list[DNSAddress] = [] + additionals: set[DNSRecord] = set() + seen_types: set[int] = set() for dns_address in service._dns_addresses(None, _IPVersion_ALL): seen_types.add(dns_address.type) if dns_address.type != type_: additionals.add(dns_address) elif not known_answers.suppresses(dns_address): answers.append(dns_address) - missing_types: Set[int] = _ADDRESS_RECORD_TYPES - seen_types + missing_types: set[int] = _ADDRESS_RECORD_TYPES - seen_types if answers: if missing_types: assert service.server is not None, "Service server must be set for NSEC record." 
@@ -265,8 +284,8 @@ def _answer_question( self, question: DNSQuestion, strategy_type: _int, - types: List[str], - services: List[ServiceInfo], + types: list[str], + services: list[ServiceInfo], known_answers: DNSRRSet, ) -> _AnswerWithAdditionalsType: """Answer a question.""" @@ -294,14 +313,14 @@ def _answer_question( return answer_set def async_response( # pylint: disable=unused-argument - self, msgs: List[DNSIncoming], ucast_source: bool - ) -> Optional[QuestionAnswers]: + self, msgs: list[DNSIncoming], ucast_source: bool + ) -> QuestionAnswers | None: """Deal with incoming query packets. Provides a response if possible. This function must be run in the event loop as it is not threadsafe. """ - strategies: List[_AnswerStrategy] = [] + strategies: list[_AnswerStrategy] = [] for msg in msgs: for question in msg._questions: strategies.extend(self._get_answer_strategies(question)) @@ -317,7 +336,7 @@ def async_response( # pylint: disable=unused-argument questions = msg._questions # Only decode known answers if we are not a probe and we have # at least one answer strategy - answers: List[DNSRecord] = [] + answers: list[DNSRecord] = [] for msg in msgs: if msg.is_probe(): is_probe = True @@ -326,7 +345,7 @@ def async_response( # pylint: disable=unused-argument query_res = _QueryResponse(self.cache, questions, is_probe, msg.now) known_answers = DNSRRSet(answers) - known_answers_set: Optional[Set[DNSRecord]] = None + known_answers_set: set[DNSRecord] | None = None now = msg.now for strategy in strategies: question = strategy.question @@ -336,7 +355,11 @@ def async_response( # pylint: disable=unused-argument known_answers_set = known_answers.lookup_set() self.question_history.add_question_at_time(question, now, known_answers_set) answer_set = self._answer_question( - question, strategy.strategy_type, strategy.types, strategy.services, known_answers + question, + strategy.strategy_type, + strategy.types, + strategy.services, + known_answers, ) if not ucast_source and 
is_unicast: query_res.add_qu_question_response(answer_set) @@ -352,19 +375,22 @@ def async_response( # pylint: disable=unused-argument def _get_answer_strategies( self, question: DNSQuestion, - ) -> List[_AnswerStrategy]: + ) -> list[_AnswerStrategy]: """Collect strategies to answer a question.""" name = question.name question_lower_name = name.lower() type_ = question.type - strategies: List[_AnswerStrategy] = [] + strategies: list[_AnswerStrategy] = [] if type_ == _TYPE_PTR and question_lower_name == _SERVICE_TYPE_ENUMERATION_NAME: types = self.registry.async_get_types() if types: strategies.append( _AnswerStrategy( - question, _ANSWER_STRATEGY_SERVICE_TYPE_ENUMERATION, types, _EMPTY_SERVICES_LIST + question, + _ANSWER_STRATEGY_SERVICE_TYPE_ENUMERATION, + types, + _EMPTY_SERVICES_LIST, ) ) return strategies @@ -388,26 +414,36 @@ def _get_answer_strategies( if service is not None: if type_ in (_TYPE_SRV, _TYPE_ANY): strategies.append( - _AnswerStrategy(question, _ANSWER_STRATEGY_SERVICE, _EMPTY_TYPES_LIST, [service]) + _AnswerStrategy( + question, + _ANSWER_STRATEGY_SERVICE, + _EMPTY_TYPES_LIST, + [service], + ) ) if type_ in (_TYPE_TXT, _TYPE_ANY): strategies.append( - _AnswerStrategy(question, _ANSWER_STRATEGY_TEXT, _EMPTY_TYPES_LIST, [service]) + _AnswerStrategy( + question, + _ANSWER_STRATEGY_TEXT, + _EMPTY_TYPES_LIST, + [service], + ) ) return strategies def handle_assembled_query( self, - packets: List[DNSIncoming], + packets: list[DNSIncoming], addr: _str, port: _int, transport: _WrappedTransport, - v6_flow_scope: Union[Tuple[()], Tuple[int, int]], + v6_flow_scope: tuple[()] | tuple[int, int], ) -> None: """Respond to a (re)assembled query. - If the protocol recieved packets with the TC bit set, it will + If the protocol received packets with the TC bit set, it will wait a bit for the rest of the packets and only call handle_assembled_query once it has a complete set of packets or the timer expires. 
If the TC bit is not set, a single @@ -423,7 +459,7 @@ def handle_assembled_query( id_ = first_packet.id out = construct_outgoing_unicast_answers(question_answers.ucast, ucast_source, questions, id_) # When sending unicast, only send back the reply - # via the same socket that it was recieved from + # via the same socket that it was received from # as we know its reachable from that socket self.zc.async_send(out, addr, port, v6_flow_scope, transport) if question_answers.mcast_now: diff --git a/src/zeroconf/_handlers/record_manager.pxd b/src/zeroconf/_handlers/record_manager.pxd index 5be2c283b..b9bde975d 100644 --- a/src/zeroconf/_handlers/record_manager.pxd +++ b/src/zeroconf/_handlers/record_manager.pxd @@ -6,12 +6,11 @@ from .._dns cimport DNSQuestion, DNSRecord from .._protocol.incoming cimport DNSIncoming from .._updates cimport RecordUpdateListener from .._utils.time cimport current_time_millis +from .._record_update cimport RecordUpdate - -cdef cython.float _DNS_PTR_MIN_TTL +cdef unsigned int _DNS_PTR_MIN_TTL cdef cython.uint _TYPE_PTR cdef object _ADDRESS_RECORD_TYPES -cdef object RecordUpdate cdef bint TYPE_CHECKING cdef object _TYPE_PTR @@ -22,7 +21,7 @@ cdef class RecordManager: cdef public DNSCache cache cdef public cython.set listeners - cpdef void async_updates(self, object now, object records) + cpdef void async_updates(self, object now, list records) cpdef void async_updates_complete(self, bint notify) @@ -31,6 +30,7 @@ cdef class RecordManager: record=DNSRecord, answers=cython.list, maybe_entry=DNSRecord, + rec_update=RecordUpdate ) cpdef void async_updates_from_response(self, DNSIncoming msg) diff --git a/src/zeroconf/_handlers/record_manager.py b/src/zeroconf/_handlers/record_manager.py index 70f2e5e11..566f0e8c9 100644 --- a/src/zeroconf/_handlers/record_manager.py +++ b/src/zeroconf/_handlers/record_manager.py @@ -1,26 +1,28 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William 
McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. 
+ +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ -from typing import TYPE_CHECKING, List, Optional, Set, Tuple, Union, cast +from __future__ import annotations + +from typing import TYPE_CHECKING, cast from .._cache import _UniqueRecordsType from .._dns import DNSQuestion, DNSRecord @@ -40,15 +42,15 @@ class RecordManager: """Process records into the cache and notify listeners.""" - __slots__ = ("zc", "cache", "listeners") + __slots__ = ("cache", "listeners", "zc") - def __init__(self, zeroconf: 'Zeroconf') -> None: + def __init__(self, zeroconf: Zeroconf) -> None: """Init the record manager.""" self.zc = zeroconf self.cache = zeroconf.cache - self.listeners: Set[RecordUpdateListener] = set() + self.listeners: set[RecordUpdateListener] = set() - def async_updates(self, now: _float, records: List[RecordUpdate]) -> None: + def async_updates(self, now: _float, records: list[RecordUpdate]) -> None: """Used to notify listeners of new information that has updated a record. @@ -79,12 +81,12 @@ def async_updates_from_response(self, msg: DNSIncoming) -> None: This function must be run in the event loop as it is not threadsafe. 
""" - updates: List[RecordUpdate] = [] - address_adds: List[DNSRecord] = [] - other_adds: List[DNSRecord] = [] - removes: Set[DNSRecord] = set() + updates: list[RecordUpdate] = [] + address_adds: list[DNSRecord] = [] + other_adds: list[DNSRecord] = [] + removes: set[DNSRecord] = set() now = msg.now - unique_types: Set[Tuple[str, int, int]] = set() + unique_types: set[tuple[str, int, int]] = set() cache = self.cache answers = msg.answers() @@ -103,7 +105,8 @@ def async_updates_from_response(self, msg: DNSIncoming) -> None: record, _DNS_PTR_MIN_TTL, ) - record.set_created_ttl(record.created, _DNS_PTR_MIN_TTL) + # Safe because the record is never in the cache yet + record._set_created_ttl(record.created, _DNS_PTR_MIN_TTL) if record.unique: # https://tools.ietf.org/html/rfc6762#section-10.2 unique_types.add((record.name, record_type, record.class_)) @@ -113,18 +116,19 @@ def async_updates_from_response(self, msg: DNSIncoming) -> None: maybe_entry = cache.async_get_unique(record) if not record.is_expired(now): - if maybe_entry is not None: - maybe_entry.reset_ttl(record) + if record_type in _ADDRESS_RECORD_TYPES: + address_adds.append(record) else: - if record_type in _ADDRESS_RECORD_TYPES: - address_adds.append(record) - else: - other_adds.append(record) - updates.append(RecordUpdate(record, maybe_entry)) + other_adds.append(record) + rec_update = RecordUpdate.__new__(RecordUpdate) + rec_update._fast_init(record, maybe_entry) + updates.append(rec_update) # This is likely a goodbye since the record is # expired and exists in the cache elif maybe_entry is not None: - updates.append(RecordUpdate(record, maybe_entry)) + rec_update = RecordUpdate.__new__(RecordUpdate) + rec_update._fast_init(record, maybe_entry) + updates.append(rec_update) removes.add(record) if unique_types: @@ -146,7 +150,7 @@ def async_updates_from_response(self, msg: DNSIncoming) -> None: # that any ServiceBrowser that is going to call # zc.get_service_info will see the cached value # but ONLY after 
all the record updates have been - # processsed. + # processed. new = False if other_adds or address_adds: new = cache.async_add_records(address_adds) @@ -161,7 +165,9 @@ def async_updates_from_response(self, msg: DNSIncoming) -> None: self.async_updates_complete(new) def async_add_listener( - self, listener: RecordUpdateListener, question: Optional[Union[DNSQuestion, List[DNSQuestion]]] + self, + listener: RecordUpdateListener, + question: DNSQuestion | list[DNSQuestion] | None, ) -> None: """Adds a listener for a given question. The listener will have its update_record method called when information is available to @@ -184,14 +190,14 @@ def async_add_listener( self._async_update_matching_records(listener, questions) def _async_update_matching_records( - self, listener: RecordUpdateListener, questions: List[DNSQuestion] + self, listener: RecordUpdateListener, questions: list[DNSQuestion] ) -> None: """Calls back any existing entries in the cache that answer the question. This function must be run from the event loop. """ now = current_time_millis() - records: List[RecordUpdate] = [ + records: list[RecordUpdate] = [ RecordUpdate(record, None) for question in questions for record in self.cache.async_entries_with_name(question.name) @@ -212,4 +218,4 @@ def async_remove_listener(self, listener: RecordUpdateListener) -> None: self.listeners.remove(listener) self.zc.async_notify_all() except ValueError as e: - log.exception('Failed to remove listener: %r', e) + log.exception("Failed to remove listener: %r", e) diff --git a/src/zeroconf/_history.py b/src/zeroconf/_history.py index db6a394d7..1b6f3fadf 100644 --- a/src/zeroconf/_history.py +++ b/src/zeroconf/_history.py @@ -1,26 +1,26 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. 
- - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. 
+ +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ -from typing import Dict, List, Set, Tuple +from __future__ import annotations from ._dns import DNSQuestion, DNSRecord from .const import _DUPLICATE_QUESTION_INTERVAL @@ -36,13 +36,13 @@ class QuestionHistory: def __init__(self) -> None: """Init a new QuestionHistory.""" - self._history: Dict[DNSQuestion, Tuple[float, Set[DNSRecord]]] = {} + self._history: dict[DNSQuestion, tuple[float, set[DNSRecord]]] = {} - def add_question_at_time(self, question: DNSQuestion, now: _float, known_answers: Set[DNSRecord]) -> None: + def add_question_at_time(self, question: DNSQuestion, now: _float, known_answers: set[DNSRecord]) -> None: """Remember a question with known answers.""" self._history[question] = (now, known_answers) - def suppresses(self, question: DNSQuestion, now: _float, known_answers: Set[DNSRecord]) -> bool: + def suppresses(self, question: DNSQuestion, now: _float, known_answers: set[DNSRecord]) -> bool: """Check to see if a question should be suppressed. 
https://datatracker.ietf.org/doc/html/rfc6762#section-7.3 @@ -60,13 +60,11 @@ def suppresses(self, question: DNSQuestion, now: _float, known_answers: Set[DNSR return False # The last question has more known answers than # we knew so we have to ask - if previous_known_answers - known_answers: - return False - return True + return not previous_known_answers - known_answers def async_expire(self, now: _float) -> None: """Expire the history of old questions.""" - removes: List[DNSQuestion] = [] + removes: list[DNSQuestion] = [] for question, now_known_answers in self._history.items(): than, _ = now_known_answers if now - than > _DUPLICATE_QUESTION_INTERVAL: diff --git a/src/zeroconf/_listener.pxd b/src/zeroconf/_listener.pxd index 96f52be02..4cbc5d007 100644 --- a/src/zeroconf/_listener.pxd +++ b/src/zeroconf/_listener.pxd @@ -16,7 +16,6 @@ cdef cython.uint _MAX_MSG_ABSOLUTE cdef cython.uint _DUPLICATE_PACKET_SUPPRESSION_INTERVAL - cdef class AsyncListener: cdef public object zc @@ -51,7 +50,7 @@ cdef class AsyncListener: cpdef _respond_query( self, - object msg, + DNSIncoming msg, object addr, object port, object transport, diff --git a/src/zeroconf/_listener.py b/src/zeroconf/_listener.py index 0f8a8cac7..ed5031698 100644 --- a/src/zeroconf/_listener.py +++ b/src/zeroconf/_listener.py @@ -1,30 +1,32 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + import asyncio import logging import random from functools import partial -from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union, cast +from typing import TYPE_CHECKING, cast from ._logger import QuietLogger, log from ._protocol.incoming import DNSIncoming @@ -47,7 +49,6 @@ class AsyncListener: - """A Listener is used by this module to listen on the multicast group to which DNS messages are sent, allowing the implementation to cache information as it arrives. 
@@ -56,36 +57,34 @@ class AsyncListener: the read() method called when a socket is available for reading.""" __slots__ = ( - 'zc', - '_registry', - '_record_manager', + "_deferred", "_query_handler", - 'data', - 'last_time', - 'last_message', - 'transport', - 'sock_description', - '_deferred', - '_timers', + "_record_manager", + "_registry", + "_timers", + "data", + "last_message", + "last_time", + "sock_description", + "transport", + "zc", ) - def __init__(self, zc: 'Zeroconf') -> None: + def __init__(self, zc: Zeroconf) -> None: self.zc = zc self._registry = zc.registry self._record_manager = zc.record_manager self._query_handler = zc.query_handler - self.data: Optional[bytes] = None + self.data: bytes | None = None self.last_time: float = 0 - self.last_message: Optional[DNSIncoming] = None - self.transport: Optional[_WrappedTransport] = None - self.sock_description: Optional[str] = None - self._deferred: Dict[str, List[DNSIncoming]] = {} - self._timers: Dict[str, asyncio.TimerHandle] = {} + self.last_message: DNSIncoming | None = None + self.transport: _WrappedTransport | None = None + self.sock_description: str | None = None + self._deferred: dict[str, list[DNSIncoming]] = {} + self._timers: dict[str, asyncio.TimerHandle] = {} super().__init__() - def datagram_received( - self, data: _bytes, addrs: Union[Tuple[str, int], Tuple[str, int, int, int]] - ) -> None: + def datagram_received(self, data: _bytes, addrs: tuple[str, int] | tuple[str, int, int, int]) -> None: data_len = len(data) debug = DEBUG_ENABLED() @@ -109,7 +108,7 @@ def _process_datagram_at_time( data_len: _int, now: _float, data: _bytes, - addrs: Union[Tuple[str, int], Tuple[str, int, int, int]], + addrs: tuple[str, int] | tuple[str, int, int, int], ) -> None: if ( self.data == data @@ -120,7 +119,8 @@ def _process_datagram_at_time( # Guard against duplicate packets if debug: log.debug( - 'Ignoring duplicate message with no unicast questions received from %s [socket %s] (%d bytes) as [%r]', + 
"Ignoring duplicate message with no unicast questions" + " received from %s [socket %s] (%d bytes) as [%r]", addrs, self.sock_description, data_len, @@ -129,18 +129,18 @@ def _process_datagram_at_time( return if len(addrs) == 2: - v6_flow_scope: Union[Tuple[()], Tuple[int, int]] = () + v6_flow_scope: tuple[()] | tuple[int, int] = () # https://github.com/python/mypy/issues/1178 - addr, port = addrs # type: ignore + addr, port = addrs addr_port = addrs if TYPE_CHECKING: - addr_port = cast(Tuple[str, int], addr_port) + addr_port = cast(tuple[str, int], addr_port) scope = None else: # https://github.com/python/mypy/issues/1178 - addr, port, flow, scope = addrs # type: ignore + addr, port, flow, scope = addrs if debug: # pragma: no branch - log.debug('IPv6 scope_id %d associated to the receiving interface', scope) + log.debug("IPv6 scope_id %d associated to the receiving interface", scope) v6_flow_scope = (flow, scope) addr_port = (addr, port) @@ -151,7 +151,7 @@ def _process_datagram_at_time( if msg.valid is True: if debug: log.debug( - 'Received from %r:%r [socket %s]: %r (%d bytes) as [%r]', + "Received from %r:%r [socket %s]: %r (%d bytes) as [%r]", addr, port, self.sock_description, @@ -162,7 +162,7 @@ def _process_datagram_at_time( else: if debug: log.debug( - 'Received from %r:%r [socket %s]: (%d bytes) [%r]', + "Received from %r:%r [socket %s]: (%d bytes) [%r]", addr, port, self.sock_description, @@ -189,7 +189,7 @@ def handle_query_or_defer( addr: _str, port: _int, transport: _WrappedTransport, - v6_flow_scope: Union[Tuple[()], Tuple[int, int]], + v6_flow_scope: tuple[()] | tuple[int, int], ) -> None: """Deal with incoming query packets. 
Provides a response if possible.""" @@ -203,12 +203,18 @@ def handle_query_or_defer( if incoming.data == msg.data: return deferred.append(msg) - delay = millis_to_seconds(random.randint(*_TC_DELAY_RANDOM_INTERVAL)) + delay = millis_to_seconds(random.randint(*_TC_DELAY_RANDOM_INTERVAL)) # noqa: S311 loop = self.zc.loop assert loop is not None self._cancel_any_timers_for_addr(addr) self._timers[addr] = loop.call_at( - loop.time() + delay, self._respond_query, None, addr, port, transport, v6_flow_scope + loop.time() + delay, + self._respond_query, + None, + addr, + port, + transport, + v6_flow_scope, ) def _cancel_any_timers_for_addr(self, addr: _str) -> None: @@ -218,11 +224,11 @@ def _cancel_any_timers_for_addr(self, addr: _str) -> None: def _respond_query( self, - msg: Optional[DNSIncoming], + msg: DNSIncoming | None, addr: _str, port: _int, transport: _WrappedTransport, - v6_flow_scope: Union[Tuple[()], Tuple[int, int]], + v6_flow_scope: tuple[()] | tuple[int, int], ) -> None: """Respond to a query and reassemble any truncated deferred packets.""" self._cancel_any_timers_for_addr(addr) @@ -235,7 +241,7 @@ def _respond_query( def error_received(self, exc: Exception) -> None: """Likely socket closed or IPv6.""" # We preformat the message string with the socket as we want - # log_exception_once to log a warrning message once PER EACH + # log_exception_once to log a warning message once PER EACH # different socket in case there are problems with multiple # sockets msg_str = f"Error with socket {self.sock_description}): %s" @@ -246,5 +252,5 @@ def connection_made(self, transport: asyncio.BaseTransport) -> None: self.transport = wrapped_transport self.sock_description = f"{wrapped_transport.fileno} ({wrapped_transport.sock_name})" - def connection_lost(self, exc: Optional[Exception]) -> None: + def connection_lost(self, exc: Exception | None) -> None: """Handle connection lost.""" diff --git a/src/zeroconf/_logger.py b/src/zeroconf/_logger.py index b0e66bc90..0d734dfde 
100644 --- a/src/zeroconf/_logger.py +++ b/src/zeroconf/_logger.py @@ -1,31 +1,33 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - ) - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine + ) +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. 
+ +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + import logging import sys -from typing import Any, Dict, Union, cast +from typing import Any, ClassVar, cast -log = logging.getLogger(__name__.split('.', maxsplit=1)[0]) +log = logging.getLogger(__name__.split(".", maxsplit=1)[0]) log.addHandler(logging.NullHandler()) @@ -38,7 +40,7 @@ def set_logger_level_if_unset() -> None: class QuietLogger: - _seen_logs: Dict[str, Union[int, tuple]] = {} + _seen_logs: ClassVar[dict[str, int | tuple]] = {} @classmethod def log_exception_warning(cls, *logger_data: Any) -> None: @@ -50,7 +52,7 @@ def log_exception_warning(cls, *logger_data: Any) -> None: logger = log.warning else: logger = log.debug - logger(*(logger_data or ['Exception occurred']), exc_info=True) + logger(*(logger_data or ["Exception occurred"]), exc_info=True) @classmethod def log_exception_debug(cls, *logger_data: Any) -> None: @@ -61,7 +63,7 @@ def log_exception_debug(cls, *logger_data: Any) -> None: # log the trace only on the first time cls._seen_logs[exc_str] = exc_info log_exc_info = True - log.debug(*(logger_data or ['Exception occurred']), exc_info=log_exc_info) + log.debug(*(logger_data or ["Exception occurred"]), exc_info=log_exc_info) @classmethod def log_warning_once(cls, *args: Any) -> None: diff --git a/src/zeroconf/_protocol/__init__.py b/src/zeroconf/_protocol/__init__.py index 2ef4b15b1..584a74eca 100644 --- a/src/zeroconf/_protocol/__init__.py +++ b/src/zeroconf/_protocol/__init__.py @@ -1,21 +1,23 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - This module provides a framework for the use of DNS 
Service Discovery - using IP multicast. +This module provides a framework for the use of DNS Service Discovery +using IP multicast. - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. 
- You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ + +from __future__ import annotations diff --git a/src/zeroconf/_protocol/incoming.pxd b/src/zeroconf/_protocol/incoming.pxd index bb4383036..feaa2a02e 100644 --- a/src/zeroconf/_protocol/incoming.pxd +++ b/src/zeroconf/_protocol/incoming.pxd @@ -97,7 +97,7 @@ cdef class DNSIncoming: ) cdef void _read_others(self) - @cython.locals(offset="unsigned int") + @cython.locals(offset="unsigned int", question=DNSQuestion) cdef _read_questions(self) @cython.locals( @@ -109,9 +109,15 @@ cdef class DNSIncoming: @cython.locals( name_start="unsigned int", - offset="unsigned int" + offset="unsigned int", + address_rec=DNSAddress, + pointer_rec=DNSPointer, + text_rec=DNSText, + srv_rec=DNSService, + hinfo_rec=DNSHinfo, + nsec_rec=DNSNsec, ) - cdef _read_record(self, object domain, unsigned int type_, unsigned int class_, unsigned int ttl, unsigned int length) + cdef _read_record(self, str domain, unsigned int type_, unsigned int class_, unsigned int ttl, unsigned int length) @cython.locals( offset="unsigned int", diff --git a/src/zeroconf/_protocol/incoming.py b/src/zeroconf/_protocol/incoming.py index 9e208b639..2d977b642 100644 --- a/src/zeroconf/_protocol/incoming.py +++ b/src/zeroconf/_protocol/incoming.py @@ -1,28 +1,30 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. 
- - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. 
+ +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + import struct import sys -from typing import Any, Dict, List, Optional, Set, Tuple, Union +from typing import Any from .._dns import ( DNSAddress, @@ -61,7 +63,7 @@ DECODE_EXCEPTIONS = (IndexError, struct.error, IncomingDecodeError) -_seen_logs: Dict[str, Union[int, tuple]] = {} +_seen_logs: dict[str, int | tuple] = {} _str = str _int = int @@ -70,33 +72,33 @@ class DNSIncoming: """Object representation of an incoming DNS packet""" __slots__ = ( + "_answers", + "_data_len", "_did_read_others", - 'flags', - 'offset', - 'data', - 'view', - '_data_len', - '_name_cache', - '_questions', - '_answers', - 'id', - '_num_questions', - '_num_answers', - '_num_authorities', - '_num_additionals', - 'valid', - 'now', - 'scope_id', - 'source', - '_has_qu_question', + "_has_qu_question", + "_name_cache", + "_num_additionals", + "_num_answers", + "_num_authorities", + "_num_questions", + "_questions", + "data", + "flags", + "id", + "now", + "offset", + "scope_id", + "source", + "valid", + "view", ) def __init__( self, data: bytes, - source: Optional[Tuple[str, int]] = None, - scope_id: Optional[int] = None, - now: Optional[float] = None, + source: tuple[str, int] | None = None, + scope_id: int | None = None, + now: float | None = None, ) -> None: """Constructor from string holding bytes of packet""" self.flags = 0 @@ -104,9 +106,9 @@ def __init__( self.data = data self.view = data self._data_len = len(data) - self._name_cache: Dict[int, List[str]] = {} - self._questions: List[DNSQuestion] = [] - self._answers: List[DNSRecord] = [] + self._name_cache: dict[int, list[str]] = {} + self._questions: list[DNSQuestion] = [] + self._answers: list[DNSRecord] = [] self.id = 0 self._num_questions = 0 self._num_answers = 0 @@ -122,7 +124,7 @@ 
def __init__( self._initial_parse() except DECODE_EXCEPTIONS: self._log_exception_debug( - 'Received invalid packet from %s at offset %d while unpacking %r', + "Received invalid packet from %s at offset %d while unpacking %r", self.source, self.offset, self.data, @@ -146,7 +148,7 @@ def truncated(self) -> bool: return (self.flags & _FLAGS_TC) == _FLAGS_TC @property - def questions(self) -> List[DNSQuestion]: + def questions(self) -> list[DNSQuestion]: """Questions in the packet.""" return self._questions @@ -171,7 +173,7 @@ def num_additionals(self) -> int: return self._num_additionals def _initial_parse(self) -> None: - """Parse the data needed to initalize the packet object.""" + """Parse the data needed to initialize the packet object.""" self._read_header() self._read_questions() if not self._num_questions: @@ -187,16 +189,16 @@ def _log_exception_debug(cls, *logger_data: Any) -> None: # log the trace only on the first time _seen_logs[exc_str] = exc_info log_exc_info = True - log.debug(*(logger_data or ['Exception occurred']), exc_info=log_exc_info) + log.debug(*(logger_data or ["Exception occurred"]), exc_info=log_exc_info) - def answers(self) -> List[DNSRecord]: + def answers(self) -> list[DNSRecord]: """Answers in the packet.""" if not self._did_read_others: try: self._read_others() except DECODE_EXCEPTIONS: self._log_exception_debug( - 'Received invalid packet from %s at offset %d while unpacking %r', + "Received invalid packet from %s at offset %d while unpacking %r", self.source, self.offset, self.data, @@ -208,18 +210,20 @@ def is_probe(self) -> bool: return self._num_authorities > 0 def __repr__(self) -> str: - return '<DNSIncoming:{%s}>' % ', '.join( - [ - 'id=%s' % self.id, - 'flags=%s' % self.flags, - 'truncated=%s' % self.truncated, - 'n_q=%s' % self._num_questions, - 'n_ans=%s' % self._num_answers, - 'n_auth=%s' % self._num_authorities, - 'n_add=%s' % self._num_additionals, - 'questions=%s' % self._questions, - 'answers=%s' % self.answers(), - ] + return "<DNSIncoming:{{{}}}>".format( 
+ ", ".join( + [ + f"id={self.id}", + f"flags={self.flags}", + f"truncated={self.truncated}", + f"n_q={self._num_questions}", + f"n_ans={self._num_answers}", + f"n_auth={self._num_authorities}", + f"n_add={self._num_additionals}", + f"questions={self._questions}", + f"answers={self.answers()}", + ] + ) ) def _read_header(self) -> None: @@ -246,7 +250,8 @@ def _read_questions(self) -> None: # The question has 2 unsigned shorts in network order type_ = view[offset] << 8 | view[offset + 1] class_ = view[offset + 2] << 8 | view[offset + 3] - question = DNSQuestion(name, type_, class_) + question = DNSQuestion.__new__(DNSQuestion) + question._fast_init(name, type_, class_) if question.unique: # QU questions use the same bit as unique self._has_qu_question = True questions.append(question) @@ -255,7 +260,7 @@ def _read_character_string(self) -> str: """Reads a character string from the packet""" length = self.view[self.offset] self.offset += 1 - info = self.data[self.offset : self.offset + length].decode('utf-8', 'replace') + info = self.data[self.offset : self.offset + length].decode("utf-8", "replace") self.offset += length return info @@ -291,7 +296,7 @@ def _read_others(self) -> None: # above would fail and hit the exception catch in read_others self.offset = end log.debug( - 'Unable to parse; skipping record for %s with type %s at offset %d while unpacking %r', + "Unable to parse; skipping record for %s with type %s at offset %d while unpacking %r", domain, _TYPES.get(type_, type_), self.offset, @@ -303,14 +308,20 @@ def _read_others(self) -> None: def _read_record( self, domain: _str, type_: _int, class_: _int, ttl: _int, length: _int - ) -> Optional[DNSRecord]: + ) -> DNSRecord | None: """Read known records types and skip unknown ones.""" if type_ == _TYPE_A: - return DNSAddress(domain, type_, class_, ttl, self._read_string(4), None, self.now) + address_rec = DNSAddress.__new__(DNSAddress) + address_rec._fast_init(domain, type_, class_, ttl, self._read_string(4), 
None, self.now) + return address_rec if type_ in (_TYPE_CNAME, _TYPE_PTR): - return DNSPointer(domain, type_, class_, ttl, self._read_name(), self.now) + pointer_rec = DNSPointer.__new__(DNSPointer) + pointer_rec._fast_init(domain, type_, class_, ttl, self._read_name(), self.now) + return pointer_rec if type_ == _TYPE_TXT: - return DNSText(domain, type_, class_, ttl, self._read_string(length), self.now) + text_rec = DNSText.__new__(DNSText) + text_rec._fast_init(domain, type_, class_, ttl, self._read_string(length), self.now) + return text_rec if type_ == _TYPE_SRV: view = self.view offset = self.offset @@ -319,7 +330,8 @@ def _read_record( priority = view[offset] << 8 | view[offset + 1] weight = view[offset + 2] << 8 | view[offset + 3] port = view[offset + 4] << 8 | view[offset + 5] - return DNSService( + srv_rec = DNSService.__new__(DNSService) + srv_rec._fast_init( domain, type_, class_, @@ -330,8 +342,10 @@ def _read_record( self._read_name(), self.now, ) + return srv_rec if type_ == _TYPE_HINFO: - return DNSHinfo( + hinfo_rec = DNSHinfo.__new__(DNSHinfo) + hinfo_rec._fast_init( domain, type_, class_, @@ -340,11 +354,23 @@ def _read_record( self._read_character_string(), self.now, ) + return hinfo_rec if type_ == _TYPE_AAAA: - return DNSAddress(domain, type_, class_, ttl, self._read_string(16), self.scope_id, self.now) + address_rec = DNSAddress.__new__(DNSAddress) + address_rec._fast_init( + domain, + type_, + class_, + ttl, + self._read_string(16), + self.scope_id, + self.now, + ) + return address_rec if type_ == _TYPE_NSEC: name_start = self.offset - return DNSNsec( + nsec_rec = DNSNsec.__new__(DNSNsec) + nsec_rec._fast_init( domain, type_, class_, @@ -353,13 +379,14 @@ def _read_record( self._read_bitmap(name_start + length), self.now, ) + return nsec_rec # Try to ignore types we don't know about # Skip the payload for the resource record so the next # records can be parsed correctly self.offset += length return None - def _read_bitmap(self, end: _int) -> 
List[int]: + def _read_bitmap(self, end: _int) -> list[int]: """Reads an NSEC bitmap from the packet.""" rdtypes = [] view = self.view @@ -371,7 +398,7 @@ def _read_bitmap(self, end: _int) -> List[int]: bitmap_length = view[offset_plus_one] bitmap_end = offset_plus_two + bitmap_length for i, byte in enumerate(self.data[offset_plus_two:bitmap_end]): - for bit in range(0, 8): + for bit in range(8): if byte & (0x80 >> bit): rdtypes.append(bit + window * 256 + i * 8) self.offset += 2 + bitmap_length @@ -379,8 +406,8 @@ def _read_bitmap(self, end: _int) -> List[int]: def _read_name(self) -> str: """Reads a domain name from the packet.""" - labels: List[str] = [] - seen_pointers: Set[int] = set() + labels: list[str] = [] + seen_pointers: set[int] = set() original_offset = self.offset self.offset = self._decode_labels_at_offset(original_offset, labels, seen_pointers) self._name_cache[original_offset] = labels @@ -391,7 +418,7 @@ def _read_name(self) -> str: ) return name - def _decode_labels_at_offset(self, off: _int, labels: List[str], seen_pointers: Set[int]) -> int: + def _decode_labels_at_offset(self, off: _int, labels: list[str], seen_pointers: set[int]) -> int: # This is a tight loop that is called frequently, small optimizations can make a difference. 
view = self.view while off < self._data_len: @@ -401,7 +428,7 @@ def _decode_labels_at_offset(self, off: _int, labels: List[str], seen_pointers: if length < 0x40: label_idx = off + DNS_COMPRESSION_HEADER_LEN - labels.append(self.data[label_idx : label_idx + length].decode('utf-8', 'replace')) + labels.append(self.data[label_idx : label_idx + length].decode("utf-8", "replace")) off += DNS_COMPRESSION_HEADER_LEN + length continue diff --git a/src/zeroconf/_protocol/outgoing.pxd b/src/zeroconf/_protocol/outgoing.pxd index fa1aeebcd..bb9730b89 100644 --- a/src/zeroconf/_protocol/outgoing.pxd +++ b/src/zeroconf/_protocol/outgoing.pxd @@ -108,6 +108,8 @@ cdef class DNSOutgoing: cpdef void write_string(self, cython.bytes value) + cpdef void write_character_string(self, cython.bytes value) + @cython.locals(utfstr=bytes) cdef void _write_utf(self, cython.str value) diff --git a/src/zeroconf/_protocol/outgoing.py b/src/zeroconf/_protocol/outgoing.py index f45c39351..fd5e57a02 100644 --- a/src/zeroconf/_protocol/outgoing.py +++ b/src/zeroconf/_protocol/outgoing.py @@ -1,29 +1,32 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. 
- - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + import enum import logging +from collections.abc import Sequence from struct import Struct -from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Tuple, Union +from typing import TYPE_CHECKING from .._dns import DNSPointer, DNSQuestion, DNSRecord from .._exceptions import NamePartTooLongException @@ -50,9 +53,9 @@ DNSRecord_ = DNSRecord -PACK_BYTE = Struct('>B').pack -PACK_SHORT = Struct('>H').pack -PACK_LONG = Struct('>L').pack +PACK_BYTE = Struct(">B").pack +PACK_SHORT = Struct(">H").pack +PACK_LONG = Struct(">L").pack SHORT_CACHE_MAX = 128 @@ -74,24 +77,23 @@ class State(enum.Enum): class DNSOutgoing: - """Object representation of an outgoing packet""" __slots__ = ( - 'flags', - 'finished', - 'id', - 'multicast', - 'packets_data', - 'names', - 'data', - 'size', - 'allow_long', - 'state', - 
'questions', - 'answers', - 'authorities', - 'additionals', + "additionals", + "allow_long", + "answers", + "authorities", + "data", + "finished", + "flags", + "id", + "multicast", + "names", + "packets_data", + "questions", + "size", + "state", ) def __init__(self, flags: int, multicast: bool = True, id_: int = 0) -> None: @@ -99,20 +101,20 @@ def __init__(self, flags: int, multicast: bool = True, id_: int = 0) -> None: self.finished = False self.id = id_ self.multicast = multicast - self.packets_data: List[bytes] = [] + self.packets_data: list[bytes] = [] # these 3 are per-packet -- see also _reset_for_next_packet() - self.names: Dict[str, int] = {} - self.data: List[bytes] = [] + self.names: dict[str, int] = {} + self.data: list[bytes] = [] self.size: int = _DNS_PACKET_HEADER_LEN self.allow_long: bool = True self.state = STATE_INIT - self.questions: List[DNSQuestion] = [] - self.answers: List[Tuple[DNSRecord, float]] = [] - self.authorities: List[DNSPointer] = [] - self.additionals: List[DNSRecord] = [] + self.questions: list[DNSQuestion] = [] + self.answers: list[tuple[DNSRecord, float]] = [] + self.authorities: list[DNSPointer] = [] + self.additionals: list[DNSRecord] = [] def is_query(self) -> bool: """Returns true if this is a query.""" @@ -129,15 +131,17 @@ def _reset_for_next_packet(self) -> None: self.allow_long = True def __repr__(self) -> str: - return '<DNSOutgoing:{%s}>' % ', '.join( - [ - 'multicast=%s' % self.multicast, - 'flags=%s' % self.flags, - 'questions=%s' % self.questions, - 'answers=%s' % self.answers, - 'authorities=%s' % self.authorities, - 'additionals=%s' % self.additionals, - ] + return "<DNSOutgoing:{{{}}}>".format( + ", ".join( + [ + f"multicast={self.multicast}", + f"flags={self.flags}", + f"questions={self.questions}", + f"answers={self.answers}", + f"authorities={self.authorities}", + f"additionals={self.additionals}", + ] + ) ) def add_question(self, record: DNSQuestion) -> None: @@ -149,7 +153,7 @@ def add_answer(self, inp: DNSIncoming, record: DNSRecord) -> None: 
if not record.suppressed_by(inp): self.add_answer_at_time(record, 0.0) - def add_answer_at_time(self, record: Optional[DNSRecord], now: float_) -> None: + def add_answer_at_time(self, record: DNSRecord | None, now: float_) -> None: """Adds an answer if it does not expire by a certain time""" now_double = now if record is not None and (now_double == 0 or not record.is_expired(now_double)): @@ -219,7 +223,7 @@ def write_short(self, value: int_) -> None: self.data.append(self._get_short(value)) self.size += 2 - def _write_int(self, value: Union[float, int]) -> None: + def _write_int(self, value: float | int) -> None: """Writes an unsigned integer to the packet""" value_as_int = int(value) long_bytes = LONG_LOOKUP.get(value_as_int) @@ -238,7 +242,7 @@ def write_string(self, value: bytes_) -> None: def _write_utf(self, s: str_) -> None: """Writes a UTF-8 string of a given length to the packet""" - utfstr = s.encode('utf-8') + utfstr = s.encode("utf-8") length = len(utfstr) if length > 64: raise NamePartTooLongException @@ -268,7 +272,7 @@ def write_name(self, name: str_) -> None: """ # split name into each label - if name.endswith('.'): + if name and name[-1] == ".": name = name[:-1] index = self.names.get(name, 0) @@ -277,21 +281,21 @@ def write_name(self, name: str_) -> None: return start_size = self.size - labels = name.split('.') + labels = name.split(".") # Write each new label or a pointer to the existing one in the packet self.names[name] = start_size self._write_utf(labels[0]) name_length = 0 for count in range(1, len(labels)): - partial_name = '.'.join(labels[count:]) + partial_name = ".".join(labels[count:]) index = self.names.get(partial_name, 0) if index: self._write_link_to_name(index) return if name_length == 0: - name_length = len(name.encode('utf-8')) - self.names[partial_name] = start_size + name_length - len(partial_name.encode('utf-8')) + name_length = len(name.encode("utf-8")) + self.names[partial_name] = start_size + name_length - 
len(partial_name.encode("utf-8")) self._write_utf(labels[count]) # this is the end of a name @@ -312,7 +316,7 @@ def _write_question(self, question: DNSQuestion_) -> bool: self._write_record_class(question) return self._check_data_limit_or_rollback(start_data_length, start_size) - def _write_record_class(self, record: Union[DNSQuestion_, DNSRecord_]) -> None: + def _write_record_class(self, record: DNSQuestion_ | DNSRecord_) -> None: """Write out the record class including the unique/unicast (QU) bit.""" class_ = record.class_ if record.unique is True and self.multicast: @@ -355,7 +359,11 @@ def _check_data_limit_or_rollback(self, start_data_length: int_, start_size: int return True if LOGGING_IS_ENABLED_FOR(LOGGING_DEBUG): # pragma: no branch - log.debug("Reached data limit (size=%d) > (limit=%d) - rolling back", self.size, len_limit) + log.debug( + "Reached data limit (size=%d) > (limit=%d) - rolling back", + self.size, + len_limit, + ) del self.data[start_data_length:] self.size = start_size @@ -390,7 +398,11 @@ def _write_records_from_offset(self, records: Sequence[DNSRecord], offset: int_) return records_written def _has_more_to_add( - self, questions_offset: int_, answer_offset: int_, authority_offset: int_, additional_offset: int_ + self, + questions_offset: int_, + answer_offset: int_, + authority_offset: int_, + additional_offset: int_, ) -> bool: """Check if all questions, answers, authority, and additionals have been written to the packet.""" return ( @@ -400,7 +412,7 @@ def _has_more_to_add( or additional_offset < len(self.additionals) ) - def packets(self) -> List[bytes]: + def packets(self) -> list[bytes]: """Returns a list of bytestrings containing the packets' bytes No further parts should be added to the packet once this @@ -481,7 +493,7 @@ def packets(self) -> List[bytes]: else: self._insert_short_at_start(self.id) - packets_data.append(b''.join(self.data)) + packets_data.append(b"".join(self.data)) if not made_progress: # Generating an empty 
packet is not a desirable outcome, but currently diff --git a/src/zeroconf/_record_update.pxd b/src/zeroconf/_record_update.pxd index d1b18cbe0..1562299b2 100644 --- a/src/zeroconf/_record_update.pxd +++ b/src/zeroconf/_record_update.pxd @@ -8,3 +8,5 @@ cdef class RecordUpdate: cdef public DNSRecord new cdef public DNSRecord old + + cdef void _fast_init(self, object new, object old) diff --git a/src/zeroconf/_record_update.py b/src/zeroconf/_record_update.py index 8e0e4bdb0..497ee39df 100644 --- a/src/zeroconf/_record_update.py +++ b/src/zeroconf/_record_update.py @@ -1,42 +1,48 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. 
+ +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ -from typing import Optional +from __future__ import annotations from ._dns import DNSRecord +_DNSRecord = DNSRecord + class RecordUpdate: __slots__ = ("new", "old") - def __init__(self, new: DNSRecord, old: Optional[DNSRecord] = None): + def __init__(self, new: DNSRecord, old: DNSRecord | None = None) -> None: """RecordUpdate represents a change in a DNS record.""" + self._fast_init(new, old) + + def _fast_init(self, new: _DNSRecord, old: _DNSRecord | None) -> None: + """Fast init for RecordUpdate.""" self.new = new self.old = old - def __getitem__(self, index: int) -> Optional[DNSRecord]: + def __getitem__(self, index: int) -> DNSRecord | None: """Get the new or old record.""" if index == 0: return self.new - elif index == 1: + if index == 1: return self.old raise IndexError(index) diff --git a/src/zeroconf/_services/__init__.py b/src/zeroconf/_services/__init__.py index cf54d7f07..b244552f1 100644 --- a/src/zeroconf/_services/__init__.py +++ b/src/zeroconf/_services/__init__.py @@ -1,27 +1,29 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. 
- - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. 
+ +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + import enum -from typing import TYPE_CHECKING, Any, Callable, List +from typing import TYPE_CHECKING, Any, Callable if TYPE_CHECKING: from .._core import Zeroconf @@ -35,41 +37,41 @@ class ServiceStateChange(enum.Enum): class ServiceListener: - def add_service(self, zc: 'Zeroconf', type_: str, name: str) -> None: - raise NotImplementedError() + def add_service(self, zc: Zeroconf, type_: str, name: str) -> None: + raise NotImplementedError - def remove_service(self, zc: 'Zeroconf', type_: str, name: str) -> None: - raise NotImplementedError() + def remove_service(self, zc: Zeroconf, type_: str, name: str) -> None: + raise NotImplementedError - def update_service(self, zc: 'Zeroconf', type_: str, name: str) -> None: - raise NotImplementedError() + def update_service(self, zc: Zeroconf, type_: str, name: str) -> None: + raise NotImplementedError class Signal: - __slots__ = ('_handlers',) + __slots__ = ("_handlers",) def __init__(self) -> None: - self._handlers: List[Callable[..., None]] = [] + self._handlers: list[Callable[..., None]] = [] def fire(self, **kwargs: Any) -> None: for h in self._handlers[:]: h(**kwargs) @property - def registration_interface(self) -> 'SignalRegistrationInterface': + def registration_interface(self) -> SignalRegistrationInterface: return SignalRegistrationInterface(self._handlers) class SignalRegistrationInterface: - __slots__ = ('_handlers',) + __slots__ = ("_handlers",) - def __init__(self, handlers: List[Callable[..., None]]) -> None: + def __init__(self, handlers: list[Callable[..., None]]) -> None: self._handlers = handlers - def register_handler(self, handler: Callable[..., None]) -> 'SignalRegistrationInterface': + def register_handler(self, handler: Callable[..., None]) -> 
SignalRegistrationInterface: self._handlers.append(handler) return self - def unregister_handler(self, handler: Callable[..., None]) -> 'SignalRegistrationInterface': + def unregister_handler(self, handler: Callable[..., None]) -> SignalRegistrationInterface: self._handlers.remove(handler) return self diff --git a/src/zeroconf/_services/browser.pxd b/src/zeroconf/_services/browser.pxd index 4649291c7..1ea99c82d 100644 --- a/src/zeroconf/_services/browser.pxd +++ b/src/zeroconf/_services/browser.pxd @@ -44,6 +44,7 @@ cdef class _DNSPointerOutgoingBucket: cpdef add(self, cython.uint max_compressed_size, DNSQuestion question, cython.set answers) + @cython.locals(cache=DNSCache, question_history=QuestionHistory, record=DNSRecord, qu_question=bint) cpdef list generate_service_query( object zc, @@ -53,9 +54,11 @@ cpdef list generate_service_query( object question_type ) + @cython.locals(answer=DNSPointer, query_buckets=list, question=DNSQuestion, max_compressed_size=cython.uint, max_bucket_size=cython.uint, query_bucket=_DNSPointerOutgoingBucket) cdef list _group_ptr_queries_with_known_answers(double now_millis, bint multicast, cython.dict question_with_known_answers) + cdef class QueryScheduler: cdef object _zc @@ -83,7 +86,7 @@ cdef class QueryScheduler: @cython.locals(current=_ScheduledPTRQuery, expire_time=double) cpdef void reschedule_ptr_first_refresh(self, DNSPointer pointer) - @cython.locals(ttl_millis='unsigned int', additional_wait=double, next_query_time=double) + @cython.locals(ttl_millis="unsigned int", additional_wait=double, next_query_time=double) cpdef void schedule_rescue_query(self, _ScheduledPTRQuery query, double now_millis, float additional_percentage) cpdef void _process_startup_queries(self) @@ -93,6 +96,7 @@ cdef class QueryScheduler: cpdef void async_send_ready_queries(self, bint first_request, double now_millis, set ready_types) + cdef class _ServiceBrowserBase(RecordUpdateListener): cdef public cython.set types diff --git 
a/src/zeroconf/_services/browser.py b/src/zeroconf/_services/browser.py index 2ff660744..1f60e8f9c 100644 --- a/src/zeroconf/_services/browser.py +++ b/src/zeroconf/_services/browser.py @@ -1,25 +1,27 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. 
+ +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + import asyncio import heapq import queue @@ -27,20 +29,13 @@ import threading import time import warnings +from collections.abc import Iterable from functools import partial -from types import TracebackType # noqa # used in type hints +from types import TracebackType # used in type hints from typing import ( TYPE_CHECKING, Any, Callable, - Dict, - Iterable, - List, - Optional, - Set, - Tuple, - Type, - Union, cast, ) @@ -98,18 +93,29 @@ bool_ = bool str_ = str -_QuestionWithKnownAnswers = Dict[DNSQuestion, Set[DNSPointer]] +_QuestionWithKnownAnswers = dict[DNSQuestion, set[DNSPointer]] heappop = heapq.heappop heappush = heapq.heappush class _ScheduledPTRQuery: - - __slots__ = ('alias', 'name', 'ttl', 'cancelled', 'expire_time_millis', 'when_millis') + __slots__ = ( + "alias", + "cancelled", + "expire_time_millis", + "name", + "ttl", + "when_millis", + ) def __init__( - self, alias: str, name: str, ttl: int, expire_time_millis: float, when_millis: float + self, + alias: str, + name: str, + ttl: int, + expire_time_millis: float, + when_millis: float, ) -> None: """Create a scheduled query.""" self.alias = alias @@ -144,13 +150,13 @@ def __repr__(self) -> str: ">" ) - def __lt__(self, other: '_ScheduledPTRQuery') -> bool: + def __lt__(self, other: _ScheduledPTRQuery) -> bool: """Compare two scheduled queries.""" if type(other) is _ScheduledPTRQuery: return self.when_millis < other.when_millis return NotImplemented - def __le__(self, other: '_ScheduledPTRQuery') -> bool: + def __le__(self, other: _ScheduledPTRQuery) -> bool: """Compare two scheduled queries.""" if type(other) is _ScheduledPTRQuery: return self.when_millis < other.when_millis or self.__eq__(other) @@ -162,13 +168,13 @@ def __eq__(self, other: Any) -> 
bool: return self.when_millis == other.when_millis return NotImplemented - def __ge__(self, other: '_ScheduledPTRQuery') -> bool: + def __ge__(self, other: _ScheduledPTRQuery) -> bool: """Compare two scheduled queries.""" if type(other) is _ScheduledPTRQuery: return self.when_millis > other.when_millis or self.__eq__(other) return NotImplemented - def __gt__(self, other: '_ScheduledPTRQuery') -> bool: + def __gt__(self, other: _ScheduledPTRQuery) -> bool: """Compare two scheduled queries.""" if type(other) is _ScheduledPTRQuery: return self.when_millis > other.when_millis @@ -178,7 +184,7 @@ def __gt__(self, other: '_ScheduledPTRQuery') -> bool: class _DNSPointerOutgoingBucket: """A DNSOutgoing bucket.""" - __slots__ = ('now_millis', 'out', 'bytes') + __slots__ = ("bytes", "now_millis", "out") def __init__(self, now_millis: float, multicast: bool) -> None: """Create a bucket to wrap a DNSOutgoing.""" @@ -186,7 +192,7 @@ def __init__(self, now_millis: float, multicast: bool) -> None: self.out = DNSOutgoing(_FLAGS_QR_QUERY, multicast) self.bytes = 0 - def add(self, max_compressed_size: int_, question: DNSQuestion, answers: Set[DNSPointer]) -> None: + def add(self, max_compressed_size: int_, question: DNSQuestion, answers: set[DNSPointer]) -> None: """Add a new set of questions and known answers to the outgoing.""" self.out.add_question(question) for answer in answers: @@ -195,8 +201,10 @@ def add(self, max_compressed_size: int_, question: DNSQuestion, answers: Set[DNS def group_ptr_queries_with_known_answers( - now: float_, multicast: bool_, question_with_known_answers: _QuestionWithKnownAnswers -) -> List[DNSOutgoing]: + now: float_, + multicast: bool_, + question_with_known_answers: _QuestionWithKnownAnswers, +) -> list[DNSOutgoing]: """Aggregate queries so that as many known answers as possible fit in the same packet without having known answers spill over into the next packet unless the question and known answers are always going to exceed the packet size. 
@@ -209,20 +217,22 @@ def group_ptr_queries_with_known_answers( def _group_ptr_queries_with_known_answers( - now_millis: float_, multicast: bool_, question_with_known_answers: _QuestionWithKnownAnswers -) -> List[DNSOutgoing]: + now_millis: float_, + multicast: bool_, + question_with_known_answers: _QuestionWithKnownAnswers, +) -> list[DNSOutgoing]: """Inner wrapper for group_ptr_queries_with_known_answers.""" # This is the maximum size the query + known answers can be with name compression. # The actual size of the query + known answers may be a bit smaller since other # parts may be shared when the final DNSOutgoing packets are constructed. The # goal of this algorithm is to quickly bucket the query + known answers without # the overhead of actually constructing the packets. - query_by_size: Dict[DNSQuestion, int] = { + query_by_size: dict[DNSQuestion, int] = { question: (question.max_size + sum(answer.max_size_compressed for answer in known_answers)) for question, known_answers in question_with_known_answers.items() } max_bucket_size = _MAX_MSG_TYPICAL - _DNS_PACKET_HEADER_LEN - query_buckets: List[_DNSPointerOutgoingBucket] = [] + query_buckets: list[_DNSPointerOutgoingBucket] = [] for question in sorted( query_by_size, key=query_by_size.get, # type: ignore @@ -246,12 +256,12 @@ def _group_ptr_queries_with_known_answers( def generate_service_query( - zc: 'Zeroconf', + zc: Zeroconf, now_millis: float_, - types_: Set[str], + types_: set[str], multicast: bool, - question_type: Optional[DNSQuestionType], -) -> List[DNSOutgoing]: + question_type: DNSQuestionType | None, +) -> list[DNSOutgoing]: """Generate a service query for sending with zeroconf.send.""" questions_with_known_answers: _QuestionWithKnownAnswers = {} qu_question = not multicast if question_type is None else question_type is QU_QUESTION @@ -269,7 +279,7 @@ def generate_service_query( log.debug("Asking %s was suppressed by the question history", question) continue if TYPE_CHECKING: - 
pointer_known_answers = cast(Set[DNSPointer], known_answers) + pointer_known_answers = cast(set[DNSPointer], known_answers) else: pointer_known_answers = known_answers questions_with_known_answers[question] = pointer_known_answers @@ -281,7 +291,7 @@ def generate_service_query( def _on_change_dispatcher( listener: ServiceListener, - zeroconf: 'Zeroconf', + zeroconf: Zeroconf, service_type: str, name: str, state_change: ServiceStateChange, @@ -290,14 +300,17 @@ def _on_change_dispatcher( getattr(listener, _ON_CHANGE_DISPATCH[state_change])(zeroconf, service_type, name) -def _service_state_changed_from_listener(listener: ServiceListener) -> Callable[..., None]: +def _service_state_changed_from_listener( + listener: ServiceListener, +) -> Callable[..., None]: """Generate a service_state_changed handlers from a listener.""" assert listener is not None - if not hasattr(listener, 'update_service'): + if not hasattr(listener, "update_service"): warnings.warn( - "%r has no update_service method. Provide one (it can be empty if you " - "don't care about the updates), it'll become mandatory." % (listener,), + f"{listener!r} has no update_service method. 
Provide one (it can be empty if you " + "don't care about the updates), it'll become mandatory.", FutureWarning, + stacklevel=1, ) return partial(_on_change_dispatcher, listener) @@ -310,32 +323,32 @@ class QueryScheduler: """ __slots__ = ( - '_zc', - '_types', - '_addr', - '_port', - '_multicast', - '_first_random_delay_interval', - '_min_time_between_queries_millis', - '_loop', - '_startup_queries_sent', - '_next_scheduled_for_alias', - '_query_heap', - '_next_run', - '_clock_resolution_millis', - '_question_type', + "_addr", + "_clock_resolution_millis", + "_first_random_delay_interval", + "_loop", + "_min_time_between_queries_millis", + "_multicast", + "_next_run", + "_next_scheduled_for_alias", + "_port", + "_query_heap", + "_question_type", + "_startup_queries_sent", + "_types", + "_zc", ) def __init__( self, - zc: "Zeroconf", - types: Set[str], - addr: Optional[str], + zc: Zeroconf, + types: set[str], + addr: str | None, port: int, multicast: bool, delay: int, - first_random_delay_interval: Tuple[int, int], - question_type: Optional[DNSQuestionType], + first_random_delay_interval: tuple[int, int], + question_type: DNSQuestionType | None, ) -> None: self._zc = zc self._types = types @@ -344,12 +357,12 @@ def __init__( self._multicast = multicast self._first_random_delay_interval = first_random_delay_interval self._min_time_between_queries_millis = delay - self._loop: Optional[asyncio.AbstractEventLoop] = None + self._loop: asyncio.AbstractEventLoop | None = None self._startup_queries_sent = 0 - self._next_scheduled_for_alias: Dict[str, _ScheduledPTRQuery] = {} + self._next_scheduled_for_alias: dict[str, _ScheduledPTRQuery] = {} self._query_heap: list[_ScheduledPTRQuery] = [] - self._next_run: Optional[asyncio.TimerHandle] = None - self._clock_resolution_millis = time.get_clock_info('monotonic').resolution * 1000 + self._next_run: asyncio.TimerHandle | None = None + self._clock_resolution_millis = time.get_clock_info("monotonic").resolution * 1000 
self._question_type = question_type def start(self, loop: asyncio.AbstractEventLoop) -> None: @@ -362,7 +375,7 @@ def start(self, loop: asyncio.AbstractEventLoop) -> None: also delay the first query of the series by a randomly chosen amount in the range 20-120 ms. """ - start_delay = millis_to_seconds(random.randint(*self._first_random_delay_interval)) + start_delay = millis_to_seconds(random.randint(*self._first_random_delay_interval)) # noqa: S311 self._loop = loop self._next_run = loop.call_later(start_delay, self._process_startup_queries) @@ -375,12 +388,14 @@ def stop(self) -> None: self._query_heap.clear() def _schedule_ptr_refresh( - self, pointer: DNSPointer, expire_time_millis: float_, refresh_time_millis: float_ + self, + pointer: DNSPointer, + expire_time_millis: float_, + refresh_time_millis: float_, ) -> None: """Schedule a query for a pointer.""" - ttl = int(pointer.ttl) if isinstance(pointer.ttl, float) else pointer.ttl scheduled_ptr_query = _ScheduledPTRQuery( - pointer.alias, pointer.name, ttl, expire_time_millis, refresh_time_millis + pointer.alias, pointer.name, pointer.ttl, expire_time_millis, refresh_time_millis ) self._schedule_ptr_query(scheduled_ptr_query) @@ -414,7 +429,10 @@ def reschedule_ptr_first_refresh(self, pointer: DNSPointer) -> None: self._schedule_ptr_refresh(pointer, expire_time_millis, refresh_time_millis) def schedule_rescue_query( - self, query: _ScheduledPTRQuery, now_millis: float_, additional_percentage: float_ + self, + query: _ScheduledPTRQuery, + now_millis: float_, + additional_percentage: float_, ) -> None: """Reschedule a query for a pointer at an additional percentage of expiration.""" ttl_millis = query.ttl * 1000 @@ -426,7 +444,11 @@ def schedule_rescue_query( # tried to rescue the record and failed return scheduled_ptr_query = _ScheduledPTRQuery( - query.alias, query.name, query.ttl, query.expire_time_millis, next_query_time + query.alias, + query.name, + query.ttl, + query.expire_time_millis, + next_query_time, 
) self._schedule_ptr_query(scheduled_ptr_query) @@ -472,10 +494,10 @@ def _process_ready_types(self) -> None: # with a minimum time between queries of _min_time_between_queries # which defaults to 10s - ready_types: Set[str] = set() - next_scheduled: Optional[_ScheduledPTRQuery] = None + ready_types: set[str] = set() + next_scheduled: _ScheduledPTRQuery | None = None end_time_millis = now_millis + self._clock_resolution_millis - schedule_rescue: List[_ScheduledPTRQuery] = [] + schedule_rescue: list[_ScheduledPTRQuery] = [] while self._query_heap: query = self._query_heap[0] @@ -510,7 +532,7 @@ def _process_ready_types(self) -> None: self._next_run = self._loop.call_at(millis_to_seconds(next_when_millis), self._process_ready_types) def async_send_ready_queries( - self, first_request: bool, now_millis: float_, ready_types: Set[str] + self, first_request: bool, now_millis: float_, ready_types: set[str] ) -> None: """Send any ready queries.""" # If they did not specify and this is the first request, ask QU questions @@ -528,27 +550,27 @@ class _ServiceBrowserBase(RecordUpdateListener): """Base class for ServiceBrowser.""" __slots__ = ( - 'types', - 'zc', - '_cache', - '_loop', - '_pending_handlers', - '_service_state_changed', - 'query_scheduler', - 'done', - '_query_sender_task', + "_cache", + "_loop", + "_pending_handlers", + "_query_sender_task", + "_service_state_changed", + "done", + "query_scheduler", + "types", + "zc", ) def __init__( self, - zc: 'Zeroconf', - type_: Union[str, list], - handlers: Optional[Union[ServiceListener, List[Callable[..., None]]]] = None, - listener: Optional[ServiceListener] = None, - addr: Optional[str] = None, + zc: Zeroconf, + type_: str | list, + handlers: ServiceListener | list[Callable[..., None]] | None = None, + listener: ServiceListener | None = None, + addr: str | None = None, port: int = _MDNS_PORT, delay: int = _BROWSER_TIME, - question_type: Optional[DNSQuestionType] = None, + question_type: DNSQuestionType | None = None, ) 
-> None: """Used to browse for a service for specific type(s). @@ -567,8 +589,8 @@ def __init__( remove_service() methods called when this browser discovers changes in the services availability. """ - assert handlers or listener, 'You need to specify at least one handler' - self.types: Set[str] = set(type_ if isinstance(type_, list) else [type_]) + assert handlers or listener, "You need to specify at least one handler" + self.types: set[str] = set(type_ if isinstance(type_, list) else [type_]) for check_type_ in self.types: # Will generate BadTypeInNameException on a bad name service_type_name(check_type_, strict=False) @@ -576,7 +598,7 @@ def __init__( self._cache = zc.cache assert zc.loop is not None self._loop = zc.loop - self._pending_handlers: Dict[Tuple[str, str], ServiceStateChange] = {} + self._pending_handlers: dict[tuple[str, str], ServiceStateChange] = {} self._service_state_changed = Signal() self.query_scheduler = QueryScheduler( zc, @@ -589,13 +611,13 @@ def __init__( question_type, ) self.done = False - self._query_sender_task: Optional[asyncio.Task] = None + self._query_sender_task: asyncio.Task | None = None - if hasattr(handlers, 'add_service'): - listener = cast('ServiceListener', handlers) + if hasattr(handlers, "add_service"): + listener = cast(ServiceListener, handlers) handlers = None - handlers = cast(List[Callable[..., None]], handlers or []) + handlers = cast(list[Callable[..., None]], handlers or []) if listener: handlers.append(_service_state_changed_from_listener(listener)) @@ -617,7 +639,7 @@ def _async_start(self) -> None: def service_state_changed(self) -> SignalRegistrationInterface: return self._service_state_changed.registration_interface - def _names_matching_types(self, names: Iterable[str]) -> List[Tuple[str, str]]: + def _names_matching_types(self, names: Iterable[str]) -> list[tuple[str, str]]: """Return the type and name for records matching the types we are browsing.""" return [ (type_, name) for name in names for type_ in 
self.types.intersection(cached_possible_types(name)) @@ -642,7 +664,7 @@ def _enqueue_callback( ): self._pending_handlers[key] = state_change - def async_update_records(self, zc: 'Zeroconf', now: float_, records: List[RecordUpdate]) -> None: + def async_update_records(self, zc: Zeroconf, now: float_, records: list[RecordUpdate]) -> None: """Callback invoked by Zeroconf when new information arrives. Updates information required by browser in the Zeroconf cache. @@ -699,7 +721,7 @@ def async_update_records_complete(self) -> None: self._fire_service_state_changed_event(pending) self._pending_handlers.clear() - def _fire_service_state_changed_event(self, event: Tuple[Tuple[str, str], ServiceStateChange]) -> None: + def _fire_service_state_changed_event(self, event: tuple[tuple[str, str], ServiceStateChange]) -> None: """Fire a service state changed event. When running with ServiceBrowser, this will happen in the dedicated @@ -741,14 +763,14 @@ class ServiceBrowser(_ServiceBrowserBase, threading.Thread): def __init__( self, - zc: 'Zeroconf', - type_: Union[str, list], - handlers: Optional[Union[ServiceListener, List[Callable[..., None]]]] = None, - listener: Optional[ServiceListener] = None, - addr: Optional[str] = None, + zc: Zeroconf, + type_: str | list, + handlers: ServiceListener | list[Callable[..., None]] | None = None, + listener: ServiceListener | None = None, + addr: str | None = None, port: int = _MDNS_PORT, delay: int = _BROWSER_TIME, - question_type: Optional[DNSQuestionType] = None, + question_type: DNSQuestionType | None = None, ) -> None: assert zc.loop is not None if not zc.loop.is_running(): @@ -763,8 +785,8 @@ def __init__( self.start() zc.loop.call_soon_threadsafe(self._async_start) self.name = "zeroconf-ServiceBrowser-{}-{}".format( - '-'.join([type_[:-7] for type_ in self.types]), - getattr(self, 'native_id', self.ident), + "-".join([type_[:-7] for type_ in self.types]), + getattr(self, "native_id", self.ident), ) def cancel(self) -> None: @@ 
-793,14 +815,14 @@ def async_update_records_complete(self) -> None: self.queue.put(pending) self._pending_handlers.clear() - def __enter__(self) -> 'ServiceBrowser': + def __enter__(self) -> ServiceBrowser: return self def __exit__( # pylint: disable=useless-return self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> Optional[bool]: + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool | None: self.cancel() return None diff --git a/src/zeroconf/_services/info.pxd b/src/zeroconf/_services/info.pxd index 6f1bef712..3f65bc0a7 100644 --- a/src/zeroconf/_services/info.pxd +++ b/src/zeroconf/_services/info.pxd @@ -22,6 +22,9 @@ from .._utils.ipaddress cimport ( ) from .._utils.time cimport current_time_millis +cdef cython.set _TYPE_AAAA_RECORDS +cdef cython.set _TYPE_A_RECORDS +cdef cython.set _TYPE_A_AAAA_RECORDS cdef object _resolve_all_futures_to_none @@ -47,7 +50,6 @@ cdef cython.set _ADDRESS_RECORD_TYPES cdef unsigned int _DUPLICATE_QUESTION_INTERVAL cdef bint TYPE_CHECKING -cdef bint IPADDRESS_SUPPORTS_SCOPE_ID cdef object cached_ip_addresses cdef object randint @@ -76,6 +78,7 @@ cdef class ServiceInfo(RecordUpdateListener): cdef public DNSText _dns_text_cache cdef public cython.list _dns_address_cache cdef public cython.set _get_address_and_nsec_records_cache + cdef public cython.set _query_record_types @cython.locals(record_update=RecordUpdate, update=bint, cache=DNSCache) cpdef void async_update_records(self, object zc, double now, cython.list records) @@ -156,3 +159,12 @@ cdef class ServiceInfo(RecordUpdateListener): cdef double _get_initial_delay(self) cdef double _get_random_delay(self) + +cdef class AddressResolver(ServiceInfo): + pass + +cdef class AddressResolverIPv6(ServiceInfo): + pass + +cdef class AddressResolverIPv4(ServiceInfo): + pass diff --git a/src/zeroconf/_services/info.py b/src/zeroconf/_services/info.py 
index 6d68de838..9b38de9d8 100644 --- a/src/zeroconf/_services/info.py +++ b/src/zeroconf/_services/info.py @@ -1,30 +1,30 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. 
+ +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + import asyncio import random -import sys -from ipaddress import IPv4Address, IPv6Address, _BaseAddress -from typing import TYPE_CHECKING, Dict, List, Optional, Set, Union, cast +from typing import TYPE_CHECKING, cast from .._cache import DNSCache from .._dns import ( @@ -50,6 +50,8 @@ wait_for_future_set_or_timeout, ) from .._utils.ipaddress import ( + ZeroconfIPv4Address, + ZeroconfIPv6Address, cached_ip_addresses, get_ip_address_object_from_record, ip_bytes_and_scope_to_address, @@ -76,8 +78,6 @@ _TYPE_TXT, ) -IPADDRESS_SUPPORTS_SCOPE_ID = sys.version_info >= (3, 9, 0) - _IPVersion_All_value = IPVersion.All.value _IPVersion_V4Only_value = IPVersion.V4Only.value # https://datatracker.ietf.org/doc/html/rfc6762#section-5.2 @@ -90,6 +90,10 @@ # the A/AAAA/SRV records for a host. 
_AVOID_SYNC_DELAY_RANDOM_INTERVAL = (20, 120) +_TYPE_AAAA_RECORDS = {_TYPE_AAAA} +_TYPE_A_RECORDS = {_TYPE_A} +_TYPE_A_AAAA_RECORDS = {_TYPE_A, _TYPE_AAAA} + bytes_ = bytes float_ = float int_ = int @@ -104,7 +108,7 @@ from .._core import Zeroconf -def instance_name_from_service_info(info: "ServiceInfo", strict: bool = True) -> str: +def instance_name_from_service_info(info: ServiceInfo, strict: bool = True) -> str: """Calculate the instance name from the ServiceInfo.""" # This is kind of funky because of the subtype based tests # need to make subtypes a first class citizen @@ -137,45 +141,46 @@ class ServiceInfo(RecordUpdateListener): """ __slots__ = ( - "text", - "type", - "_name", - "key", + "_decoded_properties", + "_dns_address_cache", + "_dns_pointer_cache", + "_dns_service_cache", + "_dns_text_cache", + "_get_address_and_nsec_records_cache", "_ipv4_addresses", "_ipv6_addresses", + "_name", + "_new_records_futures", + "_properties", + "_query_record_types", + "host_ttl", + "interface_index", + "key", + "other_ttl", "port", - "weight", "priority", "server", "server_key", - "_properties", - "_decoded_properties", - "host_ttl", - "other_ttl", - "interface_index", - "_new_records_futures", - "_dns_pointer_cache", - "_dns_service_cache", - "_dns_text_cache", - "_dns_address_cache", - "_get_address_and_nsec_records_cache", + "text", + "type", + "weight", ) def __init__( self, type_: str, name: str, - port: Optional[int] = None, + port: int | None = None, weight: int = 0, priority: int = 0, - properties: Union[bytes, Dict] = b'', - server: Optional[str] = None, + properties: bytes | dict = b"", + server: str | None = None, host_ttl: int = _DNS_HOST_TTL, other_ttl: int = _DNS_OTHER_TTL, *, - addresses: Optional[List[bytes]] = None, - parsed_addresses: Optional[List[str]] = None, - interface_index: Optional[int] = None, + addresses: list[bytes] | None = None, + parsed_addresses: list[str] | None = None, + interface_index: int | None = None, ) -> None: # Accept both 
none, or one, but not both. if addresses is not None and parsed_addresses is not None: @@ -183,12 +188,12 @@ def __init__( if not type_.endswith(service_type_name(name, strict=False)): raise BadTypeInNameException self.interface_index = interface_index - self.text = b'' + self.text = b"" self.type = type_ self._name = name self.key = name.lower() - self._ipv4_addresses: List[IPv4Address] = [] - self._ipv6_addresses: List[IPv6Address] = [] + self._ipv4_addresses: list[ZeroconfIPv4Address] = [] + self._ipv6_addresses: list[ZeroconfIPv6Address] = [] if addresses is not None: self.addresses = addresses elif parsed_addresses is not None: @@ -198,20 +203,21 @@ def __init__( self.priority = priority self.server = server if server else None self.server_key = server.lower() if server else None - self._properties: Optional[Dict[bytes, Optional[bytes]]] = None - self._decoded_properties: Optional[Dict[str, Optional[str]]] = None + self._properties: dict[bytes, bytes | None] | None = None + self._decoded_properties: dict[str, str | None] | None = None if isinstance(properties, bytes): self._set_text(properties) else: self._set_properties(properties) self.host_ttl = host_ttl self.other_ttl = other_ttl - self._new_records_futures: Optional[Set[asyncio.Future]] = None - self._dns_address_cache: Optional[List[DNSAddress]] = None - self._dns_pointer_cache: Optional[DNSPointer] = None - self._dns_service_cache: Optional[DNSService] = None - self._dns_text_cache: Optional[DNSText] = None - self._get_address_and_nsec_records_cache: Optional[Set[DNSRecord]] = None + self._new_records_futures: set[asyncio.Future] | None = None + self._dns_address_cache: list[DNSAddress] | None = None + self._dns_pointer_cache: DNSPointer | None = None + self._dns_service_cache: DNSService | None = None + self._dns_text_cache: DNSText | None = None + self._get_address_and_nsec_records_cache: set[DNSRecord] | None = None + self._query_record_types = {_TYPE_SRV, _TYPE_TXT, _TYPE_A, _TYPE_AAAA} @property 
def name(self) -> str: @@ -220,7 +226,7 @@ def name(self) -> str: @name.setter def name(self, name: str) -> None: - """Replace the the name and reset the key.""" + """Replace the name and reset the key.""" self._name = name self.key = name.lower() self._dns_service_cache = None @@ -228,7 +234,7 @@ def name(self, name: str) -> None: self._dns_text_cache = None @property - def addresses(self) -> List[bytes]: + def addresses(self) -> list[bytes]: """IPv4 addresses of this service. Only IPv4 addresses are returned for backward compatibility. @@ -238,7 +244,7 @@ def addresses(self) -> List[bytes]: return self.addresses_by_version(IPVersion.V4Only) @addresses.setter - def addresses(self, value: List[bytes]) -> None: + def addresses(self, value: list[bytes]) -> None: """Replace the addresses list. This replaces all currently stored addresses, both IPv4 and IPv6. @@ -249,7 +255,7 @@ def addresses(self, value: List[bytes]) -> None: self._get_address_and_nsec_records_cache = None for address in value: - if IPADDRESS_SUPPORTS_SCOPE_ID and len(address) == 16 and self.interface_index is not None: + if len(address) == 16 and self.interface_index is not None: addr = ip_bytes_and_scope_to_address(address, self.interface_index) else: addr = cached_ip_addresses(address) @@ -260,15 +266,15 @@ def addresses(self, value: List[bytes]) -> None: ) if addr.version == 4: if TYPE_CHECKING: - assert isinstance(addr, IPv4Address) + assert isinstance(addr, ZeroconfIPv4Address) self._ipv4_addresses.append(addr) else: if TYPE_CHECKING: - assert isinstance(addr, IPv6Address) + assert isinstance(addr, ZeroconfIPv6Address) self._ipv6_addresses.append(addr) @property - def properties(self) -> Dict[bytes, Optional[bytes]]: + def properties(self) -> dict[bytes, bytes | None]: """Return properties as bytes.""" if self._properties is None: self._unpack_text_into_properties() @@ -277,7 +283,7 @@ def properties(self) -> Dict[bytes, Optional[bytes]]: return self._properties @property - def 
decoded_properties(self) -> Dict[str, Optional[str]]: + def decoded_properties(self) -> dict[str, str | None]: """Return properties as strings.""" if self._decoded_properties is None: self._generate_decoded_properties() @@ -293,7 +299,7 @@ def async_clear_cache(self) -> None: self._dns_text_cache = None self._get_address_and_nsec_records_cache = None - async def async_wait(self, timeout: float, loop: Optional[asyncio.AbstractEventLoop] = None) -> None: + async def async_wait(self, timeout: float, loop: asyncio.AbstractEventLoop | None = None) -> None: """Calling task waits for a given number of milliseconds or until notified.""" if not self._new_records_futures: self._new_records_futures = set() @@ -301,7 +307,7 @@ async def async_wait(self, timeout: float, loop: Optional[asyncio.AbstractEventL loop or asyncio.get_running_loop(), self._new_records_futures, timeout ) - def addresses_by_version(self, version: IPVersion) -> List[bytes]: + def addresses_by_version(self, version: IPVersion) -> list[bytes]: """List addresses matching IP version. Addresses are guaranteed to be returned in LIFO (last in, first out) @@ -321,7 +327,7 @@ def addresses_by_version(self, version: IPVersion) -> List[bytes]: def ip_addresses_by_version( self, version: IPVersion - ) -> Union[List[IPv4Address], List[IPv6Address], List[_BaseAddress]]: + ) -> list[ZeroconfIPv4Address] | list[ZeroconfIPv6Address]: """List ip_address objects matching IP version. 
Addresses are guaranteed to be returned in LIFO (last in, first out) @@ -334,7 +340,7 @@ def ip_addresses_by_version( def _ip_addresses_by_version_value( self, version_value: int_ - ) -> Union[List[IPv4Address], List[IPv6Address]]: + ) -> list[ZeroconfIPv4Address] | list[ZeroconfIPv6Address]: """Backend for addresses_by_version that uses the raw value.""" if version_value == _IPVersion_All_value: return [*self._ipv4_addresses, *self._ipv6_addresses] # type: ignore[return-value] @@ -342,7 +348,7 @@ def _ip_addresses_by_version_value( return self._ipv4_addresses return self._ipv6_addresses - def parsed_addresses(self, version: IPVersion = IPVersion.All) -> List[str]: + def parsed_addresses(self, version: IPVersion = IPVersion.All) -> list[str]: """List addresses in their parsed string form. Addresses are guaranteed to be returned in LIFO (last in, first out) @@ -353,7 +359,7 @@ def parsed_addresses(self, version: IPVersion = IPVersion.All) -> List[str]: """ return [str_without_scope_id(addr) for addr in self._ip_addresses_by_version_value(version.value)] - def parsed_scoped_addresses(self, version: IPVersion = IPVersion.All) -> List[str]: + def parsed_scoped_addresses(self, version: IPVersion = IPVersion.All) -> list[str]: """Equivalent to parsed_addresses, with the exception that IPv6 Link-Local addresses are qualified with % when available @@ -365,31 +371,31 @@ def parsed_scoped_addresses(self, version: IPVersion = IPVersion.All) -> List[st """ return [str(addr) for addr in self._ip_addresses_by_version_value(version.value)] - def _set_properties(self, properties: Dict[Union[str, bytes], Optional[Union[str, bytes]]]) -> None: + def _set_properties(self, properties: dict[str | bytes, str | bytes | None]) -> None: """Sets properties and text of this info from a dictionary""" - list_: List[bytes] = [] + list_: list[bytes] = [] properties_contain_str = False - result = b'' + result = b"" for key, value in properties.items(): if isinstance(key, str): - key = 
key.encode('utf-8') + key = key.encode("utf-8") # noqa: PLW2901 properties_contain_str = True record = key if value is not None: if not isinstance(value, bytes): - value = str(value).encode('utf-8') + value = str(value).encode("utf-8") # noqa: PLW2901 properties_contain_str = True - record += b'=' + value + record += b"=" + value list_.append(record) for item in list_: - result = b''.join((result, bytes((len(item),)), item)) + result = b"".join((result, bytes((len(item),)), item)) if not properties_contain_str: # If there are no str keys or values, we can use the properties # as-is, without decoding them, otherwise calling # self.properties will lazy decode them, which is expensive. if TYPE_CHECKING: - self._properties = cast("Dict[bytes, Optional[bytes]]", properties) + self._properties = cast(dict[bytes, bytes | None], properties) else: self._properties = properties self.text = result @@ -421,12 +427,12 @@ def _unpack_text_into_properties(self) -> None: return index = 0 - properties: Dict[bytes, Optional[bytes]] = {} + properties: dict[bytes, bytes | None] = {} while index < end: length = text[index] index += 1 key_value = text[index : index + length] - key_sep_value = key_value.partition(b'=') + key_sep_value = key_value.partition(b"=") key = key_sep_value[0] if key not in properties: properties[key] = key_sep_value[2] or None @@ -439,10 +445,10 @@ def get_name(self) -> str: return self._name[: len(self._name) - len(self.type) - 1] def _get_ip_addresses_from_cache_lifo( - self, zc: 'Zeroconf', now: float_, type: int_ - ) -> List[Union[IPv4Address, IPv6Address]]: + self, zc: Zeroconf, now: float_, type: int_ + ) -> list[ZeroconfIPv4Address | ZeroconfIPv6Address]: """Set IPv6 addresses from the cache.""" - address_list: List[Union[IPv4Address, IPv6Address]] = [] + address_list: list[ZeroconfIPv4Address | ZeroconfIPv6Address] = [] for record in self._get_address_records_from_cache_by_type(zc, type): if record.is_expired(now): continue @@ -452,25 +458,27 @@ def 
_get_ip_addresses_from_cache_lifo( address_list.reverse() # Reverse to get LIFO order return address_list - def _set_ipv6_addresses_from_cache(self, zc: 'Zeroconf', now: float_) -> None: + def _set_ipv6_addresses_from_cache(self, zc: Zeroconf, now: float_) -> None: """Set IPv6 addresses from the cache.""" if TYPE_CHECKING: self._ipv6_addresses = cast( - "List[IPv6Address]", self._get_ip_addresses_from_cache_lifo(zc, now, _TYPE_AAAA) + list[ZeroconfIPv6Address], + self._get_ip_addresses_from_cache_lifo(zc, now, _TYPE_AAAA), ) else: self._ipv6_addresses = self._get_ip_addresses_from_cache_lifo(zc, now, _TYPE_AAAA) - def _set_ipv4_addresses_from_cache(self, zc: 'Zeroconf', now: float_) -> None: + def _set_ipv4_addresses_from_cache(self, zc: Zeroconf, now: float_) -> None: """Set IPv4 addresses from the cache.""" if TYPE_CHECKING: self._ipv4_addresses = cast( - "List[IPv4Address]", self._get_ip_addresses_from_cache_lifo(zc, now, _TYPE_A) + list[ZeroconfIPv4Address], + self._get_ip_addresses_from_cache_lifo(zc, now, _TYPE_A), ) else: self._ipv4_addresses = self._get_ip_addresses_from_cache_lifo(zc, now, _TYPE_A) - def async_update_records(self, zc: 'Zeroconf', now: float_, records: List[RecordUpdate]) -> None: + def async_update_records(self, zc: Zeroconf, now: float_, records: list[RecordUpdate]) -> None: """Updates service information from a DNS record. This method will be run in the event loop. @@ -482,7 +490,7 @@ def async_update_records(self, zc: 'Zeroconf', now: float_, records: List[Record if updated and new_records_futures: _resolve_all_futures_to_none(new_records_futures) - def _process_record_threadsafe(self, zc: 'Zeroconf', record: DNSRecord, now: float_) -> bool: + def _process_record_threadsafe(self, zc: Zeroconf, record: DNSRecord, now: float_) -> bool: """Thread safe record updating. Returns True if a new record was added. 
@@ -507,24 +515,32 @@ def _process_record_threadsafe(self, zc: 'Zeroconf', record: DNSRecord, now: flo if ip_addr.version == 4: if TYPE_CHECKING: - assert isinstance(ip_addr, IPv4Address) + assert isinstance(ip_addr, ZeroconfIPv4Address) ipv4_addresses = self._ipv4_addresses if ip_addr not in ipv4_addresses: ipv4_addresses.insert(0, ip_addr) return True - elif ip_addr != ipv4_addresses[0]: + # Use int() to compare the addresses as integers + # since by default IPv4Address.__eq__ compares the + # the addresses on version and int which more than + # we need here since we know the version is 4. + if ip_addr.zc_integer != ipv4_addresses[0].zc_integer: ipv4_addresses.remove(ip_addr) ipv4_addresses.insert(0, ip_addr) return False if TYPE_CHECKING: - assert isinstance(ip_addr, IPv6Address) + assert isinstance(ip_addr, ZeroconfIPv6Address) ipv6_addresses = self._ipv6_addresses if ip_addr not in self._ipv6_addresses: ipv6_addresses.insert(0, ip_addr) return True - elif ip_addr != self._ipv6_addresses[0]: + # Use int() to compare the addresses as integers + # since by default IPv6Address.__eq__ compares the + # the addresses on version and int which more than + # we need here since we know the version is 6. 
+ if ip_addr.zc_integer != self._ipv6_addresses[0].zc_integer: ipv6_addresses.remove(ip_addr) ipv6_addresses.insert(0, ip_addr) @@ -561,17 +577,17 @@ def _process_record_threadsafe(self, zc: 'Zeroconf', record: DNSRecord, now: flo def dns_addresses( self, - override_ttl: Optional[int] = None, + override_ttl: int_ | None = None, version: IPVersion = IPVersion.All, - ) -> List[DNSAddress]: + ) -> list[DNSAddress]: """Return matching DNSAddress from ServiceInfo.""" return self._dns_addresses(override_ttl, version) def _dns_addresses( self, - override_ttl: Optional[int], + override_ttl: int_ | None, version: IPVersion, - ) -> List[DNSAddress]: + ) -> list[DNSAddress]: """Return matching DNSAddress from ServiceInfo.""" cacheable = version is IPVersion.All and override_ttl is None if self._dns_address_cache is not None and cacheable: @@ -595,11 +611,11 @@ def _dns_addresses( self._dns_address_cache = records return records - def dns_pointer(self, override_ttl: Optional[int] = None) -> DNSPointer: + def dns_pointer(self, override_ttl: int_ | None = None) -> DNSPointer: """Return DNSPointer from ServiceInfo.""" return self._dns_pointer(override_ttl) - def _dns_pointer(self, override_ttl: Optional[int]) -> DNSPointer: + def _dns_pointer(self, override_ttl: int_ | None) -> DNSPointer: """Return DNSPointer from ServiceInfo.""" cacheable = override_ttl is None if self._dns_pointer_cache is not None and cacheable: @@ -616,11 +632,11 @@ def _dns_pointer(self, override_ttl: Optional[int]) -> DNSPointer: self._dns_pointer_cache = record return record - def dns_service(self, override_ttl: Optional[int] = None) -> DNSService: + def dns_service(self, override_ttl: int_ | None = None) -> DNSService: """Return DNSService from ServiceInfo.""" return self._dns_service(override_ttl) - def _dns_service(self, override_ttl: Optional[int]) -> DNSService: + def _dns_service(self, override_ttl: int_ | None) -> DNSService: """Return DNSService from ServiceInfo.""" cacheable = override_ttl is 
None if self._dns_service_cache is not None and cacheable: @@ -643,11 +659,11 @@ def _dns_service(self, override_ttl: Optional[int]) -> DNSService: self._dns_service_cache = record return record - def dns_text(self, override_ttl: Optional[int] = None) -> DNSText: + def dns_text(self, override_ttl: int_ | None = None) -> DNSText: """Return DNSText from ServiceInfo.""" return self._dns_text(override_ttl) - def _dns_text(self, override_ttl: Optional[int]) -> DNSText: + def _dns_text(self, override_ttl: int_ | None) -> DNSText: """Return DNSText from ServiceInfo.""" cacheable = override_ttl is None if self._dns_text_cache is not None and cacheable: @@ -664,11 +680,11 @@ def _dns_text(self, override_ttl: Optional[int]) -> DNSText: self._dns_text_cache = record return record - def dns_nsec(self, missing_types: List[int], override_ttl: Optional[int] = None) -> DNSNsec: + def dns_nsec(self, missing_types: list[int], override_ttl: int_ | None = None) -> DNSNsec: """Return DNSNsec from ServiceInfo.""" return self._dns_nsec(missing_types, override_ttl) - def _dns_nsec(self, missing_types: List[int], override_ttl: Optional[int]) -> DNSNsec: + def _dns_nsec(self, missing_types: list[int], override_ttl: int_ | None) -> DNSNsec: """Return DNSNsec from ServiceInfo.""" return DNSNsec( self._name, @@ -680,17 +696,17 @@ def _dns_nsec(self, missing_types: List[int], override_ttl: Optional[int]) -> DN 0.0, ) - def get_address_and_nsec_records(self, override_ttl: Optional[int] = None) -> Set[DNSRecord]: + def get_address_and_nsec_records(self, override_ttl: int_ | None = None) -> set[DNSRecord]: """Build a set of address records and NSEC records for non-present record types.""" return self._get_address_and_nsec_records(override_ttl) - def _get_address_and_nsec_records(self, override_ttl: Optional[int]) -> Set[DNSRecord]: + def _get_address_and_nsec_records(self, override_ttl: int_ | None) -> set[DNSRecord]: """Build a set of address records and NSEC records for non-present record 
types.""" cacheable = override_ttl is None if self._get_address_and_nsec_records_cache is not None and cacheable: return self._get_address_and_nsec_records_cache - missing_types: Set[int] = _ADDRESS_RECORD_TYPES.copy() - records: Set[DNSRecord] = set() + missing_types: set[int] = _ADDRESS_RECORD_TYPES.copy() + records: set[DNSRecord] = set() for dns_address in self._dns_addresses(override_ttl, IPVersion.All): missing_types.discard(dns_address.type) records.add(dns_address) @@ -701,13 +717,16 @@ def _get_address_and_nsec_records(self, override_ttl: Optional[int]) -> Set[DNSR self._get_address_and_nsec_records_cache = records return records - def _get_address_records_from_cache_by_type(self, zc: 'Zeroconf', _type: int_) -> List[DNSAddress]: + def _get_address_records_from_cache_by_type(self, zc: Zeroconf, _type: int_) -> list[DNSAddress]: """Get the addresses from the cache.""" if self.server_key is None: return [] cache = zc.cache if TYPE_CHECKING: - records = cast("List[DNSAddress]", cache.get_all_by_details(self.server_key, _type, _CLASS_IN)) + records = cast( + list[DNSAddress], + cache.get_all_by_details(self.server_key, _type, _CLASS_IN), + ) else: records = cache.get_all_by_details(self.server_key, _type, _CLASS_IN) return records @@ -721,14 +740,14 @@ def set_server_if_missing(self) -> None: self.server = self._name self.server_key = self.key - def load_from_cache(self, zc: 'Zeroconf', now: Optional[float_] = None) -> bool: + def load_from_cache(self, zc: Zeroconf, now: float_ | None = None) -> bool: """Populate the service info from the cache. This method is designed to be threadsafe. """ return self._load_from_cache(zc, now or current_time_millis()) - def _load_from_cache(self, zc: 'Zeroconf', now: float_) -> bool: + def _load_from_cache(self, zc: Zeroconf, now: float_) -> bool: """Populate the service info from the cache. This method is designed to be threadsafe. 
@@ -758,10 +777,10 @@ def _is_complete(self) -> bool: def request( self, - zc: 'Zeroconf', + zc: Zeroconf, timeout: float, - question_type: Optional[DNSQuestionType] = None, - addr: Optional[str] = None, + question_type: DNSQuestionType | None = None, + addr: str | None = None, port: int = _MDNS_PORT, ) -> bool: """Returns true if the service could be discovered on the @@ -777,12 +796,15 @@ def request( :param addr: address to send the request to :param port: port to send the request to """ - assert zc.loop is not None and zc.loop.is_running() + assert zc.loop is not None, "Zeroconf instance must have a loop, was it not started?" + assert zc.loop.is_running(), "Zeroconf instance loop must be running, was it already stopped?" if zc.loop == get_running_loop(): raise RuntimeError("Use AsyncServiceInfo.async_request from the event loop") return bool( run_coro_with_timeout( - self.async_request(zc, timeout, question_type, addr, port), zc.loop, timeout + self.async_request(zc, timeout, question_type, addr, port), + zc.loop, + timeout, ) ) @@ -794,10 +816,10 @@ def _get_random_delay(self) -> int_: async def async_request( self, - zc: 'Zeroconf', + zc: Zeroconf, timeout: float, - question_type: Optional[DNSQuestionType] = None, - addr: Optional[str] = None, + question_type: DNSQuestionType | None = None, + addr: str | None = None, port: int = _MDNS_PORT, ) -> bool: """Returns true if the service could be discovered on the @@ -837,7 +859,7 @@ async def async_request( if last <= now: return False if next_ <= now: - this_question_type = question_type or QU_QUESTION if first_request else QM_QUESTION + this_question_type = question_type or (QU_QUESTION if first_request else QM_QUESTION) out = self._generate_request_query(zc, now, this_question_type) first_request = False if out.questions: @@ -894,7 +916,7 @@ def _add_question_with_known_answers( out.add_answer_at_time(answer, now) def _generate_request_query( - self, zc: 'Zeroconf', now: float_, question_type: DNSQuestionType + 
self, zc: Zeroconf, now: float_, question_type: DNSQuestionType ) -> DNSOutgoing: """Generate the request query.""" out = DNSOutgoing(_FLAGS_QR_QUERY) @@ -903,36 +925,40 @@ def _generate_request_query( cache = zc.cache history = zc.question_history qu_question = question_type is QU_QUESTION - self._add_question_with_known_answers( - out, qu_question, history, cache, now, name, _TYPE_SRV, _CLASS_IN, True - ) - self._add_question_with_known_answers( - out, qu_question, history, cache, now, name, _TYPE_TXT, _CLASS_IN, True - ) - self._add_question_with_known_answers( - out, qu_question, history, cache, now, server, _TYPE_A, _CLASS_IN, False - ) - self._add_question_with_known_answers( - out, qu_question, history, cache, now, server, _TYPE_AAAA, _CLASS_IN, False - ) + if _TYPE_SRV in self._query_record_types: + self._add_question_with_known_answers( + out, qu_question, history, cache, now, name, _TYPE_SRV, _CLASS_IN, True + ) + if _TYPE_TXT in self._query_record_types: + self._add_question_with_known_answers( + out, qu_question, history, cache, now, name, _TYPE_TXT, _CLASS_IN, True + ) + if _TYPE_A in self._query_record_types: + self._add_question_with_known_answers( + out, qu_question, history, cache, now, server, _TYPE_A, _CLASS_IN, False + ) + if _TYPE_AAAA in self._query_record_types: + self._add_question_with_known_answers( + out, qu_question, history, cache, now, server, _TYPE_AAAA, _CLASS_IN, False + ) return out def __repr__(self) -> str: """String representation""" - return '{}({})'.format( + return "{}({})".format( type(self).__name__, - ', '.join( - f'{name}={getattr(self, name)!r}' + ", ".join( + f"{name}={getattr(self, name)!r}" for name in ( - 'type', - 'name', - 'addresses', - 'port', - 'weight', - 'priority', - 'server', - 'properties', - 'interface_index', + "type", + "name", + "addresses", + "port", + "weight", + "priority", + "server", + "properties", + "interface_index", ) ), ) @@ -940,3 +966,45 @@ def __repr__(self) -> str: class 
AsyncServiceInfo(ServiceInfo): """An async version of ServiceInfo.""" + + +class AddressResolver(ServiceInfo): + """Resolve a host name to an IP address.""" + + def __init__(self, server: str) -> None: + """Initialize the AddressResolver.""" + super().__init__(server, server, server=server) + self._query_record_types = _TYPE_A_AAAA_RECORDS + + @property + def _is_complete(self) -> bool: + """The ServiceInfo has all expected properties.""" + return bool(self._ipv4_addresses) or bool(self._ipv6_addresses) + + +class AddressResolverIPv6(ServiceInfo): + """Resolve a host name to an IPv6 address.""" + + def __init__(self, server: str) -> None: + """Initialize the AddressResolver.""" + super().__init__(server, server, server=server) + self._query_record_types = _TYPE_AAAA_RECORDS + + @property + def _is_complete(self) -> bool: + """The ServiceInfo has all expected properties.""" + return bool(self._ipv6_addresses) + + +class AddressResolverIPv4(ServiceInfo): + """Resolve a host name to an IPv4 address.""" + + def __init__(self, server: str) -> None: + """Initialize the AddressResolver.""" + super().__init__(server, server, server=server) + self._query_record_types = _TYPE_A_RECORDS + + @property + def _is_complete(self) -> bool: + """The ServiceInfo has all expected properties.""" + return bool(self._ipv4_addresses) diff --git a/src/zeroconf/_services/registry.py b/src/zeroconf/_services/registry.py index 261e8e9cd..937992eb0 100644 --- a/src/zeroconf/_services/registry.py +++ b/src/zeroconf/_services/registry.py @@ -1,26 +1,26 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. 
- - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ -from typing import Dict, List, Optional, Union +from __future__ import annotations from .._exceptions import ServiceNameAlreadyRegistered from .info import ServiceInfo @@ -35,22 +35,22 @@ class ServiceRegistry: the event loop as it is not thread safe. 
""" - __slots__ = ("_services", "types", "servers", "has_entries") + __slots__ = ("_services", "has_entries", "servers", "types") def __init__( self, ) -> None: """Create the ServiceRegistry class.""" - self._services: Dict[str, ServiceInfo] = {} - self.types: Dict[str, List] = {} - self.servers: Dict[str, List] = {} + self._services: dict[str, ServiceInfo] = {} + self.types: dict[str, list] = {} + self.servers: dict[str, list] = {} self.has_entries: bool = False def async_add(self, info: ServiceInfo) -> None: """Add a new service to the registry.""" self._add(info) - def async_remove(self, info: Union[List[ServiceInfo], ServiceInfo]) -> None: + def async_remove(self, info: list[ServiceInfo] | ServiceInfo) -> None: """Remove a new service from the registry.""" self._remove(info if isinstance(info, list) else [info]) @@ -59,27 +59,27 @@ def async_update(self, info: ServiceInfo) -> None: self._remove([info]) self._add(info) - def async_get_service_infos(self) -> List[ServiceInfo]: + def async_get_service_infos(self) -> list[ServiceInfo]: """Return all ServiceInfo.""" return list(self._services.values()) - def async_get_info_name(self, name: str) -> Optional[ServiceInfo]: + def async_get_info_name(self, name: str) -> ServiceInfo | None: """Return all ServiceInfo for the name.""" return self._services.get(name) - def async_get_types(self) -> List[str]: + def async_get_types(self) -> list[str]: """Return all types.""" return list(self.types) - def async_get_infos_type(self, type_: str) -> List[ServiceInfo]: + def async_get_infos_type(self, type_: str) -> list[ServiceInfo]: """Return all ServiceInfo matching type.""" return self._async_get_by_index(self.types, type_) - def async_get_infos_server(self, server: str) -> List[ServiceInfo]: + def async_get_infos_server(self, server: str) -> list[ServiceInfo]: """Return all ServiceInfo matching server.""" return self._async_get_by_index(self.servers, server) - def _async_get_by_index(self, records: Dict[str, List], key: _str) 
-> List[ServiceInfo]: + def _async_get_by_index(self, records: dict[str, list], key: _str) -> list[ServiceInfo]: """Return all ServiceInfo matching the index.""" record_list = records.get(key) if record_list is None: @@ -98,7 +98,7 @@ def _add(self, info: ServiceInfo) -> None: self.servers.setdefault(info.server_key, []).append(info.key) self.has_entries = True - def _remove(self, infos: List[ServiceInfo]) -> None: + def _remove(self, infos: list[ServiceInfo]) -> None: """Remove a services under the lock.""" for info in infos: old_service_info = self._services.get(info.key) diff --git a/src/zeroconf/_services/types.py b/src/zeroconf/_services/types.py index 70db2d609..af25dc6db 100644 --- a/src/zeroconf/_services/types.py +++ b/src/zeroconf/_services/types.py @@ -1,27 +1,28 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. 
+ +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + import time -from typing import Optional, Set, Tuple, Union from .._core import Zeroconf from .._services import ServiceListener @@ -37,7 +38,7 @@ class ZeroconfServiceTypes(ServiceListener): def __init__(self) -> None: """Keep track of found services in a set.""" - self.found_services: Set[str] = set() + self.found_services: set[str] = set() def add_service(self, zc: Zeroconf, type_: str, name: str) -> None: """Service added.""" @@ -52,11 +53,11 @@ def remove_service(self, zc: Zeroconf, type_: str, name: str) -> None: @classmethod def find( cls, - zc: Optional[Zeroconf] = None, - timeout: Union[int, float] = 5, + zc: Zeroconf | None = None, + timeout: int | float = 5, interfaces: InterfacesType = InterfaceChoice.All, - ip_version: Optional[IPVersion] = None, - ) -> Tuple[str, ...]: + ip_version: IPVersion | None = None, + ) -> tuple[str, ...]: """ Return all of the advertised services on any local networks. 
diff --git a/src/zeroconf/_transport.py b/src/zeroconf/_transport.py index c37af2efd..c8d7699b9 100644 --- a/src/zeroconf/_transport.py +++ b/src/zeroconf/_transport.py @@ -1,39 +1,40 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - This module provides a framework for the use of DNS Service Discovery - using IP multicast. +This module provides a framework for the use of DNS Service Discovery +using IP multicast. - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. 
- You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + import asyncio import socket -from typing import Tuple class _WrappedTransport: """A wrapper for transports.""" __slots__ = ( - 'transport', - 'is_ipv6', - 'sock', - 'fileno', - 'sock_name', + "fileno", + "is_ipv6", + "sock", + "sock_name", + "transport", ) def __init__( @@ -42,7 +43,7 @@ def __init__( is_ipv6: bool, sock: socket.socket, fileno: int, - sock_name: Tuple, + sock_name: tuple, ) -> None: """Initialize the wrapped transport. @@ -57,7 +58,7 @@ def __init__( def make_wrapped_transport(transport: asyncio.DatagramTransport) -> _WrappedTransport: """Make a wrapped transport.""" - sock: socket.socket = transport.get_extra_info('socket') + sock: socket.socket = transport.get_extra_info("socket") return _WrappedTransport( transport=transport, is_ipv6=sock.family == socket.AF_INET6, diff --git a/src/zeroconf/_updates.py b/src/zeroconf/_updates.py index 42fa82850..c0bf9b8c9 100644 --- a/src/zeroconf/_updates.py +++ b/src/zeroconf/_updates.py @@ -1,26 +1,28 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. 
- - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ -from typing import TYPE_CHECKING, List +from __future__ import annotations + +from typing import TYPE_CHECKING from ._dns import DNSRecord from ._record_update import RecordUpdate @@ -40,7 +42,7 @@ class RecordUpdateListener: """ def update_record( # pylint: disable=no-self-use - self, zc: 'Zeroconf', now: float, record: DNSRecord + self, zc: Zeroconf, now: float, record: DNSRecord ) -> None: """Update a single record. 
@@ -49,7 +51,7 @@ def update_record( # pylint: disable=no-self-use """ raise RuntimeError("update_record is deprecated and will be removed in a future version.") - def async_update_records(self, zc: 'Zeroconf', now: float_, records: List[RecordUpdate]) -> None: + def async_update_records(self, zc: Zeroconf, now: float_, records: list[RecordUpdate]) -> None: """Update multiple records in one shot. All records that are received in a single packet are passed diff --git a/src/zeroconf/_utils/__init__.py b/src/zeroconf/_utils/__init__.py index 2ef4b15b1..584a74eca 100644 --- a/src/zeroconf/_utils/__init__.py +++ b/src/zeroconf/_utils/__init__.py @@ -1,21 +1,23 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - This module provides a framework for the use of DNS Service Discovery - using IP multicast. +This module provides a framework for the use of DNS Service Discovery +using IP multicast. - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. 
+This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ + +from __future__ import annotations diff --git a/src/zeroconf/_utils/asyncio.py b/src/zeroconf/_utils/asyncio.py index 358ef37ea..860906017 100644 --- a/src/zeroconf/_utils/asyncio.py +++ b/src/zeroconf/_utils/asyncio.py @@ -1,35 +1,33 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. 
- - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + import asyncio import concurrent.futures import contextlib import sys -from typing import Any, Awaitable, Coroutine, Optional, Set - -if sys.version_info[:2] < (3, 11): - from async_timeout import timeout as asyncio_timeout -else: - from asyncio import timeout as asyncio_timeout +from collections.abc import Awaitable, Coroutine +from typing import Any from .._exceptions import EventLoopBlocked from ..const import _LOADED_SYSTEM_TIMEOUT @@ -47,7 +45,7 @@ def _set_future_none_if_not_done(fut: asyncio.Future) -> None: fut.set_result(None) -def _resolve_all_futures_to_none(futures: Set[asyncio.Future]) -> None: +def _resolve_all_futures_to_none(futures: set[asyncio.Future]) -> None: """Resolve all futures to None.""" for fut in futures: _set_future_none_if_not_done(fut) @@ -55,7 +53,7 @@ def 
_resolve_all_futures_to_none(futures: Set[asyncio.Future]) -> None: async def wait_for_future_set_or_timeout( - loop: asyncio.AbstractEventLoop, future_set: Set[asyncio.Future], timeout: float + loop: asyncio.AbstractEventLoop, future_set: set[asyncio.Future], timeout: float ) -> None: """Wait for a future or timeout (in milliseconds).""" future = loop.create_future() @@ -68,14 +66,20 @@ async def wait_for_future_set_or_timeout( future_set.discard(future) -async def wait_event_or_timeout(event: asyncio.Event, timeout: float) -> None: - """Wait for an event or timeout.""" - with contextlib.suppress(asyncio.TimeoutError): - async with asyncio_timeout(timeout): - await event.wait() +async def wait_future_or_timeout(future: asyncio.Future[bool | None], timeout: float) -> None: + """Wait for a future or timeout.""" + loop = asyncio.get_running_loop() + handle = loop.call_later(timeout, _set_future_none_if_not_done, future) + try: + await future + except asyncio.CancelledError: + if sys.version_info >= (3, 11) and (task := asyncio.current_task()) and task.cancelling(): + raise + finally: + handle.cancel() -async def _async_get_all_tasks(loop: asyncio.AbstractEventLoop) -> Set[asyncio.Task]: +async def _async_get_all_tasks(loop: asyncio.AbstractEventLoop) -> set[asyncio.Task]: """Return all tasks running.""" await asyncio.sleep(0) # flush out any call_soon_threadsafe # If there are multiple event loops running, all_tasks is not @@ -87,7 +91,7 @@ async def _async_get_all_tasks(loop: asyncio.AbstractEventLoop) -> Set[asyncio.T return set() -async def _wait_for_loop_tasks(wait_tasks: Set[asyncio.Task]) -> None: +async def _wait_for_loop_tasks(wait_tasks: set[asyncio.Task]) -> None: """Wait for the event loop thread we started to shutdown.""" await asyncio.wait(wait_tasks, timeout=_TASK_AWAIT_TIMEOUT) @@ -130,7 +134,7 @@ def shutdown_loop(loop: asyncio.AbstractEventLoop) -> None: loop.call_soon_threadsafe(loop.stop) -def get_running_loop() -> 
Optional[asyncio.AbstractEventLoop]: +def get_running_loop() -> asyncio.AbstractEventLoop | None: """Check if an event loop is already running.""" with contextlib.suppress(RuntimeError): return asyncio.get_running_loop() diff --git a/src/zeroconf/_utils/ipaddress.pxd b/src/zeroconf/_utils/ipaddress.pxd index 098c6ff9a..78bbdfbdd 100644 --- a/src/zeroconf/_utils/ipaddress.pxd +++ b/src/zeroconf/_utils/ipaddress.pxd @@ -1,14 +1,16 @@ -cdef bint TYPE_CHECKING -cdef bint IPADDRESS_SUPPORTS_SCOPE_ID - from .._dns cimport DNSAddress +cdef bint TYPE_CHECKING + cpdef get_ip_address_object_from_record(DNSAddress record) + @cython.locals(address_str=str) cpdef str_without_scope_id(object addr) + cpdef ip_bytes_and_scope_to_address(object addr, object scope_id) + cdef object cached_ip_addresses_wrapper diff --git a/src/zeroconf/_utils/ipaddress.py b/src/zeroconf/_utils/ipaddress.py index ba1379551..d172d0c9f 100644 --- a/src/zeroconf/_utils/ipaddress.py +++ b/src/zeroconf/_utils/ipaddress.py @@ -1,40 +1,40 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. 
- - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ -import sys -from functools import lru_cache + +from __future__ import annotations + +from functools import cache, lru_cache from ipaddress import AddressValueError, IPv4Address, IPv6Address, NetmaskValueError -from typing import Any, Optional, Union +from typing import Any from .._dns import DNSAddress from ..const import _TYPE_AAAA bytes_ = bytes int_ = int -IPADDRESS_SUPPORTS_SCOPE_ID = sys.version_info >= (3, 9, 0) class ZeroconfIPv4Address(IPv4Address): - - __slots__ = ("_str", "_is_link_local", "_is_unspecified") + __slots__ = ("__hash__", "_is_link_local", "_is_loopback", "_is_unspecified", "_str", "zc_integer") def __init__(self, *args: Any, **kwargs: Any) -> None: """Initialize a new IPv4 address.""" @@ -42,6 +42,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self._str = super().__str__() self._is_link_local = 
super().is_link_local self._is_unspecified = super().is_unspecified + self._is_loopback = super().is_loopback + self.__hash__ = cache(lambda: IPv4Address.__hash__(self)) # type: ignore[method-assign] + self.zc_integer = int(self) def __str__(self) -> str: """Return the string representation of the IPv4 address.""" @@ -57,10 +60,14 @@ def is_unspecified(self) -> bool: """Return True if this is an unspecified address.""" return self._is_unspecified + @property + def is_loopback(self) -> bool: + """Return True if this is a loop back.""" + return self._is_loopback + class ZeroconfIPv6Address(IPv6Address): - - __slots__ = ("_str", "_is_link_local", "_is_unspecified") + __slots__ = ("__hash__", "_is_link_local", "_is_loopback", "_is_unspecified", "_str", "zc_integer") def __init__(self, *args: Any, **kwargs: Any) -> None: """Initialize a new IPv6 address.""" @@ -68,6 +75,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self._str = super().__str__() self._is_link_local = super().is_link_local self._is_unspecified = super().is_unspecified + self._is_loopback = super().is_loopback + self.__hash__ = cache(lambda: IPv6Address.__hash__(self)) # type: ignore[method-assign] + self.zc_integer = int(self) def __str__(self) -> str: """Return the string representation of the IPv6 address.""" @@ -83,9 +93,16 @@ def is_unspecified(self) -> bool: """Return True if this is an unspecified address.""" return self._is_unspecified + @property + def is_loopback(self) -> bool: + """Return True if this is a loop back.""" + return self._is_loopback + @lru_cache(maxsize=512) -def _cached_ip_addresses(address: Union[str, bytes, int]) -> Optional[Union[IPv4Address, IPv6Address]]: +def _cached_ip_addresses( + address: str | bytes | int, +) -> ZeroconfIPv4Address | ZeroconfIPv6Address | None: """Cache IP addresses.""" try: return ZeroconfIPv4Address(address) @@ -102,14 +119,18 @@ def _cached_ip_addresses(address: Union[str, bytes, int]) -> Optional[Union[IPv4 cached_ip_addresses = 
cached_ip_addresses_wrapper -def get_ip_address_object_from_record(record: DNSAddress) -> Optional[Union[IPv4Address, IPv6Address]]: +def get_ip_address_object_from_record( + record: DNSAddress, +) -> ZeroconfIPv4Address | ZeroconfIPv6Address | None: """Get the IP address object from the record.""" - if IPADDRESS_SUPPORTS_SCOPE_ID and record.type == _TYPE_AAAA and record.scope_id: + if record.type == _TYPE_AAAA and record.scope_id: return ip_bytes_and_scope_to_address(record.address, record.scope_id) return cached_ip_addresses_wrapper(record.address) -def ip_bytes_and_scope_to_address(address: bytes_, scope: int_) -> Optional[Union[IPv4Address, IPv6Address]]: +def ip_bytes_and_scope_to_address( + address: bytes_, scope: int_ +) -> ZeroconfIPv4Address | ZeroconfIPv6Address | None: """Convert the bytes and scope to an IP address object.""" base_address = cached_ip_addresses_wrapper(address) if base_address is not None and base_address.is_link_local: @@ -118,11 +139,11 @@ def ip_bytes_and_scope_to_address(address: bytes_, scope: int_) -> Optional[Unio return base_address -def str_without_scope_id(addr: Union[IPv4Address, IPv6Address]) -> str: +def str_without_scope_id(addr: ZeroconfIPv4Address | ZeroconfIPv6Address) -> str: """Return the string representation of the address without the scope id.""" - if IPADDRESS_SUPPORTS_SCOPE_ID and addr.version == 6: + if addr.version == 6: address_str = str(addr) - return address_str.partition('%')[0] + return address_str.partition("%")[0] return str(addr) diff --git a/src/zeroconf/_utils/name.py b/src/zeroconf/_utils/name.py index adccb3e5e..de35f7afb 100644 --- a/src/zeroconf/_utils/name.py +++ b/src/zeroconf/_utils/name.py @@ -1,27 +1,28 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. 
- - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. 
+ +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + from functools import lru_cache -from typing import Set from .._exceptions import BadTypeInNameException from ..const import ( @@ -80,19 +81,19 @@ def service_type_name(type_: str, *, strict: bool = True) -> str: # pylint: dis """ if len(type_) > 256: # https://datatracker.ietf.org/doc/html/rfc6763#section-7.2 - raise BadTypeInNameException("Full name (%s) must be > 256 bytes" % type_) + raise BadTypeInNameException(f"Full name ({type_}) must be > 256 bytes") if type_.endswith((_TCP_PROTOCOL_LOCAL_TRAILER, _NONTCP_PROTOCOL_LOCAL_TRAILER)): - remaining = type_[: -len(_TCP_PROTOCOL_LOCAL_TRAILER)].split('.') + remaining = type_[: -len(_TCP_PROTOCOL_LOCAL_TRAILER)].split(".") trailer = type_[-len(_TCP_PROTOCOL_LOCAL_TRAILER) :] has_protocol = True elif strict: raise BadTypeInNameException( - "Type '%s' must end with '%s' or '%s'" - % (type_, _TCP_PROTOCOL_LOCAL_TRAILER, _NONTCP_PROTOCOL_LOCAL_TRAILER) + f"Type '{type_}' must end with " + f"'{_TCP_PROTOCOL_LOCAL_TRAILER}' or '{_NONTCP_PROTOCOL_LOCAL_TRAILER}'" ) elif type_.endswith(_LOCAL_TRAILER): - remaining = type_[: -len(_LOCAL_TRAILER)].split('.') + remaining = type_[: -len(_LOCAL_TRAILER)].split(".") trailer = type_[-len(_LOCAL_TRAILER) + 1 :] has_protocol = False else: @@ -104,28 +105,26 @@ def service_type_name(type_: str, *, strict: bool = True) -> str: # pylint: dis raise BadTypeInNameException("No Service name found") if len(remaining) == 1 and len(remaining[0]) == 0: - raise BadTypeInNameException("Type '%s' must not start with '.'" % type_) + raise BadTypeInNameException(f"Type '{type_}' must not start with '.'") - if service_name[0] != '_': - raise BadTypeInNameException("Service name (%s) must start with '_'" % service_name) + if service_name[0] != "_": + 
raise BadTypeInNameException(f"Service name ({service_name}) must start with '_'") test_service_name = service_name[1:] if strict and len(test_service_name) > 15: # https://datatracker.ietf.org/doc/html/rfc6763#section-7.2 - raise BadTypeInNameException("Service name (%s) must be <= 15 bytes" % test_service_name) + raise BadTypeInNameException(f"Service name ({test_service_name}) must be <= 15 bytes") - if '--' in test_service_name: - raise BadTypeInNameException("Service name (%s) must not contain '--'" % test_service_name) + if "--" in test_service_name: + raise BadTypeInNameException(f"Service name ({test_service_name}) must not contain '--'") - if '-' in (test_service_name[0], test_service_name[-1]): - raise BadTypeInNameException( - "Service name (%s) may not start or end with '-'" % test_service_name - ) + if "-" in (test_service_name[0], test_service_name[-1]): + raise BadTypeInNameException(f"Service name ({test_service_name}) may not start or end with '-'") if not _HAS_A_TO_Z.search(test_service_name): raise BadTypeInNameException( - "Service name (%s) must contain at least one letter (eg: 'A-Z')" % test_service_name + f"Service name ({test_service_name}) must contain at least one letter (eg: 'A-Z')" ) allowed_characters_re = ( @@ -134,43 +133,46 @@ def service_type_name(type_: str, *, strict: bool = True) -> str: # pylint: dis if not allowed_characters_re.search(test_service_name): raise BadTypeInNameException( - "Service name (%s) must contain only these characters: " - "A-Z, a-z, 0-9, hyphen ('-')%s" % (test_service_name, "" if strict else ", underscore ('_')") + f"Service name ({test_service_name if strict else ''}) " + "must contain only these characters: " + "A-Z, a-z, 0-9, hyphen ('-')" + ", underscore ('_')" + if strict + else "" ) else: - service_name = '' + service_name = "" - if remaining and remaining[-1] == '_sub': + if remaining and remaining[-1] == "_sub": remaining.pop() if len(remaining) == 0 or len(remaining[0]) == 0: raise 
BadTypeInNameException("_sub requires a subtype name") if len(remaining) > 1: - remaining = ['.'.join(remaining)] + remaining = [".".join(remaining)] if remaining: - length = len(remaining[0].encode('utf-8')) + length = len(remaining[0].encode("utf-8")) if length > 63: - raise BadTypeInNameException("Too long: '%s'" % remaining[0]) + raise BadTypeInNameException(f"Too long: '{remaining[0]}'") if _HAS_ASCII_CONTROL_CHARS.search(remaining[0]): raise BadTypeInNameException( - "Ascii control character 0x00-0x1F and 0x7F illegal in '%s'" % remaining[0] + f"Ascii control character 0x00-0x1F and 0x7F illegal in '{remaining[0]}'" ) return service_name + trailer -def possible_types(name: str) -> Set[str]: +def possible_types(name: str) -> set[str]: """Build a set of all possible types from a fully qualified name.""" - labels = name.split('.') + labels = name.split(".") label_count = len(labels) types = set() for count in range(label_count): parts = labels[label_count - count - 4 :] - if not parts[0].startswith('_'): + if not parts[0].startswith("_"): break - types.add('.'.join(parts)) + types.add(".".join(parts)) return types diff --git a/src/zeroconf/_utils/net.py b/src/zeroconf/_utils/net.py index cc4754abc..e67edf787 100644 --- a/src/zeroconf/_utils/net.py +++ b/src/zeroconf/_utils/net.py @@ -1,32 +1,36 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + import enum import errno import ipaddress import socket import struct import sys -from typing import Any, List, Optional, Sequence, Tuple, Union, cast +import warnings +from collections.abc import Iterable, Sequence +from typing import Any, Union, cast import ifaddr @@ -40,7 +44,7 @@ class InterfaceChoice(enum.Enum): All = 2 -InterfacesType = Union[Sequence[Union[str, int, Tuple[Tuple[str, int, int], int]]], InterfaceChoice] +InterfacesType = Union[Sequence[Union[str, int, tuple[tuple[str, int, int], int]]], InterfaceChoice] @enum.unique @@ -65,50 +69,74 @@ def _is_v6_address(addr: bytes) -> bool: def _encode_address(address: str) -> bytes: - is_ipv6 = ':' in address + is_ipv6 = ":" in address address_family = socket.AF_INET6 if is_ipv6 else 
socket.AF_INET return socket.inet_pton(address_family, address) -def get_all_addresses() -> List[str]: - return list({addr.ip for iface in ifaddr.get_adapters() for addr in iface.ips if addr.is_IPv4}) +def get_all_addresses_ipv4(adapters: Iterable[ifaddr.Adapter]) -> list[str]: + return list({addr.ip for iface in adapters for addr in iface.ips if addr.is_IPv4}) # type: ignore[misc] -def get_all_addresses_v6() -> List[Tuple[Tuple[str, int, int], int]]: +def get_all_addresses_ipv6(adapters: Iterable[ifaddr.Adapter]) -> list[tuple[tuple[str, int, int], int]]: # IPv6 multicast uses positive indexes for interfaces # TODO: What about multi-address interfaces? return list( - {(addr.ip, iface.index) for iface in ifaddr.get_adapters() for addr in iface.ips if addr.is_IPv6} + {(addr.ip, iface.index) for iface in adapters for addr in iface.ips if addr.is_IPv6} # type: ignore[misc] + ) + + +def get_all_addresses() -> list[str]: + warnings.warn( + "get_all_addresses is deprecated, and will be removed in a future version. Use ifaddr" + "directly instead to get a list of adapters.", + DeprecationWarning, + stacklevel=2, ) + return get_all_addresses_ipv4(ifaddr.get_adapters()) -def ip6_to_address_and_index(adapters: List[Any], ip: str) -> Tuple[Tuple[str, int, int], int]: - if '%' in ip: - ip = ip[: ip.index('%')] # Strip scope_id. +def get_all_addresses_v6() -> list[tuple[tuple[str, int, int], int]]: + warnings.warn( + "get_all_addresses_v6 is deprecated, and will be removed in a future version. Use ifaddr" + "directly instead to get a list of adapters.", + DeprecationWarning, + stacklevel=2, + ) + return get_all_addresses_ipv6(ifaddr.get_adapters()) + + +def ip6_to_address_and_index(adapters: Iterable[ifaddr.Adapter], ip: str) -> tuple[tuple[str, int, int], int]: + if "%" in ip: + ip = ip[: ip.index("%")] # Strip scope_id. 
ipaddr = ipaddress.ip_address(ip) for adapter in adapters: for adapter_ip in adapter.ips: # IPv6 addresses are represented as tuples - if isinstance(adapter_ip.ip, tuple) and ipaddress.ip_address(adapter_ip.ip[0]) == ipaddr: - return (cast(Tuple[str, int, int], adapter_ip.ip), cast(int, adapter.index)) + if ( + adapter.index is not None + and isinstance(adapter_ip.ip, tuple) + and ipaddress.ip_address(adapter_ip.ip[0]) == ipaddr + ): + return (adapter_ip.ip, adapter.index) - raise RuntimeError('No adapter found for IP address %s' % ip) + raise RuntimeError(f"No adapter found for IP address {ip}") -def interface_index_to_ip6_address(adapters: List[Any], index: int) -> Tuple[str, int, int]: +def interface_index_to_ip6_address(adapters: Iterable[ifaddr.Adapter], index: int) -> tuple[str, int, int]: for adapter in adapters: if adapter.index == index: for adapter_ip in adapter.ips: # IPv6 addresses are represented as tuples if isinstance(adapter_ip.ip, tuple): - return cast(Tuple[str, int, int], adapter_ip.ip) + return adapter_ip.ip - raise RuntimeError('No adapter found for index %s' % index) + raise RuntimeError(f"No adapter found for index {index}") def ip6_addresses_to_indexes( - interfaces: Sequence[Union[str, int, Tuple[Tuple[str, int, int], int]]] -) -> List[Tuple[Tuple[str, int, int], int]]: + interfaces: Sequence[str | int | tuple[tuple[str, int, int], int]], +) -> list[tuple[tuple[str, int, int], int]]: """Convert IPv6 interface addresses to interface indexes. IPv4 addresses are ignored. 
@@ -121,37 +149,47 @@ def ip6_addresses_to_indexes( for iface in interfaces: if isinstance(iface, int): - result.append((interface_index_to_ip6_address(adapters, iface), iface)) + result.append((interface_index_to_ip6_address(adapters, iface), iface)) # type: ignore[arg-type] elif isinstance(iface, str) and ipaddress.ip_address(iface).version == 6: - result.append(ip6_to_address_and_index(adapters, iface)) + result.append(ip6_to_address_and_index(adapters, iface)) # type: ignore[arg-type] return result def normalize_interface_choice( choice: InterfacesType, ip_version: IPVersion = IPVersion.V4Only -) -> List[Union[str, Tuple[Tuple[str, int, int], int]]]: +) -> list[str | tuple[tuple[str, int, int], int]]: """Convert the interfaces choice into internal representation. :param choice: `InterfaceChoice` or list of interface addresses or indexes (IPv6 only). :param ip_address: IP version to use (ignored if `choice` is a list). :returns: List of IP addresses (for IPv4) and indexes (for IPv6). """ - result: List[Union[str, Tuple[Tuple[str, int, int], int]]] = [] + result: list[str | tuple[tuple[str, int, int], int]] = [] if choice is InterfaceChoice.Default: if ip_version != IPVersion.V4Only: - # IPv6 multicast uses interface 0 to mean the default - result.append((('', 0, 0), 0)) + # IPv6 multicast uses interface 0 to mean the default. However, + # the default interface can't be used for outgoing IPv6 multicast + # requests. In a way, interface choice default isn't really working + # with IPv6. Inform the user accordingly. + message = ( + "IPv6 multicast requests can't be sent using default interface. " + "Use V4Only, InterfaceChoice.All or an explicit list of interfaces." 
+ ) + log.error(message) + warnings.warn(message, DeprecationWarning, stacklevel=2) + result.append((("::", 0, 0), 0)) if ip_version != IPVersion.V6Only: - result.append('0.0.0.0') + result.append("0.0.0.0") elif choice is InterfaceChoice.All: + adapters = ifaddr.get_adapters() if ip_version != IPVersion.V4Only: - result.extend(get_all_addresses_v6()) + result.extend(get_all_addresses_ipv6(adapters)) if ip_version != IPVersion.V6Only: - result.extend(get_all_addresses()) + result.extend(get_all_addresses_ipv4(adapters)) if not result: raise RuntimeError( - 'No interfaces to listen on, check that any interfaces have IP version %s' % ip_version + f"No interfaces to listen on, check that any interfaces have IP version {ip_version}" ) elif isinstance(choice, list): # First, take IPv4 addresses. @@ -159,7 +197,7 @@ def normalize_interface_choice( # Unlike IP_ADD_MEMBERSHIP, IPV6_JOIN_GROUP requires interface indexes. result += ip6_addresses_to_indexes(choice) else: - raise TypeError("choice must be a list or InterfaceChoice, got %r" % choice) + raise TypeError(f"choice must be a list or InterfaceChoice, got {choice!r}") return result @@ -168,7 +206,7 @@ def disable_ipv6_only_or_raise(s: socket.socket) -> None: try: s.setsockopt(_IPPROTO_IPV6, socket.IPV6_V6ONLY, False) except OSError: - log.error('Support for dual V4-V6 sockets is not present, use IPVersion.V4 or IPVersion.V6') + log.error("Support for dual V4-V6 sockets is not present, use IPVersion.V4 or IPVersion.V6") raise @@ -181,7 +219,7 @@ def set_so_reuseport_if_available(s: socket.socket) -> None: # versions of Python have SO_REUSEPORT available. # Catch OSError and socket.error for kernel versions <3.9 because lacking # SO_REUSEPORT support. 
- if not hasattr(socket, 'SO_REUSEPORT'): + if not hasattr(socket, "SO_REUSEPORT"): return try: @@ -191,36 +229,43 @@ def set_so_reuseport_if_available(s: socket.socket) -> None: raise -def set_mdns_port_socket_options_for_ip_version( - s: socket.socket, bind_addr: Union[Tuple[str], Tuple[str, int, int]], ip_version: IPVersion +def set_respond_socket_multicast_options( + s: socket.socket, + ip_version: IPVersion, ) -> None: - """Set ttl/hops and loop for mdns port.""" - if ip_version != IPVersion.V6Only: - ttl = struct.pack(b'B', 255) - loop = struct.pack(b'B', 1) + """Set ttl/hops and loop for mDNS respond socket.""" + if ip_version == IPVersion.V4Only: # OpenBSD needs the ttl and loop values for the IP_MULTICAST_TTL and # IP_MULTICAST_LOOP socket options as an unsigned char. - try: - s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl) - s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, loop) - except OSError as e: - if bind_addr[0] != '' or get_errno(e) != errno.EINVAL: # Fails to set on MacOS - raise - - if ip_version != IPVersion.V4Only: + ttl = struct.pack(b"B", 255) + loop = struct.pack(b"B", 1) + s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl) + s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, loop) + elif ip_version == IPVersion.V6Only: # However, char doesn't work here (at least on Linux) s.setsockopt(_IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 255) s.setsockopt(_IPPROTO_IPV6, socket.IPV6_MULTICAST_LOOP, True) + else: + # A shared sender socket is not really possible, especially with link-local + # multicast addresses (ff02::/16), the kernel needs to know which interface + # to use for routing. + # + # It seems that macOS even refuses to take IPv4 socket options if this is an + # AF_INET6 socket. + # + # In theory we could reconfigure the socket on each send, but that is not + # really practical for Python Zerconf. 
+ raise RuntimeError("Dual-stack responder socket not supported") def new_socket( - bind_addr: Union[Tuple[str], Tuple[str, int, int]], + bind_addr: tuple[str] | tuple[str, int, int], port: int = _MDNS_PORT, ip_version: IPVersion = IPVersion.V4Only, apple_p2p: bool = False, -) -> Optional[socket.socket]: +) -> socket.socket | None: log.debug( - 'Creating new socket with port %s, ip_version %s, apple_p2p %s and bind_addr %r', + "Creating new socket with port %s, ip_version %s, apple_p2p %s and bind_addr %r", port, ip_version, apple_p2p, @@ -235,52 +280,69 @@ def new_socket( s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) set_so_reuseport_if_available(s) - if port == _MDNS_PORT: - set_mdns_port_socket_options_for_ip_version(s, bind_addr, ip_version) - if apple_p2p: # SO_RECV_ANYIF = 0x1104 # https://opensource.apple.com/source/xnu/xnu-4570.41.2/bsd/sys/socket.h s.setsockopt(socket.SOL_SOCKET, 0x1104, 1) + # Bind expects (address, port) for AF_INET and (address, port, flowinfo, scope_id) for AF_INET6 bind_tup = (bind_addr[0], port, *bind_addr[1:]) try: s.bind(bind_tup) except OSError as ex: if ex.errno == errno.EADDRNOTAVAIL: log.warning( - 'Address not available when binding to %s, ' 'it is expected to happen on some systems', + "Address not available when binding to %s, it is expected to happen on some systems", bind_tup, ) return None + if ex.errno == errno.EADDRINUSE: + if sys.platform.startswith("darwin") or sys.platform.startswith("freebsd"): + log.error( + "Address in use when binding to %s; " + "On BSD based systems sharing the same port with another " + "stack may require processes to run with the same UID; " + "When using avahi, make sure disallow-other-stacks is set" + " to no in avahi-daemon.conf", + bind_tup, + ) + else: + log.error( + "Address in use when binding to %s; " + "When using avahi, make sure disallow-other-stacks is set" + " to no in avahi-daemon.conf", + bind_tup, + ) + # This is still a fatal error as its not going to work + # if we 
can't hear the traffic coming in. raise - log.debug('Created socket %s', s) + log.debug("Created socket %s", s) return s def add_multicast_member( listen_socket: socket.socket, - interface: Union[str, Tuple[Tuple[str, int, int], int]], + interface: str | tuple[tuple[str, int, int], int], ) -> bool: # This is based on assumptions in normalize_interface_choice is_v6 = isinstance(interface, tuple) err_einval = {errno.EINVAL} - if sys.platform == 'win32': + if sys.platform == "win32": # No WSAEINVAL definition in typeshed err_einval |= {cast(Any, errno).WSAEINVAL} # pylint: disable=no-member - log.debug('Adding %r (socket %d) to multicast group', interface, listen_socket.fileno()) + log.debug("Adding %r (socket %d) to multicast group", interface, listen_socket.fileno()) try: if is_v6: try: mdns_addr6_bytes = socket.inet_pton(socket.AF_INET6, _MDNS_ADDR6) except OSError: log.info( - 'Unable to translate IPv6 address when adding %s to multicast group, ' - 'this can happen if IPv6 is disabled on the system', + "Unable to translate IPv6 address when adding %s to multicast group, " + "this can happen if IPv6 is disabled on the system", interface, ) return False - iface_bin = struct.pack('@I', cast(int, interface[1])) + iface_bin = struct.pack("@I", cast(int, interface[1])) _value = mdns_addr6_bytes + iface_bin listen_socket.setsockopt(_IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, _value) else: @@ -290,32 +352,48 @@ def add_multicast_member( _errno = get_errno(e) if _errno == errno.EADDRINUSE: log.info( - 'Address in use when adding %s to multicast group, ' - 'it is expected to happen on some systems', + "Address in use when adding %s to multicast group, it is expected to happen on some systems", interface, ) return False + if _errno == errno.ENOBUFS: + # https://github.com/python-zeroconf/python-zeroconf/issues/1510 + if not is_v6 and sys.platform.startswith("linux"): + log.warning( + "No buffer space available when adding %s to multicast group, " + "try increasing 
`net.ipv4.igmp_max_memberships` to `1024` in sysctl.conf", + interface, + ) + else: + log.warning( + "No buffer space available when adding %s to multicast group.", + interface, + ) + return False if _errno == errno.EADDRNOTAVAIL: log.info( - 'Address not available when adding %s to multicast ' - 'group, it is expected to happen on some systems', + "Address not available when adding %s to multicast " + "group, it is expected to happen on some systems", interface, ) return False if _errno in err_einval: - log.info('Interface of %s does not support multicast, ' 'it is expected in WSL', interface) + log.info( + "Interface of %s does not support multicast, it is expected in WSL", + interface, + ) return False if _errno == errno.ENOPROTOOPT: log.info( - 'Failed to set socket option on %s, this can happen if ' - 'the network adapter is in a disconnected state', + "Failed to set socket option on %s, this can happen if " + "the network adapter is in a disconnected state", interface, ) return False if is_v6 and _errno == errno.ENODEV: log.info( - 'Address in use when adding %s to multicast group, ' - 'it is expected to happen when the device does not have ipv6', + "Address in use when adding %s to multicast group, " + "it is expected to happen when the device does not have ipv6", interface, ) return False @@ -324,25 +402,40 @@ def add_multicast_member( def new_respond_socket( - interface: Union[str, Tuple[Tuple[str, int, int], int]], + interface: str | tuple[tuple[str, int, int], int], apple_p2p: bool = False, -) -> Optional[socket.socket]: + unicast: bool = False, +) -> socket.socket | None: + """Create interface specific socket for responding to multicast queries.""" is_v6 = isinstance(interface, tuple) + + # For response sockets: + # - Bind explicitly to the interface address + # - Use ephemeral ports if in unicast mode + # - Create socket according to the interface IP type (IPv4 or IPv6) respond_socket = new_socket( + bind_addr=cast(tuple[tuple[str, int, int], int], 
interface)[0] if is_v6 else (cast(str, interface),), + port=0 if unicast else _MDNS_PORT, ip_version=(IPVersion.V6Only if is_v6 else IPVersion.V4Only), apple_p2p=apple_p2p, - bind_addr=cast(Tuple[Tuple[str, int, int], int], interface)[0] if is_v6 else (cast(str, interface),), ) + if unicast: + return respond_socket + if not respond_socket: return None - log.debug('Configuring socket %s with multicast interface %s', respond_socket, interface) + + log.debug("Configuring socket %s with multicast interface %s", respond_socket, interface) if is_v6: - iface_bin = struct.pack('@I', cast(int, interface[1])) + iface_bin = struct.pack("@I", cast(int, interface[1])) respond_socket.setsockopt(_IPPROTO_IPV6, socket.IPV6_MULTICAST_IF, iface_bin) else: respond_socket.setsockopt( - socket.IPPROTO_IP, socket.IP_MULTICAST_IF, socket.inet_aton(cast(str, interface)) + socket.IPPROTO_IP, + socket.IP_MULTICAST_IF, + socket.inet_aton(cast(str, interface)), ) + set_respond_socket_multicast_options(respond_socket, IPVersion.V6Only if is_v6 else IPVersion.V4Only) return respond_socket @@ -351,36 +444,31 @@ def create_sockets( unicast: bool = False, ip_version: IPVersion = IPVersion.V4Only, apple_p2p: bool = False, -) -> Tuple[Optional[socket.socket], List[socket.socket]]: +) -> tuple[socket.socket | None, list[socket.socket]]: if unicast: listen_socket = None else: - listen_socket = new_socket(ip_version=ip_version, apple_p2p=apple_p2p, bind_addr=('',)) + listen_socket = new_socket(bind_addr=("",), ip_version=ip_version, apple_p2p=apple_p2p) normalized_interfaces = normalize_interface_choice(interfaces, ip_version) - # If we are using InterfaceChoice.Default we can use + # If we are using InterfaceChoice.Default with only IPv4 or only IPv6, we can use # a single socket to listen and respond. 
- if not unicast and interfaces is InterfaceChoice.Default: - for i in normalized_interfaces: - add_multicast_member(cast(socket.socket, listen_socket), i) + if not unicast and interfaces is InterfaceChoice.Default and ip_version != IPVersion.All: + for interface in normalized_interfaces: + add_multicast_member(cast(socket.socket, listen_socket), interface) + # Sent responder socket options to the dual-use listen socket + set_respond_socket_multicast_options(cast(socket.socket, listen_socket), ip_version) return listen_socket, [cast(socket.socket, listen_socket)] respond_sockets = [] - for i in normalized_interfaces: - if not unicast: - if add_multicast_member(cast(socket.socket, listen_socket), i): - respond_socket = new_respond_socket(i, apple_p2p=apple_p2p) - else: - respond_socket = None - else: - respond_socket = new_socket( - port=0, - ip_version=ip_version, - apple_p2p=apple_p2p, - bind_addr=i[0] if isinstance(i, tuple) else (i,), - ) + for interface in normalized_interfaces: + # Only create response socket if unicast or becoming multicast member was successful + if not unicast and not add_multicast_member(cast(socket.socket, listen_socket), interface): + continue + + respond_socket = new_respond_socket(interface, apple_p2p=apple_p2p, unicast=unicast) if respond_socket is not None: respond_sockets.append(respond_socket) @@ -388,8 +476,7 @@ def create_sockets( return listen_socket, respond_sockets -def get_errno(e: Exception) -> int: - assert isinstance(e, socket.error) +def get_errno(e: OSError) -> int: return cast(int, e.args[0]) diff --git a/src/zeroconf/_utils/time.py b/src/zeroconf/_utils/time.py index 600d90285..4057f0630 100644 --- a/src/zeroconf/_utils/time.py +++ b/src/zeroconf/_utils/time.py @@ -1,25 +1,26 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. 
- - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations import time @@ -29,7 +30,7 @@ def current_time_millis() -> _float: """Current time in milliseconds. - The current implemention uses `time.monotonic` + The current implementation uses `time.monotonic` but may change in the future. 
The design requires the time to match asyncio.loop.time() diff --git a/src/zeroconf/asyncio.py b/src/zeroconf/asyncio.py index b2daeb10f..a0f4a99db 100644 --- a/src/zeroconf/asyncio.py +++ b/src/zeroconf/asyncio.py @@ -1,31 +1,36 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. 
+ +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ + +from __future__ import annotations + import asyncio import contextlib -from types import TracebackType # noqa # used in type hints -from typing import Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union +from collections.abc import Awaitable +from types import TracebackType # used in type hints +from typing import Callable from ._core import Zeroconf from ._dns import DNSQuestionType +from ._exceptions import NotRunningException from ._services import ServiceListener from ._services.browser import _ServiceBrowserBase from ._services.info import AsyncServiceInfo, ServiceInfo @@ -34,9 +39,9 @@ from .const import _BROWSER_TIME, _MDNS_PORT, _SERVICE_TYPE_ENUMERATION_NAME __all__ = [ - "AsyncZeroconf", - "AsyncServiceInfo", "AsyncServiceBrowser", + "AsyncServiceInfo", + "AsyncZeroconf", "AsyncZeroconfServiceTypes", ] @@ -62,14 +67,14 @@ class AsyncServiceBrowser(_ServiceBrowserBase): def __init__( self, - zeroconf: 'Zeroconf', - type_: Union[str, list], - handlers: Optional[Union[ServiceListener, List[Callable[..., None]]]] = None, - listener: Optional[ServiceListener] = None, - addr: Optional[str] = None, + zeroconf: Zeroconf, + type_: str | list, + handlers: ServiceListener | list[Callable[..., None]] | None = None, + listener: ServiceListener | None = None, + addr: str | None = None, port: int = _MDNS_PORT, delay: int = _BROWSER_TIME, - question_type: Optional[DNSQuestionType] = None, + question_type: DNSQuestionType | None = None, ) -> None: super().__init__(zeroconf, type_, handlers, listener, addr, port, delay, question_type) self._async_start() @@ -78,15 +83,15 @@ async def async_cancel(self) -> None: """Cancel the browser.""" self._async_cancel() - async def __aenter__(self) -> 'AsyncServiceBrowser': + async def __aenter__(self) -> 
AsyncServiceBrowser: return self async def __aexit__( self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> Optional[bool]: + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool | None: await self.async_cancel() return None @@ -97,11 +102,11 @@ class AsyncZeroconfServiceTypes(ZeroconfServiceTypes): @classmethod async def async_find( cls, - aiozc: Optional['AsyncZeroconf'] = None, - timeout: Union[int, float] = 5, + aiozc: AsyncZeroconf | None = None, + timeout: int | float = 5, interfaces: InterfacesType = InterfaceChoice.All, - ip_version: Optional[IPVersion] = None, - ) -> Tuple[str, ...]: + ip_version: IPVersion | None = None, + ) -> tuple[str, ...]: """ Return all of the advertised services on any local networks. @@ -144,9 +149,9 @@ def __init__( self, interfaces: InterfacesType = InterfaceChoice.All, unicast: bool = False, - ip_version: Optional[IPVersion] = None, + ip_version: IPVersion | None = None, apple_p2p: bool = False, - zc: Optional[Zeroconf] = None, + zc: Zeroconf | None = None, ) -> None: """Creates an instance of the Zeroconf class, establishing multicast communications, and listening. 
@@ -169,12 +174,12 @@ def __init__( ip_version=ip_version, apple_p2p=apple_p2p, ) - self.async_browsers: Dict[ServiceListener, AsyncServiceBrowser] = {} + self.async_browsers: dict[ServiceListener, AsyncServiceBrowser] = {} async def async_register_service( self, info: ServiceInfo, - ttl: Optional[int] = None, + ttl: int | None = None, allow_name_change: bool = False, cooperating_responders: bool = False, strict: bool = True, @@ -224,15 +229,19 @@ async def async_close(self) -> None: """Ends the background threads, and prevent this instance from servicing further queries.""" if not self.zeroconf.done: - with contextlib.suppress(asyncio.TimeoutError): - await asyncio.wait_for(self.zeroconf.async_wait_for_start(), timeout=1) + with contextlib.suppress(NotRunningException): + await self.zeroconf.async_wait_for_start(timeout=1.0) await self.async_remove_all_service_listeners() await self.async_unregister_all_services() await self.zeroconf._async_close() # pylint: disable=protected-access async def async_get_service_info( - self, type_: str, name: str, timeout: int = 3000, question_type: Optional[DNSQuestionType] = None - ) -> Optional[AsyncServiceInfo]: + self, + type_: str, + name: str, + timeout: int = 3000, + question_type: DNSQuestionType | None = None, + ) -> AsyncServiceInfo | None: """Returns network's service information for a particular name and type, or None if no service matches by the timeout, which defaults to 3 seconds. 
@@ -263,14 +272,14 @@ async def async_remove_all_service_listeners(self) -> None: *(self.async_remove_service_listener(listener) for listener in list(self.async_browsers)) ) - async def __aenter__(self) -> 'AsyncZeroconf': + async def __aenter__(self) -> AsyncZeroconf: return self async def __aexit__( self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> Optional[bool]: + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool | None: await self.async_close() return None diff --git a/src/zeroconf/const.py b/src/zeroconf/const.py index 73c60d3b6..c3a62875a 100644 --- a/src/zeroconf/const.py +++ b/src/zeroconf/const.py @@ -1,25 +1,27 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. 
+ +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ +from __future__ import annotations + import re import socket @@ -45,8 +47,8 @@ # Some DNS constants -_MDNS_ADDR = '224.0.0.251' -_MDNS_ADDR6 = 'ff02::fb' +_MDNS_ADDR = "224.0.0.251" +_MDNS_ADDR6 = "ff02::fb" _MDNS_PORT = 5353 _DNS_PORT = 53 _DNS_HOST_TTL = 120 # two minute for host records (A, SRV etc) as-per RFC6762 @@ -55,7 +57,7 @@ # ServiceBrowsers generating excessive queries refresh queries. # Apple uses a 15s minimum TTL, however we do not have the same # level of rate limit and safe guards so we use 1/4 of the recommended value -_DNS_PTR_MIN_TTL = _DNS_OTHER_TTL / 4 +_DNS_PTR_MIN_TTL = 1125 _DNS_PACKET_HEADER_LEN = 12 @@ -142,16 +144,16 @@ _ADDRESS_RECORD_TYPES = {_TYPE_A, _TYPE_AAAA} -_HAS_A_TO_Z = re.compile(r'[A-Za-z]') -_HAS_ONLY_A_TO_Z_NUM_HYPHEN = re.compile(r'^[A-Za-z0-9\-]+$') -_HAS_ONLY_A_TO_Z_NUM_HYPHEN_UNDERSCORE = re.compile(r'^[A-Za-z0-9\-\_]+$') -_HAS_ASCII_CONTROL_CHARS = re.compile(r'[\x00-\x1f\x7f]') +_HAS_A_TO_Z = re.compile(r"[A-Za-z]") +_HAS_ONLY_A_TO_Z_NUM_HYPHEN = re.compile(r"^[A-Za-z0-9\-]+$") +_HAS_ONLY_A_TO_Z_NUM_HYPHEN_UNDERSCORE = re.compile(r"^[A-Za-z0-9\-\_]+$") +_HAS_ASCII_CONTROL_CHARS = re.compile(r"[\x00-\x1f\x7f]") _EXPIRE_REFRESH_TIME_PERCENT = 75 -_LOCAL_TRAILER = '.local.' 
-_TCP_PROTOCOL_LOCAL_TRAILER = '._tcp.local.' -_NONTCP_PROTOCOL_LOCAL_TRAILER = '._udp.local.' +_LOCAL_TRAILER = ".local." +_TCP_PROTOCOL_LOCAL_TRAILER = "._tcp.local." +_NONTCP_PROTOCOL_LOCAL_TRAILER = "._udp.local." # https://datatracker.ietf.org/doc/html/rfc6763#section-9 _SERVICE_TYPE_ENUMERATION_NAME = "_services._dns-sd._udp.local." diff --git a/tests/__init__.py b/tests/__init__.py index cbba60731..a70cca600 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,29 +1,31 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - - This module provides a framework for the use of DNS Service Discovery - using IP multicast. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine + +This module provides a framework for the use of DNS Service Discovery +using IP multicast. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. 
+ +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ + +from __future__ import annotations + import asyncio import socket import time -from functools import lru_cache -from typing import List, Optional, Set +from functools import cache from unittest import mock import ifaddr @@ -35,11 +37,11 @@ class QuestionHistoryWithoutSuppression(QuestionHistory): - def suppresses(self, question: DNSQuestion, now: float, known_answers: Set[DNSRecord]) -> bool: + def suppresses(self, question: DNSQuestion, now: float, known_answers: set[DNSRecord]) -> bool: return False -def _inject_responses(zc: Zeroconf, msgs: List[DNSIncoming]) -> None: +def _inject_responses(zc: Zeroconf, msgs: list[DNSIncoming]) -> None: """Inject a DNSIncoming response.""" assert zc.loop is not None @@ -61,16 +63,16 @@ def _wait_for_start(zc: Zeroconf) -> None: asyncio.run_coroutine_threadsafe(zc.async_wait_for_start(), zc.loop).result() -@lru_cache(maxsize=None) +@cache def has_working_ipv6(): - """Return True if if the system can bind an IPv6 address.""" + """Return True if the system can bind an IPv6 address.""" if not socket.has_ipv6: return False sock = None try: sock = socket.socket(socket.AF_INET6) - sock.bind(('::1', 0)) + sock.bind(("::1", 0)) except Exception: return False finally: @@ -89,7 +91,7 @@ def _clear_cache(zc: Zeroconf) -> None: zc.question_history.clear() -def time_changed_millis(millis: Optional[float] = None) -> None: +def time_changed_millis(millis: float | None = None) -> None: """Call all scheduled events for a time.""" loop = asyncio.get_running_loop() loop_time = loop.time() @@ 
-99,7 +101,6 @@ def time_changed_millis(millis: Optional[float] = None) -> None: mock_seconds_into_future = loop_time with mock.patch("time.monotonic", return_value=mock_seconds_into_future): - for task in list(loop._scheduled): # type: ignore[attr-defined] if not isinstance(task, asyncio.TimerHandle): continue diff --git a/tests/benchmarks/__init__.py b/tests/benchmarks/__init__.py new file mode 100644 index 000000000..9d48db4f9 --- /dev/null +++ b/tests/benchmarks/__init__.py @@ -0,0 +1 @@ +from __future__ import annotations diff --git a/tests/benchmarks/helpers.py b/tests/benchmarks/helpers.py new file mode 100644 index 000000000..4f5f7d66c --- /dev/null +++ b/tests/benchmarks/helpers.py @@ -0,0 +1,155 @@ +"""Benchmark helpers.""" + +from __future__ import annotations + +import socket + +from zeroconf import DNSAddress, DNSOutgoing, DNSService, DNSText, const + + +def generate_packets() -> DNSOutgoing: + out = DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA) + address = socket.inet_pton(socket.AF_INET, "192.168.208.5") + + additionals = [ + { + "name": "HASS Bridge ZJWH FF5137._hap._tcp.local.", + "address": address, + "port": 51832, + "text": b"\x13md=HASS Bridge" + b" ZJWH\x06pv=1.0\x14id=01:6B:30:FF:51:37\x05c#=12\x04s#=1\x04ff=0\x04" + b"ci=2\x04sf=0\x0bsh=L0m/aQ==", + }, + { + "name": "HASS Bridge 3K9A C2582A._hap._tcp.local.", + "address": address, + "port": 51834, + "text": b"\x13md=HASS Bridge" + b" 3K9A\x06pv=1.0\x14id=E2:AA:5B:C2:58:2A\x05c#=12\x04s#=1\x04ff=0\x04" + b"ci=2\x04sf=0\x0bsh=b2CnzQ==", + }, + { + "name": "Master Bed TV CEDB27._hap._tcp.local.", + "address": address, + "port": 51830, + "text": b"\x10md=Master Bed" + b" TV\x06pv=1.0\x14id=9E:B7:44:CE:DB:27\x05c#=18\x04s#=1\x04ff=0\x05" + b"ci=31\x04sf=0\x0bsh=CVj1kw==", + }, + { + "name": "Living Room TV 921B77._hap._tcp.local.", + "address": address, + "port": 51833, + "text": b"\x11md=Living Room" + b" TV\x06pv=1.0\x14id=11:61:E7:92:1B:77\x05c#=17\x04s#=1\x04ff=0\x05" + 
b"ci=31\x04sf=0\x0bsh=qU77SQ==", + }, + { + "name": "HASS Bridge ZC8X FF413D._hap._tcp.local.", + "address": address, + "port": 51829, + "text": b"\x13md=HASS Bridge" + b" ZC8X\x06pv=1.0\x14id=96:14:45:FF:41:3D\x05c#=12\x04s#=1\x04ff=0\x04" + b"ci=2\x04sf=0\x0bsh=b0QZlg==", + }, + { + "name": "HASS Bridge WLTF 4BE61F._hap._tcp.local.", + "address": address, + "port": 51837, + "text": b"\x13md=HASS Bridge" + b" WLTF\x06pv=1.0\x14id=E0:E7:98:4B:E6:1F\x04c#=2\x04s#=1\x04ff=0\x04" + b"ci=2\x04sf=0\x0bsh=ahAISA==", + }, + { + "name": "FrontdoorCamera 8941D1._hap._tcp.local.", + "address": address, + "port": 54898, + "text": b"\x12md=FrontdoorCamera\x06pv=1.0\x14id=9F:B7:DC:89:41:D1\x04c#=2\x04" + b"s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=0+MXmA==", + }, + { + "name": "HASS Bridge W9DN 5B5CC5._hap._tcp.local.", + "address": address, + "port": 51836, + "text": b"\x13md=HASS Bridge" + b" W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1\x04ff=0\x04" + b"ci=2\x04sf=0\x0bsh=6fLM5A==", + }, + { + "name": "HASS Bridge Y9OO EFF0A7._hap._tcp.local.", + "address": address, + "port": 51838, + "text": b"\x13md=HASS Bridge" + b" Y9OO\x06pv=1.0\x14id=D3:FE:98:EF:F0:A7\x04c#=2\x04s#=1\x04ff=0\x04" + b"ci=2\x04sf=0\x0bsh=u3bdfw==", + }, + { + "name": "Snooze Room TV 6B89B0._hap._tcp.local.", + "address": address, + "port": 51835, + "text": b"\x11md=Snooze Room" + b" TV\x06pv=1.0\x14id=5F:D5:70:6B:89:B0\x05c#=17\x04s#=1\x04ff=0\x05" + b"ci=31\x04sf=0\x0bsh=xNTqsg==", + }, + { + "name": "AlexanderHomeAssistant 74651D._hap._tcp.local.", + "address": address, + "port": 54811, + "text": b"\x19md=AlexanderHomeAssistant\x06pv=1.0\x14id=59:8A:0B:74:65:1D\x05" + b"c#=14\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=ccZLPA==", + }, + { + "name": "HASS Bridge OS95 39C053._hap._tcp.local.", + "address": address, + "port": 51831, + "text": b"\x13md=HASS Bridge" + b" OS95\x06pv=1.0\x14id=7E:8C:E6:39:C0:53\x05c#=12\x04s#=1\x04ff=0\x04ci=2" + b"\x04sf=0\x0bsh=Xfe5LQ==", + }, + ] + + out.add_answer_at_time( + 
DNSText( + "HASS Bridge W9DN 5B5CC5._hap._tcp.local.", + const._TYPE_TXT, + const._CLASS_IN | const._CLASS_UNIQUE, + const._DNS_OTHER_TTL, + b"\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1" + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", + ), + 0, + ) + + for record in additionals: + out.add_additional_answer( + DNSService( + record["name"], # type: ignore + const._TYPE_SRV, + const._CLASS_IN | const._CLASS_UNIQUE, + const._DNS_HOST_TTL, + 0, + 0, + record["port"], # type: ignore + record["name"], # type: ignore + ) + ) + out.add_additional_answer( + DNSText( + record["name"], # type: ignore + const._TYPE_TXT, + const._CLASS_IN | const._CLASS_UNIQUE, + const._DNS_OTHER_TTL, + record["text"], # type: ignore + ) + ) + out.add_additional_answer( + DNSAddress( + record["name"], # type: ignore + const._TYPE_A, + const._CLASS_IN | const._CLASS_UNIQUE, + const._DNS_HOST_TTL, + record["address"], # type: ignore + ) + ) + + return out diff --git a/tests/benchmarks/test_cache.py b/tests/benchmarks/test_cache.py new file mode 100644 index 000000000..7813f6798 --- /dev/null +++ b/tests/benchmarks/test_cache.py @@ -0,0 +1,50 @@ +from __future__ import annotations + +from pytest_codspeed import BenchmarkFixture + +from zeroconf import DNSCache, DNSPointer, current_time_millis +from zeroconf.const import _CLASS_IN, _TYPE_PTR + + +def test_add_expire_1000_records(benchmark: BenchmarkFixture) -> None: + """Benchmark for DNSCache to expire 10000 records.""" + cache = DNSCache() + now = current_time_millis() + records = [ + DNSPointer( + name=f"test{id}.local.", + type_=_TYPE_PTR, + class_=_CLASS_IN, + ttl=60, + alias=f"test{id}.local.", + created=now + id, + ) + for id in range(1000) + ] + + @benchmark + def _expire_records() -> None: + cache.async_add_records(records) + cache.async_expire(now + 100_000) + + +def test_expire_no_records_to_expire(benchmark: BenchmarkFixture) -> None: + """Benchmark for DNSCache with 1000 records none to expire.""" + cache = 
DNSCache() + now = current_time_millis() + cache.async_add_records( + DNSPointer( + name=f"test{id}.local.", + type_=_TYPE_PTR, + class_=_CLASS_IN, + ttl=60, + alias=f"test{id}.local.", + created=now + id, + ) + for id in range(1000) + ) + cache.async_expire(now) + + @benchmark + def _expire_records() -> None: + cache.async_expire(now) diff --git a/tests/benchmarks/test_incoming.py b/tests/benchmarks/test_incoming.py new file mode 100644 index 000000000..6d31e51e5 --- /dev/null +++ b/tests/benchmarks/test_incoming.py @@ -0,0 +1,186 @@ +"""Benchmark for DNSIncoming.""" + +from __future__ import annotations + +import socket + +from pytest_codspeed import BenchmarkFixture + +from zeroconf import ( + DNSAddress, + DNSIncoming, + DNSNsec, + DNSOutgoing, + DNSService, + DNSText, + const, +) + + +def generate_packets() -> list[bytes]: + out = DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA) + address = socket.inet_pton(socket.AF_INET, "192.168.208.5") + + additionals = [ + { + "name": "HASS Bridge ZJWH FF5137._hap._tcp.local.", + "address": address, + "port": 51832, + "text": b"\x13md=HASS Bridge" + b" ZJWH\x06pv=1.0\x14id=01:6B:30:FF:51:37\x05c#=12\x04s#=1\x04ff=0\x04" + b"ci=2\x04sf=0\x0bsh=L0m/aQ==", + }, + { + "name": "HASS Bridge 3K9A C2582A._hap._tcp.local.", + "address": address, + "port": 51834, + "text": b"\x13md=HASS Bridge" + b" 3K9A\x06pv=1.0\x14id=E2:AA:5B:C2:58:2A\x05c#=12\x04s#=1\x04ff=0\x04" + b"ci=2\x04sf=0\x0bsh=b2CnzQ==", + }, + { + "name": "Master Bed TV CEDB27._hap._tcp.local.", + "address": address, + "port": 51830, + "text": b"\x10md=Master Bed" + b" TV\x06pv=1.0\x14id=9E:B7:44:CE:DB:27\x05c#=18\x04s#=1\x04ff=0\x05" + b"ci=31\x04sf=0\x0bsh=CVj1kw==", + }, + { + "name": "Living Room TV 921B77._hap._tcp.local.", + "address": address, + "port": 51833, + "text": b"\x11md=Living Room" + b" TV\x06pv=1.0\x14id=11:61:E7:92:1B:77\x05c#=17\x04s#=1\x04ff=0\x05" + b"ci=31\x04sf=0\x0bsh=qU77SQ==", + }, + { + "name": "HASS Bridge ZC8X 
FF413D._hap._tcp.local.", + "address": address, + "port": 51829, + "text": b"\x13md=HASS Bridge" + b" ZC8X\x06pv=1.0\x14id=96:14:45:FF:41:3D\x05c#=12\x04s#=1\x04ff=0\x04" + b"ci=2\x04sf=0\x0bsh=b0QZlg==", + }, + { + "name": "HASS Bridge WLTF 4BE61F._hap._tcp.local.", + "address": address, + "port": 51837, + "text": b"\x13md=HASS Bridge" + b" WLTF\x06pv=1.0\x14id=E0:E7:98:4B:E6:1F\x04c#=2\x04s#=1\x04ff=0\x04" + b"ci=2\x04sf=0\x0bsh=ahAISA==", + }, + { + "name": "FrontdoorCamera 8941D1._hap._tcp.local.", + "address": address, + "port": 54898, + "text": b"\x12md=FrontdoorCamera\x06pv=1.0\x14id=9F:B7:DC:89:41:D1\x04c#=2\x04" + b"s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=0+MXmA==", + }, + { + "name": "HASS Bridge W9DN 5B5CC5._hap._tcp.local.", + "address": address, + "port": 51836, + "text": b"\x13md=HASS Bridge" + b" W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1\x04ff=0\x04" + b"ci=2\x04sf=0\x0bsh=6fLM5A==", + }, + { + "name": "HASS Bridge Y9OO EFF0A7._hap._tcp.local.", + "address": address, + "port": 51838, + "text": b"\x13md=HASS Bridge" + b" Y9OO\x06pv=1.0\x14id=D3:FE:98:EF:F0:A7\x04c#=2\x04s#=1\x04ff=0\x04" + b"ci=2\x04sf=0\x0bsh=u3bdfw==", + }, + { + "name": "Snooze Room TV 6B89B0._hap._tcp.local.", + "address": address, + "port": 51835, + "text": b"\x11md=Snooze Room" + b" TV\x06pv=1.0\x14id=5F:D5:70:6B:89:B0\x05c#=17\x04s#=1\x04ff=0\x05" + b"ci=31\x04sf=0\x0bsh=xNTqsg==", + }, + { + "name": "AlexanderHomeAssistant 74651D._hap._tcp.local.", + "address": address, + "port": 54811, + "text": b"\x19md=AlexanderHomeAssistant\x06pv=1.0\x14id=59:8A:0B:74:65:1D\x05" + b"c#=14\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=ccZLPA==", + }, + { + "name": "HASS Bridge OS95 39C053._hap._tcp.local.", + "address": address, + "port": 51831, + "text": b"\x13md=HASS Bridge" + b" OS95\x06pv=1.0\x14id=7E:8C:E6:39:C0:53\x05c#=12\x04s#=1\x04ff=0\x04ci=2" + b"\x04sf=0\x0bsh=Xfe5LQ==", + }, + ] + + out.add_answer_at_time( + DNSText( + "HASS Bridge W9DN 5B5CC5._hap._tcp.local.", + 
const._TYPE_TXT, + const._CLASS_IN | const._CLASS_UNIQUE, + const._DNS_OTHER_TTL, + b"\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1" + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", + ), + 0, + ) + + for record in additionals: + out.add_additional_answer( + DNSService( + record["name"], # type: ignore + const._TYPE_SRV, + const._CLASS_IN | const._CLASS_UNIQUE, + const._DNS_HOST_TTL, + 0, + 0, + record["port"], # type: ignore + record["name"], # type: ignore + ) + ) + out.add_additional_answer( + DNSText( + record["name"], # type: ignore + const._TYPE_TXT, + const._CLASS_IN | const._CLASS_UNIQUE, + const._DNS_OTHER_TTL, + record["text"], # type: ignore + ) + ) + out.add_additional_answer( + DNSAddress( + record["name"], # type: ignore + const._TYPE_A, + const._CLASS_IN | const._CLASS_UNIQUE, + const._DNS_HOST_TTL, + record["address"], # type: ignore + ) + ) + out.add_additional_answer( + DNSNsec( + record["name"], # type: ignore + const._TYPE_NSEC, + const._CLASS_IN | const._CLASS_UNIQUE, + const._DNS_OTHER_TTL, + record["name"], # type: ignore + [const._TYPE_TXT, const._TYPE_SRV], + ) + ) + + return out.packets() + + +packets = generate_packets() + + +def test_parse_incoming_message(benchmark: BenchmarkFixture) -> None: + @benchmark + def parse_incoming_message() -> None: + for packet in packets: + DNSIncoming(packet).answers # noqa: B018 + break diff --git a/tests/benchmarks/test_outgoing.py b/tests/benchmarks/test_outgoing.py new file mode 100644 index 000000000..a8db4d6f8 --- /dev/null +++ b/tests/benchmarks/test_outgoing.py @@ -0,0 +1,20 @@ +"""Benchmark for DNSOutgoing.""" + +from __future__ import annotations + +from pytest_codspeed import BenchmarkFixture + +from zeroconf._protocol.outgoing import State + +from .helpers import generate_packets + + +def test_parse_outgoing_message(benchmark: BenchmarkFixture) -> None: + out = generate_packets() + + @benchmark + def make_outgoing_message() -> None: + out.packets() + out.state = 
State.init.value + out.finished = False + out._reset_for_next_packet() diff --git a/tests/benchmarks/test_send.py b/tests/benchmarks/test_send.py new file mode 100644 index 000000000..596662a2b --- /dev/null +++ b/tests/benchmarks/test_send.py @@ -0,0 +1,24 @@ +"""Benchmark for sending packets.""" + +from __future__ import annotations + +import pytest +from pytest_codspeed import BenchmarkFixture + +from zeroconf.asyncio import AsyncZeroconf + +from .helpers import generate_packets + + +@pytest.mark.asyncio +async def test_sending_packets(benchmark: BenchmarkFixture) -> None: + """Benchmark sending packets.""" + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) + await aiozc.zeroconf.async_wait_for_start() + out = generate_packets() + + @benchmark + def _send_packets() -> None: + aiozc.zeroconf.async_send(out) + + await aiozc.async_close() diff --git a/tests/benchmarks/test_txt_properties.py b/tests/benchmarks/test_txt_properties.py new file mode 100644 index 000000000..72afa0b65 --- /dev/null +++ b/tests/benchmarks/test_txt_properties.py @@ -0,0 +1,21 @@ +from __future__ import annotations + +from pytest_codspeed import BenchmarkFixture + +from zeroconf import ServiceInfo + +info = ServiceInfo( + "_test._tcp.local.", + "test._test._tcp.local.", + properties=( + b"\x19md=AlexanderHomeAssistant\x06pv=1.0\x14id=59:8A:0B:74:65:1D\x05" + b"c#=14\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=ccZLPA==" + ), +) + + +def test_txt_properties(benchmark: BenchmarkFixture) -> None: + @benchmark + def process_properties() -> None: + info._properties = None + info.properties # noqa: B018 diff --git a/tests/conftest.py b/tests/conftest.py index 5525c4ee0..531c810be 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,7 +1,6 @@ -#!/usr/bin/env python +"""conftest for zeroconf tests.""" - -""" conftest for zeroconf tests. 
""" +from __future__ import annotations import threading from unittest.mock import patch @@ -24,9 +23,11 @@ def verify_threads_ended(): @pytest.fixture def run_isolated(): """Change the mDNS port to run the test in isolation.""" - with patch.object(query_handler, "_MDNS_PORT", 5454), patch.object( - _core, "_MDNS_PORT", 5454 - ), patch.object(const, "_MDNS_PORT", 5454): + with ( + patch.object(query_handler, "_MDNS_PORT", 5454), + patch.object(_core, "_MDNS_PORT", 5454), + patch.object(const, "_MDNS_PORT", 5454), + ): yield diff --git a/tests/services/__init__.py b/tests/services/__init__.py index 2ef4b15b1..584a74eca 100644 --- a/tests/services/__init__.py +++ b/tests/services/__init__.py @@ -1,21 +1,23 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - This module provides a framework for the use of DNS Service Discovery - using IP multicast. +This module provides a framework for the use of DNS Service Discovery +using IP multicast. - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. 
+This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ + +from __future__ import annotations diff --git a/tests/services/test_browser.py b/tests/services/test_browser.py index 37896ba1d..e9135bb60 100644 --- a/tests/services/test_browser.py +++ b/tests/services/test_browser.py @@ -1,7 +1,6 @@ -#!/usr/bin/env python +"""Unit tests for zeroconf._services.browser.""" - -""" Unit tests for zeroconf._services.browser. """ +from __future__ import annotations import asyncio import logging @@ -9,8 +8,9 @@ import socket import time import unittest +from collections.abc import Iterable from threading import Event -from typing import Iterable, List, Set, cast +from typing import cast from unittest.mock import patch import pytest @@ -39,7 +39,7 @@ time_changed_millis, ) -log = logging.getLogger('zeroconf') +log = logging.getLogger("zeroconf") original_logging_level = logging.NOTSET @@ -65,7 +65,7 @@ def test_service_browser_cancel_multiple_times(): """Test we can cancel a ServiceBrowser multiple times before close.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) # start a browser type_ = "_hap._tcp.local." 
@@ -87,7 +87,7 @@ def test_service_browser_cancel_context_manager(): """Test we can cancel a ServiceBrowser with it being used as a context manager.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) # start a browser type_ = "_hap._tcp.local." @@ -116,7 +116,7 @@ def test_service_browser_cancel_multiple_times_after_close(): """Test we can cancel a ServiceBrowser multiple times after close.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) # start a browser type_ = "_hap._tcp.local." @@ -137,7 +137,7 @@ class MyServiceListener(r.ServiceListener): def test_service_browser_started_after_zeroconf_closed(): """Test starting a ServiceBrowser after close raises RuntimeError.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) # start a browser type_ = "_hap._tcp.local." @@ -155,9 +155,9 @@ def test_multiple_instances_running_close(): """Test we can shutdown multiple instances.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) - zc2 = Zeroconf(interfaces=['127.0.0.1']) - zc3 = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) + zc2 = Zeroconf(interfaces=["127.0.0.1"]) + zc3 = Zeroconf(interfaces=["127.0.0.1"]) assert zc.loop != zc2.loop assert zc.loop != zc3.loop @@ -177,13 +177,13 @@ class MyServiceListener(r.ServiceListener): class TestServiceBrowser(unittest.TestCase): def test_update_record(self): - enable_ipv6 = has_working_ipv6() and not os.environ.get('SKIP_IPV6') + enable_ipv6 = has_working_ipv6() and not os.environ.get("SKIP_IPV6") - service_name = 'name._type._tcp.local.' - service_type = '_type._tcp.local.' - service_server = 'ash-1.local.' - service_text = b'path=/~matt1/' - service_address = '10.0.1.2' + service_name = "name._type._tcp.local." + service_type = "_type._tcp.local." 
+ service_server = "ash-1.local." + service_text = b"path=/~matt1/" + service_address = "10.0.1.2" service_v6_address = "2001:db8::1" service_v6_second_address = "6001:db8::1" @@ -221,7 +221,9 @@ def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de assert service_info.server.lower() == service_server.lower() service_updated_event.set() - def mock_record_update_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNSIncoming: + def mock_record_update_incoming_msg( + service_state_change: r.ServiceStateChange, + ) -> r.DNSIncoming: generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) assert generated.is_response() is True @@ -232,7 +234,11 @@ def mock_record_update_incoming_msg(service_state_change: r.ServiceStateChange) generated.add_answer_at_time( r.DNSText( - service_name, const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, ttl, service_text + service_name, + const._TYPE_TXT, + const._CLASS_IN | const._CLASS_UNIQUE, + ttl, + service_text, ), 0, ) @@ -287,12 +293,13 @@ def mock_record_update_incoming_msg(service_state_change: r.ServiceStateChange) ) generated.add_answer_at_time( - r.DNSPointer(service_type, const._TYPE_PTR, const._CLASS_IN, ttl, service_name), 0 + r.DNSPointer(service_type, const._TYPE_PTR, const._CLASS_IN, ttl, service_name), + 0, ) return r.DNSIncoming(generated.packets()[0]) - zeroconf = r.Zeroconf(interfaces=['127.0.0.1']) + zeroconf = r.Zeroconf(interfaces=["127.0.0.1"]) service_browser = r.ServiceBrowser(zeroconf, service_type, listener=MyServiceListener()) try: @@ -307,7 +314,7 @@ def mock_record_update_incoming_msg(service_state_change: r.ServiceStateChange) # service SRV updated service_updated_event.clear() - service_server = 'ash-2.local.' + service_server = "ash-2.local." 
_inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Updated)) service_updated_event.wait(wait_time) assert service_added_count == 1 @@ -316,7 +323,7 @@ def mock_record_update_incoming_msg(service_state_change: r.ServiceStateChange) # service TXT updated service_updated_event.clear() - service_text = b'path=/~matt2/' + service_text = b"path=/~matt2/" _inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Updated)) service_updated_event.wait(wait_time) assert service_added_count == 1 @@ -325,7 +332,7 @@ def mock_record_update_incoming_msg(service_state_change: r.ServiceStateChange) # service TXT updated - duplicate update should not trigger another service_updated service_updated_event.clear() - service_text = b'path=/~matt2/' + service_text = b"path=/~matt2/" _inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Updated)) service_updated_event.wait(wait_time) assert service_added_count == 1 @@ -334,7 +341,7 @@ def mock_record_update_incoming_msg(service_state_change: r.ServiceStateChange) # service A updated service_updated_event.clear() - service_address = '10.0.1.3' + service_address = "10.0.1.3" # Verify we match on uppercase service_server = service_server.upper() _inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Updated)) @@ -345,9 +352,9 @@ def mock_record_update_incoming_msg(service_state_change: r.ServiceStateChange) # service all updated service_updated_event.clear() - service_server = 'ash-3.local.' - service_text = b'path=/~matt3/' - service_address = '10.0.1.3' + service_server = "ash-3.local." 
+ service_text = b"path=/~matt3/" + service_address = "10.0.1.3" _inject_response(zeroconf, mock_record_update_incoming_msg(r.ServiceStateChange.Updated)) service_updated_event.wait(wait_time) assert service_added_count == 1 @@ -372,8 +379,12 @@ def mock_record_update_incoming_msg(service_state_change: r.ServiceStateChange) class TestServiceBrowserMultipleTypes(unittest.TestCase): def test_update_record(self): - service_names = ['name2._type2._tcp.local.', 'name._type._tcp.local.', 'name._type._udp.local'] - service_types = ['_type2._tcp.local.', '_type._tcp.local.', '_type._udp.local.'] + service_names = [ + "name2._type2._tcp.local.", + "name._type._tcp.local.", + "name._type._udp.local", + ] + service_types = ["_type2._tcp.local.", "_type._tcp.local.", "_type._udp.local."] service_added_count = 0 service_removed_count = 0 @@ -394,15 +405,19 @@ def remove_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de service_removed_event.set() def mock_record_update_incoming_msg( - service_state_change: r.ServiceStateChange, service_type: str, service_name: str, ttl: int + service_state_change: r.ServiceStateChange, + service_type: str, + service_name: str, + ttl: int, ) -> r.DNSIncoming: generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) generated.add_answer_at_time( - r.DNSPointer(service_type, const._TYPE_PTR, const._CLASS_IN, ttl, service_name), 0 + r.DNSPointer(service_type, const._TYPE_PTR, const._CLASS_IN, ttl, service_name), + 0, ) return r.DNSIncoming(generated.packets()[0]) - zeroconf = r.Zeroconf(interfaces=['127.0.0.1']) + zeroconf = r.Zeroconf(interfaces=["127.0.0.1"]) service_browser = r.ServiceBrowser(zeroconf, service_types, listener=MyServiceListener()) try: @@ -437,7 +452,10 @@ def _mock_get_expiration_time(self, percent): _inject_response( zeroconf, mock_record_update_incoming_msg( - r.ServiceStateChange.Added, service_types[0], service_names[0], 120 + r.ServiceStateChange.Added, + service_types[0], + service_names[0], + 120, ), ) # Add 
the last record after updating the first one @@ -446,7 +464,10 @@ def _mock_get_expiration_time(self, percent): _inject_response( zeroconf, mock_record_update_incoming_msg( - r.ServiceStateChange.Added, service_types[2], service_names[2], 120 + r.ServiceStateChange.Added, + service_types[2], + service_names[2], + 120, ), ) service_add_event.wait(wait_time) @@ -502,7 +523,7 @@ def test_first_query_delay(): https://datatracker.ietf.org/doc/html/rfc6762#section-5.2 """ type_ = "_http._tcp.local." - zeroconf_browser = Zeroconf(interfaces=['127.0.0.1']) + zeroconf_browser = Zeroconf(interfaces=["127.0.0.1"]) _wait_for_start(zeroconf_browser) # we are going to patch the zeroconf send to check query transmission @@ -544,7 +565,7 @@ async def test_asking_default_is_asking_qm_questions_after_the_first_qu(): got_query = asyncio.Event() type_ = "_http._tcp.local." - registration_name = "xxxyyy.%s" % type_ + registration_name = f"xxxyyy.{type_}" def on_service_state_change(zeroconf, service_type, state_change, name): if name == registration_name: @@ -553,7 +574,7 @@ def on_service_state_change(zeroconf, service_type, state_change, name): elif state_change is ServiceStateChange.Removed: service_removed.set() - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zeroconf_browser = aiozc.zeroconf zeroconf_browser.question_history = QuestionHistoryWithoutSuppression() await zeroconf_browser.async_wait_for_start() @@ -562,7 +583,7 @@ def on_service_state_change(zeroconf, service_type, state_change, name): old_send = zeroconf_browser.async_send expected_ttl = const._DNS_OTHER_TTL - questions: List[List[DNSQuestion]] = [] + questions: list[list[DNSQuestion]] = [] def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): """Sends an outgoing packet.""" @@ -573,7 +594,7 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): assert len(zeroconf_browser.engine.protocols) == 2 - 
aio_zeroconf_registrar = AsyncZeroconf(interfaces=['127.0.0.1']) + aio_zeroconf_registrar = AsyncZeroconf(interfaces=["127.0.0.1"]) zeroconf_registrar = aio_zeroconf_registrar.zeroconf await aio_zeroconf_registrar.zeroconf.async_wait_for_start() @@ -590,7 +611,7 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): 80, 0, 0, - {'path': '/~paulsm/'}, + {"path": "/~paulsm/"}, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")], ) @@ -646,7 +667,7 @@ async def test_ttl_refresh_cancelled_rescue_query(): got_query = asyncio.Event() type_ = "_http._tcp.local." - registration_name = "xxxyyy.%s" % type_ + registration_name = f"xxxyyy.{type_}" def on_service_state_change(zeroconf, service_type, state_change, name): if name == registration_name: @@ -655,7 +676,7 @@ def on_service_state_change(zeroconf, service_type, state_change, name): elif state_change is ServiceStateChange.Removed: service_removed.set() - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zeroconf_browser = aiozc.zeroconf zeroconf_browser.question_history = QuestionHistoryWithoutSuppression() await zeroconf_browser.async_wait_for_start() @@ -675,7 +696,7 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): assert len(zeroconf_browser.engine.protocols) == 2 - aio_zeroconf_registrar = AsyncZeroconf(interfaces=['127.0.0.1']) + aio_zeroconf_registrar = AsyncZeroconf(interfaces=["127.0.0.1"]) zeroconf_registrar = aio_zeroconf_registrar.zeroconf await aio_zeroconf_registrar.zeroconf.async_wait_for_start() @@ -692,7 +713,7 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): 80, 0, 0, - {'path': '/~paulsm/'}, + {"path": "/~paulsm/"}, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")], ) @@ -749,9 +770,9 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): @pytest.mark.asyncio async def test_asking_qm_questions(): - """Verify explictly asking 
QM questions.""" + """Verify explicitly asking QM questions.""" type_ = "_quservice._tcp.local." - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zeroconf_browser = aiozc.zeroconf await zeroconf_browser.async_wait_for_start() # we are going to patch the zeroconf send to check query transmission @@ -773,7 +794,10 @@ def on_service_state_change(zeroconf, service_type, state_change, name): pass browser = AsyncServiceBrowser( - zeroconf_browser, type_, [on_service_state_change], question_type=r.DNSQuestionType.QM + zeroconf_browser, + type_, + [on_service_state_change], + question_type=r.DNSQuestionType.QM, ) await asyncio.sleep(millis_to_seconds(_services_browser._FIRST_QUERY_DELAY_RANDOM_INTERVAL[1] + 5)) try: @@ -787,7 +811,7 @@ def on_service_state_change(zeroconf, service_type, state_change, name): async def test_asking_qu_questions(): """Verify the service browser can ask QU questions.""" type_ = "_quservice._tcp.local." - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zeroconf_browser = aiozc.zeroconf await zeroconf_browser.async_wait_for_start() @@ -810,7 +834,10 @@ def on_service_state_change(zeroconf, service_type, state_change, name): pass browser = AsyncServiceBrowser( - zeroconf_browser, type_, [on_service_state_change], question_type=r.DNSQuestionType.QU + zeroconf_browser, + type_, + [on_service_state_change], + question_type=r.DNSQuestionType.QU, ) await asyncio.sleep(millis_to_seconds(_services_browser._FIRST_QUERY_DELAY_RANDOM_INTERVAL[1] + 5)) try: @@ -824,11 +851,13 @@ def test_legacy_record_update_listener(): """Test a RecordUpdateListener that does not implement update_records.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) with pytest.raises(RuntimeError): r.RecordUpdateListener().update_record( - zc, 0, r.DNSRecord('irrelevant', const._TYPE_SRV, const._CLASS_IN, 
const._DNS_HOST_TTL) + zc, + 0, + r.DNSRecord("irrelevant", const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL), ) updates = [] @@ -836,8 +865,7 @@ def test_legacy_record_update_listener(): class LegacyRecordUpdateListener(r.RecordUpdateListener): """A RecordUpdateListener that does not implement update_records.""" - def update_record(self, zc: 'Zeroconf', now: float, record: r.DNSRecord) -> None: - nonlocal updates + def update_record(self, zc: Zeroconf, now: float, record: r.DNSRecord) -> None: updates.append(record) listener = LegacyRecordUpdateListener() @@ -855,11 +883,11 @@ def on_service_state_change(zeroconf, service_type, state_change, name): info_service = ServiceInfo( type_, - f'{name}.{type_}', + f"{name}.{type_}", 80, 0, 0, - {'path': '/~paulsm/'}, + {"path": "/~paulsm/"}, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")], ) @@ -870,7 +898,7 @@ def on_service_state_change(zeroconf, service_type, state_change, name): browser.cancel() - assert len(updates) + assert updates assert len([isinstance(update, r.DNSPointer) and update.name == type_ for update in updates]) >= 1 zc.remove_listener(listener) @@ -884,34 +912,40 @@ def test_service_browser_is_aware_of_port_changes(): """Test that the ServiceBrowser is aware of port changes.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) # start a browser type_ = "_hap._tcp.local." 
- registration_name = "xxxyyy.%s" % type_ + registration_name = f"xxxyyy.{type_}" callbacks = [] # dummy service callback def on_service_state_change(zeroconf, service_type, state_change, name): """Dummy callback.""" - nonlocal callbacks if name == registration_name: callbacks.append((service_type, state_change, name)) browser = ServiceBrowser(zc, type_, [on_service_state_change]) - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} address_parsed = "10.0.1.2" address = socket.inet_aton(address_parsed) info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address]) _inject_response( zc, - mock_incoming_msg([info.dns_pointer(), info.dns_service(), info.dns_text(), *info.dns_addresses()]), + mock_incoming_msg( + [ + info.dns_pointer(), + info.dns_service(), + info.dns_text(), + *info.dns_addresses(), + ] + ), ) time.sleep(0.1) - assert callbacks == [('_hap._tcp.local.', ServiceStateChange.Added, 'xxxyyy._hap._tcp.local.')] + assert callbacks == [("_hap._tcp.local.", ServiceStateChange.Added, "xxxyyy._hap._tcp.local.")] service_info = zc.get_service_info(type_, registration_name) assert service_info is not None assert service_info.port == 80 @@ -926,8 +960,8 @@ def on_service_state_change(zeroconf, service_type, state_change, name): time.sleep(0.1) assert callbacks == [ - ('_hap._tcp.local.', ServiceStateChange.Added, 'xxxyyy._hap._tcp.local.'), - ('_hap._tcp.local.', ServiceStateChange.Updated, 'xxxyyy._hap._tcp.local.'), + ("_hap._tcp.local.", ServiceStateChange.Added, "xxxyyy._hap._tcp.local."), + ("_hap._tcp.local.", ServiceStateChange.Updated, "xxxyyy._hap._tcp.local."), ] service_info = zc.get_service_info(type_, registration_name) assert service_info is not None @@ -941,25 +975,22 @@ def test_service_browser_listeners_update_service(): """Test that the ServiceBrowser ServiceListener that implements update_service.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = 
Zeroconf(interfaces=["127.0.0.1"]) # start a browser type_ = "_hap._tcp.local." - registration_name = "xxxyyy.%s" % type_ + registration_name = f"xxxyyy.{type_}" callbacks = [] class MyServiceListener(r.ServiceListener): def add_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks if name == registration_name: callbacks.append(("add", type_, name)) def remove_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks if name == registration_name: callbacks.append(("remove", type_, name)) def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks if name == registration_name: callbacks.append(("update", type_, name)) @@ -967,14 +998,21 @@ def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de browser = r.ServiceBrowser(zc, type_, None, listener) - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} address_parsed = "10.0.1.2" address = socket.inet_aton(address_parsed) info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address]) _inject_response( zc, - mock_incoming_msg([info.dns_pointer(), info.dns_service(), info.dns_text(), *info.dns_addresses()]), + mock_incoming_msg( + [ + info.dns_pointer(), + info.dns_service(), + info.dns_text(), + *info.dns_addresses(), + ] + ), ) time.sleep(0.2) info._dns_service_cache = None # we are mutating the record so clear the cache @@ -987,8 +1025,8 @@ def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de time.sleep(0.2) assert callbacks == [ - ('add', type_, registration_name), - ('update', type_, registration_name), + ("add", type_, registration_name), + ("update", type_, registration_name), ] browser.cancel() @@ -999,20 +1037,18 @@ def test_service_browser_listeners_no_update_service(): """Test that the ServiceBrowser ServiceListener that does not implement update_service.""" # instantiate a zeroconf instance - zc = 
Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) # start a browser type_ = "_hap._tcp.local." - registration_name = "xxxyyy.%s" % type_ + registration_name = f"xxxyyy.{type_}" callbacks = [] class MyServiceListener(r.ServiceListener): def add_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks if name == registration_name: callbacks.append(("add", type_, name)) def remove_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks if name == registration_name: callbacks.append(("remove", type_, name)) @@ -1020,14 +1056,21 @@ def remove_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de browser = r.ServiceBrowser(zc, type_, None, listener) - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} address_parsed = "10.0.1.2" address = socket.inet_aton(address_parsed) info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address]) _inject_response( zc, - mock_incoming_msg([info.dns_pointer(), info.dns_service(), info.dns_text(), *info.dns_addresses()]), + mock_incoming_msg( + [ + info.dns_pointer(), + info.dns_service(), + info.dns_text(), + *info.dns_addresses(), + ] + ), ) time.sleep(0.2) info.port = 400 @@ -1040,7 +1083,7 @@ def remove_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de time.sleep(0.2) assert callbacks == [ - ('add', type_, registration_name), + ("add", type_, registration_name), ] browser.cancel() @@ -1054,7 +1097,7 @@ def test_service_browser_uses_non_strict_names(): def on_service_state_change(zeroconf, service_type, state_change, name): pass - zc = r.Zeroconf(interfaces=['127.0.0.1']) + zc = r.Zeroconf(interfaces=["127.0.0.1"]) browser = ServiceBrowser(zc, ["_tivo-videostream._tcp.local."], [on_service_state_change]) browser.cancel() @@ -1092,7 +1135,7 @@ def test_group_ptr_queries_with_known_answers(): @pytest.mark.asyncio async def 
test_generate_service_query_suppress_duplicate_questions(): """Generate a service query for sending with zeroconf.send.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zc = aiozc.zeroconf now = current_time_millis() name = "_suppresstest._tcp.local." @@ -1102,9 +1145,9 @@ async def test_generate_service_query_suppress_duplicate_questions(): const._TYPE_PTR, const._CLASS_IN, 10000, - f'known-to-other.{name}', + f"known-to-other.{name}", ) - other_known_answers: Set[r.DNSRecord] = {answer} + other_known_answers: set[r.DNSRecord] = {answer} zc.question_history.add_question_at_time(question, now, other_known_answers) assert zc.question_history.suppresses(question, now, other_known_answers) @@ -1146,10 +1189,10 @@ async def test_generate_service_query_suppress_duplicate_questions(): async def test_query_scheduler(): delay = const._BROWSER_TIME types_ = {"_hap._tcp.local.", "_http._tcp.local."} - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) await aiozc.zeroconf.async_wait_for_start() zc = aiozc.zeroconf - sends: List[r.DNSIncoming] = [] + sends: list[r.DNSIncoming] = [] def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): """Sends an outgoing packet.""" @@ -1161,7 +1204,6 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): # patch the zeroconf send so we can capture what is being sent with patch.object(zc, "async_send", send): - query_scheduler.start(loop) original_now = loop.time() @@ -1189,7 +1231,11 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): expected_when_time = ptr_record.get_expiration_time(const._EXPIRE_REFRESH_TIME_PERCENT) expected_expire_time = ptr_record.get_expiration_time(100) ptr_query = _ScheduledPTRQuery( - ptr_record.alias, ptr_record.name, int(ptr_record.ttl), expected_expire_time, expected_when_time + ptr_record.alias, + ptr_record.name, + 
int(ptr_record.ttl), + expected_expire_time, + expected_when_time, ) assert query_scheduler._query_heap == [ptr_query] @@ -1236,10 +1282,10 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): async def test_query_scheduler_rescue_records(): delay = const._BROWSER_TIME types_ = {"_hap._tcp.local.", "_http._tcp.local."} - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) await aiozc.zeroconf.async_wait_for_start() zc = aiozc.zeroconf - sends: List[r.DNSIncoming] = [] + sends: list[r.DNSIncoming] = [] def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): """Sends an outgoing packet.""" @@ -1251,7 +1297,6 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): # patch the zeroconf send so we can capture what is being sent with patch.object(zc, "async_send", send): - query_scheduler.start(loop) original_now = loop.time() @@ -1272,7 +1317,11 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): expected_when_time = ptr_record.get_expiration_time(const._EXPIRE_REFRESH_TIME_PERCENT) expected_expire_time = ptr_record.get_expiration_time(100) ptr_query = _ScheduledPTRQuery( - ptr_record.alias, ptr_record.name, int(ptr_record.ttl), expected_expire_time, expected_when_time + ptr_record.alias, + ptr_record.name, + int(ptr_record.ttl), + expected_expire_time, + expected_when_time, ) assert query_scheduler._query_heap == [ptr_query] assert query_scheduler._query_heap[0].cancelled is False @@ -1308,27 +1357,24 @@ def test_service_browser_matching(): """Test that the ServiceBrowser matching does not match partial names.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) # start a browser type_ = "_http._tcp.local." - registration_name = "xxxyyy.%s" % type_ + registration_name = f"xxxyyy.{type_}" not_match_type_ = "_asustor-looksgood_http._tcp.local." 
- not_match_registration_name = "xxxyyy.%s" % not_match_type_ + not_match_registration_name = f"xxxyyy.{not_match_type_}" callbacks = [] class MyServiceListener(r.ServiceListener): def add_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks if name == registration_name: callbacks.append(("add", type_, name)) def remove_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks if name == registration_name: callbacks.append(("remove", type_, name)) def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks if name == registration_name: callbacks.append(("update", type_, name)) @@ -1336,17 +1382,31 @@ def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de browser = r.ServiceBrowser(zc, type_, None, listener) - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} address_parsed = "10.0.1.2" address = socket.inet_aton(address_parsed) info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address]) should_not_match = ServiceInfo( - not_match_type_, not_match_registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address] + not_match_type_, + not_match_registration_name, + 80, + 0, + 0, + desc, + "ash-2.local.", + addresses=[address], ) _inject_response( zc, - mock_incoming_msg([info.dns_pointer(), info.dns_service(), info.dns_text(), *info.dns_addresses()]), + mock_incoming_msg( + [ + info.dns_pointer(), + info.dns_service(), + info.dns_text(), + *info.dns_addresses(), + ] + ), ) _inject_response( zc, @@ -1375,8 +1435,8 @@ def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de time.sleep(0.2) assert callbacks == [ - ('add', type_, registration_name), - ('update', type_, registration_name), + ("add", type_, registration_name), + ("update", type_, registration_name), ] browser.cancel() @@ -1387,25 +1447,22 @@ def update_service(self, zc, type_, name) -> None: # 
type: ignore[no-untyped-de def test_service_browser_expire_callbacks(): """Test that the ServiceBrowser matching does not match partial names.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) # start a browser type_ = "_old._tcp.local." - registration_name = "uniquezip323.%s" % type_ + registration_name = f"uniquezip323.{type_}" callbacks = [] class MyServiceListener(r.ServiceListener): def add_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks if name == registration_name: callbacks.append(("add", type_, name)) def remove_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks if name == registration_name: callbacks.append(("remove", type_, name)) def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks if name == registration_name: callbacks.append(("update", type_, name)) @@ -1413,7 +1470,7 @@ def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de browser = r.ServiceBrowser(zc, type_, None, listener) - desc = {'path': '/~paul2/'} + desc = {"path": "/~paul2/"} address_parsed = "10.0.1.3" address = socket.inet_aton(address_parsed) info = ServiceInfo( @@ -1431,13 +1488,20 @@ def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de _inject_response( zc, - mock_incoming_msg([info.dns_pointer(), info.dns_service(), info.dns_text(), *info.dns_addresses()]), + mock_incoming_msg( + [ + info.dns_pointer(), + info.dns_service(), + info.dns_text(), + *info.dns_addresses(), + ] + ), ) # Force the ttl to be 1 second now = current_time_millis() - for cache_record in zc.cache.cache.values(): + for cache_record in list(zc.cache.cache.values()): for record in cache_record: - record.set_created_ttl(now, 1) + zc.cache._async_set_created_ttl(record, now, 1) time.sleep(0.3) info.port = 400 @@ -1454,8 +1518,8 @@ def update_service(self, zc, 
type_, name) -> None: # type: ignore[no-untyped-de break assert callbacks == [ - ('add', type_, registration_name), - ('update', type_, registration_name), + ("add", type_, registration_name), + ("update", type_, registration_name), ] for _ in range(25): @@ -1464,9 +1528,9 @@ def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de break assert callbacks == [ - ('add', type_, registration_name), - ('update', type_, registration_name), - ('remove', type_, registration_name), + ("add", type_, registration_name), + ("update", type_, registration_name), + ("remove", type_, registration_name), ] browser.cancel() @@ -1494,13 +1558,13 @@ def test_scheduled_ptr_query_dunder_methods(): assert query75 != other with pytest.raises(TypeError): - query75 < other # type: ignore[operator] + assert query75 < other # type: ignore[operator] with pytest.raises(TypeError): - query75 <= other # type: ignore[operator] + assert query75 <= other # type: ignore[operator] with pytest.raises(TypeError): - query75 > other # type: ignore[operator] + assert query75 > other # type: ignore[operator] with pytest.raises(TypeError): - query75 >= other # type: ignore[operator] + assert query75 >= other # type: ignore[operator] @pytest.mark.asyncio @@ -1508,14 +1572,14 @@ async def test_close_zeroconf_without_browser_before_start_up_queries(): """Test that we stop sending startup queries if zeroconf is closed out from under the browser.""" service_added = asyncio.Event() type_ = "_http._tcp.local." 
- registration_name = "xxxyyy.%s" % type_ + registration_name = f"xxxyyy.{type_}" def on_service_state_change(zeroconf, service_type, state_change, name): if name == registration_name: if state_change is ServiceStateChange.Added: service_added.set() - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zeroconf_browser = aiozc.zeroconf zeroconf_browser.question_history = QuestionHistoryWithoutSuppression() await zeroconf_browser.async_wait_for_start() @@ -1529,7 +1593,7 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): assert len(zeroconf_browser.engine.protocols) == 2 - aio_zeroconf_registrar = AsyncZeroconf(interfaces=['127.0.0.1']) + aio_zeroconf_registrar = AsyncZeroconf(interfaces=["127.0.0.1"]) zeroconf_registrar = aio_zeroconf_registrar.zeroconf await aio_zeroconf_registrar.zeroconf.async_wait_for_start() @@ -1545,7 +1609,7 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): 80, 0, 0, - {'path': '/~paulsm/'}, + {"path": "/~paulsm/"}, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")], ) @@ -1577,14 +1641,14 @@ async def test_close_zeroconf_without_browser_after_start_up_queries(): service_added = asyncio.Event() type_ = "_http._tcp.local." 
- registration_name = "xxxyyy.%s" % type_ + registration_name = f"xxxyyy.{type_}" def on_service_state_change(zeroconf, service_type, state_change, name): if name == registration_name: if state_change is ServiceStateChange.Added: service_added.set() - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zeroconf_browser = aiozc.zeroconf zeroconf_browser.question_history = QuestionHistoryWithoutSuppression() await zeroconf_browser.async_wait_for_start() @@ -1598,7 +1662,7 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): assert len(zeroconf_browser.engine.protocols) == 2 - aio_zeroconf_registrar = AsyncZeroconf(interfaces=['127.0.0.1']) + aio_zeroconf_registrar = AsyncZeroconf(interfaces=["127.0.0.1"]) zeroconf_registrar = aio_zeroconf_registrar.zeroconf await aio_zeroconf_registrar.zeroconf.async_wait_for_start() @@ -1614,7 +1678,7 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): 80, 0, 0, - {'path': '/~paulsm/'}, + {"path": "/~paulsm/"}, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")], ) diff --git a/tests/services/test_info.py b/tests/services/test_info.py index c02d5e055..660b56d29 100644 --- a/tests/services/test_info.py +++ b/tests/services/test_info.py @@ -1,24 +1,23 @@ -#!/usr/bin/env python +"""Unit tests for zeroconf._services.info.""" - -""" Unit tests for zeroconf._services.info. 
""" +from __future__ import annotations import asyncio import logging import os import socket -import sys import threading import unittest +from collections.abc import Iterable from ipaddress import ip_address from threading import Event -from typing import Iterable, List, Optional from unittest.mock import patch import pytest import zeroconf as r from zeroconf import DNSAddress, RecordUpdate, const +from zeroconf._protocol.outgoing import DNSOutgoing from zeroconf._services import info from zeroconf._services.info import ServiceInfo from zeroconf._utils.net import IPVersion @@ -26,7 +25,7 @@ from .. import _inject_response, has_working_ipv6 -log = logging.getLogger('zeroconf') +log = logging.getLogger("zeroconf") original_logging_level = logging.NOTSET @@ -44,31 +43,45 @@ def teardown_module(): class TestServiceInfo(unittest.TestCase): def test_get_name(self): """Verify the name accessor can strip the type.""" - desc = {'path': '/~paulsm/'} - service_name = 'name._type._tcp.local.' - service_type = '_type._tcp.local.' - service_server = 'ash-1.local.' + desc = {"path": "/~paulsm/"} + service_name = "name._type._tcp.local." + service_type = "_type._tcp.local." + service_server = "ash-1.local." service_address = socket.inet_aton("10.0.1.2") info = ServiceInfo( - service_type, service_name, 22, 0, 0, desc, service_server, addresses=[service_address] + service_type, + service_name, + 22, + 0, + 0, + desc, + service_server, + addresses=[service_address], ) assert info.get_name() == "name" def test_service_info_rejects_non_matching_updates(self): """Verify records with the wrong name are rejected.""" - zc = r.Zeroconf(interfaces=['127.0.0.1']) - desc = {'path': '/~paulsm/'} - service_name = 'name._type._tcp.local.' - service_type = '_type._tcp.local.' - service_server = 'ash-1.local.' + zc = r.Zeroconf(interfaces=["127.0.0.1"]) + desc = {"path": "/~paulsm/"} + service_name = "name._type._tcp.local." + service_type = "_type._tcp.local." + service_server = "ash-1.local." 
service_address = socket.inet_aton("10.0.1.2") ttl = 120 now = r.current_time_millis() info = ServiceInfo( - service_type, service_name, 22, 0, 0, desc, service_server, addresses=[service_address] + service_type, + service_name, + 22, + 0, + 0, + desc, + service_server, + addresses=[service_address], ) - # Verify backwards compatiblity with calling with None + # Verify backwards compatibility with calling with None info.async_update_records(zc, now, []) # Matching updates info.async_update_records( @@ -81,7 +94,7 @@ def test_service_info_rejects_non_matching_updates(self): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, ttl, - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==', + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", ), None, ) @@ -101,14 +114,14 @@ def test_service_info_rejects_non_matching_updates(self): 0, 0, 80, - 'ASH-2.local.', + "ASH-2.local.", ), None, ) ], ) - assert info.server_key == 'ash-2.local.' - assert info.server == 'ASH-2.local.' + assert info.server_key == "ash-2.local." + assert info.server == "ASH-2.local." new_address = socket.inet_aton("10.0.1.3") info.async_update_records( zc, @@ -116,7 +129,7 @@ def test_service_info_rejects_non_matching_updates(self): [ RecordUpdate( r.DNSAddress( - 'ASH-2.local.', + "ASH-2.local.", const._TYPE_A, const._CLASS_IN | const._CLASS_UNIQUE, ttl, @@ -138,7 +151,7 @@ def test_service_info_rejects_non_matching_updates(self): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, ttl, - b'\x04ff=0\x04ci=3\x04sf=0\x0bsh=6fLM5A==', + b"\x04ff=0\x04ci=3\x04sf=0\x0bsh=6fLM5A==", ), None, ) @@ -158,14 +171,14 @@ def test_service_info_rejects_non_matching_updates(self): 0, 0, 80, - 'ASH-2.local.', + "ASH-2.local.", ), None, ) ], ) - assert info.server_key == 'ash-2.local.' - assert info.server == 'ASH-2.local.' + assert info.server_key == "ash-2.local." + assert info.server == "ASH-2.local." 
new_address = socket.inet_aton("10.0.1.4") info.async_update_records( zc, @@ -188,16 +201,23 @@ def test_service_info_rejects_non_matching_updates(self): def test_service_info_rejects_expired_records(self): """Verify records that are expired are rejected.""" - zc = r.Zeroconf(interfaces=['127.0.0.1']) - desc = {'path': '/~paulsm/'} - service_name = 'name._type._tcp.local.' - service_type = '_type._tcp.local.' - service_server = 'ash-1.local.' + zc = r.Zeroconf(interfaces=["127.0.0.1"]) + desc = {"path": "/~paulsm/"} + service_name = "name._type._tcp.local." + service_type = "_type._tcp.local." + service_server = "ash-1.local." service_address = socket.inet_aton("10.0.1.2") ttl = 120 now = r.current_time_millis() info = ServiceInfo( - service_type, service_name, 22, 0, 0, desc, service_server, addresses=[service_address] + service_type, + service_name, + 22, + 0, + 0, + desc, + service_server, + addresses=[service_address], ) # Matching updates info.async_update_records( @@ -210,7 +230,7 @@ def test_service_info_rejects_expired_records(self): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, ttl, - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==', + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", ), None, ) @@ -223,31 +243,31 @@ def test_service_info_rejects_expired_records(self): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, ttl, - b'\x04ff=0\x04ci=3\x04sf=0\x0bsh=6fLM5A==', + b"\x04ff=0\x04ci=3\x04sf=0\x0bsh=6fLM5A==", ) - expired_record.set_created_ttl(1000, 1) + zc.cache._async_set_created_ttl(expired_record, 1000, 1) info.async_update_records(zc, now, [RecordUpdate(expired_record, None)]) assert info.properties[b"ci"] == b"2" zc.close() - @unittest.skipIf(not has_working_ipv6(), 'Requires IPv6') - @unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled') + @unittest.skipIf(not has_working_ipv6(), "Requires IPv6") + @unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled") def test_get_info_partial(self): - zc = 
r.Zeroconf(interfaces=['127.0.0.1']) - - service_name = 'name._type._tcp.local.' - service_type = '_type._tcp.local.' - service_server = 'ash-1.local.' - service_text = b'path=/~matt1/' - service_address = '10.0.1.2' - service_address_v6_ll = 'fe80::52e:c2f2:bc5f:e9c6' + zc = r.Zeroconf(interfaces=["127.0.0.1"]) + + service_name = "name._type._tcp.local." + service_type = "_type._tcp.local." + service_server = "ash-1.local." + service_text = b"path=/~matt1/" + service_address = "10.0.1.2" + service_address_v6_ll = "fe80::52e:c2f2:bc5f:e9c6" service_scope_id = 12 service_info = None send_event = Event() service_info_event = Event() - last_sent: Optional[r.DNSOutgoing] = None + last_sent: r.DNSOutgoing | None = None def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): """Sends an outgoing packet.""" @@ -275,7 +295,8 @@ def get_service_info_helper(zc, type, name): try: ttl = 120 helper_thread = threading.Thread( - target=get_service_info_helper, args=(zc, service_type, service_name) + target=get_service_info_helper, + args=(zc, service_type, service_name), ) helper_thread.start() wait_time = 1 @@ -377,19 +398,19 @@ def get_service_info_helper(zc, type, name): zc.remove_all_service_listeners() zc.close() - @unittest.skipIf(not has_working_ipv6(), 'Requires IPv6') - @unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled') + @unittest.skipIf(not has_working_ipv6(), "Requires IPv6") + @unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled") def test_get_info_suppressed_by_question_history(self): - zc = r.Zeroconf(interfaces=['127.0.0.1']) + zc = r.Zeroconf(interfaces=["127.0.0.1"]) - service_name = 'name._type._tcp.local.' - service_type = '_type._tcp.local.' + service_name = "name._type._tcp.local." + service_type = "_type._tcp.local." 
service_info = None send_event = Event() service_info_event = Event() - last_sent: Optional[r.DNSOutgoing] = None + last_sent: r.DNSOutgoing | None = None def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): """Sends an outgoing packet.""" @@ -416,7 +437,8 @@ def get_service_info_helper(zc, type, name): try: helper_thread = threading.Thread( - target=get_service_info_helper, args=(zc, service_type, service_name) + target=get_service_info_helper, + args=(zc, service_type, service_name), ) helper_thread.start() wait_time = (const._LISTENER_TIME + info._AVOID_SYNC_DELAY_RANDOM_INTERVAL[1] + 5) / 1000 @@ -441,13 +463,19 @@ def get_service_info_helper(zc, type, name): ) # Wait long enough to be inside the question history window now = r.current_time_millis() zc.question_history.add_question_at_time( - r.DNSQuestion(service_name, const._TYPE_A, const._CLASS_IN), now, set() + r.DNSQuestion(service_name, const._TYPE_A, const._CLASS_IN), + now, + set(), ) zc.question_history.add_question_at_time( - r.DNSQuestion(service_name, const._TYPE_AAAA, const._CLASS_IN), now, set() + r.DNSQuestion(service_name, const._TYPE_AAAA, const._CLASS_IN), + now, + set(), ) zc.question_history.add_question_at_time( - r.DNSQuestion(service_name, const._TYPE_TXT, const._CLASS_IN), now, set() + r.DNSQuestion(service_name, const._TYPE_TXT, const._CLASS_IN), + now, + set(), ) send_event.wait(wait_time * 0.25) assert last_sent is not None @@ -467,16 +495,24 @@ def get_service_info_helper(zc, type, name): ) # Wait long enough to be inside the question history window now = r.current_time_millis() zc.question_history.add_question_at_time( - r.DNSQuestion(service_name, const._TYPE_A, const._CLASS_IN), now, set() + r.DNSQuestion(service_name, const._TYPE_A, const._CLASS_IN), + now, + set(), ) zc.question_history.add_question_at_time( - r.DNSQuestion(service_name, const._TYPE_AAAA, const._CLASS_IN), now, set() + r.DNSQuestion(service_name, const._TYPE_AAAA, const._CLASS_IN), + 
now, + set(), ) zc.question_history.add_question_at_time( - r.DNSQuestion(service_name, const._TYPE_TXT, const._CLASS_IN), now, set() + r.DNSQuestion(service_name, const._TYPE_TXT, const._CLASS_IN), + now, + set(), ) zc.question_history.add_question_at_time( - r.DNSQuestion(service_name, const._TYPE_SRV, const._CLASS_IN), now, set() + r.DNSQuestion(service_name, const._TYPE_SRV, const._CLASS_IN), + now, + set(), ) send_event.wait(wait_time * 0.25) # All questions are suppressed so no query should be sent @@ -489,19 +525,19 @@ def get_service_info_helper(zc, type, name): zc.close() def test_get_info_single(self): - zc = r.Zeroconf(interfaces=['127.0.0.1']) + zc = r.Zeroconf(interfaces=["127.0.0.1"]) - service_name = 'name._type._tcp.local.' - service_type = '_type._tcp.local.' - service_server = 'ash-1.local.' - service_text = b'path=/~matt1/' - service_address = '10.0.1.2' + service_name = "name._type._tcp.local." + service_type = "_type._tcp.local." + service_server = "ash-1.local." + service_text = b"path=/~matt1/" + service_address = "10.0.1.2" service_info = None send_event = Event() service_info_event = Event() - last_sent = None # type: Optional[r.DNSOutgoing] + last_sent: r.DNSOutgoing | None = None def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): """Sends an outgoing packet.""" @@ -529,12 +565,13 @@ def get_service_info_helper(zc, type, name): try: ttl = 120 helper_thread = threading.Thread( - target=get_service_info_helper, args=(zc, service_type, service_name) + target=get_service_info_helper, + args=(zc, service_type, service_name), ) helper_thread.start() wait_time = 1 - # Expext query for SRV, TXT, A, AAAA + # Expect query for SRV, TXT, A, AAAA send_event.wait(wait_time) assert last_sent is not None assert len(last_sent.questions) == 4 @@ -544,7 +581,7 @@ def get_service_info_helper(zc, type, name): assert r.DNSQuestion(service_name, const._TYPE_AAAA, const._CLASS_IN) in last_sent.questions assert service_info is None - # 
Expext no further queries + # Expect no further queries last_sent = None send_event.clear() _inject_response( @@ -590,16 +627,23 @@ def get_service_info_helper(zc, type, name): def test_service_info_duplicate_properties_txt_records(self): """Verify the first property is always used when there are duplicates in a txt record.""" - zc = r.Zeroconf(interfaces=['127.0.0.1']) - desc = {'path': '/~paulsm/'} - service_name = 'name._type._tcp.local.' - service_type = '_type._tcp.local.' - service_server = 'ash-1.local.' + zc = r.Zeroconf(interfaces=["127.0.0.1"]) + desc = {"path": "/~paulsm/"} + service_name = "name._type._tcp.local." + service_type = "_type._tcp.local." + service_server = "ash-1.local." service_address = socket.inet_aton("10.0.1.2") ttl = 120 now = r.current_time_millis() info = ServiceInfo( - service_type, service_name, 22, 0, 0, desc, service_server, addresses=[service_address] + service_type, + service_name, + 22, + 0, + 0, + desc, + service_server, + addresses=[service_address], ) info.async_update_records( zc, @@ -611,7 +655,7 @@ def test_service_info_duplicate_properties_txt_records(self): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, ttl, - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==\x04dd=0\x04jl=2\x04qq=0\x0brr=6fLM5A==\x04ci=3', + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==\x04dd=0\x04jl=2\x04qq=0\x0brr=6fLM5A==\x04ci=3", ), None, ) @@ -625,13 +669,22 @@ def test_service_info_duplicate_properties_txt_records(self): def test_multiple_addresses(): type_ = "_http._tcp.local." 
- registration_name = "xxxyyy.%s" % type_ - desc = {'path': '/~paulsm/'} + registration_name = f"xxxyyy.{type_}" + desc = {"path": "/~paulsm/"} address_parsed = "10.0.1.2" address = socket.inet_aton(address_parsed) # New kwarg way - info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address, address]) + info = ServiceInfo( + type_, + registration_name, + 80, + 0, + 0, + desc, + "ash-2.local.", + addresses=[address, address], + ) assert info.addresses == [address, address] assert info.parsed_addresses() == [address_parsed, address_parsed] @@ -650,9 +703,8 @@ def test_multiple_addresses(): assert info.addresses == [address, address] assert info.parsed_addresses() == [address_parsed, address_parsed] assert info.parsed_scoped_addresses() == [address_parsed, address_parsed] - ipaddress_supports_scope_id = sys.version_info >= (3, 9, 0) - if has_working_ipv6() and not os.environ.get('SKIP_IPV6'): + if has_working_ipv6() and not os.environ.get("SKIP_IPV6"): address_v6_parsed = "2001:db8::1" address_v6 = socket.inet_pton(socket.AF_INET6, address_v6_parsed) address_v6_ll_parsed = "fe80::52e:c2f2:bc5f:e9c6" @@ -679,49 +731,62 @@ def test_multiple_addresses(): 0, desc, "ash-2.local.", - parsed_addresses=[address_parsed, address_v6_parsed, address_v6_ll_parsed], + parsed_addresses=[ + address_parsed, + address_v6_parsed, + address_v6_ll_parsed, + ], interface_index=interface_index, ), ] for info in infos: assert info.addresses == [address] - assert info.addresses_by_version(r.IPVersion.All) == [address, address_v6, address_v6_ll] + assert info.addresses_by_version(r.IPVersion.All) == [ + address, + address_v6, + address_v6_ll, + ] assert info.ip_addresses_by_version(r.IPVersion.All) == [ ip_address(address), ip_address(address_v6), - ip_address(address_v6_ll_scoped_parsed) - if ipaddress_supports_scope_id - else ip_address(address_v6_ll), + ip_address(address_v6_ll_scoped_parsed), ] assert info.addresses_by_version(r.IPVersion.V4Only) == 
[address] assert info.ip_addresses_by_version(r.IPVersion.V4Only) == [ip_address(address)] - assert info.addresses_by_version(r.IPVersion.V6Only) == [address_v6, address_v6_ll] + assert info.addresses_by_version(r.IPVersion.V6Only) == [ + address_v6, + address_v6_ll, + ] assert info.ip_addresses_by_version(r.IPVersion.V6Only) == [ ip_address(address_v6), - ip_address(address_v6_ll_scoped_parsed) - if ipaddress_supports_scope_id - else ip_address(address_v6_ll), + ip_address(address_v6_ll_scoped_parsed), + ] + assert info.parsed_addresses() == [ + address_parsed, + address_v6_parsed, + address_v6_ll_parsed, ] - assert info.parsed_addresses() == [address_parsed, address_v6_parsed, address_v6_ll_parsed] assert info.parsed_addresses(r.IPVersion.V4Only) == [address_parsed] - assert info.parsed_addresses(r.IPVersion.V6Only) == [address_v6_parsed, address_v6_ll_parsed] + assert info.parsed_addresses(r.IPVersion.V6Only) == [ + address_v6_parsed, + address_v6_ll_parsed, + ] assert info.parsed_scoped_addresses() == [ address_parsed, address_v6_parsed, - address_v6_ll_scoped_parsed if ipaddress_supports_scope_id else address_v6_ll_parsed, + address_v6_ll_scoped_parsed, ] assert info.parsed_scoped_addresses(r.IPVersion.V4Only) == [address_parsed] assert info.parsed_scoped_addresses(r.IPVersion.V6Only) == [ address_v6_parsed, - address_v6_ll_scoped_parsed if ipaddress_supports_scope_id else address_v6_ll_parsed, + address_v6_ll_scoped_parsed, ] -@unittest.skipIf(sys.version_info < (3, 9, 0), 'Requires newer python') def test_scoped_addresses_from_cache(): type_ = "_http._tcp.local." registration_name = f"scoped.{type_}" - zeroconf = r.Zeroconf(interfaces=['127.0.0.1']) + zeroconf = r.Zeroconf(interfaces=["127.0.0.1"]) host = "scoped.local." 
zeroconf.cache.async_add_records( @@ -768,32 +833,32 @@ def test_scoped_addresses_from_cache(): async def test_multiple_a_addresses_newest_address_first(): """Test that info.addresses returns the newest seen address first.""" type_ = "_http._tcp.local." - registration_name = "multiarec.%s" % type_ - desc = {'path': '/~paulsm/'} - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + registration_name = f"multiarec.{type_}" + desc = {"path": "/~paulsm/"} + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) cache = aiozc.zeroconf.cache host = "multahost.local." - record1 = r.DNSAddress(host, const._TYPE_A, const._CLASS_IN, 1000, b'\x7f\x00\x00\x01') - record2 = r.DNSAddress(host, const._TYPE_A, const._CLASS_IN, 1000, b'\x7f\x00\x00\x02') + record1 = r.DNSAddress(host, const._TYPE_A, const._CLASS_IN, 1000, b"\x7f\x00\x00\x01") + record2 = r.DNSAddress(host, const._TYPE_A, const._CLASS_IN, 1000, b"\x7f\x00\x00\x02") cache.async_add_records([record1, record2]) # New kwarg way info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, host) info.load_from_cache(aiozc.zeroconf) - assert info.addresses == [b'\x7f\x00\x00\x02', b'\x7f\x00\x00\x01'] + assert info.addresses == [b"\x7f\x00\x00\x02", b"\x7f\x00\x00\x01"] await aiozc.async_close() @pytest.mark.asyncio async def test_invalid_a_addresses(caplog): type_ = "_http._tcp.local." - registration_name = "multiarec.%s" % type_ - desc = {'path': '/~paulsm/'} - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + registration_name = f"multiarec.{type_}" + desc = {"path": "/~paulsm/"} + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) cache = aiozc.zeroconf.cache host = "multahost.local." 
- record1 = r.DNSAddress(host, const._TYPE_A, const._CLASS_IN, 1000, b'a') - record2 = r.DNSAddress(host, const._TYPE_A, const._CLASS_IN, 1000, b'b') + record1 = r.DNSAddress(host, const._TYPE_A, const._CLASS_IN, 1000, b"a") + record2 = r.DNSAddress(host, const._TYPE_A, const._CLASS_IN, 1000, b"b") cache.async_add_records([record1, record2]) # New kwarg way @@ -805,11 +870,11 @@ async def test_invalid_a_addresses(caplog): await aiozc.async_close() -@unittest.skipIf(not has_working_ipv6(), 'Requires IPv6') -@unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled') +@unittest.skipIf(not has_working_ipv6(), "Requires IPv6") +@unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled") def test_filter_address_by_type_from_service_info(): """Verify dns_addresses can filter by ipversion.""" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} type_ = "_homeassistant._tcp.local." name = "MyTestHome" registration_name = f"{name}.{type_}" @@ -817,11 +882,14 @@ def test_filter_address_by_type_from_service_info(): ipv6 = socket.inet_pton(socket.AF_INET6, "2001:db8::1") info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[ipv4, ipv6]) - def dns_addresses_to_addresses(dns_address: List[DNSAddress]) -> List[bytes]: + def dns_addresses_to_addresses(dns_address: list[DNSAddress]) -> list[bytes]: return [address.address for address in dns_address] assert dns_addresses_to_addresses(info.dns_addresses()) == [ipv4, ipv6] - assert dns_addresses_to_addresses(info.dns_addresses(version=r.IPVersion.All)) == [ipv4, ipv6] + assert dns_addresses_to_addresses(info.dns_addresses(version=r.IPVersion.All)) == [ + ipv4, + ipv6, + ] assert dns_addresses_to_addresses(info.dns_addresses(version=r.IPVersion.V4Only)) == [ipv4] assert dns_addresses_to_addresses(info.dns_addresses(version=r.IPVersion.V6Only)) == [ipv6] @@ -832,11 +900,11 @@ def test_changing_name_updates_serviceinfo_key(): name = "MyTestHome" info_service = ServiceInfo( 
type_, - f'{name}.{type_}', + f"{name}.{type_}", 80, 0, 0, - {'path': '/~paulsm/'}, + {"path": "/~paulsm/"}, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")], ) @@ -854,11 +922,11 @@ def test_serviceinfo_address_updates(): with pytest.raises(TypeError): info_service = ServiceInfo( type_, - f'{name}.{type_}', + f"{name}.{type_}", 80, 0, 0, - {'path': '/~paulsm/'}, + {"path": "/~paulsm/"}, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")], parsed_addresses=["10.0.1.2"], @@ -866,11 +934,11 @@ def test_serviceinfo_address_updates(): info_service = ServiceInfo( type_, - f'{name}.{type_}', + f"{name}.{type_}", 80, 0, 0, - {'path': '/~paulsm/'}, + {"path": "/~paulsm/"}, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")], ) @@ -885,48 +953,55 @@ def test_serviceinfo_accepts_bytes_or_string_dict(): addresses = [socket.inet_aton("10.0.1.2")] server_name = "ash-2.local." info_service = ServiceInfo( - type_, f'{name}.{type_}', 80, 0, 0, {b'path': b'/~paulsm/'}, server_name, addresses=addresses + type_, + f"{name}.{type_}", + 80, + 0, + 0, + {b"path": b"/~paulsm/"}, + server_name, + addresses=addresses, ) - assert info_service.dns_text().text == b'\x0epath=/~paulsm/' + assert info_service.dns_text().text == b"\x0epath=/~paulsm/" info_service = ServiceInfo( type_, - f'{name}.{type_}', + f"{name}.{type_}", 80, 0, 0, - {'path': '/~paulsm/'}, + {"path": "/~paulsm/"}, server_name, addresses=addresses, ) - assert info_service.dns_text().text == b'\x0epath=/~paulsm/' + assert info_service.dns_text().text == b"\x0epath=/~paulsm/" info_service = ServiceInfo( type_, - f'{name}.{type_}', + f"{name}.{type_}", 80, 0, 0, - {b'path': '/~paulsm/'}, + {b"path": "/~paulsm/"}, server_name, addresses=addresses, ) - assert info_service.dns_text().text == b'\x0epath=/~paulsm/' + assert info_service.dns_text().text == b"\x0epath=/~paulsm/" info_service = ServiceInfo( type_, - f'{name}.{type_}', + f"{name}.{type_}", 80, 0, 0, - {'path': b'/~paulsm/'}, + {"path": b"/~paulsm/"}, 
server_name, addresses=addresses, ) - assert info_service.dns_text().text == b'\x0epath=/~paulsm/' + assert info_service.dns_text().text == b"\x0epath=/~paulsm/" def test_asking_qu_questions(): - """Verify explictly asking QU questions.""" + """Verify explicitly asking QU questions.""" type_ = "_quservice._tcp.local." - zeroconf = r.Zeroconf(interfaces=['127.0.0.1']) + zeroconf = r.Zeroconf(interfaces=["127.0.0.1"]) # we are going to patch the zeroconf send to check query transmission old_send = zeroconf.async_send @@ -948,9 +1023,9 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT): def test_asking_qm_questions(): - """Verify explictly asking QM questions.""" + """Verify explicitly asking QM questions.""" type_ = "_quservice._tcp.local." - zeroconf = r.Zeroconf(interfaces=['127.0.0.1']) + zeroconf = r.Zeroconf(interfaces=["127.0.0.1"]) # we are going to patch the zeroconf send to check query transmission old_send = zeroconf.async_send @@ -973,7 +1048,7 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT): def test_request_timeout(): """Test that the timeout does not throw an exception and finishes close to the actual timeout.""" - zeroconf = r.Zeroconf(interfaces=['127.0.0.1']) + zeroconf = r.Zeroconf(interfaces=["127.0.0.1"]) start_time = r.current_time_millis() assert zeroconf.get_service_info("_notfound.local.", "notthere._notfound.local.") is None end_time = r.current_time_millis() @@ -987,7 +1062,7 @@ def test_request_timeout(): async def test_we_try_four_times_with_random_delay(): """Verify we try four times even with the random delay.""" type_ = "_typethatisnothere._tcp.local." 
- aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) # we are going to patch the zeroconf send to check query transmission request_count = 0 @@ -1010,9 +1085,9 @@ def async_send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT): async def test_release_wait_when_new_recorded_added(): """Test that async_request returns as soon as new matching records are added to the cache.""" type_ = "_http._tcp.local." - registration_name = "multiarec.%s" % type_ - desc = {'path': '/~paulsm/'} - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + registration_name = f"multiarec.{type_}" + desc = {"path": "/~paulsm/"} + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) host = "multahost.local." # New kwarg way @@ -1049,7 +1124,7 @@ async def test_release_wait_when_new_recorded_added(): const._TYPE_A, const._CLASS_IN, 10000, - b'\x7f\x00\x00\x01', + b"\x7f\x00\x00\x01", ), 0, ) @@ -1059,7 +1134,7 @@ async def test_release_wait_when_new_recorded_added(): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, 10000, - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==', + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", ), 0, ) @@ -1067,7 +1142,7 @@ async def test_release_wait_when_new_recorded_added(): await asyncio.sleep(0) aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0])) assert await asyncio.wait_for(task, timeout=2) - assert info.addresses == [b'\x7f\x00\x00\x01'] + assert info.addresses == [b"\x7f\x00\x00\x01"] await aiozc.async_close() @@ -1075,9 +1150,9 @@ async def test_release_wait_when_new_recorded_added(): async def test_port_changes_are_seen(): """Test that port changes are seen by async_request.""" type_ = "_http._tcp.local." 
- registration_name = "multiarec.%s" % type_ - desc = {'path': '/~paulsm/'} - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + registration_name = f"multiarec.{type_}" + desc = {"path": "/~paulsm/"} + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) host = "multahost.local." # New kwarg way @@ -1112,7 +1187,7 @@ async def test_port_changes_are_seen(): const._TYPE_A, const._CLASS_IN, 10000, - b'\x7f\x00\x00\x01', + b"\x7f\x00\x00\x01", ), 0, ) @@ -1122,7 +1197,7 @@ async def test_port_changes_are_seen(): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, 10000, - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==', + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", ), 0, ) @@ -1158,9 +1233,9 @@ async def test_port_changes_are_seen(): async def test_port_changes_are_seen_with_directed_request(): """Test that port changes are seen by async_request with a directed request.""" type_ = "_http._tcp.local." - registration_name = "multiarec.%s" % type_ - desc = {'path': '/~paulsm/'} - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + registration_name = f"multiarec.{type_}" + desc = {"path": "/~paulsm/"} + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) host = "multahost.local." # New kwarg way @@ -1195,7 +1270,7 @@ async def test_port_changes_are_seen_with_directed_request(): const._TYPE_A, const._CLASS_IN, 10000, - b'\x7f\x00\x00\x01', + b"\x7f\x00\x00\x01", ), 0, ) @@ -1205,7 +1280,7 @@ async def test_port_changes_are_seen_with_directed_request(): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, 10000, - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==', + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", ), 0, ) @@ -1241,8 +1316,8 @@ async def test_port_changes_are_seen_with_directed_request(): async def test_ipv4_changes_are_seen(): """Test that ipv4 changes are seen by async_request.""" type_ = "_http._tcp.local." 
- registration_name = "multiaipv4rec.%s" % type_ - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + registration_name = f"multiaipv4rec.{type_}" + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) host = "multahost.local." # New kwarg way @@ -1277,7 +1352,7 @@ async def test_ipv4_changes_are_seen(): const._TYPE_A, const._CLASS_IN, 10000, - b'\x7f\x00\x00\x01', + b"\x7f\x00\x00\x01", ), 0, ) @@ -1287,7 +1362,7 @@ async def test_ipv4_changes_are_seen(): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, 10000, - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==', + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", ), 0, ) @@ -1296,7 +1371,7 @@ async def test_ipv4_changes_are_seen(): aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0])) info = ServiceInfo(type_, registration_name) info.load_from_cache(aiozc.zeroconf) - assert info.addresses_by_version(IPVersion.V4Only) == [b'\x7f\x00\x00\x01'] + assert info.addresses_by_version(IPVersion.V4Only) == [b"\x7f\x00\x00\x01"] generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) generated.add_answer_at_time( @@ -1305,7 +1380,7 @@ async def test_ipv4_changes_are_seen(): const._TYPE_A, const._CLASS_IN, 10000, - b'\x7f\x00\x00\x02', + b"\x7f\x00\x00\x02", ), 0, ) @@ -1313,9 +1388,15 @@ async def test_ipv4_changes_are_seen(): info = ServiceInfo(type_, registration_name) info.load_from_cache(aiozc.zeroconf) - assert info.addresses_by_version(IPVersion.V4Only) == [b'\x7f\x00\x00\x02', b'\x7f\x00\x00\x01'] + assert info.addresses_by_version(IPVersion.V4Only) == [ + b"\x7f\x00\x00\x02", + b"\x7f\x00\x00\x01", + ] await info.async_request(aiozc.zeroconf, timeout=200) - assert info.addresses_by_version(IPVersion.V4Only) == [b'\x7f\x00\x00\x02', b'\x7f\x00\x00\x01'] + assert info.addresses_by_version(IPVersion.V4Only) == [ + b"\x7f\x00\x00\x02", + b"\x7f\x00\x00\x01", + ] await aiozc.async_close() @@ -1323,8 +1404,8 @@ async def test_ipv4_changes_are_seen(): async def test_ipv6_changes_are_seen(): 
"""Test that ipv6 changes are seen by async_request.""" type_ = "_http._tcp.local." - registration_name = "multiaipv6rec.%s" % type_ - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + registration_name = f"multiaipv6rec.{type_}" + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) host = "multahost.local." # New kwarg way @@ -1359,7 +1440,7 @@ async def test_ipv6_changes_are_seen(): const._TYPE_AAAA, const._CLASS_IN, 10000, - b'\xde\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', + b"\xde\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", ), 0, ) @@ -1369,7 +1450,7 @@ async def test_ipv6_changes_are_seen(): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, 10000, - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==', + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", ), 0, ) @@ -1379,7 +1460,11 @@ async def test_ipv6_changes_are_seen(): info = ServiceInfo(type_, registration_name) info.load_from_cache(aiozc.zeroconf) assert info.addresses_by_version(IPVersion.V6Only) == [ - b'\xde\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b"\xde\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + ] + info.load_from_cache(aiozc.zeroconf) + assert info.addresses_by_version(IPVersion.V6Only) == [ + b"\xde\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" ] generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) @@ -1389,7 +1474,7 @@ async def test_ipv6_changes_are_seen(): const._TYPE_AAAA, const._CLASS_IN, 10000, - b'\x00\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', + b"\x00\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", ), 0, ) @@ -1398,14 +1483,15 @@ async def test_ipv6_changes_are_seen(): info = ServiceInfo(type_, registration_name) info.load_from_cache(aiozc.zeroconf) assert info.addresses_by_version(IPVersion.V6Only) == [ - b'\x00\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', - b'\xde\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', + 
b"\x00\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", + b"\xde\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", ] await info.async_request(aiozc.zeroconf, timeout=200) assert info.addresses_by_version(IPVersion.V6Only) == [ - b'\x00\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', - b'\xde\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', + b"\x00\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", + b"\xde\xad\xbe\xef\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", ] + await aiozc.async_close() @@ -1413,8 +1499,8 @@ async def test_ipv6_changes_are_seen(): async def test_bad_ip_addresses_ignored_in_cache(): """Test that bad ip address in the cache are ignored async_request.""" type_ = "_http._tcp.local." - registration_name = "multiarec.%s" % type_ - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + registration_name = f"multiarec.{type_}" + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) host = "multahost.local." # New kwarg way @@ -1438,7 +1524,7 @@ async def test_bad_ip_addresses_ignored_in_cache(): const._TYPE_A, const._CLASS_IN, 10000, - b'\x7f\x00\x00\x01', + b"\x7f\x00\x00\x01", ), 0, ) @@ -1448,27 +1534,27 @@ async def test_bad_ip_addresses_ignored_in_cache(): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, 10000, - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==', + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", ), 0, ) # Manually add a bad record to the cache - aiozc.zeroconf.cache.async_add_records([DNSAddress(host, const._TYPE_A, const._CLASS_IN, 10000, b'\x00')]) + aiozc.zeroconf.cache.async_add_records([DNSAddress(host, const._TYPE_A, const._CLASS_IN, 10000, b"\x00")]) await aiozc.zeroconf.async_wait_for_start() await asyncio.sleep(0) aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0])) info = ServiceInfo(type_, registration_name) info.load_from_cache(aiozc.zeroconf) - assert info.addresses_by_version(IPVersion.V4Only) == [b'\x7f\x00\x00\x01'] + 
assert info.addresses_by_version(IPVersion.V4Only) == [b"\x7f\x00\x00\x01"] @pytest.mark.asyncio async def test_service_name_change_as_seen_has_ip_in_cache(): """Test that service name changes are seen by async_request when the ip is in the cache.""" type_ = "_http._tcp.local." - registration_name = "multiarec.%s" % type_ - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + registration_name = f"multiarec.{type_}" + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) host = "multahost.local." # New kwarg way @@ -1490,7 +1576,7 @@ async def test_service_name_change_as_seen_has_ip_in_cache(): const._TYPE_A, const._CLASS_IN, 10000, - b'\x7f\x00\x00\x01', + b"\x7f\x00\x00\x01", ), 0, ) @@ -1500,7 +1586,7 @@ async def test_service_name_change_as_seen_has_ip_in_cache(): const._TYPE_A, const._CLASS_IN, 10000, - b'\x7f\x00\x00\x02', + b"\x7f\x00\x00\x02", ), 0, ) @@ -1510,7 +1596,7 @@ async def test_service_name_change_as_seen_has_ip_in_cache(): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, 10000, - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==', + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", ), 0, ) @@ -1540,7 +1626,7 @@ async def test_service_name_change_as_seen_has_ip_in_cache(): info = ServiceInfo(type_, registration_name) await info.async_request(aiozc.zeroconf, timeout=200) - assert info.addresses_by_version(IPVersion.V4Only) == [b'\x7f\x00\x00\x02'] + assert info.addresses_by_version(IPVersion.V4Only) == [b"\x7f\x00\x00\x02"] await aiozc.async_close() @@ -1549,8 +1635,8 @@ async def test_service_name_change_as_seen_has_ip_in_cache(): async def test_service_name_change_as_seen_ip_not_in_cache(): """Test that service name changes are seen by async_request when the ip is not in the cache.""" type_ = "_http._tcp.local." - registration_name = "multiarec.%s" % type_ - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + registration_name = f"multiarec.{type_}" + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) host = "multahost.local." 
# New kwarg way @@ -1572,7 +1658,7 @@ async def test_service_name_change_as_seen_ip_not_in_cache(): const._TYPE_A, const._CLASS_IN, 10000, - b'\x7f\x00\x00\x01', + b"\x7f\x00\x00\x01", ), 0, ) @@ -1582,7 +1668,7 @@ async def test_service_name_change_as_seen_ip_not_in_cache(): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, 10000, - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==', + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", ), 0, ) @@ -1614,7 +1700,7 @@ async def test_service_name_change_as_seen_ip_not_in_cache(): const._TYPE_A, const._CLASS_IN, 10000, - b'\x7f\x00\x00\x02', + b"\x7f\x00\x00\x02", ), 0, ) @@ -1622,7 +1708,7 @@ async def test_service_name_change_as_seen_ip_not_in_cache(): info = ServiceInfo(type_, registration_name) await info.async_request(aiozc.zeroconf, timeout=200) - assert info.addresses_by_version(IPVersion.V4Only) == [b'\x7f\x00\x00\x02'] + assert info.addresses_by_version(IPVersion.V4Only) == [b"\x7f\x00\x00\x02"] await aiozc.async_close() @@ -1632,9 +1718,9 @@ async def test_service_name_change_as_seen_ip_not_in_cache(): async def test_release_wait_when_new_recorded_added_concurrency(): """Test that concurrent async_request returns as soon as new matching records are added to the cache.""" type_ = "_http._tcp.local." - registration_name = "multiareccon.%s" % type_ - desc = {'path': '/~paulsm/'} - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + registration_name = f"multiareccon.{type_}" + desc = {"path": "/~paulsm/"} + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) host = "multahostcon.local." 
await aiozc.zeroconf.async_wait_for_start() @@ -1675,7 +1761,7 @@ async def test_release_wait_when_new_recorded_added_concurrency(): const._TYPE_A, const._CLASS_IN, 10000, - b'\x7f\x00\x00\x01', + b"\x7f\x00\x00\x01", ), 0, ) @@ -1685,7 +1771,7 @@ async def test_release_wait_when_new_recorded_added_concurrency(): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, 10000, - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==', + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", ), 0, ) @@ -1695,7 +1781,7 @@ async def test_release_wait_when_new_recorded_added_concurrency(): aiozc.zeroconf.record_manager.async_updates_from_response(r.DNSIncoming(generated.packets()[0])) _, pending = await asyncio.wait(tasks, timeout=2) assert not pending - assert info.addresses == [b'\x7f\x00\x00\x01'] + assert info.addresses == [b"\x7f\x00\x00\x01"] await aiozc.async_close() @@ -1703,8 +1789,8 @@ async def test_release_wait_when_new_recorded_added_concurrency(): async def test_service_info_nsec_records(): """Test we can generate nsec records from ServiceInfo.""" type_ = "_http._tcp.local." - registration_name = "multiareccon.%s" % type_ - desc = {'path': '/~paulsm/'} + registration_name = f"multiareccon.{type_}" + desc = {"path": "/~paulsm/"} host = "multahostcon.local." 
info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, host) nsec_record = info.dns_nsec([const._TYPE_A, const._TYPE_AAAA], 50) @@ -1712,3 +1798,97 @@ async def test_service_info_nsec_records(): assert nsec_record.type == const._TYPE_NSEC assert nsec_record.ttl == 50 assert nsec_record.rdtypes == [const._TYPE_A, const._TYPE_AAAA] + + +@pytest.mark.asyncio +async def test_address_resolver(): + """Test that the address resolver works.""" + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) + await aiozc.zeroconf.async_wait_for_start() + resolver = r.AddressResolver("address_resolver_test.local.") + resolve_task = asyncio.create_task(resolver.async_request(aiozc.zeroconf, 3000)) + outgoing = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) + outgoing.add_answer_at_time( + r.DNSAddress( + "address_resolver_test.local.", + const._TYPE_A, + const._CLASS_IN, + 10000, + b"\x7f\x00\x00\x01", + ), + 0, + ) + + aiozc.zeroconf.async_send(outgoing) + assert await resolve_task + assert resolver.addresses == [b"\x7f\x00\x00\x01"] + + +@pytest.mark.asyncio +async def test_address_resolver_ipv4(): + """Test that the IPv4 address resolver works.""" + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) + await aiozc.zeroconf.async_wait_for_start() + resolver = r.AddressResolverIPv4("address_resolver_test_ipv4.local.") + resolve_task = asyncio.create_task(resolver.async_request(aiozc.zeroconf, 3000)) + outgoing = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) + outgoing.add_answer_at_time( + r.DNSAddress( + "address_resolver_test_ipv4.local.", + const._TYPE_A, + const._CLASS_IN, + 10000, + b"\x7f\x00\x00\x01", + ), + 0, + ) + + aiozc.zeroconf.async_send(outgoing) + assert await resolve_task + assert resolver.addresses == [b"\x7f\x00\x00\x01"] + + +@pytest.mark.asyncio +@unittest.skipIf(not has_working_ipv6(), "Requires IPv6") +@unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled") +async def test_address_resolver_ipv6(): + """Test that the IPv6 address resolver works.""" + aiozc = 
AsyncZeroconf(interfaces=["127.0.0.1"]) + await aiozc.zeroconf.async_wait_for_start() + resolver = r.AddressResolverIPv6("address_resolver_test_ipv6.local.") + resolve_task = asyncio.create_task(resolver.async_request(aiozc.zeroconf, 3000)) + outgoing = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) + outgoing.add_answer_at_time( + r.DNSAddress( + "address_resolver_test_ipv6.local.", + const._TYPE_AAAA, + const._CLASS_IN, + 10000, + socket.inet_pton(socket.AF_INET6, "fe80::52e:c2f2:bc5f:e9c6"), + ), + 0, + ) + + aiozc.zeroconf.async_send(outgoing) + assert await resolve_task + assert resolver.ip_addresses_by_version(IPVersion.All) == [ip_address("fe80::52e:c2f2:bc5f:e9c6")] + + +@pytest.mark.asyncio +async def test_unicast_flag_if_requested() -> None: + """Verify we try four times even with the random delay.""" + type_ = "_typethatisnothere._tcp.local." + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) + + def async_send(out: DNSOutgoing, addr: str | None = None, port: int = const._MDNS_PORT) -> None: + """Sends an outgoing packet.""" + for question in out.questions: + assert question.unicast + + # patch the zeroconf send + with patch.object(aiozc.zeroconf, "async_send", async_send): + await aiozc.async_get_service_info( + f"willnotbefound.{type_}", type_, question_type=r.DNSQuestionType.QU + ) + + await aiozc.async_close() diff --git a/tests/services/test_registry.py b/tests/services/test_registry.py index f8656e2fa..c3ae3a28b 100644 --- a/tests/services/test_registry.py +++ b/tests/services/test_registry.py @@ -1,8 +1,7 @@ -#!/usr/bin/env python - - """Unit tests for zeroconf._services.registry.""" +from __future__ import annotations + import socket import unittest @@ -16,9 +15,16 @@ def test_only_register_once(self): name = "xxxyyy" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + 
registration_name, + 80, + 0, + 0, + desc, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) registry = r.ServiceRegistry() @@ -34,12 +40,26 @@ def test_register_same_server(self): registration_name = f"{name}.{type_}" registration_name2 = f"{name2}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, "same.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + "same.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) info2 = ServiceInfo( - type_, registration_name2, 80, 0, 0, desc, "same.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name2, + 80, + 0, + 0, + desc, + "same.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) registry = r.ServiceRegistry() registry.async_add(info) @@ -62,9 +82,16 @@ def test_unregister_multiple_times(self): name = "xxxyyy" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) registry = r.ServiceRegistry() @@ -78,9 +105,16 @@ def test_lookups(self): name = "xxxyyy" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) registry = r.ServiceRegistry() @@ -97,9 +131,16 @@ def test_lookups_upper_case_by_lower_case(self): name = "Xxxyyy" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, 
"ASH-2.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + "ASH-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) registry = r.ServiceRegistry() diff --git a/tests/services/test_types.py b/tests/services/test_types.py index 1afe6d530..632922465 100644 --- a/tests/services/test_types.py +++ b/tests/services/test_types.py @@ -1,8 +1,7 @@ -#!/usr/bin/env python - - """Unit tests for zeroconf._services.types.""" +from __future__ import annotations + import logging import os import socket @@ -14,7 +13,7 @@ from .. import _clear_cache, has_working_ipv6 -log = logging.getLogger('zeroconf') +log = logging.getLogger("zeroconf") original_logging_level = logging.NOTSET @@ -34,8 +33,8 @@ def test_integration_with_listener(disable_duplicate_packet_suppression): name = "xxxyyy" registration_name = f"{name}.{type_}" - zeroconf_registrar = Zeroconf(interfaces=['127.0.0.1']) - desc = {'path': '/~paulsm/'} + zeroconf_registrar = Zeroconf(interfaces=["127.0.0.1"]) + desc = {"path": "/~paulsm/"} info = ServiceInfo( type_, registration_name, @@ -48,7 +47,7 @@ def test_integration_with_listener(disable_duplicate_packet_suppression): ) zeroconf_registrar.registry.async_add(info) try: - service_types = ZeroconfServiceTypes.find(interfaces=['127.0.0.1'], timeout=2) + service_types = ZeroconfServiceTypes.find(interfaces=["127.0.0.1"], timeout=2) assert type_ in service_types _clear_cache(zeroconf_registrar) service_types = ZeroconfServiceTypes.find(zc=zeroconf_registrar, timeout=2) @@ -58,16 +57,16 @@ def test_integration_with_listener(disable_duplicate_packet_suppression): zeroconf_registrar.close() -@unittest.skipIf(not has_working_ipv6(), 'Requires IPv6') -@unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled') +@unittest.skipIf(not has_working_ipv6(), "Requires IPv6") +@unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled") def 
test_integration_with_listener_v6_records(disable_duplicate_packet_suppression): type_ = "_test-listenv6rec-type._tcp.local." name = "xxxyyy" registration_name = f"{name}.{type_}" addr = "2606:2800:220:1:248:1893:25c8:1946" # example.com - zeroconf_registrar = Zeroconf(interfaces=['127.0.0.1']) - desc = {'path': '/~paulsm/'} + zeroconf_registrar = Zeroconf(interfaces=["127.0.0.1"]) + desc = {"path": "/~paulsm/"} info = ServiceInfo( type_, registration_name, @@ -80,7 +79,7 @@ def test_integration_with_listener_v6_records(disable_duplicate_packet_suppressi ) zeroconf_registrar.registry.async_add(info) try: - service_types = ZeroconfServiceTypes.find(interfaces=['127.0.0.1'], timeout=2) + service_types = ZeroconfServiceTypes.find(interfaces=["127.0.0.1"], timeout=2) assert type_ in service_types _clear_cache(zeroconf_registrar) service_types = ZeroconfServiceTypes.find(zc=zeroconf_registrar, timeout=2) @@ -90,8 +89,8 @@ def test_integration_with_listener_v6_records(disable_duplicate_packet_suppressi zeroconf_registrar.close() -@unittest.skipIf(not has_working_ipv6() or sys.platform == 'win32', 'Requires IPv6') -@unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled') +@unittest.skipIf(not has_working_ipv6() or sys.platform == "win32", "Requires IPv6") +@unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled") def test_integration_with_listener_ipv6(disable_duplicate_packet_suppression): type_ = "_test-listenv6ip-type._tcp.local." 
name = "xxxyyy" @@ -99,7 +98,7 @@ def test_integration_with_listener_ipv6(disable_duplicate_packet_suppression): addr = "2606:2800:220:1:248:1893:25c8:1946" # example.com zeroconf_registrar = Zeroconf(ip_version=r.IPVersion.V6Only) - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( type_, registration_name, @@ -130,8 +129,8 @@ def test_integration_with_subtype_and_listener(disable_duplicate_packet_suppress discovery_type = f"{subtype_}.{type_}" registration_name = f"{name}.{type_}" - zeroconf_registrar = Zeroconf(interfaces=['127.0.0.1']) - desc = {'path': '/~paulsm/'} + zeroconf_registrar = Zeroconf(interfaces=["127.0.0.1"]) + desc = {"path": "/~paulsm/"} info = ServiceInfo( discovery_type, registration_name, @@ -144,7 +143,7 @@ def test_integration_with_subtype_and_listener(disable_duplicate_packet_suppress ) zeroconf_registrar.registry.async_add(info) try: - service_types = ZeroconfServiceTypes.find(interfaces=['127.0.0.1'], timeout=2) + service_types = ZeroconfServiceTypes.find(interfaces=["127.0.0.1"], timeout=2) assert discovery_type in service_types _clear_cache(zeroconf_registrar) service_types = ZeroconfServiceTypes.find(zc=zeroconf_registrar, timeout=2) diff --git a/tests/test_asyncio.py b/tests/test_asyncio.py index 382b1a3d7..b6e124aad 100644 --- a/tests/test_asyncio.py +++ b/tests/test_asyncio.py @@ -1,8 +1,7 @@ -#!/usr/bin/env python - - """Unit tests for aio.py.""" +from __future__ import annotations + import asyncio import logging import os @@ -50,7 +49,7 @@ time_changed_millis, ) -log = logging.getLogger('zeroconf') +log = logging.getLogger("zeroconf") original_logging_level = logging.NOTSET @@ -83,14 +82,14 @@ def verify_threads_ended(): @pytest.mark.asyncio async def test_async_basic_usage() -> None: """Test we can create and close the instance.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) await aiozc.async_close() @pytest.mark.asyncio async def 
test_async_close_twice() -> None: """Test we can close twice.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) await aiozc.async_close() await aiozc.async_close() @@ -98,7 +97,7 @@ async def test_async_close_twice() -> None: @pytest.mark.asyncio async def test_async_with_sync_passed_in() -> None: """Test we can create and close the instance when passing in a sync Zeroconf.""" - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) aiozc = AsyncZeroconf(zc=zc) assert aiozc.zeroconf is zc await aiozc.async_close() @@ -107,7 +106,7 @@ async def test_async_with_sync_passed_in() -> None: @pytest.mark.asyncio async def test_async_with_sync_passed_in_closed_in_async() -> None: """Test caller closes the sync version in async.""" - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) aiozc = AsyncZeroconf(zc=zc) assert aiozc.zeroconf is zc zc.close() @@ -119,7 +118,7 @@ async def test_sync_within_event_loop_executor() -> None: """Test sync version still works from an executor within an event loop.""" def sync_code(): - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) assert zc.get_service_info("_neverused._tcp.local.", "xneverused._neverused._tcp.local.", 10) is None zc.close() @@ -129,7 +128,7 @@ def sync_code(): @pytest.mark.asyncio async def test_async_service_registration() -> None: """Test registering services broadcasts the registration by default.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) type_ = "_test1-srvc-type._tcp.local." 
name = "xxxyyy" registration_name = f"{name}.{type_}" @@ -150,7 +149,7 @@ def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None: aiozc.zeroconf.add_service_listener(type_, listener) - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( type_, registration_name, @@ -186,10 +185,10 @@ def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None: await aiozc.async_close() assert calls == [ - ('add', type_, registration_name), - ('update', type_, registration_name), - ('update', type_, registration_name), - ('remove', type_, registration_name), + ("add", type_, registration_name), + ("update", type_, registration_name), + ("update", type_, registration_name), + ("remove", type_, registration_name), ] @@ -200,7 +199,7 @@ async def test_async_service_registration_with_server_missing() -> None: For backwards compatibility, the server should be set to the name that was passed in. """ - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) type_ = "_test1-srvc-type._tcp.local." 
name = "xxxyyy" registration_name = f"{name}.{type_}" @@ -221,7 +220,7 @@ def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None: aiozc.zeroconf.add_service_listener(type_, listener) - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( type_, registration_name, @@ -254,16 +253,16 @@ def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None: await aiozc.async_close() assert calls == [ - ('add', type_, registration_name), - ('update', type_, registration_name), - ('remove', type_, registration_name), + ("add", type_, registration_name), + ("update", type_, registration_name), + ("remove", type_, registration_name), ] @pytest.mark.asyncio async def test_async_service_registration_same_server_different_ports() -> None: """Test registering services with the same server with different srv records.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) type_ = "_test1-srvc-type._tcp.local." 
name = "xxxyyy" name2 = "xxxyyy2" @@ -287,7 +286,7 @@ def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None: aiozc.zeroconf.add_service_listener(type_, listener) - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( type_, registration_name, @@ -320,17 +319,17 @@ def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None: assert info2.dns_service() in entries await aiozc.async_close() assert calls == [ - ('add', type_, registration_name), - ('add', type_, registration_name2), - ('remove', type_, registration_name), - ('remove', type_, registration_name2), + ("add", type_, registration_name), + ("add", type_, registration_name2), + ("remove", type_, registration_name), + ("remove", type_, registration_name2), ] @pytest.mark.asyncio async def test_async_service_registration_same_server_same_ports() -> None: """Test registering services with the same server with the exact same srv record.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) type_ = "_test1-srvc-type._tcp.local." 
name = "xxxyyy" name2 = "xxxyyy2" @@ -354,7 +353,7 @@ def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None: aiozc.zeroconf.add_service_listener(type_, listener) - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( type_, registration_name, @@ -387,22 +386,22 @@ def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None: assert info2.dns_service() in entries await aiozc.async_close() assert calls == [ - ('add', type_, registration_name), - ('add', type_, registration_name2), - ('remove', type_, registration_name), - ('remove', type_, registration_name2), + ("add", type_, registration_name), + ("add", type_, registration_name2), + ("remove", type_, registration_name), + ("remove", type_, registration_name2), ] @pytest.mark.asyncio async def test_async_service_registration_name_conflict() -> None: """Test registering services throws on name conflict.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) type_ = "_test-srvc2-type._tcp.local." name = "xxxyyy" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( type_, registration_name, @@ -445,12 +444,12 @@ async def test_async_service_registration_name_conflict() -> None: @pytest.mark.asyncio async def test_async_service_registration_name_does_not_match_type() -> None: """Test registering services throws when the name does not match the type.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) type_ = "_test-srvc3-type._tcp.local." 
name = "xxxyyy" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( type_, registration_name, @@ -471,13 +470,13 @@ async def test_async_service_registration_name_does_not_match_type() -> None: @pytest.mark.asyncio async def test_async_service_registration_name_strict_check() -> None: """Test registering services throws when the name does not comply.""" - zc = Zeroconf(interfaces=['127.0.0.1']) - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) type_ = "_ibisip_http._tcp.local." name = "CustomerInformationService-F4D4895E9EEB" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( type_, registration_name, @@ -507,7 +506,7 @@ async def test_async_service_registration_name_strict_check() -> None: async def test_async_tasks() -> None: """Test awaiting broadcast tasks""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) type_ = "_test-srvc4-type._tcp.local." 
name = "xxxyyy" registration_name = f"{name}.{type_}" @@ -527,7 +526,7 @@ def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None: listener = MyListener() aiozc.zeroconf.add_service_listener(type_, listener) - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( type_, registration_name, @@ -563,9 +562,9 @@ def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None: await aiozc.async_close() assert calls == [ - ('add', type_, registration_name), - ('update', type_, registration_name), - ('remove', type_, registration_name), + ("add", type_, registration_name), + ("update", type_, registration_name), + ("remove", type_, registration_name), ] @@ -573,12 +572,12 @@ def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None: async def test_async_wait_unblocks_on_update() -> None: """Test async_wait will unblock on update.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) type_ = "_test-srvc4-type._tcp.local." name = "xxxyyy" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( type_, registration_name, @@ -608,10 +607,10 @@ async def test_async_wait_unblocks_on_update() -> None: @pytest.mark.asyncio async def test_service_info_async_request() -> None: """Test registering services broadcasts and query with AsyncServceInfo.async_request.""" - if not has_working_ipv6() or os.environ.get('SKIP_IPV6'): - pytest.skip('Requires IPv6') + if not has_working_ipv6() or os.environ.get("SKIP_IPV6"): + pytest.skip("Requires IPv6") - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) type_ = "_test1-srvc-type._tcp.local." 
name = "xxxyyy" name2 = "abc" @@ -624,7 +623,7 @@ async def test_service_info_async_request() -> None: await asyncio.sleep(_LISTENER_TIME / 1000 / 2) get_service_info_task2 = asyncio.ensure_future(aiozc.async_get_service_info(type_, registration_name)) - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( type_, registration_name, @@ -670,7 +669,10 @@ async def test_service_info_async_request() -> None: 0, desc, "ash-2.local.", - addresses=[socket.inet_aton("10.0.1.3"), socket.inet_pton(socket.AF_INET6, "6001:db8::1")], + addresses=[ + socket.inet_aton("10.0.1.3"), + socket.inet_pton(socket.AF_INET6, "6001:db8::1"), + ], ) task = await aiozc.async_update_service(new_info) @@ -714,7 +716,7 @@ async def test_service_info_async_request() -> None: @pytest.mark.asyncio async def test_async_service_browser() -> None: """Test AsyncServiceBrowser.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) type_ = "_test9-srvc-type._tcp.local." 
name = "xxxyyy" registration_name = f"{name}.{type_}" @@ -734,7 +736,7 @@ def update_service(self, aiozc: Zeroconf, type: str, name: str) -> None: listener = MyListener() await aiozc.async_add_service_listener(type_, listener) - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( type_, registration_name, @@ -765,9 +767,9 @@ def update_service(self, aiozc: Zeroconf, type: str, name: str) -> None: await aiozc.async_close() assert calls == [ - ('add', type_, registration_name), - ('update', type_, registration_name), - ('remove', type_, registration_name), + ("add", type_, registration_name), + ("update", type_, registration_name), + ("remove", type_, registration_name), ] @@ -778,14 +780,14 @@ async def test_async_context_manager() -> None: name = "xxxyyy" registration_name = f"{name}.{type_}" - async with AsyncZeroconf(interfaces=['127.0.0.1']) as aiozc: + async with AsyncZeroconf(interfaces=["127.0.0.1"]) as aiozc: info = ServiceInfo( type_, registration_name, 80, 0, 0, - {'path': '/~paulsm/'}, + {"path": "/~paulsm/"}, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")], ) @@ -800,7 +802,7 @@ async def test_service_browser_cancel_async_context_manager(): """Test we can cancel an AsyncServiceBrowser with it being used as an async context manager.""" # instantiate a zeroconf instance - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zc = aiozc.zeroconf type_ = "_hap._tcp.local." @@ -824,14 +826,14 @@ class MyServiceListener(ServiceListener): @pytest.mark.asyncio async def test_async_unregister_all_services() -> None: """Test unregistering all services.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) type_ = "_test1-srvc-type._tcp.local." 
name = "xxxyyy" name2 = "abc" registration_name = f"{name}.{type_}" registration_name2 = f"{name2}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( type_, registration_name, @@ -886,8 +888,8 @@ async def test_async_zeroconf_service_types(): name = "xxxyyy" registration_name = f"{name}.{type_}" - zeroconf_registrar = AsyncZeroconf(interfaces=['127.0.0.1']) - desc = {'path': '/~paulsm/'} + zeroconf_registrar = AsyncZeroconf(interfaces=["127.0.0.1"]) + desc = {"path": "/~paulsm/"} info = ServiceInfo( type_, registration_name, @@ -904,7 +906,7 @@ async def test_async_zeroconf_service_types(): await asyncio.sleep(0.2) _clear_cache(zeroconf_registrar.zeroconf) try: - service_types = await AsyncZeroconfServiceTypes.async_find(interfaces=['127.0.0.1'], timeout=2) + service_types = await AsyncZeroconfServiceTypes.async_find(interfaces=["127.0.0.1"], timeout=2) assert type_ in service_types _clear_cache(zeroconf_registrar.zeroconf) service_types = await AsyncZeroconfServiceTypes.async_find(aiozc=zeroconf_registrar, timeout=2) @@ -917,7 +919,7 @@ async def test_async_zeroconf_service_types(): @pytest.mark.asyncio async def test_guard_against_running_serviceinfo_request_event_loop() -> None: """Test that running ServiceInfo.request from the event loop throws.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) service_info = AsyncServiceInfo("_hap._tcp.local.", "doesnotmatter._hap._tcp.local.") with pytest.raises(RuntimeError): @@ -930,31 +932,28 @@ async def test_service_browser_instantiation_generates_add_events_from_cache(): """Test that the ServiceBrowser will generate Add events with the existing cache when starting.""" # instantiate a zeroconf instance - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zc = aiozc.zeroconf type_ = "_hap._tcp.local." 
- registration_name = "xxxyyy.%s" % type_ + registration_name = f"xxxyyy.{type_}" callbacks = [] class MyServiceListener(ServiceListener): def add_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks if name == registration_name: callbacks.append(("add", type_, name)) def remove_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks if name == registration_name: callbacks.append(("remove", type_, name)) def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks if name == registration_name: callbacks.append(("update", type_, name)) listener = MyServiceListener() - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} address_parsed = "10.0.1.2" address = socket.inet_aton(address_parsed) info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address]) @@ -967,7 +966,7 @@ def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de await asyncio.sleep(0) assert callbacks == [ - ('add', type_, registration_name), + ("add", type_, registration_name), ] await browser.async_cancel() @@ -982,7 +981,7 @@ async def test_integration(): got_query = asyncio.Event() type_ = "_http._tcp.local." 
- registration_name = "xxxyyy.%s" % type_ + registration_name = f"xxxyyy.{type_}" def on_service_state_change(zeroconf, service_type, state_change, name): if name == registration_name: @@ -991,7 +990,7 @@ def on_service_state_change(zeroconf, service_type, state_change, name): elif state_change is ServiceStateChange.Removed: service_removed.set() - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zeroconf_browser = aiozc.zeroconf zeroconf_browser.question_history = QuestionHistoryWithoutSuppression() await zeroconf_browser.async_wait_for_start() @@ -1023,7 +1022,7 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): assert len(zeroconf_browser.engine.protocols) == 2 - aio_zeroconf_registrar = AsyncZeroconf(interfaces=['127.0.0.1']) + aio_zeroconf_registrar = AsyncZeroconf(interfaces=["127.0.0.1"]) zeroconf_registrar = aio_zeroconf_registrar.zeroconf await aio_zeroconf_registrar.zeroconf.async_wait_for_start() @@ -1040,7 +1039,7 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): 80, 0, 0, - {'path': '/~paulsm/'}, + {"path": "/~paulsm/"}, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")], ) @@ -1126,15 +1125,22 @@ def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()): async def test_info_asking_default_is_asking_qm_questions_after_the_first_qu(): """Verify the service info first question is QU and subsequent ones are QM questions.""" type_ = "_quservice._tcp.local." 
- aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zeroconf_info = aiozc.zeroconf name = "xxxyyy" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) zeroconf_info.registry.async_add(info) @@ -1174,31 +1180,28 @@ async def test_service_browser_ignores_unrelated_updates(): """Test that the ServiceBrowser ignores unrelated updates.""" # instantiate a zeroconf instance - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zc = aiozc.zeroconf type_ = "_veryuniqueone._tcp.local." - registration_name = "xxxyyy.%s" % type_ + registration_name = f"xxxyyy.{type_}" callbacks = [] class MyServiceListener(ServiceListener): def add_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks if name == registration_name: callbacks.append(("add", type_, name)) def remove_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks if name == registration_name: callbacks.append(("remove", type_, name)) def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks if name == registration_name: callbacks.append(("update", type_, name)) listener = MyServiceListener() - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} address_parsed = "10.0.1.2" address = socket.inet_aton(address_parsed) info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address]) @@ -1216,7 +1219,7 @@ def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de 0, 0, 81, - 'unrelated.local.', + "unrelated.local.", ), ] ) @@ -1235,7 +1238,13 @@ def update_service(self, zc, 
type_, name) -> None: # type: ignore[no-untyped-de 0, ) generated.add_answer_at_time( - DNSAddress("unrelated.local.", const._TYPE_A, const._CLASS_IN, const._DNS_HOST_TTL, b"1234"), + DNSAddress( + "unrelated.local.", + const._TYPE_A, + const._CLASS_IN, + const._DNS_HOST_TTL, + b"1234", + ), 0, ) generated.add_answer_at_time( @@ -1255,7 +1264,7 @@ def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de await asyncio.sleep(0) assert callbacks == [ - ('add', type_, registration_name), + ("add", type_, registration_name), ] await aiozc.async_close() @@ -1263,7 +1272,7 @@ def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-de @pytest.mark.asyncio async def test_async_request_timeout(): """Test that the timeout does not throw an exception and finishes close to the actual timeout.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) await aiozc.zeroconf.async_wait_for_start() start_time = current_time_millis() assert await aiozc.async_get_service_info("_notfound.local.", "notthere._notfound.local.") is None @@ -1277,7 +1286,7 @@ async def test_async_request_timeout(): @pytest.mark.asyncio async def test_async_request_non_running_instance(): """Test that the async_request throws when zeroconf is not running.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) await aiozc.async_close() with pytest.raises(NotRunningException): await aiozc.async_get_service_info("_notfound.local.", "notthere._notfound.local.") @@ -1287,15 +1296,22 @@ async def test_async_request_non_running_instance(): async def test_legacy_unicast_response(run_isolated): """Verify legacy unicast responses include questions and correct id.""" type_ = "_mservice._tcp.local." 
- aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) await aiozc.zeroconf.async_wait_for_start() name = "xxxyyy" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) aiozc.zeroconf.registry.async_add(info) @@ -1305,11 +1321,11 @@ async def test_legacy_unicast_response(run_isolated): protocol = aiozc.zeroconf.engine.protocols[0] with patch.object(aiozc.zeroconf, "async_send") as send_mock: - protocol.datagram_received(query.packets()[0], ('127.0.0.1', 6503)) + protocol.datagram_received(query.packets()[0], ("127.0.0.1", 6503)) calls = send_mock.mock_calls - # Verify the response is sent back on the socket it was recieved from - assert calls == [call(ANY, '127.0.0.1', 6503, (), protocol.transport)] + # Verify the response is sent back on the socket it was received from + assert calls == [call(ANY, "127.0.0.1", 6503, (), protocol.transport)] outgoing = send_mock.call_args[0][0] assert isinstance(outgoing, DNSOutgoing) assert outgoing.questions == [question] @@ -1320,37 +1336,34 @@ async def test_legacy_unicast_response(run_isolated): @pytest.mark.asyncio async def test_update_with_uppercase_names(run_isolated): """Test an ip update from a shelly which uses uppercase names.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) await aiozc.zeroconf.async_wait_for_start() callbacks = [] class MyServiceListener(ServiceListener): def add_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks callbacks.append(("add", type_, name)) def remove_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks callbacks.append(("remove", 
type_, name)) def update_service(self, zc, type_, name) -> None: # type: ignore[no-untyped-def] - nonlocal callbacks callbacks.append(("update", type_, name)) listener = MyServiceListener() browser = AsyncServiceBrowser(aiozc.zeroconf, "_http._tcp.local.", None, listener) protocol = aiozc.zeroconf.engine.protocols[0] - packet = b'\x00\x00\x84\x80\x00\x00\x00\n\x00\x00\x00\x00\t_services\x07_dns-sd\x04_udp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00\x14\x07_shelly\x04_tcp\x05local\x00\t_services\x07_dns-sd\x04_udp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00\x12\x05_http\x04_tcp\x05local\x00\x07_shelly\x04_tcp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00.\x19shellypro4pm-94b97ec07650\x07_shelly\x04_tcp\x05local\x00\x19shellypro4pm-94b97ec07650\x07_shelly\x04_tcp\x05local\x00\x00!\x80\x01\x00\x00\x00x\x00\'\x00\x00\x00\x00\x00P\x19ShellyPro4PM-94B97EC07650\x05local\x00\x19shellypro4pm-94b97ec07650\x07_shelly\x04_tcp\x05local\x00\x00\x10\x80\x01\x00\x00\x00x\x00"\napp=Pro4PM\x10ver=0.10.0-beta5\x05gen=2\x05_http\x04_tcp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00,\x19ShellyPro4PM-94B97EC07650\x05_http\x04_tcp\x05local\x00\x19ShellyPro4PM-94B97EC07650\x05_http\x04_tcp\x05local\x00\x00!\x80\x01\x00\x00\x00x\x00\'\x00\x00\x00\x00\x00P\x19ShellyPro4PM-94B97EC07650\x05local\x00\x19ShellyPro4PM-94B97EC07650\x05_http\x04_tcp\x05local\x00\x00\x10\x80\x01\x00\x00\x00x\x00\x06\x05gen=2\x19ShellyPro4PM-94B97EC07650\x05local\x00\x00\x01\x80\x01\x00\x00\x00x\x00\x04\xc0\xa8\xbc=\x19ShellyPro4PM-94B97EC07650\x05local\x00\x00/\x80\x01\x00\x00\x00x\x00$\x19ShellyPro4PM-94B97EC07650\x05local\x00\x00\x01@' # noqa: E501 - protocol.datagram_received(packet, ('127.0.0.1', 6503)) + packet = 
b"\x00\x00\x84\x80\x00\x00\x00\n\x00\x00\x00\x00\t_services\x07_dns-sd\x04_udp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00\x14\x07_shelly\x04_tcp\x05local\x00\t_services\x07_dns-sd\x04_udp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00\x12\x05_http\x04_tcp\x05local\x00\x07_shelly\x04_tcp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00.\x19shellypro4pm-94b97ec07650\x07_shelly\x04_tcp\x05local\x00\x19shellypro4pm-94b97ec07650\x07_shelly\x04_tcp\x05local\x00\x00!\x80\x01\x00\x00\x00x\x00'\x00\x00\x00\x00\x00P\x19ShellyPro4PM-94B97EC07650\x05local\x00\x19shellypro4pm-94b97ec07650\x07_shelly\x04_tcp\x05local\x00\x00\x10\x80\x01\x00\x00\x00x\x00\"\napp=Pro4PM\x10ver=0.10.0-beta5\x05gen=2\x05_http\x04_tcp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00,\x19ShellyPro4PM-94B97EC07650\x05_http\x04_tcp\x05local\x00\x19ShellyPro4PM-94B97EC07650\x05_http\x04_tcp\x05local\x00\x00!\x80\x01\x00\x00\x00x\x00'\x00\x00\x00\x00\x00P\x19ShellyPro4PM-94B97EC07650\x05local\x00\x19ShellyPro4PM-94B97EC07650\x05_http\x04_tcp\x05local\x00\x00\x10\x80\x01\x00\x00\x00x\x00\x06\x05gen=2\x19ShellyPro4PM-94B97EC07650\x05local\x00\x00\x01\x80\x01\x00\x00\x00x\x00\x04\xc0\xa8\xbc=\x19ShellyPro4PM-94B97EC07650\x05local\x00\x00/\x80\x01\x00\x00\x00x\x00$\x19ShellyPro4PM-94B97EC07650\x05local\x00\x00\x01@" # noqa: E501 + protocol.datagram_received(packet, ("127.0.0.1", 6503)) await asyncio.sleep(0) - packet = 
b'\x00\x00\x84\x80\x00\x00\x00\n\x00\x00\x00\x00\t_services\x07_dns-sd\x04_udp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00\x14\x07_shelly\x04_tcp\x05local\x00\t_services\x07_dns-sd\x04_udp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00\x12\x05_http\x04_tcp\x05local\x00\x07_shelly\x04_tcp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00.\x19shellypro4pm-94b97ec07650\x07_shelly\x04_tcp\x05local\x00\x19shellypro4pm-94b97ec07650\x07_shelly\x04_tcp\x05local\x00\x00!\x80\x01\x00\x00\x00x\x00\'\x00\x00\x00\x00\x00P\x19ShellyPro4PM-94B97EC07650\x05local\x00\x19shellypro4pm-94b97ec07650\x07_shelly\x04_tcp\x05local\x00\x00\x10\x80\x01\x00\x00\x00x\x00"\napp=Pro4PM\x10ver=0.10.0-beta5\x05gen=2\x05_http\x04_tcp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00,\x19ShellyPro4PM-94B97EC07650\x05_http\x04_tcp\x05local\x00\x19ShellyPro4PM-94B97EC07650\x05_http\x04_tcp\x05local\x00\x00!\x80\x01\x00\x00\x00x\x00\'\x00\x00\x00\x00\x00P\x19ShellyPro4PM-94B97EC07650\x05local\x00\x19ShellyPro4PM-94B97EC07650\x05_http\x04_tcp\x05local\x00\x00\x10\x80\x01\x00\x00\x00x\x00\x06\x05gen=2\x19ShellyPro4PM-94B97EC07650\x05local\x00\x00\x01\x80\x01\x00\x00\x00x\x00\x04\xc0\xa8\xbcA\x19ShellyPro4PM-94B97EC07650\x05local\x00\x00/\x80\x01\x00\x00\x00x\x00$\x19ShellyPro4PM-94B97EC07650\x05local\x00\x00\x01@' # noqa: E501 - protocol.datagram_received(packet, ('127.0.0.1', 6503)) + packet = 
b"\x00\x00\x84\x80\x00\x00\x00\n\x00\x00\x00\x00\t_services\x07_dns-sd\x04_udp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00\x14\x07_shelly\x04_tcp\x05local\x00\t_services\x07_dns-sd\x04_udp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00\x12\x05_http\x04_tcp\x05local\x00\x07_shelly\x04_tcp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00.\x19shellypro4pm-94b97ec07650\x07_shelly\x04_tcp\x05local\x00\x19shellypro4pm-94b97ec07650\x07_shelly\x04_tcp\x05local\x00\x00!\x80\x01\x00\x00\x00x\x00'\x00\x00\x00\x00\x00P\x19ShellyPro4PM-94B97EC07650\x05local\x00\x19shellypro4pm-94b97ec07650\x07_shelly\x04_tcp\x05local\x00\x00\x10\x80\x01\x00\x00\x00x\x00\"\napp=Pro4PM\x10ver=0.10.0-beta5\x05gen=2\x05_http\x04_tcp\x05local\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00,\x19ShellyPro4PM-94B97EC07650\x05_http\x04_tcp\x05local\x00\x19ShellyPro4PM-94B97EC07650\x05_http\x04_tcp\x05local\x00\x00!\x80\x01\x00\x00\x00x\x00'\x00\x00\x00\x00\x00P\x19ShellyPro4PM-94B97EC07650\x05local\x00\x19ShellyPro4PM-94B97EC07650\x05_http\x04_tcp\x05local\x00\x00\x10\x80\x01\x00\x00\x00x\x00\x06\x05gen=2\x19ShellyPro4PM-94B97EC07650\x05local\x00\x00\x01\x80\x01\x00\x00\x00x\x00\x04\xc0\xa8\xbcA\x19ShellyPro4PM-94B97EC07650\x05local\x00\x00/\x80\x01\x00\x00\x00x\x00$\x19ShellyPro4PM-94B97EC07650\x05local\x00\x00\x01@" # noqa: E501 + protocol.datagram_received(packet, ("127.0.0.1", 6503)) await browser.async_cancel() await aiozc.async_close() assert callbacks == [ - ('add', '_http._tcp.local.', 'ShellyPro4PM-94B97EC07650._http._tcp.local.'), - ('update', '_http._tcp.local.', 'ShellyPro4PM-94B97EC07650._http._tcp.local.'), + ("add", "_http._tcp.local.", "ShellyPro4PM-94B97EC07650._http._tcp.local."), + ("update", "_http._tcp.local.", "ShellyPro4PM-94B97EC07650._http._tcp.local."), ] diff --git a/tests/test_cache.py b/tests/test_cache.py index aac7e0ca2..9d55435d5 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -1,17 +1,17 @@ -#!/usr/bin/env python - - -""" Unit tests for zeroconf._cache. 
""" +"""Unit tests for zeroconf._cache.""" +from __future__ import annotations import logging -import unittest import unittest.mock +from heapq import heapify, heappop + +import pytest import zeroconf as r from zeroconf import const -log = logging.getLogger('zeroconf') +log = logging.getLogger("zeroconf") original_logging_level = logging.NOTSET @@ -28,11 +28,11 @@ def teardown_module(): class TestDNSCache(unittest.TestCase): def test_order(self): - record1 = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'a') - record2 = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'b') + record1 = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 1, b"a") + record2 = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 1, b"b") cache = r.DNSCache() cache.async_add_records([record1, record2]) - entry = r.DNSEntry('a', const._TYPE_SOA, const._CLASS_IN) + entry = r.DNSEntry("a", const._TYPE_SOA, const._CLASS_IN) cached_record = cache.get(entry) assert cached_record == record2 @@ -42,8 +42,8 @@ def test_adding_same_record_to_cache_different_ttls_with_get(self): This ensures we only have one source of truth for TTLs as a record cannot be both expired and not expired. """ - record1 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'a') - record2 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 10, b'a') + record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a") + record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 10, b"a") cache = r.DNSCache() cache.async_add_records([record1, record2]) entry = r.DNSEntry(record2.name, const._TYPE_A, const._CLASS_IN) @@ -58,144 +58,427 @@ def test_adding_same_record_to_cache_different_ttls_with_get_all(self): only have one source of truth for TTLs as a record cannot be both expired and not expired. 
""" - record1 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'a') - record2 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 10, b'a') + record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a") + record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 10, b"a") cache = r.DNSCache() cache.async_add_records([record1, record2]) - cached_records = cache.get_all_by_details('a', const._TYPE_A, const._CLASS_IN) + cached_records = cache.get_all_by_details("a", const._TYPE_A, const._CLASS_IN) assert cached_records == [record2] def test_cache_empty_does_not_leak_memory_by_leaving_empty_list(self): - record1 = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'a') - record2 = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'b') + record1 = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 1, b"a") + record2 = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 1, b"b") cache = r.DNSCache() cache.async_add_records([record1, record2]) - assert 'a' in cache.cache + assert "a" in cache.cache cache.async_remove_records([record1, record2]) - assert 'a' not in cache.cache + assert "a" not in cache.cache def test_cache_empty_multiple_calls(self): - record1 = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'a') - record2 = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'b') + record1 = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 1, b"a") + record2 = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 1, b"b") cache = r.DNSCache() cache.async_add_records([record1, record2]) - assert 'a' in cache.cache + assert "a" in cache.cache cache.async_remove_records([record1, record2]) - assert 'a' not in cache.cache + assert "a" not in cache.cache class TestDNSAsyncCacheAPI(unittest.TestCase): def test_async_get_unique(self): - record1 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'a') - record2 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'b') + record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 
1, b"a") + record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"b") cache = r.DNSCache() cache.async_add_records([record1, record2]) assert cache.async_get_unique(record1) == record1 assert cache.async_get_unique(record2) == record2 def test_async_all_by_details(self): - record1 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'a') - record2 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'b') + record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a") + record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"b") cache = r.DNSCache() cache.async_add_records([record1, record2]) - assert set(cache.async_all_by_details('a', const._TYPE_A, const._CLASS_IN)) == {record1, record2} + assert set(cache.async_all_by_details("a", const._TYPE_A, const._CLASS_IN)) == { + record1, + record2, + } def test_async_entries_with_server(self): record1 = r.DNSService( - 'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 85, 'ab' + "irrelevant", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 0, + 85, + "ab", ) record2 = r.DNSService( - 'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'ab' + "irrelevant", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 0, + 80, + "ab", ) cache = r.DNSCache() cache.async_add_records([record1, record2]) - assert set(cache.async_entries_with_server('ab')) == {record1, record2} - assert set(cache.async_entries_with_server('AB')) == {record1, record2} + assert set(cache.async_entries_with_server("ab")) == {record1, record2} + assert set(cache.async_entries_with_server("AB")) == {record1, record2} def test_async_entries_with_name(self): record1 = r.DNSService( - 'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 85, 'ab' + "irrelevant", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 0, + 85, + "ab", ) record2 = r.DNSService( - 'irrelevant', const._TYPE_SRV, const._CLASS_IN, 
const._DNS_HOST_TTL, 0, 0, 80, 'ab' + "irrelevant", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 0, + 80, + "ab", ) cache = r.DNSCache() cache.async_add_records([record1, record2]) - assert set(cache.async_entries_with_name('irrelevant')) == {record1, record2} - assert set(cache.async_entries_with_name('Irrelevant')) == {record1, record2} + assert set(cache.async_entries_with_name("irrelevant")) == {record1, record2} + assert set(cache.async_entries_with_name("Irrelevant")) == {record1, record2} # These functions have been seen in other projects so # we try to maintain a stable API for all the threadsafe getters class TestDNSCacheAPI(unittest.TestCase): def test_get(self): - record1 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'a') - record2 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'b') - record3 = r.DNSAddress('a', const._TYPE_AAAA, const._CLASS_IN, 1, b'ipv6') + record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a") + record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"b") + record3 = r.DNSAddress("a", const._TYPE_AAAA, const._CLASS_IN, 1, b"ipv6") cache = r.DNSCache() cache.async_add_records([record1, record2, record3]) assert cache.get(record1) == record1 assert cache.get(record2) == record2 - assert cache.get(r.DNSEntry('a', const._TYPE_A, const._CLASS_IN)) == record2 - assert cache.get(r.DNSEntry('a', const._TYPE_AAAA, const._CLASS_IN)) == record3 - assert cache.get(r.DNSEntry('notthere', const._TYPE_A, const._CLASS_IN)) is None + assert cache.get(r.DNSEntry("a", const._TYPE_A, const._CLASS_IN)) == record2 + assert cache.get(r.DNSEntry("a", const._TYPE_AAAA, const._CLASS_IN)) == record3 + assert cache.get(r.DNSEntry("notthere", const._TYPE_A, const._CLASS_IN)) is None def test_get_by_details(self): - record1 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'a') - record2 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'b') + record1 = r.DNSAddress("a", const._TYPE_A, 
const._CLASS_IN, 1, b"a") + record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"b") cache = r.DNSCache() cache.async_add_records([record1, record2]) - assert cache.get_by_details('a', const._TYPE_A, const._CLASS_IN) == record2 + assert cache.get_by_details("a", const._TYPE_A, const._CLASS_IN) == record2 def test_get_all_by_details(self): - record1 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'a') - record2 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'b') + record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a") + record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"b") cache = r.DNSCache() cache.async_add_records([record1, record2]) - assert set(cache.get_all_by_details('a', const._TYPE_A, const._CLASS_IN)) == {record1, record2} + assert set(cache.get_all_by_details("a", const._TYPE_A, const._CLASS_IN)) == { + record1, + record2, + } def test_entries_with_server(self): record1 = r.DNSService( - 'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 85, 'ab' + "irrelevant", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 0, + 85, + "ab", ) record2 = r.DNSService( - 'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'ab' + "irrelevant", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 0, + 80, + "ab", ) cache = r.DNSCache() cache.async_add_records([record1, record2]) - assert set(cache.entries_with_server('ab')) == {record1, record2} - assert set(cache.entries_with_server('AB')) == {record1, record2} + assert set(cache.entries_with_server("ab")) == {record1, record2} + assert set(cache.entries_with_server("AB")) == {record1, record2} def test_entries_with_name(self): record1 = r.DNSService( - 'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 85, 'ab' + "irrelevant", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 0, + 85, + "ab", ) record2 = r.DNSService( - 'irrelevant', 
const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'ab' + "irrelevant", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 0, + 80, + "ab", ) cache = r.DNSCache() cache.async_add_records([record1, record2]) - assert set(cache.entries_with_name('irrelevant')) == {record1, record2} - assert set(cache.entries_with_name('Irrelevant')) == {record1, record2} + assert set(cache.entries_with_name("irrelevant")) == {record1, record2} + assert set(cache.entries_with_name("Irrelevant")) == {record1, record2} def test_current_entry_with_name_and_alias(self): record1 = r.DNSPointer( - 'irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, 'x.irrelevant' + "irrelevant", + const._TYPE_PTR, + const._CLASS_IN, + const._DNS_OTHER_TTL, + "x.irrelevant", ) record2 = r.DNSPointer( - 'irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, 'y.irrelevant' + "irrelevant", + const._TYPE_PTR, + const._CLASS_IN, + const._DNS_OTHER_TTL, + "y.irrelevant", ) cache = r.DNSCache() cache.async_add_records([record1, record2]) - assert cache.current_entry_with_name_and_alias('irrelevant', 'x.irrelevant') == record1 + assert cache.current_entry_with_name_and_alias("irrelevant", "x.irrelevant") == record1 def test_name(self): record1 = r.DNSService( - 'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 85, 'ab' + "irrelevant", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 0, + 85, + "ab", ) record2 = r.DNSService( - 'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'ab' + "irrelevant", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 0, + 80, + "ab", ) cache = r.DNSCache() cache.async_add_records([record1, record2]) - assert cache.names() == ['irrelevant'] + assert cache.names() == ["irrelevant"] + + +def test_async_entries_with_name_returns_newest_record(): + cache = r.DNSCache() + record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", 
created=1.0) + record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", created=2.0) + cache.async_add_records([record1]) + cache.async_add_records([record2]) + assert next(iter(cache.async_entries_with_name("a"))) is record2 + + +def test_async_entries_with_server_returns_newest_record(): + cache = r.DNSCache() + record1 = r.DNSService("a", const._TYPE_SRV, const._CLASS_IN, 1, 1, 1, 1, "a", created=1.0) + record2 = r.DNSService("a", const._TYPE_SRV, const._CLASS_IN, 1, 1, 1, 1, "a", created=2.0) + cache.async_add_records([record1]) + cache.async_add_records([record2]) + assert next(iter(cache.async_entries_with_server("a"))) is record2 + + +def test_async_get_returns_newest_record(): + cache = r.DNSCache() + record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", created=1.0) + record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", created=2.0) + cache.async_add_records([record1]) + cache.async_add_records([record2]) + assert cache.get(record2) is record2 + + +def test_async_get_returns_newest_nsec_record(): + cache = r.DNSCache() + record1 = r.DNSNsec("a", const._TYPE_NSEC, const._CLASS_IN, 1, "a", [], created=1.0) + record2 = r.DNSNsec("a", const._TYPE_NSEC, const._CLASS_IN, 1, "a", [], created=2.0) + cache.async_add_records([record1]) + cache.async_add_records([record2]) + assert cache.get(record2) is record2 + + +def test_get_by_details_returns_newest_record(): + cache = r.DNSCache() + record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", created=1.0) + record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", created=2.0) + cache.async_add_records([record1]) + cache.async_add_records([record2]) + assert cache.get_by_details("a", const._TYPE_A, const._CLASS_IN) is record2 + + +def test_get_all_by_details_returns_newest_record(): + cache = r.DNSCache() + record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", created=1.0) + record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 
1, b"a", created=2.0) + cache.async_add_records([record1]) + cache.async_add_records([record2]) + records = cache.get_all_by_details("a", const._TYPE_A, const._CLASS_IN) + assert len(records) == 1 + assert records[0] is record2 + + +def test_async_get_all_by_details_returns_newest_record(): + cache = r.DNSCache() + record1 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", created=1.0) + record2 = r.DNSAddress("a", const._TYPE_A, const._CLASS_IN, 1, b"a", created=2.0) + cache.async_add_records([record1]) + cache.async_add_records([record2]) + records = cache.async_all_by_details("a", const._TYPE_A, const._CLASS_IN) + assert len(records) == 1 + assert records[0] is record2 + + +def test_async_get_unique_returns_newest_record(): + cache = r.DNSCache() + record1 = r.DNSPointer("a", const._TYPE_PTR, const._CLASS_IN, 1, "a", created=1.0) + record2 = r.DNSPointer("a", const._TYPE_PTR, const._CLASS_IN, 1, "a", created=2.0) + cache.async_add_records([record1]) + cache.async_add_records([record2]) + record = cache.async_get_unique(record1) + assert record is record2 + record = cache.async_get_unique(record2) + assert record is record2 + + +@pytest.mark.asyncio +async def test_cache_heap_cleanup() -> None: + """Test that the heap gets cleaned up when there are many old expirations.""" + cache = r.DNSCache() + # The heap should not be cleaned up when there are less than 100 expiration changes + min_records_to_cleanup = 100 + now = r.current_time_millis() + name = "heap.local." 
+ ttl_seconds = 100 + ttl_millis = ttl_seconds * 1000 + + for i in range(min_records_to_cleanup): + record = r.DNSAddress(name, const._TYPE_A, const._CLASS_IN, ttl_seconds, b"1", created=now + i) + cache.async_add_records([record]) + + assert len(cache._expire_heap) == min_records_to_cleanup + assert len(cache.async_entries_with_name(name)) == 1 + + # Now that we reached the minimum number of cookies to cleanup, + # add one more cookie to trigger the cleanup + record = r.DNSAddress( + name, const._TYPE_A, const._CLASS_IN, ttl_seconds, b"1", created=now + min_records_to_cleanup + ) + expected_expire_time = record.created + ttl_millis + cache.async_add_records([record]) + assert len(cache.async_entries_with_name(name)) == 1 + entry = next(iter(cache.async_entries_with_name(name))) + assert (entry.created + ttl_millis) == expected_expire_time + assert entry is record + + # Verify that the heap has been cleaned up + assert len(cache.async_entries_with_name(name)) == 1 + cache.async_expire(now) + + heap_copy = cache._expire_heap.copy() + heapify(heap_copy) + # Ensure heap order is maintained + assert cache._expire_heap == heap_copy + + # The heap should have been cleaned up + assert len(cache._expire_heap) == 1 + assert len(cache.async_entries_with_name(name)) == 1 + + entry = next(iter(cache.async_entries_with_name(name))) + assert entry is record + + assert (entry.created + ttl_millis) == expected_expire_time + + cache.async_expire(expected_expire_time) + assert not cache.async_entries_with_name(name), cache._expire_heap + + +@pytest.mark.asyncio +async def test_cache_heap_multi_name_cleanup() -> None: + """Test cleanup with multiple names.""" + cache = r.DNSCache() + # The heap should not be cleaned up when there are less than 100 expiration changes + min_records_to_cleanup = 100 + now = r.current_time_millis() + name = "heap.local." + name2 = "heap2.local." 
+ ttl_seconds = 100 + ttl_millis = ttl_seconds * 1000 + + for i in range(min_records_to_cleanup): + record = r.DNSAddress(name, const._TYPE_A, const._CLASS_IN, ttl_seconds, b"1", created=now + i) + cache.async_add_records([record]) + expected_expire_time = record.created + ttl_millis + + for i in range(5): + record = r.DNSAddress( + name2, const._TYPE_A, const._CLASS_IN, ttl_seconds, bytes((i,)), created=now + i + ) + cache.async_add_records([record]) + + assert len(cache._expire_heap) == min_records_to_cleanup + 5 + assert len(cache.async_entries_with_name(name)) == 1 + assert len(cache.async_entries_with_name(name2)) == 5 + + cache.async_expire(now) + # The heap and expirations should have been cleaned up + assert len(cache._expire_heap) == 1 + 5 + assert len(cache._expirations) == 1 + 5 + + cache.async_expire(expected_expire_time) + assert not cache.async_entries_with_name(name), cache._expire_heap + + +@pytest.mark.asyncio +async def test_cache_heap_pops_order() -> None: + """Test cache heap is popped in order.""" + cache = r.DNSCache() + # The heap should not be cleaned up when there are less than 100 expiration changes + min_records_to_cleanup = 100 + now = r.current_time_millis() + name = "heap.local." + name2 = "heap2.local." 
+ ttl_seconds = 100 + + for i in range(min_records_to_cleanup): + record = r.DNSAddress(name, const._TYPE_A, const._CLASS_IN, ttl_seconds, b"1", created=now + i) + cache.async_add_records([record]) + + for i in range(5): + record = r.DNSAddress( + name2, const._TYPE_A, const._CLASS_IN, ttl_seconds, bytes((i,)), created=now + i + ) + cache.async_add_records([record]) + + assert len(cache._expire_heap) == min_records_to_cleanup + 5 + assert len(cache.async_entries_with_name(name)) == 1 + assert len(cache.async_entries_with_name(name2)) == 5 + + start_ts = 0.0 + while cache._expire_heap: + ts, _ = heappop(cache._expire_heap) + assert ts >= start_ts + start_ts = ts diff --git a/tests/test_circular_imports.py b/tests/test_circular_imports.py new file mode 100644 index 000000000..74ed1f124 --- /dev/null +++ b/tests/test_circular_imports.py @@ -0,0 +1,32 @@ +"""Test to check for circular imports.""" + +from __future__ import annotations + +import asyncio +import sys + +import pytest + + +@pytest.mark.asyncio +@pytest.mark.timeout(30) # cloud can take > 9s +@pytest.mark.parametrize( + "module", + [ + "zeroconf", + "zeroconf.asyncio", + "zeroconf._protocol.incoming", + "zeroconf._protocol.outgoing", + "zeroconf.const", + "zeroconf._logger", + "zeroconf._transport", + "zeroconf._record_update", + "zeroconf._services.browser", + "zeroconf._services.info", + ], +) +async def test_circular_imports(module: str) -> None: + """Check that components can be imported without circular imports.""" + process = await asyncio.create_subprocess_exec(sys.executable, "-c", f"import {module}") + await process.communicate() + assert process.returncode == 0 diff --git a/tests/test_core.py b/tests/test_core.py index de4b2ef5b..8c53d2070 100644 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -1,7 +1,6 @@ -#!/usr/bin/env python +"""Unit tests for zeroconf._core""" - -""" Unit tests for zeroconf._core """ +from __future__ import annotations import asyncio import logging @@ -12,13 +11,9 @@ 
import time import unittest import unittest.mock -from typing import Tuple, Union, cast -from unittest.mock import Mock, patch - -if sys.version_info[:3][1] < 8: - AsyncMock = Mock -else: - from unittest.mock import AsyncMock +import warnings +from typing import cast +from unittest.mock import AsyncMock, Mock, patch import pytest @@ -30,7 +25,7 @@ from . import _clear_cache, _inject_response, _wait_for_start, has_working_ipv6 -log = logging.getLogger('zeroconf') +log = logging.getLogger("zeroconf") original_logging_level = logging.NOTSET @@ -46,13 +41,13 @@ def teardown_module(): def threadsafe_query( - zc: 'Zeroconf', - protocol: 'AsyncListener', + zc: Zeroconf, + protocol: AsyncListener, msg: DNSIncoming, addr: str, port: int, transport: _WrappedTransport, - v6_flow_scope: Union[Tuple[()], Tuple[int, int]], + v6_flow_scope: tuple[()] | tuple[int, int], ) -> None: async def make_query(): protocol.handle_query_or_defer(msg, addr, port, transport, v6_flow_scope) @@ -88,34 +83,46 @@ def test_close_multiple_times(self): rv.close() rv.close() - @unittest.skipIf(not has_working_ipv6(), 'Requires IPv6') - @unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled') + @unittest.skipIf(not has_working_ipv6(), "Requires IPv6") + @unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled") def test_launch_and_close_v4_v6(self): rv = r.Zeroconf(interfaces=r.InterfaceChoice.All, ip_version=r.IPVersion.All) rv.close() - rv = r.Zeroconf(interfaces=r.InterfaceChoice.Default, ip_version=r.IPVersion.All) - rv.close() + with warnings.catch_warnings(record=True) as warned: + rv = r.Zeroconf(interfaces=r.InterfaceChoice.Default, ip_version=r.IPVersion.All) + rv.close() + first_warning = warned[0] + assert "IPv6 multicast requests can't be sent using default interface" in str( + first_warning.message + ) - @unittest.skipIf(not has_working_ipv6(), 'Requires IPv6') - @unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled') + @unittest.skipIf(not 
has_working_ipv6(), "Requires IPv6") + @unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled") def test_launch_and_close_v6_only(self): rv = r.Zeroconf(interfaces=r.InterfaceChoice.All, ip_version=r.IPVersion.V6Only) rv.close() - rv = r.Zeroconf(interfaces=r.InterfaceChoice.Default, ip_version=r.IPVersion.V6Only) - rv.close() + with warnings.catch_warnings(record=True) as warned: + rv = r.Zeroconf(interfaces=r.InterfaceChoice.Default, ip_version=r.IPVersion.V6Only) + rv.close() + first_warning = warned[0] + assert "IPv6 multicast requests can't be sent using default interface" in str( + first_warning.message + ) - @unittest.skipIf(sys.platform == 'darwin', reason="apple_p2p failure path not testable on mac") + @unittest.skipIf(sys.platform == "darwin", reason="apple_p2p failure path not testable on mac") def test_launch_and_close_apple_p2p_not_mac(self): with pytest.raises(RuntimeError): r.Zeroconf(apple_p2p=True) - @unittest.skipIf(sys.platform != 'darwin', reason="apple_p2p happy path only testable on mac") + @unittest.skipIf(sys.platform != "darwin", reason="apple_p2p happy path only testable on mac") def test_launch_and_close_apple_p2p_on_mac(self): rv = r.Zeroconf(apple_p2p=True) rv.close() def test_async_updates_from_response(self): - def mock_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNSIncoming: + def mock_incoming_msg( + service_state_change: r.ServiceStateChange, + ) -> r.DNSIncoming: ttl = 120 generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) @@ -136,7 +143,8 @@ def mock_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNSIncomi ttl = 0 generated.add_answer_at_time( - r.DNSPointer(service_type, const._TYPE_PTR, const._CLASS_IN, ttl, service_name), 0 + r.DNSPointer(service_type, const._TYPE_PTR, const._CLASS_IN, ttl, service_name), + 0, ) generated.add_answer_at_time( r.DNSService( @@ -153,7 +161,11 @@ def mock_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNSIncomi ) 
generated.add_answer_at_time( r.DNSText( - service_name, const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, ttl, service_text + service_name, + const._TYPE_TXT, + const._CLASS_IN | const._CLASS_UNIQUE, + ttl, + service_text, ), 0, ) @@ -170,7 +182,9 @@ def mock_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNSIncomi return r.DNSIncoming(generated.packets()[0]) - def mock_split_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNSIncoming: + def mock_split_incoming_msg( + service_state_change: r.ServiceStateChange, + ) -> r.DNSIncoming: """Mock an incoming message for the case where the packet is split.""" ttl = 120 generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) @@ -199,13 +213,13 @@ def mock_split_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNS ) return r.DNSIncoming(generated.packets()[0]) - service_name = 'name._type._tcp.local.' - service_type = '_type._tcp.local.' - service_server = 'ash-2.local.' - service_text = b'path=/~paulsm/' - service_address = '10.0.1.2' + service_name = "name._type._tcp.local." + service_type = "_type._tcp.local." + service_server = "ash-2.local." + service_text = b"path=/~paulsm/" + service_address = "10.0.1.2" - zeroconf = r.Zeroconf(interfaces=['127.0.0.1']) + zeroconf = r.Zeroconf(interfaces=["127.0.0.1"]) try: # service added @@ -225,7 +239,7 @@ def mock_split_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNS time.sleep(1.1) # service updated. 
currently only text record can be updated - service_text = b'path=/~humingchun/' + service_text = b"path=/~humingchun/" _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Updated)) dns_text = zeroconf.cache.get_by_details(service_name, const._TYPE_TXT, const._CLASS_IN) assert dns_text is not None @@ -254,12 +268,19 @@ def mock_split_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNS def test_generate_service_query_set_qu_bit(): """Test generate_service_query sets the QU bit.""" - zeroconf_registrar = Zeroconf(interfaces=['127.0.0.1']) - desc = {'path': '/~paulsm/'} + zeroconf_registrar = Zeroconf(interfaces=["127.0.0.1"]) + desc = {"path": "/~paulsm/"} type_ = "._hap._tcp.local." registration_name = "this-host-is-not-used._hap._tcp.local." info = r.ServiceInfo( - type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) out = zeroconf_registrar.generate_service_query(info) assert out.questions[0].unicast is True @@ -268,10 +289,10 @@ def test_generate_service_query_set_qu_bit(): def test_invalid_packets_ignored_and_does_not_cause_loop_exception(): """Ensure an invalid packet cannot cause the loop to collapse.""" - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) generated = r.DNSOutgoing(0) packet = generated.packets()[0] - packet = packet[:8] + b'deadbeef' + packet[8:] + packet = packet[:8] + b"deadbeef" + packet[8:] parsed = r.DNSIncoming(packet) assert parsed.valid is False @@ -291,7 +312,7 @@ def test_invalid_packets_ignored_and_does_not_cause_loop_exception(): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, 500, - b'path=/~paulsm/', + b"path=/~paulsm/", ) assert isinstance(entry, r.DNSText) assert isinstance(entry, r.DNSRecord) @@ -306,14 +327,21 @@ def test_invalid_packets_ignored_and_does_not_cause_loop_exception(): def 
test_goodbye_all_services(): """Verify generating the goodbye query does not change with time.""" - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) out = zc.generate_unregister_all_services() assert out is None type_ = "_http._tcp.local." - registration_name = "xxxyyy.%s" % type_ - desc = {'path': '/~paulsm/'} + registration_name = f"xxxyyy.{type_}" + desc = {"path": "/~paulsm/"} info = r.ServiceInfo( - type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) zc.registry.async_add(info) out = zc.generate_unregister_all_services() @@ -325,7 +353,7 @@ def test_goodbye_all_services(): second_packet = out.packets() assert second_packet == first_packet - # Verify the registery is empty + # Verify the registry is empty out3 = zc.generate_unregister_all_services() assert out3 is None assert zc.registry.async_get_service_infos() == [] @@ -337,18 +365,18 @@ def test_register_service_with_custom_ttl(): """Test a registering a service with a custom ttl.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) # start a browser type_ = "_homeassistant._tcp.local." name = "MyTestHome" info_service = r.ServiceInfo( type_, - f'{name}.{type_}', + f"{name}.{type_}", 80, 0, 0, - {'path': '/~paulsm/'}, + {"path": "/~paulsm/"}, "ash-90.local.", addresses=[socket.inet_aton("10.0.1.2")], ) @@ -364,55 +392,55 @@ def test_logging_packets(caplog): """Test packets are only logged with debug logging.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) # start a browser type_ = "_logging._tcp.local." 
name = "TLD" info_service = r.ServiceInfo( type_, - f'{name}.{type_}', + f"{name}.{type_}", 80, 0, 0, - {'path': '/~paulsm/'}, + {"path": "/~paulsm/"}, "ash-90.local.", addresses=[socket.inet_aton("10.0.1.2")], ) - logging.getLogger('zeroconf').setLevel(logging.DEBUG) + logging.getLogger("zeroconf").setLevel(logging.DEBUG) caplog.clear() zc.register_service(info_service, ttl=3000) assert "Sending to" in caplog.text record = zc.cache.get(info_service.dns_pointer()) assert record is not None assert record.ttl == 3000 - logging.getLogger('zeroconf').setLevel(logging.INFO) + logging.getLogger("zeroconf").setLevel(logging.INFO) caplog.clear() zc.unregister_service(info_service) assert "Sending to" not in caplog.text - logging.getLogger('zeroconf').setLevel(logging.DEBUG) + logging.getLogger("zeroconf").setLevel(logging.DEBUG) zc.close() def test_get_service_info_failure_path(): """Verify get_service_info return None when the underlying call returns False.""" - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) assert zc.get_service_info("_neverused._tcp.local.", "xneverused._neverused._tcp.local.", 10) is None zc.close() def test_sending_unicast(): """Test sending unicast response.""" - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) entry = r.DNSText( "didnotcrashincoming._crash._tcp.local.", const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, 500, - b'path=/~paulsm/', + b"path=/~paulsm/", ) generated.add_answer_at_time(entry, 0) zc.send(generated, "2001:db8::1", const._MDNS_PORT) # https://www.iana.org/go/rfc3849 @@ -437,7 +465,7 @@ def test_sending_unicast(): def test_tc_bit_defers(): - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) _wait_for_start(zc) type_ = "_tcbitdefer._tcp.local." 
name = "knownname" @@ -448,19 +476,40 @@ def test_tc_bit_defers(): registration2_name = f"{name2}.{type_}" registration3_name = f"{name3}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} server_name = "ash-2.local." server_name2 = "ash-3.local." server_name3 = "ash-4.local." info = r.ServiceInfo( - type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + server_name, + addresses=[socket.inet_aton("10.0.1.2")], ) info2 = r.ServiceInfo( - type_, registration2_name, 80, 0, 0, desc, server_name2, addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration2_name, + 80, + 0, + 0, + desc, + server_name2, + addresses=[socket.inet_aton("10.0.1.2")], ) info3 = r.ServiceInfo( - type_, registration3_name, 80, 0, 0, desc, server_name3, addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration3_name, + 80, + 0, + 0, + desc, + server_name3, + addresses=[socket.inet_aton("10.0.1.2")], ) zc.registry.async_add(info) zc.registry.async_add(info2) @@ -481,7 +530,7 @@ def test_tc_bit_defers(): packets = generated.packets() assert len(packets) == 4 expected_deferred = [] - source_ip = '203.0.113.13' + source_ip = "203.0.113.13" next_packet = r.DNSIncoming(packets.pop(0)) expected_deferred.append(next_packet) @@ -516,7 +565,7 @@ def test_tc_bit_defers(): def test_tc_bit_defers_last_response_missing(): - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) _wait_for_start(zc) type_ = "_knowndefer._tcp.local." name = "knownname" @@ -527,19 +576,40 @@ def test_tc_bit_defers_last_response_missing(): registration2_name = f"{name2}.{type_}" registration3_name = f"{name3}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} server_name = "ash-2.local." server_name2 = "ash-3.local." server_name3 = "ash-4.local." 
info = r.ServiceInfo( - type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + server_name, + addresses=[socket.inet_aton("10.0.1.2")], ) info2 = r.ServiceInfo( - type_, registration2_name, 80, 0, 0, desc, server_name2, addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration2_name, + 80, + 0, + 0, + desc, + server_name2, + addresses=[socket.inet_aton("10.0.1.2")], ) info3 = r.ServiceInfo( - type_, registration3_name, 80, 0, 0, desc, server_name3, addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration3_name, + 80, + 0, + 0, + desc, + server_name3, + addresses=[socket.inet_aton("10.0.1.2")], ) zc.registry.async_add(info) zc.registry.async_add(info2) @@ -548,7 +618,7 @@ def test_tc_bit_defers_last_response_missing(): protocol = zc.engine.protocols[0] now = r.current_time_millis() _clear_cache(zc) - source_ip = '203.0.113.12' + source_ip = "203.0.113.12" generated = r.DNSOutgoing(const._FLAGS_QR_QUERY) question = r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN) @@ -611,7 +681,7 @@ async def test_open_close_twice_from_async() -> None: """Test we can close twice from a coroutine when using Zeroconf. Ideally callers switch to using AsyncZeroconf, however there will - be a peroid where they still call the sync wrapper that we want + be a period where they still call the sync wrapper that we want to ensure will not deadlock on shutdown. This test is expected to throw warnings about tasks being destroyed @@ -620,7 +690,7 @@ async def test_open_close_twice_from_async() -> None: version they won't yield with an await like async_close we don't have much choice but to force things down. 
""" - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) zc.close() zc.close() await asyncio.sleep(0) @@ -631,8 +701,8 @@ async def test_multiple_sync_instances_stared_from_async_close(): """Test we can shutdown multiple sync instances from async.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) - zc2 = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) + zc2 = Zeroconf(interfaces=["127.0.0.1"]) assert zc.loop is not None assert zc2.loop is not None @@ -642,7 +712,7 @@ async def test_multiple_sync_instances_stared_from_async_close(): zc2.close() assert zc2.loop.is_running() - zc3 = Zeroconf(interfaces=['127.0.0.1']) + zc3 = Zeroconf(interfaces=["127.0.0.1"]) assert zc3.loop == zc2.loop zc3.close() @@ -655,18 +725,18 @@ def test_shutdown_while_register_in_process(): """Test we can shutdown while registering a service in another thread.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) # start a browser type_ = "_homeassistant._tcp.local." 
name = "MyTestHome" info_service = r.ServiceInfo( type_, - f'{name}.{type_}', + f"{name}.{type_}", 80, 0, 0, - {'path': '/~paulsm/'}, + {"path": "/~paulsm/"}, "ash-90.local.", addresses=[socket.inet_aton("10.0.1.2")], ) @@ -683,12 +753,11 @@ def _background_register(): @pytest.mark.asyncio -@unittest.skipIf(sys.version_info[:3][1] < 8, 'Requires Python 3.8 or later to patch _async_setup') @patch("zeroconf._core._STARTUP_TIMEOUT", 0) @patch("zeroconf._core.AsyncEngine._async_setup", new_callable=AsyncMock) async def test_event_loop_blocked(mock_start): """Test we raise NotRunningException when waiting for startup that times out.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) with pytest.raises(NotRunningException): await aiozc.zeroconf.async_wait_for_start() assert aiozc.zeroconf.started is False diff --git a/tests/test_dns.py b/tests/test_dns.py index 055621356..246c8dcfb 100644 --- a/tests/test_dns.py +++ b/tests/test_dns.py @@ -1,12 +1,10 @@ -#!/usr/bin/env python +"""Unit tests for zeroconf._dns.""" - -""" Unit tests for zeroconf._dns. """ +from __future__ import annotations import logging import os import socket -import unittest import unittest.mock import pytest @@ -17,7 +15,7 @@ from . 
import has_working_ipv6 -log = logging.getLogger('zeroconf') +log = logging.getLogger("zeroconf") original_logging_level = logging.NOTSET @@ -36,74 +34,69 @@ class TestDunder(unittest.TestCase): def test_dns_text_repr(self): # There was an issue on Python 3 that prevented DNSText's repr # from working when the text was longer than 10 bytes - text = DNSText('irrelevant', 0, 0, 0, b'12345678901') + text = DNSText("irrelevant", 0, 0, 0, b"12345678901") repr(text) - text = DNSText('irrelevant', 0, 0, 0, b'123') + text = DNSText("irrelevant", 0, 0, 0, b"123") repr(text) def test_dns_hinfo_repr_eq(self): - hinfo = DNSHinfo('irrelevant', const._TYPE_HINFO, 0, 0, 'cpu', 'os') + hinfo = DNSHinfo("irrelevant", const._TYPE_HINFO, 0, 0, "cpu", "os") assert hinfo == hinfo repr(hinfo) def test_dns_pointer_repr(self): - pointer = r.DNSPointer('irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, '123') + pointer = r.DNSPointer("irrelevant", const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, "123") repr(pointer) - @unittest.skipIf(not has_working_ipv6(), 'Requires IPv6') - @unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled') + @unittest.skipIf(not has_working_ipv6(), "Requires IPv6") + @unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled") def test_dns_address_repr(self): - address = r.DNSAddress('irrelevant', const._TYPE_SOA, const._CLASS_IN, 1, b'a') + address = r.DNSAddress("irrelevant", const._TYPE_SOA, const._CLASS_IN, 1, b"a") assert repr(address).endswith("b'a'") address_ipv4 = r.DNSAddress( - 'irrelevant', const._TYPE_SOA, const._CLASS_IN, 1, socket.inet_pton(socket.AF_INET, '127.0.0.1') + "irrelevant", + const._TYPE_SOA, + const._CLASS_IN, + 1, + socket.inet_pton(socket.AF_INET, "127.0.0.1"), ) - assert repr(address_ipv4).endswith('127.0.0.1') + assert repr(address_ipv4).endswith("127.0.0.1") address_ipv6 = r.DNSAddress( - 'irrelevant', const._TYPE_SOA, const._CLASS_IN, 1, socket.inet_pton(socket.AF_INET6, '::1') + 
"irrelevant", + const._TYPE_SOA, + const._CLASS_IN, + 1, + socket.inet_pton(socket.AF_INET6, "::1"), ) - assert repr(address_ipv6).endswith('::1') + assert repr(address_ipv6).endswith("::1") def test_dns_question_repr(self): - question = r.DNSQuestion('irrelevant', const._TYPE_SRV, const._CLASS_IN | const._CLASS_UNIQUE) + question = r.DNSQuestion("irrelevant", const._TYPE_SRV, const._CLASS_IN | const._CLASS_UNIQUE) repr(question) assert not question != question def test_dns_service_repr(self): service = r.DNSService( - 'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'a' + "irrelevant", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 0, + 80, + "a", ) repr(service) def test_dns_record_abc(self): - record = r.DNSRecord('irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL) + record = r.DNSRecord("irrelevant", const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL) self.assertRaises(r.AbstractMethodException, record.__eq__, record) with pytest.raises((r.AbstractMethodException, TypeError)): record.write(None) # type: ignore[arg-type] - def test_dns_record_reset_ttl(self): - start = r.current_time_millis() - record = r.DNSRecord( - 'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, created=start - ) - later = start + 1000 - record2 = r.DNSRecord( - 'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, created=later - ) - now = r.current_time_millis() - - assert record.created != record2.created - assert record.get_remaining_ttl(now) != record2.get_remaining_ttl(now) - - record.reset_ttl(record2) - - assert record.ttl == record2.ttl - assert record.created == record2.created - assert record.get_remaining_ttl(now) == record2.get_remaining_ttl(now) - def test_service_info_dunder(self): type_ = "_test-srvc-type._tcp.local." 
name = "xxxyyy" @@ -114,7 +107,7 @@ def test_service_info_dunder(self): 80, 0, 0, - b'', + b"", "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")], ) @@ -142,14 +135,14 @@ def test_dns_outgoing_repr(self): repr(dns_outgoing) def test_dns_record_is_expired(self): - record = r.DNSRecord('irrelevant', const._TYPE_SRV, const._CLASS_IN, 8) + record = r.DNSRecord("irrelevant", const._TYPE_SRV, const._CLASS_IN, 8) now = current_time_millis() assert record.is_expired(now) is False assert record.is_expired(now + (8 / 2 * 1000)) is False assert record.is_expired(now + (8 * 1000)) is True def test_dns_record_is_stale(self): - record = r.DNSRecord('irrelevant', const._TYPE_SRV, const._CLASS_IN, 8) + record = r.DNSRecord("irrelevant", const._TYPE_SRV, const._CLASS_IN, 8) now = current_time_millis() assert record.is_stale(now) is False assert record.is_stale(now + (8 / 4.1 * 1000)) is False @@ -158,7 +151,7 @@ def test_dns_record_is_stale(self): def test_dns_record_is_recent(self): now = current_time_millis() - record = r.DNSRecord('irrelevant', const._TYPE_SRV, const._CLASS_IN, 8) + record = r.DNSRecord("irrelevant", const._TYPE_SRV, const._CLASS_IN, 8) assert record.is_recent(now + (8 / 4.2 * 1000)) is True assert record.is_recent(now + (8 / 3 * 1000)) is False assert record.is_recent(now + (8 / 2 * 1000)) is False @@ -168,8 +161,8 @@ def test_dns_record_is_recent(self): def test_dns_question_hashablity(): """Test DNSQuestions are hashable.""" - record1 = r.DNSQuestion('irrelevant', const._TYPE_A, const._CLASS_IN) - record2 = r.DNSQuestion('irrelevant', const._TYPE_A, const._CLASS_IN) + record1 = r.DNSQuestion("irrelevant", const._TYPE_A, const._CLASS_IN) + record2 = r.DNSQuestion("irrelevant", const._TYPE_A, const._CLASS_IN) record_set = {record1, record2} assert len(record_set) == 1 @@ -177,14 +170,14 @@ def test_dns_question_hashablity(): record_set.add(record1) assert len(record_set) == 1 - record3_dupe = r.DNSQuestion('irrelevant', const._TYPE_A, const._CLASS_IN) + 
record3_dupe = r.DNSQuestion("irrelevant", const._TYPE_A, const._CLASS_IN) assert record2 == record3_dupe assert record2.__hash__() == record3_dupe.__hash__() record_set.add(record3_dupe) assert len(record_set) == 1 - record4_dupe = r.DNSQuestion('notsame', const._TYPE_A, const._CLASS_IN) + record4_dupe = r.DNSQuestion("notsame", const._TYPE_A, const._CLASS_IN) assert record2 != record4_dupe assert record2.__hash__() != record4_dupe.__hash__() @@ -196,8 +189,8 @@ def test_dns_record_hashablity_does_not_consider_ttl(): """Test DNSRecord are hashable.""" # Verify the TTL is not considered in the hash - record1 = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, const._DNS_OTHER_TTL, b'same') - record2 = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, const._DNS_HOST_TTL, b'same') + record1 = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, const._DNS_OTHER_TTL, b"same") + record2 = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, const._DNS_HOST_TTL, b"same") record_set = {record1, record2} assert len(record_set) == 1 @@ -205,7 +198,34 @@ def test_dns_record_hashablity_does_not_consider_ttl(): record_set.add(record1) assert len(record_set) == 1 - record3_dupe = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, const._DNS_HOST_TTL, b'same') + record3_dupe = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, const._DNS_HOST_TTL, b"same") + assert record2 == record3_dupe + assert record2.__hash__() == record3_dupe.__hash__() + + record_set.add(record3_dupe) + assert len(record_set) == 1 + + +def test_dns_record_hashablity_does_not_consider_created(): + """Test DNSRecord are hashable and created is not considered.""" + + # Verify the TTL is not considered in the hash + record1 = r.DNSAddress( + "irrelevant", const._TYPE_A, const._CLASS_IN, const._DNS_HOST_TTL, b"same", created=1.0 + ) + record2 = r.DNSAddress( + "irrelevant", const._TYPE_A, const._CLASS_IN, const._DNS_HOST_TTL, b"same", created=2.0 + ) + + 
record_set = {record1, record2} + assert len(record_set) == 1 + + record_set.add(record1) + assert len(record_set) == 1 + + record3_dupe = r.DNSAddress( + "irrelevant", const._TYPE_A, const._CLASS_IN, const._DNS_HOST_TTL, b"same", created=3.0 + ) assert record2 == record3_dupe assert record2.__hash__() == record3_dupe.__hash__() @@ -218,9 +238,13 @@ def test_dns_record_hashablity_does_not_consider_unique(): # Verify the unique value is not considered in the hash record1 = r.DNSAddress( - 'irrelevant', const._TYPE_A, const._CLASS_IN | const._CLASS_UNIQUE, const._DNS_OTHER_TTL, b'same' + "irrelevant", + const._TYPE_A, + const._CLASS_IN | const._CLASS_UNIQUE, + const._DNS_OTHER_TTL, + b"same", ) - record2 = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, const._DNS_OTHER_TTL, b'same') + record2 = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, const._DNS_OTHER_TTL, b"same") assert record1.class_ == record2.class_ assert record1.__hash__() == record2.__hash__() @@ -230,10 +254,10 @@ def test_dns_record_hashablity_does_not_consider_unique(): def test_dns_address_record_hashablity(): """Test DNSAddress are hashable.""" - address1 = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 1, b'a') - address2 = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 1, b'b') - address3 = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 1, b'c') - address4 = r.DNSAddress('irrelevant', const._TYPE_AAAA, const._CLASS_IN, 1, b'c') + address1 = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 1, b"a") + address2 = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 1, b"b") + address3 = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 1, b"c") + address4 = r.DNSAddress("irrelevant", const._TYPE_AAAA, const._CLASS_IN, 1, b"c") record_set = {address1, address2, address3, address4} assert len(record_set) == 4 @@ -241,7 +265,7 @@ def test_dns_address_record_hashablity(): record_set.add(address1) assert len(record_set) 
== 4 - address3_dupe = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 1, b'c') + address3_dupe = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 1, b"c") record_set.add(address3_dupe) assert len(record_set) == 4 @@ -254,8 +278,8 @@ def test_dns_address_record_hashablity(): def test_dns_hinfo_record_hashablity(): """Test DNSHinfo are hashable.""" - hinfo1 = r.DNSHinfo('irrelevant', const._TYPE_HINFO, 0, 0, 'cpu1', 'os') - hinfo2 = r.DNSHinfo('irrelevant', const._TYPE_HINFO, 0, 0, 'cpu2', 'os') + hinfo1 = r.DNSHinfo("irrelevant", const._TYPE_HINFO, 0, 0, "cpu1", "os") + hinfo2 = r.DNSHinfo("irrelevant", const._TYPE_HINFO, 0, 0, "cpu2", "os") record_set = {hinfo1, hinfo2} assert len(record_set) == 2 @@ -263,7 +287,7 @@ def test_dns_hinfo_record_hashablity(): record_set.add(hinfo1) assert len(record_set) == 2 - hinfo2_dupe = r.DNSHinfo('irrelevant', const._TYPE_HINFO, 0, 0, 'cpu2', 'os') + hinfo2_dupe = r.DNSHinfo("irrelevant", const._TYPE_HINFO, 0, 0, "cpu2", "os") assert hinfo2 == hinfo2_dupe assert hinfo2.__hash__() == hinfo2_dupe.__hash__() @@ -273,8 +297,8 @@ def test_dns_hinfo_record_hashablity(): def test_dns_pointer_record_hashablity(): """Test DNSPointer are hashable.""" - ptr1 = r.DNSPointer('irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, '123') - ptr2 = r.DNSPointer('irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, '456') + ptr1 = r.DNSPointer("irrelevant", const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, "123") + ptr2 = r.DNSPointer("irrelevant", const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, "456") record_set = {ptr1, ptr2} assert len(record_set) == 2 @@ -282,7 +306,7 @@ def test_dns_pointer_record_hashablity(): record_set.add(ptr1) assert len(record_set) == 2 - ptr2_dupe = r.DNSPointer('irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, '456') + ptr2_dupe = r.DNSPointer("irrelevant", const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, "456") assert ptr2 
== ptr2 assert ptr2.__hash__() == ptr2_dupe.__hash__() @@ -292,18 +316,24 @@ def test_dns_pointer_record_hashablity(): def test_dns_pointer_comparison_is_case_insensitive(): """Test DNSPointer comparison is case insensitive.""" - ptr1 = r.DNSPointer('irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, '123') - ptr2 = r.DNSPointer('irrelevant'.upper(), const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, '123') + ptr1 = r.DNSPointer("irrelevant", const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, "123") + ptr2 = r.DNSPointer( + "irrelevant".upper(), + const._TYPE_PTR, + const._CLASS_IN, + const._DNS_OTHER_TTL, + "123", + ) assert ptr1 == ptr2 def test_dns_text_record_hashablity(): """Test DNSText are hashable.""" - text1 = r.DNSText('irrelevant', 0, 0, const._DNS_OTHER_TTL, b'12345678901') - text2 = r.DNSText('irrelevant', 1, 0, const._DNS_OTHER_TTL, b'12345678901') - text3 = r.DNSText('irrelevant', 0, 1, const._DNS_OTHER_TTL, b'12345678901') - text4 = r.DNSText('irrelevant', 0, 0, const._DNS_OTHER_TTL, b'ABCDEFGHIJK') + text1 = r.DNSText("irrelevant", 0, 0, const._DNS_OTHER_TTL, b"12345678901") + text2 = r.DNSText("irrelevant", 1, 0, const._DNS_OTHER_TTL, b"12345678901") + text3 = r.DNSText("irrelevant", 0, 1, const._DNS_OTHER_TTL, b"12345678901") + text4 = r.DNSText("irrelevant", 0, 0, const._DNS_OTHER_TTL, b"ABCDEFGHIJK") record_set = {text1, text2, text3, text4} @@ -312,7 +342,7 @@ def test_dns_text_record_hashablity(): record_set.add(text1) assert len(record_set) == 4 - text1_dupe = r.DNSText('irrelevant', 0, 0, const._DNS_OTHER_TTL, b'12345678901') + text1_dupe = r.DNSText("irrelevant", 0, 0, const._DNS_OTHER_TTL, b"12345678901") assert text1 == text1_dupe assert text1.__hash__() == text1_dupe.__hash__() @@ -322,10 +352,46 @@ def test_dns_text_record_hashablity(): def test_dns_service_record_hashablity(): """Test DNSService are hashable.""" - srv1 = r.DNSService('irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 
80, 'a') - srv2 = r.DNSService('irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 1, 80, 'a') - srv3 = r.DNSService('irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 81, 'a') - srv4 = r.DNSService('irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'ab') + srv1 = r.DNSService( + "irrelevant", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 0, + 80, + "a", + ) + srv2 = r.DNSService( + "irrelevant", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 1, + 80, + "a", + ) + srv3 = r.DNSService( + "irrelevant", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 0, + 81, + "a", + ) + srv4 = r.DNSService( + "irrelevant", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 0, + 80, + "ab", + ) record_set = {srv1, srv2, srv3, srv4} @@ -335,7 +401,14 @@ def test_dns_service_record_hashablity(): assert len(record_set) == 4 srv1_dupe = r.DNSService( - 'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'a' + "irrelevant", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 0, + 80, + "a", ) assert srv1 == srv1_dupe assert srv1.__hash__() == srv1_dupe.__hash__() @@ -347,21 +420,42 @@ def test_dns_service_record_hashablity(): def test_dns_service_server_key(): """Test DNSService server_key is lowercase.""" srv1 = r.DNSService( - 'X._tcp._http.local.', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'X.local.' + "X._tcp._http.local.", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 0, + 80, + "X.local.", ) - assert srv1.name == 'X._tcp._http.local.' - assert srv1.key == 'x._tcp._http.local.' - assert srv1.server == 'X.local.' - assert srv1.server_key == 'x.local.' + assert srv1.name == "X._tcp._http.local." + assert srv1.key == "x._tcp._http.local." + assert srv1.server == "X.local." + assert srv1.server_key == "x.local." 
def test_dns_service_server_comparison_is_case_insensitive(): """Test DNSService server comparison is case insensitive.""" srv1 = r.DNSService( - 'X._tcp._http.local.', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'X.local.' + "X._tcp._http.local.", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 0, + 80, + "X.local.", ) srv2 = r.DNSService( - 'X._tcp._http.local.', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'x.local.' + "X._tcp._http.local.", + const._TYPE_SRV, + const._CLASS_IN, + const._DNS_HOST_TTL, + 0, + 0, + 80, + "x.local.", ) assert srv1 == srv2 @@ -369,10 +463,20 @@ def test_dns_service_server_comparison_is_case_insensitive(): def test_dns_nsec_record_hashablity(): """Test DNSNsec are hashable.""" nsec1 = r.DNSNsec( - 'irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, 'irrelevant', [1, 2, 3] + "irrelevant", + const._TYPE_PTR, + const._CLASS_IN, + const._DNS_OTHER_TTL, + "irrelevant", + [1, 2, 3], ) nsec2 = r.DNSNsec( - 'irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, 'irrelevant', [1, 2] + "irrelevant", + const._TYPE_PTR, + const._CLASS_IN, + const._DNS_OTHER_TTL, + "irrelevant", + [1, 2], ) record_set = {nsec1, nsec2} @@ -382,7 +486,12 @@ def test_dns_nsec_record_hashablity(): assert len(record_set) == 2 nsec2_dupe = r.DNSNsec( - 'irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, 'irrelevant', [1, 2] + "irrelevant", + const._TYPE_PTR, + const._CLASS_IN, + const._DNS_OTHER_TTL, + "irrelevant", + [1, 2], ) assert nsec2 == nsec2_dupe assert nsec2.__hash__() == nsec2_dupe.__hash__() @@ -394,10 +503,10 @@ def test_dns_nsec_record_hashablity(): def test_rrset_does_not_consider_ttl(): """Test DNSRRSet does not consider the ttl in the hash.""" - longarec = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 100, b'same') - shortarec = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 10, b'same') - longaaaarec = 
r.DNSAddress('irrelevant', const._TYPE_AAAA, const._CLASS_IN, 100, b'same') - shortaaaarec = r.DNSAddress('irrelevant', const._TYPE_AAAA, const._CLASS_IN, 10, b'same') + longarec = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 100, b"same") + shortarec = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 10, b"same") + longaaaarec = r.DNSAddress("irrelevant", const._TYPE_AAAA, const._CLASS_IN, 100, b"same") + shortaaaarec = r.DNSAddress("irrelevant", const._TYPE_AAAA, const._CLASS_IN, 10, b"same") rrset = DNSRRSet([longarec, shortaaaarec]) @@ -406,10 +515,10 @@ def test_rrset_does_not_consider_ttl(): assert not rrset.suppresses(longaaaarec) assert rrset.suppresses(shortaaaarec) - verylongarec = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 1000, b'same') - longarec = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 100, b'same') - mediumarec = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 60, b'same') - shortarec = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 10, b'same') + verylongarec = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 1000, b"same") + longarec = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 100, b"same") + mediumarec = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 60, b"same") + shortarec = r.DNSAddress("irrelevant", const._TYPE_A, const._CLASS_IN, 10, b"same") rrset2 = DNSRRSet([mediumarec]) assert not rrset2.suppresses(verylongarec) diff --git a/tests/test_engine.py b/tests/test_engine.py index dc6674dd2..b7a94c866 100644 --- a/tests/test_engine.py +++ b/tests/test_engine.py @@ -1,12 +1,10 @@ -#!/usr/bin/env python +"""Unit tests for zeroconf._engine""" - -""" Unit tests for zeroconf._engine """ +from __future__ import annotations import asyncio import itertools import logging -from typing import Set from unittest.mock import patch import pytest @@ -15,7 +13,7 @@ from zeroconf import _engine, const from zeroconf.asyncio import 
AsyncZeroconf -log = logging.getLogger('zeroconf') +log = logging.getLogger("zeroconf") original_logging_level = logging.NOTSET @@ -35,22 +33,22 @@ def teardown_module(): @pytest.mark.asyncio async def test_reaper(): with patch.object(_engine, "_CACHE_CLEANUP_INTERVAL", 0.01): - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zeroconf = aiozc.zeroconf cache = zeroconf.cache original_entries = list(itertools.chain(*(cache.entries_with_name(name) for name in cache.names()))) - record_with_10s_ttl = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 10, b'a') - record_with_1s_ttl = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'b') + record_with_10s_ttl = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 10, b"a") + record_with_1s_ttl = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 1, b"b") zeroconf.cache.async_add_records([record_with_10s_ttl, record_with_1s_ttl]) question = r.DNSQuestion("_hap._tcp._local.", const._TYPE_PTR, const._CLASS_IN) now = r.current_time_millis() - other_known_answers: Set[r.DNSRecord] = { + other_known_answers: set[r.DNSRecord] = { r.DNSPointer( "_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN, 10000, - 'known-to-other._hap._tcp.local.', + "known-to-other._hap._tcp.local.", ) } zeroconf.question_history.add_question_at_time(question, now, other_known_answers) @@ -71,10 +69,10 @@ async def test_reaper(): async def test_reaper_aborts_when_done(): """Ensure cache cleanup stops when zeroconf is done.""" with patch.object(_engine, "_CACHE_CLEANUP_INTERVAL", 0.01): - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zeroconf = aiozc.zeroconf - record_with_10s_ttl = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 10, b'a') - record_with_1s_ttl = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'b') + record_with_10s_ttl = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 10, b"a") + record_with_1s_ttl = 
r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 1, b"b") zeroconf.cache.async_add_records([record_with_10s_ttl, record_with_1s_ttl]) assert zeroconf.cache.get(record_with_10s_ttl) is not None assert zeroconf.cache.get(record_with_1s_ttl) is not None diff --git a/tests/test_exceptions.py b/tests/test_exceptions.py index 6a37c6dbc..ab181db1f 100644 --- a/tests/test_exceptions.py +++ b/tests/test_exceptions.py @@ -1,16 +1,14 @@ -#!/usr/bin/env python +"""Unit tests for zeroconf._exceptions""" - -""" Unit tests for zeroconf._exceptions """ +from __future__ import annotations import logging -import unittest import unittest.mock import zeroconf as r from zeroconf import ServiceInfo, Zeroconf -log = logging.getLogger('zeroconf') +log = logging.getLogger("zeroconf") original_logging_level = logging.NOTSET @@ -30,7 +28,7 @@ class Exceptions(unittest.TestCase): @classmethod def setUpClass(cls): - cls.browser = Zeroconf(interfaces=['127.0.0.1']) + cls.browser = Zeroconf(interfaces=["127.0.0.1"]) @classmethod def tearDownClass(cls): @@ -42,58 +40,66 @@ def test_bad_service_info_name(self): def test_bad_service_names(self): bad_names_to_try = ( - '', - 'local', - '_tcp.local.', - '_udp.local.', - '._udp.local.', - '_@._tcp.local.', - '_A@._tcp.local.', - '_x--x._tcp.local.', - '_-x._udp.local.', - '_x-._tcp.local.', - '_22._udp.local.', - '_2-2._tcp.local.', - '\x00._x._udp.local.', + "", + "local", + "_tcp.local.", + "_udp.local.", + "._udp.local.", + "_@._tcp.local.", + "_A@._tcp.local.", + "_x--x._tcp.local.", + "_-x._udp.local.", + "_x-._tcp.local.", + "_22._udp.local.", + "_2-2._tcp.local.", + "\x00._x._udp.local.", ) for name in bad_names_to_try: - self.assertRaises(r.BadTypeInNameException, self.browser.get_service_info, name, 'x.' + name) + self.assertRaises( + r.BadTypeInNameException, + self.browser.get_service_info, + name, + "x." 
+ name, + ) def test_bad_local_names_for_get_service_info(self): bad_names_to_try = ( - 'homekitdev._nothttp._tcp.local.', - 'homekitdev._http._udp.local.', + "homekitdev._nothttp._tcp.local.", + "homekitdev._http._udp.local.", ) for name in bad_names_to_try: self.assertRaises( - r.BadTypeInNameException, self.browser.get_service_info, '_http._tcp.local.', name + r.BadTypeInNameException, + self.browser.get_service_info, + "_http._tcp.local.", + name, ) def test_good_instance_names(self): - assert r.service_type_name('.._x._tcp.local.') == '_x._tcp.local.' - assert r.service_type_name('x.y._http._tcp.local.') == '_http._tcp.local.' - assert r.service_type_name('1.2.3._mqtt._tcp.local.') == '_mqtt._tcp.local.' - assert r.service_type_name('x.sub._http._tcp.local.') == '_http._tcp.local.' + assert r.service_type_name(".._x._tcp.local.") == "_x._tcp.local." + assert r.service_type_name("x.y._http._tcp.local.") == "_http._tcp.local." + assert r.service_type_name("1.2.3._mqtt._tcp.local.") == "_mqtt._tcp.local." + assert r.service_type_name("x.sub._http._tcp.local.") == "_http._tcp.local." assert ( - r.service_type_name('6d86f882b90facee9170ad3439d72a4d6ee9f511._zget._http._tcp.local.') - == '_http._tcp.local.' + r.service_type_name("6d86f882b90facee9170ad3439d72a4d6ee9f511._zget._http._tcp.local.") + == "_http._tcp.local." 
) def test_good_instance_names_without_protocol(self): good_names_to_try = ( "Rachio-C73233.local.", - 'YeelightColorBulb-3AFD.local.', - 'YeelightTunableBulb-7220.local.', + "YeelightColorBulb-3AFD.local.", + "YeelightTunableBulb-7220.local.", "AlexanderHomeAssistant 74651D.local.", - 'iSmartGate-152.local.', - 'MyQ-FGA.local.', - 'lutron-02c4392a.local.', - 'WICED-hap-3E2734.local.', - 'MyHost.local.', - 'MyHost.sub.local.', + "iSmartGate-152.local.", + "MyQ-FGA.local.", + "lutron-02c4392a.local.", + "WICED-hap-3E2734.local.", + "MyHost.local.", + "MyHost.sub.local.", ) for name in good_names_to_try: - assert r.service_type_name(name, strict=False) == 'local.' + assert r.service_type_name(name, strict=False) == "local." for name in good_names_to_try: # Raises without strict=False @@ -101,48 +107,48 @@ def test_good_instance_names_without_protocol(self): def test_bad_types(self): bad_names_to_try = ( - '._x._tcp.local.', - 'a' * 64 + '._sub._http._tcp.local.', - 'a' * 62 + 'â._sub._http._tcp.local.', + "._x._tcp.local.", + "a" * 64 + "._sub._http._tcp.local.", + "a" * 62 + "â._sub._http._tcp.local.", ) for name in bad_names_to_try: self.assertRaises(r.BadTypeInNameException, r.service_type_name, name) def test_bad_sub_types(self): bad_names_to_try = ( - '_sub._http._tcp.local.', - '._sub._http._tcp.local.', - '\x7f._sub._http._tcp.local.', - '\x1f._sub._http._tcp.local.', + "_sub._http._tcp.local.", + "._sub._http._tcp.local.", + "\x7f._sub._http._tcp.local.", + "\x1f._sub._http._tcp.local.", ) for name in bad_names_to_try: self.assertRaises(r.BadTypeInNameException, r.service_type_name, name) def test_good_service_names(self): good_names_to_try = ( - ('_x._tcp.local.', '_x._tcp.local.'), - ('_x._udp.local.', '_x._udp.local.'), - ('_12345-67890-abc._udp.local.', '_12345-67890-abc._udp.local.'), - ('x._sub._http._tcp.local.', '_http._tcp.local.'), - ('a' * 63 + '._sub._http._tcp.local.', '_http._tcp.local.'), - ('a' * 61 + 'â._sub._http._tcp.local.', 
'_http._tcp.local.'), + ("_x._tcp.local.", "_x._tcp.local."), + ("_x._udp.local.", "_x._udp.local."), + ("_12345-67890-abc._udp.local.", "_12345-67890-abc._udp.local."), + ("x._sub._http._tcp.local.", "_http._tcp.local."), + ("a" * 63 + "._sub._http._tcp.local.", "_http._tcp.local."), + ("a" * 61 + "â._sub._http._tcp.local.", "_http._tcp.local."), ) for name, result in good_names_to_try: assert r.service_type_name(name) == result - assert r.service_type_name('_one_two._tcp.local.', strict=False) == '_one_two._tcp.local.' + assert r.service_type_name("_one_two._tcp.local.", strict=False) == "_one_two._tcp.local." def test_invalid_addresses(self): type_ = "_test-srvc-type._tcp.local." name = "xxxyyy" registration_name = f"{name}.{type_}" - bad = (b'127.0.0.1', b'::1') + bad = (b"127.0.0.1", b"::1") for addr in bad: self.assertRaisesRegex( TypeError, - 'Addresses must either ', + "Addresses must either ", ServiceInfo, type_, registration_name, diff --git a/tests/test_handlers.py b/tests/test_handlers.py index a13824e03..31354980a 100644 --- a/tests/test_handlers.py +++ b/tests/test_handlers.py @@ -1,7 +1,6 @@ -#!/usr/bin/env python +"""Unit tests for zeroconf._handlers""" - -""" Unit tests for zeroconf._handlers """ +from __future__ import annotations import asyncio import logging @@ -10,7 +9,7 @@ import time import unittest import unittest.mock -from typing import List, cast +from typing import cast from unittest.mock import patch import pytest @@ -26,7 +25,7 @@ from . import _clear_cache, _inject_response, has_working_ipv6 -log = logging.getLogger('zeroconf') +log = logging.getLogger("zeroconf") original_logging_level = logging.NOTSET @@ -44,14 +43,14 @@ def teardown_module(): class TestRegistrar(unittest.TestCase): def test_ttl(self): # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) # service definition type_ = "_test-srvc-type._tcp.local." 
name = "xxxyyy" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( type_, registration_name, @@ -68,16 +67,15 @@ def test_ttl(self): def get_ttl(record_type): if expected_ttl is not None: return expected_ttl - elif record_type in [const._TYPE_A, const._TYPE_SRV, const._TYPE_NSEC]: + if record_type in [const._TYPE_A, const._TYPE_SRV, const._TYPE_NSEC]: return const._DNS_HOST_TTL - else: - return const._DNS_OTHER_TTL + return const._DNS_OTHER_TTL def _process_outgoing_packet(out): """Sends an outgoing packet.""" nonlocal nbr_answers, nbr_additionals, nbr_authorities - for answer, time_ in out.answers: + for answer, _ in out.answers: nbr_answers += 1 assert answer.ttl == get_ttl(answer.type) for answer in out.additionals: @@ -110,7 +108,7 @@ def _process_outgoing_packet(out): assert question_answers _process_outgoing_packet(construct_outgoing_multicast_answers(question_answers.mcast_aggregate)) - # The additonals should all be suppresed since they are all in the answers section + # The additonals should all be suppressed since they are all in the answers section # There will be one NSEC additional to indicate the lack of AAAA record # assert nbr_answers == 4 and nbr_additionals == 1 and nbr_authorities == 0 @@ -164,7 +162,7 @@ def _process_outgoing_packet(out): def test_name_conflicts(self): # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) type_ = "_homeassistant._tcp.local." name = "Home" registration_name = f"{name}.{type_}" @@ -193,7 +191,7 @@ def test_name_conflicts(self): def test_register_and_lookup_type_by_uppercase_name(self): # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) type_ = "_mylowertype._tcp.local." 
name = "Home" registration_name = f"{name}.{type_}" @@ -225,16 +223,23 @@ def test_register_and_lookup_type_by_uppercase_name(self): def test_ptr_optimization(): # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) # service definition type_ = "_test-srvc-type._tcp.local." name = "xxxyyy" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) # register @@ -289,15 +294,15 @@ def test_ptr_optimization(): zc.close() -@unittest.skipIf(not has_working_ipv6(), 'Requires IPv6') -@unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled') +@unittest.skipIf(not has_working_ipv6(), "Requires IPv6") +@unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled") def test_any_query_for_ptr(): """Test that queries for ANY will return PTR records and the response is aggregated.""" - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) type_ = "_anyptr._tcp.local." name = "knownname" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} server_name = "ash-2.local." 
ipv6_address = socket.inet_pton(socket.AF_INET6, "2001:db8::1") info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, server_name, addresses=[ipv6_address]) @@ -318,15 +323,15 @@ def test_any_query_for_ptr(): zc.close() -@unittest.skipIf(not has_working_ipv6(), 'Requires IPv6') -@unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled') +@unittest.skipIf(not has_working_ipv6(), "Requires IPv6") +@unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled") def test_aaaa_query(): """Test that queries for AAAA records work and should respond right away.""" - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) type_ = "_knownaaaservice._tcp.local." name = "knownname" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} server_name = "ash-2.local." ipv6_address = socket.inet_pton(socket.AF_INET6, "2001:db8::1") info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, server_name, addresses=[ipv6_address]) @@ -345,15 +350,15 @@ def test_aaaa_query(): zc.close() -@unittest.skipIf(not has_working_ipv6(), 'Requires IPv6') -@unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled') +@unittest.skipIf(not has_working_ipv6(), "Requires IPv6") +@unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled") def test_aaaa_query_upper_case(): """Test that queries for AAAA records work and should respond right away with an upper case name.""" - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) type_ = "_knownaaaservice._tcp.local." name = "knownname" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} server_name = "ash-2.local." 
ipv6_address = socket.inet_pton(socket.AF_INET6, "2001:db8::1") info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, server_name, addresses=[ipv6_address]) @@ -372,20 +377,27 @@ def test_aaaa_query_upper_case(): zc.close() -@unittest.skipIf(not has_working_ipv6(), 'Requires IPv6') -@unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled') +@unittest.skipIf(not has_working_ipv6(), "Requires IPv6") +@unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled") def test_a_and_aaaa_record_fate_sharing(): """Test that queries for AAAA always return A records in the additionals and should respond right away.""" - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) type_ = "_a-and-aaaa-service._tcp.local." name = "knownname" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} server_name = "ash-2.local." ipv6_address = socket.inet_pton(socket.AF_INET6, "2001:db8::1") ipv4_address = socket.inet_aton("10.0.1.2") info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, server_name, addresses=[ipv6_address, ipv4_address] + type_, + registration_name, + 80, + 0, + 0, + desc, + server_name, + addresses=[ipv6_address, ipv4_address], ) aaaa_record = info.dns_addresses(version=r.IPVersion.V6Only)[0] a_record = info.dns_addresses(version=r.IPVersion.V4Only)[0] @@ -426,15 +438,22 @@ def test_a_and_aaaa_record_fate_sharing(): def test_unicast_response(): """Ensure we send a unicast response when the source port is not the MDNS port.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) # service definition type_ = "_test-srvc-type._tcp.local." 
name = "xxxyyy" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) # register zc.registry.async_add(info) @@ -478,15 +497,22 @@ def test_unicast_response(): async def test_probe_answered_immediately(): """Verify probes are responded to immediately.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) # service definition type_ = "_test-srvc-type._tcp.local." name = "xxxyyy" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) zc.registry.async_add(info) query = r.DNSOutgoing(const._FLAGS_QR_QUERY) @@ -522,15 +548,22 @@ async def test_probe_answered_immediately(): async def test_probe_answered_immediately_with_uppercase_name(): """Verify probes are responded to immediately with an uppercase name.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) # service definition type_ = "_test-srvc-type._tcp.local." 
name = "xxxyyy" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) zc.registry.async_add(info) query = r.DNSOutgoing(const._FLAGS_QR_QUERY) @@ -565,7 +598,7 @@ async def test_probe_answered_immediately_with_uppercase_name(): def test_qu_response(): """Handle multicast incoming with the QU bit set.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) # service definition type_ = "_test-srvc-type._tcp.local." @@ -573,9 +606,16 @@ def test_qu_response(): name = "xxxyyy" registration_name = f"{name}.{type_}" registration_name2 = f"{name}.{other_type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) info2 = ServiceInfo( other_type_, @@ -643,7 +683,8 @@ def _validate_complete_response(answers): assert not question_answers.mcast_aggregate _validate_complete_response(question_answers.mcast_now) - # With QU set and an authorative answer (probe) should respond to both unitcast and multicast since the response hasn't been seen since 75% of the ttl + # With QU set and an authoritative answer (probe) should respond to both unitcast + # and multicast since the response hasn't been seen since 75% of the ttl query = r.DNSOutgoing(const._FLAGS_QR_QUERY) question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN) question.unicast = True # Set the QU bit @@ -658,7 +699,8 @@ def _validate_complete_response(answers): _validate_complete_response(question_answers.mcast_now) 
_inject_response( - zc, r.DNSIncoming(construct_outgoing_multicast_answers(question_answers.mcast_now).packets()[0]) + zc, + r.DNSIncoming(construct_outgoing_multicast_answers(question_answers.mcast_now).packets()[0]), ) # With the cache repopulated; should respond to only unicast when the answer has been recently multicast query = r.DNSOutgoing(const._FLAGS_QR_QUERY) @@ -680,20 +722,27 @@ def _validate_complete_response(answers): def test_known_answer_supression(): - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) type_ = "_knownanswersv8._tcp.local." name = "knownname" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} server_name = "ash-2.local." info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + server_name, + addresses=[socket.inet_aton("10.0.1.2")], ) zc.registry.async_add(info) now = current_time_millis() _clear_cache(zc) - # Test PTR supression + # Test PTR suppression generated = r.DNSOutgoing(const._FLAGS_QR_QUERY) question = r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN) generated.add_question(question) @@ -717,7 +766,7 @@ def test_known_answer_supression(): assert not question_answers.mcast_aggregate assert not question_answers.mcast_aggregate_last_second - # Test A supression + # Test A suppression generated = r.DNSOutgoing(const._FLAGS_QR_QUERY) question = r.DNSQuestion(server_name, const._TYPE_A, const._CLASS_IN) generated.add_question(question) @@ -752,13 +801,13 @@ def test_known_answer_supression(): question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False) assert question_answers assert not question_answers.ucast - expected_nsec_record = cast(r.DNSNsec, list(question_answers.mcast_now)[0]) + expected_nsec_record = cast(r.DNSNsec, next(iter(question_answers.mcast_now))) assert const._TYPE_A 
not in expected_nsec_record.rdtypes assert const._TYPE_AAAA in expected_nsec_record.rdtypes assert not question_answers.mcast_aggregate assert not question_answers.mcast_aggregate_last_second - # Test SRV supression + # Test SRV suppression generated = r.DNSOutgoing(const._FLAGS_QR_QUERY) question = r.DNSQuestion(registration_name, const._TYPE_SRV, const._CLASS_IN) generated.add_question(question) @@ -782,7 +831,7 @@ def test_known_answer_supression(): assert not question_answers.mcast_aggregate assert not question_answers.mcast_aggregate_last_second - # Test TXT supression + # Test TXT suppression generated = r.DNSOutgoing(const._FLAGS_QR_QUERY) question = r.DNSQuestion(registration_name, const._TYPE_TXT, const._CLASS_IN) generated.add_question(question) @@ -812,7 +861,7 @@ def test_known_answer_supression(): def test_multi_packet_known_answer_supression(): - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) type_ = "_handlermultis._tcp.local." name = "knownname" name2 = "knownname2" @@ -822,19 +871,40 @@ def test_multi_packet_known_answer_supression(): registration2_name = f"{name2}.{type_}" registration3_name = f"{name3}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} server_name = "ash-2.local." server_name2 = "ash-3.local." server_name3 = "ash-4.local." 
info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + server_name, + addresses=[socket.inet_aton("10.0.1.2")], ) info2 = ServiceInfo( - type_, registration2_name, 80, 0, 0, desc, server_name2, addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration2_name, + 80, + 0, + 0, + desc, + server_name2, + addresses=[socket.inet_aton("10.0.1.2")], ) info3 = ServiceInfo( - type_, registration3_name, 80, 0, 0, desc, server_name3, addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration3_name, + 80, + 0, + 0, + desc, + server_name3, + addresses=[socket.inet_aton("10.0.1.2")], ) zc.registry.async_add(info) zc.registry.async_add(info2) @@ -842,7 +912,7 @@ def test_multi_packet_known_answer_supression(): now = current_time_millis() _clear_cache(zc) - # Test PTR supression + # Test PTR suppression generated = r.DNSOutgoing(const._FLAGS_QR_QUERY) question = r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN) generated.add_question(question) @@ -867,30 +937,44 @@ def test_multi_packet_known_answer_supression(): def test_known_answer_supression_service_type_enumeration_query(): - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) type_ = "_otherknown._tcp.local." name = "knownname" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} server_name = "ash-2.local." info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + server_name, + addresses=[socket.inet_aton("10.0.1.2")], ) zc.registry.async_add(info) type_2 = "_otherknown2._tcp.local." name = "knownname" registration_name2 = f"{name}.{type_2}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} server_name2 = "ash-3.local." 
info2 = ServiceInfo( - type_2, registration_name2, 80, 0, 0, desc, server_name2, addresses=[socket.inet_aton("10.0.1.2")] + type_2, + registration_name2, + 80, + 0, + 0, + desc, + server_name2, + addresses=[socket.inet_aton("10.0.1.2")], ) zc.registry.async_add(info2) now = current_time_millis() _clear_cache(zc) - # Test PTR supression + # Test PTR suppression generated = r.DNSOutgoing(const._FLAGS_QR_QUERY) question = r.DNSQuestion(const._SERVICE_TYPE_ENUMERATION_NAME, const._TYPE_PTR, const._CLASS_IN) generated.add_question(question) @@ -940,29 +1024,43 @@ def test_known_answer_supression_service_type_enumeration_query(): def test_upper_case_enumeration_query(): - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) type_ = "_otherknown._tcp.local." name = "knownname" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} server_name = "ash-2.local." info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + server_name, + addresses=[socket.inet_aton("10.0.1.2")], ) zc.registry.async_add(info) type_2 = "_otherknown2._tcp.local." name = "knownname" registration_name2 = f"{name}.{type_2}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} server_name2 = "ash-3.local." 
info2 = ServiceInfo( - type_2, registration_name2, 80, 0, 0, desc, server_name2, addresses=[socket.inet_aton("10.0.1.2")] + type_2, + registration_name2, + 80, + 0, + 0, + desc, + server_name2, + addresses=[socket.inet_aton("10.0.1.2")], ) zc.registry.async_add(info2) _clear_cache(zc) - # Test PTR supression + # Test PTR suppression generated = r.DNSOutgoing(const._FLAGS_QR_QUERY) question = r.DNSQuestion(const._SERVICE_TYPE_ENUMERATION_NAME.upper(), const._TYPE_PTR, const._CLASS_IN) generated.add_question(question) @@ -980,7 +1078,7 @@ def test_upper_case_enumeration_query(): def test_enumeration_query_with_no_registered_services(): - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) _clear_cache(zc) generated = r.DNSOutgoing(const._FLAGS_QR_QUERY) question = r.DNSQuestion(const._SERVICE_TYPE_ENUMERATION_NAME.upper(), const._TYPE_PTR, const._CLASS_IN) @@ -998,26 +1096,40 @@ def test_enumeration_query_with_no_registered_services(): async def test_qu_response_only_sends_additionals_if_sends_answer(): """Test that a QU response does not send additionals unless it sends the answer as well.""" # instantiate a zeroconf instance - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zc = aiozc.zeroconf type_ = "_addtest1._tcp.local." name = "knownname" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} server_name = "ash-2.local." info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + server_name, + addresses=[socket.inet_aton("10.0.1.2")], ) zc.registry.async_add(info) type_2 = "_addtest2._tcp.local." name = "knownname" registration_name2 = f"{name}.{type_2}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} server_name2 = "ash-3.local." 
info2 = ServiceInfo( - type_2, registration_name2, 80, 0, 0, desc, server_name2, addresses=[socket.inet_aton("10.0.1.2")] + type_2, + registration_name2, + 80, + 0, + 0, + desc, + server_name2, + addresses=[socket.inet_aton("10.0.1.2")], ) zc.registry.async_add(info2) @@ -1028,10 +1140,9 @@ async def test_qu_response_only_sends_additionals_if_sends_answer(): # Add the A record to the cache with 50% ttl remaining a_record = info.dns_addresses()[0] - a_record.set_created_ttl(current_time_millis() - (a_record.ttl * 1000 / 2), a_record.ttl) + zc.cache._async_set_created_ttl(a_record, current_time_millis() - (a_record.ttl * 1000 / 2), a_record.ttl) assert not a_record.is_recent(current_time_millis()) info._dns_address_cache = None # we are mutating the record so clear the cache - zc.cache.async_add_records([a_record]) # With QU should respond to only unicast when the answer has been recently multicast # even if the additional has not been recently multicast @@ -1079,9 +1190,10 @@ async def test_qu_response_only_sends_additionals_if_sends_answer(): # Remove the 100% PTR record and add a 50% PTR record zc.cache.async_remove_records([ptr_record]) - ptr_record.set_created_ttl(current_time_millis() - (ptr_record.ttl * 1000 / 2), ptr_record.ttl) + zc.cache._async_set_created_ttl( + ptr_record, current_time_millis() - (ptr_record.ttl * 1000 / 2), ptr_record.ttl + ) assert not ptr_record.is_recent(current_time_millis()) - zc.cache.async_add_records([ptr_record]) # With QU should respond to only multicast since the has less # than 75% of its ttl remaining query = r.DNSOutgoing(const._FLAGS_QR_QUERY) @@ -1104,7 +1216,8 @@ async def test_qu_response_only_sends_additionals_if_sends_answer(): assert ptr_record in question_answers.mcast_now # Ask 2 QU questions, with info the PTR is at 50%, with info2 the PTR is at 100% - # We should get back a unicast reply for info2, but info should be multicasted since its within 75% of its TTL + # We should get back a unicast reply for info2, but 
info should be + # multicasted since its within 75% of its TTL # With QU should respond to only multicast since the has less # than 75% of its ttl remaining query = r.DNSOutgoing(const._FLAGS_QR_QUERY) @@ -1149,16 +1262,23 @@ async def test_qu_response_only_sends_additionals_if_sends_answer(): async def test_cache_flush_bit(): """Test that the cache flush bit sets the TTL to one for matching records.""" # instantiate a zeroconf instance - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zc = aiozc.zeroconf type_ = "_cacheflush._tcp.local." name = "knownname" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} server_name = "server-uu1.local." info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + server_name, + addresses=[socket.inet_aton("10.0.1.2")], ) a_record = info.dns_addresses()[0] zc.cache.async_add_records([info.dns_pointer(), a_record, info.dns_text(), info.dns_service()]) @@ -1193,10 +1313,13 @@ async def test_cache_flush_bit(): for record in new_records: assert zc.cache.async_get_unique(record) is not None - cached_records = [zc.cache.async_get_unique(record) for record in new_records] - for cached_record in cached_records: - assert cached_record is not None - cached_record.created = current_time_millis() - 1500 + cached_record_group = [ + zc.cache.async_all_by_details(record.name, record.type, record.class_) for record in new_records + ] + for cached_records in cached_record_group: + for cached_record in cached_records: + assert cached_record is not None + cached_record.created = current_time_millis() - 1500 fresh_address = socket.inet_aton("4.4.4.4") info.addresses = [fresh_address] @@ -1206,9 +1329,18 @@ async def test_cache_flush_bit(): out.add_answer_at_time(answer, 0) for packet in out.packets(): 
zc.record_manager.async_updates_from_response(r.DNSIncoming(packet)) - for cached_record in cached_records: - assert cached_record is not None - assert cached_record.ttl == 1 + + cached_record_group = [ + zc.cache.async_all_by_details(record.name, record.type, record.class_) for record in new_records + ] + for cached_records in cached_record_group: + for cached_record in cached_records: + # the new record should not be set to 1 + if cached_record == answer: + assert cached_record.ttl != 1 + continue + assert cached_record is not None + assert cached_record.ttl == 1 for entry in zc.cache.async_all_by_details(server_name, const._TYPE_A, const._CLASS_IN): assert isinstance(entry, r.DNSAddress) @@ -1233,24 +1365,31 @@ async def test_cache_flush_bit(): async def test_record_update_manager_add_listener_callsback_existing_records(): """Test that the RecordUpdateManager will callback existing records.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zc: Zeroconf = aiozc.zeroconf updated = [] class MyListener(r.RecordUpdateListener): """A RecordUpdateListener that does not implement update_records.""" - def async_update_records(self, zc: 'Zeroconf', now: float, records: List[r.RecordUpdate]) -> None: + def async_update_records(self, zc: Zeroconf, now: float, records: list[r.RecordUpdate]) -> None: """Update multiple records in one shot.""" updated.extend(records) type_ = "_cacheflush._tcp.local." name = "knownname" registration_name = f"{name}.{type_}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} server_name = "server-uu1.local." 
info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + server_name, + addresses=[socket.inet_aton("10.0.1.2")], ) a_record = info.dns_addresses()[0] ptr_record = info.dns_pointer() @@ -1278,7 +1417,7 @@ def async_update_records(self, zc: 'Zeroconf', now: float, records: List[r.Recor @pytest.mark.asyncio async def test_questions_query_handler_populates_the_question_history_from_qm_questions(): - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zc = aiozc.zeroconf now = current_time_millis() _clear_cache(zc) @@ -1301,7 +1440,11 @@ async def test_questions_query_handler_populates_the_question_history_from_qm_qu question = r.DNSQuestion("_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN) question.unicast = False known_answer = r.DNSPointer( - "_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN, 10000, 'known-to-other._hap._tcp.local.' + "_hap._tcp.local.", + const._TYPE_PTR, + const._CLASS_IN, + 10000, + "known-to-other._hap._tcp.local.", ) generated.add_question(question) generated.add_answer_at_time(known_answer, 0) @@ -1320,7 +1463,7 @@ async def test_questions_query_handler_populates_the_question_history_from_qm_qu @pytest.mark.asyncio async def test_questions_query_handler_does_not_put_qu_questions_in_history(): - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zc = aiozc.zeroconf now = current_time_millis() _clear_cache(zc) @@ -1339,7 +1482,11 @@ async def test_questions_query_handler_does_not_put_qu_questions_in_history(): question = r.DNSQuestion("_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN) question.unicast = True known_answer = r.DNSPointer( - "_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN, 10000, 'notqu._hap._tcp.local.' 
+ "_hap._tcp.local.", + const._TYPE_PTR, + const._CLASS_IN, + 10000, + "notqu._hap._tcp.local.", ) generated.add_question(question) generated.add_answer_at_time(known_answer, 0) @@ -1359,13 +1506,13 @@ async def test_questions_query_handler_does_not_put_qu_questions_in_history(): @pytest.mark.asyncio async def test_guard_against_low_ptr_ttl(): - """Ensure we enforce a minimum for PTR record ttls to avoid excessive refresh queries from ServiceBrowsers. + """Ensure we enforce a min for PTR record ttls to avoid excessive refresh queries from ServiceBrowsers. Some poorly designed IoT devices can set excessively low PTR TTLs would will cause ServiceBrowsers to flood the network with excessive refresh queries. """ - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zc = aiozc.zeroconf # Apple uses a 15s minimum TTL, however we do not have the same # level of rate limit and safe guards so we use 1/4 of the recommended value @@ -1374,21 +1521,21 @@ async def test_guard_against_low_ptr_ttl(): const._TYPE_PTR, const._CLASS_IN | const._CLASS_UNIQUE, 2, - 'low.local.', + "low.local.", ) answer_with_normal_ttl = r.DNSPointer( "myservicelow_tcp._tcp.local.", const._TYPE_PTR, const._CLASS_IN | const._CLASS_UNIQUE, const._DNS_OTHER_TTL, - 'normal.local.', + "normal.local.", ) good_bye_answer = r.DNSPointer( "myservicelow_tcp._tcp.local.", const._TYPE_PTR, const._CLASS_IN | const._CLASS_UNIQUE, 0, - 'goodbye.local.', + "goodbye.local.", ) # TTL should be adjusted to a safe value response = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) @@ -1411,21 +1558,21 @@ async def test_guard_against_low_ptr_ttl(): @pytest.mark.asyncio async def test_duplicate_goodbye_answers_in_packet(): """Ensure we do not throw an exception when there are duplicate goodbye records in a packet.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zc = aiozc.zeroconf answer_with_normal_ttl = r.DNSPointer( 
"myservicelow_tcp._tcp.local.", const._TYPE_PTR, const._CLASS_IN | const._CLASS_UNIQUE, const._DNS_OTHER_TTL, - 'host.local.', + "host.local.", ) good_bye_answer = r.DNSPointer( "myservicelow_tcp._tcp.local.", const._TYPE_PTR, const._CLASS_IN | const._CLASS_UNIQUE, 0, - 'host.local.', + "host.local.", ) response = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) response.add_answer_at_time(answer_with_normal_ttl, 0) @@ -1442,12 +1589,12 @@ async def test_duplicate_goodbye_answers_in_packet(): @pytest.mark.asyncio async def test_response_aggregation_timings(run_isolated): - """Verify multicast respones are aggregated.""" + """Verify multicast responses are aggregated.""" type_ = "_mservice._tcp.local." type_2 = "_mservice2._tcp.local." type_3 = "_mservice3._tcp.local." - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) await aiozc.zeroconf.async_wait_for_start() name = "xxxyyy" @@ -1455,15 +1602,36 @@ async def test_response_aggregation_timings(run_isolated): registration_name2 = f"{name}.{type_2}" registration_name3 = f"{name}.{type_3}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) info2 = ServiceInfo( - type_2, registration_name2, 80, 0, 0, desc, "ash-4.local.", addresses=[socket.inet_aton("10.0.1.3")] + type_2, + registration_name2, + 80, + 0, + 0, + desc, + "ash-4.local.", + addresses=[socket.inet_aton("10.0.1.3")], ) info3 = ServiceInfo( - type_3, registration_name3, 80, 0, 0, desc, "ash-4.local.", addresses=[socket.inet_aton("10.0.1.3")] + type_3, + registration_name3, + 80, + 0, + 0, + desc, + "ash-4.local.", + addresses=[socket.inet_aton("10.0.1.3")], ) aiozc.zeroconf.registry.async_add(info) aiozc.zeroconf.registry.async_add(info2) @@ -1489,9 +1657,9 @@ async def 
test_response_aggregation_timings(run_isolated): protocol = zc.engine.protocols[0] with patch.object(aiozc.zeroconf, "async_send") as send_mock: - protocol.datagram_received(query.packets()[0], ('127.0.0.1', const._MDNS_PORT)) - protocol.datagram_received(query2.packets()[0], ('127.0.0.1', const._MDNS_PORT)) - protocol.datagram_received(query.packets()[0], ('127.0.0.1', const._MDNS_PORT)) + protocol.datagram_received(query.packets()[0], ("127.0.0.1", const._MDNS_PORT)) + protocol.datagram_received(query2.packets()[0], ("127.0.0.1", const._MDNS_PORT)) + protocol.datagram_received(query.packets()[0], ("127.0.0.1", const._MDNS_PORT)) await asyncio.sleep(0.7) # Should aggregate into a single answer with up to a 500ms + 120ms delay @@ -1504,7 +1672,7 @@ async def test_response_aggregation_timings(run_isolated): assert info2.dns_pointer() in incoming.answers() send_mock.reset_mock() - protocol.datagram_received(query3.packets()[0], ('127.0.0.1', const._MDNS_PORT)) + protocol.datagram_received(query3.packets()[0], ("127.0.0.1", const._MDNS_PORT)) await asyncio.sleep(0.3) # Should send within 120ms since there are no other @@ -1520,7 +1688,7 @@ async def test_response_aggregation_timings(run_isolated): # Because the response was sent in the last second we need to make # sure the next answer is delayed at least a second aiozc.zeroconf.engine.protocols[0].datagram_received( - query4.packets()[0], ('127.0.0.1', const._MDNS_PORT) + query4.packets()[0], ("127.0.0.1", const._MDNS_PORT) ) await asyncio.sleep(0.5) @@ -1548,15 +1716,22 @@ async def test_response_aggregation_timings_multiple(run_isolated, disable_dupli 620ms is the maximum random delay of 120ms and 500ms additional for aggregation.""" type_2 = "_mservice2._tcp.local." 
- aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) await aiozc.zeroconf.async_wait_for_start() name = "xxxyyy" registration_name2 = f"{name}.{type_2}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info2 = ServiceInfo( - type_2, registration_name2, 80, 0, 0, desc, "ash-4.local.", addresses=[socket.inet_aton("10.0.1.3")] + type_2, + registration_name2, + 80, + 0, + 0, + desc, + "ash-4.local.", + addresses=[socket.inet_aton("10.0.1.3")], ) aiozc.zeroconf.registry.async_add(info2) @@ -1569,7 +1744,7 @@ async def test_response_aggregation_timings_multiple(run_isolated, disable_dupli with patch.object(aiozc.zeroconf, "async_send") as send_mock: send_mock.reset_mock() - protocol.datagram_received(query2.packets()[0], ('127.0.0.1', const._MDNS_PORT)) + protocol.datagram_received(query2.packets()[0], ("127.0.0.1", const._MDNS_PORT)) protocol.last_time = 0 # manually reset the last time to avoid duplicate packet suppression await asyncio.sleep(0.2) calls = send_mock.mock_calls @@ -1580,7 +1755,7 @@ async def test_response_aggregation_timings_multiple(run_isolated, disable_dupli assert info2.dns_pointer() in incoming.answers() send_mock.reset_mock() - protocol.datagram_received(query2.packets()[0], ('127.0.0.1', const._MDNS_PORT)) + protocol.datagram_received(query2.packets()[0], ("127.0.0.1", const._MDNS_PORT)) protocol.last_time = 0 # manually reset the last time to avoid duplicate packet suppression await asyncio.sleep(1.2) calls = send_mock.mock_calls @@ -1591,9 +1766,9 @@ async def test_response_aggregation_timings_multiple(run_isolated, disable_dupli assert info2.dns_pointer() in incoming.answers() send_mock.reset_mock() - protocol.datagram_received(query2.packets()[0], ('127.0.0.1', const._MDNS_PORT)) + protocol.datagram_received(query2.packets()[0], ("127.0.0.1", const._MDNS_PORT)) protocol.last_time = 0 # manually reset the last time to avoid duplicate packet suppression - 
protocol.datagram_received(query2.packets()[0], ('127.0.0.1', const._MDNS_PORT)) + protocol.datagram_received(query2.packets()[0], ("127.0.0.1", const._MDNS_PORT)) protocol.last_time = 0 # manually reset the last time to avoid duplicate packet suppression # The delay should increase with two packets and # 900ms is beyond the maximum aggregation delay @@ -1636,23 +1811,59 @@ async def test_response_aggregation_random_delay(): registration_name4 = f"{name}.{type_4}" registration_name5 = f"{name}.{type_5}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, "ash-1.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + "ash-1.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) info2 = ServiceInfo( - type_2, registration_name2, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.3")] + type_2, + registration_name2, + 80, + 0, + 0, + desc, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.3")], ) info3 = ServiceInfo( - type_3, registration_name3, 80, 0, 0, desc, "ash-3.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_3, + registration_name3, + 80, + 0, + 0, + desc, + "ash-3.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) info4 = ServiceInfo( - type_4, registration_name4, 80, 0, 0, desc, "ash-4.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_4, + registration_name4, + 80, + 0, + 0, + desc, + "ash-4.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) info5 = ServiceInfo( - type_5, registration_name5, 80, 0, 0, desc, "ash-5.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_5, + registration_name5, + 80, + 0, + 0, + desc, + "ash-5.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) mocked_zc = unittest.mock.MagicMock() + mocked_zc.loop = asyncio.get_running_loop() outgoing_queue = MulticastOutgoingQueue(mocked_zc, 0, 500) now = current_time_millis() @@ -1698,14 +1909,29 @@ async def 
test_future_answers_are_removed_on_send(): registration_name = f"{name}.{type_}" registration_name2 = f"{name}.{type_2}" - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info = ServiceInfo( - type_, registration_name, 80, 0, 0, desc, "ash-1.local.", addresses=[socket.inet_aton("10.0.1.2")] + type_, + registration_name, + 80, + 0, + 0, + desc, + "ash-1.local.", + addresses=[socket.inet_aton("10.0.1.2")], ) info2 = ServiceInfo( - type_2, registration_name2, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.3")] + type_2, + registration_name2, + 80, + 0, + 0, + desc, + "ash-2.local.", + addresses=[socket.inet_aton("10.0.1.3")], ) mocked_zc = unittest.mock.MagicMock() + mocked_zc.loop = asyncio.get_running_loop() outgoing_queue = MulticastOutgoingQueue(mocked_zc, 0, 0) now = current_time_millis() @@ -1735,7 +1961,7 @@ async def test_future_answers_are_removed_on_send(): # The answer should get removed because we just sent it assert info.dns_pointer() not in outgoing_queue.queue[0].answers - # But the one we have not sent yet shoudl still go out later + # But the one we have not sent yet should still go out later assert info2.dns_pointer() in outgoing_queue.queue[0].answers @@ -1743,14 +1969,14 @@ async def test_future_answers_are_removed_on_send(): async def test_add_listener_warns_when_not_using_record_update_listener(caplog): """Log when a listener is added that is not using RecordUpdateListener as a base class.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zc: Zeroconf = aiozc.zeroconf updated = [] class MyListener: """A RecordUpdateListener that does not implement update_records.""" - def async_update_records(self, zc: 'Zeroconf', now: float, records: List[r.RecordUpdate]) -> None: + def async_update_records(self, zc: Zeroconf, now: float, records: list[r.RecordUpdate]) -> None: """Update multiple records in one shot.""" updated.extend(records) @@ -1758,7 +1984,7 @@ def 
async_update_records(self, zc: 'Zeroconf', now: float, records: List[r.Recor await asyncio.sleep(0) # flush out any call soons assert ( "listeners passed to async_add_listener must inherit from RecordUpdateListener" in caplog.text - or "TypeError: Argument \'listener\' has incorrect type" in caplog.text + or "TypeError: Argument 'listener' has incorrect type" in caplog.text ) await aiozc.async_close() @@ -1768,7 +1994,7 @@ def async_update_records(self, zc: 'Zeroconf', now: float, records: List[r.Recor async def test_async_updates_iteration_safe(): """Ensure we can safely iterate over the async_updates.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zc: Zeroconf = aiozc.zeroconf updated = [] good_bye_answer = r.DNSPointer( @@ -1776,13 +2002,13 @@ async def test_async_updates_iteration_safe(): const._TYPE_PTR, const._CLASS_IN | const._CLASS_UNIQUE, 0, - 'goodbye.local.', + "goodbye.local.", ) class OtherListener(r.RecordUpdateListener): """A RecordUpdateListener that does not implement update_records.""" - def async_update_records(self, zc: 'Zeroconf', now: float, records: List[r.RecordUpdate]) -> None: + def async_update_records(self, zc: Zeroconf, now: float, records: list[r.RecordUpdate]) -> None: """Update multiple records in one shot.""" updated.extend(records) @@ -1791,7 +2017,7 @@ def async_update_records(self, zc: 'Zeroconf', now: float, records: List[r.Recor class ListenerThatAddsListener(r.RecordUpdateListener): """A RecordUpdateListener that does not implement update_records.""" - def async_update_records(self, zc: 'Zeroconf', now: float, records: List[r.RecordUpdate]) -> None: + def async_update_records(self, zc: Zeroconf, now: float, records: list[r.RecordUpdate]) -> None: """Update multiple records in one shot.""" updated.extend(records) zc.async_add_listener(other, None) @@ -1812,7 +2038,7 @@ def async_update_records(self, zc: 'Zeroconf', now: float, records: List[r.Recor async def 
test_async_updates_complete_iteration_safe(): """Ensure we can safely iterate over the async_updates_complete.""" - aiozc = AsyncZeroconf(interfaces=['127.0.0.1']) + aiozc = AsyncZeroconf(interfaces=["127.0.0.1"]) zc: Zeroconf = aiozc.zeroconf class OtherListener(r.RecordUpdateListener): diff --git a/tests/test_history.py b/tests/test_history.py index fca57be2a..e9254168e 100644 --- a/tests/test_history.py +++ b/tests/test_history.py @@ -1,12 +1,9 @@ -#!/usr/bin/env python - - """Unit tests for _history.py.""" -from typing import Set +from __future__ import annotations import zeroconf as r -import zeroconf.const as const +from zeroconf import const from zeroconf._history import QuestionHistory @@ -15,14 +12,22 @@ def test_question_suppression(): question = r.DNSQuestion("_hap._tcp._local.", const._TYPE_PTR, const._CLASS_IN) now = r.current_time_millis() - other_known_answers: Set[r.DNSRecord] = { + other_known_answers: set[r.DNSRecord] = { r.DNSPointer( - "_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN, 10000, 'known-to-other._hap._tcp.local.' + "_hap._tcp.local.", + const._TYPE_PTR, + const._CLASS_IN, + 10000, + "known-to-other._hap._tcp.local.", ) } - our_known_answers: Set[r.DNSRecord] = { + our_known_answers: set[r.DNSRecord] = { r.DNSPointer( - "_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN, 10000, 'known-to-us._hap._tcp.local.' 
+ "_hap._tcp.local.", + const._TYPE_PTR, + const._CLASS_IN, + 10000, + "known-to-us._hap._tcp.local.", ) } @@ -49,13 +54,13 @@ def test_question_expire(): now = r.current_time_millis() question = r.DNSQuestion("_hap._tcp._local.", const._TYPE_PTR, const._CLASS_IN) - other_known_answers: Set[r.DNSRecord] = { + other_known_answers: set[r.DNSRecord] = { r.DNSPointer( "_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN, 10000, - 'known-to-other._hap._tcp.local.', + "known-to-other._hap._tcp.local.", created=now, ) } diff --git a/tests/test_init.py b/tests/test_init.py index 1d1f7086b..5ccb9ef63 100644 --- a/tests/test_init.py +++ b/tests/test_init.py @@ -1,12 +1,10 @@ -#!/usr/bin/env python +"""Unit tests for zeroconf.py""" - -""" Unit tests for zeroconf.py """ +from __future__ import annotations import logging import socket import time -import unittest import unittest.mock from unittest.mock import patch @@ -15,7 +13,7 @@ from . import _inject_responses -log = logging.getLogger('zeroconf') +log = logging.getLogger("zeroconf") original_logging_level = logging.NOTSET @@ -34,27 +32,29 @@ class Names(unittest.TestCase): def test_long_name(self): generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) question = r.DNSQuestion( - "this.is.a.very.long.name.with.lots.of.parts.in.it.local.", const._TYPE_SRV, const._CLASS_IN + "this.is.a.very.long.name.with.lots.of.parts.in.it.local.", + const._TYPE_SRV, + const._CLASS_IN, ) generated.add_question(question) r.DNSIncoming(generated.packets()[0]) def test_exceedingly_long_name(self): generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) - name = "%slocal." % ("part." * 1000) + name = f"{'part.' * 1000}local." question = r.DNSQuestion(name, const._TYPE_SRV, const._CLASS_IN) generated.add_question(question) r.DNSIncoming(generated.packets()[0]) def test_extra_exceedingly_long_name(self): generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) - name = "%slocal." % ("part." * 4000) + name = f"{'part.' * 4000}local." 
question = r.DNSQuestion(name, const._TYPE_SRV, const._CLASS_IN) generated.add_question(question) r.DNSIncoming(generated.packets()[0]) def test_exceedingly_long_name_part(self): - name = "%s.local." % ("a" * 1000) + name = f"{'a' * 1000}.local." generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) question = r.DNSQuestion(name, const._TYPE_SRV, const._CLASS_IN) generated.add_question(question) @@ -70,11 +70,11 @@ def test_same_name(self): def test_verify_name_change_with_lots_of_names(self): # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) # create a bunch of servers type_ = "_my-service._tcp.local." - name = 'a wonderful service' + name = "a wonderful service" server_count = 300 self.generate_many_hosts(zc, type_, name, server_count) @@ -87,15 +87,16 @@ def test_large_packet_exception_log_handling(self): """Verify we downgrade debug after warning.""" # instantiate a zeroconf instance - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) - with patch('zeroconf._logger.log.warning') as mocked_log_warn, patch( - 'zeroconf._logger.log.debug' - ) as mocked_log_debug: + with ( + patch("zeroconf._logger.log.warning") as mocked_log_warn, + patch("zeroconf._logger.log.debug") as mocked_log_debug, + ): # now that we have a long packet in our possession, let's verify the # exception handling. 
out = r.DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA) - out.data.append(b'\0' * 10000) + out.data.append(b"\0" * 10000) # mock the zeroconf logger and check for the correct logging backoff call_counts = mocked_log_warn.call_count, mocked_log_debug.call_count @@ -112,7 +113,7 @@ def test_large_packet_exception_log_handling(self): zc.send(out, const._MDNS_ADDR, const._MDNS_PORT) time.sleep(0.3) r.log.debug( - 'warn %d debug %d was %s', + "warn %d debug %d was %s", mocked_log_warn.call_count, mocked_log_debug.call_count, call_counts, @@ -123,10 +124,10 @@ def test_large_packet_exception_log_handling(self): zc.close() def verify_name_change(self, zc, type_, name, number_hosts): - desc = {'path': '/~paulsm/'} + desc = {"path": "/~paulsm/"} info_service = ServiceInfo( type_, - f'{name}.{type_}', + f"{name}.{type_}", 80, 0, 0, @@ -146,7 +147,7 @@ def verify_name_change(self, zc, type_, name, number_hosts): # in the registry info_service2 = ServiceInfo( type_, - f'{name}.{type_}', + f"{name}.{type_}", 80, 0, 0, @@ -155,23 +156,24 @@ def verify_name_change(self, zc, type_, name, number_hosts): addresses=[socket.inet_aton("10.0.1.2")], ) zc.register_service(info_service2, allow_name_change=True) - assert info_service2.name.split('.')[0] == '%s-%d' % (name, number_hosts + 1) + assert info_service2.name.split(".")[0] == f"{name}-{number_hosts + 1}" def generate_many_hosts(self, zc, type_, name, number_hosts): block_size = 25 number_hosts = int((number_hosts - 1) / block_size + 1) * block_size out = r.DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA) for i in range(1, number_hosts + 1): - next_name = name if i == 1 else '%s-%d' % (name, i) + next_name = name if i == 1 else f"{name}-{i}" self.generate_host(out, next_name, type_) _inject_responses(zc, [r.DNSIncoming(packet) for packet in out.packets()]) @staticmethod def generate_host(out, host_name, type_): - name = '.'.join((host_name, type_)) + name = ".".join((host_name, type_)) out.add_answer_at_time( - 
r.DNSPointer(type_, const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, name), 0 + r.DNSPointer(type_, const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, name), + 0, ) out.add_answer_at_time( r.DNSService( diff --git a/tests/test_listener.py b/tests/test_listener.py index bd8022736..4897eabe0 100644 --- a/tests/test_listener.py +++ b/tests/test_listener.py @@ -1,12 +1,10 @@ -#!/usr/bin/env python +"""Unit tests for zeroconf._listener""" - -""" Unit tests for zeroconf._listener """ +from __future__ import annotations import logging import unittest import unittest.mock -from typing import Tuple, Union from unittest.mock import MagicMock, patch import zeroconf as r @@ -23,7 +21,7 @@ from . import QuestionHistoryWithoutSuppression -log = logging.getLogger('zeroconf') +log = logging.getLogger("zeroconf") original_logging_level = logging.NOTSET @@ -43,26 +41,27 @@ def test_guard_against_oversized_packets(): These packets can quickly overwhelm the system. """ - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) - for i in range(5000): + for _i in range(5000): generated.add_answer_at_time( r.DNSText( "packet{i}.local.", const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, 500, - b'path=/~paulsm/', + b"path=/~paulsm/", ), 0, ) try: # We are patching to generate an oversized packet - with patch.object(outgoing, "_MAX_MSG_ABSOLUTE", 100000), patch.object( - outgoing, "_MAX_MSG_TYPICAL", 100000 + with ( + patch.object(outgoing, "_MAX_MSG_ABSOLUTE", 100000), + patch.object(outgoing, "_MAX_MSG_TYPICAL", 100000), ): over_sized_packet = generated.packets()[0] assert len(over_sized_packet) > const._MAX_MSG_ABSOLUTE @@ -77,7 +76,7 @@ def test_guard_against_oversized_packets(): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, 500, - b'path=/~paulsm/', + b"path=/~paulsm/", ) generated.add_answer_at_time( @@ -91,10 +90,10 @@ def test_guard_against_oversized_packets(): listener = 
_listener.AsyncListener(zc) listener.transport = unittest.mock.MagicMock() - listener.datagram_received(ok_packet, ('127.0.0.1', const._MDNS_PORT)) + listener.datagram_received(ok_packet, ("127.0.0.1", const._MDNS_PORT)) assert zc.cache.async_get_unique(okpacket_record) is not None - listener.datagram_received(over_sized_packet, ('127.0.0.1', const._MDNS_PORT)) + listener.datagram_received(over_sized_packet, ("127.0.0.1", const._MDNS_PORT)) assert ( zc.cache.async_get_unique( r.DNSText( @@ -102,15 +101,15 @@ def test_guard_against_oversized_packets(): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, 500, - b'path=/~paulsm/', + b"path=/~paulsm/", ) ) is None ) - logging.getLogger('zeroconf').setLevel(logging.INFO) + logging.getLogger("zeroconf").setLevel(logging.INFO) - listener.datagram_received(over_sized_packet, ('::1', const._MDNS_PORT, 1, 1)) + listener.datagram_received(over_sized_packet, ("::1", const._MDNS_PORT, 1, 1)) assert ( zc.cache.async_get_unique( r.DNSText( @@ -118,7 +117,7 @@ def test_guard_against_oversized_packets(): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, 500, - b'path=/~paulsm/', + b"path=/~paulsm/", ) ) is None @@ -131,9 +130,14 @@ def test_guard_against_duplicate_packets(): """Ensure we do not process duplicate packets. These packets can quickly overwhelm the system. 
""" - zc = Zeroconf(interfaces=['127.0.0.1']) + zc = Zeroconf(interfaces=["127.0.0.1"]) zc.registry.async_add( - ServiceInfo("_http._tcp.local.", "Test._http._tcp.local.", server="Test._http._tcp.local.", port=4) + ServiceInfo( + "_http._tcp.local.", + "Test._http._tcp.local.", + server="Test._http._tcp.local.", + port=4, + ) ) zc.question_history = QuestionHistoryWithoutSuppression() @@ -144,7 +148,7 @@ def handle_query_or_defer( addr: str, port: int, transport: _engine._WrappedTransport, - v6_flow_scope: Union[Tuple[()], Tuple[int, int]] = (), + v6_flow_scope: tuple[()] | tuple[int, int] = (), ) -> None: """Handle a query or defer it for later processing.""" super().handle_query_or_defer(msg, addr, port, transport, v6_flow_scope) @@ -174,14 +178,22 @@ def handle_query_or_defer( start_time = current_time_millis() listener._process_datagram_at_time( - False, len(packet_with_qm_question), start_time, packet_with_qm_question, addrs + False, + len(packet_with_qm_question), + start_time, + packet_with_qm_question, + addrs, ) _handle_query_or_defer.assert_called_once() _handle_query_or_defer.reset_mock() # Now call with the same packet again and handle_query_or_defer should not fire listener._process_datagram_at_time( - False, len(packet_with_qm_question), start_time, packet_with_qm_question, addrs + False, + len(packet_with_qm_question), + start_time, + packet_with_qm_question, + addrs, ) _handle_query_or_defer.assert_not_called() _handle_query_or_defer.reset_mock() @@ -190,35 +202,55 @@ def handle_query_or_defer( new_time = start_time + 1100 # Now call with the same packet again and handle_query_or_defer should fire listener._process_datagram_at_time( - False, len(packet_with_qm_question), new_time, packet_with_qm_question, addrs + False, + len(packet_with_qm_question), + new_time, + packet_with_qm_question, + addrs, ) _handle_query_or_defer.assert_called_once() _handle_query_or_defer.reset_mock() # Now call with the different packet and handle_query_or_defer should 
fire listener._process_datagram_at_time( - False, len(packet_with_qm_question2), new_time, packet_with_qm_question2, addrs + False, + len(packet_with_qm_question2), + new_time, + packet_with_qm_question2, + addrs, ) _handle_query_or_defer.assert_called_once() _handle_query_or_defer.reset_mock() # Now call with the different packet and handle_query_or_defer should fire listener._process_datagram_at_time( - False, len(packet_with_qm_question), new_time, packet_with_qm_question, addrs + False, + len(packet_with_qm_question), + new_time, + packet_with_qm_question, + addrs, ) _handle_query_or_defer.assert_called_once() _handle_query_or_defer.reset_mock() # Now call with the different packet with qu question and handle_query_or_defer should fire listener._process_datagram_at_time( - False, len(packet_with_qu_question), new_time, packet_with_qu_question, addrs + False, + len(packet_with_qu_question), + new_time, + packet_with_qu_question, + addrs, ) _handle_query_or_defer.assert_called_once() _handle_query_or_defer.reset_mock() # Now call again with the same packet that has a qu question and handle_query_or_defer should fire listener._process_datagram_at_time( - False, len(packet_with_qu_question), new_time, packet_with_qu_question, addrs + False, + len(packet_with_qu_question), + new_time, + packet_with_qu_question, + addrs, ) _handle_query_or_defer.assert_called_once() _handle_query_or_defer.reset_mock() @@ -227,20 +259,28 @@ def handle_query_or_defer( # Call with the QM packet again listener._process_datagram_at_time( - False, len(packet_with_qm_question), new_time, packet_with_qm_question, addrs + False, + len(packet_with_qm_question), + new_time, + packet_with_qm_question, + addrs, ) _handle_query_or_defer.assert_called_once() _handle_query_or_defer.reset_mock() # Now call with the same packet again and handle_query_or_defer should not fire listener._process_datagram_at_time( - False, len(packet_with_qm_question), new_time, packet_with_qm_question, addrs + False, + 
len(packet_with_qm_question), + new_time, + packet_with_qm_question, + addrs, ) _handle_query_or_defer.assert_not_called() _handle_query_or_defer.reset_mock() # Now call with garbage - listener._process_datagram_at_time(False, len(b'garbage'), new_time, b'garbage', addrs) + listener._process_datagram_at_time(False, len(b"garbage"), new_time, b"garbage", addrs) _handle_query_or_defer.assert_not_called() _handle_query_or_defer.reset_mock() diff --git a/tests/test_logger.py b/tests/test_logger.py index 84a46f89d..4e09aa3b1 100644 --- a/tests/test_logger.py +++ b/tests/test_logger.py @@ -1,8 +1,7 @@ -#!/usr/bin/env python - - """Unit tests for logger.py.""" +from __future__ import annotations + import logging from unittest.mock import call, patch @@ -11,16 +10,16 @@ def test_loading_logger(): """Test loading logger does not change level unless it is unset.""" - log = logging.getLogger('zeroconf') + log = logging.getLogger("zeroconf") log.setLevel(logging.CRITICAL) set_logger_level_if_unset() - log = logging.getLogger('zeroconf') + log = logging.getLogger("zeroconf") assert log.level == logging.CRITICAL - log = logging.getLogger('zeroconf') + log = logging.getLogger("zeroconf") log.setLevel(logging.NOTSET) set_logger_level_if_unset() - log = logging.getLogger('zeroconf') + log = logging.getLogger("zeroconf") assert log.level == logging.WARNING @@ -28,17 +27,19 @@ def test_log_warning_once(): """Test we only log with warning level once.""" QuietLogger._seen_logs = {} quiet_logger = QuietLogger() - with patch("zeroconf._logger.log.warning") as mock_log_warning, patch( - "zeroconf._logger.log.debug" - ) as mock_log_debug: + with ( + patch("zeroconf._logger.log.warning") as mock_log_warning, + patch("zeroconf._logger.log.debug") as mock_log_debug, + ): quiet_logger.log_warning_once("the warning") assert mock_log_warning.mock_calls assert not mock_log_debug.mock_calls - with patch("zeroconf._logger.log.warning") as mock_log_warning, patch( - "zeroconf._logger.log.debug" - ) 
as mock_log_debug: + with ( + patch("zeroconf._logger.log.warning") as mock_log_warning, + patch("zeroconf._logger.log.debug") as mock_log_debug, + ): quiet_logger.log_warning_once("the warning") assert not mock_log_warning.mock_calls @@ -49,17 +50,19 @@ def test_log_exception_warning(): """Test we only log with warning level once.""" QuietLogger._seen_logs = {} quiet_logger = QuietLogger() - with patch("zeroconf._logger.log.warning") as mock_log_warning, patch( - "zeroconf._logger.log.debug" - ) as mock_log_debug: + with ( + patch("zeroconf._logger.log.warning") as mock_log_warning, + patch("zeroconf._logger.log.debug") as mock_log_debug, + ): quiet_logger.log_exception_warning("the exception warning") assert mock_log_warning.mock_calls assert not mock_log_debug.mock_calls - with patch("zeroconf._logger.log.warning") as mock_log_warning, patch( - "zeroconf._logger.log.debug" - ) as mock_log_debug: + with ( + patch("zeroconf._logger.log.warning") as mock_log_warning, + patch("zeroconf._logger.log.debug") as mock_log_debug, + ): quiet_logger.log_exception_warning("the exception warning") assert not mock_log_warning.mock_calls @@ -73,12 +76,12 @@ def test_llog_exception_debug(): with patch("zeroconf._logger.log.debug") as mock_log_debug: quiet_logger.log_exception_debug("the exception") - assert mock_log_debug.mock_calls == [call('the exception', exc_info=True)] + assert mock_log_debug.mock_calls == [call("the exception", exc_info=True)] with patch("zeroconf._logger.log.debug") as mock_log_debug: quiet_logger.log_exception_debug("the exception") - assert mock_log_debug.mock_calls == [call('the exception', exc_info=False)] + assert mock_log_debug.mock_calls == [call("the exception", exc_info=False)] def test_log_exception_once(): @@ -86,17 +89,19 @@ def test_log_exception_once(): QuietLogger._seen_logs = {} quiet_logger = QuietLogger() exc = Exception() - with patch("zeroconf._logger.log.warning") as mock_log_warning, patch( - "zeroconf._logger.log.debug" - ) as 
mock_log_debug: + with ( + patch("zeroconf._logger.log.warning") as mock_log_warning, + patch("zeroconf._logger.log.debug") as mock_log_debug, + ): quiet_logger.log_exception_once(exc, "the exceptional exception warning") assert mock_log_warning.mock_calls assert not mock_log_debug.mock_calls - with patch("zeroconf._logger.log.warning") as mock_log_warning, patch( - "zeroconf._logger.log.debug" - ) as mock_log_debug: + with ( + patch("zeroconf._logger.log.warning") as mock_log_warning, + patch("zeroconf._logger.log.debug") as mock_log_debug, + ): quiet_logger.log_exception_once(exc, "the exceptional exception warning") assert not mock_log_warning.mock_calls diff --git a/tests/test_protocol.py b/tests/test_protocol.py index 6990917a2..edd87c2e7 100644 --- a/tests/test_protocol.py +++ b/tests/test_protocol.py @@ -1,14 +1,12 @@ -#!/usr/bin/env python +"""Unit tests for zeroconf._protocol""" - -""" Unit tests for zeroconf._protocol """ +from __future__ import annotations import copy import logging import os import socket import struct -import unittest import unittest.mock from typing import cast @@ -19,7 +17,7 @@ from . 
import has_working_ipv6 -log = logging.getLogger('zeroconf') +log = logging.getLogger("zeroconf") original_logging_level = logging.NOTSET @@ -54,11 +52,11 @@ def test_parse_own_packet_question(self): def test_parse_own_packet_nsec(self): answer = r.DNSNsec( - 'eufy HomeBase2-2464._hap._tcp.local.', + "eufy HomeBase2-2464._hap._tcp.local.", const._TYPE_NSEC, const._CLASS_IN | const._CLASS_UNIQUE, const._DNS_OTHER_TTL, - 'eufy HomeBase2-2464._hap._tcp.local.', + "eufy HomeBase2-2464._hap._tcp.local.", [const._TYPE_TXT, const._TYPE_SRV], ) @@ -69,11 +67,11 @@ def test_parse_own_packet_nsec(self): # Now with the higher RD type first answer = r.DNSNsec( - 'eufy HomeBase2-2464._hap._tcp.local.', + "eufy HomeBase2-2464._hap._tcp.local.", const._TYPE_NSEC, const._CLASS_IN | const._CLASS_UNIQUE, const._DNS_OTHER_TTL, - 'eufy HomeBase2-2464._hap._tcp.local.', + "eufy HomeBase2-2464._hap._tcp.local.", [const._TYPE_SRV, const._TYPE_TXT], ) @@ -84,30 +82,30 @@ def test_parse_own_packet_nsec(self): # Types > 255 should raise an exception answer_invalid_types = r.DNSNsec( - 'eufy HomeBase2-2464._hap._tcp.local.', + "eufy HomeBase2-2464._hap._tcp.local.", const._TYPE_NSEC, const._CLASS_IN | const._CLASS_UNIQUE, const._DNS_OTHER_TTL, - 'eufy HomeBase2-2464._hap._tcp.local.', + "eufy HomeBase2-2464._hap._tcp.local.", [const._TYPE_TXT, const._TYPE_SRV, 1000], ) generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) generated.add_answer_at_time(answer_invalid_types, 0) - with pytest.raises(ValueError, match='rdtype 1000 is too large for NSEC'): + with pytest.raises(ValueError, match="rdtype 1000 is too large for NSEC"): generated.packets() # Empty rdtypes are not allowed answer_invalid_types = r.DNSNsec( - 'eufy HomeBase2-2464._hap._tcp.local.', + "eufy HomeBase2-2464._hap._tcp.local.", const._TYPE_NSEC, const._CLASS_IN | const._CLASS_UNIQUE, const._DNS_OTHER_TTL, - 'eufy HomeBase2-2464._hap._tcp.local.', + "eufy HomeBase2-2464._hap._tcp.local.", [], ) generated = 
r.DNSOutgoing(const._FLAGS_QR_RESPONSE) generated.add_answer_at_time(answer_invalid_types, 0) - with pytest.raises(ValueError, match='NSEC must have at least one rdtype'): + with pytest.raises(ValueError, match="NSEC must have at least one rdtype"): generated.packets() def test_parse_own_packet_response(self): @@ -198,7 +196,7 @@ def test_suppress_answer(self): "testname2.local.", const._TYPE_SRV, const._CLASS_IN | const._CLASS_UNIQUE, - const._DNS_HOST_TTL / 2, + int(const._DNS_HOST_TTL / 2), 0, 0, 80, @@ -250,18 +248,18 @@ def test_suppress_answer(self): def test_dns_hinfo(self): generated = r.DNSOutgoing(0) - generated.add_additional_answer(DNSHinfo('irrelevant', const._TYPE_HINFO, 0, 0, 'cpu', 'os')) + generated.add_additional_answer(DNSHinfo("irrelevant", const._TYPE_HINFO, 0, 0, "cpu", "os")) parsed = r.DNSIncoming(generated.packets()[0]) answer = cast(r.DNSHinfo, parsed.answers()[0]) - assert answer.cpu == 'cpu' - assert answer.os == 'os' + assert answer.cpu == "cpu" + assert answer.os == "os" generated = r.DNSOutgoing(0) - generated.add_additional_answer(DNSHinfo('irrelevant', const._TYPE_HINFO, 0, 0, 'cpu', 'x' * 257)) + generated.add_additional_answer(DNSHinfo("irrelevant", const._TYPE_HINFO, 0, 0, "cpu", "x" * 257)) self.assertRaises(r.NamePartTooLongException, generated.packets) def test_many_questions(self): - """Test many questions get seperated into multiple packets.""" + """Test many questions get separated into multiple packets.""" generated = r.DNSOutgoing(const._FLAGS_QR_QUERY) questions = [] for i in range(100): @@ -281,7 +279,7 @@ def test_many_questions(self): assert len(parsed2.questions) == 15 def test_many_questions_with_many_known_answers(self): - """Test many questions and known answers get seperated into multiple packets.""" + """Test many questions and known answers get separated into multiple packets.""" generated = r.DNSOutgoing(const._FLAGS_QR_QUERY) questions = [] for _ in range(30): @@ -296,7 +294,7 @@ def 
test_many_questions_with_many_known_answers(self): const._TYPE_PTR, const._CLASS_IN | const._CLASS_UNIQUE, const._DNS_OTHER_TTL, - '123.local.', + "123.local.", ) generated.add_answer_at_time(known_answer, now) packets = generated.packets() @@ -319,12 +317,14 @@ def test_many_questions_with_many_known_answers(self): assert not parsed3.truncated def test_massive_probe_packet_split(self): - """Test probe with many authorative answers.""" + """Test probe with many authoritative answers.""" generated = r.DNSOutgoing(const._FLAGS_QR_QUERY | const._FLAGS_AA) questions = [] for _ in range(30): question = r.DNSQuestion( - "_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN | const._CLASS_UNIQUE + "_hap._tcp.local.", + const._TYPE_PTR, + const._CLASS_IN | const._CLASS_UNIQUE, ) generated.add_question(question) questions.append(question) @@ -335,7 +335,7 @@ def test_massive_probe_packet_split(self): const._TYPE_PTR, const._CLASS_IN | const._CLASS_UNIQUE, const._DNS_OTHER_TTL, - '123.local.', + "123.local.", ) generated.add_authorative_answer(authorative_answer) packets = generated.packets() @@ -366,7 +366,7 @@ def test_only_one_answer_can_by_large(self): """ generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) query = r.DNSIncoming(r.DNSOutgoing(const._FLAGS_QR_QUERY).packets()[0]) - for i in range(3): + for _i in range(3): generated.add_answer( query, r.DNSText( @@ -374,7 +374,7 @@ def test_only_one_answer_can_by_large(self): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, 1200, - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==' * 100, + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==" * 100, ), ) generated.add_answer( @@ -480,7 +480,7 @@ def test_response_header_bits(self): def test_numbers(self): generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) bytes = generated.packets()[0] - (num_questions, num_answers, num_authorities, num_additionals) = struct.unpack('!4H', bytes[4:12]) + (num_questions, num_answers, num_authorities, num_additionals) = struct.unpack("!4H", bytes[4:12]) 
assert num_questions == 0 assert num_answers == 0 assert num_authorities == 0 @@ -489,10 +489,10 @@ def test_numbers(self): def test_numbers_questions(self): generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE) question = r.DNSQuestion("testname.local.", const._TYPE_SRV, const._CLASS_IN) - for i in range(10): + for _i in range(10): generated.add_question(question) bytes = generated.packets()[0] - (num_questions, num_answers, num_authorities, num_additionals) = struct.unpack('!4H', bytes[4:12]) + (num_questions, num_answers, num_authorities, num_additionals) = struct.unpack("!4H", bytes[4:12]) assert num_questions == 10 assert num_answers == 0 assert num_authorities == 0 @@ -503,14 +503,14 @@ class TestDnsIncoming(unittest.TestCase): def test_incoming_exception_handling(self): generated = r.DNSOutgoing(0) packet = generated.packets()[0] - packet = packet[:8] + b'deadbeef' + packet[8:] + packet = packet[:8] + b"deadbeef" + packet[8:] parsed = r.DNSIncoming(packet) parsed = r.DNSIncoming(packet) assert parsed.valid is False def test_incoming_unknown_type(self): generated = r.DNSOutgoing(0) - answer = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'a') + answer = r.DNSAddress("a", const._TYPE_SOA, const._CLASS_IN, 1, b"a") generated.add_additional_answer(answer) packet = generated.packets()[0] parsed = r.DNSIncoming(packet) @@ -520,20 +520,20 @@ def test_incoming_unknown_type(self): def test_incoming_circular_reference(self): assert not r.DNSIncoming( bytes.fromhex( - '01005e0000fb542a1bf0577608004500006897934000ff11d81bc0a86a31e00000fb' - '14e914e90054f9b2000084000000000100000000095f7365727669636573075f646e' - '732d7364045f756470056c6f63616c00000c0001000011940018105f73706f746966' - '792d636f6e6e656374045f746370c023' + "01005e0000fb542a1bf0577608004500006897934000ff11d81bc0a86a31e00000fb" + "14e914e90054f9b2000084000000000100000000095f7365727669636573075f646e" + "732d7364045f756470056c6f63616c00000c0001000011940018105f73706f746966" + 
"792d636f6e6e656374045f746370c023" ) ).valid - @unittest.skipIf(not has_working_ipv6(), 'Requires IPv6') - @unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled') + @unittest.skipIf(not has_working_ipv6(), "Requires IPv6") + @unittest.skipIf(os.environ.get("SKIP_IPV6"), "IPv6 tests disabled") def test_incoming_ipv6(self): addr = "2606:2800:220:1:248:1893:25c8:1946" # example.com packed = socket.inet_pton(socket.AF_INET6, addr) generated = r.DNSOutgoing(0) - answer = r.DNSAddress('domain', const._TYPE_AAAA, const._CLASS_IN | const._CLASS_UNIQUE, 1, packed) + answer = r.DNSAddress("domain", const._TYPE_AAAA, const._CLASS_IN | const._CLASS_UNIQUE, 1, packed) generated.add_additional_answer(answer) packet = generated.packets()[0] parsed = r.DNSIncoming(packet) @@ -650,8 +650,8 @@ def test_dns_compression_rollback_for_corruption(): const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, const._DNS_OTHER_TTL, - b'\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1' - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==', + b"\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1" + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", ), 0, ) @@ -708,12 +708,12 @@ def test_tc_bit_in_query_packet(): for i in range(30): out.add_answer_at_time( DNSText( - ("HASS Bridge W9DN %s._hap._tcp.local." % i), + f"HASS Bridge W9DN {i}._hap._tcp.local.", const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, const._DNS_OTHER_TTL, - b'\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1' - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==', + b"\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1" + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", ), 0, ) @@ -740,12 +740,12 @@ def test_tc_bit_not_set_in_answer_packet(): for i in range(30): out.add_answer_at_time( DNSText( - ("HASS Bridge W9DN %s._hap._tcp.local." 
% i), + f"HASS Bridge W9DN {i}._hap._tcp.local.", const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, const._DNS_OTHER_TTL, - b'\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1' - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==', + b"\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1" + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", ), 0, ) @@ -766,23 +766,23 @@ def test_tc_bit_not_set_in_answer_packet(): assert third_packet.valid is True -# 4003 15.973052 192.168.107.68 224.0.0.251 MDNS 76 Standard query 0xffc4 PTR _raop._tcp.local, "QM" question +# MDNS 76 Standard query 0xffc4 PTR _raop._tcp.local, "QM" question def test_qm_packet_parser(): """Test we can parse a query packet with the QM bit.""" qm_packet = ( - b'\xff\xc4\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x05_raop\x04_tcp\x05local\x00\x00\x0c\x00\x01' + b"\xff\xc4\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x05_raop\x04_tcp\x05local\x00\x00\x0c\x00\x01" ) parsed = DNSIncoming(qm_packet) assert parsed.questions[0].unicast is False assert ",QM," in str(parsed.questions[0]) -# 389951 1450.577370 192.168.107.111 224.0.0.251 MDNS 115 Standard query 0x0000 PTR _companion-link._tcp.local, "QU" question OPT +# MDNS 115 Standard query 0x0000 PTR _companion-link._tcp.local, "QU" question OPT def test_qu_packet_parser(): """Test we can parse a query packet with the QU bit.""" qu_packet = ( - b'\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x01\x0f_companion-link\x04_tcp\x05local' - b'\x00\x00\x0c\x80\x01\x00\x00)\x05\xa0\x00\x00\x11\x94\x00\x12\x00\x04\x00\x0e\x00dz{\x8a6\x9czF\x84,\xcaQ\xff' + b"\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x01\x0f_companion-link\x04_tcp\x05local" + b"\x00\x00\x0c\x80\x01\x00\x00)\x05\xa0\x00\x00\x11\x94\x00\x12\x00\x04\x00\x0e\x00dz{\x8a6\x9czF\x84,\xcaQ\xff" ) parsed = DNSIncoming(qu_packet) assert parsed.questions[0].unicast is True @@ -814,12 +814,12 @@ def test_records_same_packet_share_fate(): for i in range(30): out.add_answer_at_time( 
DNSText( - ("HASS Bridge W9DN %s._hap._tcp.local." % i), + f"HASS Bridge W9DN {i}._hap._tcp.local.", const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, const._DNS_OTHER_TTL, - b'\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1' - b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==', + b"\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1" + b"\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==", ), 0, ) @@ -834,19 +834,19 @@ def test_records_same_packet_share_fate(): def test_dns_compression_invalid_skips_bad_name_compress_in_question(): """Test our wire parser can skip bad compression in questions.""" packet = ( - b'\x00\x00\x00\x00\x00\x04\x00\x00\x00\x07\x00\x00\x11homeassistant1128\x05l' - b'ocal\x00\x00\xff\x00\x014homeassistant1128 [534a4794e5ed41879ecf012252d3e02' - b'a]\x0c_workstation\x04_tcp\xc0\x1e\x00\xff\x00\x014homeassistant1127 [534a47' - b'94e5ed41879ecf012252d3e02a]\xc0^\x00\xff\x00\x014homeassistant1123 [534a479' - b'4e5ed41879ecf012252d3e02a]\xc0^\x00\xff\x00\x014homeassistant1118 [534a4794' - b'e5ed41879ecf012252d3e02a]\xc0^\x00\xff\x00\x01\xc0\x0c\x00\x01\x80' - b'\x01\x00\x00\x00x\x00\x04\xc0\xa8<\xc3\xc0v\x00\x10\x80\x01\x00\x00\x00' - b'x\x00\x01\x00\xc0v\x00!\x80\x01\x00\x00\x00x\x00\x1f\x00\x00\x00\x00' - b'\x00\x00\x11homeassistant1127\x05local\x00\xc0\xb1\x00\x10\x80' - b'\x01\x00\x00\x00x\x00\x01\x00\xc0\xb1\x00!\x80\x01\x00\x00\x00x\x00\x1f' - b'\x00\x00\x00\x00\x00\x00\x11homeassistant1123\x05local\x00\xc0)\x00\x10\x80' - b'\x01\x00\x00\x00x\x00\x01\x00\xc0)\x00!\x80\x01\x00\x00\x00x\x00\x1f' - b'\x00\x00\x00\x00\x00\x00\x11homeassistant1128\x05local\x00' + b"\x00\x00\x00\x00\x00\x04\x00\x00\x00\x07\x00\x00\x11homeassistant1128\x05l" + b"ocal\x00\x00\xff\x00\x014homeassistant1128 [534a4794e5ed41879ecf012252d3e02" + b"a]\x0c_workstation\x04_tcp\xc0\x1e\x00\xff\x00\x014homeassistant1127 [534a47" + b"94e5ed41879ecf012252d3e02a]\xc0^\x00\xff\x00\x014homeassistant1123 [534a479" + 
b"4e5ed41879ecf012252d3e02a]\xc0^\x00\xff\x00\x014homeassistant1118 [534a4794" + b"e5ed41879ecf012252d3e02a]\xc0^\x00\xff\x00\x01\xc0\x0c\x00\x01\x80" + b"\x01\x00\x00\x00x\x00\x04\xc0\xa8<\xc3\xc0v\x00\x10\x80\x01\x00\x00\x00" + b"x\x00\x01\x00\xc0v\x00!\x80\x01\x00\x00\x00x\x00\x1f\x00\x00\x00\x00" + b"\x00\x00\x11homeassistant1127\x05local\x00\xc0\xb1\x00\x10\x80" + b"\x01\x00\x00\x00x\x00\x01\x00\xc0\xb1\x00!\x80\x01\x00\x00\x00x\x00\x1f" + b"\x00\x00\x00\x00\x00\x00\x11homeassistant1123\x05local\x00\xc0)\x00\x10\x80" + b"\x01\x00\x00\x00x\x00\x01\x00\xc0)\x00!\x80\x01\x00\x00\x00x\x00\x1f" + b"\x00\x00\x00\x00\x00\x00\x11homeassistant1128\x05local\x00" ) parsed = r.DNSIncoming(packet) assert len(parsed.questions) == 4 @@ -855,8 +855,8 @@ def test_dns_compression_invalid_skips_bad_name_compress_in_question(): def test_dns_compression_all_invalid(caplog): """Test our wire parser can skip all invalid data.""" packet = ( - b'\x00\x00\x84\x00\x00\x00\x00\x01\x00\x00\x00\x00!roborock-vacuum-s5e_miio416' - b'112328\x00\x00/\x80\x01\x00\x00\x00x\x00\t\xc0P\x00\x05@\x00\x00\x00\x00' + b"\x00\x00\x84\x00\x00\x00\x00\x01\x00\x00\x00\x00!roborock-vacuum-s5e_miio416" + b"112328\x00\x00/\x80\x01\x00\x00\x00x\x00\t\xc0P\x00\x05@\x00\x00\x00\x00" ) parsed = r.DNSIncoming(packet, ("2.4.5.4", 5353)) assert len(parsed.questions) == 0 @@ -871,9 +871,9 @@ def test_invalid_next_name_ignored(): The RFC states it should be ignored when used with mDNS. 
""" packet = ( - b'\x00\x00\x00\x00\x00\x01\x00\x02\x00\x00\x00\x00\x07Android\x05local\x00\x00' - b'\xff\x00\x01\xc0\x0c\x00/\x00\x01\x00\x00\x00x\x00\x08\xc02\x00\x04@' - b'\x00\x00\x08\xc0\x0c\x00\x01\x00\x01\x00\x00\x00x\x00\x04\xc0\xa8X<' + b"\x00\x00\x00\x00\x00\x01\x00\x02\x00\x00\x00\x00\x07Android\x05local\x00\x00" + b"\xff\x00\x01\xc0\x0c\x00/\x00\x01\x00\x00\x00x\x00\x08\xc02\x00\x04@" + b"\x00\x00\x08\xc0\x0c\x00\x01\x00\x01\x00\x00\x00x\x00\x04\xc0\xa8X<" ) parsed = r.DNSIncoming(packet) assert len(parsed.questions) == 1 @@ -893,11 +893,11 @@ def test_dns_compression_invalid_skips_record(): ) parsed = r.DNSIncoming(packet) answer = r.DNSNsec( - 'eufy HomeBase2-2464._hap._tcp.local.', + "eufy HomeBase2-2464._hap._tcp.local.", const._TYPE_NSEC, const._CLASS_IN | const._CLASS_UNIQUE, const._DNS_OTHER_TTL, - 'eufy HomeBase2-2464._hap._tcp.local.', + "eufy HomeBase2-2464._hap._tcp.local.", [const._TYPE_TXT, const._TYPE_SRV], ) assert answer in parsed.answers() @@ -918,11 +918,11 @@ def test_dns_compression_points_forward(): ) parsed = r.DNSIncoming(packet) answer = r.DNSNsec( - 'TV Beneden (2)._androidtvremote._tcp.local.', + "TV Beneden (2)._androidtvremote._tcp.local.", const._TYPE_NSEC, const._CLASS_IN | const._CLASS_UNIQUE, const._DNS_OTHER_TTL, - 'TV Beneden (2)._androidtvremote._tcp.local.', + "TV Beneden (2)._androidtvremote._tcp.local.", [const._TYPE_TXT, const._TYPE_SRV], ) assert answer in parsed.answers() @@ -942,9 +942,9 @@ def test_dns_compression_points_to_itself(): def test_dns_compression_points_beyond_packet(): """Test our wire parser does not fail when the compression pointer points beyond the packet.""" packet = ( - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x06domain\x05local\x00\x00\x01' - b'\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\xe7\x0f\x00\x01\x80\x01\x00\x00' - b'\x00\x01\x00\x04\xc0\xa8\xd0\x06' + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x06domain\x05local\x00\x00\x01" + 
b"\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\xe7\x0f\x00\x01\x80\x01\x00\x00" + b"\x00\x01\x00\x04\xc0\xa8\xd0\x06" ) parsed = r.DNSIncoming(packet) assert len(parsed.answers()) == 1 @@ -953,9 +953,9 @@ def test_dns_compression_points_beyond_packet(): def test_dns_compression_generic_failure(caplog): """Test our wire parser does not loop forever when dns compression is corrupt.""" packet = ( - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x06domain\x05local\x00\x00\x01' - b'\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05-\x0c\x00\x01\x80\x01\x00\x00' - b'\x00\x01\x00\x04\xc0\xa8\xd0\x06' + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x06domain\x05local\x00\x00\x01" + b"\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05-\x0c\x00\x01\x80\x01\x00\x00" + b"\x00\x01\x00\x04\xc0\xa8\xd0\x06" ) parsed = r.DNSIncoming(packet, ("1.2.3.4", 5353)) assert len(parsed.answers()) == 1 @@ -965,17 +965,17 @@ def test_dns_compression_generic_failure(caplog): def test_label_length_attack(): """Test our wire parser does not loop forever when the name exceeds 253 chars.""" packet = ( - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x01d\x01d\x01d\x01d\x01d\x01d' - b'\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d' - b'\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d' - b'\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d' - b'\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d' - b'\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d' - b'\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d' - b'\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d' - b'\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x00\x00\x01\x80' - b'\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\xc0\x0c\x00\x01\x80\x01\x00\x00\x00' - b'\x01\x00\x04\xc0\xa8\xd0\x06' + 
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x01d\x01d\x01d\x01d\x01d\x01d" + b"\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d" + b"\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d" + b"\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d" + b"\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d" + b"\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d" + b"\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d" + b"\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d" + b"\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x00\x00\x01\x80" + b"\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\xc0\x0c\x00\x01\x80\x01\x00\x00\x00" + b"\x01\x00\x04\xc0\xa8\xd0\x06" ) parsed = r.DNSIncoming(packet) assert len(parsed.answers()) == 0 @@ -984,28 +984,28 @@ def test_label_length_attack(): def test_label_compression_attack(): """Test our wire parser does not loop forever when exceeding the maximum number of labels.""" packet = ( - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x03atk\x00\x00\x01\x80' - b'\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\x03atk\x03atk\x03atk\x03atk\x03' - b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03' - b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03' - b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03' - b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03' - b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03' - b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03' - b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03' - b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03' - b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03' - 
b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03' - b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03' - b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03' - b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03' - b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03' - b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03' - b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03' - b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03' - b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03' - b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\xc0' - b'\x0c\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x06' + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x03atk\x00\x00\x01\x80" + b"\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\x03atk\x03atk\x03atk\x03atk\x03" + b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03" + b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03" + b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03" + b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03" + b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03" + b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03" + b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03" + b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03" + b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03" + b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03" + b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03" + b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03" + b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03" + 
b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03" + b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03" + b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03" + b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03" + b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03" + b"atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\xc0" + b"\x0c\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x06" ) parsed = r.DNSIncoming(packet) assert len(parsed.answers()) == 1 @@ -1014,15 +1014,15 @@ def test_label_compression_attack(): def test_dns_compression_loop_attack(): """Test our wire parser does not loop forever when dns compression is in a loop.""" packet = ( - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x03atk\x03dns\x05loc' - b'al\xc0\x10\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\x04a' - b'tk2\x04dns2\xc0\x14\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05' - b'\x04atk3\xc0\x10\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0' - b'\x05\x04atk4\x04dns5\xc0\x14\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0' - b'\xa8\xd0\x05\x04atk5\x04dns2\xc0^\x00\x01\x80\x01\x00\x00\x00\x01\x00' - b'\x04\xc0\xa8\xd0\x05\xc0s\x00\x01\x80\x01\x00\x00\x00\x01\x00' - b'\x04\xc0\xa8\xd0\x05\xc0s\x00\x01\x80\x01\x00\x00\x00\x01\x00' - b'\x04\xc0\xa8\xd0\x05' + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x03atk\x03dns\x05loc" + b"al\xc0\x10\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\x04a" + b"tk2\x04dns2\xc0\x14\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05" + b"\x04atk3\xc0\x10\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0" + b"\x05\x04atk4\x04dns5\xc0\x14\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0" + b"\xa8\xd0\x05\x04atk5\x04dns2\xc0^\x00\x01\x80\x01\x00\x00\x00\x01\x00" + b"\x04\xc0\xa8\xd0\x05\xc0s\x00\x01\x80\x01\x00\x00\x00\x01\x00" + b"\x04\xc0\xa8\xd0\x05\xc0s\x00\x01\x80\x01\x00\x00\x00\x01\x00" + 
b"\x04\xc0\xa8\xd0\x05" ) parsed = r.DNSIncoming(packet) assert len(parsed.answers()) == 0 @@ -1031,28 +1031,28 @@ def test_dns_compression_loop_attack(): def test_txt_after_invalid_nsec_name_still_usable(): """Test that we can see the txt record after the invalid nsec record.""" packet = ( - b'\x00\x00\x84\x00\x00\x00\x00\x06\x00\x00\x00\x00\x06_sonos\x04_tcp\x05loc' - b'al\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00\x15\x12Sonos-542A1BC9220E' - b'\xc0\x0c\x12Sonos-542A1BC9220E\xc0\x18\x00/\x80\x01\x00\x00\x00x\x00' - b'\x08\xc1t\x00\x04@\x00\x00\x08\xc0)\x00/\x80\x01\x00\x00\x11\x94\x00' - b'\t\xc0)\x00\x05\x00\x00\x80\x00@\xc0)\x00!\x80\x01\x00\x00\x00x' - b'\x00\x08\x00\x00\x00\x00\x05\xa3\xc0>\xc0>\x00\x01\x80\x01\x00\x00\x00x' - b'\x00\x04\xc0\xa8\x02:\xc0)\x00\x10\x80\x01\x00\x00\x11\x94\x01*2info=/api' - b'/v1/players/RINCON_542A1BC9220E01400/info\x06vers=3\x10protovers=1.24.1\nbo' - b'otseq=11%hhid=Sonos_rYn9K9DLXJe0f3LP9747lbvFvh;mhhid=Sonos_rYn9K9DLXJe0f3LP9' - b'747lbvFvh.Q45RuMaeC07rfXh7OJGm\xc0>\x00\x01\x80\x01\x00\x00\x00x" + b"\x00\x04\xc0\xa8\x02:\xc0)\x00\x10\x80\x01\x00\x00\x11\x94\x01*2info=/api" + b"/v1/players/RINCON_542A1BC9220E01400/info\x06vers=3\x10protovers=1.24.1\nbo" + b"otseq=11%hhid=Sonos_rYn9K9DLXJe0f3LP9747lbvFvh;mhhid=Sonos_rYn9K9DLXJe0f3LP9" + b"747lbvFvh.Q45RuMaeC07rfXh7OJGm None: - nonlocal updates + def update_record(self, zc: Zeroconf, now: float, record: r.DNSRecord) -> None: updates.append(record) listener = LegacyRecordUpdateListener() @@ -65,11 +65,11 @@ def on_service_state_change(zeroconf, service_type, state_change, name): info_service = ServiceInfo( type_, - f'{name}.{type_}', + f"{name}.{type_}", 80, 0, 0, - {'path': '/~paulsm/'}, + {"path": "/~paulsm/"}, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")], ) @@ -80,7 +80,7 @@ def on_service_state_change(zeroconf, service_type, state_change, name): browser.cancel() - assert len(updates) + assert updates assert len([isinstance(update, r.DNSPointer) and update.name == 
type_ for update in updates]) >= 1 zc.remove_listener(listener) @@ -92,8 +92,8 @@ def on_service_state_change(zeroconf, service_type, state_change, name): def test_record_update_compat(): """Test a RecordUpdate can fetch by index.""" - new = r.DNSPointer('irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 'new') - old = r.DNSPointer('irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 'old') + new = r.DNSPointer("irrelevant", const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, "new") + old = r.DNSPointer("irrelevant", const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, "old") update = RecordUpdate(new, old) assert update[0] == new assert update[1] == old diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py index 2ef4b15b1..584a74eca 100644 --- a/tests/utils/__init__.py +++ b/tests/utils/__init__.py @@ -1,21 +1,23 @@ -""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine - Copyright 2003 Paul Scott-Murphy, 2014 William McBrine +"""Multicast DNS Service Discovery for Python, v0.14-wmcbrine +Copyright 2003 Paul Scott-Murphy, 2014 William McBrine - This module provides a framework for the use of DNS Service Discovery - using IP multicast. +This module provides a framework for the use of DNS Service Discovery +using IP multicast. - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - Lesser General Public License for more details. +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA """ + +from __future__ import annotations diff --git a/tests/utils/test_asyncio.py b/tests/utils/test_asyncio.py index a03855157..7989a82cf 100644 --- a/tests/utils/test_asyncio.py +++ b/tests/utils/test_asyncio.py @@ -1,14 +1,12 @@ -#!/usr/bin/env python - - """Unit tests for zeroconf._utils.asyncio.""" +from __future__ import annotations + import asyncio import concurrent.futures import contextlib import threading import time -from typing import Optional from unittest.mock import patch import pytest @@ -29,7 +27,7 @@ async def test_async_get_all_tasks() -> None: loop = aioutils.get_running_loop() assert loop is not None await aioutils._async_get_all_tasks(loop) - if not hasattr(asyncio, 'all_tasks'): + if not hasattr(asyncio, "all_tasks"): return with patch("zeroconf._utils.asyncio.asyncio.all_tasks", side_effect=RuntimeError): await aioutils._async_get_all_tasks(loop) @@ -47,16 +45,17 @@ def test_get_running_loop_no_loop() -> None: @pytest.mark.asyncio -async def test_wait_event_or_timeout_times_out() -> None: - """Test wait_event_or_timeout will timeout.""" - test_event = asyncio.Event() - await aioutils.wait_event_or_timeout(test_event, 0.1) +async def test_wait_future_or_timeout_times_out() -> None: + """Test wait_future_or_timeout will 
timeout.""" + loop = asyncio.get_running_loop() + test_future = loop.create_future() + await aioutils.wait_future_or_timeout(test_future, 0.1) - task = asyncio.ensure_future(test_event.wait()) + task = asyncio.ensure_future(test_future) await asyncio.sleep(0.1) async def _async_wait_or_timeout(): - await aioutils.wait_event_or_timeout(test_event, 0.1) + await aioutils.wait_future_or_timeout(test_future, 0.1) # Test high lock contention await asyncio.gather(*[_async_wait_or_timeout() for _ in range(100)]) @@ -123,7 +122,7 @@ def test_cumulative_timeouts_less_than_close_plus_buffer(): async def test_run_coro_with_timeout() -> None: """Test running a coroutine with a timeout raises EventLoopBlocked.""" loop = asyncio.get_event_loop() - task: Optional[asyncio.Task] = None + task: asyncio.Task | None = None async def _saved_sleep_task(): nonlocal task diff --git a/tests/utils/test_ipaddress.py b/tests/utils/test_ipaddress.py index 73c5ab7e2..4379f458b 100644 --- a/tests/utils/test_ipaddress.py +++ b/tests/utils/test_ipaddress.py @@ -1,10 +1,6 @@ -#!/usr/bin/env python - """Unit tests for zeroconf._utils.ipaddress.""" -import sys - -import pytest +from __future__ import annotations from zeroconf import const from zeroconf._dns import DNSAddress @@ -13,61 +9,91 @@ def test_cached_ip_addresses_wrapper(): """Test the cached_ip_addresses_wrapper.""" - assert ipaddress.cached_ip_addresses('') is None - assert ipaddress.cached_ip_addresses('foo') is None + assert ipaddress.cached_ip_addresses("") is None + assert ipaddress.cached_ip_addresses("foo") is None assert ( - str(ipaddress.cached_ip_addresses(b'&\x06(\x00\x02 \x00\x01\x02H\x18\x93%\xc8\x19F')) - == '2606:2800:220:1:248:1893:25c8:1946' + str(ipaddress.cached_ip_addresses(b"&\x06(\x00\x02 \x00\x01\x02H\x18\x93%\xc8\x19F")) + == "2606:2800:220:1:248:1893:25c8:1946" ) - assert ipaddress.cached_ip_addresses('::1') == ipaddress.IPv6Address('::1') + loop_back_ipv6 = ipaddress.cached_ip_addresses("::1") + assert 
loop_back_ipv6 == ipaddress.IPv6Address("::1") + assert loop_back_ipv6.is_loopback is True + + assert hash(loop_back_ipv6) == hash(ipaddress.IPv6Address("::1")) + + loop_back_ipv4 = ipaddress.cached_ip_addresses("127.0.0.1") + assert loop_back_ipv4 == ipaddress.IPv4Address("127.0.0.1") + assert loop_back_ipv4.is_loopback is True - ipv4 = ipaddress.cached_ip_addresses('169.254.0.0') + assert hash(loop_back_ipv4) == hash(ipaddress.IPv4Address("127.0.0.1")) + + ipv4 = ipaddress.cached_ip_addresses("169.254.0.0") assert ipv4 is not None assert ipv4.is_link_local is True assert ipv4.is_unspecified is False - ipv4 = ipaddress.cached_ip_addresses('0.0.0.0') + ipv4 = ipaddress.cached_ip_addresses("0.0.0.0") assert ipv4 is not None assert ipv4.is_link_local is False assert ipv4.is_unspecified is True - ipv6 = ipaddress.cached_ip_addresses('fe80::1') + ipv6 = ipaddress.cached_ip_addresses("fe80::1") assert ipv6 is not None assert ipv6.is_link_local is True assert ipv6.is_unspecified is False - ipv6 = ipaddress.cached_ip_addresses('0:0:0:0:0:0:0:0') + ipv6 = ipaddress.cached_ip_addresses("0:0:0:0:0:0:0:0") assert ipv6 is not None assert ipv6.is_link_local is False assert ipv6.is_unspecified is True -@pytest.mark.skipif(sys.version_info < (3, 9, 0), reason='scope_id is not supported') def test_get_ip_address_object_from_record(): """Test the get_ip_address_object_from_record.""" # not link local - packed = b'&\x06(\x00\x02 \x00\x01\x02H\x18\x93%\xc8\x19F' + packed = b"&\x06(\x00\x02 \x00\x01\x02H\x18\x93%\xc8\x19F" record = DNSAddress( - 'domain.local', const._TYPE_AAAA, const._CLASS_IN | const._CLASS_UNIQUE, 1, packed, scope_id=3 + "domain.local", + const._TYPE_AAAA, + const._CLASS_IN | const._CLASS_UNIQUE, + 1, + packed, + scope_id=3, ) assert record.scope_id == 3 assert ipaddress.get_ip_address_object_from_record(record) == ipaddress.IPv6Address( - '2606:2800:220:1:248:1893:25c8:1946' + "2606:2800:220:1:248:1893:25c8:1946" ) # link local - packed = 
b'\xfe\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01' + packed = b"\xfe\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01" record = DNSAddress( - 'domain.local', const._TYPE_AAAA, const._CLASS_IN | const._CLASS_UNIQUE, 1, packed, scope_id=3 + "domain.local", + const._TYPE_AAAA, + const._CLASS_IN | const._CLASS_UNIQUE, + 1, + packed, + scope_id=3, ) assert record.scope_id == 3 - assert ipaddress.get_ip_address_object_from_record(record) == ipaddress.IPv6Address('fe80::1%3') - record = DNSAddress('domain.local', const._TYPE_AAAA, const._CLASS_IN | const._CLASS_UNIQUE, 1, packed) + assert ipaddress.get_ip_address_object_from_record(record) == ipaddress.IPv6Address("fe80::1%3") + record = DNSAddress( + "domain.local", + const._TYPE_AAAA, + const._CLASS_IN | const._CLASS_UNIQUE, + 1, + packed, + ) assert record.scope_id is None - assert ipaddress.get_ip_address_object_from_record(record) == ipaddress.IPv6Address('fe80::1') + assert ipaddress.get_ip_address_object_from_record(record) == ipaddress.IPv6Address("fe80::1") record = DNSAddress( - 'domain.local', const._TYPE_A, const._CLASS_IN | const._CLASS_UNIQUE, 1, packed, scope_id=0 + "domain.local", + const._TYPE_A, + const._CLASS_IN | const._CLASS_UNIQUE, + 1, + packed, + scope_id=0, ) assert record.scope_id == 0 # Ensure scope_id of 0 is not appended to the address - assert ipaddress.get_ip_address_object_from_record(record) == ipaddress.IPv6Address('fe80::1') + assert ipaddress.get_ip_address_object_from_record(record) == ipaddress.IPv6Address("fe80::1") diff --git a/tests/utils/test_name.py b/tests/utils/test_name.py index 9604b7758..1feb77131 100644 --- a/tests/utils/test_name.py +++ b/tests/utils/test_name.py @@ -1,7 +1,7 @@ -#!/usr/bin/env python +"""Unit tests for zeroconf._utils.name.""" +from __future__ import annotations -"""Unit tests for zeroconf._utils.name.""" import socket import pytest @@ -36,12 +36,19 @@ def test_service_type_name_overlong_full_name(): ) def 
test_service_type_name_non_strict_compliant_names(instance_name, service_type): """Test service_type_name for valid names, but not strict-compliant.""" - desc = {'path': '/~paulsm/'} - service_name = f'{instance_name}.{service_type}' - service_server = 'ash-1.local.' + desc = {"path": "/~paulsm/"} + service_name = f"{instance_name}.{service_type}" + service_server = "ash-1.local." service_address = socket.inet_aton("10.0.1.2") info = ServiceInfo( - service_type, service_name, 22, 0, 0, desc, service_server, addresses=[service_address] + service_type, + service_name, + 22, + 0, + 0, + desc, + service_server, + addresses=[service_address], ) assert info.get_name() == instance_name @@ -56,21 +63,21 @@ def test_service_type_name_non_strict_compliant_names(instance_name, service_typ def test_possible_types(): """Test possible types from name.""" - assert nameutils.possible_types('.') == set() - assert nameutils.possible_types('local.') == set() - assert nameutils.possible_types('_tcp.local.') == set() - assert nameutils.possible_types('_test-srvc-type._tcp.local.') == {'_test-srvc-type._tcp.local.'} - assert nameutils.possible_types('_any._tcp.local.') == {'_any._tcp.local.'} - assert nameutils.possible_types('.._x._tcp.local.') == {'_x._tcp.local.'} - assert nameutils.possible_types('x.y._http._tcp.local.') == {'_http._tcp.local.'} - assert nameutils.possible_types('1.2.3._mqtt._tcp.local.') == {'_mqtt._tcp.local.'} - assert nameutils.possible_types('x.sub._http._tcp.local.') == {'_http._tcp.local.'} - assert nameutils.possible_types('6d86f882b90facee9170ad3439d72a4d6ee9f511._zget._http._tcp.local.') == { - '_http._tcp.local.', - '_zget._http._tcp.local.', + assert nameutils.possible_types(".") == set() + assert nameutils.possible_types("local.") == set() + assert nameutils.possible_types("_tcp.local.") == set() + assert nameutils.possible_types("_test-srvc-type._tcp.local.") == {"_test-srvc-type._tcp.local."} + assert nameutils.possible_types("_any._tcp.local.") == 
{"_any._tcp.local."} + assert nameutils.possible_types(".._x._tcp.local.") == {"_x._tcp.local."} + assert nameutils.possible_types("x.y._http._tcp.local.") == {"_http._tcp.local."} + assert nameutils.possible_types("1.2.3._mqtt._tcp.local.") == {"_mqtt._tcp.local."} + assert nameutils.possible_types("x.sub._http._tcp.local.") == {"_http._tcp.local."} + assert nameutils.possible_types("6d86f882b90facee9170ad3439d72a4d6ee9f511._zget._http._tcp.local.") == { + "_http._tcp.local.", + "_zget._http._tcp.local.", } - assert nameutils.possible_types('my._printer._sub._http._tcp.local.') == { - '_http._tcp.local.', - '_sub._http._tcp.local.', - '_printer._sub._http._tcp.local.', + assert nameutils.possible_types("my._printer._sub._http._tcp.local.") == { + "_http._tcp.local.", + "_sub._http._tcp.local.", + "_printer._sub._http._tcp.local.", } diff --git a/tests/utils/test_net.py b/tests/utils/test_net.py index 29844d575..7de106618 100644 --- a/tests/utils/test_net.py +++ b/tests/utils/test_net.py @@ -1,27 +1,30 @@ -#!/usr/bin/env python +"""Unit tests for zeroconf._utils.net.""" +from __future__ import annotations -"""Unit tests for zeroconf._utils.net.""" import errno import socket +import sys import unittest -from unittest.mock import MagicMock, Mock, patch +import warnings +from unittest.mock import MagicMock, Mock, call, patch import ifaddr import pytest import zeroconf as r +from zeroconf import get_all_addresses, get_all_addresses_v6 from zeroconf._utils import net as netutils def _generate_mock_adapters(): mock_lo0 = Mock(spec=ifaddr.Adapter) mock_lo0.nice_name = "lo0" - mock_lo0.ips = [ifaddr.IP("127.0.0.1", 8, "lo0")] + mock_lo0.ips = [ifaddr.IP("127.0.0.1", 8, "lo0"), ifaddr.IP(("::1", 0, 0), 128, "lo")] mock_lo0.index = 0 mock_eth0 = Mock(spec=ifaddr.Adapter) mock_eth0.nice_name = "eth0" - mock_eth0.ips = [ifaddr.IP(("2001:db8::", 1, 1), 8, "eth0")] + mock_eth0.ips = [ifaddr.IP(("2001:db8::", 1, 1), 8, "eth0"), ifaddr.IP(("fd00:db8::", 1, 1), 8, "eth0")] 
mock_eth0.index = 1 mock_eth1 = Mock(spec=ifaddr.Adapter) mock_eth1.nice_name = "eth1" @@ -34,11 +37,51 @@ def _generate_mock_adapters(): return [mock_eth0, mock_lo0, mock_eth1, mock_vtun0] +def test_get_all_addresses() -> None: + """Test public get_all_addresses API.""" + with ( + patch( + "zeroconf._utils.net.ifaddr.get_adapters", + return_value=_generate_mock_adapters(), + ), + warnings.catch_warnings(record=True) as warned, + ): + addresses = get_all_addresses() + assert isinstance(addresses, list) + assert len(addresses) == 3 + assert len(warned) == 1 + first_warning = warned[0] + assert "get_all_addresses is deprecated" in str(first_warning.message) + + +def test_get_all_addresses_v6() -> None: + """Test public get_all_addresses_v6 API.""" + with ( + patch( + "zeroconf._utils.net.ifaddr.get_adapters", + return_value=_generate_mock_adapters(), + ), + warnings.catch_warnings(record=True) as warned, + ): + addresses = get_all_addresses_v6() + assert isinstance(addresses, list) + assert len(addresses) == 3 + assert len(warned) == 1 + first_warning = warned[0] + assert "get_all_addresses_v6 is deprecated" in str(first_warning.message) + + def test_ip6_to_address_and_index(): """Test we can extract from mocked adapters.""" adapters = _generate_mock_adapters() - assert netutils.ip6_to_address_and_index(adapters, "2001:db8::") == (('2001:db8::', 1, 1), 1) - assert netutils.ip6_to_address_and_index(adapters, "2001:db8::%1") == (('2001:db8::', 1, 1), 1) + assert netutils.ip6_to_address_and_index(adapters, "2001:db8::") == ( + ("2001:db8::", 1, 1), + 1, + ) + assert netutils.ip6_to_address_and_index(adapters, "2001:db8::%1") == ( + ("2001:db8::", 1, 1), + 1, + ) with pytest.raises(RuntimeError): assert netutils.ip6_to_address_and_index(adapters, "2005:db8::") @@ -46,7 +89,7 @@ def test_ip6_to_address_and_index(): def test_interface_index_to_ip6_address(): """Test we can extract from mocked adapters.""" adapters = _generate_mock_adapters() - assert 
netutils.interface_index_to_ip6_address(adapters, 1) == ('2001:db8::', 1, 1) + assert netutils.interface_index_to_ip6_address(adapters, 1) == ("2001:db8::", 1, 1) # call with invalid adapter with pytest.raises(RuntimeError): @@ -60,19 +103,27 @@ def test_interface_index_to_ip6_address(): def test_ip6_addresses_to_indexes(): """Test we can extract from mocked adapters.""" interfaces = [1] - with patch("zeroconf._utils.net.ifaddr.get_adapters", return_value=_generate_mock_adapters()): - assert netutils.ip6_addresses_to_indexes(interfaces) == [(('2001:db8::', 1, 1), 1)] + with patch( + "zeroconf._utils.net.ifaddr.get_adapters", + return_value=_generate_mock_adapters(), + ): + assert netutils.ip6_addresses_to_indexes(interfaces) == [(("2001:db8::", 1, 1), 1)] - interfaces_2 = ['2001:db8::'] - with patch("zeroconf._utils.net.ifaddr.get_adapters", return_value=_generate_mock_adapters()): - assert netutils.ip6_addresses_to_indexes(interfaces_2) == [(('2001:db8::', 1, 1), 1)] + interfaces_2 = ["2001:db8::"] + with patch( + "zeroconf._utils.net.ifaddr.get_adapters", + return_value=_generate_mock_adapters(), + ): + assert netutils.ip6_addresses_to_indexes(interfaces_2) == [(("2001:db8::", 1, 1), 1)] def test_normalize_interface_choice_errors(): """Test we generate exception on invalid input.""" - with patch("zeroconf._utils.net.get_all_addresses", return_value=[]), patch( - "zeroconf._utils.net.get_all_addresses_v6", return_value=[] - ), pytest.raises(RuntimeError): + with ( + patch("zeroconf._utils.net.get_all_addresses_ipv4", return_value=[]), + patch("zeroconf._utils.net.get_all_addresses_ipv6", return_value=[]), + pytest.raises(RuntimeError), + ): netutils.normalize_interface_choice(r.InterfaceChoice.All) with pytest.raises(TypeError): @@ -81,7 +132,12 @@ def test_normalize_interface_choice_errors(): @pytest.mark.parametrize( "errno,expected_result", - [(errno.EADDRINUSE, False), (errno.EADDRNOTAVAIL, False), (errno.EINVAL, False), (0, True)], + [ + (errno.EADDRINUSE, 
False), + (errno.EADDRNOTAVAIL, False), + (errno.EINVAL, False), + (0, True), + ], ) def test_add_multicast_member_socket_errors(errno, expected_result): """Test we handle socket errors when adding multicast members.""" @@ -107,96 +163,128 @@ def test_disable_ipv6_only_or_raise(): errors_logged = [] def _log_error(*args): - nonlocal errors_logged errors_logged.append(args) - sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - with pytest.raises(OSError), patch.object(netutils.log, "error", _log_error), patch( - "socket.socket.setsockopt", side_effect=OSError + with ( + socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock, + pytest.raises(OSError), + patch.object(netutils.log, "error", _log_error), + patch("socket.socket.setsockopt", side_effect=OSError), ): netutils.disable_ipv6_only_or_raise(sock) assert ( errors_logged[0][0] - == 'Support for dual V4-V6 sockets is not present, use IPVersion.V4 or IPVersion.V6' + == "Support for dual V4-V6 sockets is not present, use IPVersion.V4 or IPVersion.V6" ) -@pytest.mark.skipif(not hasattr(socket, 'SO_REUSEPORT'), reason="System does not have SO_REUSEPORT") +@pytest.mark.skipif(not hasattr(socket, "SO_REUSEPORT"), reason="System does not have SO_REUSEPORT") def test_set_so_reuseport_if_available_is_present(): """Test that setting socket.SO_REUSEPORT only OSError errno.ENOPROTOOPT is trapped.""" - sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - with pytest.raises(OSError), patch("socket.socket.setsockopt", side_effect=OSError): - netutils.set_so_reuseport_if_available(sock) + with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock: + with pytest.raises(OSError), patch("socket.socket.setsockopt", side_effect=OSError): + netutils.set_so_reuseport_if_available(sock) - with patch("socket.socket.setsockopt", side_effect=OSError(errno.ENOPROTOOPT, None)): - netutils.set_so_reuseport_if_available(sock) + with patch("socket.socket.setsockopt", side_effect=OSError(errno.ENOPROTOOPT, None)): + 
netutils.set_so_reuseport_if_available(sock) -@pytest.mark.skipif(hasattr(socket, 'SO_REUSEPORT'), reason="System has SO_REUSEPORT") +@pytest.mark.skipif(hasattr(socket, "SO_REUSEPORT"), reason="System has SO_REUSEPORT") def test_set_so_reuseport_if_available_not_present(): """Test that we do not try to set SO_REUSEPORT if it is not present.""" - sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - with patch("socket.socket.setsockopt", side_effect=OSError): + with ( + socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock, + patch("socket.socket.setsockopt", side_effect=OSError), + ): netutils.set_so_reuseport_if_available(sock) -def test_set_mdns_port_socket_options_for_ip_version(): - """Test OSError with errno with EINVAL and bind address '' from setsockopt IP_MULTICAST_TTL does not raise.""" - sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) +def test_set_respond_socket_multicast_options(): + """Test OSError with errno with EINVAL and bind address ''. - # Should raise on EPERM always - with pytest.raises(OSError), patch("socket.socket.setsockopt", side_effect=OSError(errno.EPERM, None)): - netutils.set_mdns_port_socket_options_for_ip_version(sock, ('',), r.IPVersion.V4Only) - - # Should raise on EINVAL always when bind address is not '' - with pytest.raises(OSError), patch("socket.socket.setsockopt", side_effect=OSError(errno.EINVAL, None)): - netutils.set_mdns_port_socket_options_for_ip_version(sock, ('127.0.0.1',), r.IPVersion.V4Only) - - # Should not raise on EINVAL when bind address is '' - with patch("socket.socket.setsockopt", side_effect=OSError(errno.EINVAL, None)): - netutils.set_mdns_port_socket_options_for_ip_version(sock, ('',), r.IPVersion.V4Only) - - -def test_add_multicast_member(): - sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - interface = '127.0.0.1' - - # EPERM should always raise - with pytest.raises(OSError), patch("socket.socket.setsockopt", side_effect=OSError(errno.EPERM, None)): - 
netutils.add_multicast_member(sock, interface) - - # EADDRINUSE should return False - with patch("socket.socket.setsockopt", side_effect=OSError(errno.EADDRINUSE, None)): - assert netutils.add_multicast_member(sock, interface) is False - - # EADDRNOTAVAIL should return False - with patch("socket.socket.setsockopt", side_effect=OSError(errno.EADDRNOTAVAIL, None)): - assert netutils.add_multicast_member(sock, interface) is False - - # EINVAL should return False - with patch("socket.socket.setsockopt", side_effect=OSError(errno.EINVAL, None)): - assert netutils.add_multicast_member(sock, interface) is False - - # ENOPROTOOPT should return False - with patch("socket.socket.setsockopt", side_effect=OSError(errno.ENOPROTOOPT, None)): - assert netutils.add_multicast_member(sock, interface) is False - - # ENODEV should raise for ipv4 - with pytest.raises(OSError), patch("socket.socket.setsockopt", side_effect=OSError(errno.ENODEV, None)): - netutils.add_multicast_member(sock, interface) is False - - # ENODEV should return False for ipv6 - with patch("socket.socket.setsockopt", side_effect=OSError(errno.ENODEV, None)): - assert netutils.add_multicast_member(sock, ('2001:db8::', 1, 1)) is False # type: ignore[arg-type] - - # No IPv6 support should return False for IPv6 - with patch("socket.inet_pton", side_effect=OSError()): - assert netutils.add_multicast_member(sock, ('2001:db8::', 1, 1)) is False # type: ignore[arg-type] + from setsockopt IP_MULTICAST_TTL does not raise.""" + # Should raise on EINVAL always + with ( + socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock, + pytest.raises(OSError), + patch("socket.socket.setsockopt", side_effect=OSError(errno.EINVAL, None)), + ): + netutils.set_respond_socket_multicast_options(sock, r.IPVersion.V4Only) - # No error should return True - with patch("socket.socket.setsockopt"): - assert netutils.add_multicast_member(sock, interface) is True + with pytest.raises(RuntimeError): + 
netutils.set_respond_socket_multicast_options(sock, r.IPVersion.All) + + +def test_add_multicast_member(caplog: pytest.LogCaptureFixture) -> None: + with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock: + interface = "127.0.0.1" + + # EPERM should always raise + with ( + pytest.raises(OSError), + patch("socket.socket.setsockopt", side_effect=OSError(errno.EPERM, None)), + ): + netutils.add_multicast_member(sock, interface) + + # EADDRINUSE should return False + with patch("socket.socket.setsockopt", side_effect=OSError(errno.EADDRINUSE, None)): + assert netutils.add_multicast_member(sock, interface) is False + + # EADDRNOTAVAIL should return False + with patch("socket.socket.setsockopt", side_effect=OSError(errno.EADDRNOTAVAIL, None)): + assert netutils.add_multicast_member(sock, interface) is False + + # EINVAL should return False + with patch("socket.socket.setsockopt", side_effect=OSError(errno.EINVAL, None)): + assert netutils.add_multicast_member(sock, interface) is False + + # ENOPROTOOPT should return False + with patch("socket.socket.setsockopt", side_effect=OSError(errno.ENOPROTOOPT, None)): + assert netutils.add_multicast_member(sock, interface) is False + + # ENODEV should raise for ipv4 + with ( + pytest.raises(OSError), + patch("socket.socket.setsockopt", side_effect=OSError(errno.ENODEV, None)), + ): + assert netutils.add_multicast_member(sock, interface) is False + + # ENODEV should return False for ipv6 + with patch("socket.socket.setsockopt", side_effect=OSError(errno.ENODEV, None)): + assert netutils.add_multicast_member(sock, ("2001:db8::", 1, 1)) is False # type: ignore[arg-type] + + # No IPv6 support should return False for IPv6 + with patch("socket.inet_pton", side_effect=OSError()): + assert netutils.add_multicast_member(sock, ("2001:db8::", 1, 1)) is False # type: ignore[arg-type] + + # No error should return True + with patch("socket.socket.setsockopt"): + assert netutils.add_multicast_member(sock, interface) is True + + # Ran out 
of IGMP memberships is forgiving and logs about igmp_max_memberships on linux + caplog.clear() + with ( + patch.object(sys, "platform", "linux"), + patch( + "socket.socket.setsockopt", side_effect=OSError(errno.ENOBUFS, "No buffer space available") + ), + ): + assert netutils.add_multicast_member(sock, interface) is False + assert "No buffer space available" in caplog.text + assert "net.ipv4.igmp_max_memberships" in caplog.text + + # Ran out of IGMP memberships is forgiving and logs + caplog.clear() + with ( + patch.object(sys, "platform", "darwin"), + patch( + "socket.socket.setsockopt", side_effect=OSError(errno.ENOBUFS, "No buffer space available") + ), + ): + assert netutils.add_multicast_member(sock, interface) is False + assert "No buffer space available" in caplog.text + assert "net.ipv4.igmp_max_memberships" not in caplog.text def test_bind_raises_skips_address(): @@ -216,7 +304,132 @@ def _mock_socket(*args, **kwargs): netutils.new_socket(("0.0.0.0", 0)) # type: ignore[arg-type] +def test_bind_raises_address_in_use(caplog: pytest.LogCaptureFixture) -> None: + """Test bind failing in new_socket returns None on EADDRINUSE.""" + + def _mock_socket(*args, **kwargs): + sock = MagicMock() + sock.bind = MagicMock(side_effect=OSError(errno.EADDRINUSE, f"Error: {errno.EADDRINUSE}")) + return sock + + with ( + pytest.raises(OSError), + patch.object(sys, "platform", "darwin"), + patch("socket.socket", _mock_socket), + ): + netutils.new_socket(("0.0.0.0", 0)) # type: ignore[arg-type] + assert ( + "On BSD based systems sharing the same port with " + "another stack may require processes to run with the same UID" + ) in caplog.text + assert ( + "When using avahi, make sure disallow-other-stacks is set to no in avahi-daemon.conf" in caplog.text + ) + + caplog.clear() + with pytest.raises(OSError), patch.object(sys, "platform", "linux"), patch("socket.socket", _mock_socket): + netutils.new_socket(("0.0.0.0", 0)) # type: ignore[arg-type] + assert ( + "On BSD based systems 
sharing the same port with " + "another stack may require processes to run with the same UID" + ) not in caplog.text + assert ( + "When using avahi, make sure disallow-other-stacks is set to no in avahi-daemon.conf" in caplog.text + ) + + def test_new_respond_socket_new_socket_returns_none(): """Test new_respond_socket returns None if new_socket returns None.""" with patch.object(netutils, "new_socket", return_value=None): assert netutils.new_respond_socket(("0.0.0.0", 0)) is None # type: ignore[arg-type] + + +def test_create_sockets_interfaces_all_unicast(): + """Test create_sockets with unicast.""" + + with ( + patch("zeroconf._utils.net.new_socket") as mock_new_socket, + patch( + "zeroconf._utils.net.ifaddr.get_adapters", + return_value=_generate_mock_adapters(), + ), + ): + mock_socket = Mock(spec=socket.socket) + mock_new_socket.return_value = mock_socket + + listen_socket, respond_sockets = r.create_sockets( + interfaces=r.InterfaceChoice.All, unicast=True, ip_version=r.IPVersion.All + ) + + assert listen_socket is None + mock_new_socket.assert_any_call( + port=0, + ip_version=r.IPVersion.V6Only, + apple_p2p=False, + bind_addr=("2001:db8::", 1, 1), + ) + mock_new_socket.assert_any_call( + port=0, + ip_version=r.IPVersion.V4Only, + apple_p2p=False, + bind_addr=("192.168.1.5",), + ) + + +def test_create_sockets_interfaces_all() -> None: + """Test create_sockets with all interfaces. + + Tests if a responder socket is created for every successful multicast + join. 
+ """ + adapters = _generate_mock_adapters() + + # Additional IPv6 addresses usually fail to add membership + failure_interface = ("fd00:db8::", 1, 1) + + expected_calls = [] + for adapter in adapters: + for ip in adapter.ips: + if ip.ip == failure_interface: + continue + + if ip.is_IPv4: + bind_addr = (ip.ip,) + ip_version = r.IPVersion.V4Only + else: + bind_addr = ip.ip + ip_version = r.IPVersion.V6Only + + expected_calls.append( + call( + port=5353, + ip_version=ip_version, + apple_p2p=False, + bind_addr=bind_addr, + ) + ) + + def _patched_add_multicast_member(sock, interface): + return interface[0] != failure_interface + + with ( + patch("zeroconf._utils.net.new_socket") as mock_new_socket, + patch( + "zeroconf._utils.net.ifaddr.get_adapters", + return_value=adapters, + ), + patch("zeroconf._utils.net.add_multicast_member", side_effect=_patched_add_multicast_member), + ): + mock_socket = Mock(spec=socket.socket) + mock_new_socket.return_value = mock_socket + + r.create_sockets(interfaces=r.InterfaceChoice.All, ip_version=r.IPVersion.All) + + def call_to_tuple(c): + return (c.args, tuple(sorted(c.kwargs.items()))) + + # Exclude first new_socket call as this is the listen socket + actual_calls_set = {call_to_tuple(c) for c in mock_new_socket.call_args_list[1:]} + expected_calls_set = {call_to_tuple(c) for c in expected_calls} + + assert actual_calls_set == expected_calls_set