diff --git a/.appveyor.yml b/.appveyor.yml
deleted file mode 100644
index 1ebc291..0000000
--- a/.appveyor.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-skip_tags: true
-
-os: Visual Studio 2015
-
-environment:
- matrix:
- - PYTHON: "C:\\Python35"
- - PYTHON: "C:\\Python35-x64"
- - PYTHON: "C:\\Python36"
- - PYTHON: "C:\\Python36-x64"
-
-build_script:
- - "git --no-pager log -n2"
- - "echo %APPVEYOR_REPO_COMMIT%"
- - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;;%PATH%"
- - "python --version"
- - "python -c \"import struct; print(struct.calcsize('P') * 8)\""
- - "pip install ."
- - "pip install -Ur test-requirements.txt"
- - "pip install codecov"
-
-test_script:
- - "mkdir empty"
- - "cd empty"
- # Make sure it's being imported from where we expect
- - "python -c \"import os, pytest_trio; print(os.path.dirname(pytest_trio.__file__))\""
- - "python -u -m coverage run --rcfile=../.coveragerc -m pytest -W error -ra -v -s --pyargs pytest_trio"
- - "codecov"
diff --git a/.coveragerc b/.coveragerc
deleted file mode 100644
index 5687b73..0000000
--- a/.coveragerc
+++ /dev/null
@@ -1,9 +0,0 @@
-[run]
-branch=True
-source=pytest_trio
-
-[report]
-precision = 1
-exclude_lines =
- pragma: no cover
- abc.abstractmethod
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..e3cbb9d
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,89 @@
+name: CI
+
+on:
+ push:
+ branches-ignore:
+ - "dependabot/**"
+ pull_request:
+
+jobs:
+ Windows:
+ name: 'Windows (${{ matrix.python }})'
+ runs-on: 'windows-latest'
+ strategy:
+ fail-fast: false
+ matrix:
+ python: ['3.8', '3.9', '3.10', '3.11', '3.12']
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Setup python
+ uses: actions/setup-python@v2
+ with:
+ python-version: '${{ matrix.python }}'
+ - name: Run tests
+ run: ./ci.sh
+ shell: bash
+ env:
+ # Should match 'name:' up above
+ JOB_NAME: 'Windows (${{ matrix.python }})'
+
+ Ubuntu:
+ name: 'Ubuntu (${{ matrix.python }}${{ matrix.extra_name }})'
+ timeout-minutes: 10
+ runs-on: 'ubuntu-latest'
+ strategy:
+ fail-fast: false
+ matrix:
+ python: ['pypy-3.8', '3.8', 'pypy-3.9', '3.9', 'pypy-3.10', '3.10', '3.11', '3.12']
+ check_formatting: ['0']
+ check_docs: ['0']
+ extra_name: ['']
+ include:
+ - python: '3.9'
+ check_formatting: '1'
+ extra_name: ', check formatting'
+ - python: '3.9'
+ check_docs: '1'
+ extra_name: ', check docs'
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Setup python
+ uses: actions/setup-python@v2
+ if: "!endsWith(matrix.python, '-dev')"
+ with:
+ python-version: '${{ matrix.python }}'
+ - name: Setup python (dev)
+ uses: deadsnakes/action@v2.0.2
+ if: endsWith(matrix.python, '-dev')
+ with:
+ python-version: '${{ matrix.python }}'
+ - name: Run tests
+ run: ./ci.sh
+ env:
+ CHECK_FORMATTING: '${{ matrix.check_formatting }}'
+ CHECK_DOCS: '${{ matrix.check_docs }}'
+ # Should match 'name:' up above
+ JOB_NAME: 'Ubuntu (${{ matrix.python }}${{ matrix.extra_name }})'
+
+ macOS:
+ name: 'macOS (${{ matrix.python }})'
+ timeout-minutes: 10
+ runs-on: 'macos-latest'
+ strategy:
+ fail-fast: false
+ matrix:
+ python: ['3.8', '3.9', '3.10', '3.11', '3.12']
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Setup python
+ uses: actions/setup-python@v2
+ with:
+ python-version: '${{ matrix.python }}'
+ - name: Run tests
+ run: ./ci.sh
+ env:
+ # Should match 'name:' up above
+ JOB_NAME: 'macOS (${{ matrix.python }})'
diff --git a/.gitignore b/.gitignore
index afdfef3..79d6635 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,6 +40,8 @@ htmlcov/
.cache
nosetests.xml
coverage.xml
+.hypothesis/
+.pytest_cache/
# Translations
*.mo
diff --git a/.readthedocs.yml b/.readthedocs.yml
index da6abdf..8ca3e92 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -1,10 +1,18 @@
-# https://docs.readthedocs.io/en/latest/yaml-config.html
+# https://docs.readthedocs.io/en/latest/config-file/index.html
+version: 2
+
+build:
+ os: ubuntu-22.04
+ tools:
+ python: "3.9"
+
formats:
- htmlzip
- epub
-requirements_file: ci/rtd-requirements.txt
-
python:
- version: 3
- pip_install: True
+ install:
+ - requirements: docs-requirements.txt
+
+sphinx:
+ fail_on_warning: true
diff --git a/.style.yapf b/.style.yapf
deleted file mode 100644
index 2c71c9d..0000000
--- a/.style.yapf
+++ /dev/null
@@ -1,180 +0,0 @@
-[style]
-# Align closing bracket with visual indentation.
-align_closing_bracket_with_visual_indent=True
-
-# Allow dictionary keys to exist on multiple lines. For example:
-#
-# x = {
-# ('this is the first element of a tuple',
-# 'this is the second element of a tuple'):
-# value,
-# }
-allow_multiline_dictionary_keys=False
-
-# Allow lambdas to be formatted on more than one line.
-allow_multiline_lambdas=False
-
-# Insert a blank line before a class-level docstring.
-blank_line_before_class_docstring=False
-
-# Insert a blank line before a 'def' or 'class' immediately nested
-# within another 'def' or 'class'. For example:
-#
-# class Foo:
-# # <------ this blank line
-# def method():
-# ...
-blank_line_before_nested_class_or_def=False
-
-# Do not split consecutive brackets. Only relevant when
-# dedent_closing_brackets is set. For example:
-#
-# call_func_that_takes_a_dict(
-# {
-# 'key1': 'value1',
-# 'key2': 'value2',
-# }
-# )
-#
-# would reformat to:
-#
-# call_func_that_takes_a_dict({
-# 'key1': 'value1',
-# 'key2': 'value2',
-# })
-coalesce_brackets=False
-
-# The column limit.
-column_limit=79
-
-# Indent width used for line continuations.
-continuation_indent_width=4
-
-# Put closing brackets on a separate line, dedented, if the bracketed
-# expression can't fit in a single line. Applies to all kinds of brackets,
-# including function definitions and calls. For example:
-#
-# config = {
-# 'key1': 'value1',
-# 'key2': 'value2',
-# } # <--- this bracket is dedented and on a separate line
-#
-# time_series = self.remote_client.query_entity_counters(
-# entity='dev3246.region1',
-# key='dns.query_latency_tcp',
-# transform=Transformation.AVERAGE(window=timedelta(seconds=60)),
-# start_ts=now()-timedelta(days=3),
-# end_ts=now(),
-# ) # <--- this bracket is dedented and on a separate line
-dedent_closing_brackets=True
-
-# Place each dictionary entry onto its own line.
-each_dict_entry_on_separate_line=True
-
-# The regex for an i18n comment. The presence of this comment stops
-# reformatting of that line, because the comments are required to be
-# next to the string they translate.
-i18n_comment=
-
-# The i18n function call names. The presence of this function stops
-# reformattting on that line, because the string it has cannot be moved
-# away from the i18n comment.
-i18n_function_call=
-
-# Indent the dictionary value if it cannot fit on the same line as the
-# dictionary key. For example:
-#
-# config = {
-# 'key1':
-# 'value1',
-# 'key2': value1 +
-# value2,
-# }
-indent_dictionary_value=True
-
-# The number of columns to use for indentation.
-indent_width=4
-
-# Join short lines into one line. E.g., single line 'if' statements.
-join_multiple_lines=False
-
-# Use spaces around default or named assigns.
-spaces_around_default_or_named_assign=False
-
-# Use spaces around the power operator.
-spaces_around_power_operator=False
-
-# The number of spaces required before a trailing comment.
-spaces_before_comment=2
-
-# Insert a space between the ending comma and closing bracket of a list,
-# etc.
-space_between_ending_comma_and_closing_bracket=False
-
-# Split before arguments if the argument list is terminated by a
-# comma.
-split_arguments_when_comma_terminated=True
-
-# Set to True to prefer splitting before '&', '|' or '^' rather than
-# after.
-split_before_bitwise_operator=True
-
-# Split before a dictionary or set generator (comp_for). For example, note
-# the split before the 'for':
-#
-# foo = {
-# variable: 'Hello world, have a nice day!'
-# for variable in bar if variable != 42
-# }
-split_before_dict_set_generator=True
-
-# If an argument / parameter list is going to be split, then split before
-# the first argument.
-split_before_first_argument=True
-
-# Set to True to prefer splitting before 'and' or 'or' rather than
-# after.
-split_before_logical_operator=True
-
-# Split named assignments onto individual lines.
-split_before_named_assigns=True
-
-# The penalty for splitting right after the opening bracket.
-split_penalty_after_opening_bracket=30
-
-# The penalty for splitting the line after a unary operator.
-split_penalty_after_unary_operator=10000
-
-# The penalty for splitting right before an if expression.
-split_penalty_before_if_expr=0
-
-# The penalty of splitting the line around the '&', '|', and '^'
-# operators.
-split_penalty_bitwise_operator=300
-
-# The penalty for characters over the column limit.
-split_penalty_excess_character=4500
-
-# The penalty incurred by adding a line split to the unwrapped line. The
-# more line splits added the higher the penalty.
-split_penalty_for_added_line_split=30
-
-# The penalty of splitting a list of "import as" names. For example:
-#
-# from a_very_long_or_indented_module_name_yada_yad import (long_argument_1,
-# long_argument_2,
-# long_argument_3)
-#
-# would reformat to something like:
-#
-# from a_very_long_or_indented_module_name_yada_yad import (
-# long_argument_1, long_argument_2, long_argument_3)
-split_penalty_import_names=0
-
-# The penalty of splitting the line around the 'and' and 'or'
-# operators.
-split_penalty_logical_operator=0
-
-# Use the Tab character for indentation.
-use_tabs=False
-
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 271a7c9..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-language: python
-python:
-- 3.5.0
-- 3.5.2
-- 3.5-dev
-- 3.6
-- 3.6-dev
-- 3.7-dev
-sudo: false
-dist: trusty
-matrix:
- include:
- - os: linux
- language: generic
- env: USE_PYPY_RELEASE_VERSION=5.9-beta
- - os: osx
- language: generic
- env: MACPYTHON=3.5.4
- - os: osx
- language: generic
- env: MACPYTHON=3.6.3
- - os: linux
- language: python
- python: 3.6
- env: CHECK_DOCS=1
- - os: linux
- language: python
- python: 3.6
- env: CHECK_FORMATTING=1
-script:
-- ci/travis.sh
diff --git a/CHEATSHEET.rst b/CHEATSHEET.rst
index 49a8b8d..aeacf0a 100644
--- a/CHEATSHEET.rst
+++ b/CHEATSHEET.rst
@@ -9,15 +9,7 @@ To run tests
* Actually run the tests: ``pytest pytest_trio``
-
-To run yapf
------------
-
-* Show what changes yapf wants to make: ``yapf -rpd setup.py
- pytest_trio``
-
-* Apply all changes directly to the source tree: ``yapf -rpi setup.py
- pytest_trio``
+* Format the code with ``black .``
To make a release
diff --git a/MANIFEST.in b/MANIFEST.in
index 4edd4b1..afc90e2 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,5 +1,5 @@
include README.rst CHEATSHEET.rst LICENSE* CODE_OF_CONDUCT* CONTRIBUTING*
-include .coveragerc .style.yapf
+include .coveragerc
include test-requirements.txt
recursive-include docs *
prune docs/build
diff --git a/README.rst b/README.rst
index fe1b031..0d3b1a3 100644
--- a/README.rst
+++ b/README.rst
@@ -1,17 +1,45 @@
pytest-trio
===========
-.. image:: https://travis-ci.org/python-trio/pytest-trio.svg?branch=master
- :target: https://travis-ci.org/python-trio/pytest-trio
+.. image:: https://img.shields.io/badge/chat-join%20now-blue.svg
+ :target: https://gitter.im/python-trio/general
+ :alt: Join chatroom
-.. image:: https://ci.appveyor.com/api/projects/status/aq0pklx7hanx031x?svg=true
- :target: https://ci.appveyor.com/project/touilleMan/pytest-trio
+.. image:: https://img.shields.io/badge/docs-read%20now-blue.svg
+ :target: https://pytest-trio.readthedocs.io/en/latest/?badge=latest
+ :alt: Documentation Status
+
+.. image:: https://img.shields.io/pypi/v/pytest-trio.svg
+ :target: https://pypi.org/project/pytest-trio
+ :alt: Latest PyPi version
+
+.. image:: https://github.com/python-trio/pytest-trio/workflows/CI/badge.svg?branch=master
+ :target: https://github.com/python-trio/pytest-trio/actions
+ :alt: Automated test status
.. image:: https://codecov.io/gh/python-trio/pytest-trio/branch/master/graph/badge.svg
- :target: https://codecov.io/gh/python-trio/pytest-trio
+ :target: https://codecov.io/gh/python-trio/pytest-trio
+ :alt: Test coverage
+
+This is a pytest plugin to help you test projects that use `Trio
+<https://github.com/python-trio/trio>`__, a friendly library for concurrency
+and async I/O in Python. For an overview of features, see our `manual
+<https://pytest-trio.readthedocs.io>`__, or jump straight to the
+`quickstart guide
+<https://pytest-trio.readthedocs.io/en/latest/quickstart.html>`__.
+
+
+Vital statistics
+----------------
+
+**Documentation:** https://pytest-trio.readthedocs.io
-Welcome to `pytest-trio <https://github.com/python-trio/pytest-trio>`__!
+**Bug tracker and source code:**
+https://github.com/python-trio/pytest-trio
-Pytest plugin for trio
+**License:** MIT or Apache 2, your choice.
-License: Your choice of MIT or Apache License 2.0
+**Code of conduct:** Contributors are requested to follow our `code of
+conduct
+<https://trio.readthedocs.io/en/latest/code-of-conduct.html>`__
+in all project spaces.
diff --git a/ci.sh b/ci.sh
new file mode 100755
index 0000000..0a6c4d6
--- /dev/null
+++ b/ci.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+set -ex -o pipefail
+
+# Log some general info about the environment
+uname -a
+env | sort
+
+# Curl's built-in retry system is not very robust; it gives up on lots of
+# network errors that we want to retry on. Wget might work better, but it's
+# not installed on azure pipelines's windows boxes. So... let's try some good
+# old-fashioned brute force. (This is also a convenient place to put options
+# we always want, like -f to tell curl to give an error if the server sends an
+# error response, and -L to follow redirects.)
+function curl-harder() {
+ for BACKOFF in 0 1 2 4 8 15 15 15 15; do
+ sleep $BACKOFF
+ if curl -fL --connect-timeout 5 "$@"; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+
+python -m pip install -U pip build
+python -m pip --version
+
+python -m build
+python -m pip install dist/*.whl
+
+if [ "$CHECK_FORMATTING" = "1" ]; then
+ pip install black
+ if ! black --check . ; then
+        cat <<EOF
diff --git a/ci/rtd-requirements.txt b/ci/rtd-requirements.txt
deleted file mode 100644
--- a/ci/rtd-requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-sphinx >= 1.6.1
-sphinx_rtd_theme
-sphinxcontrib-trio
diff --git a/ci/travis.sh b/ci/travis.sh
deleted file mode 100755
index a59d32f..0000000
--- a/ci/travis.sh
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-# See https://github.com/python-trio/trio/issues/334
-YAPF_VERSION=0.17.0
-
-if [ "$TRAVIS_OS_NAME" = "osx" ]; then
- curl -Lo macpython.pkg https://www.python.org/ftp/python/${MACPYTHON}/python-${MACPYTHON}-macosx10.6.pkg
- sudo installer -pkg macpython.pkg -target /
- ls /Library/Frameworks/Python.framework/Versions/*/bin/
- PYTHON_EXE=/Library/Frameworks/Python.framework/Versions/*/bin/python3
- sudo $PYTHON_EXE -m pip install virtualenv
- $PYTHON_EXE -m virtualenv testenv
- source testenv/bin/activate
-fi
-
-if [ "$USE_PYPY_NIGHTLY" = "1" ]; then
- curl -fLo pypy.tar.bz2 http://buildbot.pypy.org/nightly/py3.5/pypy-c-jit-latest-linux64.tar.bz2
- if [ ! -s pypy.tar.bz2 ]; then
- # We know:
- # - curl succeeded (200 response code; -f means "exit with error if
- # server returns 4xx or 5xx")
- # - nonetheless, pypy.tar.bz2 does not exist, or contains no data
- # This isn't going to work, and the failure is not informative of
- # anything involving this package.
- ls -l
- echo "PyPy3 nightly build failed to download – something is wrong on their end."
- echo "Skipping testing against the nightly build for right now."
- exit 0
- fi
- tar xaf pypy.tar.bz2
- # something like "pypy-c-jit-89963-748aa3022295-linux64"
- PYPY_DIR=$(echo pypy-c-jit-*)
- PYTHON_EXE=$PYPY_DIR/bin/pypy3
- ($PYTHON_EXE -m ensurepip \
- && $PYTHON_EXE -m pip install virtualenv \
- && $PYTHON_EXE -m virtualenv testenv) \
- || (echo "pypy nightly is broken; skipping tests"; exit 0)
- source testenv/bin/activate
-fi
-
-if [ "$USE_PYPY_RELEASE_VERSION" != "" ]; then
- curl -fLo pypy.tar.bz2 https://bitbucket.org/squeaky/portable-pypy/downloads/pypy3.5-${USE_PYPY_RELEASE_VERSION}-linux_x86_64-portable.tar.bz2
- tar xaf pypy.tar.bz2
- # something like "pypy3.5-5.7.1-beta-linux_x86_64-portable"
- PYPY_DIR=$(echo pypy3.5-*)
- PYTHON_EXE=$PYPY_DIR/bin/pypy3
- $PYTHON_EXE -m ensurepip
- $PYTHON_EXE -m pip install virtualenv
- $PYTHON_EXE -m virtualenv testenv
- source testenv/bin/activate
-fi
-
-pip install -U pip setuptools wheel
-
-if [ "$CHECK_FORMATTING" = "1" ]; then
- pip install yapf==${YAPF_VERSION}
- if ! yapf -rpd setup.py pytest_trio; then
-        cat <<EOF
diff --git a/docs-requirements.in b/docs-requirements.in
new file mode 100644
--- /dev/null
+++ b/docs-requirements.in
@@ -0,0 +1,14 @@
+sphinx >= 1.6.1
+sphinx_rtd_theme
+sphinxcontrib-trio
+# Workaround for this weird issue:
+# https://travis-ci.org/python-trio/pytest-trio/jobs/407495415
+attrs >= 17.4.0
+# != 19.9.0 for https://github.com/twisted/towncrier/issues/180
+# != 21.3.0 for https://github.com/twisted/towncrier/issues/346
+towncrier != 19.9.0,!= 21.3.0
+
+# pytest-trio's own dependencies
+trio >= 0.22.0
+outcome >= 1.1.0
+pytest >= 8.2.0
diff --git a/docs-requirements.txt b/docs-requirements.txt
new file mode 100644
index 0000000..efe32cb
--- /dev/null
+++ b/docs-requirements.txt
@@ -0,0 +1,111 @@
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+# pip-compile docs-requirements.in
+#
+alabaster==0.7.12
+ # via sphinx
+async-generator==1.10
+ # via trio
+attrs==22.1.0
+ # via
+ # -r docs-requirements.in
+ # outcome
+ # trio
+babel==2.10.3
+ # via sphinx
+certifi==2022.9.24
+ # via requests
+charset-normalizer==2.1.1
+ # via requests
+click==8.1.3
+ # via
+ # click-default-group
+ # towncrier
+click-default-group==1.2.2
+ # via towncrier
+docutils==0.17.1
+ # via
+ # sphinx
+ # sphinx-rtd-theme
+exceptiongroup==1.0.0
+ # via
+ # pytest
+ # trio
+idna==3.4
+ # via
+ # requests
+ # trio
+imagesize==1.4.1
+ # via sphinx
+incremental==22.10.0
+ # via towncrier
+iniconfig==1.1.1
+ # via pytest
+jinja2==3.1.2
+ # via
+ # sphinx
+ # towncrier
+markupsafe==2.1.1
+ # via jinja2
+outcome==1.2.0
+ # via
+ # -r docs-requirements.in
+ # trio
+packaging==21.3
+ # via
+ # pytest
+ # sphinx
+pluggy==1.5.0
+ # via pytest
+pygments==2.13.0
+ # via sphinx
+pyparsing==3.0.9
+ # via packaging
+pytest==8.2.2
+ # via -r docs-requirements.in
+pytz==2022.5
+ # via babel
+requests==2.28.1
+ # via sphinx
+sniffio==1.3.0
+ # via trio
+snowballstemmer==2.2.0
+ # via sphinx
+sortedcontainers==2.4.0
+ # via trio
+sphinx==5.3.0
+ # via
+ # -r docs-requirements.in
+ # sphinx-rtd-theme
+ # sphinxcontrib-trio
+sphinx-rtd-theme==1.0.0
+ # via -r docs-requirements.in
+sphinxcontrib-applehelp==1.0.2
+ # via sphinx
+sphinxcontrib-devhelp==1.0.2
+ # via sphinx
+sphinxcontrib-htmlhelp==2.0.0
+ # via sphinx
+sphinxcontrib-jsmath==1.0.1
+ # via sphinx
+sphinxcontrib-qthelp==1.0.3
+ # via sphinx
+sphinxcontrib-serializinghtml==1.1.5
+ # via sphinx
+sphinxcontrib-trio==1.1.2
+ # via -r docs-requirements.in
+tomli==2.0.1
+ # via
+ # pytest
+ # towncrier
+towncrier==22.8.0
+ # via -r docs-requirements.in
+trio==0.22.0
+ # via -r docs-requirements.in
+urllib3==1.26.12
+ # via requests
+
+# The following packages are considered to be unsafe in a requirements file:
+# setuptools
diff --git a/docs/source/_static/favicon-32.png b/docs/source/_static/favicon-32.png
new file mode 100644
index 0000000..73a394c
Binary files /dev/null and b/docs/source/_static/favicon-32.png differ
diff --git a/docs/source/_static/favicon.svg b/docs/source/_static/favicon.svg
new file mode 100644
index 0000000..361e062
--- /dev/null
+++ b/docs/source/_static/favicon.svg
@@ -0,0 +1,177 @@
+
+
+
+
diff --git a/docs/source/_static/hackrtd.css b/docs/source/_static/hackrtd.css
new file mode 100644
index 0000000..ea8f5a5
--- /dev/null
+++ b/docs/source/_static/hackrtd.css
@@ -0,0 +1,112 @@
+/* Temporary hack to work around bug in rtd theme 2.0 through 2.4
+ See https://github.com/rtfd/sphinx_rtd_theme/pull/382
+*/
+pre {
+ line-height: normal !important;
+}
+
+/* Make .. deprecation:: blocks visible
+ * (by default they're entirely unstyled)
+ */
+.deprecated {
+ background-color: #ffe13b;
+}
+
+/* Add a snakey triskelion ornament to <hr>s, per
+ * https://stackoverflow.com/questions/8862344/css-hr-with-ornament/18541258#18541258
+ * but only do it to <hr>s in the content box, b/c the RTD popup control panel
+ * thingummy also has an <hr> in it, and putting the ornament on that looks
+ * *really weird*. (In particular, the background color is wrong.)
+ */
+.rst-content hr:after {
+ /* This .svg gets displayed on top of the middle of the hrule. It has a box
+ * behind the logo that's colored to match the RTD theme body background
+ * color (#fcfcfc), which hides the middle part of the hrule to make it
+ * look like there's a gap in it. The size of the box determines the size
+ * of the gap.
+ */
+ background: url('ornament.svg') no-repeat top center;
+ background-size: contain;
+ content: "";
+ display: block;
+ height: 30px;
+ position: relative;
+ top: -15px;
+}
+
+/* Hacks to make the upper-left logo area look nicer */
+
+.wy-side-nav-search {
+ /* Lighter background color to match logo */
+ background-color: #d2e7fa !important;
+}
+
+.wy-side-nav-search > a {
+ color: #306998 !important;
+}
+
+.wy-side-nav-search > a.logo {
+ display: block !important;
+ padding-bottom: 0.809em !important;
+}
+
+.wy-side-nav-search > a img.logo {
+ display: inline !important;
+ padding: 20 !important;
+}
+
+.trio-version {
+ display: inline;
+ /* I *cannot* figure out how to get the version text vertically centered
+ on the logo. Oh well...
+ height: 32px;
+ line-height: 32px;
+ */
+}
+
+.wy-side-nav-search > a {
+ /* Mostly this is just to simplify things, so we don't have margin/padding
+     * on both the <a> and the <img> inside it */
+ margin: 0 !important;
+ padding: 0 !important;
+}
+
+/* Get rid of the weird super dark "Contents" label that wastes vertical space
+ */
+.wy-menu-vertical > p.caption {
+ display: none !important;
+}
+
+/* I do not like RTD's use of Roboto Slab for headlines. So force it back to
+ * Lato (or whatever fallback it's using if Lato isn't available for some
+ * reason). I also experimented with using Montserrat to be extra obnoxiously
+ * on brand, but honestly you couldn't really tell so there wasn't much point
+ * in adding page weight for that, and this is going to match the body text
+ * better. (Montserrat for body text *definitely* didn't look good, alas.)
+ */
+h1, h2, h3, h4, h5, h6, legend, .rst-content .toctree-wrapper p.caption {
+ font-family: inherit !important;
+}
+
+/* Get rid of the horrible red for literal content */
+.rst-content tt.literal, .rst-content tt.literal, .rst-content code.literal {
+ color: #222 !important;
+}
+
+/* Style the "Need help?" text just underneath the search box */
+.trio-help-hint {
+ line-height: normal;
+ margin-bottom: 0;
+ /* font-size: 12px; */
+ font-size: 80%; /* matches the "Search docs" box */
+ padding-top: 6px;
+ color: #306998;
+ text-align: center;
+}
+
+a.trio-help-hint, .trio-help-hint a:link, .trio-help-hint a:visited {
+ color: inherit;
+ /* Like text-decoration: underline, but with a thinner line */
+ text-decoration: none;
+ border-bottom: 1px solid;
+}
diff --git a/docs/source/_static/ornament.svg b/docs/source/_static/ornament.svg
new file mode 100644
index 0000000..e4793e3
--- /dev/null
+++ b/docs/source/_static/ornament.svg
@@ -0,0 +1,188 @@
+
+
+
+
diff --git a/docs/source/_templates/layout.html b/docs/source/_templates/layout.html
new file mode 100644
index 0000000..f8ec93e
--- /dev/null
+++ b/docs/source/_templates/layout.html
@@ -0,0 +1,27 @@
+{#
+https://stackoverflow.com/questions/25243482/how-to-add-sphinx-generated-index-to-the-sidebar-when-using-read-the-docs-theme
+#}
+{% extends "!layout.html" %}
+
+{% block sidebartitle %}
+
+ {{ project }}
+ {%- set nav_version = version %}
+ {% if READTHEDOCS and current_version %}
+ {%- set nav_version = current_version %}
+ {% endif %}
+ {# don't show the version on RTD if it's the default #}
+ {% if nav_version != 'latest' %}
+
+{% endblock %}
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 5e508ef..ff74f8b 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -19,8 +19,23 @@
#
import os
import sys
+
# So autodoc can import our package
-sys.path.insert(0, os.path.abspath('../..'))
+sys.path.insert(0, os.path.abspath("../.."))
+
+# https://docs.readthedocs.io/en/stable/builds.html#build-environment
+if "READTHEDOCS" in os.environ:
+ import glob
+
+ if glob.glob("../../newsfragments/*.*.rst"):
+ print("-- Found newsfragments; running towncrier --", flush=True)
+ import subprocess
+
+ subprocess.run(
+ ["towncrier", "--yes", "--date", "not released yet"],
+ cwd="../..",
+ check=True,
+ )
# Warn about all references to unknown targets
nitpicky = True
@@ -30,6 +45,16 @@
("py:obj", "bytes-like"),
]
+
+# XX hack the RTD theme until
+# https://github.com/rtfd/sphinx_rtd_theme/pull/382
+# is shipped (should be in the release after 0.2.4)
+# ...note that this has since grown to contain a bunch of other CSS hacks too
+# though.
+def setup(app):
+ app.add_css_file("hackrtd.css")
+
+
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
@@ -40,36 +65,36 @@
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
- 'sphinx.ext.autodoc',
- 'sphinx.ext.intersphinx',
- 'sphinx.ext.coverage',
- 'sphinx.ext.napoleon',
- 'sphinxcontrib_trio',
+ "sphinx.ext.autodoc",
+ "sphinx.ext.intersphinx",
+ "sphinx.ext.coverage",
+ "sphinx.ext.napoleon",
+ "sphinxcontrib_trio",
]
intersphinx_mapping = {
- "python": ('https://docs.python.org/3', None),
- "trio": ('https://trio.readthedocs.io/en/stable', None),
+ "python": ("https://docs.python.org/3", None),
+ "trio": ("https://trio.readthedocs.io/en/stable", None),
}
autodoc_member_order = "bysource"
# Add any paths that contain templates here, relative to this directory.
-templates_path = []
+templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
+source_suffix = ".rst"
# The master toctree document.
-master_doc = 'index'
+master_doc = "index"
# General information about the project.
-project = 'pytest-trio'
-copyright = 'The pytest-trio authors'
-author = 'The pytest-trio authors'
+project = "pytest-trio"
+copyright = "The pytest-trio authors"
+author = "The pytest-trio authors"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -77,16 +102,20 @@
#
# The short X.Y version.
import pytest_trio
+
version = pytest_trio.__version__
# The full version, including alpha/beta/rc tags.
release = version
+html_favicon = "_static/favicon-32.png"
+html_logo = "../../logo/wordmark-transparent.svg"
+
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
-language = None
+language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
@@ -94,27 +123,29 @@
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "default"
-# The default language for :: blocks
-highlight_language = 'python3'
+highlight_language = "python3"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
+# This avoids a warning by the epub builder that it can't figure out
+# the MIME type for our favicon.
+suppress_warnings = ["epub.unknown_project_files"]
+
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-#
-#html_theme = 'alabaster'
# We have to set this ourselves, not only because it's useful for local
# testing, but also because if we don't then RTD will throw away our
# html_theme_options.
import sphinx_rtd_theme
-html_theme = 'sphinx_rtd_theme'
+
+html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
@@ -128,19 +159,20 @@
# I'm not 100% sure this actually does anything with our current
# versions/settings...
"navigation_depth": 4,
- "logo_only": True,
+ "logo_only": False,
+ "prev_next_buttons_location": "both",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
-htmlhelp_basename = 'pytest-triodoc'
+htmlhelp_basename = "pytest-triodoc"
# -- Options for LaTeX output ---------------------------------------------
@@ -149,15 +181,12 @@
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
-
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
-
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
-
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
@@ -167,8 +196,7 @@
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- (master_doc, 'pytest-trio.tex', 'Trio Documentation',
- author, 'manual'),
+ (master_doc, "pytest-trio.tex", "pytest-trio Documentation", author, "manual"),
]
@@ -177,8 +205,7 @@
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
- (master_doc, 'pytest-trio', 'pytest-trio Documentation',
- [author], 1)
+ (master_doc, "pytest-trio", "pytest-trio Documentation", [author], 1),
]
@@ -188,7 +215,13 @@
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- (master_doc, 'pytest-trio', 'pytest-trio Documentation',
- author, 'pytest-trio', 'Pytest plugin for trio',
- 'Miscellaneous'),
+ (
+ master_doc,
+ "pytest-trio",
+ "pytest-trio Documentation",
+ author,
+ "pytest-trio",
+ "pytest plugin for Trio",
+ "Miscellaneous",
+ ),
]
diff --git a/docs/source/history.rst b/docs/source/history.rst
index d1ff8a3..c02178a 100644
--- a/docs/source/history.rst
+++ b/docs/source/history.rst
@@ -5,7 +5,142 @@ Release history
.. towncrier release notes start
-Pytest_Trio 0.3.0 (2018-01-03)
+pytest-trio 0.8.0 (2022-11-01)
+------------------------------
+
+Features
+~~~~~~~~
+
+- If a test raises an ``ExceptionGroup`` (or nested ``ExceptionGroup``\ s) with only
+ a single 'leaf' exception from ``pytest.xfail()`` or ``pytest.skip()``\ , we now
+ unwrap it to have the desired effect on Pytest. ``ExceptionGroup``\ s with two or
+ more leaf exceptions, even of the same type, are not changed and will be treated
+ as ordinary test failures.
+
+  See `pytest-dev/pytest#9680 <https://github.com/pytest-dev/pytest/issues/9680>`__
+ for design discussion. This feature is particularly useful if you've enabled
+ `the new strict_exception_groups=True option
+  `__. (`#104 <https://github.com/python-trio/pytest-trio/issues/104>`__)
+
+
+Bugfixes
+~~~~~~~~
+
+- Fix an issue where if two fixtures are being set up concurrently, and
+ one crashes and the other hangs, then the test as a whole would hang,
+  rather than being cancelled and unwound after the crash. (`#120 <https://github.com/python-trio/pytest-trio/issues/120>`__)
+
+
+Misc
+~~~~
+
+- Trio 0.22.0 deprecated ``MultiError`` in favor of the standard-library
+  (or `backported <https://pypi.org/project/exceptiongroup/>`__) ``ExceptionGroup``
+ type; ``pytest-trio`` now uses ``ExceptionGroup`` exclusively and therefore requires
+  Trio 0.22.0 or later. (`#128 <https://github.com/python-trio/pytest-trio/issues/128>`__)
+
+- Dropped support for end-of-life Python 3.6, and the ``async_generator`` library
+  necessary to support it, and started testing on Python 3.10 and 3.11. (`#129 <https://github.com/python-trio/pytest-trio/issues/129>`__)
+
+
+pytest-trio 0.7.0 (2020-10-15)
+------------------------------
+
+Features
+~~~~~~~~
+
+- Support added for :ref:`alternative Trio run functions ` via the ``trio_run`` configuration variable and ``@pytest.mark.trio(run=...)``. Presently supports Trio and QTrio. (`#105 <https://github.com/python-trio/pytest-trio/issues/105>`__)
+
+
+Deprecations and Removals
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Python 3.5 support removed. (`#96 <https://github.com/python-trio/pytest-trio/issues/96>`__)
+
+
+pytest-trio 0.6.0 (2020-05-20)
+----------------------------------
+
+Features
+~~~~~~~~
+
+- Incompatible change: if you use ``yield`` inside a Trio fixture, and
+ the ``yield`` gets cancelled (for example, due to a background task
+ crashing), then the ``yield`` will now raise :exc:`trio.Cancelled`.
+ See :ref:`cancel-yield` for details. Also, in this same case,
+ pytest-trio will now reliably mark the test as failed, even if the
+  fixture doesn't go on to raise an exception. (`#75 <https://github.com/python-trio/pytest-trio/issues/75>`__)
+
+- Updated for compatibility with Trio v0.15.0.
+
+
+pytest-trio 0.5.2 (2019-02-13)
+------------------------------
+
+Features
+~~~~~~~~
+
+- pytest-trio now makes the Trio scheduler deterministic while running
+ inside a Hypothesis test. Hopefully you won't see any change, but if
+  you had scheduler-dependent bugs Hypothesis will be more effective now. (`#73 <https://github.com/python-trio/pytest-trio/issues/73>`__)
+
+- Updated for compatibility with trio v0.11.0.
+
+pytest-trio 0.5.1 (2018-09-28)
+------------------------------
+
+Bugfixes
+~~~~~~~~
+
+- The pytest 3.8.1 release broke pytest-trio's handling of trio tests
+  defined as class methods. We fixed it again. (`#64 <https://github.com/python-trio/pytest-trio/issues/64>`__)
+
+
+pytest-trio 0.5.0 (2018-08-26)
+------------------------------
+
+This is a major release, including a rewrite of large portions of the
+internals. We believe it should be backwards compatible with existing
+projects. Major new features include:
+
+* "trio mode": no more writing ``@pytest.mark.trio`` everywhere!
+* it's now safe to use nurseries inside fixtures (`#55
+  <https://github.com/python-trio/pytest-trio/issues/55>`__)
+* new ``@trio_fixture`` decorator to explicitly mark a fixture as a
+ trio fixture
+* a number of easy-to-make mistakes are now caught and raise
+ informative errors
+* the :data:`nursery` fixture is now 87% more magical
+
+For more details, see the manual. Oh right, speaking of which: we
+finally have a manual! You should read it.
+
+
+pytest-trio 0.4.2 (2018-06-29)
+------------------------------
+
+Features
+~~~~~~~~
+
+- pytest-trio now integrates with `Hypothesis
+  <https://hypothesis.readthedocs.io/>`_ to support ``@given`` on async tests
+  using Trio. (`#42 <https://github.com/python-trio/pytest-trio/issues/42>`__)
+
+
+pytest-trio 0.4.1 (2018-04-14)
+------------------------------
+
+No significant changes.
+
+
+pytest-trio 0.4.0 (2018-04-14)
+------------------------------
+
+- Fix compatibility with trio 0.4.0 (`#25
+  <https://github.com/python-trio/pytest-trio/issues/25>`__)
+
+
+pytest-trio 0.3.0 (2018-01-03)
------------------------------
Features
@@ -15,7 +150,7 @@ Features
`__)
-Pytest_Trio 0.2.0 (2017-12-15)
+pytest-trio 0.2.0 (2017-12-15)
------------------------------
- Heavy improvements, add async yield fixture, fix bugs, add tests etc. (`#17
@@ -29,14 +164,14 @@ Deprecations and Removals
`__)
-Pytest_Trio 0.1.1 (2017-12-08)
+pytest-trio 0.1.1 (2017-12-08)
------------------------------
Disable intersphinx for trio (cause crash in CI for the moment due to 404
in readthedoc).
-Pytest_Trio 0.1.0 (2017-12-08)
+pytest-trio 0.1.0 (2017-12-08)
------------------------------
Initial release.
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 118c45b..fd29f82 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -8,9 +8,64 @@
pytest-trio: Pytest plugin for trio
===================================
+This is a pytest plugin to help you test projects that use `Trio
+<https://github.com/python-trio/trio>`__, a friendly library for concurrency
+and async I/O in Python. Features include:
+
+* Async tests without the boilerplate: just write ``async def
+ test_whatever(): ...``.
+
+* Useful fixtures included: use :data:`autojump_clock` for easy
+ testing of code with timeouts, or :data:`nursery` to easily set up
+ background tasks.
+
+* Write your own async fixtures: set up an async database connection
+ or start a server inside a fixture, and then use it in your tests.
+
+* If you have multiple async fixtures, pytest-trio will even do
+ setup/teardown concurrently whenever possible. (Though honestly,
+ we're not sure whether this is a good idea or not and might remove
+ it in the future. If it makes your tests harder to debug, or
+ conversely provides you with big speedups, `please let us know
+ `__.)
+
+* Integration with the fabulous `Hypothesis
+  <https://hypothesis.readthedocs.io/>`__ library, so your async tests can use
+ property-based testing: just use ``@given`` like you're used to.
+
+* Support for testing projects that use Trio exclusively and want to
+ use pytest-trio everywhere, and also for testing projects that
+ support multiple async libraries and only want to enable
+ pytest-trio's features for a subset of their test suite.
+
+
+Vital statistics
+================
+
+* Install: ``pip install pytest-trio``
+
+* Documentation: https://pytest-trio.readthedocs.io
+
+* Issue tracker, source code: https://github.com/python-trio/pytest-trio
+
+* License: MIT or Apache 2, your choice
+
+* Contributor guide: https://trio.readthedocs.io/en/latest/contributing.html
+
+* Code of conduct: Contributors are requested to follow our `code of
+ conduct
+  <https://trio.readthedocs.io/en/latest/code-of-conduct.html>`__ in
+ all project spaces.
+
.. toctree::
:maxdepth: 2
+ quickstart.rst
+ reference.rst
+
+.. toctree::
+ :maxdepth: 1
+
history.rst
====================
diff --git a/docs/source/quickstart.rst b/docs/source/quickstart.rst
new file mode 100644
index 0000000..fe23cd1
--- /dev/null
+++ b/docs/source/quickstart.rst
@@ -0,0 +1,411 @@
+Quickstart
+==========
+
+Enabling Trio mode and running your first async tests
+-----------------------------------------------------
+
+.. note:: If you used `cookiecutter-trio
+   <https://github.com/python-trio/cookiecutter-trio>`__ to set up
+ your project, then pytest-trio and Trio mode are already
+ configured! You can write ``async def test_whatever(): ...`` and it
+ should just work. Feel free to skip to the next section.
+
+Let's make a temporary directory to work in, and write two trivial
+tests: one that we expect should pass, and one that we expect should
+fail::
+
+ # test_example.py
+ import trio
+
+ async def test_sleep():
+ start_time = trio.current_time()
+ await trio.sleep(1)
+ end_time = trio.current_time()
+ assert end_time - start_time >= 1
+
+ async def test_should_fail():
+ assert False
+
+If we run this under pytest normally, then the tests are skipped, and we
+get a warning explaining that pytest itself does not directly support
+``async def`` tests. (In pytest versions before 4.4.0, the tests were
+instead reported as passing, with different warnings, even though they
+were never actually run.)
+
+.. code-block:: none
+
+ $ pytest test_example.py
+ ======================== test session starts =========================
+ platform linux -- Python 3.8.5, pytest-6.0.1, py-1.9.0, pluggy-0.13.1
+ rootdir: /tmp
+ collected 2 items
+
+ test_example.py ss [100%]
+
+ ========================== warnings summary ==========================
+ test_example.py::test_sleep
+ test_example.py::test_should_fail
+ .../_pytest/python.py:169: PytestUnhandledCoroutineWarning: async
+ def functions are not natively supported and have been skipped.
+ You need to install a suitable plugin for your async framework, for
+ example:
+ - pytest-asyncio
+ - pytest-trio
+ - pytest-tornasync
+ - pytest-twisted
+ warnings.warn(PytestUnhandledCoroutineWarning(msg.format(nodeid)))
+
+ -- Docs: https://docs.pytest.org/en/stable/warnings.html
+ =================== 2 skipped, 2 warnings in 0.26s ===================
+
+Here's the fix:
+
+1. Install pytest-trio: ``pip install pytest-trio``
+
+2. In your project root, create a file called ``pytest.ini`` with
+ contents:
+
+ .. code-block:: none
+
+ [pytest]
+ trio_mode = true
+
+And we're done! Let's try running pytest again:
+
+.. code-block:: none
+
+ $ pip install pytest-trio
+
+   $ cat <<EOF >pytest.ini
+ [pytest]
+ trio_mode = true
+ EOF
+
+ $ pytest test_example.py
+ ======================== test session starts =========================
+ platform linux -- Python 3.8.5, pytest-6.0.1, py-1.9.0, pluggy-0.13.1
+ rootdir: /tmp, configfile: pytest.ini
+ plugins: trio-0.6.0
+ collected 2 items
+
+ test_example.py .F [100%]
+
+ ============================== FAILURES ==============================
+ __________________________ test_should_fail __________________________
+
+ async def test_should_fail():
+ > assert False
+ E assert False
+
+ test_example.py:11: AssertionError
+ ====================== short test summary info =======================
+ FAILED test_example.py::test_should_fail - assert False
+ ==================== 1 failed, 1 passed in 1.23s =====================
+
+Notice that now it says ``plugins: trio``, which means that
+pytest-trio is installed, and the results make sense: the good test
+passed, the bad test failed, no warnings, and it took just over 1
+second, like we'd expect.
+
+
+Trio's magic autojump clock
+---------------------------
+
+Tests involving time are often slow and flaky. But we can
+fix that. Just add the ``autojump_clock`` fixture to your test, and
+it will run in a mode where Trio's clock is virtualized and
+deterministic. Essentially, the clock doesn't move, except that whenever all
+tasks are blocked waiting, it jumps forward to the next time at which
+something is scheduled to happen::
+
+ # Notice the 'autojump_clock' argument: that's all it takes!
+ async def test_sleep_efficiently_and_reliably(autojump_clock):
+ start_time = trio.current_time()
+ await trio.sleep(1)
+ end_time = trio.current_time()
+ assert end_time - start_time == 1
+
+In the version of this test we saw before that used real time, at the
+end we had to use a ``>=`` comparison, in order to account for
+scheduler jitter and so forth. If there were a bug that caused
+:func:`trio.sleep` to take 10 seconds, our test wouldn't have noticed.
+But now we're using virtual time, so the call to ``await
+trio.sleep(1)`` takes *exactly* 1 virtual second, and the ``==`` test
+will pass every time. Before, we had to wait around for the test to
+complete; now, it completes essentially instantaneously. (Try it!)
+And, while here our example is super simple, its integration with
+Trio's core scheduling logic allows this to work for arbitrarily
+complex programs (as long as they aren't interacting with the outside
+world).
+
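+The autojump clock is just as handy when the code under test is
+*supposed* to time out. Here's a sketch (``trio.sleep(999)`` stands in
+for an operation that hangs; under the autojump clock the test still
+finishes instantly)::
+
+    async def test_operation_times_out(autojump_clock):
+        with trio.move_on_after(10) as cancel_scope:
+            await trio.sleep(999)  # pretend this is a hung network call
+        assert cancel_scope.cancelled_caught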
+
+Async fixtures
+--------------
+
+We can write async fixtures::
+
+ @pytest.fixture
+ async def db_connection():
+ return await some_async_db_library.connect(...)
+
+ async def test_example(db_connection):
+ await db_connection.execute("SELECT * FROM ...")
+
+If you need to run teardown code, you can use ``yield``, just like a
+regular pytest fixture::
+
+ # DB connection that wraps each test in a transaction and rolls it
+ # back afterwards
+ @pytest.fixture
+ async def rollback_db_connection():
+ # Setup code
+ connection = await some_async_db_library.connect(...)
+ await connection.execute("START TRANSACTION")
+
+ # The value of this fixture
+ yield connection
+
+ # Teardown code, executed after the test is done
+ await connection.execute("ROLLBACK")
+
+
+.. _server-fixture-example:
+
+Running a background server from a fixture
+------------------------------------------
+
+Here's some code to implement an echo server. It's supposed to take in
+arbitrary data, and then send it back out again::
+
+ async def echo_server_handler(stream):
+ while True:
+ data = await stream.receive_some(1000)
+ if not data:
+ break
+ await stream.send_all(data)
+
+ # Usage: await trio.serve_tcp(echo_server_handler, ...)
+
+Now we need to test it, to make sure it's working correctly. In fact,
+since this is such complicated and sophisticated code, we're going to
+write lots of tests for it. And they'll all follow the same basic
+pattern: we'll start the echo server running in a background task,
+then connect to it, send it some test data, and see how it responds.
+Here's a first attempt::
+
+ # Let's cross our fingers and hope no-one else is using this port...
+ PORT = 14923
+
+ # Don't copy this -- we can do better
+ async def test_attempt_1():
+ async with trio.open_nursery() as nursery:
+ # Start server running in the background
+ nursery.start_soon(
+ partial(trio.serve_tcp, echo_server_handler, port=PORT)
+ )
+
+ # Connect to the server.
+ echo_client = await trio.open_tcp_stream("127.0.0.1", PORT)
+ # Send some test data, and check that it gets echoed back
+ async with echo_client:
+ for test_byte in [b"a", b"b", b"c"]:
+ await echo_client.send_all(test_byte)
+ assert await echo_client.receive_some(1) == test_byte
+
+This will mostly work, but it has a few problems. The most obvious one
+is that when we run it, even if everything works perfectly, it will
+hang at the end of the test - we never shut down the server, so the
+nursery block will wait forever for it to exit.
+
+To avoid this, we should cancel the nursery at the end of the test:
+
+.. code-block:: python3
+ :emphasize-lines: 7,20,21
+
+ # Let's cross our fingers and hope no-one else is using this port...
+ PORT = 14923
+
+ # Don't copy this -- we can do better
+ async def test_attempt_2():
+ async with trio.open_nursery() as nursery:
+ try:
+ # Start server running in the background
+ nursery.start_soon(
+ partial(trio.serve_tcp, echo_server_handler, port=PORT)
+ )
+
+ # Connect to the server.
+ echo_client = await trio.open_tcp_stream("127.0.0.1", PORT)
+ # Send some test data, and check that it gets echoed back
+ async with echo_client:
+ for test_byte in [b"a", b"b", b"c"]:
+ await echo_client.send_all(test_byte)
+ assert await echo_client.receive_some(1) == test_byte
+ finally:
+ nursery.cancel_scope.cancel()
+
+In fact, this pattern is *so* common, that pytest-trio provides a
+handy :data:`nursery` fixture to let you skip the boilerplate. Just
+add ``nursery`` to your test function arguments, and pytest-trio will
+open a nursery, pass it in to your function, and then cancel it for
+you afterwards:
+
+.. code-block:: python3
+ :emphasize-lines: 5
+
+ # Let's cross our fingers and hope no-one else is using this port...
+ PORT = 14923
+
+ # Don't copy this -- we can do better
+ async def test_attempt_3(nursery):
+ # Start server running in the background
+ nursery.start_soon(
+ partial(trio.serve_tcp, echo_server_handler, port=PORT)
+ )
+
+ # Connect to the server.
+ echo_client = await trio.open_tcp_stream("127.0.0.1", PORT)
+ # Send some test data, and check that it gets echoed back
+ async with echo_client:
+ for test_byte in [b"a", b"b", b"c"]:
+ await echo_client.send_all(test_byte)
+ assert await echo_client.receive_some(1) == test_byte
+
+Next problem: we have a race condition. We spawn a background task to
+call ``serve_tcp``, and then immediately try to connect to that
+server. Sometimes this will work fine. But it takes a little while for
+the server to start up and be ready to accept connections - so other
+times, randomly, our connection attempt will happen too quickly, and
+error out. After all - ``nursery.start_soon`` only promises that the
+task will be started *soon*, not that it has actually happened. So this
+test will be flaky, and flaky tests are the worst.
+
+Fortunately, Trio makes this easy to solve, by switching to using
+``await nursery.start(...)``. You can `read its docs for full details
+<https://trio.readthedocs.io/en/stable/reference-core.html#trio.Nursery.start>`__,
+but basically the idea is that both ``nursery.start_soon(...)`` and
+``await nursery.start(...)`` create background tasks, but only
+``start`` waits for the new task to finish getting itself set up. This
+requires some cooperation from the background task: it has to notify
+``nursery.start`` when it's ready. Fortunately, :func:`trio.serve_tcp`
+already knows how to cooperate with ``nursery.start``, so we can
+write:
+
+.. code-block:: python3
+ :emphasize-lines: 6-10
+
+ # Let's cross our fingers and hope no-one else is using this port...
+ PORT = 14923
+
+ # Don't copy this -- we can do better
+ async def test_attempt_4(nursery):
+ # Start server running in the background
+ # AND wait for it to finish starting up before continuing
+ await nursery.start(
+ partial(trio.serve_tcp, echo_server_handler, port=PORT)
+ )
+
+ # Connect to the server
+ echo_client = await trio.open_tcp_stream("127.0.0.1", PORT)
+ async with echo_client:
+ for test_byte in [b"a", b"b", b"c"]:
+ await echo_client.send_all(test_byte)
+ assert await echo_client.receive_some(1) == test_byte
+
+That solves our race condition. Next issue: hardcoding the port number
+like this is a bad idea, because port numbers are a machine-wide
+resource, so if we're unlucky some other program might already be
+using it. What we really want to do is to tell :func:`~trio.serve_tcp`
+to pick a random port that no-one else is using. It turns out that
+this is easy: if you request port 0, then the operating system will
+pick an unused one for you automatically. Problem solved!
+
+But wait... if the operating system is picking the port for us, how do
+we know which one it picked, so we can connect to it later?
+
+Well, there's no way to predict the port ahead of time. But after
+:func:`~trio.serve_tcp` has opened a port, it can check and see what
+it got. So we need some way to pass this data back out of
+:func:`~trio.serve_tcp`. Fortunately, ``nursery.start`` handles this
+too: it lets the task pass out a piece of data after it has started. And
+it just so happens that what :func:`~trio.serve_tcp` passes out is a
+list of :class:`~trio.SocketListener` objects. And there's a handy
+function called :func:`trio.testing.open_stream_to_socket_listener`
+that can take a :class:`~trio.SocketListener` and make a connection to
+it.
+
+Putting it all together:
+
+.. code-block:: python3
+ :emphasize-lines: 1,8,13-16
+
+ from trio.testing import open_stream_to_socket_listener
+
+ # Don't copy this -- it finally works, but we can still do better!
+ async def test_attempt_5(nursery):
+ # Start server running in the background
+ # AND wait for it to finish starting up before continuing
+ # AND find out where it's actually listening
+ listeners = await nursery.start(
+ partial(trio.serve_tcp, echo_server_handler, port=0)
+ )
+
+ # Connect to the server.
+ # There might be multiple listeners (example: IPv4 and
+ # IPv6), but we don't care which one we connect to, so we
+ # just use the first.
+ echo_client = await open_stream_to_socket_listener(listeners[0])
+ async with echo_client:
+ for test_byte in [b"a", b"b", b"c"]:
+ await echo_client.send_all(test_byte)
+ assert await echo_client.receive_some(1) == test_byte
+
+Now, this works - but there's still a lot of boilerplate. Remember, we
+need to write lots of tests for this server, and we don't want to have
+to copy-paste all that stuff into every test. Let's factor out the
+setup into a fixture::
+
+ @pytest.fixture
+ async def echo_client(nursery):
+ listeners = await nursery.start(
+ partial(trio.serve_tcp, echo_server_handler, port=0)
+ )
+ echo_client = await open_stream_to_socket_listener(listeners[0])
+ async with echo_client:
+ yield echo_client
+
+And now in tests, all we have to do is request the ``echo_client``
+fixture, and we get a background server and a client stream connected
+to it. So here's our complete, final version::
+
+ # Final version -- copy this!
+ from functools import partial
+ import pytest
+ import trio
+ from trio.testing import open_stream_to_socket_listener
+
+ # The code being tested:
+ async def echo_server_handler(stream):
+ while True:
+ data = await stream.receive_some(1000)
+ if not data:
+ break
+ await stream.send_all(data)
+
+ # The fixture:
+ @pytest.fixture
+ async def echo_client(nursery):
+ listeners = await nursery.start(
+ partial(trio.serve_tcp, echo_server_handler, port=0)
+ )
+ echo_client = await open_stream_to_socket_listener(listeners[0])
+ async with echo_client:
+ yield echo_client
+
+ # A test using the fixture:
+ async def test_final(echo_client):
+ for test_byte in [b"a", b"b", b"c"]:
+ await echo_client.send_all(test_byte)
+ assert await echo_client.receive_some(1) == test_byte
+
+No hangs, no race conditions, simple, clean, and reusable.
diff --git a/docs/source/reference.rst b/docs/source/reference.rst
new file mode 100644
index 0000000..351e8c1
--- /dev/null
+++ b/docs/source/reference.rst
@@ -0,0 +1,422 @@
+Reference
+=========
+
+Trio mode
+---------
+
+Most users will want to enable "Trio mode". Without Trio mode:
+
+* Pytest-trio only handles tests that have been decorated with
+ ``@pytest.mark.trio``
+* Pytest-trio only handles fixtures if they're async *and* used by a
+ test that's decorated with ``@pytest.mark.trio``, or if they're
+ decorated with ``@pytest_trio.trio_fixture`` (instead of
+ ``@pytest.fixture``).
+
+When Trio mode is enabled, two extra things happen:
+
+* Async tests automatically have the ``trio`` mark added, so you don't
+ have to do it yourself.
+* Async fixtures using ``@pytest.fixture`` automatically get converted
+ to Trio fixtures. (The main effect of this is that it helps you
+ catch mistakes like using an async fixture with a non-async
+ test.)
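+
+Concretely, the difference looks like this. A minimal sketch (the test
+bodies are only illustrative)::
+
+    import pytest
+    import trio
+
+    # Without Trio mode, an async test must carry the marker explicitly:
+    @pytest.mark.trio
+    async def test_marked():
+        await trio.sleep(0)
+
+    # With Trio mode enabled, the marker is added automatically, so a
+    # bare async test like this one also runs under Trio:
+    async def test_unmarked():
+        await trio.sleep(0)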
+
+There are two ways to enable Trio mode.
+
+The first option is to **use a pytest configuration file**. The exact
+rules for how pytest finds configuration files are `a bit complicated
+<https://docs.pytest.org/en/stable/reference/customize.html>`__, but you want to
+end up with something like:
+
+.. code-block:: ini
+
+ # pytest.ini
+ [pytest]
+ trio_mode = true
+
+The second option is **use a conftest.py file**. Inside your tests
+directory, create a file called ``conftest.py``, with the following
+contents::
+
+ # conftest.py
+ from pytest_trio.enable_trio_mode import *
+
+This does exactly the same thing as setting ``trio_mode = true`` in
+``pytest.ini``, except for two things:
+
+* Some people like to ship their tests as part of their library, so
+ they (or their users) can test the final installed software by
+ running ``pytest --pyargs PACKAGENAME``. In this mode,
+ ``pytest.ini`` files don't work, but ``conftest.py`` files do.
+
+* Enabling Trio mode in ``pytest.ini`` always enables it globally for
+ your entire testsuite. Enabling it in ``conftest.py`` only enables
+ it for test files that are in the same directory as the
+ ``conftest.py``, or its subdirectories.
+
+If you have software that uses multiple async libraries, then you can
+use ``conftest.py`` to enable Trio mode for just the part of your
+testsuite that uses Trio; or, if you need even finer-grained control,
+you can leave Trio mode disabled and use ``@pytest.mark.trio``
+explicitly on all your Trio tests.
+
+
+Trio fixtures
+-------------
+
+Normally, pytest runs fixture code before starting the test, and
+teardown code afterwards. For technical reasons, we can't wrap this
+whole process in :func:`trio.run` – only the test itself. As a
+workaround, pytest-trio introduces the concept of a "Trio fixture",
+which acts like a normal fixture for most purposes, but actually does
+the setup and teardown inside the test's call to :func:`trio.run`.
+
+The following fixtures are treated as Trio fixtures:
+
+* Any function decorated with ``@pytest_trio.trio_fixture``.
+* Any async function decorated with ``@pytest.fixture``, *if*
+ Trio mode is enabled *or* this fixture is being requested by a Trio
+ test.
+* Any fixture which depends on a Trio fixture.
+
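+In particular, the last rule means that a fixture doesn't have to be
+async itself to become a Trio fixture; depending on one is enough. A
+minimal sketch (the fixture names are made up)::
+
+    import pytest
+    import trio
+    from pytest_trio import trio_fixture
+
+    @trio_fixture
+    def start_time():
+        # Trio fixture: explicit decorator
+        return trio.current_time()
+
+    @pytest.fixture
+    async def clock_offset():
+        # Trio fixture: async, and either Trio mode is enabled or a
+        # Trio test requests it
+        await trio.sleep(0)
+        return 0.0
+
+    @pytest.fixture
+    def adjusted_start(start_time, clock_offset):
+        # Trio fixture: not async itself, but it depends on Trio fixtures
+        return start_time + clock_offset
+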
+The most notable difference between regular fixtures and Trio fixtures
+is that regular fixtures can't use Trio APIs, but Trio fixtures can.
+Most of the time you don't need to worry about this, because you
+normally only call Trio APIs from async functions, and when Trio mode
+is enabled, all async fixtures are automatically Trio fixtures.
+However, if for some reason you do want to use Trio APIs from a
+synchronous fixture, then you'll have to use
+``@pytest_trio.trio_fixture``::
+
+ # This fixture is not very useful
+ # But it is an example where @pytest.fixture doesn't work
+ @pytest_trio.trio_fixture
+ def trio_time():
+ return trio.current_time()
+
+Only Trio tests can use Trio fixtures. If you have a regular
+(synchronous) test that tries to use a Trio fixture, then that's an
+error.
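+
+For example, pytest-trio will report an error for this combination,
+because a synchronous test is requesting a Trio fixture (a sketch; the
+fixture and test names are made up)::
+
+    @pytest_trio.trio_fixture
+    async def started_at():
+        return trio.current_time()
+
+    def test_sync(started_at):  # error: sync test using a Trio fixture
+        assert started_at > 0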
+
+And finally, regular fixtures can be `scoped to the test, class,
+module, or session
+<https://docs.pytest.org/en/stable/how-to/fixtures.html#scope-sharing-fixtures-across-classes-modules-packages-or-session>`__,
+but Trio fixtures **must be test scoped**. Class, module, and session
+scope are not supported.
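+
+So a declaration like this, for instance, is not supported (a sketch)::
+
+    # Not supported: Trio fixtures must be test scoped
+    @pytest.fixture(scope="module")
+    async def shared_clock():
+        return trio.current_time()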
+
+
+.. _cancel-yield:
+
+An important note about ``yield`` fixtures
+------------------------------------------
+
+Like any pytest fixture, Trio fixtures can contain both setup and
+teardown code separated by a ``yield``::
+
+ @pytest.fixture
+ async def my_fixture():
+ ... setup code ...
+ yield
+ ... teardown code ...
+
+When pytest-trio executes this fixture, it creates a new task, and
+runs the setup code until it reaches the ``yield``. Then the fixture's
+task goes to sleep. Once the test has finished, the fixture task wakes
+up again and resumes at the ``yield``, so it can execute the teardown
+code.
+
+So the ``yield`` in a fixture is sort of like calling ``await
+wait_for_test_to_finish()``. And in Trio, any ``await``\-able
+operation can be cancelled. For example, we could put a timeout on the
+``yield``::
+
+ @pytest.fixture
+ async def my_fixture():
+ ... setup code ...
+ with trio.move_on_after(5):
+ yield # this yield gets cancelled after 5 seconds
+ ... teardown code ...
+
+Now if the test takes more than 5 seconds to execute, this fixture
+will cancel the ``yield``.
+
+That's kind of a strange thing to do, but there's another version of
+this that's extremely common. Suppose your fixture spawns a background
+task, and then the background task raises an exception. Whenever a
+background task raises an exception, it automatically cancels
+everything inside the nursery's scope – which includes our ``yield``::
+
+ @pytest.fixture
+ async def my_fixture(nursery):
+ nursery.start_soon(function_that_raises_exception)
+ yield # this yield gets cancelled after the background task crashes
+ ... teardown code ...
+
+If you use fixtures with background tasks, you'll probably end up
+cancelling one of these ``yield``\s sooner or later. So what happens
+if the ``yield`` gets cancelled?
+
+First, pytest-trio assumes that something has gone wrong and there's
+no point in continuing the test. If the top-level test function is
+running, then it cancels it.
+
+Then, pytest-trio waits for the test function to finish, and
+then begins tearing down fixtures as normal.
+
+During this teardown process, it will eventually reach the fixture
+that cancelled its ``yield``. This fixture gets resumed to execute its
+teardown logic, but with a special twist: since the ``yield`` was
+cancelled, the ``yield`` raises :exc:`trio.Cancelled`.
+
+Now, here's the punchline: this means that in our examples above, the
+teardown code might not be executed at all! **This is different from
+how pytest fixtures normally work.** Normally, the ``yield`` in a
+pytest fixture never raises an exception, so you can be certain that
+any code you put after it will execute as normal. But if you have a
+fixture with background tasks, and they crash, then your ``yield``
+might raise an exception, and Python will skip executing the code
+after the ``yield``.
+
+In our experience, most fixtures are fine with this, and it prevents
+some `weird problems
+`__ that can
+happen otherwise. But it's something to be aware of.
+
+If you have a fixture where the ``yield`` might be cancelled but you
+still need to run teardown code, then you can use a ``finally``
+block::
+
+ @pytest.fixture
+ async def my_fixture(nursery):
+ nursery.start_soon(function_that_crashes)
+ try:
+ # This yield could be cancelled...
+ yield
+ finally:
+ # But this code will run anyway
+ ... teardown code ...
+
+(But, watch out: the teardown code is still running in a cancelled
+context, so if it has any ``await``\s it could raise
+:exc:`trio.Cancelled` again.)
+
+Or if you use ``with`` to handle teardown, then you don't have to
+worry about this because ``with`` blocks always perform cleanup even
+if there's an exception::
+
+ @pytest.fixture
+ async def my_fixture(nursery):
+ with get_obj_that_must_be_torn_down() as obj:
+ nursery.start_soon(function_that_crashes, obj)
+ # This could raise trio.Cancelled...
+ # ...but that's OK, the 'with' block will still tear down 'obj'
+ yield obj
+
+
+Concurrent setup/teardown
+-------------------------
+
+If your test uses multiple fixtures, then for speed, pytest-trio will
+try to run their setup and teardown code concurrently whenever this is
+possible while respecting the fixture dependencies.
+
+Here's an example, where a test depends on ``fix_b`` and ``fix_c``,
+and these both depend on ``fix_a``::
+
+ @trio_fixture
+ def fix_a():
+ ...
+
+ @trio_fixture
+ def fix_b(fix_a):
+ ...
+
+ @trio_fixture
+ def fix_c(fix_a):
+ ...
+
+ @pytest.mark.trio
+ async def test_example(fix_b, fix_c):
+ ...
+
+When running ``test_example``, pytest-trio will perform the following
+sequence of actions:
+
+1. Set up ``fix_a``
+2. Set up ``fix_b`` and ``fix_c``, concurrently.
+3. Run the test.
+4. Tear down ``fix_b`` and ``fix_c``, concurrently.
+5. Tear down ``fix_a``.
+
+We're `seeking feedback
+`__ on whether
+this feature's benefits outweigh its negatives.
+
+
+Handling of ContextVars
+-----------------------
+
+The :mod:`contextvars` module lets you create
+:class:`~contextvars.ContextVar` objects to represent task-local
+variables. Normally, in Trio, each task gets its own
+:class:`~contextvars.Context`, so that changes to
+:class:`~contextvars.ContextVar` objects are only visible inside the
+task that performs them. But pytest-trio overrides this, and for each
+test it uses a single :class:`~contextvars.Context` which is shared by
+all fixtures and the test function itself.
+
+The benefit of this is that you can set
+:class:`~contextvars.ContextVar` values inside a fixture, and your
+settings will be visible in dependent fixtures and the test itself.
+For example, `trio-asyncio `__
+uses a :class:`~contextvars.ContextVar` to hold the current asyncio
+loop object, so this lets you open a loop inside a fixture and then
+use it inside other fixtures or the test itself.
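+
+Here's a minimal sketch (the names are made up) of a fixture that sets
+a :class:`~contextvars.ContextVar` which the test then observes::
+
+    import contextvars
+
+    import pytest
+
+    cv = contextvars.ContextVar("cv", default=None)
+
+    @pytest.fixture
+    async def cv_setter():
+        token = cv.set("value set in fixture")
+        yield
+        cv.reset(token)
+
+    @pytest.mark.trio
+    async def test_sees_fixture_value(cv_setter):
+        # Works because pytest-trio runs the fixture and the test in a
+        # single shared Context
+        assert cv.get() == "value set in fixture"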
+
+The downside is that if two fixtures are run concurrently (see
+previous section), and both mutate the same
+:class:`~contextvars.ContextVar`, then there will be a race condition
+and the final value will be unpredictable. If you make one fixture
+depend on the other, then this will force an ordering and make the
+final value predictable again.
+
+
+Built-in fixtures
+-----------------
+
+These fixtures are automatically available to any code using
+pytest-trio.
+
+.. data:: autojump_clock
+
+ A :class:`trio.testing.MockClock`, configured with ``rate=0,
+ autojump_threshold=0``.
+
+.. data:: mock_clock
+
+ A :class:`trio.testing.MockClock`, with its default configuration
+ (``rate=0, autojump_threshold=inf``).
+
+What makes these particularly useful is that whenever pytest-trio runs
+a test, it checks the fixtures to see if one of them is a
+:class:`trio.abc.Clock` object. If so, it passes that object to
+:func:`trio.run`. So if your test requests one of these fixtures, it
+automatically uses that clock.
+
+If you implement your own :class:`~trio.abc.Clock`, and implement a
+fixture that returns it, then it will work the same way.
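+
+For example, here's a sketch that uses :class:`~trio.testing.MockClock`
+as a stand-in for a custom clock (the fixture name is made up)::
+
+    @pytest.fixture
+    def custom_clock():
+        # Any fixture value that implements trio.abc.Clock is passed
+        # to trio.run() automatically
+        return trio.testing.MockClock(rate=10)
+
+    async def test_with_custom_clock(custom_clock):
+        ...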
+
+Of course, like any pytest fixture, you also get the actual object
+available. For example, you can call
+:meth:`~trio.testing.MockClock.jump`::
+
+ async def test_time_travel(mock_clock):
+ assert trio.current_time() == 0
+ mock_clock.jump(10)
+ assert trio.current_time() == 10
+
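+And with ``autojump_clock``, sleeps complete instantly while virtual
+time still advances, so a sketch like this runs in a few milliseconds
+of real time::
+
+    async def test_sleep_with_autojump_clock(autojump_clock):
+        await trio.sleep(100)  # returns immediately
+        assert trio.current_time() == 100
+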
+.. data:: nursery
+
+ A nursery created and managed by pytest-trio itself, which
+ surrounds the test/fixture that requested it, and is automatically
+ cancelled after the test/fixture completes. Basically, these are
+ equivalent::
+
+ # Boring way
+ async def test_with_background_task():
+ async with trio.open_nursery() as nursery:
+ try:
+ ...
+ finally:
+ nursery.cancel_scope.cancel()
+
+ # Fancy way
+ async def test_with_background_task(nursery):
+ ...
+
+ For a fixture, the cancellation always happens after the fixture
+ completes its teardown phase. (Or if it doesn't have a teardown
+ phase, then the cancellation happens after the teardown phase
+ *would* have happened.)
+
+ This fixture is even more magical than most pytest fixtures,
+ because if it gets requested several times within the same test,
+ then it creates multiple nurseries, one for each fixture/test that
+ requested it.
+
+ See :ref:`server-fixture-example` for an example of how this can be
+ used.
+
+
+Integration with the Hypothesis library
+---------------------------------------
+
+There isn't too much to say here, since the obvious thing just works::
+
+ from hypothesis import given
+ import hypothesis.strategies as st
+
+ @given(st.binary())
+ async def test_trio_and_hypothesis(data):
+ ...
+
+Under the hood, this requires some coordination between Hypothesis and
+pytest-trio. Hypothesis runs your test multiple times with different
+examples of random data. For each example, pytest-trio calls
+:func:`trio.run` again (so you get a fresh clean Trio environment),
+sets up any Trio fixtures, runs the actual test, and then tears down
+any Trio fixtures. Notice that this is a bit different than regular
+pytest fixtures, which are `instantiated once and then re-used for all
+`__. Most of the time
+this shouldn't matter (and `is probably what you want anyway
+`__), but in
+some unusual cases it could surprise you. And this only applies to
+Trio fixtures – if a Trio test uses a mix of regular fixtures and Trio
+fixtures, then the regular fixtures will be reused, while the Trio
+fixtures will be repeatedly reinstantiated.
+
+Also, pytest-trio only handles ``@given``\-based tests. If you want to
+write `stateful tests
+`__ for
+Trio-based libraries, then check out `hypothesis-trio
+`__.
+
+
+.. _trio-run-config:
+
+Using alternative Trio runners
+------------------------------
+
+If you are working with a library that provides integration with Trio,
+such as via :ref:`guest mode `, it can be used with
+pytest-trio as well. Setting ``trio_run`` in the pytest configuration
+makes your choice the global default for both tests explicitly marked
+with ``@pytest.mark.trio`` and those automatically marked by Trio mode.
+``trio_run`` presently supports ``trio`` and ``qtrio``.
+
+.. code-block:: ini
+
+ # pytest.ini
+ [pytest]
+ trio_mode = true
+ trio_run = qtrio
+
+.. code-block:: python
+
+ import pytest
+
+ @pytest.mark.trio
+ async def test():
+ assert True
+
+If you want more granular control or need to use a specific function,
+it can be passed directly to the marker.
+
+.. code-block:: python
+
+    import pytest
+    import qtrio
+
+    @pytest.mark.trio(run=qtrio.run)
+    async def test():
+        assert True
diff --git a/logo/wordmark-transparent.svg b/logo/wordmark-transparent.svg
new file mode 100644
index 0000000..d8590e8
--- /dev/null
+++ b/logo/wordmark-transparent.svg
@@ -0,0 +1,136 @@
diff --git a/newsfragments/132.bugfix.rst b/newsfragments/132.bugfix.rst
new file mode 100644
index 0000000..4850310
--- /dev/null
+++ b/newsfragments/132.bugfix.rst
@@ -0,0 +1 @@
+Fix a bad interaction with custom pytest items that do not include an ``.obj`` attribute.
diff --git a/newsfragments/143.misc.rst b/newsfragments/143.misc.rst
new file mode 100644
index 0000000..f20319d
--- /dev/null
+++ b/newsfragments/143.misc.rst
@@ -0,0 +1,3 @@
+Move some Hypothesis support into Trio itself, via the new plugins system.
+As a result, ``pytest-trio`` will not import Hypothesis, while still
+integrating seamlessly if *you* import it.
diff --git a/newsfragments/README.rst b/newsfragments/README.rst
index 291bfea..4df2975 100644
--- a/newsfragments/README.rst
+++ b/newsfragments/README.rst
@@ -9,7 +9,7 @@ message and PR description, which are a description of the change as
relevant to people working on the code itself.)
Each file should be named like ``..rst``, where
-```` is an issue numbers, and ```` is one of:
+```` is an issue number, and ```` is one of:
* ``feature``
* ``bugfix``
diff --git a/pyproject.toml b/pyproject.toml
index 77448c7..48270b0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,98 @@
+[build-system]
+requires = ["setuptools >= 64"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "pytest-trio"
+dynamic = ["version"]
+authors = [
+ { name="Emmanuel Leblond", email="emmanuel.leblond@gmail.com" },
+]
+description = "Pytest plugin for trio"
+readme = {file = "README.md", content-type = "text/markdown"}
+license = {file = "LICENSE"}
+requires-python = ">=3.8"
+classifiers = [
+ "Development Status :: 4 - Beta",
+ "License :: OSI Approved :: MIT License",
+ "License :: OSI Approved :: Apache Software License",
+ "Natural Language :: English",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3 :: Only",
+ "Programming Language :: Python :: Implementation :: CPython",
+ "Programming Language :: Python :: Implementation :: PyPy",
+ "Operating System :: POSIX :: Linux",
+ "Operating System :: MacOS :: MacOS X",
+ "Operating System :: Microsoft :: Windows",
+ "Operating System :: OS Independent",
+ "Topic :: System :: Networking",
+ "Topic :: Software Development :: Testing",
+ "Framework :: Hypothesis",
+ "Framework :: Pytest",
+ "Framework :: Trio",
+]
+keywords = [
+ "async",
+ "pytest",
+ "testing",
+ "trio",
+]
+dependencies = [
+ "trio >= 0.25.1", # for upstream Hypothesis integration
+ "outcome >= 1.1.0",
+ "pytest >= 7.2.0", # for ExceptionGroup support
+]
+
+[tool.setuptools.dynamic]
+version = {attr = "pytest_trio._version.__version__"}
+
+[project.urls]
+"Homepage" = "https://github.com/python-trio/pytest-trio"
+"Source" = "https://github.com/python-trio/pytest-trio"
+"Bug Tracker" = "https://github.com/python-trio/pytest-trio/issues"
+
+[project.entry-points.pytest11]
+trio = "pytest_trio.plugin"
+
+[tool.setuptools.packages]
+find = {namespaces = false}
+
[tool.towncrier]
package = "pytest_trio"
filename = "docs/source/history.rst"
directory = "newsfragments"
+title_format = "pytest-trio {version} ({project_date})"
underlines = ["-", "~", "^"]
issue_format = "`#{issue} `__"
+
+[tool.coverage.run]
+branch = true
+source_pkgs = ["pytest_trio"]
+
+[tool.coverage.report]
+precision = 1
+skip_covered = true
+exclude_lines = [
+ "pragma: no cover",
+ "abc.abstractmethod",
+ "if TYPE_CHECKING.*:",
+ "if _t.TYPE_CHECKING:",
+ "if t.TYPE_CHECKING:",
+ "@overload",
+ 'class .*\bProtocol\b.*\):',
+ "raise NotImplementedError",
+]
+partial_branches = [
+ "pragma: no branch",
+ "if not TYPE_CHECKING:",
+ "if not _t.TYPE_CHECKING:",
+ "if not t.TYPE_CHECKING:",
+ "if .* or not TYPE_CHECKING:",
+ "if .* or not _t.TYPE_CHECKING:",
+ "if .* or not t.TYPE_CHECKING:",
+]
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000..67ed440
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,7 @@
+[pytest]
+addopts = -ra -v --pyargs pytest_trio --verbose --cov
+filterwarnings =
+ error
+ default::pytest.PytestAssertRewriteWarning
+ default::pytest.PytestDeprecationWarning
+ default::pytest.PytestUnraisableExceptionWarning
diff --git a/pytest_trio/__init__.py b/pytest_trio/__init__.py
index ffd3b89..5fbd91f 100644
--- a/pytest_trio/__init__.py
+++ b/pytest_trio/__init__.py
@@ -1,3 +1,6 @@
"""Top-level package for pytest-trio."""
from ._version import __version__
+from .plugin import trio_fixture
+
+__all__ = ["trio_fixture"]
diff --git a/pytest_trio/_tests/helpers.py b/pytest_trio/_tests/helpers.py
new file mode 100644
index 0000000..e54506b
--- /dev/null
+++ b/pytest_trio/_tests/helpers.py
@@ -0,0 +1,23 @@
+import pytest
+
+
+def enable_trio_mode_via_pytest_ini(testdir):
+ testdir.makefile(".ini", pytest="[pytest]\ntrio_mode = true\n")
+
+
+def enable_trio_mode_trio_run_via_pytest_ini(testdir):
+ testdir.makefile(".ini", pytest="[pytest]\ntrio_mode = true\ntrio_run = trio\n")
+
+
+def enable_trio_mode_via_conftest_py(testdir):
+ testdir.makeconftest("from pytest_trio.enable_trio_mode import *")
+
+
+enable_trio_mode = pytest.mark.parametrize(
+ "enable_trio_mode",
+ [
+ enable_trio_mode_via_pytest_ini,
+ enable_trio_mode_trio_run_via_pytest_ini,
+ enable_trio_mode_via_conftest_py,
+ ],
+)
diff --git a/pytest_trio/_tests/test_async_fixture.py b/pytest_trio/_tests/test_async_fixture.py
index 8ca95c4..3703f73 100644
--- a/pytest_trio/_tests/test_async_fixture.py
+++ b/pytest_trio/_tests/test_async_fixture.py
@@ -2,7 +2,6 @@
def test_single_async_fixture(testdir):
-
testdir.makepyfile(
"""
import pytest
@@ -25,7 +24,6 @@ async def test_simple(fix1):
def test_async_fixture_recomputed_for_each_test(testdir):
-
testdir.makepyfile(
"""
import pytest
@@ -56,7 +54,6 @@ async def test_second(fix1):
def test_nested_async_fixture(testdir):
-
testdir.makepyfile(
"""
import pytest
@@ -89,7 +86,6 @@ async def test_both(fix1, fix2):
def test_async_within_sync_fixture(testdir):
-
testdir.makepyfile(
"""
import pytest
@@ -116,13 +112,12 @@ async def test_simple(sync_fix):
# In pytest, ERROR status occurs when an exception is raised in fixture code.
-# The trouble is our async fixtures must be run whithin a trio context, hence
+# The trouble is our async fixtures must be run within a trio context, hence
# they are actually run just before the test, providing no way to make the
-# difference between an exception comming from the real test or from an
+# difference between an exception coming from the real test or from an
# async fixture...
-@pytest.mark.xfail(reason='Not implemented yet')
+@pytest.mark.xfail(reason="Not implemented yet")
def test_raise_in_async_fixture_cause_pytest_error(testdir):
-
testdir.makepyfile(
"""
import pytest
@@ -133,10 +128,10 @@ async def fix1():
@pytest.mark.trio
async def test_base(fix1):
- pass # Crash should have occures before arriving here
+ pass # Crash should have occurred before arriving here
"""
)
result = testdir.runpytest()
- result.assert_outcomes(error=1)
+ result.assert_outcomes(errors=1)
diff --git a/pytest_trio/_tests/test_async_yield_fixture.py b/pytest_trio/_tests/test_async_yield_fixture.py
index 27b62e4..9505eb2 100644
--- a/pytest_trio/_tests/test_async_yield_fixture.py
+++ b/pytest_trio/_tests/test_async_yield_fixture.py
@@ -1,10 +1,4 @@
-import sys
-import pytest
-
-
-@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6")
def test_single_async_yield_fixture(testdir):
-
testdir.makepyfile(
"""
import pytest
@@ -43,9 +37,7 @@ def test_after():
result.assert_outcomes(passed=3)
-@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6")
def test_nested_async_yield_fixture(testdir):
-
testdir.makepyfile(
"""
import pytest
@@ -99,9 +91,7 @@ def test_after():
result.assert_outcomes(passed=3)
-@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6")
def test_async_yield_fixture_within_sync_fixture(testdir):
-
testdir.makepyfile(
"""
import pytest
@@ -146,9 +136,7 @@ def test_after():
result.assert_outcomes(passed=3)
-@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6")
def test_async_yield_fixture_within_sync_yield_fixture(testdir):
-
testdir.makepyfile(
"""
import pytest
@@ -198,9 +186,7 @@ def test_after():
result.assert_outcomes(passed=3)
-@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6")
def test_async_yield_fixture_with_multiple_yields(testdir):
-
testdir.makepyfile(
"""
import pytest
@@ -222,13 +208,11 @@ async def test_actual_test(fix1):
result = testdir.runpytest()
# TODO: should trigger error instead of failure
- # result.assert_outcomes(error=1)
+ # result.assert_outcomes(errors=1)
result.assert_outcomes(failed=1)
-@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6")
def test_async_yield_fixture_with_nursery(testdir):
-
testdir.makepyfile(
"""
import pytest
@@ -261,3 +245,55 @@ async def test_actual_test(server):
result = testdir.runpytest()
result.assert_outcomes(passed=1)
+
+
+def test_async_yield_fixture_crashed_teardown_allow_other_teardowns(testdir):
+ testdir.makepyfile(
+ """
+ import pytest
+ import trio
+
+ setup_events = set()
+ teardown_events = set()
+
+ @pytest.fixture
+ async def good_fixture():
+ async with trio.open_nursery() as nursery:
+ setup_events.add('good_fixture setup')
+ yield None
+ teardown_events.add('good_fixture teardown')
+
+ @pytest.fixture
+ async def bad_fixture():
+ async with trio.open_nursery() as nursery:
+ setup_events.add('bad_fixture setup')
+ yield None
+ teardown_events.add('bad_fixture teardown')
+ raise RuntimeError('Crash during fixture teardown')
+
+ def test_before():
+ assert not setup_events
+ assert not teardown_events
+
+ @pytest.mark.trio
+ async def test_actual_test(bad_fixture, good_fixture):
+ pass
+
+ def test_after():
+ assert setup_events == {
+ 'good_fixture setup',
+ 'bad_fixture setup',
+ }
+ assert teardown_events == {
+ 'bad_fixture teardown',
+ 'good_fixture teardown',
+ }
+ """
+ )
+
+ result = testdir.runpytest()
+
+ result.assert_outcomes(failed=1, passed=2)
+ result.stdout.re_match_lines(
+ [r"(E\W+| +\| )RuntimeError: Crash during fixture teardown"]
+ )
diff --git a/pytest_trio/_tests/test_basic.py b/pytest_trio/_tests/test_basic.py
index 6ae233f..f95538f 100644
--- a/pytest_trio/_tests/test_basic.py
+++ b/pytest_trio/_tests/test_basic.py
@@ -2,7 +2,6 @@
def test_async_test_is_executed(testdir):
-
testdir.makepyfile(
"""
import pytest
@@ -21,13 +20,12 @@ def test_check_async_test_called():
"""
)
- result = testdir.runpytest()
+ result = testdir.runpytest("-s")
result.assert_outcomes(passed=2)
def test_async_test_as_class_method(testdir):
-
testdir.makepyfile(
"""
import pytest
@@ -58,9 +56,8 @@ def test_check_async_test_called():
result.assert_outcomes(passed=2)
-@pytest.mark.xfail(reason='Raises pytest internal error so far...')
+@pytest.mark.xfail(reason="Raises pytest internal error so far...")
def test_sync_function_with_trio_mark(testdir):
-
testdir.makepyfile(
"""
import pytest
@@ -73,4 +70,52 @@ def test_invalid():
result = testdir.runpytest()
- result.assert_outcomes(error=1)
+ result.assert_outcomes(errors=1)
+
+
+def test_skip_and_xfail(testdir):
+ testdir.makepyfile(
+ """
+ import functools
+ import pytest
+ import trio
+
+ trio.run = functools.partial(trio.run, strict_exception_groups=True)
+
+ @pytest.mark.trio
+ async def test_xfail():
+ pytest.xfail()
+
+ @pytest.mark.trio
+ async def test_skip():
+ pytest.skip()
+
+ async def callback(fn):
+ fn()
+
+ async def fail():
+ raise RuntimeError
+
+ @pytest.mark.trio
+ async def test_xfail_and_fail():
+ async with trio.open_nursery() as nursery:
+ nursery.start_soon(callback, pytest.xfail)
+ nursery.start_soon(fail)
+
+ @pytest.mark.trio
+ async def test_skip_and_fail():
+ async with trio.open_nursery() as nursery:
+ nursery.start_soon(callback, pytest.skip)
+ nursery.start_soon(fail)
+
+ @pytest.mark.trio
+ async def test_xfail_and_skip():
+ async with trio.open_nursery() as nursery:
+ nursery.start_soon(callback, pytest.skip)
+ nursery.start_soon(callback, pytest.xfail)
+ """
+ )
+
+ result = testdir.runpytest("-s")
+
+ result.assert_outcomes(skipped=1, xfailed=1, failed=3)
diff --git a/pytest_trio/_tests/test_contextvars.py b/pytest_trio/_tests/test_contextvars.py
new file mode 100644
index 0000000..33bf147
--- /dev/null
+++ b/pytest_trio/_tests/test_contextvars.py
@@ -0,0 +1,37 @@
+import pytest
+from pytest_trio import trio_fixture
+
+import contextvars
+
+cv = contextvars.ContextVar("cv", default=None)
+
+
+@trio_fixture
+def cv_checker():
+ assert cv.get() is None
+ yield
+ assert cv.get() is None
+
+
+@trio_fixture
+def cv_setter(cv_checker):
+ assert cv.get() is None
+ token = cv.set("cv_setter")
+ yield
+ assert cv.get() == "cv_setter2"
+ cv.reset(token)
+ assert cv.get() is None
+
+
+@trio_fixture
+def cv_setter2(cv_setter):
+ assert cv.get() == "cv_setter"
+ # Intentionally leak, so can check that this is visible back in cv_setter
+ cv.set("cv_setter2")
+ yield
+ assert cv.get() == "cv_setter2"
+
+
+@pytest.mark.trio
+async def test_contextvars(cv_setter2):
+ assert cv.get() == "cv_setter2"
diff --git a/pytest_trio/_tests/test_fixture_mistakes.py b/pytest_trio/_tests/test_fixture_mistakes.py
new file mode 100644
index 0000000..7bd38ed
--- /dev/null
+++ b/pytest_trio/_tests/test_fixture_mistakes.py
@@ -0,0 +1,167 @@
+import pytest
+from pytest_trio import trio_fixture
+
+from .helpers import enable_trio_mode
+
+
+def test_trio_fixture_with_non_trio_test(testdir):
+ testdir.makepyfile(
+ """
+ import trio
+ from pytest_trio import trio_fixture
+ import pytest
+
+ @trio_fixture
+ def trio_time():
+ return trio.current_time()
+
+ @pytest.fixture
+ def indirect_trio_time(trio_time):
+ return trio_time + 1
+
+ @pytest.mark.trio
+ async def test_async(mock_clock, trio_time, indirect_trio_time):
+ assert trio_time == 0
+ assert indirect_trio_time == 1
+
+ def test_sync(trio_time):
+ pass
+
+ def test_sync_indirect(indirect_trio_time):
+ pass
+ """
+ )
+
+ result = testdir.runpytest()
+
+ result.assert_outcomes(passed=1, errors=2)
+ result.stdout.fnmatch_lines(["*: Trio fixtures can only be used by Trio tests*"])
+
+
+def test_trio_fixture_with_wrong_scope_without_trio_mode(testdir):
+ # There's a trick here: when you have a non-function-scope fixture, it's
+ # not instantiated for any particular function (obviously). So... when our
+ # pytest_fixture_setup hook tries to check for marks, it can't normally
+ # see @pytest.mark.trio. So... it's actually almost impossible to have an
+ # async fixture get treated as a Trio fixture *and* have it be
+ # non-function-scope. But, class-scoped fixtures can see marks on the
+ # class, so this is one way (the only way?) it can happen:
+ testdir.makepyfile(
+ """
+ import pytest
+
+ @pytest.fixture(scope="class")
+ async def async_class_fixture():
+ pass
+
+ @pytest.mark.trio
+ class TestFoo:
+ async def test_foo(self, async_class_fixture):
+ pass
+ """
+ )
+
+ result = testdir.runpytest()
+
+ result.assert_outcomes(errors=1)
+ result.stdout.fnmatch_lines(["*: Trio fixtures must be function-scope*"])
+
+
+@enable_trio_mode
+def test_trio_fixture_with_wrong_scope_in_trio_mode(testdir, enable_trio_mode):
+ enable_trio_mode(testdir)
+
+ testdir.makepyfile(
+ """
+ import pytest
+
+ @pytest.fixture(scope="session")
+ async def async_session_fixture():
+ pass
+
+
+ async def test_whatever(async_session_fixture):
+ pass
+ """
+ )
+
+ result = testdir.runpytest()
+
+ result.assert_outcomes(errors=1)
+ result.stdout.fnmatch_lines(["*: Trio fixtures must be function-scope*"])
+
+
+@enable_trio_mode
+def test_async_fixture_with_sync_test_in_trio_mode(testdir, enable_trio_mode):
+ enable_trio_mode(testdir)
+
+ testdir.makepyfile(
+ """
+ import pytest
+
+ @pytest.fixture
+ async def async_fixture():
+ pass
+
+
+ def test_whatever(async_fixture):
+ pass
+ """
+ )
+
+ result = testdir.runpytest()
+
+ result.assert_outcomes(errors=1)
+ result.stdout.fnmatch_lines(["*: Trio fixtures can only be used by Trio tests*"])
+
+
+@enable_trio_mode
+def test_fixture_cancels_test_but_doesnt_raise(testdir, enable_trio_mode):
+ enable_trio_mode(testdir)
+
+ testdir.makepyfile(
+ """
+ import pytest
+ import trio
+
+ @pytest.fixture
+ async def async_fixture():
+ with trio.CancelScope() as cscope:
+ cscope.cancel()
+ yield
+
+
+ async def test_whatever(async_fixture):
+ pass
+ """
+ )
+
+ result = testdir.runpytest()
+
+ result.assert_outcomes(failed=1)
+ result.stdout.fnmatch_lines(["*async_fixture*cancelled the test*"])
+
+
+@enable_trio_mode
+def test_too_many_clocks(testdir, enable_trio_mode):
+ enable_trio_mode(testdir)
+
+ testdir.makepyfile(
+ """
+ import pytest
+
+ @pytest.fixture
+ def extra_clock(mock_clock):
+ return mock_clock
+
+ async def test_whatever(mock_clock, extra_clock):
+ pass
+ """
+ )
+
+ result = testdir.runpytest()
+
+ result.assert_outcomes(failed=1)
+ result.stdout.fnmatch_lines(
+ ["*ValueError: Expected at most one Clock in kwargs, got *"]
+ )
diff --git a/pytest_trio/_tests/test_fixture_names.py b/pytest_trio/_tests/test_fixture_names.py
new file mode 100644
index 0000000..54c531c
--- /dev/null
+++ b/pytest_trio/_tests/test_fixture_names.py
@@ -0,0 +1,18 @@
+import pytest
+from pytest_trio import trio_fixture
+import trio
+
+
+@trio_fixture
+def fixture_with_unique_name(nursery):
+ nursery.start_soon(trio.sleep_forever)
+
+
+@pytest.mark.trio
+async def test_fixture_names(fixture_with_unique_name):
+ # This might be a bit fragile ... if we rearrange the nursery hierarchy
+ # somehow so it breaks, then we can make it more robust.
+ task = trio.lowlevel.current_task()
+ assert task.name == ""
+ sibling_names = {task.name for task in task.parent_nursery.child_tasks}
+ assert "" in sibling_names
diff --git a/pytest_trio/_tests/test_nursery_fixture.py b/pytest_trio/_tests/test_fixture_nursery.py
similarity index 88%
rename from pytest_trio/_tests/test_nursery_fixture.py
rename to pytest_trio/_tests/test_fixture_nursery.py
index 7719fa1..503ad5e 100644
--- a/pytest_trio/_tests/test_nursery_fixture.py
+++ b/pytest_trio/_tests/test_fixture_nursery.py
@@ -17,6 +17,6 @@ async def server(nursery):
@pytest.mark.trio
async def test_try(server):
stream = await trio.testing.open_stream_to_socket_listener(server)
- await stream.send_all(b'ping')
+ await stream.send_all(b"ping")
rep = await stream.receive_some(4)
- assert rep == b'ping'
+ assert rep == b"ping"
diff --git a/pytest_trio/_tests/test_fixture_ordering.py b/pytest_trio/_tests/test_fixture_ordering.py
new file mode 100644
index 0000000..a180958
--- /dev/null
+++ b/pytest_trio/_tests/test_fixture_ordering.py
@@ -0,0 +1,363 @@
+import pytest
+
+
+# Tests that:
+# - leaf_fix gets set up first and torn down last
+# - the two fix_concurrent_{1,2} fixtures run their setup/teardown code
+# at the same time -- their execution can be interleaved.
+def test_fixture_basic_ordering(testdir):
+ testdir.makepyfile(
+ """
+ import pytest
+ from pytest_trio import trio_fixture
+ from trio.testing import Sequencer
+
+ setup_events = []
+ teardown_events = []
+
+ @trio_fixture
+ def seq():
+ return Sequencer()
+
+ @pytest.fixture
+ async def leaf_fix():
+ setup_events.append("leaf_fix setup")
+ yield
+ teardown_events.append("leaf_fix teardown")
+
+ assert teardown_events == [
+ "fix_concurrent_1 teardown 1",
+ "fix_concurrent_2 teardown 1",
+ "fix_concurrent_1 teardown 2",
+ "fix_concurrent_2 teardown 2",
+ "leaf_fix teardown",
+ ]
+
+ @pytest.fixture
+ async def fix_concurrent_1(leaf_fix, seq):
+ async with seq(0):
+ setup_events.append("fix_concurrent_1 setup 1")
+ async with seq(2):
+ setup_events.append("fix_concurrent_1 setup 2")
+ yield
+ async with seq(4):
+ teardown_events.append("fix_concurrent_1 teardown 1")
+ async with seq(6):
+ teardown_events.append("fix_concurrent_1 teardown 2")
+
+ @pytest.fixture
+ async def fix_concurrent_2(leaf_fix, seq):
+ async with seq(1):
+ setup_events.append("fix_concurrent_2 setup 1")
+ async with seq(3):
+ setup_events.append("fix_concurrent_2 setup 2")
+ yield
+ async with seq(5):
+ teardown_events.append("fix_concurrent_2 teardown 1")
+ async with seq(7):
+ teardown_events.append("fix_concurrent_2 teardown 2")
+
+ @pytest.mark.trio
+ async def test_root(fix_concurrent_1, fix_concurrent_2):
+ assert setup_events == [
+ "leaf_fix setup",
+ "fix_concurrent_1 setup 1",
+ "fix_concurrent_2 setup 1",
+ "fix_concurrent_1 setup 2",
+ "fix_concurrent_2 setup 2",
+ ]
+ assert teardown_events == []
+
+ """
+ )
+
+ result = testdir.runpytest()
+ result.assert_outcomes(passed=1)
+
+
+def test_nursery_fixture_teardown_ordering(testdir):
+ testdir.makepyfile(
+ """
+ import pytest
+ from pytest_trio import trio_fixture
+ import trio
+ from trio.testing import wait_all_tasks_blocked
+
+ events = []
+
+ async def record_cancel(msg):
+ try:
+ await trio.sleep_forever()
+ finally:
+ events.append(msg)
+
+ @pytest.fixture
+ def fix0():
+ yield
+ assert events == [
+ "test",
+ "test cancel",
+ "fix2 teardown",
+ "fix2 cancel",
+ "fix1 teardown",
+ "fix1 cancel",
+ ]
+
+ @trio_fixture
+ def fix1(nursery):
+ nursery.start_soon(record_cancel, "fix1 cancel")
+ yield
+ events.append("fix1 teardown")
+
+ @trio_fixture
+ def fix2(fix1, nursery):
+ nursery.start_soon(record_cancel, "fix2 cancel")
+ yield
+ events.append("fix2 teardown")
+
+ @pytest.mark.trio
+ async def test_root(fix2, nursery):
+ nursery.start_soon(record_cancel, "test cancel")
+ await wait_all_tasks_blocked()
+ events.append("test")
+ """
+ )
+
+ result = testdir.runpytest()
+ result.assert_outcomes(passed=1)
+
+
+def test_error_collection(testdir):
+ # We want to make sure that pytest ultimately reports all the different
+ # exceptions. We call .upper() on all the exceptions so that we have
+ # tokens to look for in the output corresponding to each exception, where
+ # those tokens don't appear at all in the source (so we can't get a false
+ # positive due to pytest printing out the source file).
+
+ # We sleep at the beginning of all the fixtures b/c currently if any
+ # fixture crashes, we skip setting up unrelated fixtures whose setup
+ # hasn't even started yet. Maybe we shouldn't? But for now the sleeps make
+ # sure that all the fixtures have started before any of them start
+ # crashing.
+ testdir.makepyfile(
+ """
+ import pytest
+ from pytest_trio import trio_fixture
+ import trio
+
+ test_started = False
+
+ @trio_fixture
+ async def crash_nongen():
+ with trio.CancelScope(shield=True):
+ await trio.sleep(2)
+ raise RuntimeError("crash_nongen".upper())
+
+ @trio_fixture
+ async def crash_early_agen():
+ with trio.CancelScope(shield=True):
+ await trio.sleep(2)
+ raise RuntimeError("crash_early_agen".upper())
+ yield
+
+ @trio_fixture
+ async def crash_late_agen():
+ yield
+ raise RuntimeError("crash_late_agen".upper())
+
+ async def crash(when, token):
+ with trio.CancelScope(shield=True):
+ await trio.sleep(when)
+ raise RuntimeError(token.upper())
+
+ @trio_fixture
+ def crash_background(nursery):
+ nursery.start_soon(crash, 1, "crash_background_early")
+ nursery.start_soon(crash, 3, "crash_background_late")
+
+ @pytest.mark.trio
+ async def test_all_the_crashes(
+ autojump_clock,
+ crash_nongen, crash_early_agen, crash_late_agen, crash_background,
+ ):
+ global test_started
+ test_started = True
+
+ def test_followup():
+ assert not test_started
+
+ """
+ )
+
+ result = testdir.runpytest()
+ result.assert_outcomes(passed=1, failed=1)
+ result.stdout.fnmatch_lines_random(
+ [
+ "*CRASH_NONGEN*",
+ "*CRASH_EARLY_AGEN*",
+ "*CRASH_LATE_AGEN*",
+ "*CRASH_BACKGROUND_EARLY*",
+ "*CRASH_BACKGROUND_LATE*",
+ ]
+ )
+
+
+@pytest.mark.parametrize("bgmode", ["nursery fixture", "manual nursery"])
+def test_background_crash_cancellation_propagation(bgmode, testdir):
+ crashyfix_using_nursery_fixture = """
+ @trio_fixture
+ def crashyfix(nursery):
+ nursery.start_soon(crashy)
+ with pytest.raises(trio.Cancelled):
+ yield
+ # We should be cancelled here
+ teardown_deadlines["crashyfix"] = trio.current_effective_deadline()
+ """
+
+ crashyfix_using_manual_nursery = """
+ @trio_fixture
+ async def crashyfix():
+ async with trio.open_nursery() as nursery:
+ nursery.start_soon(crashy)
+ with pytest.raises(trio.Cancelled):
+ yield
+ # We should be cancelled here
+ teardown_deadlines["crashyfix"] = trio.current_effective_deadline()
+ """
+
+ if bgmode == "nursery fixture":
+ crashyfix = crashyfix_using_nursery_fixture
+ else:
+ crashyfix = crashyfix_using_manual_nursery
+
+ testdir.makepyfile(
+ """
+ import pytest
+ from pytest_trio import trio_fixture
+ import trio
+
+ teardown_deadlines = {}
+ final_time = None
+
+ async def crashy():
+ await trio.sleep(1)
+ raise RuntimeError
+
+ CRASHYFIX_HERE
+
+ @trio_fixture
+ def sidefix():
+ yield
+ # We should NOT be cancelled here
+ teardown_deadlines["sidefix"] = trio.current_effective_deadline()
+
+ @trio_fixture
+ def userfix(crashyfix):
+ yield
+ # Currently we should NOT be cancelled here... though maybe this
+ # should change?
+ teardown_deadlines["userfix"] = trio.current_effective_deadline()
+
+ @pytest.mark.trio
+ async def test_it(userfix, sidefix, autojump_clock):
+ try:
+ await trio.sleep_forever()
+ finally:
+ global final_time
+ final_time = trio.current_time()
+
+
+ def test_post():
+ assert teardown_deadlines == {
+ "crashyfix": -float("inf"),
+ "sidefix": float("inf"),
+ "userfix": float("inf"),
+ }
+ assert final_time == 1
+ """.replace(
+ "CRASHYFIX_HERE", crashyfix
+ )
+ )
+
+ result = testdir.runpytest()
+ result.assert_outcomes(passed=1, failed=1)
+
+
+# See the thread starting at
+# https://github.com/python-trio/pytest-trio/pull/77#issuecomment-499979536
+# for details on the real case that this was minimized from
+def test_complex_cancel_interaction_regression(testdir):
+ testdir.makepyfile(
+ """
+ import pytest
+ import trio
+ from contextlib import asynccontextmanager
+
+ async def die_soon():
+ raise RuntimeError('oops'.upper())
+
+ @asynccontextmanager
+ async def async_finalizer():
+ try:
+ yield
+ finally:
+ await trio.sleep(0)
+
+ @pytest.fixture
+ async def fixture(nursery):
+ async with trio.open_nursery() as nursery1:
+ async with async_finalizer():
+ async with trio.open_nursery() as nursery2:
+ nursery2.start_soon(die_soon)
+ yield
+ nursery1.cancel_scope.cancel()
+
+ @pytest.mark.trio
+ async def test_try(fixture):
+ await trio.sleep_forever()
+ """
+ )
+
+ result = testdir.runpytest()
+ result.assert_outcomes(passed=0, failed=1)
+ result.stdout.fnmatch_lines_random(["*OOPS*"])
+
+
+# Makes sure that a fixture crashing during setup doesn't leave a
+# concurrently-running fixture hanging forever.
+# See https://github.com/python-trio/pytest-trio/issues/120
+def test_fixtures_crash_and_hang_concurrently(testdir):
+ testdir.makepyfile(
+ """
+ import trio
+ import pytest
+
+
+ @pytest.fixture
+ async def hanging_fixture():
+ print("hanging_fixture:start")
+ await trio.Event().wait()
+ yield
+ print("hanging_fixture:end")
+
+
+ @pytest.fixture
+ async def exploding_fixture():
+ print("exploding_fixture:start")
+ raise Exception
+ yield
+ print("exploding_fixture:end")
+
+
+ @pytest.mark.trio
+ async def test_fails_right_away(exploding_fixture):
+ ...
+
+
+ @pytest.mark.trio
+ async def test_fails_needs_some_scopes(exploding_fixture, hanging_fixture):
+ ...
+ """
+ )
+
+ result = testdir.runpytest()
+ result.assert_outcomes(passed=0, failed=2)
diff --git a/pytest_trio/_tests/test_hypothesis_interaction.py b/pytest_trio/_tests/test_hypothesis_interaction.py
new file mode 100644
index 0000000..cb95a96
--- /dev/null
+++ b/pytest_trio/_tests/test_hypothesis_interaction.py
@@ -0,0 +1,69 @@
+import pytest
+import trio
+from hypothesis import given, settings, strategies as st
+
+from pytest_trio.plugin import _trio_test_runner_factory
+
+# deadline=None avoids unpredictable warnings/errors when CI happens to be
+# slow (example: https://travis-ci.org/python-trio/pytest-trio/jobs/406738296)
+# max_examples=5 speeds things up a bit
+our_settings = settings(deadline=None, max_examples=5)
+
+
+@our_settings
+@given(st.integers())
+@pytest.mark.trio
+async def test_mark_inner(n):
+ assert isinstance(n, int)
+
+
+@our_settings
+@pytest.mark.trio
+@given(st.integers())
+async def test_mark_outer(n):
+ assert isinstance(n, int)
+
+
+@our_settings
+@pytest.mark.parametrize("y", [1, 2])
+@given(x=st.none())
+@pytest.mark.trio
+async def test_mark_and_parametrize(x, y):
+ assert x is None
+ assert y in (1, 2)
+
+
+async def scheduler_trace():
+ """Returns a scheduler-dependent value we can use to check determinism."""
+ trace = []
+
+ async def tracer(name):
+ for i in range(10):
+ trace.append((name, i))
+ await trio.sleep(0)
+
+ async with trio.open_nursery() as nursery:
+ for i in range(5):
+ nursery.start_soon(tracer, i)
+
+ return tuple(trace)
+
+
+def test_the_trio_scheduler_is_deterministic_under_hypothesis():
+ traces = []
+
+ @our_settings
+ @given(st.integers())
+ @pytest.mark.trio
+ async def inner(_):
+ traces.append(await scheduler_trace())
+
+ # The pytest.mark.trio doesn't do its magic thing to
+ # inner functions, so we invoke it explicitly here.
+ inner.hypothesis.inner_test = _trio_test_runner_factory(
+ None, inner.hypothesis.inner_test
+ )
+ inner() # Tada, now it's a sync function!
+
+ assert len(traces) >= 5
+ assert len(set(traces)) == 1
diff --git a/pytest_trio/_tests/test_sync_fixture.py b/pytest_trio/_tests/test_sync_fixture.py
index 961df23..c6060a4 100644
--- a/pytest_trio/_tests/test_sync_fixture.py
+++ b/pytest_trio/_tests/test_sync_fixture.py
@@ -3,16 +3,15 @@
@pytest.fixture
def sync_fix():
- return 'sync_fix'
+ return "sync_fix"
@pytest.mark.trio
async def test_single_sync_fixture(sync_fix):
- assert sync_fix == 'sync_fix'
+ assert sync_fix == "sync_fix"
def test_single_yield_fixture(testdir):
-
testdir.makepyfile(
"""
import pytest
@@ -47,7 +46,6 @@ def test_after():
def test_single_yield_fixture_with_async_deps(testdir):
-
testdir.makepyfile(
"""
import pytest
@@ -87,3 +85,57 @@ def test_after():
result = testdir.runpytest()
result.assert_outcomes(passed=3)
+
+
+def test_sync_yield_fixture_crashed_teardown_allow_other_teardowns(testdir):
+ testdir.makepyfile(
+ """
+ import pytest
+ import trio
+
+ setup_events = set()
+ teardown_events = set()
+
+ @pytest.fixture
+ async def force_async_fixture():
+ pass
+
+ @pytest.fixture
+ def good_fixture(force_async_fixture):
+ setup_events.add('good_fixture setup')
+ yield
+ teardown_events.add('good_fixture teardown')
+
+ @pytest.fixture
+ def bad_fixture(force_async_fixture):
+ setup_events.add('bad_fixture setup')
+ yield
+ teardown_events.add('bad_fixture teardown')
+ raise RuntimeError('Crash during fixture teardown')
+
+ def test_before():
+ assert not setup_events
+ assert not teardown_events
+
+ @pytest.mark.trio
+ async def test_actual_test(bad_fixture, good_fixture):
+ pass
+
+ def test_after():
+ assert setup_events == {
+ 'good_fixture setup',
+ 'bad_fixture setup',
+ }
+ assert teardown_events == {
+ 'bad_fixture teardown',
+ 'good_fixture teardown',
+ }
+ """
+ )
+
+ result = testdir.runpytest()
+
+ result.assert_outcomes(failed=1, passed=2)
+ result.stdout.re_match_lines(
+ [r"(E\W+| +\| )RuntimeError: Crash during fixture teardown"]
+ )
diff --git a/pytest_trio/_tests/test_trio_mode.py b/pytest_trio/_tests/test_trio_mode.py
new file mode 100644
index 0000000..19efdc1
--- /dev/null
+++ b/pytest_trio/_tests/test_trio_mode.py
@@ -0,0 +1,177 @@
+import pytest
+
+from .helpers import enable_trio_mode
+
+test_text = """
+import pytest
+import trio
+from hypothesis import given, settings, strategies
+
+async def test_pass():
+ await trio.sleep(0)
+
+async def test_fail():
+ await trio.sleep(0)
+ assert False
+
+@settings(deadline=None, max_examples=5)
+@given(strategies.binary())
+async def test_hypothesis_pass(b):
+ await trio.sleep(0)
+ assert isinstance(b, bytes)
+
+@settings(deadline=None, max_examples=5)
+@given(strategies.binary())
+async def test_hypothesis_fail(b):
+ await trio.sleep(0)
+ assert isinstance(b, int)
+"""
+
+
+@enable_trio_mode
+def test_trio_mode(testdir, enable_trio_mode):
+ enable_trio_mode(testdir)
+
+ testdir.makepyfile(test_text)
+
+ result = testdir.runpytest()
+ result.assert_outcomes(passed=2, failed=2)
+
+
+# This is faking qtrio due to real qtrio's dependence on either
+# PyQt5 or PySide2. They are both large and require special
+# handling in CI. The testing here is able to focus on the
+# pytest-trio features with just this minimal substitute.
+qtrio_text = """
+import trio
+
+fake_used = False
+
+def run(*args, **kwargs):
+ global fake_used
+ fake_used = True
+
+ return trio.run(*args, **kwargs)
+"""
+
+
+def test_trio_mode_and_qtrio_run_configuration(testdir):
+ testdir.makefile(".ini", pytest="[pytest]\ntrio_mode = true\ntrio_run = qtrio\n")
+
+ testdir.makepyfile(qtrio=qtrio_text)
+
+ test_text = """
+ import qtrio
+ import trio
+
+ async def test_fake_qtrio_used():
+ await trio.sleep(0)
+ assert qtrio.fake_used
+ """
+ testdir.makepyfile(test_text)
+
+ result = testdir.runpytest()
+ result.assert_outcomes(passed=1)
+
+
+def test_trio_mode_and_qtrio_marker(testdir):
+ testdir.makefile(".ini", pytest="[pytest]\ntrio_mode = true\n")
+
+ testdir.makepyfile(qtrio=qtrio_text)
+
+ test_text = """
+ import pytest
+ import qtrio
+ import trio
+
+ @pytest.mark.trio(run=qtrio.run)
+ async def test_fake_qtrio_used():
+ await trio.sleep(0)
+ assert qtrio.fake_used
+ """
+ testdir.makepyfile(test_text)
+
+ result = testdir.runpytest()
+ result.assert_outcomes(passed=1)
+
+
+def test_qtrio_just_run_configuration(testdir):
+ testdir.makefile(".ini", pytest="[pytest]\ntrio_run = qtrio\n")
+
+ testdir.makepyfile(qtrio=qtrio_text)
+
+ test_text = """
+ import pytest
+ import qtrio
+ import trio
+
+ @pytest.mark.trio
+ async def test_fake_qtrio_used():
+ await trio.sleep(0)
+ assert qtrio.fake_used
+ """
+ testdir.makepyfile(test_text)
+
+ result = testdir.runpytest()
+ result.assert_outcomes(passed=1)
+
+
+def test_invalid_trio_run_fails(testdir):
+ run_name = "invalid_trio_run"
+
+ testdir.makefile(
+ ".ini", pytest=f"[pytest]\ntrio_mode = true\ntrio_run = {run_name}\n"
+ )
+
+ test_text = """
+ async def test():
+ pass
+ """
+ testdir.makepyfile(test_text)
+
+ result = testdir.runpytest()
+ result.assert_outcomes()
+ result.stdout.fnmatch_lines(
+ [
+ f"*ValueError: {run_name!r} not valid for 'trio_run' config. Must be one of: *"
+ ]
+ )
+
+
+def test_closest_explicit_run_wins(testdir):
+ testdir.makefile(".ini", pytest=f"[pytest]\ntrio_mode = true\ntrio_run = trio\n")
+ testdir.makepyfile(qtrio=qtrio_text)
+
+ test_text = """
+ import pytest
+ import pytest_trio
+ import qtrio
+
+ @pytest.mark.trio(run='should be ignored')
+ @pytest.mark.trio(run=qtrio.run)
+ async def test():
+ assert qtrio.fake_used
+ """
+ testdir.makepyfile(test_text)
+
+ result = testdir.runpytest()
+ result.assert_outcomes(passed=1)
+
+
+def test_ini_run_wins_with_blank_marker(testdir):
+ testdir.makefile(".ini", pytest=f"[pytest]\ntrio_mode = true\ntrio_run = qtrio\n")
+ testdir.makepyfile(qtrio=qtrio_text)
+
+ test_text = """
+ import pytest
+ import pytest_trio
+ import qtrio
+
+ @pytest.mark.trio
+ async def test():
+ assert qtrio.fake_used
+ """
+ testdir.makepyfile(test_text)
+
+ result = testdir.runpytest()
+ result.assert_outcomes(passed=1)
diff --git a/pytest_trio/_version.py b/pytest_trio/_version.py
index f879b44..2333e2d 100644
--- a/pytest_trio/_version.py
+++ b/pytest_trio/_version.py
@@ -1,3 +1,3 @@
-# This file is imported from __init__.py and exec'd from setup.py
+# This file is imported from __init__.py and parsed by setuptools
-__version__ = "0.3.0"
+__version__ = "0.8.0+dev"
diff --git a/pytest_trio/enable_trio_mode.py b/pytest_trio/enable_trio_mode.py
new file mode 100644
index 0000000..0c13614
--- /dev/null
+++ b/pytest_trio/enable_trio_mode.py
@@ -0,0 +1,11 @@
+__all__ = ["pytest_collection_modifyitems", "pytest_fixture_setup"]
+
+from .plugin import automark, handle_fixture
+
+
+def pytest_collection_modifyitems(items):
+ automark(items)
+
+
+def pytest_fixture_setup(fixturedef, request):
+ return handle_fixture(fixturedef, request, force_trio_mode=True)
diff --git a/pytest_trio/plugin.py b/pytest_trio/plugin.py
index 53d0bf3..ebfcbee 100644
--- a/pytest_trio/plugin.py
+++ b/pytest_trio/plugin.py
@@ -1,231 +1,541 @@
"""pytest-trio implementation."""
-from traceback import format_exception
-from inspect import iscoroutinefunction, isgeneratorfunction
-try:
- from async_generator import isasyncgenfunction
-except ImportError:
- from inspect import isasyncgenfunction
+import sys
+from functools import wraps, partial
+from collections.abc import Coroutine, Generator
+from contextlib import asynccontextmanager
+from inspect import isasyncgen, isasyncgenfunction, iscoroutinefunction
+import contextvars
+import outcome
import pytest
import trio
-from trio.testing import MockClock, trio_test
-from async_generator import async_generator, yield_, asynccontextmanager
+from trio.abc import Clock, Instrument
+from trio.testing import MockClock
+from _pytest.outcomes import Skipped, XFailed
+
+if sys.version_info[:2] < (3, 11):
+ from exceptiongroup import BaseExceptionGroup
+
+################################################################
+# Basic setup
+################################################################
+
+
+def pytest_addoption(parser):
+ parser.addini(
+ "trio_mode",
+ "should pytest-trio handle all async functions?",
+ type="bool",
+ default=False,
+ )
+ parser.addini(
+ "trio_run",
+ "what runner should pytest-trio use? [trio, qtrio]",
+ default="trio",
+ )
def pytest_configure(config):
- """Inject documentation."""
+ # So that it shows up in 'pytest --markers' output:
config.addinivalue_line(
- "markers", "trio: "
- "mark the test as an async trio test; "
- "it will be run using trio.run"
+ "markers",
+ "trio: mark the test as an async trio test; it will be run using trio.run",
)
-def _trio_test_runner_factory(item):
- testfunc = item.function
+################################################################
+# Core support for trio fixtures and trio tests
+################################################################
+
+# This is more complicated than you might expect.
+
+# The first complication is that all of pytest's machinery for setting up,
+# running a test, and then tearing it down again is synchronous. But we want
+# to have async setup, async tests, and async teardown.
+#
+# Our trick: from pytest's point of view, trio fixtures return an unevaluated
+# placeholder value, a TrioFixture object. This contains all the information
+# needed to do the actual setup/teardown, but doesn't actually perform these
+# operations.
+#
+# Then, pytest runs what it thinks of as "the test", we enter trio, and use
+# our own logic to setup the trio fixtures, run the actual test, and then tear
+# down the trio fixtures. This works pretty well, though it has some
+# limitations:
+# - trio fixtures have to be test-scoped
+# - normally pytest considers a fixture crash to be an ERROR, but when a trio
+# fixture crashes, it gets classified as a FAIL.
+
+# The other major complication is that we really want to allow trio fixtures
+# to yield inside a nursery. (See gh-55 for more discussion.) And then while
+# the fixture function is suspended, a task inside that nursery might crash.
+#
+# Why is this a problem? Two reasons. First, a technical one: Trio's cancel
+# scope machinery assumes that it can inject a Cancelled exception into any
+# code inside the cancel scope, and that exception will eventually make its
+# way back to the 'with' block.
+#
+# A fixture that yields inside a nursery violates this rule: the cancel scope
+# remains "active" from when the fixture yields until when it's reentered, but
+# if a Cancelled exception is raised during this time, then it *won't* go into
+# the fixture. (And we can't throw it in there either, because that's just not
+# how pytest fixtures work. Whoops.)
+#
+# And second, our setup/test/teardown process needs to account for the
+# possibility that any fixture's background task might crash at any moment,
+# and do something sensible with it.
+#
+# You should think of fixtures as a dependency graph: each fixtures *uses*
+# zero or more other fixtures, and is *used by* zero or more other fixtures.
+# A fixture should be setup before any of its dependees are setup, and torn
+# down once all of its dependees have terminated.
+# At the root of this dependency graph, we have the test itself,
+# which is just like a fixture except that instead of having a separate setup
+# and teardown phase, it runs straight through.
+#
+# To implement this, we isolate each fixture into its own task: this makes
+# sure that crashes in one can't trigger implicit cancellation in another.
+# Then we use trio.Event objects to implement the ordering described above.
+#
+# If a fixture crashes, whether during setup, teardown, or in a background
+# task at any other point, then we mark the whole test run as "crashed". When
+# a run is "crashed", two things happen: (1) if any fixtures or the test
+# itself haven't started yet, then we don't start them, and treat them as if
+# they've already exited. (2) if the test is running, we cancel it. That's
+# all. In particular, if a fixture has a background crash, we don't propagate
+# that to any other fixtures, we still follow the normal teardown sequence,
+# and so on – but since the test is cancelled, the teardown sequence should
+# start immediately.
+
+canary = contextvars.ContextVar("pytest-trio canary")
+
+
+class TrioTestContext:
+ def __init__(self):
+ self.crashed = False
+ # This holds cancel scopes for whatever setup steps are currently
+ # running -- initially it's the fixtures that are in the middle of
+ # evaluating themselves, and then once fixtures are set up it's the
+ # test itself. Basically, at any given moment, it's the stuff we need
+ # to cancel if we want to start tearing down our fixture DAG.
+ self.active_cancel_scopes = set()
+ self.fixtures_with_errors = set()
+ self.fixtures_with_cancel = set()
+ self.error_list = []
+
+ def crash(self, fixture, exc):
+ if exc is None:
+ self.fixtures_with_cancel.add(fixture)
+ else:
+ self.error_list.append(exc)
+ self.fixtures_with_errors.add(fixture)
+ self.crashed = True
+ for cscope in self.active_cancel_scopes:
+ cscope.cancel()
+
- @trio_test
- async def _bootstrap_fixture_and_run_test(**kwargs):
+class TrioFixture:
+ """
+ Represent a fixture that needs to be run in a trio context to be resolved.
+
+ The name is actually a misnomer, because we use it to represent the actual
+ test itself as well, since the test is basically just a fixture with no
+ dependents and no teardown.
+ """
+
+ def __init__(self, name, func, pytest_kwargs, is_test=False):
+ self.name = name
+ self._func = func
+ self._pytest_kwargs = pytest_kwargs
+ self._is_test = is_test
+ self._teardown_done = trio.Event()
+
+ # These attrs are all accessed from other objects:
+ # Downstream users read this value.
+ self.fixture_value = None
+ # This event notifies downstream users that we're done setting up.
+ # Invariant: if this is set, then either fixture_value is usable *or*
+ # test_ctx.crashed is True.
+ self.setup_done = trio.Event()
+ # Downstream users *modify* this value, by adding their _teardown_done
+ # events to it, so we know who we need to wait for before tearing
+ # down.
+ self.user_done_events = set()
+
+ def register_and_collect_dependencies(self):
+ # Returns the set of all TrioFixtures that this fixture depends on,
+ # directly or indirectly, and sets up all their user_done_events.
+ deps = set()
+ deps.add(self)
+ for value in self._pytest_kwargs.values():
+ if isinstance(value, TrioFixture):
+ value.user_done_events.add(self._teardown_done)
+ deps.update(value.register_and_collect_dependencies())
+ return deps
+
+ @asynccontextmanager
+ async def _fixture_manager(self, test_ctx):
__tracebackhide__ = True
- user_exc = None
- # Open the nursery exposed as fixture
- async with trio.open_nursery() as nursery:
- item._trio_nursery = nursery
+ try:
+ async with trio.open_nursery() as nursery_fixture:
+ try:
+ yield nursery_fixture
+ finally:
+ nursery_fixture.cancel_scope.cancel()
+ except BaseException as exc:
+ test_ctx.crash(self, exc)
+ finally:
+ self.setup_done.set()
+ self._teardown_done.set()
+
+ async def run(self, test_ctx, contextvars_ctx):
+ __tracebackhide__ = True
+
+ # This is a gross hack. I guess Trio should provide a context=
+ # argument to start_soon/start?
+ task = trio.lowlevel.current_task()
+ assert canary not in task.context
+ task.context = contextvars_ctx
+ # Force a yield so we pick up the new context
+ await trio.sleep(0)
+ # Check that it worked, since technically trio doesn't *guarantee*
+ # that sleep(0) will actually yield.
+ assert canary.get() == "in correct context"
+
+ # This 'with' block handles the nursery fixture lifetime, the
+ # teardown_done event, and crashing the context if there's an
+ # unhandled exception.
+ async with self._fixture_manager(test_ctx) as nursery_fixture:
+ # Resolve our kwargs
+ resolved_kwargs = {}
+ for name, value in self._pytest_kwargs.items():
+ if isinstance(value, TrioFixture):
+ await value.setup_done.wait()
+ if value.fixture_value is NURSERY_FIXTURE_PLACEHOLDER:
+ resolved_kwargs[name] = nursery_fixture
+ else:
+ resolved_kwargs[name] = value.fixture_value
+ else:
+ resolved_kwargs[name] = value
+
+ # If something's already crashed before we're ready to start, then
+ # there's no point in even setting up.
+ if test_ctx.crashed:
+ return
+
+ # Run actual fixture setup step
+ # If another fixture crashes while we're in the middle of setting
+ # up, we want to be cancelled immediately, so we'll save an
+ # encompassing cancel scope where self._crash can find it.
+ test_ctx.active_cancel_scopes.add(nursery_fixture.cancel_scope)
+ if self._is_test:
+ # Tests are exactly like fixtures, except that they have to be
+ # regular async functions.
+ assert not self.user_done_events
+ func_value = None
+ assert not test_ctx.crashed
+ await self._func(**resolved_kwargs)
+ else:
+ func_value = self._func(**resolved_kwargs)
+ if isinstance(func_value, Coroutine):
+ self.fixture_value = await func_value
+ elif isasyncgen(func_value):
+ self.fixture_value = await func_value.asend(None)
+ elif isinstance(func_value, Generator):
+ self.fixture_value = func_value.send(None)
+ else:
+ # Regular synchronous function
+ self.fixture_value = func_value
+ # Now that we're done setting up, we don't want crashes to cancel
+ # us immediately; instead we want them to cancel our downstream
+ # dependents, and then eventually let us clean up normally. So
+ # remove this from the set of cancel scopes affected by self._crash.
+ test_ctx.active_cancel_scopes.remove(nursery_fixture.cancel_scope)
+
+ # self.fixture_value is ready, so notify users that they can
+ # continue. (Or, maybe we crashed and were cancelled, in which
+ # case our users will check test_ctx.crashed and immediately exit,
+ # which is fine too.)
+ self.setup_done.set()
+
+ # Wait for users to be finished
+ #
+ # At this point we're in a very strange state: if the fixture
+ # yielded inside a nursery or cancel scope, then we are still
+ # "inside" that scope even though its with block is not on the
+ # stack. In particular this means that if they get cancelled, then
+ # our waiting might get a Cancelled error that we cannot really
+ # deal with; it should get thrown back into the fixture
+ # generator, but pytest fixture generators don't work that way:
+ # https://github.com/python-trio/pytest-trio/issues/55
+ # And besides, we can't start tearing down until all our users
+ # have finished.
+ #
+ # So if we get an exception here, we crash the context (which
+ # cancels the test and starts the cleanup process), save any
+ # exception that *isn't* Cancelled (because if it's Cancelled then
+ # we can't route it to the right place, and anyway the teardown
+ # code will get it again if it matters), and then use a shield to
+ # keep waiting for the teardown to finish without having to worry
+ # about cancellation.
+ yield_outcome = outcome.Value(None)
try:
- async with _setup_async_fixtures_in(kwargs) as resolved_kwargs:
- try:
- await testfunc(**resolved_kwargs)
- except BaseException as exc:
- # Regular pytest fixture don't have access to the test
- # exception in there teardown, we mimic this behavior here.
- user_exc = exc
+ for event in self.user_done_events:
+ await event.wait()
except BaseException as exc:
- # If we are here, the exception comes from the fixtures setup
- # or teardown
- if user_exc:
- raise exc from user_exc
+ assert isinstance(exc, trio.Cancelled)
+ yield_outcome = outcome.Error(exc)
+ test_ctx.crash(self, None)
+ with trio.CancelScope(shield=True):
+ for event in self.user_done_events:
+ await event.wait()
+
+ # Do our teardown
+ if isasyncgen(func_value):
+ try:
+ await yield_outcome.asend(func_value)
+ except StopAsyncIteration:
+ pass
else:
- raise exc
- finally:
- # No matter what the nursery fixture should be closed when test is over
- nursery.cancel_scope.cancel()
+ raise RuntimeError("too many yields in fixture")
+ elif isinstance(func_value, Generator):
+ try:
+ yield_outcome.send(func_value)
+ except StopIteration:
+ pass
+ else:
+ raise RuntimeError("too many yields in fixture")
- # Finally re-raise or original exception coming from the test if needed
- if user_exc:
- raise user_exc
- return _bootstrap_fixture_and_run_test
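A minimal standalone sketch of the context-propagation trick used in TrioFixture.run above, not part of this patch: canary_demo, child and main are illustrative names, and it assumes Trio keeps allowing trio.lowlevel.current_task().context to be reassigned, which is exactly what the plugin relies on.

    import contextvars

    import trio

    canary_demo = contextvars.ContextVar("canary_demo", default="unset")


    async def child(ctx):
        # Adopt the caller-prepared context, then checkpoint so the scheduler
        # resumes this task inside it (trio.sleep(0) is not *guaranteed* to
        # yield, hence the plugin's canary assertion).
        trio.lowlevel.current_task().context = ctx
        await trio.sleep(0)
        assert canary_demo.get() == "from parent"


    async def main():
        ctx = contextvars.copy_context()
        ctx.run(canary_demo.set, "from parent")
        async with trio.open_nursery() as nursery:
            nursery.start_soon(child, ctx)


    trio.run(main)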
+def _trio_test(run):
+ """Use:
+ @trio_test
+ async def test_whatever():
+ await ...
+ Also: if any pytest fixture value passed in is an instance of the ``Clock``
+ ABC, then that clock is passed to ``trio.run()``.
+ """
-@asynccontextmanager
-@async_generator
-async def _setup_async_fixtures_in(deps):
- __tracebackhide__ = True
+ def decorator(fn):
+ @wraps(fn)
+ def wrapper(**kwargs):
+ __tracebackhide__ = True
+ clocks = {k: c for k, c in kwargs.items() if isinstance(c, Clock)}
+ if not clocks:
+ clock = None
+ elif len(clocks) == 1:
+ clock = list(clocks.values())[0]
+ else:
+ raise ValueError(
+ f"Expected at most one Clock in kwargs, got {clocks!r}"
+ )
+ instruments = [i for i in kwargs.values() if isinstance(i, Instrument)]
+ try:
+ return run(partial(fn, **kwargs), clock=clock, instruments=instruments)
+ except BaseExceptionGroup as eg:
+ queue = [eg]
+ leaves = []
+ while queue:
+ ex = queue.pop()
+ if isinstance(ex, BaseExceptionGroup):
+ queue.extend(ex.exceptions)
+ else:
+ leaves.append(ex)
+ if len(leaves) == 1:
+ if isinstance(leaves[0], XFailed):
+ pytest.xfail()
+ if isinstance(leaves[0], Skipped):
+ pytest.skip()
+ # Since our leaf exceptions don't consist of exactly one 'magic'
+ # skipped or xfailed exception, re-raise the whole group.
+ raise
+
+ return wrapper
+
+ return decorator
+
+
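A short usage sketch of what the Clock detection in _trio_test enables, assuming ordinary test code (not shipped by this patch) together with the built-in autojump_clock fixture defined near the end of the module:

    import pytest
    import trio


    @pytest.mark.trio
    async def test_sleep_with_autojump_clock(autojump_clock):
        # autojump_clock is a trio.abc.Clock instance, so the wrapper passes it
        # to trio.run(..., clock=...); time jumps instead of really sleeping.
        start = trio.current_time()
        await trio.sleep(60)
        assert trio.current_time() - start == 60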
+def _trio_test_runner_factory(item, testfunc=None):
+ if testfunc:
+ run = trio.run
+ else:
+ testfunc = item.obj
+
+ for marker in item.iter_markers("trio"):
+ maybe_run = marker.kwargs.get("run")
+ if maybe_run is not None:
+ run = maybe_run
+ break
+ else:
+ # no marker found that explicitly specifies the runner, so use the config
+ run = choose_run(config=item.config)
- need_resolved_deps_stack = [
- (k, v) for k, v in deps.items() if isinstance(v, BaseAsyncFixture)
- ]
+ if getattr(testfunc, "_trio_test_runner_wrapped", False):
+ # We have already wrapped this, perhaps because we combined Hypothesis
+ # with pytest.mark.parametrize
+ return testfunc
- if not need_resolved_deps_stack:
- await yield_(deps)
- return
+ if not iscoroutinefunction(testfunc):
+ pytest.fail("test function `%r` is marked trio but is not async" % item)
- @asynccontextmanager
- @async_generator
- async def _recursive_setup(deps_stack):
+ @_trio_test(run=run)
+ async def _bootstrap_fixtures_and_run_test(**kwargs):
__tracebackhide__ = True
- name, dep = deps_stack.pop()
- async with dep.setup() as resolved:
- if not deps_stack:
- await yield_([(name, resolved)])
- else:
- async with _recursive_setup(
- deps_stack
- ) as remains_deps_stack_resolved:
- await yield_(
- remains_deps_stack_resolved + [(name, resolved)]
- )
-
- async with _recursive_setup(
- need_resolved_deps_stack
- ) as resolved_deps_stack:
- await yield_({**deps, **dict(resolved_deps_stack)})
+ test_ctx = TrioTestContext()
+ test = TrioFixture(
+ "".format(testfunc.__name__), testfunc, kwargs, is_test=True
+ )
-class BaseAsyncFixture:
- """
- Represent a fixture that need to be run in a trio context to be resolved.
- """
+ contextvars_ctx = contextvars.copy_context()
+ contextvars_ctx.run(canary.set, "in correct context")
- def __init__(self, fixturedef, deps={}):
- self.fixturedef = fixturedef
- self.deps = deps
- self.setup_done = False
- self.result = None
-
- @asynccontextmanager
- @async_generator
- async def setup(self):
- __tracebackhide__ = True
- if self.setup_done:
- await yield_(self.result)
- else:
- async with _setup_async_fixtures_in(self.deps) as resolved_deps:
- async with self._setup(resolved_deps) as self.result:
- self.setup_done = True
- await yield_(self.result)
+ async with trio.open_nursery() as nursery:
+ for fixture in test.register_and_collect_dependencies():
+ nursery.start_soon(
+ fixture.run, test_ctx, contextvars_ctx, name=fixture.name
+ )
+
+ silent_cancellers = (
+ test_ctx.fixtures_with_cancel - test_ctx.fixtures_with_errors
+ )
+ if silent_cancellers:
+ for fixture in silent_cancellers:
+ test_ctx.error_list.append(
+ RuntimeError(
+ "{} cancelled the test but didn't "
+ "raise an error".format(fixture.name)
+ )
+ )
- async def _setup(self):
- raise NotImplementedError()
+ if len(test_ctx.error_list) == 1:
+ raise test_ctx.error_list[0]
+ elif test_ctx.error_list:
+ raise BaseExceptionGroup(
+ "errors in async test and trio fixtures", test_ctx.error_list
+ )
+ _bootstrap_fixtures_and_run_test._trio_test_runner_wrapped = True
+ return _bootstrap_fixtures_and_run_test
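The marker loop above reads an optional run= argument from @pytest.mark.trio. A hedged sketch of what that permits; traced_run is a made-up wrapper, and qtrio is the runner the trio_run setting actually targets:

    import pytest
    import trio


    def traced_run(async_fn, **trio_kwargs):
        # Must accept the same keyword arguments as trio.run, because the
        # wrapper calls run(partial(testfunc, **fixtures), clock=..., instruments=...).
        print("starting a trio test")
        return trio.run(async_fn, **trio_kwargs)


    @pytest.mark.trio(run=traced_run)
    async def test_with_custom_runner():
        await trio.sleep(0)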
-class AsyncYieldFixture(BaseAsyncFixture):
- """
- Async generator fixture.
- """
- @asynccontextmanager
- @async_generator
- async def _setup(self, resolved_deps):
- __tracebackhide__ = True
- agen = self.fixturedef.func(**resolved_deps)
+################################################################
+# Hooking up the test/fixture machinery to pytest
+################################################################
- await yield_(await agen.asend(None))
- try:
- await agen.asend(None)
- except StopAsyncIteration:
- pass
+@pytest.hookimpl(hookwrapper=True)
+def pytest_runtest_call(item):
+ if item.get_closest_marker("trio") is not None:
+ if hasattr(item.obj, "hypothesis"):
+ # If it's a Hypothesis test, we go one layer in and wrap its inner test.
+ item.obj.hypothesis.inner_test = _trio_test_runner_factory(
+ item, item.obj.hypothesis.inner_test
+ )
+ elif getattr(item.obj, "is_hypothesis_test", False): # pragma: no cover
+ pytest.fail(
+ "test function `%r` is using Hypothesis, but pytest-trio "
+ "only works with Hypothesis 3.64.0 or later." % item
+ )
else:
- raise RuntimeError('Only one yield in fixture is allowed')
+ item.obj = _trio_test_runner_factory(item)
+ yield
-class SyncFixtureWithAsyncDeps(BaseAsyncFixture):
- """
- Synchronous function fixture with asynchronous dependencies fixtures.
- """
- @asynccontextmanager
- @async_generator
- async def _setup(self, resolved_deps):
- __tracebackhide__ = True
- await yield_(self.fixturedef.func(**resolved_deps))
+# It's intentionally impossible to use this to create a non-function-scoped
+# fixture (since that would require exposing a way to pass scope= to
+# pytest.fixture).
+def trio_fixture(func):
+ func._force_trio_fixture = True
+ return pytest.fixture(func)
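An illustrative use of the decorator above; the memory_channel fixture is an assumption for demonstration, not something the plugin ships:

    import trio

    from pytest_trio import trio_fixture


    @trio_fixture
    async def memory_channel():
        # Setup runs inside the test's Trio run; teardown after the yield runs
        # once the test and any dependent fixtures have finished.
        send, receive = trio.open_memory_channel(1)
        async with send, receive:
            yield send, receive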
+
+
+def _is_trio_fixture(func, coerce_async, kwargs):
+ if getattr(func, "_force_trio_fixture", False):
+ return True
+ if coerce_async and (iscoroutinefunction(func) or isasyncgenfunction(func)):
+ return True
+ if any(isinstance(value, TrioFixture) for value in kwargs.values()):
+ return True
+ return False
+
+
+def handle_fixture(fixturedef, request, force_trio_mode):
+ is_trio_test = request.node.get_closest_marker("trio") is not None
+ if force_trio_mode:
+ is_trio_mode = True
+ else:
+ is_trio_mode = request.node.config.getini("trio_mode")
+ coerce_async = is_trio_test or is_trio_mode
+ kwargs = {name: request.getfixturevalue(name) for name in fixturedef.argnames}
+ if _is_trio_fixture(fixturedef.func, coerce_async, kwargs):
+ if request.scope != "function":
+ raise RuntimeError("Trio fixtures must be function-scope")
+ if not is_trio_test:
+ raise RuntimeError("Trio fixtures can only be used by Trio tests")
+ fixture = TrioFixture(
+ "".format(fixturedef.argname),
+ fixturedef.func,
+ kwargs,
+ )
+ fixturedef.cached_result = (fixture, request.param_index, None)
+ return fixture
-class SyncYieldFixtureWithAsyncDeps(BaseAsyncFixture):
- """
- Synchronous generator fixture with asynchronous dependencies fixtures.
- """
+def pytest_fixture_setup(fixturedef, request):
+ return handle_fixture(fixturedef, request, force_trio_mode=False)
- @asynccontextmanager
- @async_generator
- async def _setup(self, resolved_deps):
- __tracebackhide__ = True
- gen = self.fixturedef.func(**resolved_deps)
- await yield_(gen.send(None))
+################################################################
+# Trio mode
+################################################################
- try:
- gen.send(None)
- except StopIteration:
- pass
+
+def automark(items, run=trio.run):
+ for item in items:
+ if not hasattr(item, "obj"):
+ # Rare and a little strange, but happens with some doctest-like plugins
+ continue
+ if hasattr(item.obj, "hypothesis"):
+ test_func = item.obj.hypothesis.inner_test
else:
- raise RuntimeError('Only one yield in fixture is allowed')
+ test_func = item.obj
+ if iscoroutinefunction(test_func):
+ item.add_marker(pytest.mark.trio(run=run))
-class AsyncFixture(BaseAsyncFixture):
- """
- Regular async fixture (i.e. coroutine).
- """
+def choose_run(config):
+ run_string = config.getini("trio_run")
- @asynccontextmanager
- @async_generator
- async def _setup(self, resolved_deps):
- __tracebackhide__ = True
- await yield_(await self.fixturedef.func(**resolved_deps))
-
-
-def _install_async_fixture_if_needed(fixturedef, request):
- asyncfix = None
- deps = {dep: request.getfixturevalue(dep) for dep in fixturedef.argnames}
- if iscoroutinefunction(fixturedef.func):
- asyncfix = AsyncFixture(fixturedef, deps)
- elif isasyncgenfunction(fixturedef.func):
- asyncfix = AsyncYieldFixture(fixturedef, deps)
- elif any(dep for dep in deps.values()
- if isinstance(dep, BaseAsyncFixture)):
- if isgeneratorfunction(fixturedef.func):
- asyncfix = SyncYieldFixtureWithAsyncDeps(fixturedef, deps)
- else:
- asyncfix = SyncFixtureWithAsyncDeps(fixturedef, deps)
- if asyncfix:
- fixturedef.cached_result = (asyncfix, request.param_index, None)
- return asyncfix
+ if run_string == "trio":
+ run = trio.run
+ elif run_string == "qtrio":
+ import qtrio
+ run = qtrio.run
+ else:
+ raise ValueError(
+ f"{run_string!r} not valid for 'trio_run' config."
+ + " Must be one of: trio, qtrio"
+ )
-@pytest.hookimpl(hookwrapper=True)
-def pytest_runtest_call(item):
- if 'trio' in item.keywords:
- if not iscoroutinefunction(item.obj):
- pytest.fail(
- 'test function `%r` is marked trio but is not async' % item
- )
- item.obj = _trio_test_runner_factory(item)
+ return run
- yield
+def pytest_collection_modifyitems(config, items):
+ if config.getini("trio_mode"):
+ automark(items, run=choose_run(config=config))
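The collection hook above is what global "trio mode" hangs off: every collected async test gets the trio marker, with the runner chosen from the trio_run ini option. As an alternative to the ini options, pytest-trio also documents enabling trio mode from a conftest.py; a minimal sketch:

    # conftest.py
    from pytest_trio.enable_trio_mode import *  # noqa: F401,F403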
-@pytest.hookimpl()
-def pytest_fixture_setup(fixturedef, request):
- if 'trio' in request.keywords:
- return _install_async_fixture_if_needed(fixturedef, request)
+################################################################
+# Built-in fixtures
+################################################################
-@pytest.hookimpl(tryfirst=True)
-def pytest_exception_interact(node, call, report):
- if issubclass(call.excinfo.type, trio.MultiError):
- # TODO: not really elegant (pytest cannot output color with this hack)
- report.longrepr = ''.join(format_exception(*call.excinfo._excinfo))
+
+class NURSERY_FIXTURE_PLACEHOLDER:
+ pass
@pytest.fixture
@@ -238,6 +548,6 @@ def autojump_clock():
return MockClock(autojump_threshold=0)
-@pytest.fixture
-async def nursery(request):
- return request.node._trio_nursery
+@trio_fixture
+def nursery(request):
+ return NURSERY_FIXTURE_PLACEHOLDER
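A hedged sketch of the test-side view of the placeholder-based nursery fixture above (the heartbeat task is illustrative): the nursery handed to the test is the one opened by _fixture_manager, so its cancel scope is cancelled automatically when the test ends and background tasks need no manual cleanup.

    import pytest
    import trio


    async def heartbeat(task_status=trio.TASK_STATUS_IGNORED):
        task_status.started()
        while True:
            await trio.sleep(1)


    @pytest.mark.trio
    async def test_background_heartbeat(nursery):
        await nursery.start(heartbeat)
        # No explicit cancellation needed: the fixture's cancel scope tears the
        # heartbeat task down when the test returns.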
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 03a577e..0000000
--- a/setup.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from setuptools import setup, find_packages
-
-exec(open("pytest_trio/_version.py", encoding="utf-8").read())
-
-LONG_DESC = open("README.rst", encoding="utf-8").read()
-
-setup(
- name="pytest-trio",
- version=__version__,
- description="Pytest plugin for trio",
- url="https://github.com/python-trio/pytest-trio",
- long_description=open("README.rst").read(),
- author="Emmanuel Leblond",
- author_email="emmanuel.leblond@gmail.com",
- license="MIT -or- Apache License 2.0",
- packages=find_packages(),
- entry_points={'pytest11': ['trio = pytest_trio.plugin']},
- install_requires=[
- "trio",
- "async_generator >= 1.9",
- ],
- keywords=[
- 'async',
- 'pytest',
- 'testing',
- 'trio',
- ],
- python_requires=">=3.5",
- classifiers=[
- "License :: OSI Approved :: MIT License",
- "License :: OSI Approved :: Apache Software License",
- "Operating System :: POSIX :: Linux",
- "Operating System :: MacOS :: MacOS X",
- "Operating System :: Microsoft :: Windows",
- "Programming Language :: Python :: 3 :: Only",
- "Programming Language :: Python :: Implementation :: CPython",
- "Programming Language :: Python :: Implementation :: PyPy",
- "Topic :: System :: Networking",
- "Topic :: Software Development :: Testing",
- "Framework :: Pytest",
- "Framework :: Trio",
- ],
-)
diff --git a/test-requirements.in b/test-requirements.in
new file mode 100644
index 0000000..49db040
--- /dev/null
+++ b/test-requirements.in
@@ -0,0 +1,3 @@
+pytest>=8.2.2
+pytest-cov>=5.0.0
+hypothesis>=6.108.0
diff --git a/test-requirements.txt b/test-requirements.txt
index 9955dec..6615483 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,2 +1,34 @@
-pytest
-pytest-cov
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+# pip-compile test-requirements.in
+#
+attrs==23.2.0
+ # via hypothesis
+coverage[toml]==7.6.0
+ # via pytest-cov
+exceptiongroup==1.2.2
+ # via
+ # hypothesis
+ # pytest
+hypothesis==6.108.0
+ # via -r test-requirements.in
+iniconfig==2.0.0
+ # via pytest
+packaging==24.1
+ # via pytest
+pluggy==1.5.0
+ # via pytest
+pytest==8.2.2
+ # via
+ # -r test-requirements.in
+ # pytest-cov
+pytest-cov==5.0.0
+ # via -r test-requirements.in
+sortedcontainers==2.4.0
+ # via hypothesis
+tomli==2.0.1
+ # via
+ # coverage
+ # pytest