diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 3acffa71cec..7a42e082063 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,7 +4,7 @@ updates: - package-ecosystem: pip directory: / schedule: - interval: weekly + interval: monthly - package-ecosystem: github-actions directory: / diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c33c64d3a0c..5cb39166301 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,23 +4,36 @@ on: push: branches: - master + - 1.9.X-fixes tags: - '**' - pull_request: {} jobs: lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: 3.9 + - uses: actions/cache@v3 + id: cache + with: + path: | + ${{ env.pythonLocation }} + .mypy_cache + key: > + lint + ${{ runner.os }} + ${{ env.pythonLocation }} + ${{ hashFiles('tests/requirements-linting.txt') }} + - name: install + if: steps.cache.outputs.cache-hit != 'true' run: | make install-linting pip freeze @@ -37,30 +50,52 @@ jobs: - name: check dist run: make check-dist + - name: install node for pyright + uses: actions/setup-node@v3 + with: + node-version: '14' + + - run: npm install -g pyright + + - run: make pyright + docs-build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: set up python - uses: actions/setup-python@v1 + uses: actions/setup-python@v3 with: - python-version: 3.8 + python-version: 3.9 + + - uses: actions/cache@v3 + id: cache + with: + path: ${{ env.pythonLocation }} + key: > + docs-build + ${{ runner.os }} + ${{ env.pythonLocation }} + ${{ hashFiles('setup.py') }} + ${{ hashFiles('requirements.txt') }} + ${{ hashFiles('docs/requirements.txt') }} - name: install + if: steps.cache.outputs.cache-hit != 'true' run: make install-docs - name: build site run: make docs - name: Store docs site - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: docs path: site - test-linux: - name: test py${{ matrix.python-version }} on linux + test-linux-compiled: + name: test py${{ matrix.python-version }} on linux compiled runs-on: ubuntu-latest strategy: fail-fast: false @@ -71,17 +106,27 @@ jobs: OS: ubuntu steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} + - uses: actions/cache@v3 + id: cache + with: + path: ${{ env.pythonLocation }} + key: > + test-linux-compiled + ${{ runner.os }} + ${{ env.pythonLocation }} + ${{ hashFiles('setup.py') }} + ${{ hashFiles('requirements.txt') }} + ${{ hashFiles('tests/requirements-testing.txt') }} + - name: install - run: | - make install-testing - pip freeze + run: make install-testing - name: compile run: | @@ -92,46 +137,26 @@ jobs: - run: mkdir coverage - - name: test compiled and deps - run: make test - env: - COVERAGE_FILE: coverage/.coverage.linux-py${{ matrix.python-version }}-cY-dY - CONTEXT: linux-py${{ matrix.python-version }}-compiled-yes-deps-yes - - - name: uninstall deps - run: pip uninstall -y cython email-validator devtools python-dotenv - - - name: test compiled without deps - run: make test - env: - COVERAGE_FILE: coverage/.coverage.linux-py${{ matrix.python-version }}-cY-dN - CONTEXT: linux-py${{ matrix.python-version }}-compiled-yes-deps-no - - - name: remove compiled binaries - run: | - rm -r pydantic/*.so 
pydantic/*.c pydantic/__pycache__ - ls -alh - ls -alh pydantic/ - - - name: test uncompiled without deps + - name: test run: make test env: - COVERAGE_FILE: coverage/.coverage.linux-py${{ matrix.python-version }}-cN-dN - CONTEXT: linux-py${{ matrix.python-version }}-compiled-no-deps-no + COVERAGE_FILE: coverage/.coverage.linux-py${{ matrix.python-version }}-compiled + CONTEXT: linux-py${{ matrix.python-version }}-compiled - name: store coverage files - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: coverage path: coverage - test-windows-mac: + test-not-compiled: name: test py${{ matrix.python-version }} on ${{ matrix.os }} strategy: fail-fast: false matrix: - os: [macos, windows] + os: [ubuntu, macos, windows] python-version: ['3.6', '3.7', '3.8', '3.9', '3.10'] + env: PYTHON: ${{ matrix.python-version }} OS: ${{ matrix.os }} @@ -140,26 +165,49 @@ jobs: runs-on: ${{ matrix.os }}-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} + - uses: actions/cache@v3 + id: cache + with: + path: ${{ env.pythonLocation }} + key: > + test-not-compiled + ${{ runner.os }} + ${{ env.pythonLocation }} + ${{ hashFiles('setup.py') }} + ${{ hashFiles('requirements.txt') }} + ${{ hashFiles('tests/requirements-testing.txt') }} + - name: install run: make install-testing + - run: pip freeze + - run: mkdir coverage - - name: test + - name: test with deps + run: make test + env: + COVERAGE_FILE: coverage/.coverage.${{ runner.os }}-py${{ matrix.python-version }}-with-deps + CONTEXT: ${{ runner.os }}-py${{ matrix.python-version }}-with-deps + + - name: uninstall deps + run: pip uninstall -y cython email-validator devtools python-dotenv + + - name: test without deps run: make test env: - COVERAGE_FILE: coverage/.coverage.${{ matrix.os }}-py${{ matrix.python-version }} - CONTEXT: ${{ matrix.os }}-py${{ matrix.python-version }} + COVERAGE_FILE: coverage/.coverage.${{ runner.os }}-py${{ matrix.python-version }}-without-deps + CONTEXT: ${{ runner.os }}-py${{ matrix.python-version }}-without-deps - name: store coverage files - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: coverage path: coverage @@ -170,13 +218,13 @@ jobs: strategy: fail-fast: false matrix: - mypy-version: ['0.910', '0.920', '0.921'] + mypy-version: ['0.910', '0.921', '0.931', '0.942', '0.950'] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: '3.10' @@ -200,37 +248,37 @@ jobs: CONTEXT: linux-py3.10-mypy${{ matrix.mypy-version }} - name: store coverage files - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: coverage path: coverage coverage-combine: - needs: [test-linux, test-windows-mac, test-old-mypy] + needs: [test-linux-compiled, test-not-compiled, test-old-mypy] runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - - uses: actions/setup-python@v1 + - uses: actions/setup-python@v3 with: python-version: '3.8' - name: get coverage files - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: coverage path: coverage - run: pip install coverage - - run: ls -la + - run: ls -la coverage - run: coverage combine coverage - run: coverage report - run: coverage html --show-contexts - name: Store coverage html - uses: 
actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: coverage-html path: htmlcov @@ -239,12 +287,12 @@ jobs: name: test fastAPI runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: - python-version: '3.7' + python-version: '3.10' - name: install run: make install-testing @@ -252,32 +300,9 @@ jobs: - name: test run: make test-fastapi - benchmark: - name: run benchmarks - runs-on: ubuntu-latest - env: - BENCHMARK_REPEATS: 1 - - steps: - - uses: actions/checkout@v2 - - - name: set up python - uses: actions/setup-python@v2 - with: - python-version: '3.8' - - - name: install and build - run: | - make build - make install-benchmarks - - - run: make benchmark-pydantic - - run: make benchmark-all - - run: make benchmark-json - build: name: build py3.${{ matrix.python-version }} on ${{ matrix.platform || matrix.os }} - needs: [lint, test-linux, test-windows-mac, test-old-mypy, test-fastapi, benchmark] + needs: [lint, test-linux-compiled, test-not-compiled, test-old-mypy, test-fastapi] if: "success() && (startsWith(github.ref, 'refs/tags/') || github.ref == 'refs/heads/master')" strategy: fail-fast: false @@ -292,10 +317,10 @@ jobs: runs-on: ${{ matrix.os }}-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: '3.8' @@ -331,7 +356,7 @@ jobs: twine check dist/* - name: Store dist artifacts - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: pypi_files path: dist @@ -343,21 +368,21 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: '3.8' - name: get dist artifacts - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: pypi_files path: dist - name: get docs - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: docs path: site diff --git a/.github/workflows/combine-dependabot.yml b/.github/workflows/combine-dependabot.yml new file mode 100644 index 00000000000..1b141a75f0b --- /dev/null +++ b/.github/workflows/combine-dependabot.yml @@ -0,0 +1,137 @@ +# from https://github.com/hrvey/combine-prs-workflow/blob/master/combine-prs.yml +name: 'Combine Dependabot PRs' + +on: + workflow_dispatch: + inputs: + branchPrefix: + description: 'Branch prefix to find combinable PRs based on' + required: true + default: 'dependabot/' + mustBeGreen: + description: 'Only combine PRs that are green' + required: true + default: true + combineBranchName: + description: 'Name of the branch to combine PRs into' + required: true + default: 'combine-dependabot-bumps' + ignoreLabel: + description: 'Exclude PRs with this label' + required: true + default: 'nocombine' + +jobs: + combine-prs: + runs-on: ubuntu-latest + + steps: + - uses: actions/github-script@v6 + id: fetch-branch-names + name: Fetch branch names + with: + github-token: ${{secrets.GITHUB_TOKEN}} + script: | + const pulls = await github.paginate('GET /repos/:owner/:repo/pulls', { + owner: context.repo.owner, + repo: context.repo.repo + }); + branches = []; + prs = []; + base_branch = null; + for (const pull of pulls) { + const branch = pull['head']['ref']; + console.log('Pull for branch: ' + branch); + if (branch.startsWith('${{ 
github.event.inputs.branchPrefix }}')) { + console.log('Branch matched: ' + branch); + statusOK = true; + if(${{ github.event.inputs.mustBeGreen }}) { + console.log('Checking green status: ' + branch); + const statuses = await github.paginate('GET /repos/{owner}/{repo}/commits/{ref}/status', { + owner: context.repo.owner, + repo: context.repo.repo, + ref: branch + }); + if(statuses.length > 0) { + const latest_status = statuses[0]['state']; + console.log('Validating status: ' + latest_status); + if(latest_status != 'success') { + console.log('Discarding ' + branch + ' with status ' + latest_status); + statusOK = false; + } + } + } + console.log('Checking labels: ' + branch); + const labels = pull['labels']; + for(const label of labels) { + const labelName = label['name']; + console.log('Checking label: ' + labelName); + if(labelName == '${{ github.event.inputs.ignoreLabel }}') { + console.log('Discarding ' + branch + ' with label ' + labelName); + statusOK = false; + } + } + if (statusOK) { + console.log('Adding branch to array: ' + branch); + branches.push(branch); + prs.push('#' + pull['number'] + ' ' + pull['title']); + base_branch = pull['base']['ref']; + } + } + } + if (branches.length == 0) { + core.setFailed('No PRs/branches matched criteria'); + return; + } + core.setOutput('base-branch', base_branch); + core.setOutput('prs-string', prs.join('\n')); + + combined = branches.join(' ') + console.log('Combined: ' + combined); + return combined + + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + # Creates a branch with other PR branches merged together + - name: Created combined branch + env: + BASE_BRANCH: ${{ steps.fetch-branch-names.outputs.base-branch }} + BRANCHES_TO_COMBINE: ${{ steps.fetch-branch-names.outputs.result }} + COMBINE_BRANCH_NAME: ${{ github.event.inputs.combineBranchName }} + run: | + echo "$BRANCHES_TO_COMBINE" + sourcebranches="${BRANCHES_TO_COMBINE%\"}" + sourcebranches="${sourcebranches#\"}" + + basebranch="${BASE_BRANCH%\"}" + basebranch="${basebranch#\"}" + + git config pull.rebase false + git config user.name github-actions + git config user.email github-actions@github.com + + git branch $COMBINE_BRANCH_NAME $basebranch + git checkout $COMBINE_BRANCH_NAME + git pull origin $sourcebranches --no-edit + git push origin $COMBINE_BRANCH_NAME + + # Creates a PR with the new combined branch + - uses: actions/github-script@v6 + name: Create Combined Pull Request + env: + PRS_STRING: ${{ steps.fetch-branch-names.outputs.prs-string }} + with: + github-token: ${{secrets.GITHUB_TOKEN}} + script: | + const prString = process.env.PRS_STRING; + const body = 'This PR was created by the Combine PRs action by combining the following PRs:\n' + prString; + await github.pulls.create({ + owner: context.repo.owner, + repo: context.repo.repo, + title: 'Combined Dependabot Bumps', + head: '${{ github.event.inputs.combineBranchName }}', + base: '${{ steps.fetch-branch-names.outputs.base-branch }}', + body: body + }); diff --git a/.github/workflows/upload-previews.yml b/.github/workflows/upload-previews.yml index 5325c4939db..2ebcf4bb026 100644 --- a/.github/workflows/upload-previews.yml +++ b/.github/workflows/upload-previews.yml @@ -14,9 +14,9 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/setup-python@v1 + - uses: actions/setup-python@v3 with: - python-version: '3.8' + python-version: '3.9' - run: pip install smokeshow diff --git a/.gitignore b/.gitignore index e0c46182de1..55c22395276 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,7 @@ .idea/ env/ venv/ 
-env36/ -env37/ -env38/ -env39/ +env3*/ Pipfile *.lock *.py[cod] diff --git a/HISTORY.md b/HISTORY.md index 63ba284c00c..b5ba45b80e0 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,3 +1,32 @@ +## v1.9.1 (2022-05-19) + +Thank you to pydantic's sponsors: +@tiangolo, @stellargraph, @JonasKs, @grillazz, @Mazyod, @kevinalh, @chdsbd, @povilasb, @povilasb, @jina-ai, +@mainframeindustries, @robusta-dev, @SendCloud, @rszamszur, @jodal, @hardbyte, @corleyma, @daddycocoaman, +@Rehket, @jokull, @reillysiemens, @westonsteimel, @primer-io, @koxudaxi, @browniebroke, @stradivari96, +@adriangb, @kamalgill, @jqueguiner, @dev-zero, @datarootsio, @RedCarpetUp +for their kind support. + +* Limit the size of `generics._generic_types_cache` and `generics._assigned_parameters` + to avoid unlimited increase in memory usage, #4083 by @samuelcolvin +* Add Jupyverse and FPS as Jupyter projects using pydantic, #4082 by @davidbrochart +* Speedup `__isinstancecheck__` on pydantic models when the type is not a model, may also avoid memory "leaks", #4081 by @samuelcolvin +* Fix in-place modification of `FieldInfo` that caused problems with PEP 593 type aliases, #4067 by @adriangb +* Add support for autocomplete in VS Code via `__dataclass_transform__` when using `pydantic.dataclasses.dataclass`, #4006 by @giuliano-oliveira +* Remove benchmarks from codebase and docs, #3973 by @samuelcolvin +* Typing checking with pyright in CI, improve docs on vscode/pylance/pyright, #3972 by @samuelcolvin +* Fix nested Python dataclass schema regression, #3819 by @himbeles +* Update documentation about lazy evaluation of sources for Settings, #3806 by @garyd203 +* Prevent subclasses of bytes being converted to bytes, #3706 by @samuelcolvin +* Fixed "error checking inheritance of" when using PEP585 and PEP604 type hints, #3681 by @aleksul +* Allow self referencing `ClassVar`s in models, #3679 by @samuelcolvin +* Fix issue with self-referencing dataclass, #3675 by @uriyyo +* Include non-standard port numbers in rendered URLs, #3652 by @dolfinus +* `Config.copy_on_model_validation` does a deep copy and not a shallow one, #3641 by @PrettyWood +* fix: clarify that discriminated unions do not support singletons, #3636 by @tommilligan +* Add `read_text(encoding='utf-8')` for `setup.py`, #3625 by @hswong3i +* Fix JSON Schema generation for Discriminated Unions within lists, #3608 by @samuelcolvin + ## v1.9.0 (2021-12-31) Thank you to pydantic's sponsors: diff --git a/Makefile b/Makefile index 89bef90a605..cf846064eaf 100644 --- a/Makefile +++ b/Makefile @@ -21,10 +21,6 @@ install-testing: install-pydantic install-docs: install-pydantic pip install -U -r docs/requirements.txt -.PHONY: install-benchmarks -install-benchmarks: install-pydantic - pip install -U -r benchmarks/requirements.txt - .PHONY: install install: install-testing install-linting install-docs @echo 'installed development requirements' @@ -58,6 +54,10 @@ check-dist: mypy: mypy pydantic +.PHONY: pyright +pyright: + cd tests/pyright && pyright + .PHONY: test test: pytest --cov=pydantic @@ -85,18 +85,6 @@ test-fastapi: .PHONY: all all: lint mypy testcov -.PHONY: benchmark-all -benchmark-all: - python benchmarks/run.py - -.PHONY: benchmark-pydantic -benchmark-pydantic: - python benchmarks/run.py pydantic-only - -.PHONY: benchmark-json -benchmark-json: - TEST_JSON=1 python benchmarks/run.py - .PHONY: clean clean: rm -rf `find . 
-name __pycache__` diff --git a/README.md b/README.md index 98e022ac602..c5c8a62c073 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ [![versions](https://img.shields.io/pypi/pyversions/pydantic.svg)](https://github.com/samuelcolvin/pydantic) [![license](https://img.shields.io/github/license/samuelcolvin/pydantic.svg)](https://github.com/samuelcolvin/pydantic/blob/master/LICENSE) -Data validation and settings management using Python type hinting. +Data validation and settings management using Python type hints. Fast and extensible, *pydantic* plays nicely with your linters/IDE/brain. Define how data should be in pure, canonical Python 3.6+; validate it with *pydantic*. diff --git a/benchmarks/profile.py b/benchmarks/profile.py deleted file mode 100644 index 53e4ab43eec..00000000000 --- a/benchmarks/profile.py +++ /dev/null @@ -1,39 +0,0 @@ -import json - -from line_profiler import LineProfiler - -import pydantic.datetime_parse -import pydantic.validators -from pydantic import validate_model -from pydantic.fields import ModelField -from test_pydantic import TestPydantic - -with open('./benchmarks/cases.json') as f: - cases = json.load(f) - - -def run(): - count, pass_count = 0, 0 - test = TestPydantic(False) - for case in cases: - passed, result = test.validate(case) - count += 1 - pass_count += passed - print('success percentage:', pass_count / count * 100) - - -funcs_to_profile = [validate_model, ModelField.validate, ModelField._validate_singleton, ModelField._apply_validators] -module_objects = {**vars(pydantic.validators), **vars(pydantic.datetime_parse), **vars(ModelField)} -funcs_to_profile += [v for k, v in module_objects.items() if not k.startswith('_') and str(v).startswith('{lpad}} ({i+1:>{len(str(repeats))}}/{repeats}) time={time:0.3f}s, success={success:0.2f}%') - times.append(time) - print(f'{p:>{lpad}} best={min(times):0.3f}s, avg={mean(times):0.3f}s, stdev={stdev(times):0.3f}s') - model_count = 3 * len(cases) - avg = mean(times) / model_count * 1e6 - sd = stdev(times) / model_count * 1e6 - results.append(f'{p:>{lpad}} best={min(times) / model_count * 1e6:0.3f}μs/iter ' - f'avg={avg:0.3f}μs/iter stdev={sd:0.3f}μs/iter version={test_class.version}') - csv_results.append([p, test_class.version, avg]) - print() - - return results, csv_results - -def main(): - json_path = THIS_DIR / 'cases.json' - if not json_path.exists(): - print('generating test cases...') - cases = [generate_case() for _ in range(2000)] - with json_path.open('w') as f: - json.dump(cases, f, indent=2, sort_keys=True) - else: - with json_path.open() as f: - cases = json.load(f) - - tests = [TestPydantic] - if 'pydantic-only' not in sys.argv: - tests += active_other_tests - - repeats = int(os.getenv('BENCHMARK_REPEATS', '5')) - test_json = 'TEST_JSON' in os.environ - results, csv_results = run_tests(tests, cases, repeats, test_json) - - for r in results: - print(r) - - if 'SAVE' in os.environ: - save_md(csv_results) - - -def save_md(data): - headings = 'Package', 'Version', 'Relative Performance', 'Mean validation time' - rows = [headings, ['---' for _ in headings]] - - first_avg = None - for package, version, avg in sorted(data, key=itemgetter(2)): - if first_avg: - relative = f'{avg / first_avg:0.1f}x slower' - else: - relative = '' - first_avg = avg - rows.append([package, f'`{version}`', relative, f'{avg:0.1f}μs']) - - table = '\n'.join(' | '.join(row) for row in rows) - text = f"""\ -[//]: <> (Generated with benchmarks/run.py, DO NOT EDIT THIS FILE DIRECTLY, instead run `SAVE=1 python ./run.py`.) 
- -{table} -""" - (Path(__file__).parent / '..' / 'docs' / '.benchmarks_table.md').write_text(text) - - -def diff(): - json_path = THIS_DIR / 'cases.json' - with json_path.open() as f: - cases = json.load(f) - - allow_extra = True - pydantic = TestPydantic(allow_extra) - others = [t(allow_extra) for t in active_other_tests] - - for case in cases: - pydantic_passed, pydantic_result = pydantic.validate(case) - for other in others: - other_passed, other_result = other.validate(case) - if other_passed != pydantic_passed: - print(f'⨯ pydantic {pydantic_passed} != {other.package} {other_passed}') - debug(case, pydantic_result, other_result) - return - print('✓ data passes match for all packages') - - -if __name__ == '__main__': - if 'diff' in sys.argv: - diff() - else: - main() - - # if None in other_tests: - # print('not all libraries could be imported!') - # sys.exit(1) diff --git a/benchmarks/test_cattrs.py b/benchmarks/test_cattrs.py deleted file mode 100644 index 617c68dfa9f..00000000000 --- a/benchmarks/test_cattrs.py +++ /dev/null @@ -1,99 +0,0 @@ -from datetime import datetime -from typing import List, Optional - -import attr -import cattr -from dateutil.parser import parse - - -class TestCAttrs: - package = 'attrs + cattrs' - version = attr.__version__ - - def __init__(self, allow_extra): - # cf. https://github.com/Tinche/cattrs/issues/26 why at least structure_str is needed - def structure_str(s, _): - if not isinstance(s, str): - raise ValueError() - return s - - def structure_int(i, _): - if not isinstance(i, int): - raise ValueError() - return i - - class PositiveInt(int): - ... - - def structure_posint(i, x): - i = PositiveInt(i) - if not isinstance(i, PositiveInt): - raise ValueError() - if i <= 0: - raise ValueError() - return i - - cattr.register_structure_hook(datetime, lambda isostring, _: parse(isostring)) - cattr.register_structure_hook(str, structure_str) - cattr.register_structure_hook(int, structure_int) - cattr.register_structure_hook(PositiveInt, structure_posint) - - def str_len_val(max_len: int, min_len: int = 0, required: bool = False): - # validate the max len of a string and optionally its min len and whether None is - # an acceptable value - def _check_str_len(self, attribute, value): - if value is None: - if required: - raise ValueError("") - else: - return - if len(value) > max_len: - raise ValueError("") - if min_len and len(value) < min_len: - raise ValueError("") - - return _check_str_len - - def pos_int(self, attribute, value): - # Validate that value is a positive >0 integer; None is allowed - if value is None: - return - if value <= 0: - raise ValueError("") - - @attr.s(auto_attribs=True, frozen=True, kw_only=True) - class Skill: - subject: str - subject_id: int - category: str - qual_level: str - qual_level_id: int - qual_level_ranking: float = 0 - - @attr.s(auto_attribs=True, frozen=True, kw_only=True) - class Location: - latitude: float = None - longitude: float = None - - @attr.s(auto_attribs=True, frozen=True, kw_only=True) - class Model: - id: int - sort_index: float - client_name: str = attr.ib(validator=str_len_val(255)) - # client_email: EmailStr = None - client_phone: Optional[str] = attr.ib(default=None, validator=str_len_val(255)) - location: Optional[Location] = None - - contractor: Optional[PositiveInt] - upstream_http_referrer: Optional[str] = attr.ib(default=None, validator=str_len_val(1023)) - grecaptcha_response: str = attr.ib(validator=str_len_val(1000, 20, required=True)) - last_updated: Optional[datetime] = None - skills: List[Skill] = 
[] - - self.model = Model - - def validate(self, data): - try: - return True, cattr.structure(data, self.model) - except (ValueError, TypeError, KeyError) as e: - return False, str(e) diff --git a/benchmarks/test_cerberus.py b/benchmarks/test_cerberus.py deleted file mode 100644 index 966d069cce3..00000000000 --- a/benchmarks/test_cerberus.py +++ /dev/null @@ -1,49 +0,0 @@ -from cerberus import Validator, __version__ -from dateutil.parser import parse as datetime_parse - - -class TestCerberus: - package = 'cerberus' - version = str(__version__) - - def __init__(self, allow_extra): - schema = { - 'id': {'type': 'integer', 'required': True}, - 'client_name': {'type': 'string', 'maxlength': 255, 'required': True}, - 'sort_index': {'type': 'float', 'required': True}, - 'client_phone': {'type': 'string', 'maxlength': 255, 'nullable': True}, - 'location': { - 'type': 'dict', - 'schema': {'latitude': {'type': 'float'}, 'longitude': {'type': 'float'}}, - 'nullable': True, - }, - 'contractor': {'type': 'integer', 'min': 0, 'nullable': True, 'coerce': int}, - 'upstream_http_referrer': {'type': 'string', 'maxlength': 1023, 'nullable': True}, - 'grecaptcha_response': {'type': 'string', 'minlength': 20, 'maxlength': 1000, 'required': True}, - 'last_updated': {'type': 'datetime', 'nullable': True, 'coerce': datetime_parse}, - 'skills': { - 'type': 'list', - 'default': [], - 'schema': { - 'type': 'dict', - 'schema': { - 'subject': {'type': 'string', 'required': True}, - 'subject_id': {'type': 'integer', 'required': True}, - 'category': {'type': 'string', 'required': True}, - 'qual_level': {'type': 'string', 'required': True}, - 'qual_level_id': {'type': 'integer', 'required': True}, - 'qual_level_ranking': {'type': 'float', 'default': 0, 'required': True}, - }, - }, - }, - } - - self.v = Validator(schema) - self.v.allow_unknown = allow_extra - - def validate(self, data): - validated = self.v.validated(data) - if validated is None: - return False, self.v.errors - else: - return True, validated diff --git a/benchmarks/test_drf.py b/benchmarks/test_drf.py deleted file mode 100644 index 638a0dfaeea..00000000000 --- a/benchmarks/test_drf.py +++ /dev/null @@ -1,53 +0,0 @@ -import django -from django.conf import settings - -settings.configure( - INSTALLED_APPS=['django.contrib.auth', 'django.contrib.contenttypes'] -) -django.setup() - -from rest_framework import __version__, serializers - - -class TestDRF: - package = 'django-rest-framework' - version = __version__ - - def __init__(self, allow_extra): - class Model(serializers.Serializer): - id = serializers.IntegerField() - client_name = serializers.CharField(max_length=255, trim_whitespace=False) - sort_index = serializers.FloatField() - # client_email = serializers.EmailField(required=False, allow_null=True) - client_phone = serializers.CharField(max_length=255, trim_whitespace=False, required=False, allow_null=True) - - class Location(serializers.Serializer): - latitude = serializers.FloatField(required=False, allow_null=True) - longitude = serializers.FloatField(required=False, allow_null=True) - location = Location(required=False, allow_null=True) - - contractor = serializers.IntegerField(required=False, allow_null=True, min_value=0) - upstream_http_referrer = serializers.CharField( - max_length=1023, trim_whitespace=False, required=False, allow_null=True - ) - grecaptcha_response = serializers.CharField(min_length=20, max_length=1000, trim_whitespace=False) - last_updated = serializers.DateTimeField(required=False, allow_null=True) - - class 
Skill(serializers.Serializer): - subject = serializers.CharField() - subject_id = serializers.IntegerField() - category = serializers.CharField() - qual_level = serializers.CharField() - qual_level_id = serializers.IntegerField() - qual_level_ranking = serializers.FloatField(default=0) - skills = serializers.ListField(child=Skill()) - - self.allow_extra = allow_extra # unused - self.serializer = Model - - def validate(self, data): - s = self.serializer(data=data) - if s.is_valid(): - return True, dict(s.data) - else: - return False, dict(s.errors) diff --git a/benchmarks/test_marshmallow.py b/benchmarks/test_marshmallow.py deleted file mode 100644 index 8ebc0355c27..00000000000 --- a/benchmarks/test_marshmallow.py +++ /dev/null @@ -1,45 +0,0 @@ -from marshmallow import Schema, ValidationError, __version__, fields, validate - - -class TestMarshmallow: - package = 'marshmallow' - version = __version__ - - def __init__(self, allow_extra): - class LocationSchema(Schema): - latitude = fields.Float(allow_none=True) - longitude = fields.Float(allow_none=True) - - class SkillSchema(Schema): - subject = fields.Str(required=True) - subject_id = fields.Integer(required=True) - category = fields.Str(required=True) - qual_level = fields.Str(required=True) - qual_level_id = fields.Integer(required=True) - qual_level_ranking = fields.Float(default=0) - - class Model(Schema): - id = fields.Integer(required=True) - client_name = fields.Str(validate=validate.Length(max=255), required=True) - sort_index = fields.Float(required=True) - # client_email = fields.Email() - client_phone = fields.Str(validate=validate.Length(max=255), allow_none=True) - - location = fields.Nested(LocationSchema) - - contractor = fields.Integer(validate=validate.Range(min=0), allow_none=True) - upstream_http_referrer = fields.Str(validate=validate.Length(max=1023), allow_none=True) - grecaptcha_response = fields.Str(validate=validate.Length(min=20, max=1000), required=True) - last_updated = fields.DateTime(allow_none=True) - skills = fields.Nested(SkillSchema, many=True) - - self.allow_extra = allow_extra # unused - self.schema = Model() - - def validate(self, data): - try: - result = self.schema.load(data) - except ValidationError as e: - return False, e.normalized_messages() - else: - return True, result diff --git a/benchmarks/test_pydantic.py b/benchmarks/test_pydantic.py deleted file mode 100644 index ff99dceb69c..00000000000 --- a/benchmarks/test_pydantic.py +++ /dev/null @@ -1,52 +0,0 @@ -from datetime import datetime -from typing import List - -from pydantic import VERSION, BaseModel, Extra, PositiveInt, ValidationError, constr - - -class TestPydantic: - package = 'pydantic' - version = str(VERSION) - - def __init__(self, allow_extra): - class Model(BaseModel): - id: int - client_name: constr(max_length=255) - sort_index: float - # client_email: EmailStr = None - client_phone: constr(max_length=255) = None - - class Location(BaseModel): - latitude: float = None - longitude: float = None - - location: Location = None - - contractor: PositiveInt = None - upstream_http_referrer: constr(max_length=1023) = None - grecaptcha_response: constr(min_length=20, max_length=1000) - last_updated: datetime = None - - class Skill(BaseModel): - subject: str - subject_id: int - category: str - qual_level: str - qual_level_id: int - qual_level_ranking: float = 0 - - skills: List[Skill] = [] - - class Config: - extra = Extra.allow if allow_extra else Extra.forbid - - self.model = Model - - def validate(self, data): - try: - return True, 
self.model(**data) - except ValidationError as e: - return False, e.errors() - - def to_json(self, model): - return model.json() diff --git a/benchmarks/test_schematics.py b/benchmarks/test_schematics.py deleted file mode 100644 index 94af84637d0..00000000000 --- a/benchmarks/test_schematics.py +++ /dev/null @@ -1,50 +0,0 @@ -from schematics import __version__ -from schematics.exceptions import DataError, ValidationError -from schematics.models import Model as PModel -from schematics.types import IntType, StringType -from schematics.types.base import DateType, FloatType -from schematics.types.compound import ListType, ModelType - - -class TestSchematics: - package = 'schematics' - version = __version__ - - def __init__(self, allow_extra): - class Model(PModel): - id = IntType(required=True) - client_name = StringType(max_length=255, required=True) - sort_index = FloatType(required=True) - client_phone = StringType(max_length=255, default=None) - - class Location(PModel): - latitude = FloatType(default=None) - longitude = FloatType(default=None) - - location = ModelType(model_spec=Location, default=None) - - contractor = IntType(min_value=1, default=None) - upstream_http_referrer = StringType(max_length=1023, default=None) - grecaptcha_response = StringType(min_length=20, max_length=1000, required=True) - last_updated = DateType(formats='%Y-%m-%dT%H:%M:%S') - - class Skill(PModel): - subject = StringType(required=True) - subject_id = IntType(required=True) - category = StringType(required=True) - qual_level = StringType(required=True) - qual_level_id = IntType(required=True) - qual_level_ranking = FloatType(default=0, required=True) - - skills = ListType(ModelType(Skill), default=[]) - - self.model = Model - - def validate(self, data): - try: - obj = self.model(data) - return True, obj.validate() - except DataError as e: - return False, e - except ValidationError as e: - return False, e diff --git a/benchmarks/test_trafaret.py b/benchmarks/test_trafaret.py deleted file mode 100644 index 546c165d1f2..00000000000 --- a/benchmarks/test_trafaret.py +++ /dev/null @@ -1,46 +0,0 @@ -from dateutil.parser import parse -import trafaret as t - - -class TestTrafaret: - package = 'trafaret' - version = '.'.join(map(str, t.__VERSION__)) - - def __init__(self, allow_extra): - self.schema = t.Dict({ - 'id': t.Int(), - 'client_name': t.String(max_length=255), - 'sort_index': t.Float, - # t.Key('client_email', optional=True): t.Or(t.Null | t.Email()), - t.Key('client_phone', optional=True): t.Or(t.Null | t.String(max_length=255)), - - t.Key('location', optional=True): t.Or(t.Null | t.Dict({ - 'latitude': t.Or(t.Float | t.Null), - 'longitude': t.Or(t.Float | t.Null), - })), - - t.Key('contractor', optional=True): t.Or(t.Null | t.Int(gt=0)), - t.Key('upstream_http_referrer', optional=True): t.Or(t.Null | t.String(max_length=1023)), - t.Key('grecaptcha_response'): t.String(min_length=20, max_length=1000), - - t.Key('last_updated', optional=True): t.Or(t.Null | t.String >> parse), - - t.Key('skills', default=[]): t.List(t.Dict({ - 'subject': t.String, - 'subject_id': t.Int, - 'category': t.String, - 'qual_level': t.String, - 'qual_level_id': t.Int, - t.Key('qual_level_ranking', default=0): t.Float, - })), - }) - if allow_extra: - self.schema.allow_extra('*') - - def validate(self, data): - try: - return True, self.schema.check(data) - except t.DataError: - return False, None - except ValueError: - return False, None diff --git a/benchmarks/test_valideer.py b/benchmarks/test_valideer.py deleted file mode 100644 
index 353122acec4..00000000000 --- a/benchmarks/test_valideer.py +++ /dev/null @@ -1,47 +0,0 @@ -import re -import subprocess - -import dateutil.parser -import valideer as V - -# valideer appears to provide no way of getting the installed version -p = subprocess.run(['pip', 'freeze'], stdout=subprocess.PIPE, encoding='utf8', check=True) -valideer_version = re.search(r'valideer==(.+)', p.stdout).group(1) - - -class TestValideer: - package = 'valideer' - version = valideer_version - - def __init__(self, allow_extra): - schema = { - '+id': int, - '+client_name': V.String(max_length=255), - '+sort_index': float, - 'client_phone': V.Nullable(V.String(max_length=255)), - 'location': {'latitude': float, 'longitude': float}, - 'contractor': V.Range(V.AdaptTo(int), min_value=1), - 'upstream_http_referrer': V.Nullable(V.String(max_length=1023)), - '+grecaptcha_response': V.String(min_length=20, max_length=1000), - 'last_updated': V.AdaptBy(dateutil.parser.parse), - 'skills': V.Nullable( - [ - { - '+subject': str, - '+subject_id': int, - '+category': str, - '+qual_level': str, - '+qual_level_id': int, - 'qual_level_ranking': V.Nullable(float, default=0), - } - ], - default=[], - ), - } - self.validator = V.parse(schema, additional_properties=allow_extra) - - def validate(self, data): - try: - return True, self.validator.validate(data) - except V.ValidationError as e: - return False, str(e) diff --git a/benchmarks/test_voluptuous.py b/benchmarks/test_voluptuous.py deleted file mode 100644 index 0ac46f5d653..00000000000 --- a/benchmarks/test_voluptuous.py +++ /dev/null @@ -1,51 +0,0 @@ -from dateutil.parser import parse as parse_datetime -import voluptuous as v -from voluptuous.humanize import humanize_error - - -class TestVoluptuous: - package = 'voluptuous' - version = v.__version__ - - def __init__(self, allow_extra): - self.schema = v.Schema( - { - v.Required('id'): int, - v.Required('client_name'): v.All(str, v.Length(max=255)), - v.Required('sort_index'): float, - # v.Optional('client_email'): v.Maybe(v.Email), - v.Optional('client_phone'): v.Maybe(v.All(str, v.Length(max=255))), - v.Optional('location'): v.Maybe( - v.Schema( - { - 'latitude': v.Maybe(float), - 'longitude': v.Maybe(float) - }, - required=True - ) - ), - v.Optional('contractor'): v.Maybe(v.All(v.Coerce(int), v.Range(min=1))), - v.Optional('upstream_http_referrer'): v.Maybe(v.All(str, v.Length(max=1023))), - v.Required('grecaptcha_response'): v.All(str, v.Length(min=20, max=1000)), - v.Optional('last_updated'): v.Maybe(parse_datetime), - v.Required('skills', default=[]): [ - v.Schema( - { - v.Required('subject'): str, - v.Required('subject_id'): int, - v.Required('category'): str, - v.Required('qual_level'): str, - v.Required('qual_level_id'): int, - v.Required('qual_level_ranking', default=0): float, - } - ) - ], - }, - extra=allow_extra, - ) - - def validate(self, data): - try: - return True, self.schema(data) - except v.MultipleInvalid as e: - return False, humanize_error(data, e) diff --git a/docs/benchmarks.md b/docs/benchmarks.md deleted file mode 100644 index 94364bf3470..00000000000 --- a/docs/benchmarks.md +++ /dev/null @@ -1,8 +0,0 @@ -Below are the results of crude benchmarks comparing *pydantic* to other validation libraries. - -{!.benchmarks_table.md!} - -See [the benchmarks code](https://github.com/samuelcolvin/pydantic/tree/master/benchmarks) -for more details on the test case. Feel free to suggest more packages to benchmark or improve an existing one. 
- -Benchmarks were run with Python 3.8.6 and the package versions listed above installed via pypi on macOS Big Sur. diff --git a/docs/extra/redirects.js b/docs/extra/redirects.js index d8aec3e981e..b6b1a3c0f55 100644 --- a/docs/extra/redirects.js +++ b/docs/extra/redirects.js @@ -82,8 +82,6 @@ const lookup = { 'id7': '/usage/postponed_annotations/', 'id8': '/usage/postponed_annotations/', 'usage-of-union-in-annotations-and-type-order': '/usage/types/#unions', - 'benchmarks': '/benchmarks/', - 'benchmarks-tag': '/benchmarks/', 'contributing-to-pydantic': '/contributing/', 'pycharm-plugin': '/pycharm_plugin/', 'id9': '/pycharm_plugin/', diff --git a/docs/extra/tweaks.css b/docs/extra/tweaks.css index 53e4dae037c..2713d66363f 100644 --- a/docs/extra/tweaks.css +++ b/docs/extra/tweaks.css @@ -14,14 +14,18 @@ background: hsla(0, 0%, 92.5%, 0.5); } -#_default_ a._default_, #_default_ .default-text { - width: 100% !important; +.sponsors { + display: flex; + justify-content: space-between; + align-items: center; + margin: 1rem 0; } -.md-nav__link[data-md-state=blur]:not(.md-nav__link--active) { - color: rgba(0, 0, 0, 0.87); +.sponsors div { + text-align: center; } -.md-nav__link--active { - font-weight: 700; +.sponsors img { + width: 75%; + border-radius: 5px; } diff --git a/docs/index.md b/docs/index.md index 772a6ea9993..c3ef572d38d 100644 --- a/docs/index.md +++ b/docs/index.md @@ -13,6 +13,59 @@ Data validation and settings management using python type annotations. Define how data should be in pure, canonical python; validate it with *pydantic*. +## Sponsors + +Development of *pydantic* is made possible by the following sponsors: + +
+<div class="sponsors">
+  <div>
+    <img src="./sponsor_logos/salesforce.png" alt="Salesforce" />
+    <span>Salesforce</span>
+  </div>
+  <div>
+    <img src="./sponsor_logos/fastapi.png" alt="FastApi" />
+    <span>FastApi</span>
+  </div>
+  <div>
+    <img src="./sponsor_logos/tutorcruncher.png" alt="TutorCruncher" />
+    <span>TutorCruncher</span>
+  </div>
+  <div>
+    <img src="./sponsor_logos/exoflare.png" alt="ExoFlare" />
+    <span>ExoFlare</span>
+  </div>
+  <div>
+    <img src="./sponsor_logos/robusta.png" alt="Robusta" />
+    <span>Robusta</span>
+  </div>
+  <div>
+    <img src="./sponsor_logos/sendcloud.png" alt="SendCloud" />
+    <span>SendCloud</span>
+  </div>
+</div>
+ +And many more who kindly sponsor Samuel Colvin on [GitHub Sponsors](https://github.com/sponsors/samuelcolvin#sponsors). + + + ## Example ```py @@ -56,7 +109,8 @@ So *pydantic* uses some cool new language features, but why should I actually go be read from environment variables, and more complex objects like DSNs and python objects are often required. **fast** -: In [benchmarks](benchmarks.md) *pydantic* is faster than all other tested libraries. +: *pydantic* has always taken performance seriously, most of the library is compiled with cython giving a ~50% speedup, + it's generally as fast or faster than most similar libraries. **validate complex structures** : use of [recursive *pydantic* models](usage/models.md#recursive-models), `typing`'s @@ -83,7 +137,9 @@ Hundreds of organisations and packages are using *pydantic*, including: [Project Jupyter](https://jupyter.org/) : developers of the Jupyter notebook are using *pydantic* - [for subprojects](https://github.com/samuelcolvin/pydantic/issues/773). + [for subprojects](https://github.com/samuelcolvin/pydantic/issues/773), through the FastAPI-based Jupyter server + [Jupyverse](https://github.com/jupyter-server/jupyverse), and for [FPS](https://github.com/jupyter-server/fps)'s + configuration management. **Microsoft** : are using *pydantic* (via FastAPI) for @@ -114,6 +170,11 @@ Hundreds of organisations and packages are using *pydantic*, including: : trusts *pydantic* (via FastAPI) and [*arq*](https://github.com/samuelcolvin/arq) (Samuel's excellent asynchronous task queue) to reliably power multiple mission-critical microservices. +[Robusta.dev](https://robusta.dev/) +: are using *pydantic* to automate Kubernetes troubleshooting and maintenance. For example, their open source + [tools to debug and profile Python applications on Kubernetes](https://home.robusta.dev/python/) use + *pydantic* models. + For a more comprehensive list of open-source projects using *pydantic* see the [list of dependents on github](https://github.com/samuelcolvin/pydantic/network/dependents). 
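The `## Example` heading above survives only as hunk context, so the snippet the index page refers to is not part of this diff. For orientation, a minimal sketch of the model-then-validate flow that page demonstrates (field names here are illustrative, not taken from the diff):

```py
from datetime import datetime
from typing import List, Optional

from pydantic import BaseModel, ValidationError


class User(BaseModel):
    id: int  # required; int-like input such as '123' is coerced
    name: str = 'Jane Doe'  # a default makes the field optional
    signup_ts: Optional[datetime] = None
    friends: List[int] = []


user = User(id='123', signup_ts='2022-05-19 12:22', friends=[1, '2', b'3'])
print(user.id)       # 123
print(user.friends)  # [1, 2, 3]

try:
    User(signup_ts='broken', friends=[1, 2, 'not a number'])
except ValidationError as e:
    print(e.json())  # structured errors for id, signup_ts and friends[2]
```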
diff --git a/docs/requirements.txt b/docs/requirements.txt index 88f06ea3f75..348505f4d92 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,12 +1,12 @@ ansi2html==1.6.0 flake8==4.0.1 flake8-quotes==3.3.1 -hypothesis==6.31.6 +hypothesis==6.46.3 markdown-include==0.6.0 mdx-truly-sane-lists==1.2 -mkdocs==1.2.3 +mkdocs==1.3.0 mkdocs-exclude==1.0.2 -mkdocs-material==8.1.3 +mkdocs-material==8.2.14 sqlalchemy orjson ujson diff --git a/docs/sponsor_logos/exoflare.png b/docs/sponsor_logos/exoflare.png new file mode 100644 index 00000000000..1f18c0e7eba Binary files /dev/null and b/docs/sponsor_logos/exoflare.png differ diff --git a/docs/sponsor_logos/fastapi.png b/docs/sponsor_logos/fastapi.png new file mode 100644 index 00000000000..4e7a830e112 Binary files /dev/null and b/docs/sponsor_logos/fastapi.png differ diff --git a/docs/sponsor_logos/robusta.png b/docs/sponsor_logos/robusta.png new file mode 100644 index 00000000000..02c7ef29783 Binary files /dev/null and b/docs/sponsor_logos/robusta.png differ diff --git a/docs/sponsor_logos/salesforce.png b/docs/sponsor_logos/salesforce.png new file mode 100644 index 00000000000..9264adb9339 Binary files /dev/null and b/docs/sponsor_logos/salesforce.png differ diff --git a/docs/sponsor_logos/sendcloud.png b/docs/sponsor_logos/sendcloud.png new file mode 100644 index 00000000000..04a6f046e34 Binary files /dev/null and b/docs/sponsor_logos/sendcloud.png differ diff --git a/docs/sponsor_logos/tutorcruncher.png b/docs/sponsor_logos/tutorcruncher.png new file mode 100644 index 00000000000..573cd48a1b5 Binary files /dev/null and b/docs/sponsor_logos/tutorcruncher.png differ diff --git a/docs/usage/settings.md b/docs/usage/settings.md index 1de49c0a583..6bb752a1ea3 100644 --- a/docs/usage/settings.md +++ b/docs/usage/settings.md @@ -71,7 +71,7 @@ by treating the environment variable's value as a JSON-encoded string. Another way to populate nested complex variables is to configure your model with the `env_nested_delimiter` config setting, then use an env variable with a name pointing to the nested module fields. -What it does is simply explodes yor variable into nested models or dicts. +What it does is simply explodes your variable into nested models or dicts. So if you define a variable `FOO__BAR__BAZ=123` it will convert it into `FOO={'BAR': {'BAZ': 123}}` If you have multiple variables with the same structure they will be merged. @@ -278,6 +278,3 @@ You might also want to disable a source: {!.tmp_examples/settings_disable_source.py!} ``` _(This script is complete, it should run "as is", here you might need to set the `my_api_key` environment variable)_ - -Because of the callables approach of `customise_sources`, evaluation of sources is lazy so unused sources don't -have an adverse effect on performance. diff --git a/docs/usage/types.md b/docs/usage/types.md index 3fcd61d318a..86a437ba6e1 100644 --- a/docs/usage/types.md +++ b/docs/usage/types.md @@ -297,6 +297,12 @@ _(This script is complete, it should run "as is")_ Using the [Annotated Fields syntax](../schema/#typingannotated-fields) can be handy to regroup the `Union` and `discriminator` information. See below for an example! +!!! warning + Discriminated unions cannot be used with only a single variant, such as `Union[Cat]`. + + Python changes `Union[T]` into `T` at interpretation time, so it is not possible for `pydantic` to + distinguish fields of `Union[T]` from `T`. 
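The warning just added above rules out single-variant unions; for contrast, a minimal sketch of a discriminated union pydantic does accept, using the `Field(discriminator=...)` syntax this page documents (the `Cat`/`Dog` models are illustrative):

```py
from typing import Literal, Union

from pydantic import BaseModel, Field, ValidationError


class Cat(BaseModel):
    pet_type: Literal['cat']
    meows: int


class Dog(BaseModel):
    pet_type: Literal['dog']
    barks: float


class Owner(BaseModel):
    # Union[Cat] alone would be collapsed to plain Cat at interpretation time,
    # so at least two variants are needed for `discriminator` to apply
    pet: Union[Cat, Dog] = Field(..., discriminator='pet_type')


print(Owner(pet={'pet_type': 'dog', 'barks': 3.14}))
try:
    Owner(pet={'pet_type': 'fish'})
except ValidationError as e:
    print(e)  # no variant matches the discriminator value 'fish'
```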
+

#### Nested Discriminated Unions

Only one discriminator can be set for a field but sometimes you want to combine multiple discriminators.
diff --git a/docs/usage/validation_decorator.md b/docs/usage/validation_decorator.md
index 914b9319865..74fcc8aa311 100644
--- a/docs/usage/validation_decorator.md
+++ b/docs/usage/validation_decorator.md
@@ -149,7 +149,7 @@ to use this, it may even become the default for the decorator.

### Performance

-We've made a big effort to make *pydantic* as performant as possible (see [the benchmarks](../benchmarks.md))
+We've made a big effort to make *pydantic* as performant as possible
and argument inspect and model creation is only performed once when the function is defined, however
there will still be a performance impact to using the `validate_arguments` decorator compared to calling the raw
function.
diff --git a/docs/visual_studio_code.md b/docs/visual_studio_code.md
index 98c58fb01f4..7ec37b5930f 100644
--- a/docs/visual_studio_code.md
+++ b/docs/visual_studio_code.md
@@ -130,10 +130,18 @@ Below are several techniques to achieve it.

You can disable the errors for a specific line using a comment of:

-```
+```py
# type: ignore
```

+or (to be specific to pylance/pyright):
+
+```py
+# pyright: ignore
+```
+
+([pyright](https://github.com/microsoft/pyright) is the language server used by Pylance).
+
coming back to the example with `age='23'`, it would be:

```Python hl_lines="10"
@@ -146,7 +154,7 @@ class Knight(BaseModel):
    color: str = 'blue'


-lancelot = Knight(title='Sir Lancelot', age='23')  # type: ignore
+lancelot = Knight(title='Sir Lancelot', age='23')  # pyright: ignore
```

that way Pylance and mypy will ignore errors in that line.
@@ -243,10 +251,44 @@ The specific configuration **`frozen`** (in beta) has a special meaning.

It prevents other code from changing a model instance once it's created, keeping it **"frozen"**.

-When using the second version to declare `frozen=True` (with **keyword arguments** in the class definition), Pylance can use it to help you check in your code and **detect errors** when something is trying to set values in a model that is "frozen".
+When using the second version to declare `frozen=True` (with **keyword arguments** in the class definition),
+Pylance can use it to help you check in your code and **detect errors** when something is trying to set values
+in a model that is "frozen".

![VS Code strict type errors with model](./img/vs_code_08.png)

+## BaseSettings and ignoring Pylance/pyright errors
+
+Pylance/pyright does not work well with [`BaseSettings`](./usage/settings.md) - fields in settings classes can be
+configured via environment variables and therefore "required" fields do not have to be explicitly set when
+initialising a settings instance. However, pyright considers these fields as "required" and will therefore
+show an error when they're not set.
+
+See [#3753](https://github.com/samuelcolvin/pydantic/issues/3753#issuecomment-1087417884) for an explanation of the
+reasons behind this, and why we can't avoid the problem.
+
+There are two potential workarounds:
+
+* use an ignore comment (`# pyright: ignore`) when initialising `settings`
+* or, use `settings.parse_obj({})` to avoid the warning
+
+## Adding a default with `Field`
+
+Pylance/pyright requires `default` to be a keyword argument to `Field` in order to infer that the field is optional.
+
+```py
+from pydantic import BaseModel, Field
+
+
+class Knight(BaseModel):
+    title: str = Field(default='Sir Lancelot')  # this is okay
+    age: int = Field(23)  # this works fine at runtime but will cause an error for pyright
+
+lance = Knight()  # error: Argument missing for parameter "age"
+```
+
+Like the issue with `BaseSettings`, this is a limitation of dataclass transforms and cannot be fixed in pydantic.
+
## Technical Details

!!! warning
diff --git a/mkdocs.yml b/mkdocs.yml
index 537bca4e3e7..2e5933b7986 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -56,7 +56,6 @@ nav:
- 'Usage with mypy': usage/mypy.md
- 'Usage with devtools': usage/devtools.md
- Contributing to pydantic: contributing.md
-- benchmarks.md
- 'Mypy plugin': mypy_plugin.md
- 'PyCharm plugin': pycharm_plugin.md
- 'Visual Studio Code': visual_studio_code.md
diff --git a/pydantic/_hypothesis_plugin.py b/pydantic/_hypothesis_plugin.py
index 79d787e9c63..890e192ccaf 100644
--- a/pydantic/_hypothesis_plugin.py
+++ b/pydantic/_hypothesis_plugin.py
@@ -358,7 +358,7 @@ def resolve_constr(cls):  # type: ignore[no-untyped-def]  # pragma: no cover

# Finally, register all previously-defined types, and patch in our new function
-for typ in pydantic.types._DEFINED_TYPES:
+for typ in list(pydantic.types._DEFINED_TYPES):
    _registered(typ)
pydantic.types._registered = _registered
st.register_type_strategy(pydantic.Json, resolve_json)
diff --git a/pydantic/class_validators.py b/pydantic/class_validators.py
index ecaeef393cc..a9cd0dbab90 100644
--- a/pydantic/class_validators.py
+++ b/pydantic/class_validators.py
@@ -329,7 +329,7 @@ def _generic_validator_basic(validator: AnyCallable, sig: 'Signature', args: Set

def gather_all_validators(type_: 'ModelOrDc') -> Dict[str, 'AnyClassMethod']:
-    all_attributes = ChainMap(*[cls.__dict__ for cls in type_.__mro__])
+    all_attributes = ChainMap(*[cls.__dict__ for cls in type_.__mro__])  # type: ignore[arg-type,var-annotated]
    return {
        k: v
        for k, v in all_attributes.items()
diff --git a/pydantic/dataclasses.py b/pydantic/dataclasses.py
index 12d4c588a58..692bfb9f3a7 100644
--- a/pydantic/dataclasses.py
+++ b/pydantic/dataclasses.py
@@ -1,10 +1,10 @@
-from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Type, TypeVar, Union, overload
+from typing import TYPE_CHECKING, Any, Callable, ClassVar, Dict, Optional, Type, TypeVar, Union, overload

from .class_validators import gather_all_validators
from .error_wrappers import ValidationError
from .errors import DataclassTypeError
from .fields import Field, FieldInfo, Required, Undefined
-from .main import create_model, validate_model
+from .main import __dataclass_transform__, create_model, validate_model
from .typing import resolve_annotations
from .utils import ClassAttribute
@@ -16,11 +16,11 @@

DataclassT = TypeVar('DataclassT', bound='Dataclass')

class Dataclass:
-    __pydantic_model__: Type[BaseModel]
-    __initialised__: bool
-    __post_init_original__: Optional[Callable[..., None]]
-    __processed__: Optional[ClassAttribute]
-    __has_field_info_default__: bool  # whether or not a `pydantic.Field` is used as default value
+    __pydantic_model__: ClassVar[Type[BaseModel]]
+    __initialised__: ClassVar[bool]
+    __post_init_original__: ClassVar[Optional[Callable[..., None]]]
+    __processed__: ClassVar[Optional[ClassAttribute]]
+    __has_field_info_default__: ClassVar[bool]  # whether or not a `pydantic.Field` is used as default value

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        pass

@@ -184,7 +184,12 @@ def _process_class(
    validators = 
gather_all_validators(cls) cls.__pydantic_model__ = create_model( - cls.__name__, __config__=config, __module__=_cls.__module__, __validators__=validators, **field_definitions + cls.__name__, + __config__=config, + __module__=_cls.__module__, + __validators__=validators, + __cls_kwargs__={'__resolve_forward_refs__': False}, + **field_definitions, ) cls.__initialised__ = False @@ -196,9 +201,12 @@ def _process_class( if cls.__pydantic_model__.__config__.validate_assignment and not frozen: cls.__setattr__ = setattr_validate_assignment # type: ignore[assignment] + cls.__pydantic_model__.__try_update_forward_refs__(**{cls.__name__: cls}) + return cls +@__dataclass_transform__(kw_only_default=True, field_descriptors=(Field, FieldInfo)) @overload def dataclass( *, @@ -213,6 +221,7 @@ def dataclass( ... +@__dataclass_transform__(kw_only_default=True, field_descriptors=(Field, FieldInfo)) @overload def dataclass( _cls: Type[Any], @@ -228,6 +237,7 @@ def dataclass( ... +@__dataclass_transform__(kw_only_default=True, field_descriptors=(Field, FieldInfo)) def dataclass( _cls: Optional[Type[Any]] = None, *, diff --git a/pydantic/fields.py b/pydantic/fields.py index 7f0094cc462..df79edfda53 100644 --- a/pydantic/fields.py +++ b/pydantic/fields.py @@ -1,3 +1,4 @@ +import copy from collections import Counter as CollectionCounter, defaultdict, deque from collections.abc import Hashable as CollectionsHashable, Iterable as CollectionsIterable from typing import ( @@ -34,6 +35,7 @@ Callable, ForwardRef, NoArgAnyCallable, + convert_generics, display_as_type, get_args, get_origin, @@ -394,7 +396,7 @@ def __init__( self.name: str = name self.has_alias: bool = bool(alias) self.alias: str = alias or name - self.type_: Any = type_ + self.type_: Any = convert_generics(type_) self.outer_type_: Any = type_ self.class_validators = class_validators or {} self.default: Any = default @@ -446,6 +448,7 @@ def _get_field_info( raise ValueError(f'cannot specify multiple `Annotated` `Field`s for {field_name!r}') field_info = next(iter(field_infos), None) if field_info is not None: + field_info = copy.copy(field_info) field_info.update_from_config(field_info_from_config) if field_info.default is not Undefined: raise ValueError(f'`Field` default cannot be set in `Annotated` for {field_name!r}') @@ -600,7 +603,7 @@ def _type_analysis(self) -> None: # noqa: C901 (ignore complexity) return if self.discriminator_key is not None and not is_union(origin): - raise TypeError('`discriminator` can only be used with `Union` type') + raise TypeError('`discriminator` can only be used with `Union` type with more than one variant') # add extra check for `collections.abc.Hashable` for python 3.10+ where origin is not `None` if origin is None or origin is CollectionsHashable: diff --git a/pydantic/generics.py b/pydantic/generics.py index a712d26f2fc..1ec9a7da081 100644 --- a/pydantic/generics.py +++ b/pydantic/generics.py @@ -24,20 +24,20 @@ from .main import BaseModel, create_model from .types import JsonWrapper from .typing import display_as_type, get_all_type_hints, get_args, get_origin, typing_base -from .utils import all_identical, lenient_issubclass +from .utils import LimitedDict, all_identical, lenient_issubclass -_generic_types_cache: Dict[Tuple[Type[Any], Union[Any, Tuple[Any, ...]]], Type[BaseModel]] = {} GenericModelT = TypeVar('GenericModelT', bound='GenericModel') TypeVarType = Any # since mypy doesn't allow the use of TypeVar as a type Parametrization = Mapping[TypeVarType, Type[Any]] +_generic_types_cache: 
'LimitedDict[Tuple[Type[Any], Union[Any, Tuple[Any, ...]]], Type[BaseModel]]' = LimitedDict() # _assigned_parameters is a Mapping from parametrized version of generic models to assigned types of parametrizations # as captured during construction of the class (not instances). # E.g., for generic model `Model[A, B]`, when parametrized model `Model[int, str]` is created, # `Model[int, str]`: {A: int, B: str}` will be stored in `_assigned_parameters`. # (This information is only otherwise available after creation from the class name string). -_assigned_parameters: Dict[Type[Any], Parametrization] = {} +_assigned_parameters: 'LimitedDict[Type[Any], Parametrization]' = LimitedDict() class GenericModel(BaseModel): @@ -98,6 +98,7 @@ def __class_getitem__(cls: Type[GenericModelT], params: Union[Type[Any], Tuple[T __base__=(cls,) + tuple(cls.__parameterized_bases__(typevars_map)), __config__=None, __validators__=validators, + __cls_kwargs__=None, **fields, ), ) diff --git a/pydantic/json.py b/pydantic/json.py index ce956fea263..cab8b800859 100644 --- a/pydantic/json.py +++ b/pydantic/json.py @@ -33,7 +33,7 @@ def decimal_encoder(dec_value: Decimal) -> Union[int, float]: This is useful when we use ConstrainedDecimal to represent Numeric(x,0) where a integer (but not int typed) is used. Encoding this as a float - results in failed round-tripping between encode and prase. + results in failed round-tripping between encode and parse. Our Id type is a prime example of this. >>> decimal_encoder(Decimal("1.0")) diff --git a/pydantic/main.py b/pydantic/main.py index eea8abcbbe8..0c20d9e69d5 100644 --- a/pydantic/main.py +++ b/pydantic/main.py @@ -154,6 +154,7 @@ def __new__(mcs, name, bases, namespace, **kwargs): # noqa C901 class_vars.update(base.__class_vars__) hash_func = base.__hash__ + resolve_forward_refs = kwargs.pop('__resolve_forward_refs__', True) allowed_config_kwargs: SetStr = { key for key in dir(config) @@ -289,10 +290,19 @@ def is_untouched(v: Any) -> bool: cls = super().__new__(mcs, name, bases, new_namespace, **kwargs) # set __signature__ attr only for model class, but not for its instances cls.__signature__ = ClassAttribute('__signature__', generate_model_signature(cls.__init__, fields, config)) - cls.__try_update_forward_refs__() + if resolve_forward_refs: + cls.__try_update_forward_refs__() return cls + def __instancecheck__(self, instance: Any) -> bool: + """ + Avoid calling ABC _abc_subclasscheck unless we're pretty sure. + + See #3829 and python/cpython#92810 + """ + return hasattr(instance, '__fields__') and super().__instancecheck__(instance) + object_setattr = object.__setattr__ @@ -666,7 +676,7 @@ def __get_validators__(cls) -> 'CallableGenerator': def validate(cls: Type['Model'], value: Any) -> 'Model': if isinstance(value, cls): if cls.__config__.copy_on_model_validation: - return value._copy_and_set_values(value.__dict__, value.__fields_set__, deep=False) + return value._copy_and_set_values(value.__dict__, value.__fields_set__, deep=True) else: return value @@ -765,12 +775,12 @@ def _get_value( return v @classmethod - def __try_update_forward_refs__(cls) -> None: + def __try_update_forward_refs__(cls, **localns: Any) -> None: """ Same as update_forward_refs but will not raise exception when forward references are not defined. 
""" - update_model_forward_refs(cls, cls.__fields__.values(), cls.__config__.json_encoders, {}, (NameError,)) + update_model_forward_refs(cls, cls.__fields__.values(), cls.__config__.json_encoders, localns, (NameError,)) @classmethod def update_forward_refs(cls, **localns: Any) -> None: @@ -892,6 +902,7 @@ def create_model( __base__: None = None, __module__: str = __name__, __validators__: Dict[str, 'AnyClassMethod'] = None, + __cls_kwargs__: Dict[str, Any] = None, **field_definitions: Any, ) -> Type['BaseModel']: ... @@ -905,6 +916,7 @@ def create_model( __base__: Union[Type['Model'], Tuple[Type['Model'], ...]], __module__: str = __name__, __validators__: Dict[str, 'AnyClassMethod'] = None, + __cls_kwargs__: Dict[str, Any] = None, **field_definitions: Any, ) -> Type['Model']: ... @@ -917,6 +929,7 @@ def create_model( __base__: Union[None, Type['Model'], Tuple[Type['Model'], ...]] = None, __module__: str = __name__, __validators__: Dict[str, 'AnyClassMethod'] = None, + __cls_kwargs__: Dict[str, Any] = None, **field_definitions: Any, ) -> Type['Model']: """ @@ -926,6 +939,7 @@ def create_model( :param __base__: base class for the new model to inherit from :param __module__: module of the created model :param __validators__: a dict of method names and @validator class methods + :param __cls_kwargs__: a dict for class creation :param field_definitions: fields of the model (or extra fields if a base is supplied) in the format `=(, )` or `=, e.g. `foobar=(str, ...)` or `foobar=123`, or, for complex use-cases, in the format @@ -940,6 +954,8 @@ def create_model( else: __base__ = (cast(Type['Model'], BaseModel),) + __cls_kwargs__ = __cls_kwargs__ or {} + fields = {} annotations = {} @@ -969,7 +985,7 @@ def create_model( if __config__: namespace['Config'] = inherit_config(__config__, BaseConfig) - return type(__model_name, __base__, namespace) + return type(__model_name, __base__, namespace, **__cls_kwargs__) _missing = object() diff --git a/pydantic/networks.py b/pydantic/networks.py index 18a042ca979..0626d2c3550 100644 --- a/pydantic/networks.py +++ b/pydantic/networks.py @@ -54,6 +54,10 @@ class Parts(TypedDict, total=False): else: email_validator = None + class Parts(dict): + pass + + NetworkType = Union[str, bytes, int, Tuple[Union[str, bytes, int], Union[str, int]]] __all__ = [ @@ -122,7 +126,7 @@ def int_domain_regex() -> Pattern[str]: class AnyUrl(str): strip_whitespace = True min_length = 1 - max_length = 2 ** 16 + max_length = 2**16 allowed_schemes: Optional[Collection[str]] = None tld_required: bool = False user_required: bool = False @@ -176,6 +180,18 @@ def build( fragment: Optional[str] = None, **_kwargs: str, ) -> str: + parts = Parts( + scheme=scheme, + user=user, + password=password, + host=host, + port=port, + path=path, + query=query, + fragment=fragment, + **_kwargs, # type: ignore[misc] + ) + url = scheme + '://' if user: url += user @@ -184,7 +200,7 @@ def build( if user or password: url += '@' url += host - if port and 'port' not in cls.hidden_parts: + if port and ('port' not in cls.hidden_parts or cls.get_default_parts(parts).get('port') != port): url += ':' + port if path: url += path @@ -265,7 +281,7 @@ def validate_parts(cls, parts: 'Parts') -> 'Parts': def validate_host(cls, parts: 'Parts') -> Tuple[str, Optional[str], str, bool]: host, tld, host_type, rebuild = None, None, None, False for f in ('domain', 'ipv4', 'ipv6'): - host = parts[f] # type: ignore[misc] + host = parts[f] # type: ignore[literal-required] if host: host_type = f break @@ -310,8 +326,8 @@ def 
get_default_parts(parts: 'Parts') -> 'Parts': @classmethod def apply_default_parts(cls, parts: 'Parts') -> 'Parts': for key, value in cls.get_default_parts(parts).items(): - if not parts[key]: # type: ignore[misc] - parts[key] = value # type: ignore[misc] + if not parts[key]: # type: ignore[literal-required] + parts[key] = value # type: ignore[literal-required] return parts def __repr__(self) -> str: @@ -386,7 +402,7 @@ def stricturl( *, strip_whitespace: bool = True, min_length: int = 1, - max_length: int = 2 ** 16, + max_length: int = 2**16, tld_required: bool = True, host_required: bool = True, allowed_schemes: Optional[Collection[str]] = None, diff --git a/pydantic/schema.py b/pydantic/schema.py index e979678c229..d37e82206c2 100644 --- a/pydantic/schema.py +++ b/pydantic/schema.py @@ -265,36 +265,6 @@ def field_schema( known_models=known_models or set(), ) - # https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#discriminator-object - if field.discriminator_key is not None: - assert field.sub_fields_mapping is not None - - discriminator_models_refs: Dict[str, Union[str, Dict[str, Any]]] = {} - - for discriminator_value, sub_field in field.sub_fields_mapping.items(): - # sub_field is either a `BaseModel` or directly an `Annotated` `Union` of many - if is_union(get_origin(sub_field.type_)): - sub_models = get_sub_types(sub_field.type_) - discriminator_models_refs[discriminator_value] = { - model_name_map[sub_model]: get_schema_ref( - model_name_map[sub_model], ref_prefix, ref_template, False - ) - for sub_model in sub_models - } - else: - sub_field_type = sub_field.type_ - if hasattr(sub_field_type, '__pydantic_model__'): - sub_field_type = sub_field_type.__pydantic_model__ - - discriminator_model_name = model_name_map[sub_field_type] - discriminator_model_ref = get_schema_ref(discriminator_model_name, ref_prefix, ref_template, False) - discriminator_models_refs[discriminator_value] = discriminator_model_ref['$ref'] - - s['discriminator'] = { - 'propertyName': field.discriminator_alias, - 'mapping': discriminator_models_refs, - } - # $ref will only be returned when there are no schema_overrides if '$ref' in f_schema: return f_schema, f_definitions, f_nested_models @@ -419,10 +389,13 @@ def get_flat_models_from_field(field: ModelField, known_models: TypeModelSet) -> # Handle dataclass-based models if is_builtin_dataclass(field.type_): field.type_ = dataclass(field.type_) + was_dataclass = True + else: + was_dataclass = False field_type = field.type_ if lenient_issubclass(getattr(field_type, '__pydantic_model__', None), BaseModel): field_type = field_type.__pydantic_model__ - if field.sub_fields and not lenient_issubclass(field_type, BaseModel): + if field.sub_fields and (not lenient_issubclass(field_type, BaseModel) or was_dataclass): flat_models |= get_flat_models_from_fields(field.sub_fields, known_models=known_models) elif lenient_issubclass(field_type, BaseModel) and field_type not in known_models: flat_models |= get_flat_models_from_model(field_type, known_models=known_models) @@ -715,7 +688,7 @@ def enum_process_schema(enum: Type[Enum], *, field: Optional[ModelField] = None) def field_singleton_sub_fields_schema( - sub_fields: Sequence[ModelField], + field: ModelField, *, by_alias: bool, model_name_map: Dict[TypeModelOrEnum, str], @@ -730,6 +703,7 @@ def field_singleton_sub_fields_schema( Take a list of Pydantic ``ModelField`` from the declaration of a type with parameters, and generate their schema. 
I.e., fields used as "type parameters", like ``str`` and ``int`` in ``Tuple[str, int]``. """ + sub_fields = cast(List[ModelField], field.sub_fields) definitions = {} nested_models: Set[str] = set() if len(sub_fields) == 1: @@ -743,6 +717,37 @@ def field_singleton_sub_fields_schema( known_models=known_models, ) else: + s: Dict[str, Any] = {} + # https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#discriminator-object + if field.discriminator_key is not None: + assert field.sub_fields_mapping is not None + + discriminator_models_refs: Dict[str, Union[str, Dict[str, Any]]] = {} + + for discriminator_value, sub_field in field.sub_fields_mapping.items(): + # sub_field is either a `BaseModel` or directly an `Annotated` `Union` of many + if is_union(get_origin(sub_field.type_)): + sub_models = get_sub_types(sub_field.type_) + discriminator_models_refs[discriminator_value] = { + model_name_map[sub_model]: get_schema_ref( + model_name_map[sub_model], ref_prefix, ref_template, False + ) + for sub_model in sub_models + } + else: + sub_field_type = sub_field.type_ + if hasattr(sub_field_type, '__pydantic_model__'): + sub_field_type = sub_field_type.__pydantic_model__ + + discriminator_model_name = model_name_map[sub_field_type] + discriminator_model_ref = get_schema_ref(discriminator_model_name, ref_prefix, ref_template, False) + discriminator_models_refs[discriminator_value] = discriminator_model_ref['$ref'] + + s['discriminator'] = { + 'propertyName': field.discriminator_alias, + 'mapping': discriminator_models_refs, + } + sub_field_schemas = [] for sf in sub_fields: sub_schema, sub_definitions, sub_nested_models = field_type_schema( @@ -760,9 +765,14 @@ def field_singleton_sub_fields_schema( # object. Otherwise we will end up with several allOf inside anyOf. # See https://github.com/samuelcolvin/pydantic/issues/1209 sub_schema = sub_schema['allOf'][0] + + if sub_schema.keys() == {'discriminator', 'anyOf'}: + # we don't want discriminator information inside anyOf choices, this is dealt with elsewhere + sub_schema.pop('discriminator') sub_field_schemas.append(sub_schema) nested_models.update(sub_nested_models) - return {'anyOf': sub_field_schemas}, definitions, nested_models + s['anyOf'] = sub_field_schemas + return s, definitions, nested_models # Order is important, e.g. 
subclasses of str must go before str
@@ -846,7 +856,7 @@ def field_singleton_schema(  # noqa: C901 (ignore complexity)
         (field.field_info and field.field_info.const) or not lenient_issubclass(field_type, BaseModel)
     ):
         return field_singleton_sub_fields_schema(
-            field.sub_fields,
+            field,
             by_alias=by_alias,
             model_name_map=model_name_map,
             schema_overrides=schema_overrides,
diff --git a/pydantic/types.py b/pydantic/types.py
index e3d6c1277dd..2d0cc18f8d7 100644
--- a/pydantic/types.py
+++ b/pydantic/types.py
@@ -1010,18 +1010,18 @@ def _get_brand(card_number: str) -> PaymentCardBrand:

 BYTE_SIZES = {
     'b': 1,
-    'kb': 10 ** 3,
-    'mb': 10 ** 6,
-    'gb': 10 ** 9,
-    'tb': 10 ** 12,
-    'pb': 10 ** 15,
-    'eb': 10 ** 18,
-    'kib': 2 ** 10,
-    'mib': 2 ** 20,
-    'gib': 2 ** 30,
-    'tib': 2 ** 40,
-    'pib': 2 ** 50,
-    'eib': 2 ** 60,
+    'kb': 10**3,
+    'mb': 10**6,
+    'gb': 10**9,
+    'tb': 10**12,
+    'pb': 10**15,
+    'eb': 10**18,
+    'kib': 2**10,
+    'mib': 2**20,
+    'gib': 2**30,
+    'tib': 2**40,
+    'pib': 2**50,
+    'eib': 2**60,
 }
 BYTE_SIZES.update({k.lower()[0]: v for k, v in BYTE_SIZES.items() if 'i' not in k})
 byte_string_re = re.compile(r'^\s*(\d*\.?\d+)\s*(\w+)?', re.IGNORECASE)
diff --git a/pydantic/typing.py b/pydantic/typing.py
index 730dc46442c..f9297ba197b 100644
--- a/pydantic/typing.py
+++ b/pydantic/typing.py
@@ -35,6 +35,12 @@
     # python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] and so on)
     TypingGenericAlias = ()

+try:
+    from types import UnionType as TypesUnionType  # type: ignore
+except ImportError:
+    # python < 3.10 does not have UnionType (str | int, bytes | bool and so on)
+    TypesUnionType = ()
+

 if sys.version_info < (3, 7):
     if TYPE_CHECKING:
@@ -186,6 +192,63 @@ def get_args(tp: Type[Any]) -> Tuple[Any, ...]:
     return _typing_get_args(tp) or getattr(tp, '__args__', ()) or _generic_get_args(tp)


+if sys.version_info < (3, 9):
+
+    def convert_generics(tp: Type[Any]) -> Type[Any]:
+        """Python 3.8 and older only support generics from the `typing` module.
+        Those convert strings to ForwardRef automatically.
+
+        Examples::
+            typing.List['Hero'] == typing.List[ForwardRef('Hero')]
+        """
+        return tp
+
+else:
+    from typing import _UnionGenericAlias  # type: ignore
+
+    from typing_extensions import _AnnotatedAlias
+
+    def convert_generics(tp: Type[Any]) -> Type[Any]:
+        """
+        Recursively searches for `str` type hints and replaces them with ForwardRef.
+
+        Examples::
+            convert_generics(list['Hero']) == list[ForwardRef('Hero')]
+            convert_generics(dict['Hero', 'Team']) == dict[ForwardRef('Hero'), ForwardRef('Team')]
+            convert_generics(typing.Dict['Hero', 'Team']) == typing.Dict[ForwardRef('Hero'), ForwardRef('Team')]
+            convert_generics(list[str | 'Hero'] | int) == list[str | ForwardRef('Hero')] | int
+        """
+        origin = get_origin(tp)
+        if not origin or not hasattr(tp, '__args__'):
+            return tp
+
+        args = get_args(tp)
+
+        # typing.Annotated needs special treatment
+        if origin is Annotated:
+            return _AnnotatedAlias(convert_generics(args[0]), args[1:])
+
+        # recursively replace `str` instances inside of `GenericAlias` with `ForwardRef(arg)`
+        converted = tuple(
+            ForwardRef(arg) if isinstance(arg, str) and isinstance(tp, TypingGenericAlias) else convert_generics(arg)
+            for arg in args
+        )
+
+        if converted == args:
+            return tp
+        elif isinstance(tp, TypingGenericAlias):
+            return TypingGenericAlias(origin, converted)
+        elif isinstance(tp, TypesUnionType):
+            # recreate types.UnionType (PEP604, Python >= 3.10)
+            return _UnionGenericAlias(origin, converted)
+        else:
+            try:
+                setattr(tp, '__args__', converted)
+            except AttributeError:
+                pass
+            return tp
+
+
 if sys.version_info < (3, 10):

     def is_union(tp: Optional[Type[Any]]) -> bool:
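# A minimal sketch of what the `convert_generics` hunk above enables on
# Python 3.9+ (assumes pydantic 1.9.1 as patched; `Hero` is a placeholder
# class name used only as a forward-reference target): bare strings inside
# builtin generics are rewritten into ForwardRef, mirroring the behaviour
# `typing` generics already have.
from typing import ForwardRef

from pydantic.typing import convert_generics

assert convert_generics(list['Hero']) == list[ForwardRef('Hero')]
assert convert_generics(dict['Hero', int]) == dict[ForwardRef('Hero'), int]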
@@ -439,7 +502,15 @@ def _check_classvar(v: Optional[Type[Any]]) -> bool:


 def is_classvar(ann_type: Type[Any]) -> bool:
-    return _check_classvar(ann_type) or _check_classvar(get_origin(ann_type))
+    if _check_classvar(ann_type) or _check_classvar(get_origin(ann_type)):
+        return True
+
+    # this is an ugly workaround for class vars that contain forward references and are therefore themselves
+    # forward references, see #3679
+    if ann_type.__class__ == ForwardRef and ann_type.__forward_arg__.startswith('ClassVar['):
+        return True
+
+    return False


 def update_field_forward_refs(field: 'ModelField', globalns: Any, localns: Any) -> None:
diff --git a/pydantic/utils.py b/pydantic/utils.py
index 2a3960f6d7a..a3c87f4bf39 100644
--- a/pydantic/utils.py
+++ b/pydantic/utils.py
@@ -16,6 +16,7 @@
     Iterator,
     List,
     Mapping,
+    MutableMapping,
     Optional,
     Set,
     Tuple,
@@ -73,6 +74,7 @@
     'ROOT_KEY',
     'get_unique_discriminator_alias',
     'get_discriminator_alias_and_values',
+    'LimitedDict',
 )

 ROOT_KEY = '__root__'
@@ -749,3 +751,39 @@ def _get_union_alias_and_all_values(
     # unzip: [('alias_a',('v1', 'v2)), ('alias_b', ('v3',))] => [('alias_a', 'alias_b'), (('v1', 'v2'), ('v3',))]
     all_aliases, all_values = zip(*zipped_aliases_values)
     return get_unique_discriminator_alias(all_aliases, discriminator_key), all_values
+
+
+KT = TypeVar('KT')
+VT = TypeVar('VT')
+if TYPE_CHECKING:
+    # Annoyingly, inheriting from both `MutableMapping` and `dict` breaks cython, hence this workaround
+    class LimitedDict(dict, MutableMapping[KT, VT]):  # type: ignore[type-arg]
+        def __init__(self, size_limit: int = 1000):
+            ...
+
+else:
+
+    class LimitedDict(dict):
+        """
+        Limit the size/length of a dict used for caching to avoid unlimited increase in memory usage.
+
+        Since the dict is ordered, and we always remove elements from the beginning, this is effectively a FIFO cache.
+
+        Annoyingly, inheriting from `MutableMapping` breaks cython.
+        """
+
+        def __init__(self, size_limit: int = 1000):
+            self.size_limit = size_limit
+            super().__init__()
+
+        def __setitem__(self, __key: Any, __value: Any) -> None:
+            super().__setitem__(__key, __value)
+            if len(self) > self.size_limit:
+                excess = len(self) - self.size_limit + self.size_limit // 10
+                to_remove = list(self.keys())[:excess]
+                for key in to_remove:
+                    del self[key]
+
+        def __class_getitem__(cls, *args: Any) -> Any:  # pragma: no cover
+            # just in case LimitedDict is used in type annotations
+            pass
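# A minimal sketch of LimitedDict's FIFO eviction as defined above (assumes
# pydantic 1.9.1; the numbers follow from `excess = len - size_limit + size_limit // 10`):
from pydantic.utils import LimitedDict

cache = LimitedDict(10)
for i in range(11):  # insert one item more than the limit
    cache[i] = str(i)

# on the 11th insert, excess = 11 - 10 + 10 // 10 = 2, so the two oldest
# keys (0 and 1) are evicted and 9 entries remain
assert len(cache) == 9
assert 0 not in cache and 1 not in cache and 10 in cache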
diff --git a/pydantic/validators.py b/pydantic/validators.py
index 63b7a59e080..d4783d97b12 100644
--- a/pydantic/validators.py
+++ b/pydantic/validators.py
@@ -76,7 +76,7 @@ def strict_str_validator(v: Any) -> Union[str]:
     raise errors.StrError()


-def bytes_validator(v: Any) -> bytes:
+def bytes_validator(v: Any) -> Union[bytes]:
     if isinstance(v, bytes):
         return v
     elif isinstance(v, bytearray):
diff --git a/pydantic/version.py b/pydantic/version.py
index 5b1ebc33e98..3c885370c22 100644
--- a/pydantic/version.py
+++ b/pydantic/version.py
@@ -1,6 +1,6 @@
 __all__ = 'VERSION', 'version_info'

-VERSION = '1.9.0'
+VERSION = '1.9.1'


 def version_info() -> str:
diff --git a/requirements.txt b/requirements.txt
index 95aef085232..28ab08875b6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,8 +1,8 @@
 # requirements for compilation and from setup.py so dependabot prompts us to test with latest version of these packages
-Cython==0.29.26;sys_platform!='win32'
+Cython==0.29.28;sys_platform!='win32'
 devtools==0.8.0
-email-validator==1.1.3
+email-validator==1.2.1
 dataclasses==0.6; python_version < '3.7'
-typing-extensions==4.0.1
-python-dotenv==0.19.2
+typing-extensions==4.1.0
+python-dotenv==0.20.0
diff --git a/setup.py b/setup.py
index fb22f35fbb6..031a386f8ae 100644
--- a/setup.py
+++ b/setup.py
@@ -56,15 +56,15 @@ def extra(self):
         return '\n\n' + '\n'.join(sorted(self.links)) + '\n'


-description = 'Data validation and settings management using python 3.6 type hinting'
+description = 'Data validation and settings management using python type hints'
 THIS_DIR = Path(__file__).resolve().parent
 try:
-    history = (THIS_DIR / 'HISTORY.md').read_text()
+    history = (THIS_DIR / 'HISTORY.md').read_text(encoding='utf-8')
     history = re.sub(r'#(\d+)', r'[#\1](https://github.com/samuelcolvin/pydantic/issues/\1)', history)
     history = re.sub(r'( +)@([\w\-]+)', r'\1[@\2](https://github.com/\2)', history, flags=re.I)
     history = re.sub('@@', '@', history)
-    long_description = (THIS_DIR / 'README.md').read_text() + '\n\n' + history
+    long_description = (THIS_DIR / 'README.md').read_text(encoding='utf-8') + '\n\n' + history
 except FileNotFoundError:
     long_description = description + '.\n\nSee https://pydantic-docs.helpmanual.io/ for documentation.'

diff --git a/tests/pyright/pyproject.toml b/tests/pyright/pyproject.toml
new file mode 100644
index 00000000000..991559aeafd
--- /dev/null
+++ b/tests/pyright/pyproject.toml
@@ -0,0 +1,4 @@
+[tool.pyright]
+extraPaths = ['../..']
+reportUnnecessaryTypeIgnoreComment = true
+pythonVersion = '3.10'
diff --git a/tests/pyright/pyright_example.py b/tests/pyright/pyright_example.py
new file mode 100644
index 00000000000..0819afc3c4b
--- /dev/null
+++ b/tests/pyright/pyright_example.py
@@ -0,0 +1,38 @@
+"""
+This file is used to test pyright's ability to check pydantic code.
+
+In particular pydantic provides the `@__dataclass_transform__` decorator for `BaseModel`
+and all its subclasses (including `BaseSettings`), see #2721.
+""" + +from typing import List + +from pydantic import BaseModel, BaseSettings, Field + + +class MyModel(BaseModel): + x: str + y: List[int] + + +m1 = MyModel(x='hello', y=[1, 2, 3]) + +m2 = MyModel(x='hello') # pyright: ignore + + +class Knight(BaseModel): + title: str = Field(default='Sir Lancelot') # this is okay + age: int = Field(23) # this works fine at runtime but will case an error for pyright + + +k = Knight() # pyright: ignore + + +class Settings(BaseSettings): + x: str + y: int + + +s1 = Settings.parse_obj({}) + +s2 = Settings() # pyright: ignore[reportGeneralTypeIssues] diff --git a/tests/requirements-linting.txt b/tests/requirements-linting.txt index 9a3f0464b33..54faa414c80 100644 --- a/tests/requirements-linting.txt +++ b/tests/requirements-linting.txt @@ -1,10 +1,10 @@ -black==21.12b0 +black==22.3.0 flake8==4.0.1 flake8-quotes==3.3.1 hypothesis==6.31.6 isort==5.10.1 -mypy==0.930 -pre-commit==2.16.0 +mypy==0.950 +pre-commit==2.17.0 pycodestyle==2.8.0 pyflakes==2.4.0 -twine==3.7.1 +twine==4.0.0 diff --git a/tests/requirements-testing.txt b/tests/requirements-testing.txt index 1ee135251bb..c4bd5e5e198 100644 --- a/tests/requirements-testing.txt +++ b/tests/requirements-testing.txt @@ -2,8 +2,8 @@ coverage==6.2 hypothesis==6.31.6 # pin importlib-metadata as upper versions need typing-extensions to work if on python < 3.8 importlib-metadata==3.1.0;python_version<"3.8" -mypy==0.930 -pytest==6.2.5 +mypy==0.950 +pytest==7.0.1 pytest-cov==3.0.0 pytest-mock==3.6.1 pytest-sugar==0.9.4 diff --git a/tests/test_annotated.py b/tests/test_annotated.py index b5c27a8c52f..bf2cad4b882 100644 --- a/tests/test_annotated.py +++ b/tests/test_annotated.py @@ -1,3 +1,5 @@ +from typing import List + import pytest from typing_extensions import Annotated @@ -132,3 +134,21 @@ class Config: assert Foo.schema(by_alias=True)['properties'] == { 'a': {'title': 'A', 'description': 'descr', 'foobar': 'hello', 'type': 'integer'}, } + + +def test_annotated_alias() -> None: + # https://github.com/samuelcolvin/pydantic/issues/2971 + + StrAlias = Annotated[str, Field(max_length=3)] + IntAlias = Annotated[int, Field(default_factory=lambda: 2)] + + Nested = Annotated[List[StrAlias], Field(description='foo')] + + class MyModel(BaseModel): + a: StrAlias = 'abc' + b: StrAlias + c: IntAlias + d: IntAlias + e: Nested + + assert MyModel(b='def', e=['xyz']) == MyModel(a='abc', b='def', c=2, d=2, e=['xyz']) diff --git a/tests/test_construction.py b/tests/test_construction.py index 19e912b398e..e7b12a0a76d 100644 --- a/tests/test_construction.py +++ b/tests/test_construction.py @@ -63,8 +63,8 @@ class Model(BaseModel): a: bytes b: str - content_bytes = b'x' * (2 ** 16 + 1) - content_str = 'x' * (2 ** 16 + 1) + content_bytes = b'x' * (2**16 + 1) + content_str = 'x' * (2**16 + 1) m = Model(a=content_bytes, b=content_str) assert m.a == content_bytes assert m.b == content_str diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py index e99a9c72343..ef5968ce07f 100644 --- a/tests/test_dataclasses.py +++ b/tests/test_dataclasses.py @@ -989,3 +989,11 @@ def __new__(cls, *args, **kwargs): instance = cls(a=test_string) assert instance._special_property == 1 assert instance.a == test_string + + +def test_self_reference_dataclass(): + @pydantic.dataclasses.dataclass + class MyDataclass: + self_reference: 'MyDataclass' + + assert MyDataclass.__pydantic_model__.__fields__['self_reference'].type_ is MyDataclass diff --git a/tests/test_discrimated_union.py b/tests/test_discrimated_union.py index c7cd5f4e5ad..33fed5fb5ce 100644 
--- a/tests/test_discrimated_union.py +++ b/tests/test_discrimated_union.py @@ -12,12 +12,23 @@ def test_discriminated_union_only_union(): - with pytest.raises(TypeError, match='`discriminator` can only be used with `Union` type'): + with pytest.raises( + TypeError, match='`discriminator` can only be used with `Union` type with more than one variant' + ): class Model(BaseModel): x: str = Field(..., discriminator='qwe') +def test_discriminated_union_single_variant(): + with pytest.raises( + TypeError, match='`discriminator` can only be used with `Union` type with more than one variant' + ): + + class Model(BaseModel): + x: Union[str] = Field(..., discriminator='qwe') + + def test_discriminated_union_invalid_type(): with pytest.raises(TypeError, match="Type 'str' is not a valid `BaseModel` or `dataclass`"): diff --git a/tests/test_edge_cases.py b/tests/test_edge_cases.py index 227881b8f30..be4c17a9fc2 100644 --- a/tests/test_edge_cases.py +++ b/tests/test_edge_cases.py @@ -1909,3 +1909,43 @@ class Config: arbitrary_types_allowed = True assert Model().x == Foo() + + +def test_bytes_subclass(): + class MyModel(BaseModel): + my_bytes: bytes + + class BytesSubclass(bytes): + def __new__(cls, data: bytes): + self = bytes.__new__(cls, data) + return self + + m = MyModel(my_bytes=BytesSubclass(b'foobar')) + assert m.my_bytes.__class__ == BytesSubclass + + +def test_int_subclass(): + class MyModel(BaseModel): + my_int: int + + class IntSubclass(int): + def __new__(cls, data: int): + self = int.__new__(cls, data) + return self + + m = MyModel(my_int=IntSubclass(123)) + assert m.my_int.__class__ == IntSubclass + + +def test_model_issubclass(): + assert not issubclass(int, BaseModel) + + class MyModel(BaseModel): + x: int + + assert issubclass(MyModel, BaseModel) + + class Custom: + __fields__ = True + + assert not issubclass(Custom, BaseModel) diff --git a/tests/test_fastapi.sh b/tests/test_fastapi.sh index cfaca2c5859..ff5a3e3ad84 100755 --- a/tests/test_fastapi.sh +++ b/tests/test_fastapi.sh @@ -12,4 +12,4 @@ git checkout "${latest_tag}" pip install -U flit flit install -PYTHONPATH=./docs/src pytest +./scripts/test.sh diff --git a/tests/test_forward_ref.py b/tests/test_forward_ref.py index df378810cd2..1580cb2e5bc 100644 --- a/tests/test_forward_ref.py +++ b/tests/test_forward_ref.py @@ -683,3 +683,61 @@ class Config: m = module.User(name='anne', friends=[{'name': 'ben'}, {'name': 'charlie'}]) assert m.json(models_as_dict=False) == '{"name": "anne", "friends": ["User(ben)", "User(charlie)"]}' + + +skip_pep585 = pytest.mark.skipif( + sys.version_info < (3, 9), reason='PEP585 generics only supported for python 3.9 and above' +) + + +@skip_pep585 +def test_pep585_self_referencing_generics(): + class SelfReferencing(BaseModel): + names: list['SelfReferencing'] # noqa: F821 + + SelfReferencing.update_forward_refs() # will raise an exception if the forward ref isn't resolvable + # test the class + assert SelfReferencing.__fields__['names'].type_ is SelfReferencing + # NOTE: outer_type_ is not converted + assert SelfReferencing.__fields__['names'].outer_type_ == list['SelfReferencing'] + # test that object creation works + obj = SelfReferencing(names=[SelfReferencing(names=[])]) + assert obj.names == [SelfReferencing(names=[])] + + +@skip_pep585 +def test_pep585_recursive_generics(create_module): + @create_module + def module(): + from pydantic import BaseModel + + class Team(BaseModel): + name: str + heroes: list['Hero'] # noqa: F821 + + class Hero(BaseModel): + name: str + teams: list[Team] + + 
Team.update_forward_refs() + + assert module.Team.__fields__['heroes'].type_ is module.Hero + assert module.Hero.__fields__['teams'].type_ is module.Team + + module.Hero(name='Ivan', teams=[module.Team(name='TheBest', heroes=[])]) + + +@pytest.mark.skipif(sys.version_info < (3, 9), reason='needs 3.9 or newer') +def test_class_var_forward_ref(create_module): + # see #3679 + create_module( + # language=Python + """ +from __future__ import annotations +from typing import ClassVar +from pydantic import BaseModel + +class WithClassVar(BaseModel): + Instances: ClassVar[dict[str, WithClassVar]] = {} +""" + ) diff --git a/tests/test_hypothesis_plugin.py b/tests/test_hypothesis_plugin.py index 95dc59c979e..ad3538826d8 100644 --- a/tests/test_hypothesis_plugin.py +++ b/tests/test_hypothesis_plugin.py @@ -108,7 +108,7 @@ class EmailsModel(pydantic.BaseModel): @pytest.mark.parametrize('model', gen_models()) -@settings(suppress_health_check={HealthCheck.too_slow}) +@settings(suppress_health_check={HealthCheck.too_slow}, deadline=None) @given(data=st.data()) def test_can_construct_models_with_all_fields(data, model): # The value of this test is to confirm that Hypothesis knows how to provide diff --git a/tests/test_main.py b/tests/test_main.py index 71da72544b2..8cf290cbebb 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -433,13 +433,13 @@ class Foo(BaseModel): x: int def __hash__(self): - return self.x ** 2 + return self.x**2 class Bar(Foo): y: int def __hash__(self): - return self.y ** 3 + return self.y**3 class Buz(Bar): z: int @@ -1561,12 +1561,28 @@ class Config: assert t.user is not my_user assert t.user.hobbies == ['scuba diving'] - assert t.user.hobbies is my_user.hobbies # `Config.copy_on_model_validation` only does a shallow copy + assert t.user.hobbies is not my_user.hobbies # `Config.copy_on_model_validation` does a deep copy assert t.user._priv == 13 assert t.user.password.get_secret_value() == 'hashedpassword' assert t.dict() == {'id': '1234567890', 'user': {'id': 42, 'hobbies': ['scuba diving']}} +def test_validation_deep_copy(): + """By default, Config.copy_on_model_validation should do a deep copy""" + + class A(BaseModel): + name: str + + class B(BaseModel): + list_a: List[A] + + a = A(name='a') + b = B(list_a=[a]) + assert b.list_a == [A(name='a')] + a.name = 'b' + assert b.list_a == [A(name='a')] + + @pytest.mark.parametrize( 'kinds', [ diff --git a/tests/test_networks.py b/tests/test_networks.py index 5ced4ac4c69..4d768b20fa9 100644 --- a/tests/test_networks.py +++ b/tests/test_networks.py @@ -2,6 +2,7 @@ from pydantic import ( AmqpDsn, + AnyHttpUrl, AnyUrl, BaseModel, EmailStr, @@ -564,6 +565,41 @@ def test_build_url(kwargs, expected): assert AnyUrl(None, **kwargs) == expected +@pytest.mark.parametrize( + 'kwargs,expected', + [ + (dict(scheme='http', host='example.net'), 'http://example.net'), + (dict(scheme='https', host='example.net'), 'https://example.net'), + (dict(scheme='http', user='foo', host='example.net'), 'http://foo@example.net'), + (dict(scheme='https', user='foo', host='example.net'), 'https://foo@example.net'), + (dict(scheme='http', user='foo', host='example.net', port='123'), 'http://foo@example.net:123'), + (dict(scheme='https', user='foo', host='example.net', port='123'), 'https://foo@example.net:123'), + (dict(scheme='http', user='foo', password='x', host='example.net'), 'http://foo:x@example.net'), + (dict(scheme='http2', user='foo', password='x', host='example.net'), 'http2://foo:x@example.net'), + (dict(scheme='http', host='example.net', 
query='a=b', fragment='c=d'), 'http://example.net?a=b#c=d'), + (dict(scheme='http2', host='example.net', query='a=b', fragment='c=d'), 'http2://example.net?a=b#c=d'), + (dict(scheme='http', host='example.net', port='1234'), 'http://example.net:1234'), + (dict(scheme='https', host='example.net', port='1234'), 'https://example.net:1234'), + ], +) +@pytest.mark.parametrize('klass', [AnyHttpUrl, HttpUrl]) +def test_build_any_http_url(klass, kwargs, expected): + assert klass(None, **kwargs) == expected + + +@pytest.mark.parametrize( + 'klass, kwargs,expected', + [ + (AnyHttpUrl, dict(scheme='http', user='foo', host='example.net', port='80'), 'http://foo@example.net:80'), + (AnyHttpUrl, dict(scheme='https', user='foo', host='example.net', port='443'), 'https://foo@example.net:443'), + (HttpUrl, dict(scheme='http', user='foo', host='example.net', port='80'), 'http://foo@example.net'), + (HttpUrl, dict(scheme='https', user='foo', host='example.net', port='443'), 'https://foo@example.net'), + ], +) +def test_build_http_url_port(klass, kwargs, expected): + assert klass(None, **kwargs) == expected + + def test_son(): class Model(BaseModel): v: HttpUrl diff --git a/tests/test_networks_ipaddress.py b/tests/test_networks_ipaddress.py index cd1351b5928..c73c512307d 100644 --- a/tests/test_networks_ipaddress.py +++ b/tests/test_networks_ipaddress.py @@ -114,7 +114,7 @@ class Model(BaseModel): [{'loc': ('ip',), 'msg': 'value is not a valid IPv4 or IPv6 address', 'type': 'value_error.ipvanyaddress'}], ), ( - 2 ** 128 + 1, + 2**128 + 1, [{'loc': ('ip',), 'msg': 'value is not a valid IPv4 or IPv6 address', 'type': 'value_error.ipvanyaddress'}], ), ], @@ -141,7 +141,7 @@ class Model(BaseModel): ), (-1, [{'loc': ('ipv4',), 'msg': 'value is not a valid IPv4 address', 'type': 'value_error.ipv4address'}]), ( - 2 ** 32 + 1, + 2**32 + 1, [{'loc': ('ipv4',), 'msg': 'value is not a valid IPv4 address', 'type': 'value_error.ipv4address'}], ), ( @@ -172,7 +172,7 @@ class Model(BaseModel): ), (-1, [{'loc': ('ipv6',), 'msg': 'value is not a valid IPv6 address', 'type': 'value_error.ipv6address'}]), ( - 2 ** 128 + 1, + 2**128 + 1, [{'loc': ('ipv6',), 'msg': 'value is not a valid IPv6 address', 'type': 'value_error.ipv6address'}], ), ( @@ -203,7 +203,7 @@ class Model(BaseModel): ('192.168.0.0/24', IPv4Network), ('192.168.128.0/30', IPv4Network), ('2001:db00::0/120', IPv6Network), - (2 ** 32 - 1, IPv4Network), # no mask equals to mask /32 + (2**32 - 1, IPv4Network), # no mask equals to mask /32 (20_282_409_603_651_670_423_947_251_286_015, IPv6Network), # /128 (b'\xff\xff\xff\xff', IPv4Network), # /32 (b'\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff', IPv6Network), @@ -224,7 +224,7 @@ class Model(BaseModel): [ ('192.168.0.0/24', IPv4Network), ('192.168.128.0/30', IPv4Network), - (2 ** 32 - 1, IPv4Network), # no mask equals to mask /32 + (2**32 - 1, IPv4Network), # no mask equals to mask /32 (b'\xff\xff\xff\xff', IPv4Network), # /32 (('192.168.0.0', 24), IPv4Network), (IPv4Network('192.168.0.0/24'), IPv4Network), @@ -270,7 +270,7 @@ class Model(BaseModel): [{'loc': ('ip',), 'msg': 'value is not a valid IPv4 or IPv6 network', 'type': 'value_error.ipvanynetwork'}], ), ( - 2 ** 128 + 1, + 2**128 + 1, [{'loc': ('ip',), 'msg': 'value is not a valid IPv4 or IPv6 network', 'type': 'value_error.ipvanynetwork'}], ), ], @@ -297,7 +297,7 @@ class Model(BaseModel): ), (-1, [{'loc': ('ip',), 'msg': 'value is not a valid IPv4 network', 'type': 'value_error.ipv4network'}]), ( - 2 ** 128 + 1, + 2**128 + 1, [{'loc': ('ip',), 
'msg': 'value is not a valid IPv4 network', 'type': 'value_error.ipv4network'}], ), ( @@ -328,7 +328,7 @@ class Model(BaseModel): ), (-1, [{'loc': ('ip',), 'msg': 'value is not a valid IPv6 network', 'type': 'value_error.ipv6network'}]), ( - 2 ** 128 + 1, + 2**128 + 1, [{'loc': ('ip',), 'msg': 'value is not a valid IPv6 network', 'type': 'value_error.ipv6network'}], ), ( @@ -362,8 +362,8 @@ class Model(BaseModel): ('192.168.128.1/30', IPv4Interface), ('2001:db00::0/120', IPv6Interface), ('2001:db00::1/120', IPv6Interface), - (2 ** 32 - 1, IPv4Interface), # no mask equals to mask /32 - (2 ** 32 - 1, IPv4Interface), # so ``strict`` has no effect + (2**32 - 1, IPv4Interface), # no mask equals to mask /32 + (2**32 - 1, IPv4Interface), # so ``strict`` has no effect (20_282_409_603_651_670_423_947_251_286_015, IPv6Interface), # /128 (20_282_409_603_651_670_423_947_251_286_014, IPv6Interface), (b'\xff\xff\xff\xff', IPv4Interface), # /32 @@ -394,8 +394,8 @@ class Model(BaseModel): ('192.168.0.1/24', IPv4Interface), ('192.168.128.0/30', IPv4Interface), ('192.168.128.1/30', IPv4Interface), - (2 ** 32 - 1, IPv4Interface), # no mask equals to mask /32 - (2 ** 32 - 1, IPv4Interface), # so ``strict`` has no effect + (2**32 - 1, IPv4Interface), # no mask equals to mask /32 + (2**32 - 1, IPv4Interface), # so ``strict`` has no effect (b'\xff\xff\xff\xff', IPv4Interface), # /32 (b'\xff\xff\xff\xff', IPv4Interface), (('192.168.0.0', 24), IPv4Interface), @@ -467,7 +467,7 @@ class Model(BaseModel): ], ), ( - 2 ** 128 + 1, + 2**128 + 1, [ { 'loc': ('ip',), @@ -500,7 +500,7 @@ class Model(BaseModel): ), (-1, [{'loc': ('ip',), 'msg': 'value is not a valid IPv4 interface', 'type': 'value_error.ipv4interface'}]), ( - 2 ** 128 + 1, + 2**128 + 1, [{'loc': ('ip',), 'msg': 'value is not a valid IPv4 interface', 'type': 'value_error.ipv4interface'}], ), ], @@ -527,7 +527,7 @@ class Model(BaseModel): ), (-1, [{'loc': ('ip',), 'msg': 'value is not a valid IPv6 interface', 'type': 'value_error.ipv6interface'}]), ( - 2 ** 128 + 1, + 2**128 + 1, [{'loc': ('ip',), 'msg': 'value is not a valid IPv6 interface', 'type': 'value_error.ipv6interface'}], ), ], diff --git a/tests/test_schema.py b/tests/test_schema.py index eb1f081e828..9369ef74671 100644 --- a/tests/test_schema.py +++ b/tests/test_schema.py @@ -799,7 +799,7 @@ class Model(BaseModel): @pytest.mark.parametrize( 'field_type,expected_schema', [ - (AnyUrl, {'title': 'A', 'type': 'string', 'format': 'uri', 'minLength': 1, 'maxLength': 2 ** 16}), + (AnyUrl, {'title': 'A', 'type': 'string', 'format': 'uri', 'minLength': 1, 'maxLength': 2**16}), ( stricturl(min_length=5, max_length=10), {'title': 'A', 'type': 'string', 'format': 'uri', 'minLength': 5, 'maxLength': 10}, @@ -2899,3 +2899,120 @@ class Model(BaseModel): }, }, } + + +def test_nested_python_dataclasses(): + """ + Test schema generation for nested python dataclasses + """ + + from dataclasses import dataclass as python_dataclass + + @python_dataclass + class ChildModel: + name: str + + @python_dataclass + class NestedModel: + child: List[ChildModel] + + assert model_schema(dataclass(NestedModel)) == { + 'title': 'NestedModel', + 'type': 'object', + 'properties': {'child': {'title': 'Child', 'type': 'array', 'items': {'$ref': '#/definitions/ChildModel'}}}, + 'required': ['child'], + 'definitions': { + 'ChildModel': { + 'title': 'ChildModel', + 'type': 'object', + 'properties': {'name': {'title': 'Name', 'type': 'string'}}, + 'required': ['name'], + } + }, + } + + +def test_discriminated_union_in_list(): + class 
BlackCat(BaseModel): + pet_type: Literal['cat'] + color: Literal['black'] + black_name: str + + class WhiteCat(BaseModel): + pet_type: Literal['cat'] + color: Literal['white'] + white_name: str + + Cat = Annotated[Union[BlackCat, WhiteCat], Field(discriminator='color')] + + class Dog(BaseModel): + pet_type: Literal['dog'] + name: str + + Pet = Annotated[Union[Cat, Dog], Field(discriminator='pet_type')] + + class Model(BaseModel): + pets: Pet + n: int + + assert Model.schema() == { + 'title': 'Model', + 'type': 'object', + 'properties': { + 'pets': { + 'title': 'Pets', + 'discriminator': { + 'propertyName': 'pet_type', + 'mapping': { + 'cat': { + 'BlackCat': {'$ref': '#/definitions/BlackCat'}, + 'WhiteCat': {'$ref': '#/definitions/WhiteCat'}, + }, + 'dog': '#/definitions/Dog', + }, + }, + 'anyOf': [ + { + 'anyOf': [ + {'$ref': '#/definitions/BlackCat'}, + {'$ref': '#/definitions/WhiteCat'}, + ], + }, + {'$ref': '#/definitions/Dog'}, + ], + }, + 'n': {'title': 'N', 'type': 'integer'}, + }, + 'required': ['pets', 'n'], + 'definitions': { + 'BlackCat': { + 'title': 'BlackCat', + 'type': 'object', + 'properties': { + 'pet_type': {'title': 'Pet Type', 'enum': ['cat'], 'type': 'string'}, + 'color': {'title': 'Color', 'enum': ['black'], 'type': 'string'}, + 'black_name': {'title': 'Black Name', 'type': 'string'}, + }, + 'required': ['pet_type', 'color', 'black_name'], + }, + 'WhiteCat': { + 'title': 'WhiteCat', + 'type': 'object', + 'properties': { + 'pet_type': {'title': 'Pet Type', 'enum': ['cat'], 'type': 'string'}, + 'color': {'title': 'Color', 'enum': ['white'], 'type': 'string'}, + 'white_name': {'title': 'White Name', 'type': 'string'}, + }, + 'required': ['pet_type', 'color', 'white_name'], + }, + 'Dog': { + 'title': 'Dog', + 'type': 'object', + 'properties': { + 'pet_type': {'title': 'Pet Type', 'enum': ['dog'], 'type': 'string'}, + 'name': {'title': 'Name', 'type': 'string'}, + }, + 'required': ['pet_type', 'name'], + }, + }, + } diff --git a/tests/test_types.py b/tests/test_types.py index bbf4c23b1e4..1e9b231b22d 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -1144,7 +1144,7 @@ class Model(BaseModel): ([1, 2, '3'], [1, 2, '3']), ((1, 2, '3'), [1, 2, '3']), ({1, 2, '3'}, list({1, 2, '3'})), - ((i ** 2 for i in range(5)), [0, 1, 4, 9, 16]), + ((i**2 for i in range(5)), [0, 1, 4, 9, 16]), ((deque((1, 2, 3)), list(deque((1, 2, 3))))), ), ) @@ -1184,7 +1184,7 @@ class Model(BaseModel): ([1, 2, '3'], (1, 2, '3')), ((1, 2, '3'), (1, 2, '3')), ({1, 2, '3'}, tuple({1, 2, '3'})), - ((i ** 2 for i in range(5)), (0, 1, 4, 9, 16)), + ((i**2 for i in range(5)), (0, 1, 4, 9, 16)), (deque([1, 2, 3]), (1, 2, 3)), ), ) @@ -1210,7 +1210,7 @@ class Model(BaseModel): ( ([1, 2, '3'], int, (1, 2, 3)), ((1, 2, '3'), int, (1, 2, 3)), - ((i ** 2 for i in range(5)), int, (0, 1, 4, 9, 16)), + ((i**2 for i in range(5)), int, (0, 1, 4, 9, 16)), (('a', 'b', 'c'), str, ('a', 'b', 'c')), ), ) @@ -1250,7 +1250,7 @@ class Model(BaseModel): ({1, 2, 2, '3'}, {1, 2, '3'}), ((1, 2, 2, '3'), {1, 2, '3'}), ([1, 2, 2, '3'], {1, 2, '3'}), - ({i ** 2 for i in range(5)}, {0, 1, 4, 9, 16}), + ({i**2 for i in range(5)}, {0, 1, 4, 9, 16}), ), ) def test_set_success(value, result): diff --git a/tests/test_typing.py b/tests/test_typing.py index af0b8d96955..7d2b703d0d7 100644 --- a/tests/test_typing.py +++ b/tests/test_typing.py @@ -1,9 +1,12 @@ +import sys from collections import namedtuple -from typing import Callable as TypingCallable, NamedTuple +from typing import Any, Callable as TypingCallable, Dict, List, 
NamedTuple, NewType, Union # noqa: F401 import pytest +from typing_extensions import Annotated # noqa: F401 -from pydantic.typing import Literal, is_namedtuple, is_none_type, is_typeddict +from pydantic import Field # noqa: F401 +from pydantic.typing import Literal, convert_generics, is_namedtuple, is_none_type, is_typeddict try: from typing import TypedDict as typing_TypedDict @@ -21,6 +24,12 @@ except ImportError: mypy_extensions_TypedDict = None +try: + from typing import ForwardRef +except ImportError: + # ForwardRef is only available in Python 3.6+ + pass + ALL_TYPEDDICT_KINDS = (typing_TypedDict, typing_extensions_TypedDict, mypy_extensions_TypedDict) @@ -66,3 +75,58 @@ def test_is_none_type(): # `collections.abc.Callable` (even with python >= 3.9) as they behave # differently assert is_none_type(TypingCallable) is False + + +class Hero: + pass + + +class Team: + pass + + +@pytest.mark.skipif(sys.version_info < (3, 9), reason='PEP585 generics only supported for python 3.9 and above.') +@pytest.mark.parametrize( + ['type_', 'expectations'], + [ + ('int', 'int'), + ('Union[list["Hero"], int]', 'Union[list[ForwardRef("Hero")], int]'), + ('list["Hero"]', 'list[ForwardRef("Hero")]'), + ('dict["Hero", "Team"]', 'dict[ForwardRef("Hero"), ForwardRef("Team")]'), + ('dict["Hero", list["Team"]]', 'dict[ForwardRef("Hero"), list[ForwardRef("Team")]]'), + ('dict["Hero", List["Team"]]', 'dict[ForwardRef("Hero"), List[ForwardRef("Team")]]'), + ('Dict["Hero", list["Team"]]', 'Dict[ForwardRef("Hero"), list[ForwardRef("Team")]]'), + ( + 'Annotated[list["Hero"], Field(min_length=2)]', + 'Annotated[list[ForwardRef("Hero")], Field(min_length=2)]', + ), + ], +) +def test_convert_generics(type_, expectations): + assert str(convert_generics(eval(type_))) == str(eval(expectations)) + + +@pytest.mark.skipif(sys.version_info < (3, 10), reason='NewType class was added in python 3.10.') +def test_convert_generics_unsettable_args(): + class User(NewType): + + __origin__ = type(list[str]) + __args__ = (list['Hero'],) + + def __init__(self, name: str, tp: type) -> None: + super().__init__(name, tp) + + def __setattr__(self, __name: str, __value: Any) -> None: + if __name == '__args__': + raise AttributeError # will be thrown during the generics conversion + return super().__setattr__(__name, __value) + + # tests that convert_generics will not throw an exception even if __args__ isn't settable + assert convert_generics(User('MyUser', str)).__args__ == (list['Hero'],) + + +@pytest.mark.skipif(sys.version_info < (3, 10), reason='PEP604 unions only supported for python 3.10 and above.') +def test_convert_generics_pep604(): + assert ( + convert_generics(dict['Hero', list['Team']] | int) == dict[ForwardRef('Hero'), list[ForwardRef('Team')]] | int + ) diff --git a/tests/test_utils.py b/tests/test_utils.py index 9c0ab4fb942..e27a2d9b990 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -28,6 +28,7 @@ from pydantic.utils import ( BUILTIN_COLLECTIONS, ClassAttribute, + LimitedDict, ValueItems, all_identical, deep_update, @@ -526,3 +527,43 @@ def test_all_identical(): def test_undefined_pickle(): undefined2 = pickle.loads(pickle.dumps(Undefined)) assert undefined2 is Undefined + + +def test_limited_dict(): + d = LimitedDict(10) + d[1] = '1' + d[2] = '2' + assert list(d.items()) == [(1, '1'), (2, '2')] + for no in '34567890': + d[int(no)] = no + assert list(d.items()) == [ + (1, '1'), + (2, '2'), + (3, '3'), + (4, '4'), + (5, '5'), + (6, '6'), + (7, '7'), + (8, '8'), + (9, '9'), + (0, '0'), + ] + d[11] = '11' + + # 
reduce size to 9 after setting d[11]
+    assert len(d) == 9
+    assert list(d.items()) == [
+        (3, '3'),
+        (4, '4'),
+        (5, '5'),
+        (6, '6'),
+        (7, '7'),
+        (8, '8'),
+        (9, '9'),
+        (0, '0'),
+        (11, '11'),
+    ]
+    d[12] = '12'
+    assert len(d) == 10
+    d[13] = '13'
+    assert len(d) == 9
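# A short sketch of the `copy_on_model_validation` change in pydantic/main.py
# above: nested models are now deep-copied during validation, so mutating the
# original afterwards no longer leaks into the validated parent (assumes
# pydantic 1.9.1; `Item`/`Basket` are illustrative names):
from typing import List

from pydantic import BaseModel

class Item(BaseModel):
    name: str

class Basket(BaseModel):
    items: List[Item]

item = Item(name='apple')
basket = Basket(items=[item])
item.name = 'pear'  # mutate the original after validation
assert basket.items[0].name == 'apple'  # the validated copy is unaffected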
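# A sketch of the `AnyUrl.build` port handling change in pydantic/networks.py
# (assumes pydantic 1.9.1, mirroring the test data above): `HttpUrl` still
# hides a port that matches the scheme default, but an explicit non-default
# port is now kept, while `AnyHttpUrl` keeps the port either way.
from pydantic import AnyHttpUrl, HttpUrl

assert HttpUrl(None, scheme='https', host='example.net', port='443') == 'https://example.net'
assert HttpUrl(None, scheme='https', host='example.net', port='1234') == 'https://example.net:1234'
assert AnyHttpUrl(None, scheme='https', host='example.net', port='443') == 'https://example.net:443'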
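# A sketch of the dataclass forward-reference fix (`__resolve_forward_refs__`
# plus `__try_update_forward_refs__` in the pydantic/dataclasses.py hunk):
# a pydantic dataclass can now refer to itself by name, exactly as exercised
# by test_self_reference_dataclass above (assumes pydantic 1.9.1):
import pydantic

@pydantic.dataclasses.dataclass
class Node:
    child: 'Node'

assert Node.__pydantic_model__.__fields__['child'].type_ is Node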
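# A sketch of the new `__instancecheck__` fast path (see #3829 in the
# pydantic/main.py hunk): isinstance() checks against pydantic models now bail
# out early for objects without `__fields__` instead of always invoking the
# slower ABC machinery (assumes pydantic 1.9.1):
from pydantic import BaseModel

class MyModel(BaseModel):
    x: int

assert isinstance(MyModel(x=1), BaseModel)
assert not isinstance(object(), BaseModel)  # short-circuits on the missing __fields__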